-rw-r--r--  .gitignore | 6
-rw-r--r--  LICENSE | 2
-rw-r--r--  NEWS | 346
-rw-r--r--  build/root.build | 7
-rw-r--r--  build2/b.cxx | 369
-rw-r--r--  build2/buildfile | 4
m---------  config | 0
-rw-r--r--  doc/.gitignore | 4
-rw-r--r--  doc/buildfile | 278
-rwxr-xr-x  doc/cli.sh | 28
-rw-r--r--  doc/manual.cli | 2319
-rw-r--r--  doc/testscript.cli | 55
-rw-r--r--  libbuild2/adhoc-rule-buildscript.cxx | 1031
-rw-r--r--  libbuild2/adhoc-rule-buildscript.hxx | 24
-rw-r--r--  libbuild2/adhoc-rule-cxx.cxx | 86
-rw-r--r--  libbuild2/adhoc-rule-cxx.hxx | 26
-rw-r--r--  libbuild2/adhoc-rule-regex-pattern.cxx | 135
-rw-r--r--  libbuild2/adhoc-rule-regex-pattern.hxx | 4
-rw-r--r--  libbuild2/algorithm.cxx | 1144
-rw-r--r--  libbuild2/algorithm.hxx | 253
-rw-r--r--  libbuild2/algorithm.ixx | 237
-rw-r--r--  libbuild2/b-cmdline.cxx | 38
-rw-r--r--  libbuild2/b-cmdline.hxx | 1
-rw-r--r--  libbuild2/b-options.cxx | 247
-rw-r--r--  libbuild2/b-options.hxx | 50
-rw-r--r--  libbuild2/b-options.ixx | 60
-rw-r--r--  libbuild2/b.cli | 135
-rw-r--r--  libbuild2/bash/rule.cxx | 14
-rw-r--r--  libbuild2/bash/rule.hxx | 2
-rw-r--r--  libbuild2/bin/def-rule.cxx | 10
-rw-r--r--  libbuild2/bin/init.cxx | 138
-rw-r--r--  libbuild2/bin/init.hxx | 6
-rw-r--r--  libbuild2/bin/target.cxx | 6
-rw-r--r--  libbuild2/bin/target.hxx | 15
-rw-r--r--  libbuild2/bin/utility.cxx | 4
-rw-r--r--  libbuild2/build/script/builtin-options.cxx | 72
-rw-r--r--  libbuild2/build/script/builtin-options.hxx | 68
-rw-r--r--  libbuild2/build/script/builtin-options.ixx | 120
-rw-r--r--  libbuild2/build/script/builtin.cli | 72
-rw-r--r--  libbuild2/build/script/parser.cxx | 908
-rw-r--r--  libbuild2/build/script/parser.hxx | 55
-rw-r--r--  libbuild2/build/script/runner.cxx | 35
-rw-r--r--  libbuild2/build/script/script.cxx | 44
-rw-r--r--  libbuild2/build/script/script.hxx | 3
-rw-r--r--  libbuild2/buildfile | 182
-rw-r--r--  libbuild2/buildspec.cxx | 4
-rw-r--r--  libbuild2/c/init.cxx | 287
-rw-r--r--  libbuild2/c/init.hxx | 21
-rw-r--r--  libbuild2/c/target.hxx | 2
-rw-r--r--  libbuild2/cc/buildfile | 14
-rw-r--r--  libbuild2/cc/common.cxx | 229
-rw-r--r--  libbuild2/cc/common.hxx | 63
-rw-r--r--  libbuild2/cc/common.txx | 15
-rw-r--r--  libbuild2/cc/compile-rule.cxx | 1365
-rw-r--r--  libbuild2/cc/compile-rule.hxx | 5
-rw-r--r--  libbuild2/cc/functions.cxx | 4
-rw-r--r--  libbuild2/cc/gcc.cxx | 177
-rw-r--r--  libbuild2/cc/guess.cxx | 125
-rw-r--r--  libbuild2/cc/init.cxx | 29
-rw-r--r--  libbuild2/cc/install-rule.cxx | 524
-rw-r--r--  libbuild2/cc/install-rule.hxx | 45
-rw-r--r--  libbuild2/cc/lexer+comment.test.testscript | 5
-rw-r--r--  libbuild2/cc/lexer+raw-string-literal.test.testscript | 2
-rw-r--r--  libbuild2/cc/lexer.cxx | 34
-rw-r--r--  libbuild2/cc/lexer.hxx | 30
-rw-r--r--  libbuild2/cc/lexer.test.cxx | 2
-rw-r--r--  libbuild2/cc/link-rule.cxx | 215
-rw-r--r--  libbuild2/cc/module.cxx | 149
-rw-r--r--  libbuild2/cc/module.hxx | 15
-rw-r--r--  libbuild2/cc/msvc.cxx | 129
-rw-r--r--  libbuild2/cc/parser.cxx | 24
-rw-r--r--  libbuild2/cc/parser.hxx | 11
-rw-r--r--  libbuild2/cc/parser.test.cxx | 2
-rw-r--r--  libbuild2/cc/pkgconfig-libpkgconf.cxx | 7
-rw-r--r--  libbuild2/cc/pkgconfig.cxx | 440
-rw-r--r--  libbuild2/cc/pkgconfig.hxx | 8
-rw-r--r--  libbuild2/cc/predefs-rule.cxx | 379
-rw-r--r--  libbuild2/cc/predefs-rule.hxx | 45
-rw-r--r--  libbuild2/cc/std.cppm | 6781
-rw-r--r--  libbuild2/cc/target.cxx | 27
-rw-r--r--  libbuild2/cc/target.hxx | 35
-rw-r--r--  libbuild2/cc/types.cxx | 15
-rw-r--r--  libbuild2/cc/windows-rpath.cxx | 23
-rw-r--r--  libbuild2/cli/buildfile | 71
-rw-r--r--  libbuild2/cli/export.hxx | 37
-rw-r--r--  libbuild2/cli/init.cxx (renamed from build2/cli/init.cxx) | 10
-rw-r--r--  libbuild2/cli/init.hxx (renamed from build2/cli/init.hxx) | 12
-rw-r--r--  libbuild2/cli/module.hxx (renamed from build2/cli/module.hxx) | 10
-rw-r--r--  libbuild2/cli/rule.cxx (renamed from build2/cli/rule.cxx) | 6
-rw-r--r--  libbuild2/cli/rule.hxx (renamed from build2/cli/rule.hxx) | 13
-rw-r--r--  libbuild2/cli/target.cxx (renamed from build2/cli/target.cxx) | 4
-rw-r--r--  libbuild2/cli/target.hxx (renamed from build2/cli/target.hxx) | 15
-rw-r--r--  libbuild2/common-options.cxx | 76
-rw-r--r--  libbuild2/common-options.hxx | 26
-rw-r--r--  libbuild2/config/functions.cxx | 2
-rw-r--r--  libbuild2/config/host-config.cxx.in | 3
-rw-r--r--  libbuild2/config/init.cxx | 53
-rw-r--r--  libbuild2/config/module.hxx | 2
-rw-r--r--  libbuild2/config/operation.cxx | 77
-rw-r--r--  libbuild2/config/operation.hxx | 6
-rw-r--r--  libbuild2/context.cxx | 578
-rw-r--r--  libbuild2/context.hxx | 114
-rw-r--r--  libbuild2/context.ixx | 8
-rw-r--r--  libbuild2/cxx/init.cxx | 575
-rw-r--r--  libbuild2/cxx/init.hxx | 18
-rw-r--r--  libbuild2/cxx/target.cxx | 13
-rw-r--r--  libbuild2/cxx/target.hxx | 20
-rw-r--r--  libbuild2/diagnostics.cxx | 58
-rw-r--r--  libbuild2/diagnostics.hxx | 18
-rw-r--r--  libbuild2/dist/init.cxx | 29
-rw-r--r--  libbuild2/dist/operation.cxx | 354
-rw-r--r--  libbuild2/dist/rule.cxx | 33
-rw-r--r--  libbuild2/dump.cxx | 1039
-rw-r--r--  libbuild2/dump.hxx | 32
-rw-r--r--  libbuild2/dyndep.cxx | 316
-rw-r--r--  libbuild2/dyndep.hxx | 88
-rw-r--r--  libbuild2/file-cache.hxx | 8
-rw-r--r--  libbuild2/file-cache.ixx | 8
-rw-r--r--  libbuild2/file.cxx | 545
-rw-r--r--  libbuild2/file.hxx | 92
-rw-r--r--  libbuild2/file.ixx | 20
-rw-r--r--  libbuild2/filesystem.cxx | 2
-rw-r--r--  libbuild2/filesystem.hxx | 2
-rw-r--r--  libbuild2/function.cxx | 9
-rw-r--r--  libbuild2/function.hxx | 9
-rw-r--r--  libbuild2/function.test.cxx | 2
-rw-r--r--  libbuild2/functions-bool.cxx | 6
-rw-r--r--  libbuild2/functions-builtin.cxx | 118
-rw-r--r--  libbuild2/functions-filesystem.cxx | 38
-rw-r--r--  libbuild2/functions-integer.cxx | 66
-rw-r--r--  libbuild2/functions-json.cxx | 335
-rw-r--r--  libbuild2/functions-name.cxx | 246
-rw-r--r--  libbuild2/functions-path.cxx | 323
-rw-r--r--  libbuild2/functions-process-path.cxx | 53
-rw-r--r--  libbuild2/functions-process.cxx | 15
-rw-r--r--  libbuild2/functions-project-name.cxx | 39
-rw-r--r--  libbuild2/functions-regex.cxx | 467
-rw-r--r--  libbuild2/functions-string.cxx | 227
-rw-r--r--  libbuild2/functions-target-triplet.cxx | 30
-rw-r--r--  libbuild2/functions-target.cxx | 108
-rw-r--r--  libbuild2/in/rule.cxx | 11
-rw-r--r--  libbuild2/in/rule.hxx | 5
-rw-r--r--  libbuild2/in/target.cxx | 10
-rw-r--r--  libbuild2/install/functions.cxx | 116
-rw-r--r--  libbuild2/install/init.cxx | 274
-rw-r--r--  libbuild2/install/operation.cxx | 358
-rw-r--r--  libbuild2/install/operation.hxx | 64
-rw-r--r--  libbuild2/install/rule.cxx | 614
-rw-r--r--  libbuild2/install/rule.hxx | 174
-rw-r--r--  libbuild2/install/utility.cxx | 259
-rw-r--r--  libbuild2/install/utility.hxx | 45
-rw-r--r--  libbuild2/json.cxx | 904
-rw-r--r--  libbuild2/json.hxx | 369
-rw-r--r--  libbuild2/json.ixx | 349
-rw-r--r--  libbuild2/lexer.cxx | 96
-rw-r--r--  libbuild2/lexer.hxx | 19
-rw-r--r--  libbuild2/module.cxx | 300
-rw-r--r--  libbuild2/module.hxx | 63
-rw-r--r--  libbuild2/operation.cxx | 382
-rw-r--r--  libbuild2/operation.hxx | 37
-rw-r--r--  libbuild2/parser.cxx | 3182
-rw-r--r--  libbuild2/parser.hxx | 110
-rw-r--r--  libbuild2/prerequisite.cxx | 4
-rw-r--r--  libbuild2/prerequisite.hxx | 30
-rw-r--r--  libbuild2/recipe.cxx | 1
-rw-r--r--  libbuild2/recipe.hxx | 1
-rw-r--r--  libbuild2/rule.cxx | 98
-rw-r--r--  libbuild2/rule.hxx | 53
-rw-r--r--  libbuild2/scheduler.cxx | 25
-rw-r--r--  libbuild2/scheduler.hxx | 68
-rw-r--r--  libbuild2/scheduler.ixx | 14
-rw-r--r--  libbuild2/scheduler.txx | 42
-rw-r--r--  libbuild2/scope.cxx | 67
-rw-r--r--  libbuild2/scope.hxx | 57
-rw-r--r--  libbuild2/scope.ixx | 12
-rw-r--r--  libbuild2/script/builtin-options.cxx | 50
-rw-r--r--  libbuild2/script/parser.cxx | 24
-rw-r--r--  libbuild2/script/parser.hxx | 11
-rw-r--r--  libbuild2/script/regex.hxx | 12
-rw-r--r--  libbuild2/script/run.cxx | 256
-rw-r--r--  libbuild2/script/script.cxx | 9
-rw-r--r--  libbuild2/script/script.hxx | 10
-rw-r--r--  libbuild2/search.cxx | 59
-rw-r--r--  libbuild2/search.hxx | 9
-rw-r--r--  libbuild2/target-type.hxx | 27
-rw-r--r--  libbuild2/target.cxx | 316
-rw-r--r--  libbuild2/target.hxx | 379
-rw-r--r--  libbuild2/target.ixx | 71
-rw-r--r--  libbuild2/test/common.cxx | 6
-rw-r--r--  libbuild2/test/init.cxx | 18
-rw-r--r--  libbuild2/test/operation.cxx | 14
-rw-r--r--  libbuild2/test/rule.cxx | 132
-rw-r--r--  libbuild2/test/script/lexer.cxx | 8
-rw-r--r--  libbuild2/test/script/lexer.hxx | 2
-rw-r--r--  libbuild2/test/script/parser.cxx | 26
-rw-r--r--  libbuild2/test/script/script.cxx | 2
-rw-r--r--  libbuild2/token.cxx | 21
-rw-r--r--  libbuild2/token.hxx | 8
-rw-r--r--  libbuild2/types-parsers.cxx | 67
-rw-r--r--  libbuild2/types-parsers.hxx | 11
-rw-r--r--  libbuild2/types.hxx | 16
-rw-r--r--  libbuild2/types.ixx | 6
-rw-r--r--  libbuild2/utility-installed.cxx | 8
-rw-r--r--  libbuild2/utility-uninstalled.cxx | 12
-rw-r--r--  libbuild2/utility.cxx | 51
-rw-r--r--  libbuild2/utility.hxx | 31
-rw-r--r--  libbuild2/variable.cxx | 1106
-rw-r--r--  libbuild2/variable.hxx | 208
-rw-r--r--  libbuild2/variable.ixx | 210
-rw-r--r--  libbuild2/variable.txx | 520
-rw-r--r--  libbuild2/version/rule.cxx | 6
-rw-r--r--  manifest | 12
-rw-r--r--  old-tests/variable/override/buildfile | 4
-rw-r--r--  old-tests/variable/override/p/buildfile | 2
-rw-r--r--  old-tests/variable/type-pattern-append/buildfile | 2
-rw-r--r--  tests/build/root.build | 7
-rw-r--r--  tests/cc/modules/common.testscript | 18
-rw-r--r--  tests/cc/modules/modules.testscript | 19
-rw-r--r--  tests/cc/preprocessed/testscript | 1
-rw-r--r--  tests/dependency/recipe/testscript | 2
-rw-r--r--  tests/directive/config.testscript | 4
-rw-r--r--  tests/expansion/escape.testscript | 17
-rw-r--r--  tests/function/builtin/testscript | 28
-rw-r--r--  tests/function/json/buildfile | 4
-rw-r--r--  tests/function/json/testscript | 257
-rw-r--r--  tests/function/path/testscript | 72
-rw-r--r--  tests/function/regex/testscript | 116
-rw-r--r--  tests/function/string/testscript | 56
-rw-r--r--  tests/test/script/runner/set.testscript | 5
-rw-r--r--  tests/type/json/buildfile | 4
-rw-r--r--  tests/type/json/testscript | 504
-rw-r--r--  tests/type/map/buildfile | 4
-rw-r--r--  tests/type/map/testscript | 70
-rw-r--r--  tests/type/set/buildfile | 4
-rw-r--r--  tests/type/set/testscript | 55
-rw-r--r--  tests/type/vector/buildfile | 4
-rw-r--r--  tests/type/vector/testscript | 57
-rw-r--r--  tests/value/concat.testscript | 42
-rw-r--r--  tests/value/reverse.testscript | 55
-rw-r--r--  tests/variable/override/testscript | 2
-rw-r--r--  tests/variable/target-type-pattern-specific/testscript | 19
241 files changed, 34531 insertions, 5264 deletions
diff --git a/.gitignore b/.gitignore
index 5a9e741..dfb9bab 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,10 +5,16 @@
*.d
*.t
*.i
+*.i.*
*.ii
+*.ii.*
*.o
*.obj
+*.gcm
+*.pcm
+*.ifc
*.so
+*.dylib
*.dll
*.a
*.lib
diff --git a/LICENSE b/LICENSE
index 6d28197..f2f9ac7 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
MIT License
-Copyright (c) 2014-2022 the build2 authors (see the AUTHORS file).
+Copyright (c) 2014-2024 the build2 authors (see the AUTHORS file).
Copyright (c) Microsoft Corporation for the libbuild2/cc/msvc-setup.h file.
Permission is hereby granted, free of charge, to any person obtaining a copy
diff --git a/NEWS b/NEWS
index ef9a8fd..597156e 100644
--- a/NEWS
+++ b/NEWS
@@ -18,27 +18,112 @@ Version 0.16.0
# buildfile
#
lib{hello}: {hxx cxx}{*}
- lib{hello}: mm{*}: include = ($cxx.target.class == 'macos')
+ lib{hello}: mm{*}: include = ($cxx.target.class == 'macos')
Note also that while there is support for linking Objective-C/C++
executables and libraries, this is done using the C/C++ compiler driver
and no attempt to automatically link any necessary Objective-C runtime
- (such as -lobjc) is made.
+ (such as -lobjc) is made. For details, refer to "Objective-C Compilation"
+ and "Objective-C++ Compilation" in the manual.
+
+ * Support for Assembler with C Preprocessor (.S) compilation.
+
+ Specifically, the c module now provides the c.as-cpp submodule which can
+ be loaded in order to register the S{} target type and enable Assembler
+ with C Preprocessor compilation in the c compile rule. For details, refer
+ to "Assembler with C Preprocessor Compilation" in the manual.
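+
+ For example, a minimal hypothetical sketch (target and file names are made
+ up for illustration):
+
+ # root.build
+ #
+ using c
+ using c.as-cpp
+
+ # buildfile
+ #
+ exe{utils}: c{main} S{memcpy}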
+
+ * Support for buildfile importation.
+
+ A project can now export buildfiles that can then be imported by other
+ projects. This mechanism is primarily useful for exporting target type
+ definitions and ad hoc rules.
+
+ Specifically, a project can now place *.build files into its build/export/
+ subdirectory (or *.build2 and build2/export/ in the alternative naming
+ scheme). Such files can then be imported by other projects as buildfile{}
+ targets. For example:
+
+ import thrift%buildfile{thrift-cxx}
+
+ While for other target types the semantics of import is to load the
+ project's export stub and return the exported target, for buildfile{} the
+ semantics is to source the imported buildfile at the point of importation.
+
+ Note that care must be taken when authoring exported buildfiles since they
+ will be sourced by other projects in unpredictable circumstances. In
+ particular, the import directive by default does not prevent sourcing the
+ same buildfile multiple times (whether in the same project or even in the
+ same scope). As a result, if certain parts must only be sourced once per
+ project (such as target type definitions), then they must be factored into
+ a separate buildfile (in build/export/) that is imported by the "main"
+ exported buildfile with the `once` attribute. For example, the above
+ thrift-cxx.build may contain:
+
+ import [once] thrift%buildfile{thrift-cxx-target-type}
+
+ See also "install Module" in the manual for details on the exported
+ buildfile installation.
+
+ * Support for defining explicit (as opposed to ad hoc) target groups.
+
+ A user-defined explicit target group must be derived from the group base
+ target type. If desired, it can be marked as "see-through", meaning that
+ when it is listed as a prerequisite of a target, the matching rule will
+ "see" its members, rather than the group itself. For example:
+
+ define [see_through] thrift_cxx: group
+
+ define thrift: file
+ thrift{*}: extension = thrift
+
+ exe{hello}: cxx{hello} thrift_cxx{data}
+ thrift_cxx{data}: thrift{data}
+
+ Explicit group members can be specified statically, injected by an ad hoc
+ rule, or extracted dynamically by the depdb-dyndep builtin (see the next
+ NEWS item). For example:
+
+ thrift_cxx{data}<{hxx cxx}{data_constants}>: thrift{data} # Static.
+
+ thrift_cxx{~'/(.+)/'}<{hxx cxx}{^'/\1_types/'}>: thrift{~'/\1/'} # Inject.
+ {{
+ depdb dyndep --dyn-target ... # Dynamic.
+ }}
+
+ * Support for dynamic target extraction in addition to prerequisites.
+
+ This functionality is enabled with the depdb-dyndep --dyn-target option.
+ If the recipe target is an explicit group (see the previous NEWS item),
+ then the dynamically extracted targets are added as its members.
+ Otherwise, the listed targets are added as ad hoc group members. In both
+ cases the dynamically extracted target is ignored if it is already
+ specified as a static member or injected by a rule. Note that this
+ functionality is not available in the --byproduct mode. See the
+ depdb-dyndep builtin options description for details.
+
+ * New `lines` depdb-dyndep dependency format in addition to `make`.
+
+ The `lines` format lists targets and/or prerequisites one per line. See
+ the depdb-dyndep builtin options description for details.
* Low verbosity diagnostics rework.
The low verbosity (level 1) rule diagnostics format has been adjusted to
include the output target where appropriate. The implementation has also
been redesigned to go through the uniform print_diag() API, including for
- the `diag` pseudo-builtin in ad hoc recipes. Specifically, the `diag`
- builtin now expects its arguments to be in one of the following two forms
- (which correspond to the two forms of print_diag()):
+ the `diag` pseudo-builtin in ad hoc recipes.
+
+ Specifically, the `diag` builtin now expects its arguments to be in one of
+ the following two forms (which correspond to the two print_diag() forms):
diag <prog> <l-target> <comb> <r-target>...
diag <prog> <r-target>...
If the `diag` builtin is not specified, the default diagnostics is now
- equivalent to, for update:
+ equivalent to:
+
+ For update:
diag <prog> ($<[0]) -> $>
@@ -47,12 +132,215 @@ Version 0.16.0
diag <prog> $>
For details, see the print_diag() API description in diagnostics.hxx. See
- also GH issue #40 for additional background/details.
+ also GitHub issue #40 for additional background/details.
+
+ * Buffering of diagnostics from child processes.
+
+ By default, unless running serially or --no-diag-buffer is specified,
+ diagnostics issued by child processes (compilers, etc) is buffered and
+ printed all at once after each child exits in order to prevent
+ interleaving. See also the new --[no-]diag-color options.
+
+ * New $path.posix_string() and $path.posix_representation() functions.
+
+ These functions are similar to $path.string() and $path.representation()
+ except that they always return the string/representation of a path in the
+ POSIX notation, that is, using forward slashes.
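+
+ For example, a hypothetical usage sketch:
+
+ # Print the output root directory using forward slashes even on Windows.
+ #
+ info $path.posix_string($out_root)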
+
+ * New $regex.filter[_out]_{match,search}(<vals>, <pat>) functions.
+
+ The match versions return elements of a list that match (filter) or do not
+ match (filter_out) the regular expression. The search versions do the same
+ except using the search rather than the match regex semantics.
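+
+ For example, a hypothetical sketch:
+
+ flags = -O2 -g -DNDEBUG -Wall
+
+ macros = $regex.filter_match($flags, '-D.*') # -DNDEBUG
+ rest = $regex.filter_out_match($flags, '-D.*') # -O2 -g -Wall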
+
+ * New $find(<sequence>, <value>), $find_index(<sequence>, <value>) functions.
+
+ The $find() function returns true if the sequence contains the specified
+ value. The $find_index() function returns the index of the first element
+ in the sequence that is equal to the specified value or $size(<sequence>)
+ if none is found. For string sequences, it's possible to request case-
+ insensitive comparison with a flag, for example:
+
+ if ($find ($values, 'foo', icase))
+ ...
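+
+ And a hypothetical $find_index() sketch:
+
+ i = $find_index($values, 'foo')
+
+ if ($i != $size($values))
+ ...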
+
+ * New $integer_sequence(<begin>, <end>[, <step>]) function.
+
+ This function returns the list of uint64 integers starting from <begin>
+ (inclusive) to <end> (exclusive) with the specified <step> or 1 if
+ unspecified. For example:
+
+ hdr = foo.hxx bar.hxx baz.hxx
+ src = foo.cxx bar.cxx baz.cxx
+
+ assert ($size($hdr) == $size($src)) "hdr and src expected to be parallel"
+
+ for i: $integer_sequence(0, $size($hdr))
+ {
+ h = ($hdr[$i])
+ s = ($src[$i])
+ ...
+ }
+
+ * New $is_a(<name>, <target-type>), $filter[_out](<names>, <target-types>)
+ functions.
+
+ $is_a() returns true if the <name>'s target type is-a <target-type>. Note
+ that this is a dynamic type check that takes into account target type
+ inheritance.
+
+ $filter[_out]() returns the names whose target types are-a (filter) or
+ are not-a (filter_out) one of <target-types>.
+
+ In particular, these functions are useful for filtering prerequisite
+ targets ($<) in ad hoc recipes and rules.
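+
+ For example, a hypothetical ad hoc recipe sketch that only passes the C++
+ source prerequisites to a tool:
+
+ exe{hello}: cxx{hello} hxx{config}
+ {{
+ diag gen $>
+ srcs = $filter($<, cxx)
+ ...
+ }}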
+
+ * Support for the hex notation for the uint64 type.
+
+ Specifically, now we can do:
+
+ x = [uint64] 0x0000ffff
+
+ cxx.poptions += "-DOFFSET=$x" # -DOFFSET=65535
+ cxx.poptions += "-DOFFSET=$string($x, 16)" # -DOFFSET=0xffff
+ cxx.poptions += "-DOFFSET=$string($x, 16, 8)" # -DOFFSET=0x0000ffff
+
+ Note that there is no hex notation support for the int64 (signed) type.
+
+ * Support for the `for` and `while` loops in Buildscript recipes and
+ Testscript.
+
+ For example:
+
+ for v: $values
+ ...
+ end
+
+ cat values.txt | for -n v
+ ...
+ end
+
+ while (!$regex.match(...))
+ ...
+ end
+
+ See "Command-For" and "Command-While" in the Testscript manual for
+ details.
+
+ * New `find` builtin in Buildscript recipes and Testscript.
+
+ For example:
+
+ find gen/ -type f -name '*.?xx' | for -n f
+ ...
+ end
+
+ See "find" in the Testscript manual for details.
+
+ * Improvements to escape sequence support.
+
+ In double-quoted strings we now only perform effective escaping of the
+ special [$("\] characters and line continuations, plus [)] for symmetry.
+
+ There is now support for "escape sequence expansion" in the $\X form where
+ \X can be any of the C/C++ simple escape sequences (\n, \t, etc) plus \0
+ (which in C/C++ is an octal escape sequence). For example:
+
+ info "foo$\n$\tbar$\n$\tbaz"
+
+ Will print:
+
+ buildfile:1:1: info: foo
+ bar
+ baz
+
+ * New include_arch installation location and the corresponding
+ config.install.include_arch configuration variable.
+
+ This location is meant for architecture-specific files, such as
+ configuration headers. By default it's the same as the standard include
+ location but can be configured by the user to a different value (for
+ example, /usr/include/x86_64-linux-gnu/) for platforms that support
+ multiple architectures from the same installation location. This is how
+ one would normally use it from a buildfile:
+
+ # The generated configuration header may contain target architecture-
+ # specific information so install it into include_arch/ instead of
+ # include/.
+ #
+ h{*}: install = include/libhello/
+ h{config}: install = include_arch/libhello/
+
+ * Support for installation filtering.
+
+ While project authors determine what gets installed at the buildfile
+ level, the users of the project can now further filter the installation
+ using the config.install.filter variable. For details, see "Installation
+ Filtering" in the manual.
+
+ * Support for relocatable installations.
+
+ A relocatable installation can be moved to a directory other than its
+ original installation location. To request a relocatable installation, set
+ the config.install.relocatable variable to true. For details, see
+ "Relocatable Installation" in the manual.
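+
+ For example, a hypothetical invocation (installation directory made up):
+
+ $ b install \
+ config.install.root=/opt/hello \
+ config.install.relocatable=true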
+
+ * Support for installation manifest.
+
+ During the install operation, the config.install.manifest variable can be
+ set to a file path (or `-`) in order to write the information about all
+ the filesystem entries being installed into the specified file (or
+ stdout). The format of the installation manifest is "JSON lines". For
+ details, see the config.install.manifest variable documentation in the
+ install module.
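+
+ For example, a hypothetical invocation that writes the manifest to stdout:
+
+ $ b install config.install.manifest=-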
+
+ * Ability to remap paths in source distributions.
+
+ The dist target-specific variable can now specify a path besides true or
+ false. This path is the "imaginary" source location which is used to
+ derive the corresponding distribution location. This location can be
+ either a directory path (to remap with the same file name) or a file path
+ (to remap with a different name). If the path is relative, then it is
+ treated relative to the target directory. Note that to make things less
+ error-prone, simple paths without any directory separators are not allowed
+ (use ./<name> instead).
+
+ Note that if multiple targets end up with the same source location, the
+ behavior is undefined and no diagnostics is issued. Note also that such
+ remapping naturally has no effect in the bootstrap distribution mode.
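+
+ For example, a hypothetical sketch that distributes a target under a
+ different file name:
+
+ hxx{version}: dist = ./version.hxx.in
+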
* The in.substitution variable has been renamed to in.mode.
The original name is still recognized for backwards compatibility.
+ * Ability to specify `in` rule substitutions as key-value pairs.
+
+ See "in Module" in the manual for details.
+
+ * New public/private variables model.
+
+ Now unqualified variables are project-private and can be typed, meaning
+ that a value assigned to a variable with such a name anywhere within the
+ project will have this type. For example:
+
+ [uint64] priority = [null]
+ [uint64] stack_size = [null]
+
+ priority = 1 # Ok.
+ stack_size = abc # Error.
+
+ Besides the type, variable attributes can specify visibility (project by
+ default) and overridability (false by default). For example:
+
+ thread{*}:
+ {
+ [uint64, visibility=target] priority = [null]
+ [uint64, visibility=target] stack_size = [null]
+ }
+
+ thread{foo}: priority = 1 # Ok.
+ priority = 1 # Error.
+
* Support for post hoc prerequisites.
Unlike normal and ad hoc prerequisites, a post hoc prerequisite is built
@@ -85,6 +373,50 @@ Version 0.16.0
before the target, post hoc prerequisite is only guaranteed to be built
before the end of the overall build.
+ * Support for dumping build system state in the JSON format.
+
+ The new --dump-format option can be used to select the desired format.
+ Its valid values are `buildfile` and `json-v0.1`. For details on the JSON
+ dump format see "Appendix A - JSON Dump Format" in the manual.
+
+ * Change to the --dump option semantics.
+
+ This option now recognizes two additional values: `match-pre` and
+ `match-post` to dump the state of pre/post-operations. The `match` value
+ now only triggers dumping of the main operation.
+
+ * New --dump-scope and --dump-target options to limit --dump output.
+
+ * New --load-only option in addition to --match-only.
+
+ This option has the effect of loading all the subdirectory buildfiles that
+ are not explicitly included and is primarily useful in combination with
+ --dump.
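+
+ For example, a hypothetical invocation that only loads the project and
+ dumps its state in the JSON format:
+
+ $ b --load-only --dump load --dump-format json-v0.1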
+
+ * Quoted/display target names in the JSON structured result are now
+ consistent with the JSON dump.
+
+ Specifically, before we had `target` (display) and `quoted_target`, and now
+ we have `target` (quoted) and `display_target`. Note that this is a
+ backwards-incompatible change.
+
+ * The dist meta-operation no longer invokes the install program.
+
+ This results in a substantial speedup, especially on Windows. The use of
+ install (or another install-like program) can still be forced with
+ explicit config.dist.cmd=install.
+
+ * Clang -Wunqualified-std-cast-call warning was remapped to -Wextra.
+
+ Clang 15 introduced the -Wunqualified-std-cast-call warning which warns
+ about unqualified calls to std::move() and std::forward() (because they
+ can be "hijacked" via ADL). Surprisingly, this warning is enabled by
+ default, as opposed to with -Wextra or at least -Wall. It has also proven
+ to be quite disruptive, causing a large number of warnings in a large
+ number of packages. So we have "remapped" it to -Wextra for now and in the
+ future may "relax" it to -Wall and potentially to being enabled by
+ default. See GitHub issue #259 for background and details.
+
Version 0.15.0
* Generated C/C++ headers and ad hoc sources are now updated during match.
diff --git a/build/root.build b/build/root.build
index 911787f..ffc1a0f 100644
--- a/build/root.build
+++ b/build/root.build
@@ -22,9 +22,16 @@ if ($cxx.target.system == 'win32-msvc')
if ($cxx.class == 'msvc')
cxx.coptions += /wd4251 /wd4275 /wd4800
elif ($cxx.id == 'gcc')
+{
cxx.coptions += -Wno-maybe-uninitialized -Wno-free-nonheap-object \
-Wno-stringop-overread # libbutl
+ if ($cxx.version.major >= 13)
+ cxx.coptions += -Wno-dangling-reference
+}
+elif ($cxx.id.type == 'clang' && $cxx.version.major >= 15)
+ cxx.coptions += -Wno-unqualified-std-cast-call
+
cxx.poptions =+ "-I$out_root" "-I$src_root"
# While we don't have any C sources to compile, we need to get the C compiler
diff --git a/build2/b.cxx b/build2/b.cxx
index 68c77c8..8decadf 100644
--- a/build2/b.cxx
+++ b/build2/b.cxx
@@ -54,8 +54,7 @@
#ifndef BUILD2_BOOTSTRAP
# include <libbuild2/bash/init.hxx>
-
-# include <build2/cli/init.hxx>
+# include <libbuild2/cli/init.hxx>
#endif
using namespace butl;
@@ -149,32 +148,20 @@ namespace build2
s.begin_object ();
- // Target.
- //
- {
- // Change the stream verbosity (see print_lines() for details).
- //
- ostringstream os;
- stream_verb (os, stream_verbosity (1, 0));
- os << t;
- s.member ("target", os.str ());
- }
-
// Quoted target.
//
- {
- names ns (t.as_name ()); // Note: potentially adds an extension.
+ s.member_name ("target");
+ dump_quoted_target_name (s, t);
- ostringstream os;
- stream_verb (os, stream_verbosity (1, 0));
- to_stream (os, ns, quote_mode::effective, '@');
- s.member ("quoted_target", os.str ());
- }
+ // Display target.
+ //
+ s.member_name ("display_target");
+ dump_display_target_name (s, t);
- s.member ("target_type", t.key ().type->name, false /* check */);
+ s.member ("target_type", t.type ().name, false /* check */);
if (t.is_a<dir> ())
- s.member ("target_path", t.key ().dir->string ());
+ s.member ("target_path", t.dir.string ());
else if (const auto* pt = t.is_a<path_target> ())
s.member ("target_path", pt->path ().string ());
@@ -329,6 +316,7 @@ main (int argc, char* argv[])
init_diag (cmdl.verbosity,
ops.silent (),
cmdl.progress,
+ cmdl.diag_color,
ops.no_line (),
ops.no_column (),
fdterm (stderr_fd ()));
@@ -383,8 +371,8 @@ main (int argc, char* argv[])
load_builtin_module (&in::build2_in_load);
#ifndef BUILD2_BOOTSTRAP
- load_builtin_module (&cli::build2_cli_load);
load_builtin_module (&bash::build2_bash_load);
+ load_builtin_module (&cli::build2_cli_load);
#endif
// Start up the scheduler and allocate lock shards.
@@ -428,10 +416,14 @@ main (int argc, char* argv[])
pctx = nullptr; // Free first to reuse memory.
}
+ optional<match_only_level> mo;
+ if (ops.load_only ()) mo = match_only_level::alias;
+ else if (ops.match_only ()) mo = match_only_level::all;
+
pctx.reset (new context (sched,
mutexes,
fcache,
- ops.match_only (),
+ mo,
ops.no_external_modules (),
ops.dry_run (),
ops.no_diag_buffer (),
@@ -450,13 +442,14 @@ main (int argc, char* argv[])
// Parse the buildspec.
//
buildspec bspec;
+ path_name bspec_name ("<buildspec>");
try
{
istringstream is (cmdl.buildspec);
is.exceptions (istringstream::failbit | istringstream::badbit);
parser p (*pctx);
- bspec = p.parse_buildspec (is, path_name ("<buildspec>"));
+ bspec = p.parse_buildspec (is, bspec_name);
}
catch (const io_error&)
{
@@ -475,16 +468,22 @@ main (int argc, char* argv[])
// Note: omit reserving anything for the info meta-operation since it
// won't be loading the buildfiles and needs to be as fast as possible.
//
- if (bspec.size () == 1 &&
- bspec.front ().size () == 1 &&
- (bspec.front ().name == "info" ||
- (bspec.front ().name.empty () &&
- bspec.front ().front ().name == "info")))
- ;
- else
+ bool mo_info (bspec.size () == 1 &&
+ bspec.front ().size () == 1 &&
+ (bspec.front ().name == "info" ||
+ (bspec.front ().name.empty () &&
+ bspec.front ().front ().name == "info")));
+
+ if (!mo_info)
+ {
+ // Note: also adjust in bpkg if adjusting here.
+ //
pctx->reserve (context::reserves {
30000 /* targets */,
1100 /* variables */});
+ }
+
+ bool load_only (ops.load_only ());
const path& buildfile (ops.buildfile_specified ()
? ops.buildfile ()
@@ -492,12 +491,164 @@ main (int argc, char* argv[])
bool dump_load (false);
bool dump_match (false);
- if (ops.dump_specified ())
+ bool dump_match_pre (false);
+ bool dump_match_post (false);
+ for (const string& p: ops.dump ())
{
- dump_load = ops.dump ().find ("load") != ops.dump ().end ();
- dump_match = ops.dump ().find ("match") != ops.dump ().end ();
+ if (p == "load") dump_load = true;
+ else if (p == "match") dump_match = true;
+ else if (p == "match-pre") dump_match_pre = true;
+ else if (p == "match-post") dump_match_post = true;
+ else fail << "unknown phase '" << p << "' specified with --dump";
}
+ dump_format dump_fmt (dump_format::buildfile);
+ if (ops.dump_format_specified ())
+ {
+ const string& f (ops.dump_format ());
+
+ if (f == "json-v0.1")
+ {
+#ifdef BUILD2_BOOTSTRAP
+ fail << "json dump not supported in bootstrap build system";
+#endif
+ dump_fmt = dump_format::json;
+ }
+ else if (f != "buildfile")
+ {
+ diag_record dr (fail);
+
+ dr << "unsupported format '" << f << "' specified with --dump-format";
+
+ if (f.compare (0, 4, "json") == 0)
+ dr << info << "supported json format version is json-v0.1";
+ }
+ }
+
+ auto dump = [&trace, &ops, dump_fmt] (context& ctx, optional<action> a)
+ {
+ const dir_paths& scopes (ops.dump_scope ());
+ const vector<pair<name, optional<name>>>& targets (ops.dump_target ());
+
+ if (scopes.empty () && targets.empty ())
+ build2::dump (ctx, a, dump_fmt);
+ else
+ {
+ auto comp_norm = [] (dir_path& d, const char* what)
+ {
+ try
+ {
+ if (d.relative ())
+ d.complete ();
+
+ d.normalize ();
+ }
+ catch (const invalid_path& e)
+ {
+ fail << "invalid path '" << e.path << "' specified with " << what;
+ }
+ };
+
+ // If exact is false then return any outer scope that contains this
+ // directory except for the global scope.
+ //
+ auto find_scope = [&ctx, &comp_norm] (dir_path& d,
+ bool exact,
+ const char* what) -> const scope*
+ {
+ comp_norm (d, what);
+
+ // This is always the output directory (specifically, see the target
+ // case below).
+ //
+ const scope& s (ctx.scopes.find_out (d));
+
+ return ((exact ? s.out_path () == d : s != ctx.global_scope)
+ ? &s
+ : nullptr);
+ };
+
+ // Dump scopes.
+ //
+ for (dir_path d: scopes)
+ {
+ const scope* s (find_scope (d, true, "--dump-scope"));
+
+ if (s == nullptr)
+ l5 ([&]{trace << "unknown target scope " << d
+ << " specified with --dump-scope";});
+
+ build2::dump (s, a, dump_fmt);
+ }
+
+ // Dump targets.
+ //
+ for (const pair<name, optional<name>>& p: targets)
+ {
+ const target* t (nullptr);
+
+ // Find the innermost known scope that contains this target. This
+ // is where we are going to resolve its type.
+ //
+ dir_path d (p.second ? p.second->dir : p.first.dir);
+
+ if (const scope* s = find_scope (d, false, "--dump-target"))
+ {
+ // Complete relative directories in names.
+ //
+ name n (p.first), o;
+
+ if (p.second)
+ {
+ comp_norm (n.dir, "--dump-target");
+ o.dir = move (d);
+ }
+ else
+ n.dir = move (d);
+
+ // Similar logic to parser::enter_target::find_target() as used by
+ // the dump directive. Except here we treat unknown target type as
+ // unknown target.
+ //
+ auto r (s->find_target_type (n, location ()));
+
+ if (r.first != nullptr)
+ {
+ t = ctx.targets.find (*r.first, // target type
+ n.dir,
+ o.dir,
+ n.value,
+ r.second, // extension
+ trace);
+
+ if (t == nullptr)
+ l5 ([&]
+ {
+ // @@ TODO: default_extension?
+ //
+ target::combine_name (n.value, r.second, false);
+ names ns {move (n)};
+ if (p.second)
+ ns.push_back (move (o));
+
+ trace << "unknown target " << ns
+ << " specified with --dump-target";
+ });
+ }
+ else
+ l5 ([&]{trace << "unknown target type '" << n.type << "' in "
+ << *s << " specified with --dump-target";});
+
+ }
+ else
+ l5 ([&]{trace << "unknown target scope " << d
+ << " specified with --dump-target";});
+
+ build2::dump (t, a, dump_fmt);
+ }
+ }
+ };
+
// If not NULL, then lifted points to the operation that has been "lifted"
// to the meta-operation (see the logic below for details). Skip is the
// position of the next operation.
@@ -514,7 +665,10 @@ main (int argc, char* argv[])
// Note that this constructor is cheap and so we rather call it always
// instead of resorting to dynamic allocations.
//
- json::stream_serializer js (cout);
+ // Note also that we disable pretty-printing if there is also the JSON
+ // dump and thus we need to combine the two in the JSON Lines format.
+ //
+ json::stream_serializer js (cout, dump_fmt == dump_format::json ? 0 : 2);
if (ops.structured_result_specified () &&
ops.structured_result () == structured_result_format::json)
@@ -564,8 +718,7 @@ main (int argc, char* argv[])
context& ctx (*pctx);
- const path p ("<buildspec>");
- const location l (p, 0, 0); //@@ TODO
+ const location l (bspec_name, 0, 0); //@@ TODO (also bpkg::pkg_configure())
meta_operation_id mid (0); // Not yet translated.
const meta_operation_info* mif (nullptr);
@@ -703,12 +856,19 @@ main (int argc, char* argv[])
}
}
- if (out_base.relative ())
- out_base = work / out_base;
+ try
+ {
+ if (out_base.relative ())
+ out_base = work / out_base;
- // This directory came from the command line so actualize it.
- //
- out_base.normalize (true);
+ // This directory came from the command line so actualize it.
+ //
+ out_base.normalize (true);
+ }
+ catch (const invalid_path& e)
+ {
+ fail << "invalid out_base directory '" << e.path << "'";
+ }
// The order in which we determine the roots depends on whether
// src_base was specified explicitly.
@@ -734,12 +894,19 @@ main (int argc, char* argv[])
if (!exists (src_base))
fail << "src_base directory " << src_base << " does not exist";
- if (src_base.relative ())
- src_base = work / src_base;
+ try
+ {
+ if (src_base.relative ())
+ src_base = work / src_base;
- // Also came from the command line, so actualize.
- //
- src_base.normalize (true);
+ // Also came from the command line, so actualize.
+ //
+ src_base.normalize (true);
+ }
+ catch (const invalid_path& e)
+ {
+ fail << "invalid src_base directory '" << e.path << "'";
+ }
// Make sure out_base is not a subdirectory of src_base. Who would
// want to do that, you may ask. Well, you would be surprised...
@@ -894,8 +1061,13 @@ main (int argc, char* argv[])
// Now that we have src_root, load the src_root bootstrap file,
// if there is one.
//
+ // As an optimization, omit discovering subprojects for the info
+ // meta-operation if not needed.
+ //
bootstrap_pre (rs, altn);
- bootstrap_src (rs, altn);
+ bootstrap_src (rs, altn,
+ nullopt /* amalgamation */,
+ !mo_info || info_subprojects (mparams) /*subprojects*/);
// If this is a simple project, then implicitly load the test and
// install modules.
@@ -1112,23 +1284,38 @@ main (int argc, char* argv[])
if (oif->outer_id != 0)
outer_oif = lookup (oif->outer_id);
+ if (!oparams.empty ())
+ {
+ // Operation parameters belong to outer operation, if any.
+ //
+ auto* i (outer_oif != nullptr ? outer_oif : oif);
+
+ if (i->operation_pre == nullptr)
+ fail (l) << "unexpected parameters for operation " << i->name;
+ }
+
// Handle pre/post operations.
//
if (auto po = oif->pre_operation)
{
- if ((orig_pre_oid = po (ctx, oparams, mid, l)) != 0)
+ if ((orig_pre_oid = po (
+ ctx,
+ outer_oif == nullptr ? oparams : values {},
+ mid,
+ l)) != 0)
{
assert (orig_pre_oid != default_id);
pre_oif = lookup (orig_pre_oid);
pre_oid = pre_oif->id; // De-alias.
}
}
- else if (!oparams.empty ())
- fail (l) << "unexpected parameters for operation " << oif->name;
if (auto po = oif->post_operation)
{
- if ((orig_post_oid = po (ctx, oparams, mid)) != 0)
+ if ((orig_post_oid = po (
+ ctx,
+ outer_oif == nullptr ? oparams : values {},
+ mid)) != 0)
{
assert (orig_post_oid != default_id);
post_oif = lookup (orig_post_oid);
@@ -1174,6 +1361,9 @@ main (int argc, char* argv[])
// defined there (common with non-intrusive project conversions
// where everything is built from a single root buildfile).
//
+ // Note: we use find_plausible_buildfile() and not find_buildfile()
+ // to look in outer directories.
+ //
optional<path> bf (
find_buildfile (src_base, src_base, altn, buildfile));
@@ -1239,6 +1429,9 @@ main (int argc, char* argv[])
break;
}
+ if (load_only && (mid != perform_id || oid != update_id))
+ fail << "--load-only requires perform(update) action";
+
// Now load the buildfiles and search the targets.
//
action_targets tgs;
@@ -1270,6 +1463,9 @@ main (int argc, char* argv[])
if (tt == nullptr)
fail (l) << "unknown target type " << tn.type;
+ if (load_only && !tt->is_a<alias> ())
+ fail << "--load-only requires alias target";
+
if (mif->search != nullptr)
{
// If the directory is relative, assume it is relative to work
@@ -1277,10 +1473,17 @@ main (int argc, char* argv[])
//
dir_path& d (tn.dir);
- if (d.relative ())
- d = work / d;
+ try
+ {
+ if (d.relative ())
+ d = work / d;
- d.normalize (true); // Actualize since came from command line.
+ d.normalize (true); // Actualize since came from command line.
+ }
+ catch (const invalid_path& e)
+ {
+ fail << "invalid target directory '" << e.path << "'";
+ }
if (ts.forwarded)
d = rs.out_path () / d.leaf (rs.src_path ()); // Remap.
@@ -1300,8 +1503,10 @@ main (int argc, char* argv[])
}
} // target
- if (dump_load)
- dump (ctx);
+ // Delay until after match in the --load-only mode (see below).
+ //
+ if (dump_load && !load_only)
+ dump (ctx, nullopt /* action */);
// Finally, match the rules and perform the operation.
//
@@ -1315,6 +1520,12 @@ main (int argc, char* argv[])
ctx.current_operation (*pre_oif, oif);
+ if (oif->operation_pre != nullptr)
+ oif->operation_pre (ctx, oparams, false /* inner */, l);
+
+ if (pre_oif->operation_pre != nullptr)
+ pre_oif->operation_pre (ctx, {}, true /* inner */, l);
+
action a (mid, pre_oid, oid);
{
@@ -1326,13 +1537,19 @@ main (int argc, char* argv[])
if (mif->match != nullptr)
mif->match (mparams, a, tgs, diag, true /* progress */);
- if (dump_match)
+ if (dump_match_pre)
dump (ctx, a);
if (mif->execute != nullptr && !ctx.match_only)
mif->execute (mparams, a, tgs, diag, true /* progress */);
}
+ if (pre_oif->operation_post != nullptr)
+ pre_oif->operation_post (ctx, {}, true /* inner */);
+
+ if (oif->operation_post != nullptr)
+ oif->operation_post (ctx, oparams, false /* inner */);
+
if (mif->operation_post != nullptr)
mif->operation_post (ctx, mparams, pre_oid);
@@ -1344,6 +1561,15 @@ main (int argc, char* argv[])
ctx.current_operation (*oif, outer_oif);
+ if (outer_oif != nullptr && outer_oif->operation_pre != nullptr)
+ outer_oif->operation_pre (ctx, oparams, false /* inner */, l);
+
+ if (oif->operation_pre != nullptr)
+ oif->operation_pre (ctx,
+ outer_oif == nullptr ? oparams : values {},
+ true /* inner */,
+ l);
+
action a (mid, oid, oif->outer_id);
{
@@ -1362,6 +1588,14 @@ main (int argc, char* argv[])
mif->execute (mparams, a, tgs, diag, true /* progress */);
}
+ if (oif->operation_post != nullptr)
+ oif->operation_post (ctx,
+ outer_oif == nullptr ? oparams : values {},
+ true /* inner */);
+
+ if (outer_oif != nullptr && outer_oif->operation_post != nullptr)
+ outer_oif->operation_post (ctx, oparams, false /* inner */);
+
if (post_oid != 0)
{
tgs.reset ();
@@ -1374,6 +1608,12 @@ main (int argc, char* argv[])
ctx.current_operation (*post_oif, oif);
+ if (oif->operation_pre != nullptr)
+ oif->operation_pre (ctx, oparams, false /* inner */, l);
+
+ if (post_oif->operation_pre != nullptr)
+ post_oif->operation_pre (ctx, {}, true /* inner */, l);
+
action a (mid, post_oid, oid);
{
@@ -1385,13 +1625,19 @@ main (int argc, char* argv[])
if (mif->match != nullptr)
mif->match (mparams, a, tgs, diag, true /* progress */);
- if (dump_match)
+ if (dump_match_post)
dump (ctx, a);
if (mif->execute != nullptr && !ctx.match_only)
mif->execute (mparams, a, tgs, diag, true /* progress */);
}
+ if (post_oif->operation_post != nullptr)
+ post_oif->operation_post (ctx, {}, true /* inner */);
+
+ if (oif->operation_post != nullptr)
+ oif->operation_post (ctx, oparams, false /* inner */);
+
if (mif->operation_post != nullptr)
mif->operation_post (ctx, mparams, post_oid);
@@ -1399,6 +1645,9 @@ main (int argc, char* argv[])
<< ", id " << static_cast<uint16_t> (post_oid);});
}
+ if (dump_load && load_only)
+ dump (ctx, nullopt /* action */);
+
if (mif->operation_post != nullptr)
mif->operation_post (ctx, mparams, oid);
diff --git a/build2/buildfile b/build2/buildfile
index 0c21388..0111ed2 100644
--- a/build2/buildfile
+++ b/build2/buildfile
@@ -11,7 +11,7 @@ libs = $libbutl
include ../libbuild2/
libs += ../libbuild2/lib{build2}
-for m: bash bin c cc cxx in version
+for m: bash bin c cc cli cxx in version
{
include ../libbuild2/$m/
libs += ../libbuild2/$m/lib{build2-$m}
@@ -45,6 +45,8 @@ copyright = $process.run_regex( \
obj{b}: cxx.poptions += -DBUILD2_COPYRIGHT=\"$copyright\"
+# NOTE: remember to update bpkg buildfile if changing anything here.
+#
switch $cxx.target.class
{
case 'linux'
diff --git a/config b/config
-Subproject 02ba26b218d3d3db6c56e014655faf463cefa98
+Subproject 4ad4bb7c30aca1e705448ba8d51a210bbd47bb5
diff --git a/doc/.gitignore b/doc/.gitignore
index 9d45a89..d33dca5 100644
--- a/doc/.gitignore
+++ b/doc/.gitignore
@@ -3,3 +3,7 @@ b.1
build2-*-manual.xhtml
*.ps
*.pdf
+
+# Auto-extracted documentation.
+#
+functions-*.cli
diff --git a/doc/buildfile b/doc/buildfile
index c797761..5508ddb 100644
--- a/doc/buildfile
+++ b/doc/buildfile
@@ -11,8 +11,286 @@ xhtml{*}: extension = xhtml
css{common pre-box man} \
file{man-*}
+# @@ TODO: why no testscript manual?
+
./: doc{build2-build-system-manual*} \
css{code-box common doc pre-box toc} \
file{manual.cli doc-* *.html2ps}
./: file{cli.sh}
+
+# The build2 function documentation format for auto-extraction.
+#
+# Each listed .cxx file is expected to provide functions for one function
+# family. In order to plug a new family/file, perform the following steps:
+#
+# 1. List the corresponding functions-<family>.cxx file stem below.
+# 2. Add a section and source the generated .cli file in manual.cli.
+#
+# The functions-<family>.cxx file is expected to contain one or more comments
+# in the following form:
+#
+# // <synopsis-line>+
+# // <blank-line>
+# // (<paragraph-line>+|<code-block-line>+
+# // <blank-line>)+
+#
+# That is, the comment starts with one or more synopsis lines followed by a
+# blank line followed by a mixture of text paragraphs and/or preformatted code
+# blocks separated by blank lines. The comment must be terminated with a blank
+# line. See functions-regex.cxx for comprehensive examples.
+#
+# The synopsis line should be in the form:
+#
+# // $[<family>.]<name>(...)
+#
+# Each synopsis line may or may not be qualified with <family>. The rule is
+# as follows: If the function can only be called qualified, then the synopsis
+# should contain a single qualified line. If the function can be called
+# unqualified, then the synopsis should contain a single unqualified line.
+# If some signatures can be called unqualified while others -- only qualified,
+# then there should be both qualified and unqualified lines. Note that there
+# can also be functions with different <name>s in a single synopsis block.
+#
+# The text paragraphs may contain `...` and <...> fragments which are
+# translated to \c{} and \ci{}, respectively. Note that these fragments cannot
+# span multiple lines.
+#
+# The preformatted code blocks must be indented four spaces (not counting
+# the space after //).
+#
+# There is a problem with distinguishing a blank line within a code block from
+# a blank line that separates the code block from the subsequent paragraph (or
+# another code block). Strictly speaking, such a blank line should be indented
+# with four spaces, but trailing spaces at the end of a line are generally
+# frowned upon and in our code should be automatically zapped on save.
+#
+# So what we are going to do is treat a single blank line between two code
+# lines as belonging to the code block rather than separating two code
+# blocks. The latter can be achieved with a double blank line. Note that this
+# means we cannot have double blank lines within a code block.
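+#
+# For illustration, a hypothetical comment in this format (not taken from the
+# actual sources) could look like this:
+#
+# // $string.trim(<untyped>)
+# // $trim(<string>)
+# //
+# // Trim leading and trailing whitespaces in a string.
+# //
+# //     s = $trim('  foo  ') # 'foo'
+# //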
+
+# @@ TODO: using file{.cli}, change to cli{} once switch to ad hoc recipes.
+# @@ TODO: get rid of backlink below once switch to ad hoc recipes.
+
+for ff: functions-builtin \
+ functions-string \
+ functions-integer \
+ functions-json \
+ functions-bool \
+ functions-path \
+ functions-name \
+ functions-target \
+ functions-regex \
+ functions-process \
+ functions-filesystem \
+ functions-project-name \
+ functions-process-path \
+ functions-target-triplet
+{
+ alias{functions}: file{$(ff).cli}: $src_root/libbuild2/cxx{$ff}
+ file{$(ff).cli}: backlink = true # @@ TMP until migrate to recipe (see cli.sh)
+}
+
+file{~'/(functions-.+)\.cli/'}: cxx{~'/\1/'}
+{{
+ diag doc $< -> $>
+
+ i = $path($<) # Input.
+ o = $path($>) # Output.
+
+ # Extract the family name.
+ #
+ family = $regex.replace($name($<), 'functions-(.+)', '\1')
+ family = $regex.replace($family, '-', '_')
+
+ echo "// Auto-extracted from $leaf($i) for \$$(family).*\(\)" >$o
+
+ # The overall plan is as follows: read the file line by line recognizing the
+ # function documentation comments and maintaining the parsing state.
+ #
+ # Parsing state, one of:
+ #
+ # none -- outside of a documentation comment
+ # syno -- inside synopsis
+ # para -- inside text
+ # code -- inside preformatted code block
+ # blnk -- blank line separating synopsis/para/code
+ #
+ s = none # Current state.
+ p = none # Previous state.
+
+ ln = [uint64] 0 # Line number.
+ for -n l <=$i
+ ln += 1
+
+ # Look for a C++ comment and extract its text.
+ #
+ t = $regex.match($l, '\s*// ?(.*)', return_subs)
+
+ # Note that when writing the output we use the "leading blank line" rather
+ # than trailing approach. That is, we write the blank before starting the
+ # next block rather than after.
+
+ if ($t == [null])
+ if ($s != 'none')
+ if ($s != 'blnk')
+ exit "$i:$ln: blank line expected after description"
+ end
+
+ # Close delayed code block (see below for details).
+ #
+ if ($p == 'code')
+ echo "\\" >>$o # end code
+ end
+
+ echo "\"" >>$o # end cli doc string
+ end
+
+ p = $s
+ s = 'none'
+ else
+ # This is a comment. What we do next depends on which state we are in.
+ #
+ if ($s == 'none' || $s == 'syno')
+ p = $s
+
+ # See if this is a synopsis line.
+ #
+ if $regex.match($t, '\$.+\(.+\)')
+ if ($s == 'none')
+ synopsis = [strings] # Accumulate synopsis lines.
+ s = 'syno'
+ end
+
+ synopsis += $t
+ elif ($s == 'syno')
+ if ($t != '')
+ exit "$i:$ln: blank line expected after synopsis"
+ end
+
+ echo "$\n\"" >>$o # start cli doc string
+
+ # Write the heading. Use the first function name as id.
+ #
+ # Note that while the functions in the synopsis could be
+ # unqualified, in the heading we always write them qualified. We
+ # also have to suppress duplicates since the same function can be
+ # mentioned in the synopsis both qualified and unqualified.
+ #
+ id = [null]
+ hs = [strings]
+ for t: $synopsis
+ t = $regex.replace($t, '\$(.+)\(.+\)', '\1') # Extract func name.
+ f = $regex.match($t, '(.+)\..+', return_subs) # Extract family.
+
+ if ($f == [null])
+ t = "$(family).$t" # Qualify.
+ elif ($f != $family)
+ exit "$i:$ln: function family in $t does not match $family"
+ end
+
+ if ($id == [null]) # First.
+ id = $regex.replace($t, '\.', '-')
+ end
+
+ # Suppress duplicates.
+ #
+ if! $find($hs, $t)
+ hs += $t
+ end
+ end
+
+ h = $regex.merge($hs, '(.+)', '\\c{$\1()}', ', ')
+
+ echo "\\h2#functions-$id|$h|$\n" >>$o # heading
+
+ echo "\\" >>$o # start synopsis
+ for t: $synopsis
+ echo $t >>$o # synopsis line
+ end
+ echo "\\" >>$o # end synopsis
+
+ s = 'blnk'
+ end
+ else # para|code|blnk
+ # See if this is a code line.
+ #
+ c = $regex.match($t, ' (.+)', return_subs)
+
+ if ($c != [null])
+ # Code line.
+ #
+ if ($s == 'para')
+ exit "$i:$ln: blank line expected before code block"
+ end
+
+ # Treat a single blank line between two code lines as belonging to
+ # the code block rather than separating two code blocks (see above
+ # for details).
+ #
+ if ($s == 'blnk')
+ if ($p == 'code')
+ echo '' >>$o # continue code, write preceding blank
+ s = 'code'
+ else
+ echo "$\n\\" >>$o # start code
+ end
+ end
+
+ echo $regex.replace($c, '"', '\\"') >>$o # write code line
+
+ p = $s
+ s = 'code'
+ elif ($t != '')
+ # Paragraph line.
+ #
+ if ($s == 'code')
+ exit "$i:$ln: blank line expected after code block"
+ end
+
+ # Close delayed code block (see above for details).
+ #
+ if ($p == 'code')
+ echo "\\" >>$o # end code
+ end
+
+ if ($s == 'blnk')
+ echo '' >>$o # start para
+ end
+
+ t = $regex.replace($t, '\\', '\\\\') # Escape backslashes.
+ t = $regex.replace($t, '"', '\\"') # Escape double quotes.
+
+ # Convert `` to \c{} and <> to \ci{}.
+ #
+ t = $regex.replace($t, '`([^`]+)`', '\\c{\1}')
+ t = $regex.replace($t, '<([^\s<>]+)>', '\\ci{\1}')
+
+ echo $t >>$o # write para line
+
+ p = $s
+ s = 'para'
+ else
+ # Blank line.
+ #
+
+ # Note that we delay closing the code block in case this blank line
+ # is followed by another code line (and is therefore treated as
+ # belonging to the code block; see above for details).
+ #
+ if ($s != 'code' && $p == 'code')
+ echo "\\" >>$o # end code
+ end
+
+ #if ($s == 'para')
+ # end para
+ #end
+
+ p = $s
+ s = 'blnk'
+ end
+ end
+ end
+ end
+}}
diff --git a/doc/cli.sh b/doc/cli.sh
index 7aa1aff..398371c 100755
--- a/doc/cli.sh
+++ b/doc/cli.sh
@@ -1,6 +1,6 @@
#! /usr/bin/env bash
-version=0.16.0-a.0.z
+version=0.17.0-a.0.z
trap 'exit 1' ERR
set -o errtrace # Trap in functions.
@@ -47,6 +47,7 @@ function compile ()
--generate-html --html-suffix .xhtml \
--html-prologue-file man-prologue.xhtml \
--html-epilogue-file man-epilogue.xhtml \
+--link-regex '%b(#.+)?%build2-build-system-manual.xhtml$1%' \
../libbuild2/$n.cli
cli -I .. \
@@ -58,6 +59,7 @@ function compile ()
--generate-man --man-suffix .1 --ascii-tree \
--man-prologue-file man-prologue.1 \
--man-epilogue-file man-epilogue.1 \
+--link-regex '%b(#.+)?%$1%' \
../libbuild2/$n.cli
}
@@ -91,6 +93,13 @@ function xhtml_to_ps () # <from> <to> [<html2ps-options>]
function compile_doc () # <file> <prefix> <suffix>
{
+ local file="$1"
+ shift
+ local prefix="$1"
+ shift
+ local suffix="$1"
+ shift
+
cli -I .. \
-v version="$(echo "$version" | sed -e 's/^\([^.]*\.[^.]*\).*/\1/')" \
-v date="$date" \
@@ -104,11 +113,12 @@ function compile_doc () # <file> <prefix> <suffix>
--link-regex '%bdep([-.].+)%../../bdep/doc/bdep$1%' \
--link-regex '%testscript(#.+)?%build2-testscript-manual.xhtml$1%' \
--link-regex '%build2(#.+)?%build2-build-system-manual.xhtml$1%' \
---output-prefix "$2" \
---output-suffix "$3" \
-"$1"
+--output-prefix "$prefix" \
+--output-suffix "$suffix" \
+"${@}" \
+"$file"
- local n="$2$(basename -s .cli $1)$3"
+ local n="$prefix$(basename -s .cli $file)$suffix"
xhtml_to_ps "$n.xhtml" "$n-a4.ps" -f doc.html2ps:a4.html2ps
ps2pdf14 -sPAPERSIZE=a4 -dOptimize=true -dEmbedAllFonts=true "$n-a4.ps" "$n-a4.pdf"
@@ -117,7 +127,13 @@ function compile_doc () # <file> <prefix> <suffix>
ps2pdf14 -sPAPERSIZE=letter -dOptimize=true -dEmbedAllFonts=true "$n-letter.ps" "$n-letter.pdf"
}
-compile_doc manual.cli 'build2-build-system-'
+# @@ TODO: replace -I. with $out_base and get rid of backlinking once
+# migrated to recipes.
+#
+# Note: we have to manually map \h to h2 since we break the doc string.
+#
+b update: alias{functions}
+compile_doc manual.cli 'build2-build-system-' '' --html-heading-map h=h2 -I .
compile_doc testscript.cli 'build2-' '-manual'
# Generate INSTALL in ../
diff --git a/doc/manual.cli b/doc/manual.cli
index 9b89345..847691d 100644
--- a/doc/manual.cli
+++ b/doc/manual.cli
@@ -156,7 +156,7 @@ Let's now try to build and run our program (\c{b} is the build system driver):
$ cd hello/ # Change to project root.
$ b
-c++ cxx{hello}
+c++ cxx{hello} -> obje{hello}
ld exe{hello}
$ ls -1
@@ -177,7 +177,7 @@ Or, if we are on Windows and using Visual Studio:
> cd hello
> b
-c++ cxx{hello}
+c++ cxx{hello} -> obje{hello}
ld exe{hello}
> dir /b
@@ -235,7 +235,7 @@ noise, the commands being executed are by default shown abbreviated and with
the same target type notation as we used in the \c{buildfile}. For example:
\
-c++ cxx{hello}
+c++ cxx{hello} -> obje{hello}
ld exe{hello}
\
@@ -275,7 +275,8 @@ above listings. In our \c{buildfile} we refer to the executable target as
extension, if any, will be determined based on the compiler's target platform
by the rule doing the linking. In this sense, target types are a
platform-independent replacement of file extensions (though they do have other
-benefits, such as allowing non-file targets as well as being hierarchical).
+benefits, such as allowing non-file targets as well as being hierarchical;
+see \l{#targets-types Target Types} for details).
Let's revisit the dependency declaration line from our \c{buildfile}:
@@ -371,7 +372,7 @@ Nothing really new here: we've specified the default extension for the
prerequisites. If you have experience with other build systems, then
explicitly listing headers might seem strange to you. As will be discussed
later, in \c{build2} we have to explicitly list all the prerequisites of a
-target that should end up in a distribution of our project.
+target that should end up in a source distribution of our project.
\N|You don't have to list \i{all} headers that you include, only the ones
belonging to your project. Like all modern C/C++ build systems, \c{build2}
@@ -421,11 +422,11 @@ exe{hello}: {hxx cxx}{**}
development more pleasant and less error prone: you don't need to update your
\c{buildfile} every time you add, remove, or rename a source file and you
won't forget to explicitly list headers, a mistake that is often only detected
-when trying to build a distribution of a project. On the other hand, there is
-the possibility of including stray source files into your build without
-noticing. And, for more complex projects, name patterns can become fairly
-complex (see \l{#name-patterns Name Patterns} for details). Note also that on
-modern hardware the performance of wildcard searches hardly warrants a
+when trying to build a source distribution of a project. On the other hand,
+there is the possibility of including stray source files into your build
+without noticing. And, for more complex projects, name patterns can become
+fairly complex (see \l{#name-patterns Name Patterns} for details). Note also
+that on modern hardware the performance of wildcard searches hardly warrants a
consideration.
In our experience, when combined with modern version control systems like
@@ -597,7 +598,7 @@ configuration \i{persistent}. We will see an example of this shortly.
Next up are the \c{test}, \c{install}, and \c{dist} modules. As their names
suggest, they provide support for testing, installation and preparation of
-distributions. Specifically, the \c{test} module defines the \c{test}
+source distributions. Specifically, the \c{test} module defines the \c{test}
operation, the \c{install} module defines the \c{install} and \c{uninstall}
operations, and the \c{dist} module defines the \c{dist}
(meta-)operation. Again, we will try them out in a moment.
@@ -681,7 +682,8 @@ notation for \c{dir{\}}. As we will see shortly, it fits naturally with other
uses of directories in \c{buildfiles} (for example, in scopes).
The \c{dir{\}} target type is an \i{alias} (and, in fact, is derived from more
-general \c{alias{\}}). Building it means building all its prerequisites.
+general \c{alias{\}}; see \l{#targets-types Target Types} for
+details). Building it means building all its prerequisites.
\N|If you are familiar with \c{make}, then you can probably see the similarity
with the ubiquitous \c{all} pseudo-target. In \c{build2} we instead use
@@ -756,7 +758,7 @@ Let's take a look at a slightly more realistic root \c{buildfile}:
Here we have the customary \c{README.md} and \c{LICENSE} files as well as the
package \c{manifest}. Listing them as prerequisites achieves two things: they
will be installed if/when our project is installed and, as mentioned earlier,
-they will be included into the project distribution.
+they will be included into the project source distribution.
The \c{README.md} and \c{LICENSE} files use the \c{doc{\}} target type. We
could have used the generic \c{file{\}} but using the more precise \c{doc{\}}
@@ -815,7 +817,7 @@ in this new layout:
\
$ cd hello/ # Change to project root.
$ b
-c++ hello/cxx{hello}
+c++ hello/cxx{hello} -> hello/obje{hello}
ld hello/exe{hello}
$ tree ./
@@ -892,8 +894,8 @@ libhello/
└── README.md
\
-The overall layout (\c{build/}, \c{libhello/} source directory) as well as the
-contents of the root files (\c{bootstrap.build}, \c{root.build}, root
+The overall layout (\c{build/}, \c{libhello/} source subdirectory) as well as
+the contents of the root files (\c{bootstrap.build}, \c{root.build}, root
\c{buildfile}) are exactly the same. There is, however, the new file
\c{export.build} in \c{build/}, the new subdirectory \c{tests/}, and the
contents of the project's source subdirectory \c{libhello/} look quite a bit
@@ -973,7 +975,7 @@ To start, let's build it in the \c{hello-out/} directory next to the project:
$ b hello/@hello-out/
mkdir fsdir{hello-out/}
mkdir hello-out/fsdir{hello/}
-c++ hello/hello/cxx{hello}@hello-out/hello/
+c++ hello/hello/cxx{hello} -> hello-out/hello/obje{hello}
ld hello-out/hello/exe{hello}
$ ls -1
@@ -1053,21 +1055,25 @@ We could have also specified out for an in source build, but that's redundant:
$ b hello/@hello/
\
-There is another example of this elaborate target specification in the build
-diagnostics:
+There is another example of this elaborate target specification that can be
+seen in the build diagnostics, for instance, when installing headers of a
+library (the \c{install} operation is discussed in the next section):
\
-c++ hello/hello/cxx{hello}@hello-out/hello/
+$ b install: libhello/@libhello-out/
+...
+install libhello/libhello/hxx{hello}@libhello-out/libhello/ ->
+ /usr/local/include/
\
-Notice, however, that now the target (\c{cxx{hello\}}) is on the left of
+Notice, however, that now the target (\c{hxx{hello\}}) is on the left of
\c{@}, that is, in the src directory. It does, however, make sense if you
-think about it: our \c{hello.cxx} is a \i{source file}, it is not built and it
-resides in the project's source directory. This is in contrast, for example,
-to the \c{exe{hello\}} target which is the output of the build system and goes
-to the out directory. So in \c{build2} targets can be either in src or in out
-(there can also be \i{out of any project} targets, for example, installed
-files).
+think about it: our \c{hello.hxx} is a \i{source file}, in the sense that it is
+not built and it resides in the project's source directory. This is in
+contrast, for example, to the \c{exe{hello\}} target which is the output of
+the build system and goes to the out directory. So in \c{build2} targets can
+be either in src or in out (there can also be \i{out of any project} targets,
+for example, installed files).
The elaborate target specification can also be used in \c{buildfiles}. We
haven't encountered any so far because targets mentioned without explicit
@@ -1360,7 +1366,7 @@ prerequisite-specific). These and other variable-related topics will be
covered in subsequent sections.|
One typical place to find \c{src/out_root} expansions is in the include search
-path options. For example, the source directory \c{buildfile} generated by
+path options. For example, the source subdirectory \c{buildfile} generated by
\l{bdep-new(1)} for an executable project actually looks like this
(\c{poptions} stands for \i{preprocessor options}):
@@ -1500,7 +1506,7 @@ In the few cases that we do, we use the following syntax:
If the scope directory is relative, then it is assumed to be relative to the
current scope. As an exercise for understanding, let's reimplement our
\c{hello} project as a single \c{buildfile}. That is, we move the contents of
-the source directory \c{buildfile} into the root \c{buildfile}:
+the source subdirectory \c{buildfile} into the root \c{buildfile}:
\
$ tree hello/
@@ -1530,7 +1536,7 @@ which explicitly opens scopes to define the build over the upstream project's
subdirectory structure.|
Seeing this merged \c{buildfile} may make you wonder what exactly caused the
-loading of the source directory \c{buildfile} in our normal setup. In other
+loading of the source subdirectory \c{buildfile} in our normal setup. In other
words, when we build our \c{hello} from the project root, who loads
\c{hello/buildfile} and why?
@@ -1876,7 +1882,7 @@ $ b configure: hello/@hello-gcc/,forward
$ cd hello/ # Change to project root.
$ b
-c++ hello/cxx{hello}@../hello-gcc/hello/
+c++ hello/cxx{hello} -> ../hello-gcc/hello/obje{hello}
ld ../hello-gcc/hello/exe{hello}
ln ../hello-gcc/hello/exe{hello} -> hello/
\
@@ -1986,7 +1992,7 @@ actual output:
\
$ b test
-c++ hello/cxx{hello}
+c++ hello/cxx{hello} -> hello/obje{hello}
ld hello/exe{hello}
test hello/exe{hello}
--- test.out
@@ -2088,9 +2094,9 @@ expect to see when running the tests:
\
b test
-c++ hello/cxx{hello}
+c++ hello/cxx{hello} -> hello/obje{hello}
ld hello/exe{hello}
-test hello/testscript{testscript} hello/exe{hello}
+test hello/exe{hello} + hello/testscript{testscript}
hello/testscript:7:1: error: hello/hello exit code 0 == 0
info: stdout: hello/test-hello/missing-name/stdout
\
@@ -2137,7 +2143,7 @@ libhello/
\
Specifically, there is no \c{testscript} in \c{libhello/}, the project's
-source directory. Instead, we have the \c{tests/} subdirectory which itself
+source subdirectory. Instead, we have the \c{tests/} subdirectory which itself
looks like a project: it contains the \c{build/} subdirectory with all the
familiar files, etc. In fact, \c{tests} is a \i{subproject} of our
\c{libhello} project.
@@ -2312,36 +2318,40 @@ If the value of the \c{install} variable is not \c{false}, then it is normally
a relative path with the first path component being one of these names:
\
-name default override
----- ------- --------
-root config.install.root
+name default override
+---- ------- --------
+root config.install.root
-data_root root/ config.install.data_root
-exec_root root/ config.install.exec_root
+data_root root/ config.install.data_root
+exec_root root/ config.install.exec_root
-bin exec_root/bin/ config.install.bin
-sbin exec_root/sbin/ config.install.sbin
-lib exec_root/lib/ config.install.lib
-libexec exec_root/libexec/<project>/ config.install.libexec
-pkgconfig lib/pkgconfig/ config.install.pkgconfig
+bin exec_root/bin/ config.install.bin
+sbin exec_root/sbin/ config.install.sbin
+lib exec_root/lib/ config.install.lib
+libexec exec_root/libexec/<project>/ config.install.libexec
+pkgconfig lib/pkgconfig/ config.install.pkgconfig
-etc data_root/etc/ config.install.etc
-include data_root/include/ config.install.include
-share data_root/share/ config.install.share
-data share/<project>/ config.install.data
+etc data_root/etc/ config.install.etc
+include data_root/include/ config.install.include
+include_arch include/ config.install.include_arch
+share data_root/share/ config.install.share
+data share/<project>/ config.install.data
+buildfile share/build2/export/<project>/ config.install.buildfile
-doc share/doc/<project>/ config.install.doc
-legal doc/ config.install.legal
-man share/man/ config.install.man
-man<N> man/man<N>/ config.install.man<N>
+doc share/doc/<project>/ config.install.doc
+legal doc/ config.install.legal
+man share/man/ config.install.man
+man<N> man/man<N>/ config.install.man<N>
\
Let's see what's going on here: The default install directory tree is derived
from the \c{config.install.root} value but the location of each node in this
tree can be overridden by the user that installs our project using the
-corresponding \c{config.install.*} variables. In our \c{buildfiles}, in turn,
-we use the node names instead of actual directories. As an example, here is a
-\c{buildfile} fragment from the source directory of our \c{libhello} project:
+corresponding \c{config.install.*} variables (see the \l{#module-install
+\c{install}} module documentation for details on their meaning). In our
+\c{buildfiles}, in turn, we use the node names instead of actual
+directories. As an example, here is a \c{buildfile} fragment from the source
+subdirectory of our \c{libhello} project:
\
hxx{*}:
@@ -2367,7 +2377,7 @@ root/include/libhello/
In the above \c{buildfile} fragment we also see the use of the
\c{install.subdirs} variable. Setting it to \c{true} instructs the \c{install}
module to recreate subdirectories starting from this point in the project's
-directory hierarchy. For example, if our \c{libhello/} source directory had
+directory hierarchy. For example, if our \c{libhello/} source subdirectory had
the \c{details/} subdirectory with the \c{utility.hxx} header, then this
header would have been installed as
\c{.../include/libhello/details/utility.hxx}.
@@ -2397,11 +2407,11 @@ lib{hello}: cxx.pkgconfig.include = include/hello/
\h2#intro-operations-dist|Distributing|
The last module that we load in our \c{bootstrap.build} is \c{dist} which
-provides support for the preparation of distributions and defines the \c{dist}
-meta-operation. Similar to \c{configure}, \c{dist} is a meta-operation rather
-than an operation because, conceptually, we are preparing a distribution for
-performing operations (like \c{update}, \c{test}) on targets rather than
-targets themselves.
+provides support for the preparation of source distributions and defines the
+\c{dist} meta-operation. Similar to \c{configure}, \c{dist} is a
+meta-operation rather than an operation because, conceptually, we are
+preparing a distribution for performing operations (like \c{update}, \c{test})
+on targets rather than targets themselves.
The preparation of a correct distribution requires that all the necessary
project files (sources, documentation, etc) be listed as prerequisites in the
@@ -2481,7 +2491,7 @@ $ b dist
Let's now take a look at an example of customizing what gets distributed.
Most of the time you will be using this mechanism to include certain targets
-from out. Here is a fragment from the \c{libhello} source directory
+from out. Here is a fragment from the \c{libhello} source subdirectory
\c{buildfile}:
\
@@ -2519,9 +2529,9 @@ state identical to distributed.
\h#intro-import|Target Importation|
Recall that if we need to depend on a target defined in another \c{buildfile}
-within our project, then we simply include said \c{buildfile} and reference
-the target. For example, if our \c{hello} included both an executable and a
-library in separate subdirectories next to each other:
+within our project, then we simply include the said \c{buildfile} and
+reference the target. For example, if our \c{hello} included both an
+executable and a library in separate subdirectories next to each other:
\
hello/
@@ -2596,9 +2606,9 @@ source directory to use its in source build (\c{out_root\ ==\ src_root}):
\
$ b hello/ config.import.libhello=libhello/
-c++ libhello/libhello/cxx{hello}
+c++ libhello/libhello/cxx{hello} -> libhello/libhello/objs{hello}
ld libhello/libhello/libs{hello}
-c++ hello/hello/cxx{hello}
+c++ hello/hello/cxx{hello} -> hello/hello/obje{hello}
ld hello/hello/exe{hello}
\
@@ -2613,9 +2623,9 @@ $ b configure: hello/@hello-clang/ config.cxx=clang++ \
config.import.libhello=libhello-clang/
$ b hello-clang/
-c++ libhello/libhello/cxx{hello}@libhello-clang/libhello/
+c++ libhello/libhello/cxx{hello} -> libhello-clang/libhello/objs{hello}
ld libhello-clang/libhello/libs{hello}
-c++ hello/hello/cxx{hello}@hello-clang/hello/
+c++ hello/hello/cxx{hello} -> hello-clang/hello/obje{hello}
ld hello-clang/hello/exe{hello}
\
@@ -2653,7 +2663,8 @@ subproject, or from an installation directory.
\N|Importation of an installed library will work even if it is not a
\c{build2} project. Besides finding the library itself, the link rule will
also try to locate its \c{pkg-config(1)} file and, if present, extract
-additional compile/link flags from it. The link rule also automatically
+additional compile/link flags from it (see \l{#cc-import-installed Importation
+of Installed Libraries} for details). The link rule also automatically
produces \c{pkg-config(1)} files for libraries that it installs.|
\N|A common problem with importing and using third-party C/C++ libraries is
@@ -2891,7 +2902,16 @@ recursively. And when the library is installed, this information is carried
over to its \c{pkg-config(1)} file.
\N|Similar to the \c{c.*} and \c{cc.*} sets discussed earlier, there are also
-\c{c.export.*} and \c{cc.export.*} sets.|
+\c{c.export.*} and \c{cc.export.*} sets.
+
+Note, however, that there is no \c{*.export.coptions} since a library imposing
+compilation options on its consumers is bad practice (too coarse-grained, does
+not compose, etc). Instead, the recommended approach is to specify in the
+library documentation that it expects its consumers to use a certain
+compilation option. And if your library is unusable without exporting a
+compilation option and you are sure the benefits outweigh the drawbacks, then you
+can specify it as part of \c{*.export.poptions} (it is still a good idea to
+prominently document this).|
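+
+As a hypothetical sketch (the \c{LIBHELLO_STRICT} macro name is made up for
+illustration), such an option could be exported as a preprocessor definition
+like this:
+
+\
+lib{hello}: cxx.export.poptions += -DLIBHELLO_STRICT
+\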
Here are the parts relevant to the library metadata protocol in the above
\c{buildfile}:
@@ -2989,7 +3009,12 @@ memory.
Note also that this only applies to shared libraries. In case of static
libraries, both interface and implementation dependencies are always linked,
-recursively.|
+recursively. Specifically, when linking a shared library, only libraries
+specified in its \c{*.export.libs} are linked. In contrast, when linking a
+static library, all its library prerequisites as well as those specified in
+\c{*.libs} are linked. Note that \c{*.export.libs} is not used when linking a
+static library since it is naturally assumed that all such libraries are also
+specified as library prerequisites or in \c{*.libs}.|
The remaining lines in the library metadata fragment are:
@@ -3048,6 +3073,16 @@ a binary-ful (\i{binful}) shared variants. Note also that binless libraries
can depend on binful libraries and are fully supported where the
\c{pkg-config(1)} functionality is concerned.
+One counter-intuitive aspect of having a binless library that depends on a
+system binful library, for example, \c{-lm}, is that you still have to specify
+the system library in both \c{*.export.libs} and \c{*.libs} because the latter
+is used when linking the static variant of the binless library. For example:
+
+\
+cxx.libs = -lm
+lib{hello}: cxx.export.libs = -lm
+\
+
If you are creating a new library with \l{bdep-new(1)} and are certain that it
will always be binless and in all configurations, then you can produce a
simplified \c{buildfile} by specifying the \c{binless} option, for example:
@@ -3162,9 +3197,10 @@ hello/
└── buildfile
$ b hello/
-c++ hello/libhello/libhello/cxx{hello}
+c++ hello/libhello/libhello/cxx{hello} ->
+ hello/libhello/libhello/objs{hello}
ld hello/libhello/libhello/libs{hello}
-c++ hello/hello/cxx{hello}
+c++ hello/hello/cxx{hello} -> hello/hello/obje{hello}
ld hello/hello/exe{hello}
\
@@ -3219,7 +3255,7 @@ configuration inheritance. As an example, let's configure the above bundled
\
$ b configure: hello/ config.cxx=clang++ config.cxx.coptions=-g
-$ b tree
+$ tree
hello/
├── build/
│ ├── config.build
@@ -3602,8 +3638,9 @@ info $path.directory($src_base) # $src_base
info $path.base($path.leaf($src_base)) # foo
\
-Note that functions in \c{build2} are \i{pure} in a sense that they do not
-alter the build state in any way.
+Note that the majority of functions in \c{build2} are \i{pure} in the sense that
+they do not alter the build state in any way (see \l{#functions Functions} for
+details).
\N|Functions in \c{build2} are currently defined either by the build system
core or build system modules and are implemented in C++. In the future it will
@@ -4122,7 +4159,7 @@ source subdirectory \c{buildfile} of an executable created with this option:
# Unit tests.
#
-exe{*.test}
+exe{*.test}:
{
test = true
install = false
@@ -4211,18 +4248,8 @@ files as belonging to unit tests. Because it is a second-level extension, we
have to indicate this fact to the pattern matching machinery with the trailing
triple dot (meaning \"there are more extensions coming\"). If we didn't do
that, \c{.test} would have been treated as a first-level extension explicitly
-specified for our source files.
-
-\N|If you need to specify a name that does not have an extension, then end it
-with a single dot. For example, for a header \c{utility} you would write
-\c{hxx{utility.\}}. If you need to specify a name with an actual trailing dot,
-then escape it with a double dot, for example, \c{hxx{utility..\}}.
-
-More generally, anywhere in a name, a double dot can be used to specify a dot
-that should not be considered the extension separator while a triple dot \-
-which should. For example, in \c{obja{foo.a.o\}} the extension is \c{.o} and
-if instead we wanted \c{.a.o} to be considered the extension, then we could
-rewrite it either as \c{obja{foo.a..o\}} or as \c{obja{foo...a.o\}}.|
+specified for our source files (see \l{#targets-types Target Types} for
+details).
The next couple of lines set target type/pattern-specific variables to treat
all unit test executables as tests that should not be installed:
@@ -4427,7 +4454,7 @@ scopes. For that we use the \c{dump} directive.
Without any arguments, \c{dump} prints (to \c{stderr}) the contents of the
scope it was encountered in and at that point of processing the \c{buildfile}.
Its output includes variables, targets and their prerequisites, as well as
-nested scopes, recursively. As an example, let's print the source directory
+nested scopes, recursively. As an example, let's print the source subdirectory
scope of our \c{hello} executable project. Here is its \c{buildfile} with
the \c{dump} directive at the end:
@@ -4490,7 +4517,8 @@ buildfile:5:1: dump:
The output of \c{dump} might look familiar: in \l{#intro-dirs-scopes Output
Directories and Scopes} we've used the \c{--dump} option to print the entire
build state, which looks pretty similar. In fact, the \c{dump} directive uses
-the same mechanism but allows us to print individual scopes and targets.
+the same mechanism but allows us to print individual scopes and targets from
+within a \c{buildfile}.
There is, however, an important difference to keep in mind: \c{dump} prints
the state of a target or scope at the point in the \c{buildfile} load phase
@@ -4504,6 +4532,9 @@ a result, while the \c{dump} directive should be sufficient in most cases,
sometimes you may need to use the \c{--dump} option to examine the build state
just before rule execution.
+\N|It is possible to limit the output of \c{--dump} to specific scopes and/or
+targets with the \c{--dump-scope} and \c{--dump-target} options.|
+
Let's now move from state to behavior. As we already know, to see the
underlying commands executed by the build system we use the \c{-v} options
(which is equivalent to \c{--verbose\ 2}). Note, however, that these are
@@ -4771,6 +4802,9 @@ executable target in the \c{<project>%exe{<project>\}} form, the
\c{config.<project>} variable is treated as an alias for
\c{config.import.<project>.<project>.exe}.
+For an imported \c{buildfile}, \c{<project>} may refer to either the importing
+project or the project from which the said \c{buildfile} was imported.
+
The build system core reserves \c{build} and \c{import} as the second
component in configuration variables as well as \c{configured} as the third
and subsequent components.|
@@ -4932,6 +4966,32 @@ if! $defined(config.libhello.database)
fail 'config.libhello.database must be specified'
\
+\N|A configuration variable without a default value is omitted from
+\c{config.build} unless the value is specified by the user. This semantics is
+useful for values that are normally derived from other configuration values
+but could also be specified by the user. If the value is derived, then we
+don't want it saved in \c{config.build} since that would prevent it from
+being re-derived if the configuration values it is based on are changed.
+For example:
+
+\
+config [strings] config.hello.database
+
+assert ($size($config.hello.database) > 0) \
+ 'database must be specified with config.hello.database'
+
+config [bool, config.report.variable=multi] config.hello.multi_database
+
+multi = ($defined(config.hello.multi_database) \
+ ? $config.hello.multi_database \
+         : $size($config.hello.database) > 1)
+
+assert ($multi || $size($config.hello.database) == 1) \
+  'only one database can be specified if config.hello.multi_database=false'
+\
+
+|
+
If computing the default value is expensive or requires elaborate logic, then
the handling of a configuration variable can be broken down into two steps
along these lines:
@@ -5236,6 +5296,29 @@ config libhello@/tmp/libhello/
woptions -Wall -Wextra -Wno-extra -Werror
\
+The \c{config.report.module} attribute can be used to override the reporting
+module name, that is, \c{config} in the \c{config\ libhello@/tmp/libhello/}
+line above. It is primarily useful in imported \c{buildfiles} that wish to
+report non-\c{config.*} variables under their own name. For example:
+
+\
+config [string] config.rtos.board
+
+# Load the board description and report key information such as the
+# capability revoker.
+#
+...
+revoker = ...
+
+config [config.report.module=rtos] revoker
+\
+
+\
+$ b config.rtos.board=ibex-safe-simulator -v
+rtos hello@/tmp/hello/
+ board ibex-safe-simulator
+ revoker hardware
+\
\h#proj-config-propag|Configuration Propagation|
@@ -5505,6 +5588,684 @@ configuration header into two, one public and installed while the other
private.|
+\h1#targets|Targets and Target Types|
+
+\N{This chapter is a work in progress and is incomplete.}
+
+
+\h#targets-types|Target Types|
+
+A target type is part of a target's identity. The core idea behind the concept
+of target types is to abstract away from file extensions which can vary from
+project to project (for example, C++ source file extensions) or from platform
+to platform (for example, executable file extensions). It also allows us to
+have non-file-based targets.
+
+Target types form a \i{base-derived} inheritance tree. The root of this tree
+is the abstract \c{target{\}} type. The \c{build2} core defines a number of
+standard target types, such as \c{file{\}}, \c{doc{\}}, and \c{exe{\}}. Build
+system modules can define additional target types that are based on the
+standard ones (or on types defined by other modules). For example, the \c{c}
+module that provides the C compilation support defines the \c{h{\}} and
+\c{c{\}} target types. Finally, \c{buildfiles} can derive project-local target
+types using the \c{define} directive.
+
+\N|If a target type represents a file type with a well-established extension,
+then by convention such an extension is used as the target type name. For
+example, the C language header and source files use the \c{.h} and \c{.c}
+extensions and the target types are called \c{h{\}} and \c{c{\}}.
+
+Speaking of conventions, as you may have noticed, when mentioning a target
+type we customarily add \c{{\}} after its name. We found that this helps with
+comprehension since target type names are often short (you can also search for
+\c{<type>{} to narrow it down to target types). In a way this is a similar
+approach to adding \c{()} after a function name except here we use \c{{\}},
+which mimics target type usage in target names, for example \c{c{hello\}} for
+\c{hello.c}.|
+
+The following listing shows the hierarchy of the standard target types defined
+by the \c{build2} core (the abstract target types are marked with \c{*}) while
+the following sections describe each standard target type in detail. For
+target types defined by a module refer to the respective module documentation.
+
+\
+ .-----target*------------.
+ | | |
+ mtime_target*---. alias fsdir
+ | | |
+ path_target* group dir
+ |
+ .---------file----.
+ | | |
+ .----doc-----. exe buildfile
+ | | |
+legal man manifest
+ |
+ man<N>
+\
+
+While target types replace (potentially variable) extensions, there still
+needs to be a mechanism for specifying them since in most cases targets have
+to be mapped to files. There are several ways this can be achieved.
+
+If a target type represents a file type with a well-established extension,
+then such an extension is normally used by default and we don't need to take
+any extra steps. For example, the \c{h{\}} and \c{c{\}} target types for C
+header and source files default to the \c{.h} and \c{.c} extensions,
+respectively, and if our project follows this convention, then we can simply
+write:
+
+\
+exe{utility}: c{utility} h{utility}
+\
+
+And \c{c{utility\}} will be mapped to \c{utility.c} and \c{h{utility\}} \-
+to \c{utility.h}.
+
+There are two variants of this default extension case: fixed extension and
+customizable extension. A target type may fix the default extension if
+deviating from it is a bad idea. A good example of such a target type is
+\c{man1{\}}, which fixes the default extension to be \c{.1}. More commonly,
+however, a target type will have a default extension but will allow
+customizing it with the \c{extension} variable.
+
+Good examples of where extension customization is often required are the
+\c{hxx{\}} and \c{cxx{\}} target types for C++ header and source files, which
+default to the \c{.hxx} and \c{.cxx} extensions, respectively. If our project
+uses other extensions, for example, \c{.hpp} and \c{.cpp}, then we can adjust
+the defaults (typically done in \c{root.build}, after loading the \c{cxx}
+module):
+
+\
+hxx{*}: extension = hpp
+cxx{*}: extension = cpp
+\
+
+Then we can write:
+
+\
+exe{utility}: cxx{utility} hxx{utility}
+\
+
+And \c{cxx{utility\}} will be mapped to \c{utility.cpp} and \c{hxx{utility\}}
+\- to \c{utility.hpp}.
+
+What about \c{exe{utility\}}, where does its extension come from? This is an
+example of a target type with an extension that varies from platform to
+platform. In such cases the extension is expected to be assigned by the rule
+that matches the target. In the above example, the link rule from the \c{cxx}
+module that matches updating \c{exe{utility\}} will assign a suitable
+extension based on the target platform of the C++ compiler that it was
+instructed to use.
+
+Finally, it is always possible to specify the file extension explicitly as
+part of the target name. For example:
+
+\
+exe{utility}: cxx{utility.cc} hxx{utility.hh}
+\
+
+This is normally only needed if the default extension is not appropriate or if
+the target type does not have a default extension, as is the case, for
+example, for the \l{#targets-types-file \c{file{\}}} and \l{#targets-types-doc
+\c{doc{\}}} target types. This mechanism can also be used to override the
+automatically derived extension. For example:
+
+\
+exe{($cxx.target.class == 'windows' ? utility.com : utility)}: ...
+\
+
+\N|If you need to specify a name that does not have an extension, then end it
+with a single dot. For example, for a header \c{utility} you would write
+\c{hxx{utility.\}}. If you need to specify a name with an actual trailing dot,
+then escape it with a double dot, for example, \c{hxx{utility..\}}.
+
+More generally, anywhere in a name, a double dot can be used to specify a dot
+that should not be considered the extension separator while a triple dot \-
+which should. For example, in \c{obja{foo.a.o\}} the extension is \c{.o} and
+if instead we wanted \c{.a.o} to be considered the extension, then we could
+rewrite it either as \c{obja{foo.a..o\}} or as \c{obja{foo...a.o\}}.|
+
+To derive a new target type in a \c{buildfile} we use the \c{define}
+directive. Such target types are project-local, meaning they cannot be
+exported to other projects. Typically this is used to provide a more
+meaningful name to a set of files and also to avoid having to specify their
+extensions explicitly. Compare:
+
+\
+./: doc{README.md PACKAGE-README.md INSTALL.md}
+\
+
+To:
+
+\
+define md: doc
+doc{*}: extension = md
+
+./: md{README PACKAGE-README INSTALL}
+\
+
+
+\h2#targets-types-target|\c{target{\}}|
+
+The \c{target{\}} target type is the root of the target type hierarchy. It is
+abstract and is not commonly used directly, except perhaps in patterns (target
+type/pattern-specific variable, pattern rules).
+
+
+\h2#targets-types-alias|\c{alias{\}} and \c{dir{\}}|
+
+The \c{alias{\}} target type is used for non-file-based targets that serve as
+aliases for their prerequisites.
+
+\N|Alias targets in \c{build2} are roughly equivalent to phony targets in
+\c{make}.|
+
+For example:
+
+\
+alias{tests}: exe{test1 test2 test3}
+\
+
+\
+$ b test: alias{tests}
+\
+
+An \c{alias{\}} target can also serve as an \"action\" if supplied with an ad
+hoc recipe (or matched by an ad hoc pattern rule). For example:
+
+\
+alias{strip}: exe{hello}
+{{
+ diag strip $<
+ strip $path($<)
+}}
+\
+
+The \c{dir{\}} target type is a special kind of alias that represents a
+directory. Building it means building everything inside the directory. See
+\l{#intro-proj-struct Project Structure} for background.
+
+A target without a type that ends with a directory separator (\c{/}) is
+automatically treated as \c{dir{\}}. For example, the following two lines are
+equivalent:
+
+\
+./: exe{test1 test2}
+dir{./}: exe{test1 test2}
+\
+
+Omitting the target type in such situations is customary.
+
+
+\h2#targets-types-fsdir|\c{fsdir{\}}|
+
+The \c{fsdir{\}} target type represents a filesystem directory. Unlike
+\c{dir{\}} above, it is not an alias and listing an \c{fsdir{\}} directory as
+a prerequisite of a target will cause that directory to be created on
+\c{update} and removed on \c{clean}.
+
+While we usually don't need to list explicit \c{fsdir{\}} prerequisites for
+our targets, one situation where this is necessary is when the target resides
+in a subdirectory that does not correspond to an existing source directory. A
+typical example of this situation is placing object files into subdirectories.
+Compare:
+
+\
+obj{foo}: c{foo}
+sub/obj{bar}: c{bar} fsdir{sub/}
+\
+
+
+\h2#targets-types-mtime-path|\c{mtime_target{\}} and \c{path_target{\}}|
+
+The \c{mtime_target{\}} target type represents a target that uses modification
+times to determine if it is out of date. The \c{path_target{\}} target type
+represents a target that has a corresponding filesystem entry. It is derived
+from \c{mtime_target{\}} and uses the modification time of that filesystem
+entry to determine if the target is out of date.
+
+Both of these target types are abstract and are not commonly used directly,
+except perhaps in patterns (target type/pattern-specific variable, pattern
+rules).
+
+
+\h2#targets-types-group|\c{group{\}}|
+
+The \c{group{\}} target type represents a user-defined explicit target group,
+that is, a target that has multiple member targets that are all built together
+with a single recipe.
+
+Normally this target type is not used to declare targets or prerequisites but
+rather as a base of a derived group. If desired, such a derived group can be
+marked with an attribute as \"see-through\", meaning that when the group is
+listed as a prerequisite of a target, the matching rule \"sees\" its members,
+rather than the group itself. For example:
+
+\
+define [see_through] thrift_cxx: group
+\
+
+
+\h2#targets-types-file|\c{file{\}}|
+
+The \c{file{\}} target type represents a generic file. This target type is
+used as a base for most of the file-based targets and can also be used to
+declare targets and prerequisites when there are no more specific target
+types.
+
+A target or prerequisite without a target type is automatically treated as
+\c{file{\}}. However, omitting a target type in such situations is not
+customary.
+
+The \c{file{\}} target type has no default extension and one cannot be
+assigned with the \c{extension} variable. As a result, if a \c{file{\}} target
+has an extension, then it must be specified explicitly as part of the target
+name. For example:
+
+\
+./: file{example.conf}
+\
+
+\h2#targets-types-doc|\c{doc{\}}, \c{legal{\}}, and \c{man{\}}|
+
+The \c{doc{\}} target type represents a generic documentation file. It has
+semantics similar to \c{file{\}} (from which it derives): it can be used as a
+base or declare targets/prerequisites and there is no default extension. One
+notable difference, however, is that \c{doc{\}} targets are by default
+installed into the \c{doc/} installation location (see \l{#module-install
+\c{install} Module}). For example:
+
+\
+./: doc{README.md ChangeLog.txt}
+\
+
+The \c{legal{\}} target type is derived from \c{doc{\}} and represents a legal
+documentation file, such as a license, copyright notice, authorship
+information, etc. The main purpose of having a separate target type like this
+is to help with installing licensing-related files into a different
+location. To this effect, \c{legal{\}} targets are installed into the
+\c{legal/} installation location, which by default is the same as \c{doc/} but
+can be customized. For example:
+
+\
+./: legal{COPYRIGHT LICENSE AUTHORS.md}
+\
+
+The \c{man{\}} target type is derived from \c{doc{\}} and represents a manual
+page. This target type requires an explicit extension specification and is
+installed into the \c{man/} installation location.
+
+\N|If you are using the \c{man{\}} target type directly (instead of one of
+\c{man<N>{\}} described below), for example, to install a localized version of
+a man page, then you will likely need to adjust the installation location
+on a per-target basis.|
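+
+For example, a hypothetical sketch (the target name and subdirectory are made
+up for illustration) of installing a German translation of a man page into a
+locale-specific subdirectory:
+
+\
+man{hello-de.1}: install = man/de/man1/
+\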
+
+The \c{man<N>{\}} target types (where \c{<N>} is an integer between 1 and 9)
+are derived from \c{man{\}} and represent manual pages in the respective
+sections. These target types have fixed default extensions \c{.<N>} (but an
+explicit extension can still be specified, for example \c{man1{foo.1p\}}) and
+are installed into the \c{man<N>/} installation locations. For example:
+
+\
+./: man1{foo}
+\
+
+
+\h2#targets-types-exe|\c{exe{\}}|
+
+The \c{exe{\}} target type represents an executable file. Executables in
+\c{build2} appear in two distinct but sometimes overlapping contexts: We can
+build an executable target, for example from C source files. Or we can list an
+executable target as a prerequisite in order to execute it as part of a
+recipe. And sometimes this can be the same executable target. For example,
+one project may build an executable target that is a source code generator and
+another project may import this executable target and use it in its recipes in
+order to generate some source code.
+
+To support this semantics the \c{exe{\}} target type has a peculiar default
+extension logic. Specifically, if the \c{exe{\}} target is \"output\", then
+the extension is expected to be assigned by the matching rule according to the
+target platform for which this executable is built. But if the matching rule
+does not assign an extension, then we fall back to no extension (for example,
+a script). If, however, the
+\c{exe{\}} target is \"input\" (that is, it's listed as a prerequisite and
+there is no corresponding \"output\" target), then the extension of the host
+platform is used as the default.
+
+In all these cases the extension can also be specified explicitly. This, for
+example, would be necessary if the executable were a batch file:
+
+\
+h{generate}: exe{generate.bat}
+{{
+ diag $< -> $>
+ $< -o $path($>)
+}}
+\
+
+Here, without the explicit extension, the \c{.exe} extension would have been
+used by default.
+
+
+\h1#variables|Variables|
+
+\N{This chapter is a work in progress and is incomplete.}
+
+The following variable/value types can currently be used in \c{buildfiles}:
+
+\
+bool
+
+int64
+int64s
+
+uint64
+uint64s
+
+string
+strings
+string_set
+string_map
+
+path
+paths
+dir_path
+dir_paths
+
+json
+json_array
+json_object
+json_set
+json_map
+
+name
+names
+name_pair
+
+cmdline
+project_name
+target_triplet
+\
+
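+As a minimal illustration (the variable names are made up), a value can be
+given one of these types using the attribute syntax:
+
+\
+dirs  = [dir_paths] src/ tests/
+count = [uint64] 3
+multi = [bool] true
+\
+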
+Note that while expansions in the target and prerequisite-specific assignments
+happen in the corresponding target and prerequisite contexts, respectively,
+for type/pattern-specific assignments they happen in the scope context. Plus,
+a type/pattern-specific prepend/append is applied at the time of expansion for
+the actual target. For example:
+
+\
+x = s
+
+file{foo}: # target
+{
+ x += t # s t
+ y = $x y # s t y
+}
+
+file{foo}: file{bar} # prerequisite
+{
+ x += p # x t p
+ y = $x y # x t p y
+}
+
+file{b*}: # type/pattern
+{
+ x += w # <append w>
+ y = $x w # <assign s w>
+}
+
+x = S
+
+info $(file{bar}: x) # S w
+info $(file{bar}: y) # s w
+\
+
+
+\h1#functions|Functions|
+
+\N{This chapter is a work in progress and is incomplete.}
+
+
+Functions in \c{build2} are organized into families, such as the
+\c{$string.*()} family for manipulating strings or \c{$regex.*()} for working
+with regular expressions. Most functions are pure and those that are not,
+such as \c{$builtin.getenv()}, are explicitly documented as such.
+
+Some functions, such as those from the \c{$regex.*()} family, can only be called
+fully qualified with their family name. For example:
+
+\
+if $regex.match($name, '(.+)-(.+)')
+ ...
+\
+
+Other functions can be called without explicit qualification. For
+example:
+
+\
+path = $getenv('PATH')
+\
+
+There are also functions that can be called unqualified only for certain types
+of arguments (this fact will be reflected in their synopsis and/or
+documentation). Note, however, that every function can always be called
+qualified.
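+
+For example, the \c{$getenv()} call above can also be written in its
+qualified form:
+
+\
+path = $builtin.getenv('PATH')
+\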
+"
+
+// $builtin.*()
+//
+"
+\h#functions-builtin|Builtin Functions|
+
+The \c{$builtin.*()} function family contains fundamental \c{build2}
+functions.
+"
+source <functions-builtin.cli>;
+
+// $string.*()
+//
+"
+\h#functions-string|String Functions|
+"
+source <functions-string.cli>;
+
+
+// $integer.*()
+//
+"
+\h#functions-integer|Integer Functions|
+"
+source <functions-integer.cli>;
+
+
+// $bool.*()
+//
+"
+\h#functions-bool|Bool Functions|
+"
+source <functions-bool.cli>;
+
+
+// $path.*()
+//
+"
+\h#functions-path|Path Functions|
+
+The \c{$path.*()} function family contains functions for manipulating
+filesystem paths.
+"
+source <functions-path.cli>;
+
+
+// $name.*()
+//
+"
+\h#functions-name|Name Functions|
+
+The \c{$name.*()} function family contains functions that operate on target and
+prerequisite names. See also the \l{#functions-target \c{$target.*()} function
+family} for functions that operate on actual targets.
+"
+source <functions-name.cli>;
+
+
+// $target.*()
+//
+"
+\h#functions-target|Target Functions|
+
+The \c{$target.*()} function family contains functions that operate on
+targets. See also the \l{#functions-name \c{$name.*()} function family} for
+functions that operate on target (and prerequisite) names.
+"
+source <functions-target.cli>;
+
+
+// $regex.*()
+//
+"
+\h#functions-regex|Regex Functions|
+
+The \c{$regex.*()} function family contains functions that provide
+comprehensive regular expression matching and substitution facilities. The
+supported regular expression flavor is ECMAScript (more specifically,
+ECMA-262-based C++11 regular expressions).
+
+In the \c{$regex.*()} functions the substitution escape sequences in the
+format string (the \ci{fmt} argument) are extended with a subset of the Perl
+escape sequences: \c{\\n}, \c{\\u}, \c{\\l}, \c{\\U}, \c{\\L}, \c{\\E},
+\c{\\1} ... \c{\\9}, and \c{\\\\}. Note that the standard ECMAScript escape
+sequences (\c{$1}, \c{$2}, \c{$&}, etc) are still supported.
+
+Note that functions from the \c{$regex.*()} family can only be called fully
+qualified with their family name. For example:
+
+\
+if $regex.match($name, '(.+)-(.+)')
+ ...
+\
+
+"
+source <functions-regex.cli>;
+
+// $json.*()
+//
+"
+\h#functions-json|JSON Functions|
+
+The \c{$json.*()} function family contains functions that operate on the JSON
+types: \c{json}, \c{json_array}, and \c{json_object}. For example:
+
+\
+j = [json] one@1 two@abc three@([json] x@1 y@-1)
+
+for m: $j
+{
+ n = $member_name($m)
+ v = $member_value($m)
+
+ info $n $value_type($v) $v
+}
+\
+
+"
+source <functions-json.cli>;
+
+
+// $process.*()
+//
+"
+\h#functions-process|Process Functions|
+"
+source <functions-process.cli>;
+
+
+// $filesystem.*()
+//
+"
+\h#functions-filesystem|Filesystem Functions|
+"
+source <functions-filesystem.cli>;
+
+
+// $project_name.*()
+//
+"
+\h#functions-project_name|Project Name Functions|
+
+The \c{$project_name.*()} function family contains functions that operate on
+the \c{project_name} type.
+"
+source <functions-project-name.cli>;
+
+
+// $process_path.*()
+//
+"
+\h#functions-process-path|Process Path Functions|
+
+The \c{$process_path.*()} function family contains functions that operate on
+the \c{process_path} type and its extended \c{process_path_ex} variant. These
+types describe a path to an executable that, if necessary, has been found in
+\c{PATH}, completed with an extension, etc. The \c{process_path_ex} variant
+includes additional metadata, such as the stable process name for diagnostics
+and the executable checksum for change tracking.
+"
+source <functions-process-path.cli>;
+
+
+// $target_triplet.*()
+//
+"
+\h#functions-target-triplet|Target Triplet Functions|
+
+The \c{$target_triplet.*()} function family contains functions that operate on
+the \c{target_triplet} type that represents the ubiquitous
+\c{\i{cpu}-\i{vendor}-\i{os}} target platform triplet.
+"
+source <functions-target-triplet.cli>;
+
+
+"
+\h1#directives|Directives|
+
+\N{This chapter is a work in progress and is incomplete.}
+
+\h#directives-define|\c{define}|
+
+\
+define <derived>: <base>
+\
+
+Define a new target type \c{<derived>} by inheriting from an existing target
+type \c{<base>}. See \l{#targets-types Target Types} for details.
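+
+For example:
+
+\
+define md: doc
+doc{*}: extension = md
+\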
+
+
+\h#directives-include|\c{include}|
+
+\
+include <file>
+include <directory>
+\
+
+Load the specified file (the first form) or \c{buildfile} in the specified
+directory (the second form). In both cases the file is loaded in the scope
+corresponding to its directory. Subsequent inclusions of the same file are
+automatically ignored. See also \l{#directives-source \c{source}}.
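+
+For example, assuming a library in a sibling subdirectory (as in the layout
+shown in \l{#intro-import Target Importation}):
+
+\
+include ../libhello/           # Load buildfile in the libhello/ directory.
+include ../libhello/buildfile  # Same as above but with an explicit file name.
+\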
+
+
+\h#directives-source|\c{source}|
+
+\
+source <file>
+\
+
+Load the specified file in the current scope as if its contents were copied
+and pasted in place of the \c{source} directive. Note that subsequent sourcing
+of the same file in the same scope is not automatically ignored. See also
+\l{#directives-include \c{include}}.
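+
+For example, a hypothetical sketch (the file name is made up for illustration)
+of factoring out definitions shared by several \c{buildfiles}:
+
+\
+source ../common.build
+\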
+
\h1#attributes|Attributes|
@@ -5660,7 +6421,7 @@ exe{hello}: cxx{+{f* b*} -{foo bar}}
This is particularly useful if you would like to list the names to include or
exclude in a variable. For example, this is how we can exclude certain files
from compilation but still include them as ordinary file prerequisites (so
-that they are still included into the distribution):
+that they are still included into the source distribution):
\
exc = foo.cxx bar.cxx
@@ -5737,8 +6498,8 @@ patterns/matches that do not already contain an extension. Then the filesystem
search is performed for matching files.
For example, the \c{cxx{\}} target type obtains the default extension from the
-\c{extension} variable. Assuming we have the following line in our
-\c{root.build}:
+\c{extension} variable (see \l{#targets-types Target Types} for background).
+Assuming we have the following line in our \c{root.build}:
\
cxx{*}: extension = cxx
@@ -5762,101 +6523,6 @@ file-based, then the name pattern is returned as is (that is, as an ordinary
name). Project-qualified names are never considered to be patterns.
-\h1#variables|Variables|
-
-\N{This chapter is a work in progress and is incomplete.}
-
-The following variable/value types can currently be used in \c{buildfiles}:
-
-\
-bool
-
-int64
-int64s
-
-uint64
-uint64s
-
-string
-strings
-
-path
-paths
-dir_path
-dir_paths
-
-name
-names
-name_pair
-
-project_name
-target_triplet
-\
-
-Note that while expansions in the target and prerequisite-specific assignments
-happen in the corresponding target and prerequisite contexts, respectively,
-for type/pattern-specific assignments they happen in the scope context. Plus,
-a type/pattern-specific prepend/append is applied at the time of expansion for
-the actual target. For example:
-
-\
-x = s
-
-file{foo}: # target
-{
- x += t # s t
- y = $x y # s t y
-}
-
-file{foo}: file{bar} # prerequisite
-{
- x += p # x t p
- y = $x y # x t p y
-}
-
-file{b*}: # type/pattern
-{
- x += w # <append w>
- y = $x w # <assign s w>
-}
-
-x = S
-
-info $(file{bar}: x) # S w
-info $(file{bar}: y) # s w
-\
-
-
-\h1#directives|Directives|
-
-\N{This chapter is a work in progress and is incomplete.}
-
-\h#directives-include|\c{include}|
-
-\
-include <file>
-include <directory>
-\
-
-Load the specified file (the first form) or \c{buildfile} in the specified
-directory (the second form). In both cases the file is loaded in the scope
-corresponding to its directory. Subsequent inclusions of the same file are
-automatically ignored. See also \l{#directives-source \c{source}}.
-
-
-\h#directives-source|\c{source}|
-
-
-\
-source <file>
-\
-
-Load the specified file in the current scope as if its contents were copied
-and pasted in place of the \c{source} directive. Note that subsequent sourcing
-of the same file in the same scope are not automatically ignored. See also
-\l{#directives-include \c{include}}.
-
-
\h1#module-config|\c{config} Module|
\N{This chapter is a work in progress and is incomplete.}
@@ -6133,30 +6799,54 @@ of the Introduction, the \c{install} module defines the following standard
installation locations:
\
-name default config.* override
----- ------- -----------------
-root install.root
+name default config.install.*
+ (c.i.*) override
+---- ------- ----------------
+root c.i.root
-data_root root/ install.data_root
-exec_root root/ install.exec_root
+data_root root/ c.i.data_root
+exec_root root/ c.i.exec_root
-bin exec_root/bin/ install.bin
-sbin exec_root/sbin/ install.sbin
-lib exec_root/lib/<private>/ install.lib
-libexec exec_root/libexec/<private>/<project>/ install.libexec
-pkgconfig lib/pkgconfig/ install.pkgconfig
+bin exec_root/bin/ c.i.bin
+sbin exec_root/sbin/ c.i.sbin
+lib exec_root/lib/<private>/ c.i.lib
+libexec exec_root/libexec/<private>/<project>/ c.i.libexec
+pkgconfig lib/pkgconfig/ c.i.pkgconfig
-etc data_root/etc/ install.etc
-include data_root/include/<private>/ install.include
-share data_root/share/ install.share
-data share/<private>/<project>/ install.data
+etc data_root/etc/ c.i.etc
+include data_root/include/<private>/ c.i.include
+include_arch include/ c.i.include_arch
+share data_root/share/ c.i.share
+data share/<private>/<project>/ c.i.data
+buildfile share/build2/export/<project>/ c.i.buildfile
-doc share/doc/<private>/<project>/ install.doc
-legal doc/ install.legal
-man share/man/ install.man
-man<N> man/man<N>/ install.man<N>
+doc share/doc/<private>/<project>/ c.i.doc
+legal doc/ c.i.legal
+man share/man/ c.i.man
+man<N> man/man<N>/ c.i.man<N>
\
+The \c{include_arch} location is meant for architecture-specific files, such
+as configuration headers. By default it's the same as \c{include} but can be
+configured by the user to a different value (for example,
+\c{/usr/include/x86_64-linux-gnu/}) for platforms that support multiple
+architectures from the same installation location. This is how one would
+normally use it from a \c{buildfile}:
+
+\
+# The configuration header may contain target architecture-specific
+# information so install it into include_arch/ instead of include/.
+#
+h{*}: install = include/libhello/
+h{config}: install = include_arch/libhello/
+\
+
+The \c{buildfile} location is meant for exported buildfiles that can be
+imported by other projects. If a project contains any \c{**.build} buildfiles
+in its \c{build/export/} directory (or \c{**.build2} and \c{build2/export/} in
+the alternative naming scheme), then they are automatically installed into
+this location (recreating subdirectories).
+
The \c{<project>}, \c{<version>}, and \c{<private>} substitutions in these
\c{config.install.*} values are replaced with the project name, version, and
private subdirectory, respectively. If either is empty, then the corresponding
@@ -6175,7 +6865,9 @@ The private installation subdirectory is specified with the
directory and may include multiple components. For example:
\
-$ b install config.install.root=/usr/local/ config.install.private=hello/
+$ b install \
+ config.install.root=/usr/local/ \
+ config.install.private=hello/
\
\N|If you are relying on your system's dynamic linker defaults to
@@ -6193,6 +6885,153 @@ $ b install \
|
+
+\h#install-reloc|Relocatable Installation|
+
+A relocatable installation can be moved to a directory other than its original
+installation location. Note that the installation should be moved as a whole
+preserving the directory structure under its root (\c{config.install.root}).
+To request a relocatable installation, set the \c{config.install.relocatable}
+variable to \c{true}. For example:
+
+\
+$ b install \
+ config.install.root=/tmp/install \
+ config.install.relocatable=true
+\
+
+A relocatable installation is achieved by using paths relative to one
+filesystem entry within the installation to locate another. Some examples
+include:
+
+\ul|
+
+\li|Paths specified in \c{config.bin.rpath} are made relative using the
+\c{$ORIGIN} (Linux, BSD) or \c{@loader_path} (Mac OS) mechanisms.|
+
+\li|Paths in the generated \c{pkg-config} files are made relative to the
+\c{${pcfiledir\}} built-in variable.|
+
+\li|Paths in the generated installation manifest (\c{config.install.manifest})
+are made relative to the location of the manifest file.||
+
+While these common aspects are handled automatically, if a project relies on
+knowing its installation location, then it will most likely need to add manual
+support for relocatable installations.
+
+As an example, consider an executable that supports loading plugins and
+requires the plugin installation directory to be embedded into the executable
+during the build. The common way to support relocatable installations for such
+cases is to embed a path relative to the executable and complete it at
+runtime, normally by resolving the executable's path and using its directory
+as a base.
+
+If you would like to always use the relative path, regardless of whether the
+installation is relocatable or not, then you can obtain the library
+installation directory relative to the executable installation directory like
+this:
+
+\
+plugin_dir = $install.resolve($install.lib, $install.bin)
+\
+
+Alternatively, if you would like to continue using absolute paths for
+non-relocatable installations, then you can use something like this:
+
+\
+plugin_dir = $install.resolve( \
+ $install.lib, \
+ ($install.relocatable ? $install.bin : [dir_path] ))
+\
+
+Finally, if you are unable to support relocatable installations, the correct
+way to handle this is to assert this fact in \c{root.build} of your project,
+for example:
+
+\
+assert (!$install.relocatable) 'relocatable installation not supported'
+\
+
+
+\h#install-filter|Installation Filtering|
+
+While project authors determine what gets installed at the \c{buildfile}
+level, the users of the project can further filter the installation using the
+\c{config.install.filter} variable.
+
+The value of this variable is a list of key-value pairs that specify the
+filesystem entries to include or exclude from the installation. For example,
+the following filters will omit installing headers and static libraries
+(notice the quoting of the wildcard).
+
+\
+$ b install config.install.filter='include/@false \"*.a\"@false'
+\
+
+The key in each pair is a file or directory path or a path wildcard pattern.
+If a key is relative and contains a directory component or is a directory,
+then it is treated relative to the corresponding \c{config.install.*}
+location. Otherwise (simple path, normally a pattern), it is matched against
+the leaf of any path. Note that if an absolute path is specified, it should be
+without the \c{config.install.chroot} prefix.
+
+The value in each pair is either \c{true} (include) or \c{false} (exclude).
+The filters are evaluated in the order specified and the first match that is
+found determines the outcome. If no match is found, the default is to
+include. For a directory, while \c{false} means exclude all the sub-paths
+inside this directory, \c{true} does not mean that all the sub-paths will be
+included wholesale. Rather, the matched component of the sub-path is treated
+as included with the rest of the components matched against the following
+sub-filters. For example:
+
+\
+$ b install config.install.filter='
+ include/x86_64-linux-gnu/@true
+ include/x86_64-linux-gnu/details/@false
+ include/@false'
+\
+
+The \c{true} or \c{false} value may be followed by a comma and the \c{symlink}
+modifier to only apply to symlink filesystem entries. For example:
+
+\
+$ b config.install.filter='\"*.so\"@false,symlink'
+\
+
+A filter can be negated by specifying \c{!} as the first pair. For example:
+
+\
+$ b install config.install.filter='! include/@false \"*.a\"@false'
+\
+
+Note that the filtering mechanism only affects what gets physically copied to
+the installation directory without affecting what gets built for install or
+the view of what gets installed at the \c{buildfile} level. For example, given
+the \c{include/@false *.a@false} filters, static libraries will still be built
+(unless arranged not to with \c{config.bin.lib}) and the \c{pkg-config} files
+will still end up with \c{-I} options pointing to the header installation
+directory. Note also that this mechanism applies to both \c{install} and
+\c{uninstall} operations.
+
+\N|If you are familiar with the Debian or Fedora packaging, this mechanism is
+somewhat similar to (and can be used for a similar purpose as) the Debian's
+\c{.install} files and Fedora's \c{%files} spec file sections, which are used
+to split the installation into multiple binary packages.|
+
+As another example, the following filters will omit all the
+development-related files (headers, \c{pkg-config} files, static libraries,
+and shared library symlinks; assuming the platform uses the \c{.a}/\c{.so}
+extensions for the libraries):
+
+\
+$ b install config.install.filter='
+ include/@false
+ pkgconfig/@false
+ \"lib/*.a\"@false
+ \"lib/*.so\"@false,symlink'
+\
+
+
\h1#module-version|\c{version} Module|
A project can use any version format as long as it meets the package version
@@ -6463,7 +7302,7 @@ just not ordered correctly. As a result, we feel that the risks are justified
when the only alternative is manual version management (which is always an
option, nevertheless).
-When we prepare a distribution of a snapshot, the \c{version} module
+When we prepare a source distribution of a snapshot, the \c{version} module
automatically adjusts the package name to include the snapshot information as
well as patches the manifest file in the distribution with the snapshot number
and id (that is, replacing \c{.z} in the version value with the actual
@@ -6655,6 +7494,116 @@ depends: libprint [3.0.0-b.2.1 3.0.0-b.3)
\N{This chapter is a work in progress and is incomplete.}
+\h#module-bin-target-types|Binary Target Types|
+
+The following listing shows the hierarchy of the target types defined by the
+\c{bin} module while the following sections describe each target type in
+detail (\c{target{\}} and \c{file{\}} are standard target types defined by the
+\c{build2} core; see \l{#targets-types Target Types} for details).
+
+\
+ target----------------.
+ | |
+ ... |
+ | |
+ .---------------file------------. lib
+ | | | | | | libul
+ | libue obje bmie hbmie def obj
+liba libua obja bmia hbmia bmi
+libs libus objs bmis hbmis hbmi
+\
+
+
+\h2#module-bin-target-types-lib|\c{lib{\}}, \c{liba{\}}, \c{libs{\}}|
+
+The \c{liba{\}} and \c{libs{\}} target types represent static (archive) and
+shared libraries, respectively.
+
+The \c{lib{\}} target type is a group with the \c{liba{\}} and/or \c{libs{\}}
+members. A rule that encounters a \c{lib{\}} prerequisite may pick a member
+appropriate for the target being built or it may build all the members
+according to the \c{bin.lib} variable. See \l{#intro-lib Library Exportation
+and Versioning} for background.
+
+The \c{lib*{\}} file extensions are normally automatically assigned by the
+matching rules based on the target platform.
+
+
+\h2#module-bin-target-types-libu|\c{libul{\}}, \c{libue{\}}, \c{libua{\}},
+\c{libus{\}}|
+
+The \c{libu*{\}} target types represent utility libraries. Utility libraries
+are static libraries with object files appropriate for linking an executable
+(\c{libue{\}}), static library (\c{libua{\}}), or shared library
+(\c{libus{\}}). Where possible, utility libraries are built in the
+\"thin archive\" mode.
+
+The \c{libul{\}} target type is a group with the \c{libua{\}} and/or
+\c{libus{\}} members. A rule that encounters a \c{libul{\}} prerequisite picks
+a member appropriate for the target being built.
+
+The \c{libu*{\}} file extensions are normally automatically assigned by the
+matching rules based on the target platform.
+
+
+\h2#module-bin-target-types-obj|\c{obj{\}}, \c{obje{\}}, \c{obja{\}},
+\c{objs{\}}|
+
+The \c{obj*{\}} target types represent object files appropriate for linking an
+executable (\c{obje{\}}), static library (\c{obja{\}}), or shared library
+(\c{objs{\}}).
+
+\N|In \c{build2} we use distinct object files for the three types of binaries
+(executable, static library, and shared library). The distinction between
+static and shared libraries is made to accommodate build differences such as
+the need for position-independent code (\c{-fPIC}) in shared libraries. While
+in most cases the same object file can be used for executables and static
+libraries, they are kept separate for consistency and generality.|
+
+The \c{obj{\}} target type is a group with the \c{obje{\}}, and/or
+\c{obja{\}}, and/or \c{objs{\}} members. A rule that encounters an \c{obj{\}}
+prerequisite picks a member appropriate for the target being built.
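+
+For example, the following \c{buildfile} sketch (target and file names are
+made up) compiles the same source for use in both an executable and a library
+by depending on the \c{obj{\}} group, with the rules picking (and producing)
+the appropriate members:
+
+\
+exe{hello}: cxx{main} obj{util}
+lib{hello}: obj{util}
+obj{util}: cxx{util}
+\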
+
+The \c{obj*{\}} file extensions are normally automatically assigned by the
+matching rules based on the target platform.
+
+
+\h2#module-bin-target-types-bmi|\c{bmi{\}}, \c{bmie{\}}, \c{bmia{\}},
+\c{bmis{\}}|
+
+The \c{bmi*{\}} target types represent binary module interfaces (BMI) for
+C++20 named modules appropriate for linking an executable (\c{bmie{\}}),
+static library (\c{bmia{\}}), or shared library (\c{bmis{\}}).
+
+The \c{bmi{\}} target type is a group with the \c{bmie{\}}, and/or
+\c{bmia{\}}, and/or \c{bmis{\}} members. A rule that encounters a \c{bmi{\}}
+prerequisite picks a member appropriate for the target being built.
+
+The \c{bmi*{\}} file extensions are normally automatically assigned by the
+matching rules based on the target platform.
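+
+Note that \c{bmi*{\}} targets are normally synthesized implicitly by the
+compile rule when it encounters a module interface prerequisite rather than
+declared explicitly in a \c{buildfile}. A minimal sketch (assuming the
+\c{cxx} module with \c{cxx.features.modules} enabled):
+
+\
+exe{hello}: cxx{main} mxx{hello}
+\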
+
+
+\h2#module-bin-target-types-hbmi|\c{hbmi{\}}, \c{hbmie{\}}, \c{hbmia{\}},
+\c{hbmis{\}}|
+
+The \c{hbmi*{\}} target types represent binary module interfaces (BMI) for
+C++20 header units appropriate for linking an executable (\c{hbmie{\}}),
+static library (\c{hbmia{\}}), or shared library (\c{hbmis{\}}).
+
+The \c{hbmi{\}} target type is a group with the \c{hbmie{\}}, and/or
+\c{hbmia{\}}, and/or \c{hbmis{\}} members. A rule that encounters an
+\c{hbmi{\}} prerequisite picks a member appropriate for the target being
+built.
+
+The \c{hbmi*{\}} file extensions are normally automatically assigned by the
+matching rules based on the target platform.
+
+
+\h2#module-bin-target-types-def|\c{def{\}}|
+
+The \c{def{\}} target type represents Windows module definition files and has
+the fixed default extension \c{.def}.
+
\h1#module-cc|\c{cc} Module|
@@ -6694,6 +7643,11 @@ config.cc.libs
config.cc.internal.scope
cc.internal.scope
+
+config.cc.reprocess
+ cc.reprocess
+
+config.cc.pkgconfig.sysroot
\
Note that the compiler mode options are \"cross-hinted\" between \c{config.c}
@@ -6709,6 +7663,41 @@ $ b config.cxx=\"g++ -m32\"
$ b config.cxx=\"clang++ -stdlib=libc++\"
\
+\h#cc-target-types|C-Common Target Types|
+
+The following listing shows the hierarchy of the target types defined by the
+\c{cc} module while the following sections describe each target type in detail
+(\c{file{\}} is a standard target type defined by the \c{build2} core; see
+\l{#targets-types Target Types} for details). Every \c{cc}-based module (such
+as \c{c} and \c{cxx}) will have these common target types defined in addition
+to the language-specific ones.
+
+\
+.--file--.
+|        |
+h        pc
+         |
+        pca
+        pcs
+\
+
+\N|While the \c{h{\}} target type represents a C header file, there is hardly
+any C-family compilation that does not involve a C header inclusion. As a
+result, this target type is defined by all \c{cc}-based modules.|
+
+For the description of the \c{h{\}} target type refer to \l{#c-target-types-c
+\c{c{\}}, \c{h{\}}} in the C module documentation.
+
+\h2#cc-target-types-pc|\c{pc{\}}, \c{pca{\}}, \c{pcs{\}}|
+
+The \c{pc*{\}} target types represent \c{pkg-config} files. The \c{pc{\}}
+target type represents the common file and has the fixed default extension
+\c{.pc}. The \c{pca{\}} and \c{pcs{\}} target types represent the static and
+shared files and have the fixed default extensions \c{.static.pc} and
+\c{.shared.pc}, respectively. See \l{#cc-import-installed Importation of
+Installed Libraries} for background.
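+
+For example, for a library named \c{lib{hello\}} the three files would
+typically be named along these lines (a sketch; the exact stem depends on the
+library name):
+
+\
+libhello.pc          # pc{}  (common)
+libhello.static.pc   # pca{} (static)
+libhello.shared.pc   # pcs{} (shared)
+\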
+
+
\h#cc-internal-scope|Compilation Internal Scope|
\N|While this section uses the \c{cxx} module and C++ compilation as an
@@ -6872,6 +7861,9 @@ if ($cxx.target.system == 'mingw32')
That is, we use the \c{.def} file approach for MSVC (including when building
with Clang) and the built-in support (\c{--export-all-symbols}) for MinGW.
+\N|You will likely also want to add the generated \c{.def} file (or the
+blanket \c{*.def}) to your \c{.gitignore} file.|
+
Note that it is also possible to use the \c{.def} file approach for MinGW. In
this case we need to explicitly load the \c{bin.def} module (which should be
done after loading \c{c} or \c{cxx}) and can use the following arrangement:
@@ -6899,6 +7891,169 @@ to the symbol auto-importing support in Windows linkers. Note, however, that
auto-importing only works for functions and not for global variables.
+\h#cc-import-installed|Importation of Installed Libraries|
+
+As discussed in \l{#intro-import Target Importation}, searching for installed
+C/C++ libraries is seamlessly integrated into the general target importation
+mechanism. This section provides more details on the installed library search
+semantics and \c{pkg-config} integration. These details can be particularly
+useful when dealing with libraries that were not built with \c{build2} and
+which often use idiosyncratic \c{pkg-config} file names.
+
+The \c{cc}-based modules use the common installed library search
+implementation with the following semantics. To illustrate the finer points,
+we assume the following import:
+
+\
+import libs = libbar%lib{Xfoo}
+\
+
+\ol|
+
+\li|First, the ordered list of library search directories is obtained by
+combining two lists: the list of the compiler's system library search
+directories (extracted, for example, with the \c{-print-search-dirs} GCC/Clang
+option) and the list of user library search directories (specified, for
+example, with the \c{-L} options in \c{*.loptions}).
+
+The key property of this combined list is that it matches the search semantics
+that would be used by the compiler to find libraries specified with the \c{-l}
+option during linking.|
+
+\li|Given the list obtained in the previous step, a library binary (shared
+and/or static library) is searched for in the correct order and using the
+target platform-appropriate library prefix and extension (for example, \c{lib}
+prefix and the \c{.so}/\c{.a} extensions if targeting Linux).
+
+For example (continuing with the above import and assuming Linux), each
+directory will be checked for the presence of \c{libXfoo.so} and \c{libXfoo.a}
+(where the \c{Xfoo} stem is the imported target name).
+
+If only a shared or static binary is found in a given directory, no further
+directories are checked for the missing variant. Instead, the missing
+variant is assumed to be unavailable.
+
+If neither a shared nor static library is found in a given directory, then
+it is also checked for the presence of the corresponding \c{pkg-config}
+file as in the following step. If such a file is found, then the library is
+assumed to be \i{binless} (header-only, etc).|
+
+\li|If a static and/or shared library is found (or if looking for a binless
+library), the corresponding \c{pkg-config} subdirectory (normally just
+\c{pkgconfig/}) is searched for the library's \c{.pc} file.
+
+More precisely, we first look for the \c{.static.pc} file for a static
+library and for the \c{.shared.pc} file for a shared library, falling back
+to the common \c{.pc} if they don't exist.
+
+\N|It is often required to use different options for consuming static and
+shared libraries. While there is the \c{Libs.private} and \c{Cflags.private}
+mechanism in \c{pkg-config}, its semantics is to append options to \c{Libs}
+and \c{Cflags} rather than to provide alternative options. And often what is
+required is different options for static and shared libraries, for example, a
+macro that indicates whether the library is being linked statically or as
+shared in order to set up symbol exporting.
+
+As a result, in \c{build2} we produce separate \c{.pc} files for static and
+shared libraries in addition to the \"best effort\" common \c{.pc} file for
+compatibility with other build systems. Similarly, when consuming a library,
+we first look for the \c{.static.pc} and \c{.shared.pc} files, falling back
+to the common \c{.pc} if they are not available.|
+
+To deal with idiosyncrasies in \c{pkg-config} file names, the following base
+names are tried in order, where \ci{name} is the imported target name
+(\c{Xfoo} in the above import), \ci{proj} is the imported project name
+(\c{libbar} in the above import), and \ci{ext} is one of the above-mentioned
+\c{pkg-config} extensions (\c{static.pc}, \c{shared.pc}, or \c{pc}). The
+concrete name tried for the above import is shown in parentheses as an
+example.
+
+\ol|
+
+\li|\c{lib\i{name}.\i{ext}} (\c{libXfoo.pc})|
+
+\li|\c{\i{name}.\i{ext}} (\c{Xfoo.pc})|
+
+\li|lowercase \c{lib\i{name}.\i{ext}} (\c{libxfoo.pc})|
+
+\li|lowercase \c{\i{name}.\i{ext}} (\c{xfoo.pc})|
+
+\li|\c{\i{proj}.\i{ext}} (\c{libbar.pc}; this variant is only tried if the import is project-qualified)||
+
+||
+
+In particular, the last try (for \c{\i{proj}.\i{ext}}) serves as an escape
+hatch for cases where the \c{.pc} file name does not have anything to do with
+the names of the library binaries. The canonical example of this is \c{zlib},
+which names its library binaries \c{libz.so}/\c{libz.a} but calls its \c{.pc}
+file \c{zlib.pc}. To be able to import \c{zlib} that was not built with
+\c{build2}, we have to use the following import:
+
+\
+import libs = zlib%lib{z}
+\
+
+Note also that these complex rules (which are unfortunately necessary to deal
+with the lack of any consistency in \c{.pc} file naming) can sometimes produce
+surprising interactions. For example, a clearly incorrect import may
+nevertheless appear to somehow work, as in the following case:
+
+\
+import libs = zlib%lib{znonsense}
+\
+
+What happens here is that while no library binary is found, \c{zlib.pc} is
+found and as a result the library ends up being considered binless. The
+\c{-lz} option found in the \c{Libs} value of \c{zlib.pc} is then treated as a
+prerequisite library, resolved using the above algorithm, and linked. In other
+words, in this case we end up with a binless library \c{lib{znonsense\}} that
+depends on \c{lib{z\}} instead of a single \c{lib{z\}} library.
+
+\h2#cc-import-installed-sysroot|Rewriting Installed Libraries System Root (sysroot)|
+
+Sometimes the installed libraries are moved to a different location after the
+installation. This is especially common in embedded development where the code
+is normally cross-compiled and the libraries for the target platform are
+placed into a host directory, called system root or \i{sysroot}, that doesn't
+match where these libraries were originally installed to. For example, the
+libraries might have been installed into \c{/usr/} but on the host machine
+they may reside in \c{/opt/target/usr/}. In this example, \c{/opt/target/} is
+the sysroot.
+
+While such relocations usually do not affect the library headers or binaries,
+they do break the \c{pkg-config} \c{.pc} files, which often contain \c{-I}
+and \c{-L} options with absolute paths. Continuing with the above example, a
+\c{.pc} file as originally installed may contain \c{-I/usr/include} and
+\c{-L/usr/lib} while now, with the libraries relocated to \c{/opt/target/},
+these options somehow need to be adjusted to \c{-I/opt/target/usr/include}
+and \c{-L/opt/target/usr/lib}.
+
+While it is possible (and perhaps more correct) to accomplish this by fixing
+the \c{.pc} files to match the new location, this is not always easy or even
+possible. As a result, \c{build2} provides a mechanism for automatically
+adjusting the system root in the \c{-I} and \c{-L} options extracted from
+\c{.pc} files.
+
+\N|This functionality is roughly equivalent to that provided with the
+\c{PKG_CONFIG_SYSROOT_DIR} environment variable by the \c{pkg-config}
+utility.|
+
+Specifically, the \c{config.cc.pkgconfig.sysroot} variable can be used to
+specify an alternative system root. When specified, all absolute paths in the
+\c{-I} and \c{-L} options that are not already in this directory will be
+rewritten to start with this sysroot.
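+
+For example, continuing with the above scenario, the sysroot could be
+specified when configuring the build:
+
+\
+$ b config.cc.pkgconfig.sysroot=/opt/target/
+\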
+
+\N|Note that this mechanism is a workaround rather than a proper solution
+since it is limited to the \c{-I} and \c{-L} options. In particular, it does
+not handle any other options that may contain absolute paths or \c{pkg-config}
+variables that may be queried.
+
+As a result, it should only be used for dealing with issues in third-party
+\c{.pc} files that do not handle relocation (for example, using the
+\c{${pcfiledir\}} built-in \c{pkg-config} variable). In particular, for
+\c{build2}-generated \c{.pc} files a \l{#install-reloc relocatable
+installation} should be used instead.|
+
+
\h#cc-gcc|GCC Compiler Toolchain|
The GCC compiler id is \c{gcc}.
@@ -7127,10 +8282,38 @@ config.c.internal.scope
c.internal.scope
\
+\h#c-target-types|C Target Types|
+
+The following listing shows the hierarchy of the target types defined by the
+\c{c} module while the following sections describe each target type in detail
+(\c{file{\}} is a standard target type defined by the \c{build2} core; see
+\l{#targets-types Target Types} for details). See also \l{#cc-target-types
+C-Common Target Types} for target types defined by all the \c{cc}-based
+modules.
+
+\
+.--file--.
+|   |    |
+c   m    S
+h
+\
+
+The \c{m{\}} target type represents an Objective-C source file; see
+\l{#c-objc Objective-C Compilation} for details.
+
+The \c{S{\}} target type represents an Assembler with C Preprocessor file;
+see \l{#c-as-cpp Assembler with C Preprocessor Compilation} for details.
+
+\h2#c-target-types-c|\c{c{\}}, \c{h{\}}|
+
+The \c{c{\}} and \c{h{\}} target types represent C source and header files.
+They have the default extensions \c{.c} and \c{.h}, respectively, which can
+be customized with the \c{extension} variable.
+
\h#c-objc|Objective-C Compilation|
-The \c{c} module provides the \c{c.objc} submodules which can be loaded in
+The \c{c} module provides the \c{c.objc} submodule which can be loaded in
order to register the \c{m{\}} target type and enable Objective-C compilation
in the \c{C} compile rule. Note that \c{c.objc} must be loaded after the \c{c}
module and while the \c{m{\}} target type is registered unconditionally,
@@ -7157,6 +8340,128 @@ automatically link any necessary Objective-C runtime library (such as
\c{-lobjc}).
+\h#c-as-cpp|Assembler with C Preprocessor Compilation|
+
+The \c{c} module provides the \c{c.as-cpp} submodule which can be loaded in
+order to register the \c{S{\}} target type and enable Assembler with C
+Preprocessor compilation in the \c{C} compile rule. Note that \c{c.as-cpp}
+must be loaded after the \c{c} module and while the \c{S{\}} target type is
+registered unconditionally, compilation is only enabled if the C compiler
+supports Assembler with C Preprocessor compilation. Typical usage:
+
+\
+# root.build
+#
+using c
+using c.as-cpp
+\
+
+\
+# buildfile
+#
+exe{hello}: {h c}{* -hello.c}
+
+# Use C implementation as a fallback if no assembler.
+#
+assembler = ($c.class == 'gcc' && $c.target.cpu == 'x86_64')
+
+exe{hello}: S{hello}: include = $assembler
+exe{hello}: c{hello}: include = (!$assembler)
+\
+
+\
+/* hello.S
+ */
+#ifndef HELLO_RESULT
+# define HELLO_RESULT 0
+#endif
+
+.text
+
+.global hello
+hello:
+ /* ... */
+ movq $HELLO_RESULT, %rax
+ ret
+
+#ifdef __ELF__
+.section .note.GNU-stack, \"\", @progbits
+#endif
+\
+
+The default file extension for the \c{S{\}} target type is \c{.S} (capital)
+but that can be customized using the standard mechanisms. For example:
+
+\
+# root.build
+#
+using c
+using c.as-cpp
+
+h{*}: extension = h
+c{*}: extension = c
+S{*}: extension = sx
+\
+
+Note that \c{*.coptions} are passed to the C compiler when compiling Assembler
+with C Preprocessor files because compile options may cause additional
+preprocessor macros to be defined. Plus, some of them (such as \c{-g}) are
+passed (potentially translated) to the underlying assembler. To pass
+additional options when compiling Assembler files use \c{c.poptions} and
+\c{c.coptions}. For example (continuing with the previous example):
+
+\
+if $assembler
+{
+  obj{hello}:
+  {
+    c.poptions += -DHELLO_RESULT=1
+    c.coptions += -Wa,--no-pad-sections
+  }
+}
+\
+
+\h#c-predefs|C Compiler Predefined Macro Extraction|
+
+The \c{c} module provides the \c{c.predefs} submodule which can be loaded in
+order to register a rule that generates a C header with predefined compiler
+macros. Note that the \c{c.predefs} module must be loaded after the \c{c}
+module and the rule will only match with an explicit rule hint. Typical usage:
+
+\
+# root.build
+#
+using c
+using c.predefs
+\
+
+\
+# buildfile
+#
+[rule_hint=c.predefs] h{predefs}:
+\
+
+Note also that the MSVC compiler only supports the predefined macro extraction
+starting from Visual Studio 2019 (16.0; \c{cl.exe} version 19.20). If support
+for earlier versions is required, then you will need to provide a fallback
+implementation appropriate for your project. For example:
+
+\
+[rule_hint=c.predefs] h{predefs}:
+% update
+if ($c.id == 'msvc' && \
+    ($c.version.major < 19 || \
+     ($c.version.major == 19 && $c.version.minor < 20)))
+{{
+  diag c-predefs $>
+
+  cat <<EOF >$path($>)
+  #define _WIN32
+  EOF
+}}
+\
+
+
\h1#module-cxx|\c{cxx} Module|
\N{This chapter is a work in progress and is incomplete.}
@@ -7224,6 +8529,45 @@ config.cxx.translate_include
\
+\h#cxx-target-types|C++ Target Types|
+
+The following listing shows the hierarchy of the target types defined by the
+\c{cxx} module while the following sections describe each target type in
+detail (\c{file{\}} is a standard target type defined by the \c{build2} core;
+see \l{#targets-types Target Types} for details). See also \l{#cc-target-types
+C-Common Target Types} for target types defined by all the \c{cc}-based
+modules.
+
+\
+ .--file--.
+ |        |
+cxx      mm
+hxx
+ixx
+txx
+mxx
+\
+
+The \c{mm{\}} target type represents an Objective-C++ source file; see
+\l{#cxx-objcxx Objective-C++ Compilation} for details.
+
+\h2#cxx-target-types-cxx|\c{cxx{\}}, \c{hxx{\}}, \c{ixx{\}}, \c{txx{\}},
+\c{mxx{\}}|
+
+The \c{cxx{\}}, \c{hxx{\}}, \c{ixx{\}}, \c{txx{\}}, and \c{mxx{\}} target
+types represent C++ source, header, inline, template, and module interface
+files. They have the default extensions \c{.cxx}, \c{.hxx}, \c{.ixx},
+\c{.txx}, and \c{.mxx}, respectively, which can be customized with the
+\c{extension} variable. For example (normally done in \c{root.build}):
+
+\
+using cxx
+
+cxx{*}: extension = cpp
+hxx{*}: extension = hpp
+mxx{*}: extension = cppm
+\
+
\h#cxx-modules|C++ Modules Support|
This section describes the build system support for C++ modules.
@@ -7361,7 +8705,7 @@ module implementation units appears reasonable and that's what we recommend.
A module declaration (exporting or non-exporting) starts a \i{module purview}
that extends until the end of the module translation unit. Any name declared
-in a module's purview \i{belongs} to said module. For example:
+in a module's purview \i{belongs} to the said module. For example:
\
#include <string> // Not in purview.
@@ -7531,7 +8875,7 @@ say_hello (const std::string&);
\
One way to think of a re-export is \i{as if} an import of a module also
-\"injects\" all the imports said module re-exports, recursively. That's
+\"injects\" all the imports the said module re-exports, recursively. That's
essentially how most compilers implement it.
Module re-export is the mechanism for assembling bigger modules out of
@@ -8811,7 +10155,7 @@ purviews.
\h#cxx-objcxx|Objective-C++ Compilation|
-The \c{cxx} module provides the \c{cxx.objcxx} submodules which can be loaded
+The \c{cxx} module provides the \c{cxx.objcxx} submodule which can be loaded
in order to register the \c{mm{\}} target type and enable Objective-C++
compilation in the \c{C++} compile rule. Note that \c{cxx.objcxx} must be
loaded after the \c{cxx} module and while the \c{mm{\}} target type is
@@ -8838,6 +10182,49 @@ made to automatically link any necessary Objective-C runtime library (such as
\c{-lobjc}).
+\h#cxx-predefs|C++ Compiler Predefined Macro Extraction|
+
+The \c{cxx} module provides the \c{cxx.predefs} submodule which can be loaded
+in order to register a rule that generates a C++ header with predefined
+compiler macros. Note that the \c{cxx.predefs} module must be loaded after the
+\c{cxx} module and the rule will only match with an explicit rule
+hint. Typical usage:
+
+\
+# root.build
+#
+using cxx
+using cxx.predefs
+\
+
+\
+# buildfile
+#
+[rule_hint=cxx.predefs] hxx{predefs}:
+\
+
+Note also that the MSVC compiler only supports the predefined macro extraction
+starting from Visual Studio 2019 (16.0; \c{cl.exe} version 19.20). If support
+for earlier versions is required, then you will need to provide a fallback
+implementation appropriate for your project. For example:
+
+\
+[rule_hint=cxx.predefs] hxx{predefs}:
+% update
+if ($cxx.id == 'msvc' && \
+    ($cxx.version.major < 19 || \
+     ($cxx.version.major == 19 && $cxx.version.minor < 20)))
+{{
+  diag c++-predefs $>
+
+  cat <<EOF >$path($>)
+  #define _WIN32
+  #define __cplusplus 201402L
+  EOF
+}}
+\
+
+
\h1#module-in|\c{in} Module|
The \c{in} build system module provides support for \c{.in} (input) file
@@ -9147,11 +10534,11 @@ by searching in the \c{PATH} environment variable.
By convention, \c{bash} module libraries should use the \c{lib} name prefix,
for example, \c{libhello}. If there is also a native library (that is, one
written in C/C++) that provides the same functionality (or the \c{bash}
-library is a language binding for said library), then it is customary to add
-the \c{.bash} extension to the \c{bash} library name, for example,
+library is a language binding for the said library), then it is customary to
+add the \c{.bash} extension to the \c{bash} library name, for example,
\c{libhello.bash}. Note that in this case the top-level subdirectory within
-the project is expected to be called without the \c{bash} extension,
-for example, \c{libhello}.
+the project is expected to be called without the \c{bash} extension, for
+example, \c{libhello}.
Modules can be \i{private} or \i{public}. Private modules are implementation
details of a specific project and are not expected to be imported from other
@@ -9198,4 +10585,498 @@ corresponding \c{in{\}} and one or more \c{bash{\}} prerequisites as well as
\c{bash{\}} targets that have the corresponding \c{in{\}} prerequisite (if you
need to preprocess a script that does not depend on any modules, you can use
the \c{in} module's rule).
+
+
+\h1#json-dump|Appendix A \- JSON Dump Format|
+
+This appendix describes the machine-readable, JSON-based build system state
+dump format that can be requested with the \c{--dump-format=json-v0.1} build
+system driver option (see \l{b(1)} for details).
+
+The format is specified in terms of the serialized representation of C++
+\c{struct} instances. See \l{b.xhtml#json-output JSON OUTPUT} for details on
+the overall properties of this format and the semantics of the \c{struct}
+serialization.
+
+\N|This format is currently unstable (thus the temporary \c{-v0.1} suffix)
+and may be changed in ways other than as described in \l{b.xhtml#json-output
+JSON OUTPUT}. In case of such changes the format version will be incremented
+to allow detecting incompatibilities but no support for older versions is
+guaranteed.|
+
+The build system state can be dumped after the load phase (\c{--dump=load}),
+once the build state has been loaded, and/or after the match phase
+(\c{--dump=match}), after rules have been matched to targets to execute the
+desired action. The JSON format differs depending on the phase after which it
+is produced. After the load phase the format aims to describe the
+action-independent state, essentially as specified in the \c{buildfiles},
+while after the match phase it aims to describe the state for executing the
+specified action, as determined by the rules that have been matched. The
+former state would be more appropriate, for example, for an IDE that tries to
+use \c{buildfiles} as project files. The latter state could be used to
+determine the actual build graph for a certain action, for example, in order
+to infer which executable targets are considered tests by the \c{test}
+operation.
+
+While it's possible to dump the build state as a byproduct of executing an
+action (for example, performing an update), it's often desirable to only dump
+the build state and do it as quickly as possible. For such cases the
+recommended option combinations are as follows (see the \c{--load-only} and
+\c{--match-only} documentation for details):
+
+\
+$ b --load-only --dump=load --dump-format=json-v0.1 .../dir/
+
+$ b --match-only --dump=match --dump-format=json-v0.1 .../dir/
+$ b --match-only --dump=match --dump-format=json-v0.1 .../dir/type{name}
+\
+
+\N|Note that a match dump for a large project can produce a large amount of
+data, especially for the \c{update} operation (tens and even hundreds of
+megabytes is not uncommon). To reduce this size it is possible to limit the
+dump to specific scopes and/or targets with the \c{--dump-scope} and
+\c{--dump-target} options.|
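+
+For example, to limit the dump to a specific scope or target, the invocations
+might look along these lines (a sketch; see \l{b(1)} for the exact option
+argument forms):
+
+\
+$ b --match-only --dump=match --dump-format=json-v0.1 \
+  --dump-scope=.../dir/ .../dir/
+
+$ b --match-only --dump=match --dump-format=json-v0.1 \
+  --dump-target=.../dir/type{name} .../dir/
+\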
+
+The complete dump (that is, not of a specific scope or target) is a tree of
+nested scope objects (see \l{#intro-dirs-scopes Output Directories and Scopes}
+for background). The scope object has the serialized representation of the
+following C++ \c{struct} \c{scope}. It is the same for both load and match
+dumps except for the type of the \c{targets} member:
+
+\
+struct scope
+{
+ string out_path;
+ optional<string> src_path;
+
+ vector<variable> variables; // Non-type/pattern scope variables.
+
+ vector<scope> scopes; // Immediate children.
+
+ vector<loaded_target|matched_target> targets;
+};
+\
+
+For example (parts of the output are omitted for brevity):
+
+\N|The actual output is produced unindented to reduce the size.|
+
+\
+$ cd /tmp
+$ bdep new hello
+$ cd hello
+$ bdep new -C @gcc cc
+$ b --load-only --dump=load --dump-format=json-v0.1
+{
+ \"out_path\": \"\",
+ \"variables\": [ ... ],
+ \"scopes\": [
+ {
+ \"out_path\": \"/tmp/hello-gcc\",
+ \"variables\": [ ... ],
+ \"scopes\": [
+ {
+ \"out_path\": \"hello\",
+ \"src_path\": \"/tmp/hello\",
+ \"variables\": [ ... ],
+ \"scopes\": [
+ {
+ \"out_path\": \"hello\",
+ \"src_path\": \"/tmp/hello/hello\",
+ \"variables\": [ ... ],
+ \"targets\": [ ... ]
+ }
+ ],
+ \"targets\": [ ... ]
+ }
+ ],
+ \"targets\": [ ... ]
+ }
+ ]
+}
+\
+
+The \c{out_path} member is relative to the parent scope. It is empty for the
+special global scope, which is the root of the tree. The \c{src_path} member
+is absent if it is the same as \c{out_path} (an in-source build or a scope
+outside of a project).
+
+\N|For the match dump, targets that have not been matched for the specified
+action are omitted.|
+
+In the load dump, the target object has the serialized representation of the
+following C++ \c{struct} \c{loaded_target}:
+
+\
+struct loaded_target
+{
+ string name; // Relative quoted/qualified name.
+ string display_name; // Relative display name.
+ string type; // Target type.
+ optional<string> group; // Absolute quoted/qualified group target.
+
+ vector<variable> variables; // Target variables.
+
+ vector<prerequisite> prerequisites;
+};
+\
+
+For example (continuing with the previous \c{hello} setup):
+
+\
+{
+ \"out_path\": \"\",
+ \"scopes\": [
+ {
+ \"out_path\": \"/tmp/hello-gcc\",
+ \"scopes\": [
+ {
+ \"out_path\": \"hello\",
+ \"src_path\": \"/tmp/hello\",
+ \"scopes\": [
+ {
+ \"out_path\": \"hello\",
+ \"src_path\": \"/tmp/hello/hello\",
+ \"targets\": [
+ {
+ \"name\": \"exe{hello}\",
+ \"display_name\": \"exe{hello}\",
+ \"type\": \"exe\",
+ \"prerequisites\": [
+ {
+ \"name\": \"cxx{hello}\",
+ \"type\": \"cxx\"
+ },
+ {
+ \"name\": \"testscript{testscript}\",
+ \"type\": \"testscript\"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+}
+\
+
+The target \c{name} member is the target name that is qualified with the
+extension (if applicable and known) and, if required, is quoted so that it can
+be passed back to the build system driver on the command line. The
+\c{display_name} member is unqualified and unquoted. Note that both the target
+\c{name} and \c{display_name} members are normally relative to the containing
+scope (if any).
+
+The prerequisite object has the serialized representation of the following C++
+\c{struct} \c{prerequisite}:
+
+\
+struct prerequisite
+{
+ string name; // Quoted/qualified name.
+ string type;
+ vector<variable> variables; // Prerequisite variables.
+};
+\
+
+The prerequisite \c{name} member is normally relative to the containing scope.
+
+In the match dump, the target object has the serialized representation of the
+following C++ \c{struct} \c{matched_target}:
+
+\
+struct matched_target
+{
+ string name;
+ string display_name;
+ string type;
+ optional<string> group;
+
+ optional<path> path; // Absent if not path target, not assigned.
+
+ vector<variable> variables;
+
+ optional<operation_state> outer_operation; // null if not matched.
+ operation_state inner_operation; // null if not matched.
+};
+\
+
+For example (outer scopes removed for brevity):
+
+\
+$ b --match-only --dump=match --dump-format=json-v0.1
+{
+ \"out_path\": \"hello\",
+ \"src_path\": \"/tmp/hello/hello\",
+ \"targets\": [
+ {
+ \"name\": \"/tmp/hello/hello/cxx{hello.cxx}@./\",
+ \"display_name\": \"/tmp/hello/hello/cxx{hello}@./\",
+ \"type\": \"cxx\",
+ \"path\": \"/tmp/hello/hello/hello.cxx\",
+ \"inner_operation\": {
+ \"rule\": \"build.file\",
+ \"state\": \"unchanged\"
+ }
+ },
+ {
+ \"name\": \"obje{hello.o}\",
+ \"display_name\": \"obje{hello}\",
+ \"type\": \"obje\",
+ \"group\": \"/tmp/hello-gcc/hello/hello/obj{hello}\",
+ \"path\": \"/tmp/hello-gcc/hello/hello/hello.o\",
+ \"inner_operation\": {
+ \"rule\": \"cxx.compile\",
+ \"prerequisite_targets\": [
+ {
+ \"name\": \"/tmp/hello/hello/cxx{hello.cxx}@./\",
+ \"type\": \"cxx\"
+ },
+ {
+ \"name\": \"/usr/include/c++/12/h{iostream.}\",
+ \"type\": \"h\"
+ },
+ ...
+ ]
+ }
+ },
+ {
+ \"name\": \"exe{hello.}\",
+ \"display_name\": \"exe{hello}\",
+ \"type\": \"exe\",
+ \"path\": \"/tmp/hello-gcc/hello/hello/hello\",
+ \"inner_operation\": {
+ \"rule\": \"cxx.link\",
+ \"prerequisite_targets\": [
+ {
+ \"name\": \"/tmp/hello-gcc/hello/hello/obje{hello.o}\",
+ \"type\": \"obje\"
+ }
+ ]
+ }
+ }
+ ]
+}
+\
+
+The first four members in \c{matched_target} have the same semantics as in
+\c{loaded_target}.
+
+The \c{outer_operation} member is only present if the action has an outer
+operation. For example, when performing \c{update-for-test}, \c{test} is the
+outer operation while \c{update} is the inner operation.
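+
+For example, a match dump for such an action could be requested along these
+lines (assuming the \c{update-for-test} operation spelling; see \l{b(1)} for
+details):
+
+\
+$ b --match-only --dump=match --dump-format=json-v0.1 update-for-test
+\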
+
+The operation state object has the serialized representation of the following
+C++ \c{struct} \c{operation_state}:
+
+\
+struct operation_state
+{
+ string rule; // null if direct recipe match.
+
+ optional<string> state; // One of unchanged|changed|group.
+
+ vector<variable> variables; // Rule variables.
+
+ vector<prerequisite_target> prerequisite_targets;
+};
+\
+
+The \c{rule} member is the matched rule name. The \c{state} member is the
+target state, if known after match. The \c{prerequisite_targets} array is a
+subset of prerequisites resolved to targets that are in effect for this
+action. The matched rule may also add extra targets, for example, dynamically
+extracted dependencies, such as \c{/usr/include/c++/12/h{iostream.\}} in the
+above listing.
+
+The prerequisite target object has the serialized representation of the
+following C++ \c{struct} \c{prerequisite_target}:
+
+\
+struct prerequisite_target
+{
+ string name; // Absolute quoted/qualified target name.
+ string type;
+ bool adhoc;
+};
+\
+
+The \c{variables} array in the scope, target, prerequisite, and prerequisite
+target objects contains scope, target, prerequisite, and rule variables,
+respectively.
+
+The variable object has the serialized representation of the following C++
+\c{struct} \c{variable}:
+
+\
+struct variable
+{
+ string name;
+ optional<string> type;
+ json_value value; // null|boolean|number|string|object|array
+};
+\
+
+For example:
+
+\
+{
+ \"out_path\": \"\",
+ \"variables\": [
+ {
+ \"name\": \"build.show_progress\",
+ \"type\": \"bool\",
+ \"value\": true
+ },
+ {
+ \"name\": \"build.verbosity\",
+ \"type\": \"uint64\",
+ \"value\": 1
+ },
+ ...
+ ],
+ \"scopes\": [
+ {
+ \"out_path\": \"/tmp/hello-gcc\",
+ \"scopes\": [
+ {
+ \"out_path\": \"hello\",
+ \"src_path\": \"/tmp/hello\",
+ \"scopes\": [
+ {
+ \"out_path\": \"hello\",
+ \"src_path\": \"/tmp/hello/hello\",
+ \"variables\": [
+ {
+ \"name\": \"out_base\",
+ \"type\": \"dir_path\",
+ \"value\": \"/tmp/hello-gcc/hello/hello\"
+ },
+ {
+ \"name\": \"src_base\",
+ \"type\": \"dir_path\",
+ \"value\": \"/tmp/hello/hello\"
+ },
+ {
+ \"name\": \"cxx.poptions\",
+ \"type\": \"strings\",
+ \"value\": [
+ \"-I/tmp/hello-gcc/hello\",
+ \"-I/tmp/hello\"
+ ]
+ },
+ {
+ \"name\": \"libs\",
+ \"value\": \"/tmp/hello-gcc/libhello/libhello/lib{hello}\"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+}
+\
+
+The \c{type} member is absent if the variable value is untyped.
+
+The \c{value} member contains the variable value in a suitable JSON
+representation. Specifically:
+
+\ul|
+
+\li|\c{null} values are represented as JSON \c{null}.|
+
+\li|\c{bool} values are represented as JSON \c{boolean}.|
+
+\li|\c{int64} and \c{uint64} values are represented as JSON \c{number}.|
+
+\li|\c{string}, \c{path}, \c{dir_path} values are represented as JSON
+ \c{string}.|
+
+\li|Untyped simple name values are represented as JSON \c{string}.|
+
+\li|Pairs of above values are represented as JSON objects with the \c{first}
+ and \c{second} members corresponding to the pair elements.|
+
+\li|Untyped complex name values are serialized as target names and represented
+ as JSON \c{string}.|
+
+\li|Containers of above values are represented as JSON arrays corresponding to
+ the container elements.|
+
+\li|An empty value is represented as an empty JSON object if it's a typed
+ pair, as an empty JSON array if it's a typed container or is untyped, and
+ as an empty string otherwise.||
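+
+For example, a hypothetical untyped variable \c{foo} with the pair value
+\c{a@b} would be represented along these lines:
+
+\
+{
+  \"name\": \"foo\",
+  \"value\": {\"first\": \"a\", \"second\": \"b\"}
+}
+\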
+
+One expected use-case for the match dump is to determine the set of targets
+for which a given action is applicable. For example, we may want to determine
+all the executables in a project that can be tested with the \c{test}
+operation in order to present this list to the user in an IDE plugin or
+some such. To further illuminate the problem, consider the following
+\c{buildfile}, which declares a number of executable targets, some of which
+are tests and some of which are not:
+
+\
+exe{hello1}: ... testscript # Test because of testscript prerequisite.
+
+exe{hello2}: test = true # Test because of test=true.
+
+exe{hello3}: ... testscript # Not a test because of test=false.
+{
+  test = false
+}
+\
+
+As can be seen, trying to infer this information is not straightforward, and
+doing so manually by examining prerequisites, variables, etc., while possible,
+will be complex and likely brittle. Instead, the recommended approach is to
+use the match dump and base the decision on the \c{state} target object
+member. Specifically, a rule that matched the target but determined that
+nothing needs to be done for it returns the special \c{noop} recipe. The
+\c{build2} core recognizes this situation and sets such a target's state to
+\c{unchanged} during match. Here is what the match dump will look like for
+the above three executables:
+
+\
+$ b --match-only --dump=match --dump-format=json-v0.1 test
+{
+ \"out_path\": \"hello\",
+ \"src_path\": \"/tmp/hello/hello\",
+ \"targets\": [
+ {
+ \"name\": \"exe{hello1.}\",
+ \"display_name\": \"exe{hello1}\",
+ \"type\": \"exe\",
+ \"path\": \"/tmp/hello-gcc/hello/hello/hello1\",
+ \"inner_operation\": {
+ \"rule\": \"test\"
+ }
+ },
+ {
+ \"name\": \"exe{hello2.}\",
+ \"display_name\": \"exe{hello2}\",
+ \"type\": \"exe\",
+ \"path\": \"/tmp/hello-gcc/hello/hello/hello2\",
+ \"inner_operation\": {
+ \"rule\": \"test\"
+ }
+ },
+ {
+ \"name\": \"exe{hello3}\",
+ \"display_name\": \"exe{hello3}\",
+ \"type\": \"exe\",
+ \"inner_operation\": {
+ \"rule\": \"test\",
+ \"state\": \"unchanged\"
+ }
+ }
+ ]
+}
+\
+
"
diff --git a/doc/testscript.cli b/doc/testscript.cli
index 254bca1..c539903 100644
--- a/doc/testscript.cli
+++ b/doc/testscript.cli
@@ -2777,6 +2777,11 @@ env - --unset=FOO -- $*
Terminate the command if it fails to complete within the specified number
of seconds. See also the \l{#builtins-timeout \c{timeout}} builtin.|
+\li|\n\c{-s|--timeout-success}
+
+ Assume the command terminated due to the timeout specified with the
+ \c{-t|--timeout} option to have succeeded.|
+
\li|\n\c{-c|--cwd <dir>}
Change the command's working directory.|
@@ -2849,6 +2854,56 @@ false
Do nothing and terminate normally with the 1 exit code (indicating failure).
+\h#builtins-find|\c{find}|
+
+\
+find <start-path>... [<expression>]
+\
+
+Search for filesystem entries in a filesystem hierarchy. Traverse filesystem
+hierarchies from each \i{start-path} specified on the command line, evaluate
+for each filesystem entry the boolean \i{expression} consisting of the
+options-like arguments called \i{primaries}, and print the filesystem entry
+path if it evaluates to \c{true}, one path per line. The primaries are
+combined into the expression with an implicit logical AND operator. The empty
+expression always evaluates to \c{true}.
+
+Note that the implementation deviates from POSIX in a number of ways. It only
+supports a small subset of primaries and doesn't support compound expressions,
+negations, the logical OR and (explicit) AND operators, or \c{-type} primary
+values other than \c{f}, \c{d}, and \c{l}. It does, however, support the
+\c{-mindepth} and \c{-maxdepth} primaries, which are not specified by POSIX
+but are supported by the major \c{find} utility implementations.
+
+The following primaries are supported:
+
+\dl|
+
+\li|\n\c{-name <pattern>}
+
+ Evaluates to \c{true} if a filesystem entry base name matches the specified
+ wildcard pattern.|
+
+\li|\n\c{-type <type>}
+
+ Evaluates to \c{true} if a filesystem entry type matches the specified type:
+ \c{f} for a regular file, \c{d} for a directory, and \c{l} for a symbolic
+ link.|
+
+\li|\n\c{-mindepth <depth>}
+
+ Evaluates to \c{true} if a filesystem entry directory level is not less than
+ the specified depth. The level of the \i{start-path} entries specified on
+ the command line is 0.|
+
+\li|\n\c{-maxdepth <depth>}
+
+ Evaluates to \c{true} if a filesystem entry directory level is not greater
+ than the specified depth. The level of the \i{start-path} entries specified
+ on the command line is 0. Note that the implementation is smart enough not
+ to traverse a directory when the maximum depth is reached.||
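+
+For example, the following command (the start directory is made up) prints
+the paths of all the regular files with the \c{.hxx} extension in the first
+two levels of the \c{src/} directory:
+
+\
+find src/ -type f -name '*.hxx' -maxdepth 2
+\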
+
+
\h#builtins-ln|\c{ln}|
\
diff --git a/libbuild2/adhoc-rule-buildscript.cxx b/libbuild2/adhoc-rule-buildscript.cxx
index 2cec3f3..3e868a6 100644
--- a/libbuild2/adhoc-rule-buildscript.cxx
+++ b/libbuild2/adhoc-rule-buildscript.cxx
@@ -5,6 +5,8 @@
#include <sstream>
+#include <libbutl/filesystem.hxx> // try_rm_file(), path_entry()
+
#include <libbuild2/depdb.hxx>
#include <libbuild2/scope.hxx>
#include <libbuild2/target.hxx>
@@ -47,7 +49,7 @@ namespace build2
if (l)
{
storage.clear ();
- names_view ns (reverse (*l, storage));
+ names_view ns (reverse (*l, storage, true /* reduce */));
for (const name& n: ns)
to_checksum (cs, n);
@@ -209,13 +211,18 @@ namespace build2
bool adhoc_buildscript_rule::
reverse_fallback (action a, const target_type& tt) const
{
- // We can provide clean for a file target if we are providing update.
+ // We can provide clean for a file or group target if we are providing
+ // update.
//
- return a == perform_clean_id && tt.is_a<file> () &&
- find (actions.begin (), actions.end (),
- perform_update_id) != actions.end ();
+ return (a == perform_clean_id &&
+ (tt.is_a<file> () || tt.is_a<group> ()) &&
+ find (actions.begin (), actions.end (),
+ perform_update_id) != actions.end ());
}
+ using dynamic_target = build::script::parser::dynamic_target;
+ using dynamic_targets = build::script::parser::dynamic_targets;
+
struct adhoc_buildscript_rule::match_data
{
match_data (action a, const target& t, const scope& bs, bool temp_dir)
@@ -225,6 +232,7 @@ namespace build2
build::script::default_runner run;
path dd;
+ dynamic_targets dyn_targets;
const scope* bs;
timestamp mt;
@@ -252,22 +260,27 @@ namespace build2
};
bool adhoc_buildscript_rule::
- match (action a, target& t, const string& h, match_extra& me) const
+ match (action a, target& xt, const string& h, match_extra& me) const
{
+ const target& t (xt); // See adhoc_rule::match().
+
// We pre-parsed the script with the assumption it will be used on a
- // non/file-based target. Note that this should not be possible with
- // patterns.
+ // non/file-based (or file group-based) target. Note that this should not
+ // be possible with patterns.
//
if (pattern == nullptr)
{
- if ((t.is_a<file> () != nullptr) != ttype->is_a<file> ())
- {
+ // Let's not allow mixing file/group.
+ //
+ if ((t.is_a<file> () != nullptr) == ttype->is_a<file> () ||
+ (t.is_a<group> () != nullptr) == ttype->is_a<group> ())
+ ;
+ else
fail (loc) << "incompatible target types used with shared recipe" <<
- info << "all targets must be file-based or non-file-based";
- }
+ info << "all targets must be file- or file group-based or non";
}
- return adhoc_rule::match (a, t, h, me);
+ return adhoc_rule::match (a, xt, h, me);
}
recipe adhoc_buildscript_rule::
@@ -278,17 +291,28 @@ namespace build2
recipe adhoc_buildscript_rule::
apply (action a,
- target& xt,
+ target& t,
match_extra& me,
- const optional<timestamp>& d) const
+ const optional<timestamp>& deadline) const
{
tracer trace ("adhoc_buildscript_rule::apply");
+ // Handle matching group members (see adhoc_rule::match() for background).
+ //
+ if (const group* g = t.group != nullptr ? t.group->is_a<group> () : nullptr)
+ {
+ // Note: this looks very similar to how we handle ad hoc group members.
+ //
+ match_sync (a, *g, 0 /* options */);
+ return group_recipe; // Execute the group's recipe.
+ }
+
// We don't support deadlines for any of these cases (see below).
//
- if (d && (a.outer () ||
- me.fallback ||
- (a == perform_update_id && xt.is_a<file> ())))
+ if (deadline && (a.outer () ||
+ me.fallback ||
+ (a == perform_update_id &&
+ (t.is_a<file> () || t.is_a<group> ()))))
return empty_recipe;
// If this is an outer operation (e.g., update-for-test), then delegate to
@@ -296,26 +320,98 @@ namespace build2
//
if (a.outer ())
{
- match_inner (a, xt);
- return execute_inner;
+ match_inner (a, t);
+ return inner_recipe;
}
- context& ctx (xt.ctx);
- const scope& bs (xt.base_scope ());
+ context& ctx (t.ctx);
+ const scope& bs (t.base_scope ());
- // Inject pattern's ad hoc group members, if any.
+ group* g (t.is_a<group> ()); // Explicit group.
+
+ // Inject pattern's ad hoc group members, if any (explicit group members
+ // are injected after reset below).
//
- if (pattern != nullptr)
- pattern->apply_adhoc_members (a, xt, bs, me);
+ if (g == nullptr && pattern != nullptr)
+ pattern->apply_group_members (a, t, bs, me);
- // Derive file names for the target and its ad hoc group members, if any.
+ // Derive file names for the target and its static/ad hoc group members,
+ // if any.
//
if (a == perform_update_id || a == perform_clean_id)
{
- for (target* m (&xt); m != nullptr; m = m->adhoc_member)
+ if (g != nullptr)
+ {
+ g->reset_members (a); // See group::group_members() for background.
+
+ // Note that we rely on the fact that if the group has static members,
+ // then they always come first in members and the first static member
+ // is a file.
+ //
+ for (const target& m: g->static_members)
+ g->members.push_back (&m);
+
+ g->members_static = g->members.size ();
+
+ if (pattern != nullptr)
+ {
+ pattern->apply_group_members (a, *g, bs, me);
+ g->members_static = g->members.size ();
+ }
+
+ if (g->members_static == 0)
+ {
+ if (!script.depdb_dyndep_dyn_target)
+ fail << "group " << *g << " has no static or dynamic members";
+ }
+ else
+ {
+ if (!g->members.front ()->is_a<file> ())
+ {
+ // We use the first static member to derive depdb path, get mtime,
+ // etc. So it must be file-based.
+ //
+ fail << "first static member " << g->members.front ()
+ << " of group " << *g << " is not a file";
+ }
+
+ // Derive paths for all the static members.
+ //
+ for (const target* m: g->members)
+ if (auto* p = m->is_a<path_target> ())
+ p->derive_path ();
+ }
+ }
+ else
+ {
+ for (target* m (&t); m != nullptr; m = m->adhoc_member)
+ {
+ if (auto* p = m->is_a<path_target> ())
+ p->derive_path ();
+ }
+ }
+ }
+ else if (g != nullptr)
+ {
+ // This could be, for example, configure/dist update which could need a
+ // "representative sample" of members (in order to be able to match the
+ // rules). So add static members unless we already have something
+ // cached.
+ //
+ if (g->group_members (a).members == nullptr) // Note: not g->member.
{
- if (auto* p = m->is_a<path_target> ())
- p->derive_path ();
+ g->reset_members (a);
+
+ for (const target& m: g->static_members)
+ g->members.push_back (&m);
+
+ g->members_static = g->members.size ();
+
+ if (pattern != nullptr)
+ {
+ pattern->apply_group_members (a, *g, bs, me);
+ g->members_static = g->members.size ();
+ }
}
}
@@ -328,22 +424,22 @@ namespace build2
// prerequisites injected by the pattern. So we have to handle this ad hoc
// below.
//
- const fsdir* dir (inject_fsdir (a, xt, false /* prereq */));
+ const fsdir* dir (inject_fsdir (a, t, true /*match*/, false /*prereq*/));
// Match prerequisites.
//
// This is essentially match_prerequisite_members() but with support
// for update=unmatch|match.
//
- auto& pts (xt.prerequisite_targets[a]);
+ auto& pts (t.prerequisite_targets[a]);
{
// Re-create the clean semantics as in match_prerequisite_members().
//
- bool clean (a.operation () == clean_id && !xt.is_a<alias> ());
+ bool clean (a.operation () == clean_id && !t.is_a<alias> ());
// Add target's prerequisites.
//
- for (prerequisite_member p: group_prerequisite_members (a, xt))
+ for (prerequisite_member p: group_prerequisite_members (a, t))
{
// Note that we have to recognize update=unmatch|match for *(update),
// not just perform(update). But only actually do anything about it
@@ -351,7 +447,7 @@ namespace build2
//
lookup l; // The `update` variable value, if any.
include_type pi (
- include (a, xt, p, a.operation () == update_id ? &l : nullptr));
+ include (a, t, p, a.operation () == update_id ? &l : nullptr));
// Use prerequisite_target::include to signal update during match or
// unmatch.
@@ -383,7 +479,7 @@ namespace build2
if (!pi)
continue;
- const target& pt (p.search (xt));
+ const target& pt (p.search (t));
if (&pt == dir) // Don't add injected fsdir{} twice.
continue;
@@ -402,19 +498,19 @@ namespace build2
// Inject pattern's prerequisites, if any.
//
if (pattern != nullptr)
- pattern->apply_prerequisites (a, xt, bs, me);
+ pattern->apply_prerequisites (a, t, bs, me);
// Start asynchronous matching of prerequisites. Wait with unlocked
// phase to allow phase switching.
//
- wait_guard wg (ctx, ctx.count_busy (), xt[a].task_count, true);
+ wait_guard wg (ctx, ctx.count_busy (), t[a].task_count, true);
for (const prerequisite_target& pt: pts)
{
if (pt.target == dir) // Don't match injected fsdir{} twice.
continue;
- match_async (a, *pt.target, ctx.count_busy (), xt[a].task_count);
+ match_async (a, *pt.target, ctx.count_busy (), t[a].task_count);
}
wg.wait ();
@@ -439,7 +535,7 @@ namespace build2
l6 ([&]{trace << "unmatch " << *pt.target << ": " << mr.first;});
// If we managed to unmatch, blank it out so that it's not executed,
- // etc. Otherwise, convert it to ad hoc (we also automatically avoid
+ // etc. Otherwise, leave it as is (but we still automatically avoid
// hashing it, updating it during match in exec_depdb_dyndep(), and
// making us out of date in execute_update_prerequisites()).
//
@@ -449,18 +545,213 @@ namespace build2
// information (e.g., poptions from a library) and those will be
// change-tracked.
//
+ // Note: set the include_target flag for the updated_during_match()
+ // check.
+ //
if (mr.first)
+ {
+ pt.data = reinterpret_cast<uintptr_t> (pt.target);
pt.target = nullptr;
- else
- pt.include |= prerequisite_target::include_adhoc;
+ pt.include |= prerequisite_target::include_target;
+
+ // Note that this prerequisite could also be ad hoc and we must
+ // clear that flag if we managed to unmatch (failed that we will
+ // treat it as ordinary ad hoc since it has the target pointer in
+ // data).
+ //
+ // But that makes it impossible to distinguish ad hoc unmatch from
+ // ordinary unmatch prerequisites later when setting $<. Another
+ // flag to the rescue.
+ //
+ if ((pt.include & prerequisite_target::include_adhoc) != 0)
+ {
+ pt.include &= ~prerequisite_target::include_adhoc;
+ pt.include |= include_unmatch_adhoc;
+ }
+ }
}
}
}
+ // Read the list of dynamic targets and, optionally, fsdir{} prerequisites
+ // from depdb, if exists (used in a few depdb-dyndep --dyn-target handling
+ // places below).
+ //
+ auto read_dyn_targets = [] (path ddp, bool fsdir)
+ -> pair<dynamic_targets, dir_paths>
+ {
+ depdb dd (move (ddp), true /* read_only */);
+
+ pair<dynamic_targets, dir_paths> r;
+ while (dd.reading ()) // Breakout loop.
+ {
+ string* l;
+ auto read = [&dd, &l] () -> bool
+ {
+ return (l = dd.read ()) != nullptr;
+ };
+
+ if (!read ()) // Rule id.
+ break;
+
+ // We can omit this for as long as we don't break our blank line
+ // anchors semantics.
+ //
+#if 0
+ if (*l != rule_id_)
+ fail << "unable to clean dynamic target group " << t
+ << " with old depdb";
+#endif
+
+ // Note that we cannot read out expected lines since there can be
+ // custom depdb builtins. So we use the blank lines as anchors to
+ // skip to the parts we need.
+ //
+ // Skip until the first blank that separated custom depdb entries from
+ // the prerequisites list.
+ {
+ bool g;
+ while ((g = read ()) && !l->empty ()) ;
+ if (!g)
+ break;
+ }
+
+ // Next read the prerequisites, detecting fsdir{} entries if asked.
+ //
+ {
+ bool g;
+ while ((g = read ()) && !l->empty ())
+ {
+ if (fsdir)
+ {
+ path p (*l);
+ if (p.to_directory ())
+ r.second.push_back (path_cast<dir_path> (move (p)));
+ }
+ }
+
+ if (!g)
+ break;
+ }
+
+ // Read the dynamic target files. We should always end with a blank
+ // line.
+ //
+ for (;;)
+ {
+ if (!read () || l->empty ())
+ break;
+
+ // Split into type and path.
+ //
+ size_t p (l->find (' '));
+ if (p == string::npos || // Invalid format.
+ p == 0 || // Empty type.
+ p + 1 == l->size ()) // Empty path.
+ break;
+
+ r.first.push_back (
+ dynamic_target {string (*l, 0, p), path (*l, p + 1, string::npos)});
+ }
+
+ break;
+ }
+
+ return r;
+ };
+
+ // Target path to derive the depdb path, query mtime (if file), etc.
+ //
+ // To derive the depdb path for a group with at least one static member we
+ // use the path of the first member. For a group without any static
+ // members we use the group name with the target type name as the
+ // second-level extension.
+ //
+ auto target_path = [&t, g, p = path ()] () mutable -> const path&
+ {
+ return
+ g == nullptr ? t.as<file> ().path () :
+ g->members_static != 0 ? g->members.front ()->as<file> ().path () :
+ (p = g->dir / (g->name + '.' + g->type ().name));
+ };
+
// See if we are providing the standard clean as a fallback.
//
if (me.fallback)
- return &perform_clean_file;
+ {
+ // For depdb-dyndep --dyn-target use depdb to clean dynamic targets.
+ //
+ if (script.depdb_dyndep_dyn_target)
+ {
+ // Note that only removing the relevant filesystem entries is not
+ // enough: we actually have to populate the group with members since
+ // this information could be used to clean derived targets (for
+ // example, object files). So we just do that and let the standard
+ // clean logic take care of them the same as static members.
+ //
+ // NOTE that this logic should be consistent with what we have in
+ // exec_depdb_dyndep().
+ //
+ using dyndep = dyndep_rule;
+
+ function<dyndep::group_filter_func> filter;
+ if (g != nullptr)
+ {
+ filter = [] (mtime_target& g, const build2::file& m)
+ {
+ auto& ms (g.as<group> ().members);
+ return find (ms.begin (), ms.end (), &m) == ms.end ();
+ };
+ }
+
+ pair<dynamic_targets, dir_paths> p (
+ read_dyn_targets (target_path () + ".d", true));
+
+ for (dynamic_target& dt: p.first)
+ {
+ path& f (dt.path);
+
+ // Resolve target type. Clean it as file if unable to.
+ //
+ const target_type* tt (bs.find_target_type (dt.type));
+ if (tt == nullptr)
+ tt = &file::static_type;
+
+ if (g != nullptr)
+ {
+ pair<const build2::file&, bool> r (
+ dyndep::inject_group_member (a, bs, *g, move (f), *tt, filter));
+
+ if (r.second)
+ g->members.push_back (&r.first);
+ }
+ else
+ {
+ // Note that here we don't bother cleaning any old dynamic targets
+ // -- the more we can clean, the merrier.
+ //
+ dyndep::inject_adhoc_group_member (a, bs, t, move (f), *tt);
+ }
+ }
+
+ // Enter fsdir{} prerequisites.
+ //
+ // See the add lambda in exec_depdb_dyndep() for background.
+ //
+ for (dir_path& d: p.second)
+ {
+ dir_path o; string n; // For GCC 13 -Wdangling-reference.
+ const fsdir& dt (search<fsdir> (t,
+ move (d),
+ move (o),
+ move (n), nullptr, nullptr));
+ match_sync (a, dt);
+ pts.push_back (prerequisite_target (&dt, true /* adhoc */));
+ }
+ }
+
+ return g == nullptr ? perform_clean_file : perform_clean_group;
+ }
// If we have any update during match prerequisites, now is the time to
// update them.
@@ -472,17 +763,17 @@ namespace build2
// prerequisite_target::data.
//
if (a == perform_update_id)
- update_during_match_prerequisites (trace, a, xt);
+ update_during_match_prerequisites (trace, a, t);
- // See if this is not update or not on a file-based target.
+ // See if this is not update or not on a file/group-based target.
//
- if (a != perform_update_id || !xt.is_a<file> ())
+ if (a != perform_update_id || !(g != nullptr || t.is_a<file> ()))
{
// Make sure we get small object optimization.
//
- if (d)
+ if (deadline)
{
- return [dv = *d, this] (action a, const target& t)
+ return [dv = *deadline, this] (action a, const target& t)
{
return default_action (a, t, dv);
};
@@ -496,20 +787,22 @@ namespace build2
}
}
+ // This is a perform update on a file or group target.
+ //
// See if this is the simple case with only static dependencies.
//
if (!script.depdb_dyndep)
{
return [this] (action a, const target& t)
{
- return perform_update_file (a, t);
+ return perform_update_file_or_group (a, t);
};
}
- // This is a perform update on a file target with extraction of dynamic
- // dependency information either in the depdb preamble (depdb-dyndep
- // without --byproduct) or as a byproduct of the recipe body execution
- // (depdb-dyndep with --byproduct).
+ // This is a perform update on a file or group target with extraction of
+ // dynamic dependency information either in the depdb preamble
+ // (depdb-dyndep without --byproduct) or as a byproduct of the recipe body
+ // execution (depdb-dyndep with --byproduct).
//
// For the former case, we may need to add additional prerequisites (or
// even target group members). We also have to save any such additional
@@ -527,9 +820,6 @@ namespace build2
// example and all this logic is based on the prior work in the cc module
// where you can often find more detailed rationale for some of the steps
// performed (like the fsdir update below).
- //
- file& t (xt.as<file> ());
- const path& tp (t.path ());
// Re-acquire fsdir{} specified by the user, similar to inject_fsdir()
// (which we have disabled; see above).
@@ -553,16 +843,14 @@ namespace build2
}
if (dir != nullptr)
- fsdir_rule::perform_update_direct (a, t);
+ fsdir_rule::perform_update_direct (a, *dir);
// Because the depdb preamble can access $<, we have to blank out all the
// ad hoc prerequisites. Since we will still need them later, we "move"
// them to the auxiliary data member in prerequisite_target (see
// execute_update_prerequisites() for details).
//
- // @@ This actually messes up with updated_during_match() check. Could
- // we not redo this so that we always keep p.target intact? Can't
- // we just omit p.adhoc() targets from $<?
+ // Note: set the include_target flag for the updated_during_match() check.
//
for (prerequisite_target& p: pts)
{
@@ -572,13 +860,57 @@ namespace build2
{
p.data = reinterpret_cast<uintptr_t> (p.target);
p.target = nullptr;
+ p.include |= prerequisite_target::include_target;
}
}
+ const path& tp (target_path ());
+
+ // Note that while it's tempting to turn match_data* into recipes, some of
+ // their members are not movable. And in the end we will have the same
+ // result: one dynamic memory allocation.
+ //
+ unique_ptr<match_data> md;
+ unique_ptr<match_data_byproduct> mdb;
+
+ dynamic_targets old_dyn_targets;
+
+ if (script.depdb_dyndep_byproduct)
+ {
+ mdb.reset (new match_data_byproduct (
+ a, t, bs, script.depdb_preamble_temp_dir));
+ }
+ else
+ {
+ md.reset (new match_data (a, t, bs, script.depdb_preamble_temp_dir));
+
+ // If the set of dynamic targets can change based on changes to the
+ // inputs (say, each entity, such as a type, in the input file gets its
+ // own output file), then we can end up with a large number of old
+ // output files laying around because they are not part of the new
+ // dynamic target set. So we try to clean them up based on the old depdb
+ // information, similar to how we do it for perform_clean above (except
+ // here we will just keep the list of old files).
+ //
+ // Note: do before opening depdb, which can start over-writing it.
+ //
+ // We also have to do this speculatively, without knowing whether we
+ // will need to update. Oh, well, being dynamic ain't free.
+ //
+ if (script.depdb_dyndep_dyn_target)
+ old_dyn_targets = read_dyn_targets (tp + ".d", false).first;
+ }
+
depdb dd (tp + ".d");
// NOTE: see the "static dependencies" version (with comments) below.
//
+ // NOTE: We use blank lines as anchors to skip directly to certain entries
+ // (e.g., dynamic targets). So make sure none of the other entries
+ // can be blank (for example, see `depdb string` builtin).
+ //
+ // NOTE: KEEP IN SYNC WITH read_dyn_targets ABOVE!
+ //
if (dd.expect ("<ad hoc buildscript recipe> 1") != nullptr)
l4 ([&]{trace << "rule mismatch forcing update of " << t;});
@@ -613,10 +945,32 @@ namespace build2
l4 ([&]{trace << "recipe variable change forcing update of " << t;});
}
+ // Static targets and prerequisites (there can also be dynamic targets;
+ // see dyndep --dyn-target).
+ //
{
sha256 tcs;
- for (const target* m (&t); m != nullptr; m = m->adhoc_member)
- hash_target (tcs, *m, storage);
+ if (g == nullptr)
+ {
+ // There is a nuance: in an operation batch (e.g., `b update
+ // update`) we will already have the dynamic targets as members on
+ // the subsequent operations and we need to make sure we don't treat
+ // them as static. Using target_decl to distinguish the two seems
+ // like a natural way.
+ //
+ for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ {
+ if (m->decl == target_decl::real)
+ hash_target (tcs, *m, storage);
+ }
+ }
+ else
+ {
+ // Feels like there is not much sense in hashing the group itself.
+ //
+ for (const target* m: g->members)
+ hash_target (tcs, *m, storage);
+ }
if (dd.expect (tcs.string ()) != nullptr)
l4 ([&]{trace << "target set change forcing update of " << t;});
@@ -634,22 +988,8 @@ namespace build2
}
}
- // Note that while it's tempting to turn match_data* into recipes, some of
- // their members are not movable. And in the end we will have the same
- // result: one dynamic memory allocation.
+ // Get ready to run the depdb preamble.
//
- unique_ptr<match_data> md;
- unique_ptr<match_data_byproduct> mdb;
-
- if (script.depdb_dyndep_byproduct)
- {
- mdb.reset (new match_data_byproduct (
- a, t, bs, script.depdb_preamble_temp_dir));
- }
- else
- md.reset (new match_data (a, t, bs, script.depdb_preamble_temp_dir));
-
-
build::script::environment& env (mdb != nullptr ? mdb->env : md->env);
build::script::default_runner& run (mdb != nullptr ? mdb->run : md->run);
@@ -660,21 +1000,51 @@ namespace build2
{
build::script::parser p (ctx);
p.execute_depdb_preamble (a, bs, t, env, script, run, dd);
+
+ // Write a blank line after the custom depdb entries and before
+ // prerequisites, which we use as an anchor (see read_dyn_targets
+ // above). We only do it for the new --dyn-target mode in order not to
+ // invalidate the existing depdb instances.
+ //
+ if (script.depdb_dyndep_dyn_target)
+ dd.expect ("");
}
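The blank-line anchor written above lets a reader such as read_dyn_targets jump to a particular depdb section without interpreting the entries that precede it. A minimal standalone sketch of that skip-then-collect idea, assuming a simplified one-entry-per-line layout with a single blank-line anchor (illustrative only, not the actual depdb format or code from this commit):

    #include <string>
    #include <vector>
    #include <fstream>

    // Skip entries up to and including the first blank line (the anchor),
    // then collect the entries of the anchored section until the next
    // blank line or end of file.
    //
    static std::vector<std::string>
    read_anchored_section (const std::string& file)
    {
      std::ifstream is (file);
      std::string l;
      std::vector<std::string> r;

      while (std::getline (is, l) && !l.empty ())
        ;

      while (std::getline (is, l) && !l.empty ())
        r.push_back (l);

      return r;
    }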
// Determine if we need to do an update based on the above checks.
//
- bool update;
+ bool update (false);
timestamp mt;
if (dd.writing ())
update = true;
else
{
- if ((mt = t.mtime ()) == timestamp_unknown)
- t.mtime (mt = mtime (tp)); // Cache.
+ if (g == nullptr)
+ {
+ const file& ft (t.as<file> ());
- update = dd.mtime > mt;
+ if ((mt = ft.mtime ()) == timestamp_unknown)
+ ft.mtime (mt = mtime (tp)); // Cache.
+ }
+ else
+ {
+ // Use static member, old dynamic, or force update.
+ //
+ const path* p (
+ g->members_static != 0
+ ? &tp /* first static member path */
+ : (!old_dyn_targets.empty ()
+ ? &old_dyn_targets.front ().path
+ : nullptr));
+
+ if (p != nullptr)
+ mt = g->load_mtime (*p);
+ else
+ update = true;
+ }
+
+ if (!update)
+ update = dd.mtime > mt;
}
if (update)
@@ -810,7 +1180,7 @@ namespace build2
// Note that in case of dry run we will have an incomplete (but valid)
// database which will be updated on the next non-dry run.
//
- if (!update || ctx.dry_run)
+ if (!update || ctx.dry_run_option)
dd.close (false /* mtime_check */);
else
mdb->dd = dd.close_to_reopen ();
@@ -822,59 +1192,107 @@ namespace build2
return [this, md = move (mdb)] (action a, const target& t)
{
- return perform_update_file_dyndep_byproduct (a, t, *md);
+ return perform_update_file_or_group_dyndep_byproduct (a, t, *md);
};
}
else
{
// Run the second half of the preamble (depdb-dyndep commands) to update
- // our prerequisite targets and extract dynamic dependencies.
+ // our prerequisite targets and extract dynamic dependencies (targets
+ // and prerequisites).
//
// Note that this should be the last update to depdb (the invalidation
// order semantics).
//
- bool deferred_failure (false);
+ md->deferred_failure = false;
{
build::script::parser p (ctx);
p.execute_depdb_preamble_dyndep (a, bs, t,
env, script, run,
dd,
+ md->dyn_targets,
update,
mt,
- deferred_failure);
+ md->deferred_failure);
}
- if (update && dd.reading () && !ctx.dry_run)
+ if (update && dd.reading () && !ctx.dry_run_option)
dd.touch = timestamp_unknown;
dd.close (false /* mtime_check */);
- md->dd = move (dd.path);
- // Pass on base scope and update/mtime.
+ // Remove previous dynamic targets since their set may change with
+ // changes to the inputs.
+ //
+ // The dry-run mode complicates things: if we don't remove the old
+ // files, then that information will be gone (since we update depdb even
+ // in the dry-run mode). But if we remove everything in the dry-run
+ // mode, then we may also remove some of the current files, which would
+ // be incorrect. So let's always remove, but only files that are not in
+ // the current set.
+ //
+ // Note that we used to do this in perform_update_file_or_group_dyndep()
+ // but that had a tricky issue: if we end up performing match but not
+ // execute (e.g., via the resolve_members() logic), then we will not
+ // clean up old targets but will lose this information (since the depdb
+ // has been updated). So now we do it here, which is a bit strange, but it
+ // sort of fits into that dry-run logic above. Note also that we do this
+ // unconditionally, update or not, since if everything is up to date,
+ // then old and new sets should be the same.
+ //
+ for (const dynamic_target& dt: old_dyn_targets)
+ {
+ const path& f (dt.path);
+
+ if (find_if (md->dyn_targets.begin (), md->dyn_targets.end (),
+ [&f] (const dynamic_target& dt)
+ {
+ return dt.path == f;
+ }) == md->dyn_targets.end ())
+ {
+ // This is an optimization so best effort.
+ //
+ if (optional<rmfile_status> s = butl::try_rmfile_ignore_error (f))
+ {
+ if (s == rmfile_status::success && verb >= 2)
+ text << "rm " << f;
+ }
+ }
+ }
+
+ // Pass on the base scope, depdb path, and update/mtime.
//
md->bs = &bs;
+ md->dd = move (dd.path);
md->mt = update ? timestamp_nonexistent : mt;
- md->deferred_failure = deferred_failure;
return [this, md = move (md)] (action a, const target& t)
{
- return perform_update_file_dyndep (a, t, *md);
+ return perform_update_file_or_group_dyndep (a, t, *md);
};
}
}
target_state adhoc_buildscript_rule::
- perform_update_file_dyndep_byproduct (action a,
- const target& xt,
- match_data_byproduct& md) const
+ perform_update_file_or_group_dyndep_byproduct (
+ action a, const target& t, match_data_byproduct& md) const
{
// Note: using shared function name among the three variants.
//
- tracer trace ("adhoc_buildscript_rule::perform_update_file");
+ tracer trace (
+ "adhoc_buildscript_rule::perform_update_file_or_group_dyndep_byproduct");
- context& ctx (xt.ctx);
+ context& ctx (t.ctx);
- const file& t (xt.as<file> ());
+ // For a group we use the first (for now static) member as a source of
+ // mtime.
+ //
+ // @@ TODO: expl: byproduct: Note that until we support dynamic targets in
+ // the byproduct mode, we verify there is at least one static member in
+ // apply() above. Once we do support this, we will need to verify after
+ // the dependency extraction below.
+ //
+ const group* g (t.is_a<group> ());
// Note that even if we've updated all our prerequisites in apply(), we
// still need to execute them here to keep the dependency counts straight.
@@ -903,7 +1321,14 @@ namespace build2
if (!ctx.dry_run || verb != 0)
{
- execute_update_file (bs, a, t, env, run);
+ if (g == nullptr)
+ execute_update_file (bs, a, t.as<file> (), env, run);
+ else
+ {
+ // Note: no dynamic members yet.
+ //
+ execute_update_group (bs, a, *g, env, run);
+ }
}
// Extract the dynamic dependency information as byproduct of the recipe
@@ -943,7 +1368,7 @@ namespace build2
const auto& pts (t.prerequisite_targets[a]);
auto add = [&trace, what,
- a, &bs, &t, &pts, pts_n = md.pts_n,
+ a, &bs, &t, g, &pts, pts_n = md.pts_n,
&byp, &map_ext, &dd, &skip] (path fp)
{
normalize_external (fp, what);
@@ -981,15 +1406,25 @@ namespace build2
}
}
- // Skip if this is one of the targets.
+ // Skip if this is one of the targets (see the non-byproduct version
+ // for background).
//
if (byp.drop_cycles)
{
- for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ if (g != nullptr)
{
- if (ft == m)
+ auto& ms (g->members);
+ if (find (ms.begin (), ms.end (), ft) != ms.end ())
return;
}
+ else
+ {
+ for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ {
+ if (ft == m)
+ return;
+ }
+ }
}
// Skip until where we left off.
@@ -1066,7 +1501,7 @@ namespace build2
if (r.second.empty ())
continue;
- // @@ TODO: what should we do about targets?
+ // Note: no support for dynamic targets in byproduct mode.
//
if (r.first == make_type::target)
continue;
@@ -1076,10 +1511,11 @@ namespace build2
if (f.relative ())
{
if (!byp.cwd)
- fail (il) << "relative path '" << f << "' in make dependency"
- << " declaration" <<
+ fail (il) << "relative " << what
+ << " prerequisite path '" << f
+ << "' in make dependency declaration" <<
info << "consider using --cwd to specify relative path "
- << "base";
+ << "base";
f = *byp.cwd / f;
}
@@ -1094,6 +1530,52 @@ namespace build2
break;
}
+ case dyndep_format::lines:
+ {
+ for (string l;; ++il.line) // Reuse the buffer.
+ {
+ if (eof (getline (is, l)))
+ break;
+
+ if (l.empty ())
+ fail (il) << "blank line in prerequisites list";
+
+ if (l.front () == ' ')
+ fail (il) << "non-existent prerequisite in --byproduct mode";
+
+ path f;
+ try
+ {
+ f = path (l);
+
+ // fsdir{} prerequisites only make sense with dynamic targets.
+ //
+ if (f.to_directory ())
+ throw invalid_path ("");
+
+ if (f.relative ())
+ {
+ if (!byp.cwd)
+ fail (il) << "relative " << what
+ << " prerequisite path '" << f
+ << "' in lines dependency declaration" <<
+ info << "consider using --cwd to specify "
+ << "relative path base";
+
+ f = *byp.cwd / f;
+ }
+ }
+ catch (const invalid_path&)
+ {
+ fail (il) << "invalid " << what << " prerequisite path '"
+ << l << "'";
+ }
+
+ add (move (f));
+ }
+
+ break;
+ }
}
// Add the terminating blank line.
@@ -1101,6 +1583,8 @@ namespace build2
dd.expect ("");
dd.close ();
+ // @@ TODO: expl: byproduct: verify we have at least one member.
+
md.dd.path = move (dd.path); // For mtime check below.
}
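The lines format handled above is deliberately simple: one prerequisite path per line, a leading space marking a non-existent entry (an error in --byproduct mode), and relative paths resolved against the --cwd base. A self-contained approximation of that parsing loop, with simplified path handling (a sketch only, not the code from this commit):

    #include <string>
    #include <vector>
    #include <istream>
    #include <optional>
    #include <utility>
    #include <stdexcept>

    static std::vector<std::string>
    parse_lines (std::istream& is, const std::optional<std::string>& cwd)
    {
      std::vector<std::string> r;

      for (std::string l; std::getline (is, l); )
      {
        if (l.empty ())
          throw std::runtime_error ("blank line in prerequisites list");

        if (l.front () == ' ')
          throw std::runtime_error ("non-existent prerequisite");

        // Treat a path that does not start with '/' as relative and
        // resolve it against the base directory (a simplification of the
        // real path handling).
        //
        if (l.front () != '/')
        {
          if (!cwd)
            throw std::runtime_error ("relative path without a --cwd base");

          l = *cwd + '/' + l;
        }

        r.push_back (std::move (l));
      }

      return r;
    }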
@@ -1109,20 +1593,36 @@ namespace build2
timestamp now (system_clock::now ());
if (!ctx.dry_run)
- depdb::check_mtime (start, md.dd.path, t.path (), now);
+ {
+ // Only now we know for sure there must be a member in the group.
+ //
+ const file& ft ((g == nullptr ? t : *g->members.front ()).as<file> ());
+
+ depdb::check_mtime (start, md.dd.path, ft.path (), now);
+ }
+
+ (g == nullptr
+ ? static_cast<const mtime_target&> (t.as<file> ())
+ : static_cast<const mtime_target&> (*g)).mtime (now);
- t.mtime (now);
return target_state::changed;
}
target_state adhoc_buildscript_rule::
- perform_update_file_dyndep (action a, const target& xt, match_data& md) const
+ perform_update_file_or_group_dyndep (
+ action a, const target& t, match_data& md) const
{
- tracer trace ("adhoc_buildscript_rule::perform_update_file");
+ tracer trace (
+ "adhoc_buildscript_rule::perform_update_file_or_group_dyndep");
- context& ctx (xt.ctx);
+ context& ctx (t.ctx);
- const file& t (xt.as<file> ());
+ // For a group we use the first (static or dynamic) member as a source of
+ // mtime. Note that in this case there must be at least one since we fail
+ // if we were unable to extract any dynamic members and there are no
+ // static (see exec_depdb_dyndep()).
+ //
+ const group* g (t.is_a<group> ());
// Note that even if we've updated all our prerequisites in apply(), we
// still need to execute them here to keep the dependency counts straight.
@@ -1151,7 +1651,11 @@ namespace build2
if (!ctx.dry_run || verb != 0)
{
- execute_update_file (*md.bs, a, t, env, run, md.deferred_failure);
+ if (g == nullptr)
+ execute_update_file (
+ *md.bs, a, t.as<file> (), env, run, md.deferred_failure);
+ else
+ execute_update_group (*md.bs, a, *g, env, run, md.deferred_failure);
}
run.leave (env, script.end_loc);
@@ -1159,28 +1663,67 @@ namespace build2
timestamp now (system_clock::now ());
if (!ctx.dry_run)
- depdb::check_mtime (start, md.dd, t.path (), now);
+ {
+ // Note: in case of deferred failure we may not have any members.
+ //
+ const file& ft ((g == nullptr ? t : *g->members.front ()).as<file> ());
+ depdb::check_mtime (start, md.dd, ft.path (), now);
+ }
+
+ (g == nullptr
+ ? static_cast<const mtime_target&> (t)
+ : static_cast<const mtime_target&> (*g)).mtime (now);
- t.mtime (now);
return target_state::changed;
}
target_state adhoc_buildscript_rule::
- perform_update_file (action a, const target& xt) const
+ perform_update_file_or_group (action a, const target& t) const
{
- tracer trace ("adhoc_buildscript_rule::perform_update_file");
+ tracer trace ("adhoc_buildscript_rule::perform_update_file_or_group");
- context& ctx (xt.ctx);
+ context& ctx (t.ctx);
+ const scope& bs (t.base_scope ());
+
+ // For a group we use the first (static) member to derive depdb path, as a
+ // source of mtime, etc. Note that in this case there must be a static
+ // member since in this version of perform_update we don't extract dynamic
+ // dependencies (see apply() details).
+ //
+ const group* g (t.is_a<group> ());
- const file& t (xt.as<file> ());
- const path& tp (t.path ());
+ const file& ft ((g == nullptr ? t : *g->members.front ()).as<file> ());
+ const path& tp (ft.path ());
- const scope& bs (t.base_scope ());
+ // Support creating file symlinks using ad hoc recipes.
+ //
+ auto path_symlink = [&tp] ()
+ {
+ pair<bool, butl::entry_stat> r (
+ butl::path_entry (tp,
+ false /* follow_symlinks */,
+ true /* ignore_errors */));
+
+ return r.first && r.second.type == butl::entry_type::symlink;
+ };
// Update prerequisites and determine if any of them render this target
// out-of-date.
//
- timestamp mt (t.load_mtime ());
+ // If the file entry exists, check if it's a symlink.
+ //
+ bool symlink (false);
+ timestamp mt;
+
+ if (g == nullptr)
+ {
+ mt = ft.load_mtime ();
+
+ if (mt != timestamp_nonexistent)
+ symlink = path_symlink ();
+ }
+ else
+ mt = g->load_mtime (tp);
// This is essentially ps=execute_prerequisites(a, t, mt) which we
// cannot use because we need to see ad hoc prerequisites.
@@ -1274,8 +1817,18 @@ namespace build2
//
{
sha256 tcs;
- for (const target* m (&t); m != nullptr; m = m->adhoc_member)
- hash_target (tcs, *m, storage);
+ if (g == nullptr)
+ {
+ for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ hash_target (tcs, *m, storage);
+ }
+ else
+ {
+ // Feels like there is not much sense in hashing the group itself.
+ //
+ for (const target* m: g->members)
+ hash_target (tcs, *m, storage);
+ }
if (dd.expect (tcs.string ()) != nullptr)
l4 ([&]{trace << "target set change forcing update of " << t;});
@@ -1309,7 +1862,10 @@ namespace build2
if (!depdb_preamble)
{
- if (dd.writing () || dd.mtime > mt)
+ // If this is a symlink, depdb mtime could be greater than the symlink
+ // target.
+ //
+ if (dd.writing () || (dd.mtime > mt && !symlink))
update = true;
if (!update)
@@ -1335,7 +1891,7 @@ namespace build2
// Update if depdb mismatch.
//
- if (dd.writing () || dd.mtime > mt)
+ if (dd.writing () || (dd.mtime > mt && !symlink))
update = true;
dd.close ();
@@ -1359,17 +1915,42 @@ namespace build2
{
// Prepare to execute the script diag preamble and/or body.
//
- if ((r = execute_update_file (bs, a, t, env, run)))
+ r = g == nullptr
+ ? execute_update_file (bs, a, ft, env, run)
+ : execute_update_group (bs, a, *g, env, run);
+
+ if (r)
{
if (!ctx.dry_run)
- dd.check_mtime (tp);
+ {
+ if (g == nullptr)
+ symlink = path_symlink ();
+
+ // Again, if this is a symlink, depdb mtime will be greater than
+ // the symlink target.
+ //
+ if (!symlink)
+ dd.check_mtime (tp);
+ }
}
}
if (r || depdb_preamble)
run.leave (env, script.end_loc);
- t.mtime (system_clock::now ());
+ // Symlinks don't play well with dry-run: we can't extract an accurate target
+ // timestamp without creating the symlink. Overriding the dry-run doesn't
+ // seem to be an option since we don't know whether it will be a symlink
+ // until it's created. At least we are being pessimistic rather than
+ // optimistic here.
+ //
+ (g == nullptr
+ ? static_cast<const mtime_target&> (ft)
+ : static_cast<const mtime_target&> (*g)).mtime (
+ symlink
+ ? build2::mtime (tp)
+ : system_clock::now ());
+
return target_state::changed;
}
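For reference, the path_symlink check above uses butl::path_entry so that it can avoid following the link and ignore errors. A rough standard-library analogue of the same check (an illustrative sketch, not the code used by this commit):

    #include <string>
    #include <filesystem>
    #include <system_error>

    static bool
    is_symlink_path (const std::string& p)
    {
      std::error_code ec;

      // symlink_status() examines the filesystem entry itself rather than
      // the target it points to; errors are reported via ec instead of
      // exceptions.
      //
      std::filesystem::file_status s (
        std::filesystem::symlink_status (std::filesystem::path (p), ec));

      return !ec && std::filesystem::is_symlink (s);
    }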
@@ -1384,6 +1965,7 @@ namespace build2
// !NULL false 1 - normal prerequisite already updated
// !NULL true 0 - ad hoc prerequisite to be updated and blanked
// NULL true !NULL - ad hoc prerequisite already updated and blanked
+ // NULL false !NULL - unmatched prerequisite (ignored by this function)
//
// Note that we still execute already updated prerequisites to keep the
// dependency counts straight. But we don't consider them for the "renders
@@ -1391,6 +1973,8 @@ namespace build2
//
// See also environment::set_special_variables().
//
+ // See also perform_execute() which has to deal with these shenanigans.
+ //
optional<target_state> adhoc_buildscript_rule::
execute_update_prerequisites (action a, const target& t, timestamp mt) const
{
@@ -1456,10 +2040,14 @@ namespace build2
// Blank out adhoc.
//
+ // Note: set the include_target flag for the updated_during_match()
+ // check.
+ //
if (p.adhoc ())
{
p.data = reinterpret_cast<uintptr_t> (p.target);
p.target = nullptr;
+ p.include |= prerequisite_target::include_target;
}
}
}
@@ -1478,6 +2066,8 @@ namespace build2
build::script::default_runner& run,
bool deferred_failure) const
{
+ // NOTE: similar to execute_update_group() below.
+ //
context& ctx (t.ctx);
const scope& rs (*bs.root_scope ());
@@ -1612,6 +2202,128 @@ namespace build2
return exec_diag || exec_body;
}
+ bool adhoc_buildscript_rule::
+ execute_update_group (const scope& bs,
+ action a, const group& g,
+ build::script::environment& env,
+ build::script::default_runner& run,
+ bool deferred_failure) const
+ {
+ // Note: similar to execute_update_file() above (see there for comments).
+ //
+ // NOTE: when called from perform_update_file_or_group_dyndep_byproduct(),
+ // the group does not contain dynamic members yet and thus could
+ // have no members at all.
+ //
+ context& ctx (g.ctx);
+
+ const scope& rs (*bs.root_scope ());
+
+ build::script::parser p (ctx);
+
+ bool exec_body (!ctx.dry_run || verb >= 2);
+ bool exec_diag (!script.diag_preamble.empty () && (exec_body || verb == 1));
+ bool exec_depdb (!script.depdb_preamble.empty ());
+
+ if (script.diag_name)
+ {
+ if (verb == 1)
+ {
+ const file* pt (nullptr);
+ for (const prerequisite_target& p: g.prerequisite_targets[a])
+ {
+ if (p.target != nullptr && !p.adhoc ())
+ {
+ pt = p.target->is_a<file> ();
+ break;
+ }
+ }
+
+ if (pt != nullptr)
+ print_diag (script.diag_name->c_str (), *pt, g);
+ else
+ print_diag (script.diag_name->c_str (), g);
+ }
+ }
+ else if (exec_diag)
+ {
+ if (script.diag_preamble_temp_dir && !script.depdb_preamble_temp_dir)
+ env.set_temp_dir_variable ();
+
+ pair<names, location> diag (
+ p.execute_diag_preamble (rs, bs,
+ env, script, run,
+ verb == 1 /* diag */,
+ !exec_depdb /* enter */,
+ false /* leave */));
+ if (verb == 1)
+ print_custom_diag (bs, move (diag.first), diag.second);
+ }
+
+ if (exec_body)
+ {
+ // On failure remove the target files that may potentially exist but
+ // be invalid.
+ //
+ // Note: we may leave dynamic members if we don't know about them yet.
+ // Feels natural enough.
+ //
+ small_vector<auto_rmfile, 8> rms;
+
+ if (!ctx.dry_run)
+ {
+ for (const target* m: g.members)
+ {
+ if (auto* f = m->is_a<file> ())
+ rms.emplace_back (f->path ());
+ }
+ }
+
+ if (script.body_temp_dir &&
+ !script.depdb_preamble_temp_dir &&
+ !script.diag_preamble_temp_dir)
+ env.set_temp_dir_variable ();
+
+ p.execute_body (rs, bs,
+ env, script, run,
+ !exec_depdb && !exec_diag /* enter */,
+ false /* leave */);
+
+ if (!ctx.dry_run)
+ {
+ if (deferred_failure)
+ fail << "expected error exit status from recipe body";
+
+ // @@ TODO: expl: byproduct
+ //
+ // Note: will not work for dynamic members if we don't know about them
+ // yet. Could probably fix by doing this later, after the dynamic
+ // dependency extraction.
+ //
+#ifndef _WIN32
+ auto chmod = [] (const path& p)
+ {
+ path_perms (p,
+ (path_perms (p) |
+ permissions::xu |
+ permissions::xg |
+ permissions::xo));
+ };
+
+ for (const target* m: g.members)
+ {
+ if (auto* p = m->is_a<exe> ())
+ chmod (p->path ());
+ }
+#endif
+ for (auto& rm: rms)
+ rm.cancel ();
+ }
+ }
+
+ return exec_diag || exec_body;
+ }
+
target_state adhoc_buildscript_rule::
perform_clean_file (action a, const target& t)
{
@@ -1627,6 +2339,8 @@ namespace build2
// Finally, we print the entire ad hoc group at verbosity level 1, similar
// to the default update diagnostics.
//
+ // @@ TODO: .t may also be a temporary directory (and below).
+ //
return perform_clean_extra (a,
t.as<file> (),
{".d", ".t"},
@@ -1635,6 +2349,31 @@ namespace build2
}
target_state adhoc_buildscript_rule::
+ perform_clean_group (action a, const target& xt)
+ {
+ const group& g (xt.as<group> ());
+
+ path d, t;
+ if (g.members_static != 0)
+ {
+ const path& p (g.members.front ()->as<file> ().path ());
+ d = p + ".d";
+ t = p + ".t";
+ }
+ else
+ {
+ // See target_path lambda in apply().
+ //
+ t = g.dir / (g.name + '.' + g.type ().name);
+ d = t + ".d";
+ t += ".t";
+ }
+
+ return perform_clean_group_extra (a, g, {d.string ().c_str (),
+ t.string ().c_str ()});
+ }
+
+ target_state adhoc_buildscript_rule::
default_action (action a,
const target& t,
const optional<timestamp>& deadline) const
@@ -1643,9 +2382,34 @@ namespace build2
context& ctx (t.ctx);
- execute_prerequisites (a, t);
+ target_state ts (target_state::unchanged);
- if (!ctx.dry_run || verb != 0)
+ if (ctx.current_mode == execution_mode::first)
+ ts |= straight_execute_prerequisites (a, t);
+
+ bool exec (!ctx.dry_run || verb != 0);
+
+ // Special handling for fsdir{} (which is the recommended if somewhat
+ // hackish way to represent directory symlinks). See fsdir_rule for
+ // background.
+ //
+ // @@ Note that because there is no depdb, we cannot detect the target
+ // directory change (or any other changes in the script).
+ //
+ if (exec &&
+ (a == perform_update_id || a == perform_clean_id) &&
+ t.is_a<fsdir> ())
+ {
+ // For update we only want to skip if it's a directory. For clean we
+ // want to (try to) clean up any filesystem entry, including a dangling
+ // symlink.
+ //
+ exec = a == perform_update_id
+ ? !exists (t.dir, true /* ignore_errors */)
+ : build2::entry_exists (t.dir, false /* follow_symlinks */);
+ }
+
+ if (exec)
{
const scope& bs (t.base_scope ());
const scope& rs (*bs.root_scope ());
@@ -1701,9 +2465,14 @@ namespace build2
p.execute_body (rs, bs, e, script, r, !exec_diag /* enter */);
}
+
+ ts |= target_state::changed;
}
- return target_state::changed;
+ if (ctx.current_mode == execution_mode::last)
+ ts |= reverse_execute_prerequisites (a, t);
+
+ return ts;
}
void adhoc_buildscript_rule::
diff --git a/libbuild2/adhoc-rule-buildscript.hxx b/libbuild2/adhoc-rule-buildscript.hxx
index 02939c1..336dceb 100644
--- a/libbuild2/adhoc-rule-buildscript.hxx
+++ b/libbuild2/adhoc-rule-buildscript.hxx
@@ -36,16 +36,17 @@ namespace build2
const optional<timestamp>&) const override;
target_state
- perform_update_file (action, const target&) const;
+ perform_update_file_or_group (action, const target&) const;
struct match_data;
struct match_data_byproduct;
target_state
- perform_update_file_dyndep (action, const target&, match_data&) const;
+ perform_update_file_or_group_dyndep (
+ action, const target&, match_data&) const;
target_state
- perform_update_file_dyndep_byproduct (
+ perform_update_file_or_group_dyndep_byproduct (
action, const target&, match_data_byproduct&) const;
optional<target_state>
@@ -58,9 +59,19 @@ namespace build2
build::script::default_runner&,
bool deferred_failure = false) const;
+ bool
+ execute_update_group (const scope&,
+ action a, const group&,
+ build::script::environment&,
+ build::script::default_runner&,
+ bool deferred_failure = false) const;
+
static target_state
perform_clean_file (action, const target&);
+ static target_state
+ perform_clean_group (action, const target&);
+
target_state
default_action (action, const target&, const optional<timestamp>&) const;
@@ -85,9 +96,12 @@ namespace build2
public:
using script_type = build::script::script;
- // The prerequisite_target::include bit that indicates update=unmatch.
+ // The prerequisite_target::include bits that indicate update=unmatch and
+ // an ad hoc version of that.
//
- static const uintptr_t include_unmatch = 0x100;
+ static const uintptr_t include_unmatch = 0x100;
+ static const uintptr_t include_unmatch_adhoc = 0x200;
+
script_type script;
string checksum; // Script text hash.
diff --git a/libbuild2/adhoc-rule-cxx.cxx b/libbuild2/adhoc-rule-cxx.cxx
index fbe967e..8a91809 100644
--- a/libbuild2/adhoc-rule-cxx.cxx
+++ b/libbuild2/adhoc-rule-cxx.cxx
@@ -25,6 +25,13 @@ namespace build2
return true;
}
+ recipe cxx_rule_v1::
+ apply (action, target&) const
+ {
+ assert (false); // This (or the match_extra version) must be overridden.
+ return empty_recipe;
+ }
+
// adhoc_cxx_rule
//
adhoc_cxx_rule::
@@ -95,8 +102,10 @@ namespace build2
load_module_library (const path& lib, const string& sym, string& err);
bool adhoc_cxx_rule::
- match (action a, target& t, const string& hint, match_extra& me) const
+ match (action a, target& xt, const string& hint, match_extra& me) const
{
+ const target& t (xt); // See adhoc_rule::match() for background.
+
if (pattern != nullptr && !pattern->match (a, t, hint, me))
return false;
@@ -302,7 +311,7 @@ namespace build2
//
auto_thread_env penv (nullptr);
context& ctx (*t.ctx.module_context);
- scheduler::phase_guard pg (ctx.sched);
+ scheduler::phase_guard pg (*ctx.sched);
uint16_t verbosity (3); // Project creation command verbosity.
@@ -349,6 +358,46 @@ namespace build2
// This way the configuration will be always in sync with ~build2
// and we can update the recipe manually (e.g., for debugging).
//
+ // Should we use ~build2 or ~build2-no-warnings? This case is similar
+ // to private host/module configurations in that the user doesn't have
+ // any control over the options used, etc. So it would be natural to
+ // use the no-warnings variant. However, unlike with tools/modules
+ // which can be configured in a user-created configuration (and which
+ // will normally be the case during development), for recipes it's
+ // always this automatically-created configuration. It feels like the
+ // best we can do is use ~build2-no-warnings by default but switch to
+ // ~build2 if the project is configured for development
+ // (config.<project>.develop).
+ //
+ string cfg;
+ {
+ const project_name& pn (named_project (rs));
+
+ if (!pn.empty ())
+ {
+ string var ("config." + pn.variable () + ".develop");
+
+ if (lookup l = rs[var])
+ {
+ // The value could be untyped if the project didn't declare this
+ // variable. Let's handle that case gracefully.
+ //
+ try
+ {
+ if (convert<bool> (*l))
+ cfg = "~build2";
+ }
+ catch (const invalid_argument& e)
+ {
+ fail << "invalid " << var << " value: " << e;
+ }
+ }
+ }
+
+ if (cfg.empty ())
+ cfg = "~build2-no-warnings";
+ }
+
create_project (
pd,
dir_path (), /* amalgamation */
@@ -357,7 +406,7 @@ namespace build2
{"cxx."}, /* root_modules */
"", /* root_post */
string ("config"), /* config_module */
- string ("config.config.load = ~build2"), /* config_file */
+ "config.config.load = " + cfg, /* config_file */
false, /* buildfile */
"build2 core", /* who */
verbosity); /* verbosity */
@@ -387,6 +436,7 @@ namespace build2
<< "#include <libbuild2/depdb.hxx>" << '\n'
<< "#include <libbuild2/scope.hxx>" << '\n'
<< "#include <libbuild2/target.hxx>" << '\n'
+ << "#include <libbuild2/recipe.hxx>" << '\n'
<< "#include <libbuild2/dyndep.hxx>" << '\n'
<< "#include <libbuild2/context.hxx>" << '\n'
<< "#include <libbuild2/variable.hxx>" << '\n'
@@ -674,13 +724,41 @@ namespace build2
}
}
- return impl->match (a, t, hint, me);
+ return impl->match (a, xt, hint, me);
}
#endif // BUILD2_BOOTSTRAP || LIBBUILD2_STATIC_BUILD
recipe adhoc_cxx_rule::
apply (action a, target& t, match_extra& me) const
{
+ // Handle matching explicit group member (see adhoc_rule::match() for
+ // background).
+ //
+ if (const group* g = (t.group != nullptr
+ ? t.group->is_a<group> ()
+ : nullptr))
+ {
+ // @@ Hm, this looks very similar to how we handle ad hoc group members.
+ // Shouldn't impl be given a chance to translate options or some
+ // such?
+ //
+ match_sync (a, *g, 0 /* options */);
+ return group_recipe; // Execute the group's recipe.
+ }
+
+ // Note that while we probably could call pattern's apply_group_members()
+ // here, apply_group_prerequisites() is normally called after adding
+ // prerequisites but before matching, which can only be done from the
+ // rule's implementation. Also, for apply_group_members(), there is the
+ // explicit group special case which may also require custom logic.
+ // So it feels best to leave both to the implementation.
+
return impl.load (memory_order_relaxed)->apply (a, t, me);
}
+
+ void adhoc_cxx_rule::
+ reapply (action a, target& t, match_extra& me) const
+ {
+ return impl.load (memory_order_relaxed)->reapply (a, t, me);
+ }
}
diff --git a/libbuild2/adhoc-rule-cxx.hxx b/libbuild2/adhoc-rule-cxx.hxx
index 9a17447..2ac2281 100644
--- a/libbuild2/adhoc-rule-cxx.hxx
+++ b/libbuild2/adhoc-rule-cxx.hxx
@@ -36,11 +36,6 @@ namespace build2
// cannot be injected as a real prerequisite since it's from a different
// build context).
//
- // If pattern is not NULL then this recipe belongs to an ad hoc pattern
- // rule and apply() may need to call the pattern's apply_*() functions if
- // the pattern has any ad hoc group member substitutions or prerequisite
- // substitutions/non-patterns, respectively.
- //
const location recipe_loc; // Buildfile location of the recipe.
const target_state recipe_state; // State of recipe library target.
const adhoc_rule_pattern* pattern; // Ad hoc pattern rule of recipe.
@@ -52,8 +47,26 @@ namespace build2
// Return true by default.
//
+ // Note: must treat target as const (unless known to match a non-group).
+ // See adhoc_rule::match() for background.
+ //
virtual bool
match (action, target&) const override;
+
+ using simple_rule::match; // Unhide the match_extra version.
+
+ // Either this version or the one with match_extra must be overridden.
+ //
+ // If the pattern member above is not NULL then this recipe belongs to an
+ // ad hoc pattern rule and the implementation may need to call the
+ // pattern's apply_*() functions if the pattern has any ad hoc group
+ // member substitutions or prerequisite substitutions/non-patterns,
+ // respectively.
+ //
+ virtual recipe
+ apply (action, target&) const override;
+
+ using simple_rule::apply; // Unhide the match_extra version.
};
// Note: not exported.
@@ -67,6 +80,9 @@ namespace build2
virtual recipe
apply (action, target&, match_extra&) const override;
+ virtual void
+ reapply (action, target&, match_extra&) const override;
+
adhoc_cxx_rule (string, const location&, size_t,
uint64_t ver,
optional<string> sep);
diff --git a/libbuild2/adhoc-rule-regex-pattern.cxx b/libbuild2/adhoc-rule-regex-pattern.cxx
index 59a63bc..2d60520 100644
--- a/libbuild2/adhoc-rule-regex-pattern.cxx
+++ b/libbuild2/adhoc-rule-regex-pattern.cxx
@@ -86,7 +86,9 @@ namespace build2
tt = n.untyped () ? &file::static_type : s.find_target_type (n.type);
if (tt == nullptr)
- fail (loc) << "unknown target type " << n.type;
+ fail (loc) << "unknown target type " << n.type <<
+ info << "perhaps the module that defines this target type is "
+ << "not loaded by project " << *s.root_scope ();
}
bool e (n.pattern &&
@@ -126,10 +128,13 @@ namespace build2
}
bool adhoc_rule_regex_pattern::
- match (action a, target& t, const string&, match_extra& me) const
+ match (action a, const target& t, const string&, match_extra& me) const
{
tracer trace ("adhoc_rule_regex_pattern::match");
+ // Note: target may not be locked in which case we should not modify
+ // target or match_extra (see adhoc_rule::match() for background).
+
// The plan is as follows: First check the "type signature" of the target
// and its prerequisites (the primary target type has already been matched
// by the rule matching machinery). If there is a match, then concatenate
@@ -161,12 +166,17 @@ namespace build2
// implementation. Except we support the unmatch and match values in
// the update variable.
//
+ // Note: assuming group prerequisites are immutable (not locked).
+ //
for (prerequisite_member p: group_prerequisite_members (a, t))
{
// Note that here we don't validate the update operation override
// value (since we may not match). Instead the rule does this in
// apply().
//
+ // Note: assuming include()'s use of target only relied on immutable
+ // data (not locked).
+ //
lookup l;
if (include (a, t, p, a.operation () == update_id ? &l : nullptr) ==
include_type::normal && p.is_a (tt))
@@ -205,10 +215,13 @@ namespace build2
// So the plan is to store the string in match_extra::data() and
// regex_match_results (which we can move) in the auxiliary data storage.
//
+ // Note: only cache if locked.
+ //
static_assert (sizeof (string) <= match_extra::data_size,
"match data too large");
- string& ns (me.data (string ()));
+ string tmp;
+ string& ns (me.locked ? me.data (string ()) : tmp);
auto append_name = [&ns,
first = true,
@@ -226,10 +239,12 @@ namespace build2
// Primary target (always a pattern).
//
auto te (targets_.end ()), ti (targets_.begin ());
- append_name (t.key (), *ti);
+ append_name (t.key (), *ti); // Immutable (not locked).
// Match ad hoc group members.
//
+ // Note: shouldn't be in effect for an explicit group (not locked).
+ //
while ((ti = find_if (ti + 1, te, pattern)) != te)
{
const target* at (find_adhoc_member (t, ti->type));
@@ -279,7 +294,9 @@ namespace build2
return false;
}
- t.data (a, move (mr));
+ if (me.locked)
+ t.data (a, move (mr));
+
return true;
}
@@ -304,8 +321,14 @@ namespace build2
}
void adhoc_rule_regex_pattern::
- apply_adhoc_members (action a, target& t, const scope&, match_extra&) const
+ apply_group_members (action a, target& t, const scope& bs,
+ match_extra&) const
{
+ if (targets_.size () == 1) // The group/primary target is always present.
+ return;
+
+ group* g (t.is_a<group> ());
+
const auto& mr (t.data<regex_match_results> (a));
for (auto i (targets_.begin () + 1); i != targets_.end (); ++i)
@@ -333,14 +356,86 @@ namespace build2
d.normalize ();
}
- // @@ TODO: currently this uses type as the ad hoc member identity.
+ string n (substitute (
+ t,
+ mr,
+ e.name.value,
+ (g != nullptr
+ ? "explicit target group member"
+ : "ad hoc target group member")));
+
+ // @@ TODO: save location in constructor?
//
- add_adhoc_member (
- t,
- e.type,
- move (d),
- dir_path () /* out */,
- substitute (t, mr, e.name.value, "ad hoc target group member"));
+ location loc;
+
+ optional<string> ext (target::split_name (n, loc));
+
+ if (g != nullptr)
+ {
+ auto& ms (g->members);
+
+ // These are conceptually static but they behave more like dynamic in
+ // that we likely need to insert the target, set its group, etc. In a
+ // sense, they are rule-static, but group-dynamic.
+ //
+ // Note: a custom version of the dyndep_rule::inject_group_member()
+ // logic.
+ //
+ auto l (search_new_locked (
+ bs.ctx,
+ e.type,
+ move (d),
+ dir_path (), // Always in out.
+ move (n),
+ ext ? &*ext : nullptr,
+ &bs));
+
+ const target& t (l.first); // Note: non-const only if have lock.
+
+ if (l.second)
+ {
+ l.first.group = g;
+ l.second.unlock ();
+ }
+ else
+ {
+ if (find (ms.begin (), ms.end (), &t) != ms.end ())
+ continue;
+
+ if (t.group != g) // Note: atomic.
+ {
+ // We can only update the group under lock.
+ //
+ target_lock tl (lock (a, t));
+
+ if (!tl)
+ fail << "group " << *g << " member " << t << " is already matched" <<
+ info << "static group members specified by pattern rules cannot "
+ << "be used as prerequisites directly, only via group";
+
+ if (t.group == nullptr)
+ tl.target->group = g;
+ else if (t.group != g)
+ {
+ fail << "group " << *g << " member " << t
+ << " is already member of group " << *t.group;
+ }
+ }
+ }
+
+ ms.push_back (&t);
+ }
+ else
+ {
+ add_adhoc_member_identity (
+ t,
+ e.type,
+ move (d),
+ dir_path (), // Always in out.
+ move (n),
+ move (ext),
+ loc);
+ }
}
}
@@ -357,6 +452,18 @@ namespace build2
auto& pts (t.prerequisite_targets[a]);
+ // Avoid duplicating fsdir{} that may have already been injected by
+ // inject_fsdir() (in which case it is expected to be first).
+ //
+ const target* dir (nullptr);
+ if (!pts.empty ())
+ {
+ const prerequisite_target& pt (pts.front ());
+
+ if (pt.target != nullptr && pt.adhoc () && pt.target->is_a<fsdir> ())
+ dir = pt.target;
+ }
+
for (const element& e: prereqs_)
{
// While it would be nice to avoid copying here, the semantics of
@@ -393,7 +500,7 @@ namespace build2
const target& pt (search (t, move (n), *s, &e.type));
- if (clean && !pt.in (*bs.root_scope ()))
+ if (&pt == dir || (clean && !pt.in (*bs.root_scope ())))
continue;
// @@ TODO: it could be handy to mark a prerequisite (e.g., a tool)
diff --git a/libbuild2/adhoc-rule-regex-pattern.hxx b/libbuild2/adhoc-rule-regex-pattern.hxx
index 597f30d..9cb7874 100644
--- a/libbuild2/adhoc-rule-regex-pattern.hxx
+++ b/libbuild2/adhoc-rule-regex-pattern.hxx
@@ -32,10 +32,10 @@ namespace build2
names&&, const location&);
virtual bool
- match (action, target&, const string&, match_extra&) const override;
+ match (action, const target&, const string&, match_extra&) const override;
virtual void
- apply_adhoc_members (action, target&,
+ apply_group_members (action, target&,
const scope&,
match_extra&) const override;
diff --git a/libbuild2/algorithm.cxx b/libbuild2/algorithm.cxx
index cc48a38..16f1503 100644
--- a/libbuild2/algorithm.cxx
+++ b/libbuild2/algorithm.cxx
@@ -54,19 +54,21 @@ namespace build2
const target&
search (const target& t, const prerequisite_key& pk)
{
- assert (t.ctx.phase == run_phase::match);
+ context& ctx (t.ctx);
+
+ assert (ctx.phase == run_phase::match);
// If this is a project-qualified prerequisite, then this is import's
- // business.
+ // business (phase 2).
//
if (pk.proj)
- return import (t.ctx, pk);
+ return import2 (ctx, pk);
- if (const target* pt = pk.tk.type->search (t, pk))
+ if (const target* pt = pk.tk.type->search (ctx, &t, pk))
return *pt;
if (pk.tk.out->empty ())
- return create_new_target (t.ctx, pk);
+ return create_new_target (ctx, pk);
// If this is triggered, then you are probably not passing scope to
// search() (which leads to search_existing_file() being skipped).
@@ -77,13 +79,15 @@ namespace build2
pair<target&, ulock>
search_locked (const target& t, const prerequisite_key& pk)
{
- assert (t.ctx.phase == run_phase::match && !pk.proj);
+ context& ctx (t.ctx);
+
+ assert (ctx.phase == run_phase::match && !pk.proj);
- if (const target* pt = pk.tk.type->search (t, pk))
+ if (const target* pt = pk.tk.type->search (ctx, &t, pk))
return {const_cast<target&> (*pt), ulock ()};
if (pk.tk.out->empty ())
- return create_new_target_locked (t.ctx, pk);
+ return create_new_target_locked (ctx, pk);
// If this is triggered, then you are probably not passing scope to
// search() (which leads to search_existing_file() being skipped).
@@ -96,7 +100,7 @@ namespace build2
{
return pk.proj
? import_existing (ctx, pk)
- : search_existing_target (ctx, pk);
+ : pk.tk.type->search (ctx, nullptr /* existing */, pk);
}
const target&
@@ -104,7 +108,7 @@ namespace build2
{
assert (ctx.phase == run_phase::load || ctx.phase == run_phase::match);
- if (const target* pt = search_existing_target (ctx, pk))
+ if (const target* pt = search_existing_target (ctx, pk, true /*out_only*/))
return *pt;
return create_new_target (ctx, pk);
@@ -115,14 +119,14 @@ namespace build2
{
assert (ctx.phase == run_phase::load || ctx.phase == run_phase::match);
- if (const target* pt = search_existing_target (ctx, pk))
+ if (const target* pt = search_existing_target (ctx, pk, true /*out_only*/))
return {const_cast<target&> (*pt), ulock ()};
return create_new_target_locked (ctx, pk);
}
const target&
- search (const target& t, name n, const scope& s, const target_type* tt)
+ search (const target& t, name&& n, const scope& s, const target_type* tt)
{
assert (t.ctx.phase == run_phase::match);
@@ -176,16 +180,12 @@ namespace build2
}
bool q (cn.qualified ());
-
- // @@ OUT: for now we assume the prerequisite's out is undetermined.
- // Would need to pass a pair of names.
- //
prerequisite_key pk {
n.proj, {tt, &n.dir, q ? &empty_dir_path : &out, &n.value, ext}, &s};
return q
? import_existing (s.ctx, pk)
- : search_existing_target (s.ctx, pk);
+ : tt->search (s.ctx, nullptr /* existing */, pk);
}
const target*
@@ -232,8 +232,14 @@ namespace build2
// If the work_queue is absent, then we don't wait.
//
+ // While already applied or executed targets are normally not locked, if
+ // options contain any bits that are not already in cur_options, then the
+ // target is locked even in these states.
+ //
target_lock
- lock_impl (action a, const target& ct, optional<scheduler::work_queue> wq)
+ lock_impl (action a, const target& ct,
+ optional<scheduler::work_queue> wq,
+ uint64_t options)
{
context& ctx (ct.ctx);
@@ -248,7 +254,8 @@ namespace build2
size_t appl (b + target::offset_applied);
size_t busy (b + target::offset_busy);
- atomic_count& task_count (ct[a].task_count);
+ const target::opstate& cs (ct[a]);
+ atomic_count& task_count (cs.task_count);
while (!task_count.compare_exchange_strong (
e,
@@ -277,13 +284,19 @@ namespace build2
// to switch the phase to load. Which would result in a deadlock
// unless we release the phase.
//
- phase_unlock u (ct.ctx, true /* unlock */, true /* delay */);
- e = ctx.sched.wait (busy - 1, task_count, u, *wq);
+ phase_unlock u (ct.ctx, true /* delay */);
+ e = ctx.sched->wait (busy - 1, task_count, u, *wq);
}
- // We don't lock already applied or executed targets.
+ // We don't lock already applied or executed targets unless there
+ // are new options.
+ //
+ // Note: we don't have the lock yet so we must use atomic cur_options.
+ // We also have to re-check this once we've grabbed the lock.
//
- if (e >= appl)
+ if (e >= appl &&
+ (cs.match_extra.cur_options_.load (memory_order_relaxed) & options)
+ == options)
return target_lock {a, nullptr, e - b, false};
}
@@ -298,17 +311,33 @@ namespace build2
{
// First lock for this operation.
//
+ // Note that we use 0 match_extra::cur_options_ as an indication of not
+ // being applied yet. In particular, in the match phase, this is used to
+ // distinguish between the "busy because not applied yet" and "busy
+ // because relocked to reapply match options" cases. See
+ // target::matched() for details.
+ //
s.rule = nullptr;
s.dependents.store (0, memory_order_release);
+ s.match_extra.cur_options_.store (0, memory_order_relaxed);
offset = target::offset_touched;
}
else
{
+ // Re-check the options if already applied or worse.
+ //
+ if (e >= appl && (s.match_extra.cur_options & options) == options)
+ {
+ // Essentially unlock_impl().
+ //
+ task_count.store (e, memory_order_release);
+ ctx.sched->resume (task_count);
+
+ return target_lock {a, nullptr, e - b, false};
+ }
+
offset = e - b;
- assert (offset == target::offset_touched ||
- offset == target::offset_tried ||
- offset == target::offset_matched);
}
return target_lock {a, &t, offset, first};
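A condensed sketch of the options re-check pattern used above (illustrative only, with made-up names): the already-satisfied case is detected with a relaxed atomic load before acquiring the lock and then re-checked once the lock is held, since another thread may have applied different options in between.

    #include <atomic>
    #include <mutex>
    #include <cstdint>

    struct state
    {
      std::mutex m;
      std::atomic<std::uint64_t> cur_options {0};
    };

    // Return true if the requested options are already satisfied (in which
    // case no lock needs to be retained).
    //
    static bool
    satisfied (state& s, std::uint64_t options)
    {
      // Cheap check without the lock.
      //
      if ((s.cur_options.load (std::memory_order_relaxed) & options) == options)
        return true;

      // Re-check under the lock since the options may have changed.
      //
      std::lock_guard<std::mutex> l (s.m);
      return (s.cur_options.load (std::memory_order_relaxed) & options) == options;
    }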
@@ -327,7 +356,7 @@ namespace build2
// this target.
//
task_count.store (offset + ctx.count_base (), memory_order_release);
- ctx.sched.resume (task_count);
+ ctx.sched->resume (task_count);
}
target&
@@ -335,7 +364,8 @@ namespace build2
const target_type& tt,
dir_path dir,
dir_path out,
- string n)
+ string n,
+ optional<string> ext)
{
tracer trace ("add_adhoc_member");
@@ -345,31 +375,71 @@ namespace build2
if (*mp != nullptr) // Might already be there.
return **mp;
- target* m (nullptr);
- {
- pair<target&, ulock> r (
- t.ctx.targets.insert_locked (tt,
- move (dir),
- move (out),
- move (n),
- nullopt /* ext */,
- target_decl::implied,
- trace,
- true /* skip_find */));
+ pair<target&, ulock> r (
+ t.ctx.targets.insert_locked (tt,
+ move (dir),
+ move (out),
+ move (n),
+ move (ext),
+ target_decl::implied,
+ trace,
+ true /* skip_find */));
- if (r.second) // Inserted.
- {
- m = &r.first;
- m->group = &t;
- }
- }
+ target& m (r.first);
+
+ if (!r.second)
+ fail << "target " << m << " already exists and cannot be made "
+ << "ad hoc member of group " << t;
- assert (m != nullptr);
- *mp = m;
+ m.group = &t;
+ *mp = &m;
- return *m;
+ return m;
};
+ pair<target&, bool>
+ add_adhoc_member_identity (target& t,
+ const target_type& tt,
+ dir_path dir,
+ dir_path out,
+ string n,
+ optional<string> ext,
+ const location& loc)
+ {
+ // NOTE: see similar code in parser::enter_adhoc_members().
+
+ tracer trace ("add_adhoc_member_identity");
+
+ pair<target&, ulock> r (
+ t.ctx.targets.insert_locked (tt,
+ move (dir),
+ move (out),
+ move (n),
+ move (ext),
+ target_decl::implied,
+ trace,
+ true /* skip_find */));
+ target& m (r.first);
+
+ // Add as an ad hoc member at the end of the chain skipping duplicates.
+ //
+ const_ptr<target>* mp (&t.adhoc_member);
+ for (; *mp != nullptr; mp = &(*mp)->adhoc_member)
+ {
+ if (*mp == &m)
+ return {m, false};
+ }
+
+ if (!r.second)
+ fail (loc) << "target " << m << " already exists and cannot be made "
+ << "ad hoc member of group " << t;
+
+ m.group = &t;
+ *mp = &m;
+
+ return {m, true};
+ }
+
static bool
trace_target (const target& t, const vector<name>& ns)
{
@@ -448,7 +518,7 @@ namespace build2
auto match = [a, &t, &me] (const adhoc_rule& r, bool fallback) -> bool
{
- me.init (fallback);
+ me.reinit (fallback);
if (auto* f = (a.outer ()
? t.ctx.current_outer_oif
@@ -466,46 +536,167 @@ namespace build2
? a
: action (a.meta_operation (), a.outer_operation ()));
- auto b (t.adhoc_recipes.begin ()), e (t.adhoc_recipes.end ());
- auto i (find_if (
- b, e,
- [&match, ca] (const shared_ptr<adhoc_rule>& r)
- {
- auto& as (r->actions);
- return (find (as.begin (), as.end (), ca) != as.end () &&
- match (*r, false));
- }));
-
- if (i == e)
+ // If returned rule_match is NULL, then the second half indicates whether
+ // the rule was found (but did not match).
+ //
+ auto find_match = [&t, &match] (action ca) -> pair<const rule_match*, bool>
{
- // See if we have a fallback implementation.
+ // Note that there can be at most one recipe for any action.
//
- // See the adhoc_rule::reverse_fallback() documentation for details on
- // what's going on here.
- //
- i = find_if (
- b, e,
- [&match, ca, &t] (const shared_ptr<adhoc_rule>& r)
+ auto b (t.adhoc_recipes.begin ()), e (t.adhoc_recipes.end ());
+ auto i (find_if (
+ b, e,
+ [ca] (const shared_ptr<adhoc_rule>& r)
+ {
+ auto& as (r->actions);
+ return find (as.begin (), as.end (), ca) != as.end ();
+ }));
+
+ bool f (i != e);
+ if (f)
+ {
+ if (!match (**i, false /* fallback */))
+ i = e;
+ }
+ else
+ {
+ // See if we have a fallback implementation.
+ //
+ // See the adhoc_rule::reverse_fallback() documentation for details on
+ // what's going on here.
+ //
+ // Note that it feels natural not to look for a fallback if a custom
+ // recipe was provided but did not match.
+ //
+ const target_type& tt (t.type ());
+ i = find_if (
+ b, e,
+ [ca, &tt] (const shared_ptr<adhoc_rule>& r)
+ {
+ // Only the rule that provides the "forward" action can provide
+ // "reverse", so there can be at most one such rule.
+ //
+ return r->reverse_fallback (ca, tt);
+ });
+
+ f = (i != e);
+ if (f)
{
- auto& as (r->actions);
+ if (!match (**i, true /* fallback */))
+ i = e;
+ }
+ }
- // Note that the rule could be there but not match (see above),
- // thus this extra check.
- //
- return (find (as.begin (), as.end (), ca) == as.end () &&
- r->reverse_fallback (ca, t.type ()) &&
- match (*r, true));
- });
+ return pair<const rule_match*, bool> (
+ i != e ? &(*i)->rule_match : nullptr,
+ f);
+ };
+
+ pair<const rule_match*, bool> r (find_match (ca));
+
+ // Provide the "add dist_* and configure_* actions for every perform_*
+ // action unless there is a custom one" semantics (see the equivalent ad
+ // hoc rule registration code in the parser for background).
+ //
+ // Note that handling this in the parser by adding the extra actions is
+ // difficult because we store recipe actions in the recipe itself (
+ // adhoc_rule::actions) and a recipe could be shared among multiple
+ // targets, some of which may provide a "custom one" as another recipe. On
+ // the other hand, handling it here is relatively straightforward.
+ //
+ if (r.first == nullptr && !r.second)
+ {
+ meta_operation_id mo (ca.meta_operation ());
+ if (mo == configure_id || mo == dist_id)
+ {
+ action pa (perform_id, ca.operation ());
+ r = find_match (pa);
+ }
}
- return i != e ? &(*i)->rule_match : nullptr;
+ return r.first;
}
// Return the matching rule or NULL if no match and try_match is true.
//
const rule_match*
- match_rule (action a, target& t, const rule* skip, bool try_match)
+ match_rule_impl (action a, target& t,
+ uint64_t options,
+ const rule* skip,
+ bool try_match,
+ match_extra* pme)
{
+ using fallback_rule = adhoc_rule_pattern::fallback_rule;
+
+ auto adhoc_rule_match = [] (const rule_match& r)
+ {
+ return dynamic_cast<const adhoc_rule*> (&r.second.get ());
+ };
+
+ auto fallback_rule_match = [] (const rule_match& r)
+ {
+ return dynamic_cast<const fallback_rule*> (&r.second.get ());
+ };
+
+ // Note: we copy the options value to me.new_options after successfully
+ // matching the rule to make sure rule::match() implementations don't rely
+ // on it.
+ //
+ match_extra& me (pme == nullptr ? t[a].match_extra : *pme);
+
+ if (const target* g = t.group)
+ {
+ // If this is a group with dynamic members, then match it with the
+ // group's rule automatically. See dyndep_rule::inject_group_member()
+ // for background.
+ //
+ if ((g->type ().flags & target_type::flag::dyn_members) ==
+ target_type::flag::dyn_members)
+ {
+ if (g->matched (a, memory_order_acquire))
+ {
+ const rule_match* r (g->state[a].rule);
+ assert (r != nullptr); // Shouldn't happen with dyn_members.
+
+ me.new_options = options;
+ return r;
+ }
+
+ // Assume static member and fall through.
+ }
+
+ // If this is a member of a group-based target, then first try to find a
+ // matching ad hoc recipe/rule by matching (to an ad hoc recipe/rule)
+ // the group but applying to the member. See adhoc_rule::match() for
+ // background, including for why const_cast should be safe.
+ //
+ // To put it another way, if a group is matched by an ad hoc
+ // recipe/rule, then we want all the members to be matched to the same
+ // recipe/rule.
+ //
+ // Note that such a group is dyn_members so we would have tried the
+ // "already matched" case above.
+ //
+ if (g->is_a<group> ())
+ {
+ // We cannot init match_extra from the target if it's unlocked so use
+ // a temporary (it shouldn't be modified if unlocked).
+ //
+ match_extra gme (false /* locked */);
+ if (const rule_match* r = match_rule_impl (a, const_cast<target&> (*g),
+ 0 /* options */,
+ skip,
+ true /* try_match */,
+ &gme))
+ {
+ me.new_options = options;
+ return r;
+ }
+
+ // Fall through to normal match of the member.
+ }
+ }
+
const scope& bs (t.base_scope ());
// Match rules in project environment.
@@ -514,8 +705,6 @@ namespace build2
if (const scope* rs = bs.root_scope ())
penv = auto_project_env (*rs);
- match_extra& me (t[a].match_extra);
-
// First check for an ad hoc recipe.
//
// Note that a fallback recipe is preferred over a non-fallback rule.
@@ -523,7 +712,10 @@ namespace build2
if (!t.adhoc_recipes.empty ())
{
if (const rule_match* r = match_adhoc_recipe (a, t, me))
+ {
+ me.new_options = options;
return r;
+ }
}
// If this is an outer operation (Y-for-X), then we look for rules
@@ -619,8 +811,6 @@ namespace build2
// reverse_fallback() rather than it returning (a list) of
// reverse actions, which would be necessary to register them.
//
- using fallback_rule = adhoc_rule_pattern::fallback_rule;
-
auto find_fallback = [mo, o, tt] (const fallback_rule& fr)
-> const rule_match*
{
@@ -633,21 +823,27 @@ namespace build2
if (oi == 0)
{
- if (auto* fr =
- dynamic_cast<const fallback_rule*> (&r->second.get ()))
+ if (const fallback_rule* fr = fallback_rule_match (*r))
{
if ((r = find_fallback (*fr)) == nullptr)
continue;
}
}
+ // Skip non-ad hoc rules if the target is not locked (see above;
+ // note that in this case match_extra is a temporary which we
+ // can reinit).
+ //
+ if (!me.locked && !adhoc_rule_match (*r))
+ continue;
+
const string& n (r->first);
const rule& ru (r->second);
if (&ru == skip)
continue;
- me.init (oi == 0 /* fallback */);
+ me.reinit (oi == 0 /* fallback */);
{
auto df = make_diag_frame (
[a, &t, &n](const diag_record& dr)
@@ -672,14 +868,16 @@ namespace build2
if (oi == 0)
{
- if (auto* fr =
- dynamic_cast<const fallback_rule*> (&r1->second.get ()))
+ if (const fallback_rule* fr = fallback_rule_match (*r1))
{
if ((r1 = find_fallback (*fr)) == nullptr)
continue;
}
}
+ if (!me.locked && !adhoc_rule_match (*r1))
+ continue;
+
const string& n1 (r1->first);
const rule& ru1 (r1->second);
@@ -698,8 +896,7 @@ namespace build2
//
// @@ Can't we temporarily swap things out in target?
//
- match_extra me1;
- me1.init (oi == 0);
+ match_extra me1 (me.locked, oi == 0 /* fallback */);
if (!ru1.match (a, t, *hint, me1))
continue;
}
@@ -715,7 +912,10 @@ namespace build2
}
if (!ambig)
+ {
+ me.new_options = options;
return r;
+ }
else
dr << info << "use rule hint to disambiguate this match";
}
@@ -828,15 +1028,84 @@ namespace build2
recipe re (ar != nullptr ? f (*ar, a, t, me) : ru.apply (a, t, me));
- me.free ();
+ me.free (); // Note: cur_options are still in use.
+ assert (me.cur_options != 0); // Match options cannot be 0 after apply().
+ me.cur_options_.store (me.cur_options, memory_order_relaxed);
return re;
}
- // If anything goes wrong, set target state to failed and return false.
+ static void
+ apply_posthoc_impl (
+ action a, target& t,
+ const pair<const string, reference_wrapper<const rule>>& m,
+ context::posthoc_target& pt)
+ {
+ const scope& bs (t.base_scope ());
+
+ // Apply rules in project environment.
+ //
+ auto_project_env penv;
+ if (const scope* rs = bs.root_scope ())
+ penv = auto_project_env (*rs);
+
+ const rule& ru (m.second);
+ match_extra& me (t[a].match_extra);
+ me.posthoc_prerequisite_targets = &pt.prerequisite_targets;
+
+ auto df = make_diag_frame (
+ [a, &t, &m](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while applying rule " << m.first << " to "
+ << diag_do (a, t) << " for post hoc prerequisites";
+ });
+
+ // Note: for now no adhoc_apply_posthoc().
+ //
+ ru.apply_posthoc (a, t, me);
+ }
+
+ static void
+ reapply_impl (action a,
+ target& t,
+ const pair<const string, reference_wrapper<const rule>>& m)
+ {
+ const scope& bs (t.base_scope ());
+
+ // Reapply rules in project environment.
+ //
+ auto_project_env penv;
+ if (const scope* rs = bs.root_scope ())
+ penv = auto_project_env (*rs);
+
+ const rule& ru (m.second);
+ match_extra& me (t[a].match_extra);
+ // Note: me.posthoc_prerequisite_targets carried over.
+
+ auto df = make_diag_frame (
+ [a, &t, &m](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while reapplying rule " << m.first << " to "
+ << diag_do (a, t);
+ });
+
+ // Note: for now no adhoc_reapply().
+ //
+ ru.reapply (a, t, me);
+ assert (me.cur_options != 0); // Match options cannot be 0 after reapply().
+ me.cur_options_.store (me.cur_options, memory_order_relaxed);
+ }
+
+ // If anything goes wrong, set target state to failed and return nullopt.
+ // Otherwise return the pointer to the new posthoc_target entry if any post
+ // hoc prerequisites were present or NULL otherwise. Note that the returned
+ // entry is stable (because we use a list) and should only be accessed
+  // during the match phase while holding the target lock.
//
// Note: must be called while holding target_lock.
//
- static bool
+ static optional<context::posthoc_target*>
match_posthoc (action a, target& t)
{
// The plan is to, while holding the lock, search and collect all the post
@@ -862,11 +1131,18 @@ namespace build2
// In the end, matching (and execution) "inline" (i.e., as we match/
// execute the corresponding target) appears to be unworkable in the
// face of cycles.
-
+ //
+    // Note also that this delayed match helps with allowing the rule to
+ // adjust match options of post hoc prerequisites without needing the
+ // rematch support (see match_extra::posthoc_prerequisites).
+ //
// @@ Anything we need to do for group members (see through)? Feels quite
// far-fetched.
//
- vector<const target*> pts;
+ using posthoc_target = context::posthoc_target;
+ using posthoc_prerequisite_target = posthoc_target::prerequisite_target;
+
+ vector<posthoc_prerequisite_target> pts;
try
{
for (const prerequisite& p: group_prerequisites (t))
@@ -891,14 +1167,17 @@ namespace build2
}
}
- pts.push_back (&search (t, p)); // May fail.
+ pts.push_back (
+ posthoc_prerequisite_target {
+ &search (t, p), // May fail.
+ match_extra::all_options});
}
}
}
catch (const failed&)
{
t.state[a].state = target_state::failed;
- return false;
+ return nullopt;
}
if (!pts.empty ())
@@ -906,11 +1185,11 @@ namespace build2
context& ctx (t.ctx);
mlock l (ctx.current_posthoc_targets_mutex);
- ctx.current_posthoc_targets.push_back (
- context::posthoc_target {a, t, move (pts)});
+ ctx.current_posthoc_targets.push_back (posthoc_target {a, t, move (pts)});
+ return &ctx.current_posthoc_targets.back (); // Stable.
}
- return true;
+ return nullptr;
}
// If step is true then perform only one step of the match/apply sequence.
@@ -919,10 +1198,24 @@ namespace build2
// the first half of the result.
//
static pair<bool, target_state>
- match_impl (target_lock& l,
- bool step = false,
- bool try_match = false)
+ match_impl_impl (target_lock& l,
+ uint64_t options,
+ bool step = false,
+ bool try_match = false)
{
+    // With regard to options, the semantics that we need to achieve for each
+    // target::offset_*:
+ //
+ // tried -- nothing to do (no match)
+ // touched -- set to new_options
+ // matched -- add to new_options
+ // applied -- reapply if any new options
+ // executed -- check and fail if any new options
+ // busy -- postpone until *_complete() call
+ //
+ // Note that if options is 0 (see resolve_{members,group}_impl()), then
+ // all this can be skipped.
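+    //
+    // As a rough illustration in terms of the caller-level API (opt_a and
+    // opt_b are hypothetical option bits that some rule would define):
+    //
+    //   match_sync (a, t, opt_a); // touched: match and apply with opt_a.
+    //   match_sync (a, t, opt_b); // applied: reapply if opt_b adds new bits.
+    //   ...                       // executed: requesting new bits would fail.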
+
assert (l.target != nullptr);
action a (l.action);
@@ -935,10 +1228,6 @@ namespace build2
//
if (t.adhoc_group_member ())
{
- assert (!step);
-
- const target& g (*t.group);
-
// It feels natural to "convert" this call to the one for the group,
// including the try_match part. Semantically, we want to achieve the
// following:
@@ -946,6 +1235,29 @@ namespace build2
// [try_]match (a, g);
// match_recipe (l, group_recipe);
//
+ // Currently, ad hoc group members cannot have options. An alternative
+      // semantics could be to call the group's rule to translate member
+ // options to group options and then (re)match the group with that.
+ // The implementation of this semantics could look like this:
+ //
+ // 1. Lock the group.
+ // 2. If not already offset_matched, do one step to get the rule.
+ // 3. Call the rule to translate options.
+ // 4. Continue matching the group passing the translated options.
+ // 5. Keep track of member options in member's cur_options to handle
+ // member rematches (if already offset_{applied,executed}).
+ //
+ // Note: see also similar semantics but for explicit groups in
+ // adhoc-rule-*.cxx.
+
+ assert (!step && options == match_extra::all_options);
+
+ const target& g (*t.group);
+
+      // What should we do with options? After some rumination it feels most
+ // natural to treat options for the group and for its ad hoc member as
+ // the same entity ... or not.
+ //
auto df = make_diag_frame (
[a, &t](const diag_record& dr)
{
@@ -953,14 +1265,35 @@ namespace build2
dr << info << "while matching group rule to " << diag_do (a, t);
});
- pair<bool, target_state> r (match_impl (a, g, 0, nullptr, try_match));
+ pair<bool, target_state> r (
+ match_impl (a, g, 0 /* options */, 0, nullptr, try_match));
if (r.first)
{
if (r.second != target_state::failed)
{
+ // Note: in particular, passing all_options makes sure we will
+ // never re-lock this member if already applied/executed.
+ //
match_inc_dependents (a, g);
- match_recipe (l, group_recipe);
+ match_recipe (l, group_recipe, match_extra::all_options);
+
+ // Note: no need to call match_posthoc() since an ad hoc member
+ // has no own prerequisites and the group's ones will be matched
+ // by the group.
+ }
+ else
+ {
+ // Similar to catch(failed) below.
+ //
+ s.state = target_state::failed;
+ l.offset = target::offset_applied;
+
+ // Make sure we don't relock a failed target.
+ //
+ match_extra& me (s.match_extra);
+ me.cur_options = match_extra::all_options;
+ me.cur_options_.store (me.cur_options, memory_order_relaxed);
}
}
else
@@ -993,7 +1326,8 @@ namespace build2
//
clear_target (a, t);
- const rule_match* r (match_rule (a, t, nullptr, try_match));
+ const rule_match* r (
+ match_rule_impl (a, t, options, nullptr, try_match));
assert (l.offset != target::offset_tried); // Should have failed.
@@ -1015,25 +1349,86 @@ namespace build2
// Fall through.
case target::offset_matched:
{
+ // Add any new options.
+ //
+ s.match_extra.new_options |= options;
+
// Apply.
//
set_recipe (l, apply_impl (a, t, *s.rule));
l.offset = target::offset_applied;
+
+ if (t.has_group_prerequisites ()) // Ok since already matched.
+ {
+ if (optional<context::posthoc_target*> p = match_posthoc (a, t))
+ {
+ if (*p != nullptr)
+ {
+ // It would have been more elegant to do this before calling
+ // apply_impl() and then expose the post hoc prerequisites to
+ // apply(). The problem is the group may not be resolved until
+ // the call to apply(). And so we resort to the separate
+ // apply_posthoc() function.
+ //
+ apply_posthoc_impl (a, t, *s.rule, **p);
+ }
+ }
+ else
+ s.state = target_state::failed;
+ }
+
break;
}
+ case target::offset_applied:
+ {
+ // Reapply if any new options.
+ //
+ match_extra& me (s.match_extra);
+ me.new_options = options & ~me.cur_options; // Clear existing.
+ assert (me.new_options != 0); // Otherwise should not have locked.
+
+ // Feels like this can only be a logic bug since to end up with a
+ // subset of options requires a rule (see match_extra for details).
+ //
+ assert (s.rule != nullptr);
+
+ reapply_impl (a, t, *s.rule);
+ break;
+ }
+ case target::offset_executed:
+ {
+ // Diagnose new options after execute.
+ //
+ match_extra& me (s.match_extra);
+ assert ((me.cur_options & options) != options); // Otherwise no lock.
+
+ fail << "change of match options after " << diag_do (a, t)
+ << " has been executed" <<
+ info << "executed options 0x" << hex << me.cur_options <<
+ info << "requested options 0x" << hex << options << endf;
+ }
default:
assert (false);
}
}
catch (const failed&)
{
+ s.state = target_state::failed;
+ l.offset = target::offset_applied;
+
+ // Make sure we don't relock a failed target.
+ //
+ match_extra& me (s.match_extra);
+ me.cur_options = match_extra::all_options;
+ me.cur_options_.store (me.cur_options, memory_order_relaxed);
+ }
+
+ if (s.state == target_state::failed)
+ {
// As a sanity measure clear the target data since it can be incomplete
// or invalid (mark()/unmark() should give you some ideas).
//
clear_target (a, t);
-
- s.state = target_state::failed;
- l.offset = target::offset_applied;
}
return make_pair (true, s.state);
@@ -1043,10 +1438,9 @@ namespace build2
// the first half of the result.
//
pair<bool, target_state>
- match_impl (action a,
- const target& ct,
- size_t start_count,
- atomic_count* task_count,
+ match_impl (action a, const target& ct,
+ uint64_t options,
+ size_t start_count, atomic_count* task_count,
bool try_match)
{
// If we are blocking then work our own queue one task at a time. The
@@ -1068,30 +1462,16 @@ namespace build2
ct,
task_count == nullptr
? optional<scheduler::work_queue> (scheduler::work_none)
- : nullopt));
+ : nullopt,
+ options));
if (l.target != nullptr)
{
- assert (l.offset < target::offset_applied); // Shouldn't lock otherwise.
-
if (try_match && l.offset == target::offset_tried)
return make_pair (false, target_state::unknown);
if (task_count == nullptr)
- {
- pair<bool, target_state> r (match_impl (l, false /*step*/, try_match));
-
- if (r.first &&
- r.second != target_state::failed &&
- l.offset == target::offset_applied &&
- ct.has_group_prerequisites ()) // Already matched.
- {
- if (!match_posthoc (a, *l.target))
- r.second = target_state::failed;
- }
-
- return r;
- }
+ return match_impl_impl (l, options, false /* step */, try_match);
// Pass "disassembled" lock since the scheduler queue doesn't support
// task destruction.
@@ -1101,12 +1481,18 @@ namespace build2
// Also pass our diagnostics and lock stacks (this is safe since we
// expect the caller to wait for completion before unwinding its stack).
//
- if (ct.ctx.sched.async (
+ // Note: pack captures and arguments a bit to reduce the storage space
+      // requirements.
+ //
+ bool first (ld.first);
+
+ if (ct.ctx.sched->async (
start_count,
*task_count,
- [a, try_match] (const diag_frame* ds,
- const target_lock* ls,
- target& t, size_t offset, bool first)
+ [a, try_match, first] (const diag_frame* ds,
+ const target_lock* ls,
+ target& t, size_t offset,
+ uint64_t options)
{
// Switch to caller's diag and lock stacks.
//
@@ -1120,24 +1506,15 @@ namespace build2
// Note: target_lock must be unlocked within the match phase.
//
target_lock l {a, &t, offset, first}; // Reassemble.
-
- pair<bool, target_state> r (
- match_impl (l, false /* step */, try_match));
-
- if (r.first &&
- r.second != target_state::failed &&
- l.offset == target::offset_applied &&
- t.has_group_prerequisites ()) // Already matched.
- match_posthoc (a, t);
+ match_impl_impl (l, options, false /* step */, try_match);
}
}
catch (const failed&) {} // Phase lock failure.
},
diag_frame::stack (),
target_lock::stack (),
- ref (*ld.target),
- ld.offset,
- ld.first))
+ ref (*ld.target), ld.offset,
+ options))
return make_pair (true, target_state::postponed); // Queued.
// Matched synchronously, fall through.
@@ -1155,11 +1532,39 @@ namespace build2
return ct.try_matched_state (a, false);
}
+ void
+ match_only_sync (action a, const target& t, uint64_t options)
+ {
+ assert (t.ctx.phase == run_phase::match);
+
+ target_lock l (lock_impl (a, t, scheduler::work_none, options));
+
+ if (l.target != nullptr)
+ {
+ if (l.offset != target::offset_matched)
+ {
+ if (match_impl_impl (l,
+ options,
+ true /* step */).second == target_state::failed)
+ throw failed ();
+ }
+ else
+ {
+ // If the target is already matched, then we need to add any new
+ // options but not call apply() (thus cannot use match_impl_impl()).
+ //
+ (*l.target)[a].match_extra.new_options |= options;
+ }
+ }
+ }
+
// Note: lock is a reference to avoid the stacking overhead.
//
static group_view
resolve_members_impl (action a, const target& g, target_lock&& l)
{
+ assert (a.inner ());
+
// Note that we will be unlocked if the target is already applied.
//
group_view r;
@@ -1173,11 +1578,11 @@ namespace build2
{
// Match (locked).
//
- if (match_impl (l, true /* step */).second == target_state::failed)
+ if (match_impl_impl (l,
+ 0 /* options */,
+ true /* step */).second == target_state::failed)
throw failed ();
- // Note: only matched so no call to match_posthoc().
-
if ((r = g.group_members (a)).members != nullptr)
break;
@@ -1186,43 +1591,52 @@ namespace build2
// Fall through.
case target::offset_matched:
{
- // @@ Doing match without execute messes up our target_count. Does
- // not seem like it will be easy to fix (we don't know whether
- // someone else will execute this target).
- //
- // What if we always do match & execute together? After all,
- // if a group can be resolved in apply(), then it can be
- // resolved in match()! Feels a bit drastic.
- //
- // But, this won't be a problem if the target returns noop_recipe.
- // And perhaps it's correct to fail if it's not noop_recipe but
- // nobody executed it? Maybe not.
- //
- // Another option would be to have a count for such "matched but
- // may not be executed" targets and then make sure target_count
- // is less than that at the end. Though this definitelt makes it
- // less exact (since we can end up executed this target but not
- // some other). Maybe we can increment and decrement such targets
- // in a separate count (i.e., mark their recipe as special or some
- // such).
- //
-
// Apply (locked).
//
- pair<bool, target_state> s (match_impl (l, true /* step */));
-
- if (s.second != target_state::failed &&
- g.has_group_prerequisites ()) // Already matched.
- {
- if (!match_posthoc (a, *l.target))
- s.second = target_state::failed;
- }
+ pair<bool, target_state> s (
+ match_impl_impl (l, 0 /* options */, true /* step */));
if (s.second == target_state::failed)
throw failed ();
if ((r = g.group_members (a)).members != nullptr)
+ {
+ // Doing match without execute messes up our target_count. There
+ // doesn't seem to be a clean way to solve this. Well, just always
+ // executing if we've done the match would have been clean but quite
+ // heavy-handed (it would be especially surprising if otherwise
+ // there is nothing else to do, which can happen, for example,
+ // during update-for-test when there are no tests to run).
+ //
+ // So our solution is as follows:
+ //
+ // 1. Keep track both of the targets that ended up in this situation
+ // (the target::resolve_counted flag) as well as their total
+ // count (the context::resolve_count member). Only do this if
+ // set_recipe() (called by match_impl()) would have incremented
+ // target_count.
+ //
+ // 2. If we happen to execute such a target (common case), then
+ // clear the flag and decrement the count.
+ //
+ // 3. When it's time to assert that target_count==0 (i.e., all the
+ // matched targets have been executed), check if resolve_count is
+ // 0. If it's not, then find every target with the flag set,
+ // pretend-execute it, and decrement both counts. See
+ // perform_execute() for further details on this step.
+ //
+ if (s.second != target_state::unchanged)
+ {
+ target::opstate& s (l.target->state[a]); // Inner.
+
+ if (!s.recipe_group_action)
+ {
+ s.resolve_counted = true;
+ g.ctx.resolve_count.fetch_add (1, memory_order_relaxed);
+ }
+ }
break;
+ }
// Unlock and to execute ...
//
@@ -1239,6 +1653,10 @@ namespace build2
// we would have already known the members list) and we really do need
// to execute it now.
//
+ // Note that while it might be tempting to decrement resolve_count
+ // here, there is no guarantee that we were the ones who have matched
+ // this target.
+ //
{
phase_switch ps (g.ctx, run_phase::execute);
execute_direct_sync (a, g);
@@ -1290,19 +1708,15 @@ namespace build2
// Note: lock is a reference to avoid the stacking overhead.
//
void
- resolve_group_impl (action a, const target& t, target_lock&& l)
+ resolve_group_impl (target_lock&& l)
{
- pair<bool, target_state> r (
- match_impl (l, true /* step */, true /* try_match */));
+ assert (l.action.inner ());
- if (r.first &&
- r.second != target_state::failed &&
- l.offset == target::offset_applied &&
- t.has_group_prerequisites ()) // Already matched.
- {
- if (!match_posthoc (a, *l.target))
- r.second = target_state::failed;
- }
+ pair<bool, target_state> r (
+ match_impl_impl (l,
+ 0 /* options */,
+ true /* step */,
+ true /* try_match */));
l.unlock ();
@@ -1315,16 +1729,33 @@ namespace build2
match_prerequisite_range (action a, target& t,
R&& r,
const S& ms,
- const scope* s)
+ const scope* s,
+ bool search_only)
{
auto& pts (t.prerequisite_targets[a]);
+ size_t i (pts.size ()); // Index of the first to be added.
+
+ // Avoid duplicating fsdir{} that may have already been injected by
+ // inject_fsdir() (in which case it is expected to be first).
+ //
+ const target* dir (nullptr);
+ if (i != 0)
+ {
+ const prerequisite_target& pt (pts.front ());
+
+ if (pt.target != nullptr && pt.adhoc () && pt.target->is_a<fsdir> ())
+ dir = pt.target;
+ }
+
// Start asynchronous matching of prerequisites. Wait with unlocked phase
// to allow phase switching.
//
- wait_guard wg (t.ctx, t.ctx.count_busy (), t[a].task_count, true);
+ wait_guard wg (
+ search_only
+ ? wait_guard ()
+ : wait_guard (t.ctx, t.ctx.count_busy (), t[a].task_count, true));
- size_t i (pts.size ()); // Index of the first to be added.
for (auto&& p: forward<R> (r))
{
// Ignore excluded.
@@ -1338,13 +1769,20 @@ namespace build2
? ms (a, t, p, pi)
: prerequisite_target (&search (t, p), pi));
- if (pt.target == nullptr || (s != nullptr && !pt.target->in (*s)))
+ if (pt.target == nullptr ||
+ pt.target == dir ||
+ (s != nullptr && !pt.target->in (*s)))
continue;
- match_async (a, *pt.target, t.ctx.count_busy (), t[a].task_count);
+ if (!search_only)
+ match_async (a, *pt.target, t.ctx.count_busy (), t[a].task_count);
+
pts.push_back (move (pt));
}
+ if (search_only)
+ return;
+
wg.wait ();
// Finish matching all the targets that we have started.
@@ -1359,17 +1797,27 @@ namespace build2
void
match_prerequisites (action a, target& t,
const match_search& ms,
- const scope* s)
+ const scope* s,
+ bool search_only)
{
- match_prerequisite_range (a, t, group_prerequisites (t), ms, s);
+ match_prerequisite_range (a, t,
+ group_prerequisites (t),
+ ms,
+ s,
+ search_only);
}
void
match_prerequisite_members (action a, target& t,
const match_search_member& msm,
- const scope* s)
+ const scope* s,
+ bool search_only)
{
- match_prerequisite_range (a, t, group_prerequisite_members (a, t), msm, s);
+ match_prerequisite_range (a, t,
+ group_prerequisite_members (a, t),
+ msm,
+ s,
+ search_only);
}
void
@@ -1446,7 +1894,7 @@ namespace build2
}
const fsdir*
- inject_fsdir (action a, target& t, bool prereq, bool parent)
+ inject_fsdir_impl (target& t, bool prereq, bool parent)
{
tracer trace ("inject_fsdir");
@@ -1467,6 +1915,7 @@ namespace build2
// subprojects (e.g., tests/).
//
const fsdir* r (nullptr);
+
if (rs != nullptr && !d.sub (rs->src_path ()))
{
l6 ([&]{trace << d << " for " << t;});
@@ -1494,13 +1943,45 @@ namespace build2
}
}
+ return r;
+ }
+
+ const fsdir*
+ inject_fsdir (action a, target& t, bool match, bool prereq, bool parent)
+ {
+ auto& pts (t.prerequisite_targets[a]);
+
+ assert (!prereq || pts.empty ()); // This prerequisite target must be first.
+
+ const fsdir* r (inject_fsdir_impl (t, prereq, parent));
+
if (r != nullptr)
{
+ if (match)
+ match_sync (a, *r);
+
// Make it ad hoc so that it doesn't end up in prerequisite_targets
// after execution.
//
- match_sync (a, *r);
- t.prerequisite_targets[a].emplace_back (r, include_type::adhoc);
+ pts.emplace_back (r, include_type::adhoc);
+ }
+
+ return r;
+ }
+
+ const fsdir*
+ inject_fsdir_direct (action a, target& t, bool prereq, bool parent)
+ {
+ auto& pts (t.prerequisite_targets[a]);
+
+ assert (!prereq || pts.empty ()); // This prerequisite target must be first.
+
+ const fsdir* r (inject_fsdir_impl (t, prereq, parent));
+
+ if (r != nullptr)
+ {
+ match_direct_sync (a, *r);
+ pts.emplace_back (r, include_type::adhoc);
}
return r;
@@ -1821,8 +2302,7 @@ namespace build2
try_mkdir (to);
- for (const auto& de:
- dir_iterator (fr, false /* ignore_dangling */))
+ for (const auto& de: dir_iterator (fr, dir_iterator::no_follow))
{
path f (fr / de.path ());
path t (to / de.path ());
@@ -2370,7 +2850,6 @@ namespace build2
// s.recipe_group_action may be used further (see, for example,
// group_state()) and should retain its value.
//
- //
if (!s.recipe_keep)
s.recipe = nullptr;
@@ -2380,7 +2859,17 @@ namespace build2
// postponment logic (see excute_recipe() for details).
//
if (a.inner () && !s.recipe_group_action)
+ {
+ // See resolve_members_impl() for background.
+ //
+ if (s.resolve_counted)
+ {
+ s.resolve_counted = false;
+ ctx.resolve_count.fetch_sub (1, memory_order_relaxed);
+ }
+
ctx.target_count.fetch_sub (1, memory_order_relaxed);
+ }
// Decrement the task count (to count_executed) and wake up any threads
// that might be waiting for this target.
@@ -2389,7 +2878,7 @@ namespace build2
target::offset_busy - target::offset_executed,
memory_order_release));
assert (tc == ctx.count_busy ());
- ctx.sched.resume (s.task_count);
+ ctx.sched->resume (s.task_count);
return ts;
}
@@ -2400,6 +2889,8 @@ namespace build2
size_t start_count,
atomic_count* task_count)
{
+ // NOTE: see also pretend_execute lambda in perform_execute().
+
target& t (const_cast<target&> (ct)); // MT-aware.
target::opstate& s (t[a]);
@@ -2460,7 +2951,7 @@ namespace build2
: s.state;
s.task_count.store (exec, memory_order_release);
- ctx.sched.resume (s.task_count);
+ ctx.sched->resume (s.task_count);
}
else
{
@@ -2471,15 +2962,15 @@ namespace build2
// Pass our diagnostics stack (this is safe since we expect the
// caller to wait for completion before unwinding its diag stack).
//
- if (ctx.sched.async (start_count,
- *task_count,
- [a] (const diag_frame* ds, target& t)
- {
- diag_frame::stack_guard dsg (ds);
- execute_impl (a, t);
- },
- diag_frame::stack (),
- ref (t)))
+ if (ctx.sched->async (start_count,
+ *task_count,
+ [a] (const diag_frame* ds, target& t)
+ {
+ diag_frame::stack_guard dsg (ds);
+ execute_impl (a, t);
+ },
+ diag_frame::stack (),
+ ref (t)))
return target_state::unknown; // Queued.
// Executed synchronously, fall through.
@@ -2528,15 +3019,15 @@ namespace build2
r = execute_impl (a, t);
else
{
- if (ctx.sched.async (start_count,
- *task_count,
- [a] (const diag_frame* ds, target& t)
- {
- diag_frame::stack_guard dsg (ds);
- execute_impl (a, t);
- },
- diag_frame::stack (),
- ref (t)))
+ if (ctx.sched->async (start_count,
+ *task_count,
+ [a] (const diag_frame* ds, target& t)
+ {
+ diag_frame::stack_guard dsg (ds);
+ execute_impl (a, t);
+ },
+ diag_frame::stack (),
+ ref (t)))
return target_state::unknown; // Queued.
// Executed synchronously, fall through.
@@ -2552,7 +3043,7 @@ namespace build2
: s.state;
s.task_count.store (exec, memory_order_release);
- ctx.sched.resume (s.task_count);
+ ctx.sched->resume (s.task_count);
}
}
else
@@ -2569,6 +3060,8 @@ namespace build2
bool
update_during_match (tracer& trace, action a, const target& t, timestamp ts)
{
+ // NOTE: see also clean_during_match() if changing anything here.
+
assert (a == perform_update_id);
// Note: this function is used to make sure header dependencies are up to
@@ -2640,6 +3133,11 @@ namespace build2
action a, target& t,
uintptr_t mask)
{
+ // NOTE: see also clean_during_match_prerequisites() if changing anything
+ // here.
+
+ assert (a == perform_update_id);
+
prerequisite_targets& pts (t.prerequisite_targets[a]);
     // On the first pass detect and handle unchanged targets. Note that we
@@ -2650,7 +3148,7 @@ namespace build2
for (prerequisite_target& p: pts)
{
- if ((p.include & mask) != 0)
+ if (mask == 0 || (p.include & mask) != 0)
{
if (p.target != nullptr)
{
@@ -2697,7 +3195,7 @@ namespace build2
#if 0
for (prerequisite_target& p: pts)
{
- if ((p.include & mask) != 0 && p.data != 0)
+ if ((mask == 0 || (p.include & mask) != 0) && p.data != 0)
{
const target& pt (*p.target);
@@ -2732,7 +3230,7 @@ namespace build2
for (prerequisite_target& p: pts)
{
- if ((p.include & mask) != 0 && p.data != 0)
+ if ((mask == 0 || (p.include & mask) != 0) && p.data != 0)
{
execute_direct_async (a, *p.target, busy, tc);
}
@@ -2744,7 +3242,7 @@ namespace build2
//
for (prerequisite_target& p: pts)
{
- if ((p.include & mask) != 0 && p.data != 0)
+ if ((mask == 0 || (p.include & mask) != 0) && p.data != 0)
{
const target& pt (*p.target);
target_state ns (execute_complete (a, pt));
@@ -2766,6 +3264,188 @@ namespace build2
return r;
}
+ bool
+ clean_during_match (tracer& trace, action a, const target& t)
+ {
+ // Let's keep this as close to update_during_match() semantically as
+ // possible until we see a clear reason to deviate.
+
+ // We have a problem with fsdir{}: if the directory is not empty because
+ // there are other targets that depend on it and we execute it here and
+ // now, it will not remove the directory (because it's not yet empty) but
+ // will cause the target to be in the executed state, which means that
+ // when other targets try to execute it, it will be a noop and the
+ // directory will be left behind.
+
+ assert (a == perform_clean_id && !t.is_a<fsdir> ());
+
+ target_state os (t.matched_state (a));
+
+ if (os == target_state::unchanged)
+ return false;
+ else
+ {
+ target_state ns;
+ if (os != target_state::changed)
+ {
+ phase_switch ps (t.ctx, run_phase::execute);
+ ns = execute_direct_sync (a, t);
+ }
+ else
+ ns = os;
+
+ if (ns != os && ns != target_state::unchanged)
+ {
+ l6 ([&]{trace << "cleaned " << t
+ << "; old state " << os
+ << "; new state " << ns;});
+ return true;
+ }
+ else
+ return false;
+ }
+ }
+
+ bool
+ clean_during_match_prerequisites (tracer& trace,
+ action a, target& t,
+ uintptr_t mask)
+ {
+ // Let's keep this as close to update_during_match_prerequisites()
+ // semantically as possible until we see a clear reason to deviate.
+ //
+ // Currently the only substantial change is the reverse iteration order.
+
+ assert (a == perform_clean_id);
+
+ prerequisite_targets& pts (t.prerequisite_targets[a]);
+
+    // On the first pass detect and handle unchanged targets. Note that we
+ // have to do it in a separate pass since we cannot call matched_state()
+ // once we've switched the phase.
+ //
+ size_t n (0);
+
+ for (prerequisite_target& p: pts)
+ {
+ if (mask == 0 || (p.include & mask) != 0)
+ {
+ if (p.target != nullptr)
+ {
+ const target& pt (*p.target);
+
+ assert (!pt.is_a<fsdir> ()); // See above.
+
+ target_state os (pt.matched_state (a));
+
+ if (os != target_state::unchanged)
+ {
+ ++n;
+ p.data = static_cast<uintptr_t> (os);
+ continue;
+ }
+ }
+
+ p.data = 0;
+ }
+ }
+
+ // If all unchanged, we are done.
+ //
+ if (n == 0)
+ return false;
+
+ // Provide additional information on what's going on.
+ //
+ auto df = make_diag_frame (
+ [&t](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while cleaning during match prerequisites of "
+ << "target " << t;
+ });
+
+ context& ctx (t.ctx);
+
+ phase_switch ps (ctx, run_phase::execute);
+
+ bool r (false);
+
+ // @@ Maybe we should optimize for n == 1? Maybe we should just call
+ // smarter clean_during_match() in this case?
+ //
+#if 0
+ for (prerequisite_target& p: reverse_iterate (pts))
+ {
+ if ((mask == 0 || (p.include & mask) != 0) && p.data != 0)
+ {
+ const target& pt (*p.target);
+
+ target_state os (static_cast<target_state> (p.data));
+ target_state ns (execute_direct_sync (a, pt));
+
+ if (ns != os && ns != target_state::unchanged)
+ {
+ l6 ([&]{trace << "cleaned " << pt
+ << "; old state " << os
+ << "; new state " << ns;});
+ r = true;
+ }
+
+ p.data = 0;
+ }
+ }
+#else
+
+ // Start asynchronous execution of prerequisites. Similar logic to
+ // straight_execute_members().
+ //
+ // Note that the target's task count is expected to be busy (since this
+ // function is called during match). And there don't seem to be any
+ // problems in using it for execute.
+ //
+ atomic_count& tc (t[a].task_count);
+
+ size_t busy (ctx.count_busy ());
+
+ wait_guard wg (ctx, busy, tc);
+
+ for (prerequisite_target& p: reverse_iterate (pts))
+ {
+ if ((mask == 0 || (p.include & mask) != 0) && p.data != 0)
+ {
+ execute_direct_async (a, *p.target, busy, tc);
+ }
+ }
+
+ wg.wait ();
+
+ // Finish execution and process the result.
+ //
+ for (prerequisite_target& p: reverse_iterate (pts))
+ {
+ if ((mask == 0 || (p.include & mask) != 0) && p.data != 0)
+ {
+ const target& pt (*p.target);
+ target_state ns (execute_complete (a, pt));
+ target_state os (static_cast<target_state> (p.data));
+
+ if (ns != os && ns != target_state::unchanged)
+ {
+ l6 ([&]{trace << "cleaned " << pt
+ << "; old state " << os
+ << "; new state " << ns;});
+ r = true;
+ }
+
+ p.data = 0;
+ }
+ }
+#endif
+
+ return r;
+ }
+
static inline void
blank_adhoc_member (const target*&)
{
@@ -3097,7 +3777,7 @@ namespace build2
target_state gs (execute_impl (a, g, 0, nullptr));
if (gs == target_state::busy)
- ctx.sched.wait (ctx.count_executed (),
+ ctx.sched->wait (ctx.count_executed (),
g[a].task_count,
scheduler::work_none);
diff --git a/libbuild2/algorithm.hxx b/libbuild2/algorithm.hxx
index 9a6a56b..a4feaea 100644
--- a/libbuild2/algorithm.hxx
+++ b/libbuild2/algorithm.hxx
@@ -79,7 +79,7 @@ namespace build2
search_locked (const target&, const target_type&, const prerequisite_key&);
const target*
- search_exsiting (context&, const target_type&, const prerequisite_key&);
+ search_existing (context&, const target_type&, const prerequisite_key&);
const target&
search_new (context&, const target_type&, const prerequisite_key&);
@@ -164,15 +164,13 @@ namespace build2
// argument.
//
LIBBUILD2_SYMEXPORT const target&
- search (const target&, name, const scope&, const target_type* = nullptr);
+ search (const target&, name&&, const scope&, const target_type* = nullptr);
- // Note: returns NULL for unknown target types. Note that unlike the above
- // version, these ones can be called during the load and execute phases.
+ // Note: returns NULL for unknown target types. Note also that unlike the
+ // above version, these can be called during the load and execute phases.
//
LIBBUILD2_SYMEXPORT const target*
- search_existing (const name&,
- const scope&,
- const dir_path& out = dir_path ());
+ search_existing (const name&, const scope&, const dir_path& out = dir_path ());
LIBBUILD2_SYMEXPORT const target*
search_existing (const names&, const scope&);
@@ -203,8 +201,8 @@ namespace build2
// Movable-only type with move-assignment only to NULL lock.
//
target_lock () = default;
- target_lock (target_lock&&);
- target_lock& operator= (target_lock&&);
+ target_lock (target_lock&&) noexcept;
+ target_lock& operator= (target_lock&&) noexcept;
target_lock (const target_lock&) = delete;
target_lock& operator= (const target_lock&) = delete;
@@ -257,10 +255,10 @@ namespace build2
// If the target is already applied (for this action) or executed, then no
// lock is acquired. Otherwise, unless matched is true, the target must not
- // be matched but not yet applied for this action (and if that's the case
- // and matched is true, then you get a locked target that you should
- // probably check for consistency, for exmaple, by comparing the matched
- // rule).
+ // be in the matched but not yet applied state for this action (and if
+ // that's the case and matched is true, then you get a locked target that
+ // you should probably check for consistency, for example, by comparing the
+ // matched rule).
//
// @@ MT fuzzy: what if it is already in the desired state, why assert?
// Currently we only use it with match_recipe/rule() and if it is matched
@@ -276,21 +274,27 @@ namespace build2
//
// Note that here and in find_adhoc_member() below (as well as in
// perform_clean_extra()) we use target type (as opposed to, say, type and
- // name) as the member's identity. This fits our current needs where every
+ // name) as the member's identity. This fits common needs where every
// (rule-managed) ad hoc member has a unique target type and we have no need
// for multiple members of the same type. This also allows us to support
// things like changing the ad hoc member name by declaring it in a
- // buildfile.
+ // buildfile. However, if this semantics is not appropriate, use the
+ // add_adhoc_member_identity() version below.
+ //
+ // Note that the current implementation asserts if the member target already
+ // exists but is not already a member.
//
LIBBUILD2_SYMEXPORT target&
add_adhoc_member (target&,
const target_type&,
dir_path dir,
dir_path out,
- string name);
+ string name,
+ optional<string> ext);
// If the extension is specified then it is added to the member's target
- // name.
+ // name as a second-level extension (the first-level extension, if any,
+ // comes from the target type).
//
target&
add_adhoc_member (target&, const target_type&, const char* ext = nullptr);
@@ -309,6 +313,24 @@ namespace build2
return add_adhoc_member<T> (g, T::static_type, e);
}
+ // Add an ad hoc member using the member identity (as opposed to only its
+  // type as in add_adhoc_member() above) to suppress duplicates. See also
+ // dyndep::inject_adhoc_group_member().
+ //
+ // Return the member target as well as an indication of whether it was added
+ // or was already a member. Fail if the member target already exists but is
+ // not a member since it's not possible to make it a member in an MT-safe
+ // manner.
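+  //
+  // A usage sketch (g, tt, dir, out, and n are hypothetical values computed
+  // by a rule for a dynamically discovered member):
+  //
+  //   pair<target&, bool> r (
+  //     add_adhoc_member_identity (
+  //       g, tt, move (dir), move (out), move (n), nullopt));
+  //
+  //   if (r.second)
+  //     ... // Newly added (as opposed to already being a member).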
+ //
+ LIBBUILD2_SYMEXPORT pair<target&, bool>
+ add_adhoc_member_identity (target&,
+ const target_type&,
+ dir_path dir,
+ dir_path out,
+ string name,
+ optional<string> ext,
+ const location& = location ());
+
// Find an ad hoc member of the specified target type returning NULL if not
// found.
//
@@ -363,24 +385,34 @@ namespace build2
// to be unchanged after match. If it is unmatch::safe, then unmatch the
// target if it is safe (this includes unchanged or if we know that someone
// else will execute this target). Return true in first half of the pair if
- // unmatch succeeded. Always throw if failed.
+ // unmatch succeeded. Always throw if failed. Note that unmatching may not
+ // play well with options -- if unmatch succeeds, the options that have been
+ // passed to match will not be cleared.
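+  //
+  // For example (a sketch; pt and opts are hypothetical), a rule that matches
+  // a prerequisite only to query information from it could try to unmatch it
+  // afterwards:
+  //
+  //   pair<bool, target_state> r (match_sync (a, pt, unmatch::safe, opts));
+  //
+  //   if (!r.first)
+  //     ... // Could not unmatch: pt will also have to be executed.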
//
enum class unmatch {none, unchanged, safe};
target_state
- match_sync (action, const target&, bool fail = true);
+ match_sync (action, const target&,
+ uint64_t options = match_extra::all_options,
+ bool fail = true);
pair<bool, target_state>
- try_match_sync (action, const target&, bool fail = true);
+ try_match_sync (action, const target&,
+ uint64_t options = match_extra::all_options,
+ bool fail = true);
pair<bool, target_state>
- match_sync (action, const target&, unmatch);
+ match_sync (action, const target&,
+ unmatch,
+ uint64_t options = match_extra::all_options);
- // As above but without incrementing the target's dependents count. Should
- // be executed with execute_direct_*().
+ // As above but only match the target (unless already matched) without
+ // applying the match (which is normally done with match_sync()). You will
+ // most likely regret using this function.
//
- target_state
- match_direct_sync (action, const target&, bool fail = true);
+ LIBBUILD2_SYMEXPORT void
+ match_only_sync (action, const target&,
+ uint64_t options = match_extra::all_options);
// Start asynchronous match. Return target_state::postponed if the
// asynchronous operation has been started and target_state::busy if the
@@ -392,28 +424,60 @@ namespace build2
// failed. Otherwise, throw the failed exception if keep_going is false and
// return target_state::failed otherwise.
//
+ // Note: same options must be passed to match_async() and match_complete().
+ //
target_state
match_async (action, const target&,
size_t start_count, atomic_count& task_count,
+ uint64_t options = match_extra::all_options,
bool fail = true);
target_state
- match_complete (action, const target&, bool fail = true);
+ match_complete (action, const target&,
+ uint64_t options = match_extra::all_options,
+ bool fail = true);
pair<bool, target_state>
- match_complete (action, const target&, unmatch);
+ match_complete (action, const target&,
+ unmatch,
+ uint64_t options = match_extra::all_options);
+
+ // As above but without incrementing the target's dependents count. Should
+ // be executed with execute_direct_*().
+ //
+ // For async, call match_async() followed by match_direct_complete().
+ //
+ target_state
+ match_direct_sync (action, const target&,
+ uint64_t options = match_extra::all_options,
+ bool fail = true);
+
+ target_state
+ match_direct_complete (action, const target&,
+ uint64_t options = match_extra::all_options,
+ bool fail = true);
// Apply the specified recipe directly and without incrementing the
- // dependency counts. The target must be locked.
+ // dependency counts. The target must be locked (and it remains locked
+ // after this function returns).
+ //
+ // Note that there will be no way to rematch on options change (since there
+ // is no rule), so passing anything other than all_options is most likely a
+ // bad idea. Passing 0 for options is illegal.
//
void
- match_recipe (target_lock&, recipe);
+ match_recipe (target_lock&,
+ recipe,
+ uint64_t options = match_extra::all_options);
// Match (but do not apply) the specified rule directly and without
- // incrementing the dependency counts. The target must be locked.
+ // incrementing the dependency counts. The target must be locked (and it
+ // remains locked after this function returns).
//
void
- match_rule (target_lock&, const rule_match&);
+ match_rule (target_lock&,
+ const rule_match&,
+ uint64_t options = match_extra::all_options);
// Match a "delegate rule" from withing another rules' apply() function
// avoiding recursive matches (thus the third argument). Unless try_match is
@@ -422,7 +486,10 @@ namespace build2
// See also the companion execute_delegate().
//
recipe
- match_delegate (action, target&, const rule&, bool try_match = false);
+ match_delegate (action, target&,
+ const rule&,
+ uint64_t options = match_extra::all_options,
+ bool try_match = false);
// Incrementing the dependency counts of the specified target.
//
@@ -430,13 +497,43 @@ namespace build2
match_inc_dependents (action, const target&);
   // Match (synchronously) a rule for the inner operation from within the
- // outer rule's apply() function. See also the companion execute_inner().
+ // outer rule's apply() function. See also the companion execute_inner()
+ // and inner_recipe.
//
target_state
- match_inner (action, const target&);
+ match_inner (action, const target&,
+ uint64_t options = match_extra::all_options);
pair<bool, target_state>
- match_inner (action, const target&, unmatch);
+ match_inner (action, const target&,
+ unmatch,
+ uint64_t options = match_extra::all_options);
+
+ // Re-match with new options a target that has already been matched with one
+  // of the match_*() functions. Note that naturally you cannot rematch a
+  // of the match_*() functions. Note that naturally you cannot rematch a
+ //
+ // Note also that there is no way to check if the rematch is unnecessary
+ // (i.e., because the target is already matched with this option) because
+ // that would require MT-safety considerations (since there could be a
+ // concurrent rematch). Instead, you should rematch unconditionally and if
+ // the option is already present, it will be a cheap noop.
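+  //
+  // For example (a sketch; pt is a hypothetical prerequisite this rule has
+  // already matched and opt_extra is a hypothetical option bit):
+  //
+  //   rematch_sync (a, pt, opt_extra); // Cheap noop if already in effect.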
+ //
+ target_state
+ rematch_sync (action, const target&,
+ uint64_t options,
+ bool fail = true);
+
+ target_state
+ rematch_async (action, const target&,
+ size_t start_count, atomic_count& task_count,
+ uint64_t options,
+ bool fail = true);
+
+ target_state
+ rematch_complete (action, const target&,
+ uint64_t options,
+ bool fail = true);
// The standard prerequisite search and match implementations. They call
// search() (unless a custom is provided) and then match() (unless custom
@@ -466,6 +563,19 @@ namespace build2
void
match_prerequisites (action, target&, const match_search& = nullptr);
+ // As above but only do search. The match part can be performed later, for
+ // example, with the match_members() function below. The typical call
+ // sequence would be:
+ //
+ // inject_fsdir (a, t, false /* match */);
+ // search_prerequisite_members (a, t); // Potentially with filter.
+ // pattern->apply_prerequisites (a, t, bs, me); // If ad hoc pattern.
+ // <dependency synthesis> // Optional.
+ // match_members (a, t, t.prerequisite_targets[a]);
+ //
+ void
+ search_prerequisites (action, target&, const match_search& = nullptr);
+
// As above but go into group members.
//
// Note that if we are cleaning, this function doesn't go into group
@@ -481,14 +591,24 @@ namespace build2
match_prerequisite_members (action, target&,
const match_search_member& = nullptr);
+ void
+ search_prerequisite_members (action, target&,
+ const match_search_member& = nullptr);
+
// As above but omit prerequisites that are not in the specified scope.
//
void
match_prerequisites (action, target&, const scope&);
void
+ search_prerequisites (action, target&, const scope&);
+
+ void
match_prerequisite_members (action, target&, const scope&);
+ void
+ search_prerequisite_members (action, target&, const scope&);
+
// Match (already searched) members of a group or similar prerequisite-like
// dependencies. Similar in semantics to match_prerequisites(). Any marked
// target pointers are skipped.
@@ -508,9 +628,9 @@ namespace build2
// ((prerequisite_target::include & mask) == value) condition.
//
LIBBUILD2_SYMEXPORT void
- match_members (action a,
- const target& t,
- prerequisite_targets& ts,
+ match_members (action,
+ const target&,
+ prerequisite_targets&,
size_t start = 0,
pair<uintptr_t, uintptr_t> include = {0, 0});
@@ -552,17 +672,35 @@ namespace build2
// Inject dependency on the target's directory fsdir{}, unless it is in the
// src tree or is outside of any project (say, for example, an installation
// directory). If the parent argument is true, then inject the parent
- // directory of a target that is itself a directory (name is empty). Return
- // the injected target or NULL. Normally this function is called from the
- // rule's apply() function.
+ // directory of a target that is itself a directory (name is empty). Match
+ // unless match is false and return the injected target or NULL. Normally
+ // this function is called from the rule's apply() function.
+ //
+ // The match=false semantics is useful when you wish to first collect all
+  // the prerequisite targets and then match them all as a separate step, for
+ // example, with match_members().
//
// As an extension, unless prereq is false, this function will also search
// for an existing fsdir{} prerequisite for the directory and if one exists,
- // return that (even if the target is in src tree). This can be used, for
- // example, to place output into an otherwise non-existent directory.
+ // return that (even if the target is in the src tree). In this case, the
+ // injected fsdir{} (if any) must be the first prerequisite in this target's
+ // prerequisite_targets, which is relied upon by the match_prerequisite*()
+  // family of functions to suppress the duplicate addition.
+ //
+  // Note that the explicit fsdir{} prerequisite is used to place output into an
+ // otherwise non-existent (in src) directory.
+ //
+ LIBBUILD2_SYMEXPORT const fsdir*
+ inject_fsdir (action, target&,
+ bool match = true,
+ bool prereq = true,
+ bool parent = true);
+
+ // As above, but match the injected fsdir{} target directly (that is,
+ // without incrementing the dependency counts).
//
LIBBUILD2_SYMEXPORT const fsdir*
- inject_fsdir (action, target&, bool prereq = true, bool parent = true);
+ inject_fsdir_direct (action, target&, bool prereq = true, bool parent = true);
// Execute the action on target, assuming a rule has been matched and the
// recipe for this action has been set. This is the synchrounous executor
@@ -604,7 +742,8 @@ namespace build2
// Note that the returned target state is for the inner operation. The
// appropriate usage is to call this function from the outer operation's
// recipe and to factor the obtained state into the one returned (similar to
- // how we do it for prerequisites).
+ // how we do it for prerequisites). Or, if factoring is not needed, simply
+ // return inner_recipe as outer recipe.
//
// Note: waits for the completion if the target is busy and translates
// target_state::failed to the failed exception.
@@ -635,7 +774,8 @@ namespace build2
//
// Note that such a target must still be updated normally during the execute
// phase in order to keep the dependency counts straight (at which point the
- // target state/timestamp will be re-incorporated into the result).
+ // target state/timestamp will be re-incorporated into the result). Unless
+  // it was matched directly.
//
LIBBUILD2_SYMEXPORT bool
update_during_match (tracer&,
@@ -644,7 +784,7 @@ namespace build2
// As above, but update all the targets in prerequisite_targets that have
// the specified mask in prerequisite_target::include. Return true if any of
- // them have changed.
+ // them have changed. If mask is 0, then update all the targets.
//
// Note that this function spoils prerequisite_target::data (which is used
// for temporary storage). But it resets data to 0 once done.
@@ -655,6 +795,25 @@ namespace build2
action, target&,
uintptr_t mask = prerequisite_target::include_udm);
+ // Equivalent functions for clean. Note that if possible you should leave
+ // cleaning to normal execute and these functions should only be used in
+ // special cases where this is not possible.
+ //
+ // Note also that neither function should be called on fsdir{} since it's
+ // hard to guarantee such an execution won't be too early (see the
+ // implementation for details). If you do need to clean fsdir{} during
+ // match, use fsdir_rule::perform_clean_direct() instead.
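+  //
+  // A hypothetical sketch of calling the prerequisites variant from a rule's
+  // apply() (passing 0 for mask covers all prerequisites, as in the update
+  // variant above):
+  //
+  //   if (a == perform_clean_id)
+  //     clean_during_match_prerequisites (trace, a, t, 0 /* mask */);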
+ //
+ LIBBUILD2_SYMEXPORT bool
+ clean_during_match (tracer&,
+ action, const target&);
+
+ LIBBUILD2_SYMEXPORT bool
+ clean_during_match_prerequisites (
+ tracer&,
+ action, target&,
+ uintptr_t mask = prerequisite_target::include_udm);
+
// The default prerequisite execute implementation. Call execute_async() on
// each non-ignored (non-NULL) prerequisite target in a loop and then wait
// for their completion. Return target_state::changed if any of them were
diff --git a/libbuild2/algorithm.ixx b/libbuild2/algorithm.ixx
index 3c6c2fa..836dbed 100644
--- a/libbuild2/algorithm.ixx
+++ b/libbuild2/algorithm.ixx
@@ -46,7 +46,7 @@ namespace build2
}
inline const target*
- search_exsiting (context& ctx,
+ search_existing (context& ctx,
const target_type& tt,
const prerequisite_key& k)
{
@@ -214,7 +214,9 @@ namespace build2
}
LIBBUILD2_SYMEXPORT target_lock
- lock_impl (action, const target&, optional<scheduler::work_queue>);
+ lock_impl (action, const target&,
+ optional<scheduler::work_queue>,
+ uint64_t = 0);
LIBBUILD2_SYMEXPORT void
unlock_impl (action, target&, size_t);
@@ -284,7 +286,7 @@ namespace build2
}
inline target_lock::
- target_lock (target_lock&& x)
+ target_lock (target_lock&& x) noexcept
: action (x.action), target (x.target), offset (x.offset)
{
if (target != nullptr)
@@ -304,7 +306,7 @@ namespace build2
}
inline target_lock& target_lock::
- operator= (target_lock&& x)
+ operator= (target_lock&& x) noexcept
{
if (this != &x)
{
@@ -372,7 +374,7 @@ namespace build2
n += e;
}
- return add_adhoc_member (t, tt, t.dir, t.out, move (n));
+ return add_adhoc_member (t, tt, t.dir, t.out, move (n), nullopt /* ext */);
}
inline target*
@@ -392,13 +394,18 @@ namespace build2
}
LIBBUILD2_SYMEXPORT const rule_match*
- match_rule (action, target&, const rule* skip, bool try_match = false);
+ match_rule_impl (action, target&,
+ uint64_t options,
+ const rule* skip,
+ bool try_match = false,
+ match_extra* = nullptr);
LIBBUILD2_SYMEXPORT recipe
apply_impl (action, target&, const rule_match&);
LIBBUILD2_SYMEXPORT pair<bool, target_state>
match_impl (action, const target&,
+ uint64_t options,
size_t, atomic_count*,
bool try_match = false);
@@ -410,11 +417,11 @@ namespace build2
}
inline target_state
- match_sync (action a, const target& t, bool fail)
+ match_sync (action a, const target& t, uint64_t options, bool fail)
{
assert (t.ctx.phase == run_phase::match);
- target_state r (match_impl (a, t, 0, nullptr).second);
+ target_state r (match_impl (a, t, options, 0, nullptr).second);
if (r != target_state::failed)
match_inc_dependents (a, t);
@@ -424,26 +431,13 @@ namespace build2
return r;
}
- inline target_state
- match_direct_sync (action a, const target& t, bool fail)
- {
- assert (t.ctx.phase == run_phase::match);
-
- target_state r (match_impl (a, t, 0, nullptr).second);
-
- if (r == target_state::failed && fail)
- throw failed ();
-
- return r;
- }
-
inline pair<bool, target_state>
- try_match_sync (action a, const target& t, bool fail)
+ try_match_sync (action a, const target& t, uint64_t options, bool fail)
{
assert (t.ctx.phase == run_phase::match);
pair<bool, target_state> r (
- match_impl (a, t, 0, nullptr, true /* try_match */));
+ match_impl (a, t, options, 0, nullptr, true /* try_match */));
if (r.first)
{
@@ -457,11 +451,11 @@ namespace build2
}
inline pair<bool, target_state>
- match_sync (action a, const target& t, unmatch um)
+ match_sync (action a, const target& t, unmatch um, uint64_t options)
{
assert (t.ctx.phase == run_phase::match);
- target_state s (match_impl (a, t, 0, nullptr).second);
+ target_state s (match_impl (a, t, options, 0, nullptr).second);
if (s == target_state::failed)
throw failed ();
@@ -488,7 +482,7 @@ namespace build2
// cannot change their mind).
//
if ((s == target_state::unchanged && t.group == nullptr) ||
- t[a].dependents.load (memory_order_consume) != 0)
+ t[a].dependents.load (memory_order_relaxed) != 0)
return make_pair (true, s);
break;
@@ -502,12 +496,13 @@ namespace build2
inline target_state
match_async (action a, const target& t,
size_t sc, atomic_count& tc,
+ uint64_t options,
bool fail)
{
context& ctx (t.ctx);
assert (ctx.phase == run_phase::match);
- target_state r (match_impl (a, t, sc, &tc).second);
+ target_state r (match_impl (a, t, options, sc, &tc).second);
if (r == target_state::failed && fail && !ctx.keep_going)
throw failed ();
@@ -516,25 +511,49 @@ namespace build2
}
inline target_state
- match_complete (action a, const target& t, bool fail)
+ match_complete (action a, const target& t, uint64_t options, bool fail)
{
- return match_sync (a, t, fail);
+ return match_sync (a, t, options, fail);
}
inline pair<bool, target_state>
- match_complete (action a, const target& t, unmatch um)
+ match_complete (action a, const target& t, unmatch um, uint64_t options)
+ {
+ return match_sync (a, t, um, options);
+ }
+
+ inline target_state
+ match_direct_sync (action a, const target& t, uint64_t options, bool fail)
+ {
+ assert (t.ctx.phase == run_phase::match);
+
+ target_state r (match_impl (a, t, options, 0, nullptr).second);
+
+ if (r == target_state::failed && fail)
+ throw failed ();
+
+ return r;
+ }
+
+ inline target_state
+ match_direct_complete (action a, const target& t,
+ uint64_t options,
+ bool fail)
{
- return match_sync (a, t, um);
+ return match_direct_sync (a, t, options, fail);
}
- // Clear rule match-specific target data.
+ // Clear rule match-specific target data (except match_extra).
//
inline void
clear_target (action a, target& t)
{
- t[a].vars.clear ();
+ target::opstate& s (t.state[a]);
+ s.recipe = nullptr;
+ s.recipe_keep = false;
+ s.resolve_counted = false;
+ s.vars.clear ();
t.prerequisite_targets[a].clear ();
- t.clear_data (a);
}
LIBBUILD2_SYMEXPORT void
@@ -593,12 +612,18 @@ namespace build2
}
inline void
- match_recipe (target_lock& l, recipe r)
+ match_recipe (target_lock& l, recipe r, uint64_t options)
{
- assert (l.target != nullptr &&
- l.offset != target::offset_matched &&
+ assert (options != 0 &&
+ l.target != nullptr &&
+ l.offset < target::offset_matched &&
l.target->ctx.phase == run_phase::match);
+ match_extra& me ((*l.target)[l.action].match_extra);
+
+ me.reinit (false /* fallback */);
+ me.cur_options = options; // Already applied, so cur_, not new_options.
+ me.cur_options_.store (me.cur_options, memory_order_relaxed);
clear_target (l.action, *l.target);
set_rule (l, nullptr); // No rule.
set_recipe (l, move (r));
@@ -606,47 +631,82 @@ namespace build2
}
inline void
- match_rule (target_lock& l, const rule_match& r)
+ match_rule (target_lock& l, const rule_match& r, uint64_t options)
{
assert (l.target != nullptr &&
- l.offset != target::offset_matched &&
+ l.offset < target::offset_matched &&
l.target->ctx.phase == run_phase::match);
+ match_extra& me ((*l.target)[l.action].match_extra);
+
+ me.reinit (false /* fallback */);
+ me.new_options = options;
clear_target (l.action, *l.target);
set_rule (l, &r);
l.offset = target::offset_matched;
}
inline recipe
- match_delegate (action a, target& t, const rule& dr, bool try_match)
+ match_delegate (action a, target& t,
+ const rule& dr,
+ uint64_t options,
+ bool try_match)
{
assert (t.ctx.phase == run_phase::match);
// Note: we don't touch any of the t[a] state since that was/will be set
// for the delegating rule.
//
- const rule_match* r (match_rule (a, t, &dr, try_match));
+ const rule_match* r (match_rule_impl (a, t, options, &dr, try_match));
return r != nullptr ? apply_impl (a, t, *r) : empty_recipe;
}
inline target_state
- match_inner (action a, const target& t)
+ match_inner (action a, const target& t, uint64_t options)
{
// In a sense this is like any other dependency.
//
assert (a.outer ());
- return match_sync (a.inner_action (), t);
+ return match_sync (a.inner_action (), t, options);
}
inline pair<bool, target_state>
- match_inner (action a, const target& t, unmatch um)
+ match_inner (action a, const target& t, unmatch um, uint64_t options)
{
assert (a.outer ());
- return match_sync (a.inner_action (), t, um);
+ return match_sync (a.inner_action (), t, um, options);
+ }
+
+ // Note: rematch is basically normal match but without the counts increment,
+ // so we just delegate to match_direct_*().
+ //
+ inline target_state
+ rematch_sync (action a, const target& t,
+ uint64_t options,
+ bool fail)
+ {
+ return match_direct_sync (a, t, options, fail);
+ }
+
+ inline target_state
+ rematch_async (action a, const target& t,
+ size_t start_count, atomic_count& task_count,
+ uint64_t options,
+ bool fail)
+ {
+ return match_async (a, t, start_count, task_count, options, fail);
+ }
+
+ inline target_state
+ rematch_complete (action a, const target& t,
+ uint64_t options,
+ bool fail)
+ {
+ return match_direct_complete (a, t, options, fail);
}
LIBBUILD2_SYMEXPORT void
- resolve_group_impl (action, const target&, target_lock&&);
+ resolve_group_impl (target_lock&&);
inline const target*
resolve_group (action a, const target& t)
@@ -666,7 +726,7 @@ namespace build2
// then unlock and return.
//
if (t.group == nullptr && l.offset < target::offset_tried)
- resolve_group_impl (a, t, move (l));
+ resolve_group_impl (move (l));
break;
}
@@ -685,12 +745,16 @@ namespace build2
}
LIBBUILD2_SYMEXPORT void
- match_prerequisites (action, target&, const match_search&, const scope*);
+ match_prerequisites (action, target&,
+ const match_search&,
+ const scope*,
+ bool search_only);
LIBBUILD2_SYMEXPORT void
match_prerequisite_members (action, target&,
const match_search_member&,
- const scope*);
+ const scope*,
+ bool search_only);
inline void
match_prerequisites (action a, target& t, const match_search& ms)
@@ -701,7 +765,21 @@ namespace build2
ms,
(a.operation () != clean_id || t.is_a<alias> ()
? nullptr
- : &t.root_scope ()));
+ : &t.root_scope ()),
+ false);
+ }
+
+ inline void
+ search_prerequisites (action a, target& t, const match_search& ms)
+ {
+ match_prerequisites (
+ a,
+ t,
+ ms,
+ (a.operation () != clean_id || t.is_a<alias> ()
+ ? nullptr
+ : &t.root_scope ()),
+ true);
}
inline void
@@ -709,13 +787,46 @@ namespace build2
const match_search_member& msm)
{
if (a.operation () != clean_id || t.is_a<alias> ())
- match_prerequisite_members (a, t, msm, nullptr);
+ match_prerequisite_members (a, t, msm, nullptr, false);
+ else
+ {
+ // Note that here we don't iterate over members even for see-through
+ // groups since the group target should clean everything up. A bit of an
+ // optimization.
+ //
+ // @@ TMP: I wonder if this still holds for the new group semantics
+ // we have in Qt automoc? Also below.
+ //
+ match_search ms (
+ msm
+ ? [&msm] (action a,
+ const target& t,
+ const prerequisite& p,
+ include_type i)
+ {
+ return msm (a, t, prerequisite_member {p, nullptr}, i);
+ }
+ : match_search ());
+
+ match_prerequisites (a, t, ms, &t.root_scope (), false);
+ }
+ }
+
+ inline void
+ search_prerequisite_members (action a, target& t,
+ const match_search_member& msm)
+ {
+ if (a.operation () != clean_id || t.is_a<alias> ())
+ match_prerequisite_members (a, t, msm, nullptr, true);
else
{
// Note that here we don't iterate over members even for see-through
// groups since the group target should clean everything up. A bit of an
// optimization.
//
+ // @@ TMP: I wonder if this still holds for the new group semantics
+ // we have in Qt automoc? Also above.
+ //
match_search ms (
msm
? [&msm] (action a,
@@ -727,20 +838,32 @@ namespace build2
}
: match_search ());
- match_prerequisites (a, t, ms, &t.root_scope ());
+ match_prerequisites (a, t, ms, &t.root_scope (), true);
}
}
inline void
match_prerequisites (action a, target& t, const scope& s)
{
- match_prerequisites (a, t, nullptr, &s);
+ match_prerequisites (a, t, nullptr, &s, false);
+ }
+
+ inline void
+ search_prerequisites (action a, target& t, const scope& s)
+ {
+ match_prerequisites (a, t, nullptr, &s, true);
}
inline void
match_prerequisite_members (action a, target& t, const scope& s)
{
- match_prerequisite_members (a, t, nullptr, &s);
+ match_prerequisite_members (a, t, nullptr, &s, false);
+ }
+
+ inline void
+ search_prerequisite_members (action a, target& t, const scope& s)
+ {
+ match_prerequisite_members (a, t, nullptr, &s, true);
}
LIBBUILD2_SYMEXPORT target_state
@@ -753,7 +876,7 @@ namespace build2
if (r == target_state::busy)
{
- t.ctx.sched.wait (t.ctx.count_executed (),
+ t.ctx.sched->wait (t.ctx.count_executed (),
t[a].task_count,
scheduler::work_none);
@@ -789,7 +912,7 @@ namespace build2
// If the target is still busy, wait for its completion.
//
- ctx.sched.wait (ctx.count_executed (),
+ ctx.sched->wait (ctx.count_executed (),
t[a].task_count,
scheduler::work_none);
@@ -806,7 +929,7 @@ namespace build2
if (r == target_state::busy)
{
- t.ctx.sched.wait (t.ctx.count_executed (),
+ t.ctx.sched->wait (t.ctx.count_executed (),
t[a].task_count,
scheduler::work_none);
diff --git a/libbuild2/b-cmdline.cxx b/libbuild2/b-cmdline.cxx
index 2e2deb8..206c9de 100644
--- a/libbuild2/b-cmdline.cxx
+++ b/libbuild2/b-cmdline.cxx
@@ -286,8 +286,25 @@ namespace build2
{
optional<dir_path> extra;
if (ops.default_options_specified ())
+ {
extra = ops.default_options ();
+ // Note that load_default_options() expects an absolute and normalized
+ // directory.
+ //
+ try
+ {
+ if (extra->relative ())
+ extra->complete ();
+
+ extra->normalize ();
+ }
+ catch (const invalid_path& e)
+ {
+ fail << "invalid --default-options value " << e.path;
+ }
+ }
+
// Load default options files.
//
default_options<b_options> def_ops (
@@ -396,8 +413,26 @@ namespace build2
if (ops.progress () && ops.no_progress ())
fail << "both --progress and --no-progress specified";
+ if (ops.diag_color () && ops.no_diag_color ())
+ fail << "both --diag-color and --no-diag-color specified";
+
if (ops.mtime_check () && ops.no_mtime_check ())
fail << "both --mtime-check and --no-mtime-check specified";
+
+ if (ops.match_only () && ops.load_only ())
+ fail << "both --match-only and --load-only specified";
+
+ if (!ops.dump_specified ())
+ {
+ // Note: let's allow specifying --dump-format without --dump in case
+ // it comes from a default options file or some such.
+
+ if (ops.dump_target_specified ())
+ fail << "--dump-target requires --dump";
+
+ if (ops.dump_scope_specified ())
+ fail << "--dump-scope requires --dump";
+ }
}
catch (const cli::exception& e)
{
@@ -416,6 +451,9 @@ namespace build2
r.progress = (ops.progress () ? optional<bool> (true) :
ops.no_progress () ? optional<bool> (false) : nullopt);
+ r.diag_color = (ops.diag_color () ? optional<bool> (true) :
+ ops.no_diag_color () ? optional<bool> (false) : nullopt);
+
r.mtime_check = (ops.mtime_check () ? optional<bool> (true) :
ops.no_mtime_check () ? optional<bool> (false) : nullopt);
diff --git a/libbuild2/b-cmdline.hxx b/libbuild2/b-cmdline.hxx
index c5c82fc..8ccbb20 100644
--- a/libbuild2/b-cmdline.hxx
+++ b/libbuild2/b-cmdline.hxx
@@ -24,6 +24,7 @@ namespace build2
//
uint16_t verbosity = 1;
optional<bool> progress;
+ optional<bool> diag_color;
optional<bool> mtime_check;
optional<path> config_sub;
optional<path> config_guess;
diff --git a/libbuild2/b-options.cxx b/libbuild2/b-options.cxx
index c1e5f23..c107b44 100644
--- a/libbuild2/b-options.cxx
+++ b/libbuild2/b-options.cxx
@@ -233,6 +233,66 @@ namespace build2
}
};
+ template <typename K, typename V, typename C>
+ struct parser<std::multimap<K, V, C> >
+ {
+ static void
+ parse (std::multimap<K, V, C>& m, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ std::size_t pos (s.position ());
+ std::string ov (s.next ());
+ std::string::size_type p = ov.find ('=');
+
+ K k = K ();
+ V v = V ();
+ std::string kstr (ov, 0, p);
+ std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (o),
+ 0
+ };
+
+ bool dummy;
+ if (!kstr.empty ())
+ {
+ av[1] = const_cast<char*> (kstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<K>::parse (k, dummy, s);
+ }
+
+ if (!vstr.empty ())
+ {
+ av[1] = const_cast<char*> (vstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<V>::parse (v, dummy, s);
+ }
+
+ m.insert (typename std::multimap<K, V, C>::value_type (k, v));
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+
+ static void
+ merge (std::multimap<K, V, C>& b, const std::multimap<K, V, C>& a)
+ {
+ for (typename std::multimap<K, V, C>::const_iterator i (a.begin ());
+ i != a.end ();
+ ++i)
+ b.insert (typename std::multimap<K, V, C>::value_type (i->first,
+ i->second));
+ }
+ };
+
template <typename X, typename T, T X::*M>
void
thunk (X& x, scanner& s)
@@ -278,6 +338,8 @@ namespace build2
stat_ (),
progress_ (),
no_progress_ (),
+ diag_color_ (),
+ no_diag_color_ (),
jobs_ (),
jobs_specified_ (false),
max_jobs_ (),
@@ -292,6 +354,7 @@ namespace build2
dry_run_ (),
no_diag_buffer_ (),
match_only_ (),
+ load_only_ (),
no_external_modules_ (),
structured_result_ (),
structured_result_specified_ (false),
@@ -299,6 +362,12 @@ namespace build2
no_mtime_check_ (),
dump_ (),
dump_specified_ (false),
+ dump_format_ (),
+ dump_format_specified_ (false),
+ dump_scope_ (),
+ dump_scope_specified_ (false),
+ dump_target_ (),
+ dump_target_specified_ (false),
trace_match_ (),
trace_match_specified_ (false),
trace_execute_ (),
@@ -449,6 +518,18 @@ namespace build2
this->no_progress_, a.no_progress_);
}
+ if (a.diag_color_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->diag_color_, a.diag_color_);
+ }
+
+ if (a.no_diag_color_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->no_diag_color_, a.no_diag_color_);
+ }
+
if (a.jobs_specified_)
{
::build2::build::cli::parser< size_t>::merge (
@@ -508,6 +589,12 @@ namespace build2
this->match_only_, a.match_only_);
}
+ if (a.load_only_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->load_only_, a.load_only_);
+ }
+
if (a.no_external_modules_)
{
::build2::build::cli::parser< bool>::merge (
@@ -535,21 +622,42 @@ namespace build2
if (a.dump_specified_)
{
- ::build2::build::cli::parser< std::set<string>>::merge (
+ ::build2::build::cli::parser< strings>::merge (
this->dump_, a.dump_);
this->dump_specified_ = true;
}
+ if (a.dump_format_specified_)
+ {
+ ::build2::build::cli::parser< string>::merge (
+ this->dump_format_, a.dump_format_);
+ this->dump_format_specified_ = true;
+ }
+
+ if (a.dump_scope_specified_)
+ {
+ ::build2::build::cli::parser< dir_paths>::merge (
+ this->dump_scope_, a.dump_scope_);
+ this->dump_scope_specified_ = true;
+ }
+
+ if (a.dump_target_specified_)
+ {
+ ::build2::build::cli::parser< vector<pair<name, optional<name>>>>::merge (
+ this->dump_target_, a.dump_target_);
+ this->dump_target_specified_ = true;
+ }
+
if (a.trace_match_specified_)
{
- ::build2::build::cli::parser< std::vector<name>>::merge (
+ ::build2::build::cli::parser< vector<name>>::merge (
this->trace_match_, a.trace_match_);
this->trace_match_specified_ = true;
}
if (a.trace_execute_specified_)
{
- ::build2::build::cli::parser< std::vector<name>>::merge (
+ ::build2::build::cli::parser< vector<name>>::merge (
this->trace_execute_, a.trace_execute_);
this->trace_execute_specified_ = true;
}
@@ -690,6 +798,19 @@ namespace build2
<< "\033[1m--no-progress\033[0m Don't display build progress." << ::std::endl;
os << std::endl
+ << "\033[1m--diag-color\033[0m Use color in diagnostics. If printing to a terminal the" << ::std::endl
+ << " color is used by default provided the terminal is not" << ::std::endl
+ << " dumb. Use \033[1m--no-diag-color\033[0m to suppress." << ::std::endl
+ << ::std::endl
+ << " This option affects the diagnostics printed by the" << ::std::endl
+ << " build system itself. Some rules may also choose to" << ::std::endl
+ << " propagate its value to tools (such as compilers) that" << ::std::endl
+ << " they invoke." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--no-diag-color\033[0m Don't use color in diagnostics." << ::std::endl;
+
+ os << std::endl
<< "\033[1m--jobs\033[0m|\033[1m-j\033[0m \033[4mnum\033[0m Number of active jobs to perform in parallel. This" << ::std::endl
<< " includes both the number of active threads inside the" << ::std::endl
<< " build system as well as the number of external commands" << ::std::endl
@@ -763,14 +884,24 @@ namespace build2
<< " in order to prevent interleaving. However, this can" << ::std::endl
<< " have side-effects since the child process' \033[1mstderr\033[0m is no" << ::std::endl
<< " longer a terminal. Most notably, the use of color in" << ::std::endl
- << " diagnostics will be disabled by most programs. On the" << ::std::endl
+ << " diagnostics may be disabled by some programs. On the" << ::std::endl
<< " other hand, depending on the platform and programs" << ::std::endl
<< " invoked, the interleaving diagnostics may not break" << ::std::endl
<< " lines and thus could be tolerable." << ::std::endl;
os << std::endl
- << "\033[1m--match-only\033[0m Match the rules but do not execute the operation. This" << ::std::endl
- << " mode is primarily useful for profiling." << ::std::endl;
+ << "\033[1m--match-only\033[0m Match the rules without executing the operation. This" << ::std::endl
+ << " mode is primarily useful for profiling and dumping the" << ::std::endl
+ << " build system state." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--load-only\033[0m Match the rules only to \033[1malias{}\033[0m targets ignoring other" << ::std::endl
+ << " targets and without executing the operation. In" << ::std::endl
+ << " particular, this has the effect of loading all the" << ::std::endl
+ << " subdirectory \033[1mbuildfiles\033[0m that are not explicitly" << ::std::endl
+ << " included. Note that this option can only be used with" << ::std::endl
+ << " the \033[1mperform(update)\033[0m action on an \033[1malias{}\033[0m target," << ::std::endl
+ << " usually \033[1mdir{}\033[0m." << ::std::endl;
os << std::endl
<< "\033[1m--no-external-modules\033[0m Don't load external modules during project bootstrap." << ::std::endl
@@ -799,8 +930,9 @@ namespace build2
<< " the outer operation is specified in parenthesis. For" << ::std::endl
<< " example:" << ::std::endl
<< ::std::endl
- << " unchanged perform update(test) /tmp/dir{hello/}" << ::std::endl
- << " changed perform test /tmp/hello/exe{test}" << ::std::endl
+ << " unchanged perform update(test)" << ::std::endl
+ << " /tmp/hello/hello/exe{hello}" << ::std::endl
+ << " changed perform test /tmp/hello/hello/exe{hello}" << ::std::endl
<< ::std::endl
<< " If the output format is \033[1mjson\033[0m, then the output is a JSON" << ::std::endl
<< " array of objects which are the serialized" << ::std::endl
@@ -810,7 +942,7 @@ namespace build2
<< " struct target_action_result" << ::std::endl
<< " {" << ::std::endl
<< " string target;" << ::std::endl
- << " string quoted_target;" << ::std::endl
+ << " string display_target;" << ::std::endl
<< " string target_type;" << ::std::endl
<< " optional<string> target_path;" << ::std::endl
<< " string meta_operation;" << ::std::endl
@@ -823,20 +955,20 @@ namespace build2
<< ::std::endl
<< " [" << ::std::endl
<< " {" << ::std::endl
- << " \"target\": \"/tmp/dir{hello/}\"," << ::std::endl
- << " \"quoted_target\": \"/tmp/dir{hello/}\"," << ::std::endl
- << " \"target_type\": \"dir\"," << ::std::endl
- << " \"target_path\": \"/tmp/hello\"," << ::std::endl
+ << " \"target\": \"/tmp/hello/hello/exe{hello.}\"," << ::std::endl
+ << " \"display_target\": \"/tmp/hello/hello/exe{hello}\"," << ::std::endl
+ << " \"target_type\": \"exe\"," << ::std::endl
+ << " \"target_path\": \"/tmp/hello/hello/hello\"," << ::std::endl
<< " \"meta_operation\": \"perform\"," << ::std::endl
<< " \"operation\": \"update\"," << ::std::endl
<< " \"outer_operation\": \"test\"," << ::std::endl
<< " \"state\": \"unchanged\"" << ::std::endl
<< " }," << ::std::endl
<< " {" << ::std::endl
- << " \"target\": \"/tmp/dir{hello/}\"," << ::std::endl
- << " \"quoted_target\": \"/tmp/dir{hello/}\"," << ::std::endl
- << " \"target_type\": \"dir\"," << ::std::endl
- << " \"target_path\": \"/tmp/hello\"," << ::std::endl
+ << " \"target\": \"/tmp/hello/hello/exe{hello.}\"," << ::std::endl
+ << " \"display_target\": \"/tmp/hello/hello/exe{hello}\"," << ::std::endl
+ << " \"target_type\": \"exe\"," << ::std::endl
+ << " \"target_path\": \"/tmp/hello/hello/hello\"," << ::std::endl
<< " \"meta_operation\": \"perform\"," << ::std::endl
<< " \"operation\": \"test\"," << ::std::endl
<< " \"state\": \"changed\"" << ::std::endl
@@ -847,13 +979,15 @@ namespace build2
<< " overall properties of this format and the semantics of" << ::std::endl
<< " the \033[1mstruct\033[0m serialization." << ::std::endl
<< ::std::endl
- << " The \033[1mtarget\033[0m member is a \"display\" target name, the same" << ::std::endl
- << " as in the \033[1mlines\033[0m format. The \033[1mquoted_target\033[0m member is a" << ::std::endl
- << " target name that, if required, is quoted so that it can" << ::std::endl
- << " be passed back to the driver on the command line. The" << ::std::endl
- << " \033[1mtarget_type\033[0m member is the type of target. The" << ::std::endl
- << " \033[1mtarget_path\033[0m member is an absolute path to the target if" << ::std::endl
- << " the target type is path-based or \033[1mdir\033[0m." << ::std::endl;
+ << " The \033[1mtarget\033[0m member is the target name that is qualified" << ::std::endl
+ << " with the extension (if applicable) and, if required, is" << ::std::endl
+ << " quoted so that it can be passed back to the build" << ::std::endl
+ << " system driver on the command line. The \033[1mdisplay_target\033[0m" << ::std::endl
+ << " member is the unqualified and unquoted \"display\" target" << ::std::endl
+ << " name, the same as in the \033[1mlines\033[0m format. The \033[1mtarget_type\033[0m" << ::std::endl
+ << " member is the type of target. The \033[1mtarget_path\033[0m member" << ::std::endl
+ << " is an absolute path to the target if the target type is" << ::std::endl
+ << " path-based or \033[1mdir\033[0m." << ::std::endl;
os << std::endl
<< "\033[1m--mtime-check\033[0m Perform file modification time sanity checks. These" << ::std::endl
@@ -870,8 +1004,48 @@ namespace build2
os << std::endl
<< "\033[1m--dump\033[0m \033[4mphase\033[0m Dump the build system state after the specified phase." << ::std::endl
<< " Valid \033[4mphase\033[0m values are \033[1mload\033[0m (after loading \033[1mbuildfiles\033[0m)" << ::std::endl
- << " and \033[1mmatch\033[0m (after matching rules to targets). Repeat" << ::std::endl
- << " this option to dump the state after multiple phases." << ::std::endl;
+ << " and \033[1mmatch\033[0m (after matching rules to targets). The \033[1mmatch\033[0m" << ::std::endl
+ << " value also has the \033[1mmatch-pre\033[0m and \033[1mmatch-post\033[0m variants to" << ::std::endl
+ << " dump the state for the pre/post-operations (\033[1mmatch\033[0m dumps" << ::std::endl
+ << " the main operation only). Repeat this option to dump" << ::std::endl
+ << " the state after multiple phases/variants. By default" << ::std::endl
+ << " the entire build state is dumped but this behavior can" << ::std::endl
+ << " be altered with the \033[1m--dump-scope\033[0m and \033[1m--dump-target\033[0m" << ::std::endl
+ << " options. See also the \033[1m--match-only\033[0m and \033[1m--load-only\033[0m" << ::std::endl
+ << " options." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--dump-format\033[0m \033[4mformat\033[0m Representation format and output stream to use when" << ::std::endl
+ << " dumping the build system state. Valid values for this" << ::std::endl
+ << " option are \033[1mbuildfile\033[0m (a human-readable, Buildfile-like" << ::std::endl
+ << " format written to \033[1mstderr\033[0m; this is the default), and" << ::std::endl
+ << " \033[1mjson-v0.1\033[0m (machine-readable, JSON-based format written" << ::std::endl
+ << " to \033[1mstdout\033[0m). For details on the \033[1mbuildfile\033[0m format, see" << ::std::endl
+ << " Diagnostics and Debugging (b#intro-diag-debug). For" << ::std::endl
+ << " details on the \033[1mjson-v0.1\033[0m format, see the JSON OUTPUT" << ::std::endl
+ << " section below (overall properties) and JSON Dump Format" << ::std::endl
+ << " (b#json-dump) (format specifics). Note that the JSON" << ::std::endl
+ << " format is currently unstable (thus the temporary \033[1m-v0.1\033[0m" << ::std::endl
+ << " suffix)." << ::std::endl
+ << ::std::endl
+ << " Note that because it's possible to end up with multiple" << ::std::endl
+ << " dumps (for example, by specifying the \033[1m--dump-scope\033[0m" << ::std::endl
+ << " and/or \033[1m--dump-target\033[0m options multiple times), the JSON" << ::std::endl
+ << " output is in the \"JSON Lines\" form, that is, without" << ::std::endl
+ << " pretty-printing and with the top-level JSON objects" << ::std::endl
+ << " delimited by newlines. Note also that if the JSON dump" << ::std::endl
+ << " output is combined with \033[1m--structured-result=json\033[0m, then" << ::std::endl
+ << " the structured result is the last line." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--dump-scope\033[0m \033[4mdir\033[0m Dump the build system state for the specified scope" << ::std::endl
+ << " only. Repeat this option to dump the state of multiple" << ::std::endl
+ << " scopes." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--dump-target\033[0m \033[4mtarget\033[0m Dump the build system state for the specified target" << ::std::endl
+ << " only. Repeat this option to dump the state of multiple" << ::std::endl
+ << " targets." << ::std::endl;
os << std::endl
<< "\033[1m--trace-match\033[0m \033[4mtarget\033[0m Trace rule matching for the specified target. This is" << ::std::endl
@@ -998,6 +1172,10 @@ namespace build2
&::build2::build::cli::thunk< b_options, &b_options::progress_ >;
_cli_b_options_map_["--no-progress"] =
&::build2::build::cli::thunk< b_options, &b_options::no_progress_ >;
+ _cli_b_options_map_["--diag-color"] =
+ &::build2::build::cli::thunk< b_options, &b_options::diag_color_ >;
+ _cli_b_options_map_["--no-diag-color"] =
+ &::build2::build::cli::thunk< b_options, &b_options::no_diag_color_ >;
_cli_b_options_map_["--jobs"] =
&::build2::build::cli::thunk< b_options, size_t, &b_options::jobs_,
&b_options::jobs_specified_ >;
@@ -1034,6 +1212,8 @@ namespace build2
&::build2::build::cli::thunk< b_options, &b_options::no_diag_buffer_ >;
_cli_b_options_map_["--match-only"] =
&::build2::build::cli::thunk< b_options, &b_options::match_only_ >;
+ _cli_b_options_map_["--load-only"] =
+ &::build2::build::cli::thunk< b_options, &b_options::load_only_ >;
_cli_b_options_map_["--no-external-modules"] =
&::build2::build::cli::thunk< b_options, &b_options::no_external_modules_ >;
_cli_b_options_map_["--structured-result"] =
@@ -1044,13 +1224,22 @@ namespace build2
_cli_b_options_map_["--no-mtime-check"] =
&::build2::build::cli::thunk< b_options, &b_options::no_mtime_check_ >;
_cli_b_options_map_["--dump"] =
- &::build2::build::cli::thunk< b_options, std::set<string>, &b_options::dump_,
+ &::build2::build::cli::thunk< b_options, strings, &b_options::dump_,
&b_options::dump_specified_ >;
+ _cli_b_options_map_["--dump-format"] =
+ &::build2::build::cli::thunk< b_options, string, &b_options::dump_format_,
+ &b_options::dump_format_specified_ >;
+ _cli_b_options_map_["--dump-scope"] =
+ &::build2::build::cli::thunk< b_options, dir_paths, &b_options::dump_scope_,
+ &b_options::dump_scope_specified_ >;
+ _cli_b_options_map_["--dump-target"] =
+ &::build2::build::cli::thunk< b_options, vector<pair<name, optional<name>>>, &b_options::dump_target_,
+ &b_options::dump_target_specified_ >;
_cli_b_options_map_["--trace-match"] =
- &::build2::build::cli::thunk< b_options, std::vector<name>, &b_options::trace_match_,
+ &::build2::build::cli::thunk< b_options, vector<name>, &b_options::trace_match_,
&b_options::trace_match_specified_ >;
_cli_b_options_map_["--trace-execute"] =
- &::build2::build::cli::thunk< b_options, std::vector<name>, &b_options::trace_execute_,
+ &::build2::build::cli::thunk< b_options, vector<name>, &b_options::trace_execute_,
&b_options::trace_execute_specified_ >;
_cli_b_options_map_["--no-column"] =
&::build2::build::cli::thunk< b_options, &b_options::no_column_ >;
diff --git a/libbuild2/b-options.hxx b/libbuild2/b-options.hxx
index 4e85192..48dd35f 100644
--- a/libbuild2/b-options.hxx
+++ b/libbuild2/b-options.hxx
@@ -13,8 +13,6 @@
//
// End prologue.
-#include <set>
-
#include <libbuild2/common-options.hxx>
namespace build2
@@ -104,6 +102,12 @@ namespace build2
const bool&
no_progress () const;
+ const bool&
+ diag_color () const;
+
+ const bool&
+ no_diag_color () const;
+
const size_t&
jobs () const;
@@ -147,6 +151,9 @@ namespace build2
match_only () const;
const bool&
+ load_only () const;
+
+ const bool&
no_external_modules () const;
const structured_result_format&
@@ -161,19 +168,37 @@ namespace build2
const bool&
no_mtime_check () const;
- const std::set<string>&
+ const strings&
dump () const;
bool
dump_specified () const;
- const std::vector<name>&
+ const string&
+ dump_format () const;
+
+ bool
+ dump_format_specified () const;
+
+ const dir_paths&
+ dump_scope () const;
+
+ bool
+ dump_scope_specified () const;
+
+ const vector<pair<name, optional<name>>>&
+ dump_target () const;
+
+ bool
+ dump_target_specified () const;
+
+ const vector<name>&
trace_match () const;
bool
trace_match_specified () const;
- const std::vector<name>&
+ const vector<name>&
trace_execute () const;
bool
@@ -266,6 +291,8 @@ namespace build2
bool stat_;
bool progress_;
bool no_progress_;
+ bool diag_color_;
+ bool no_diag_color_;
size_t jobs_;
bool jobs_specified_;
size_t max_jobs_;
@@ -280,16 +307,23 @@ namespace build2
bool dry_run_;
bool no_diag_buffer_;
bool match_only_;
+ bool load_only_;
bool no_external_modules_;
structured_result_format structured_result_;
bool structured_result_specified_;
bool mtime_check_;
bool no_mtime_check_;
- std::set<string> dump_;
+ strings dump_;
bool dump_specified_;
- std::vector<name> trace_match_;
+ string dump_format_;
+ bool dump_format_specified_;
+ dir_paths dump_scope_;
+ bool dump_scope_specified_;
+ vector<pair<name, optional<name>>> dump_target_;
+ bool dump_target_specified_;
+ vector<name> trace_match_;
bool trace_match_specified_;
- std::vector<name> trace_execute_;
+ vector<name> trace_execute_;
bool trace_execute_specified_;
bool no_column_;
bool no_line_;
diff --git a/libbuild2/b-options.ixx b/libbuild2/b-options.ixx
index 895831f..34b0d39 100644
--- a/libbuild2/b-options.ixx
+++ b/libbuild2/b-options.ixx
@@ -80,6 +80,18 @@ namespace build2
return this->no_progress_;
}
+ inline const bool& b_options::
+ diag_color () const
+ {
+ return this->diag_color_;
+ }
+
+ inline const bool& b_options::
+ no_diag_color () const
+ {
+ return this->no_diag_color_;
+ }
+
inline const size_t& b_options::
jobs () const
{
@@ -165,6 +177,12 @@ namespace build2
}
inline const bool& b_options::
+ load_only () const
+ {
+ return this->load_only_;
+ }
+
+ inline const bool& b_options::
no_external_modules () const
{
return this->no_external_modules_;
@@ -194,7 +212,7 @@ namespace build2
return this->no_mtime_check_;
}
- inline const std::set<string>& b_options::
+ inline const strings& b_options::
dump () const
{
return this->dump_;
@@ -206,7 +224,43 @@ namespace build2
return this->dump_specified_;
}
- inline const std::vector<name>& b_options::
+ inline const string& b_options::
+ dump_format () const
+ {
+ return this->dump_format_;
+ }
+
+ inline bool b_options::
+ dump_format_specified () const
+ {
+ return this->dump_format_specified_;
+ }
+
+ inline const dir_paths& b_options::
+ dump_scope () const
+ {
+ return this->dump_scope_;
+ }
+
+ inline bool b_options::
+ dump_scope_specified () const
+ {
+ return this->dump_scope_specified_;
+ }
+
+ inline const vector<pair<name, optional<name>>>& b_options::
+ dump_target () const
+ {
+ return this->dump_target_;
+ }
+
+ inline bool b_options::
+ dump_target_specified () const
+ {
+ return this->dump_target_specified_;
+ }
+
+ inline const vector<name>& b_options::
trace_match () const
{
return this->trace_match_;
@@ -218,7 +272,7 @@ namespace build2
return this->trace_match_specified_;
}
- inline const std::vector<name>& b_options::
+ inline const vector<name>& b_options::
trace_execute () const
{
return this->trace_execute_;
diff --git a/libbuild2/b.cli b/libbuild2/b.cli
index 4b5e459..f58b869 100644
--- a/libbuild2/b.cli
+++ b/libbuild2/b.cli
@@ -1,8 +1,6 @@
// file : libbuild2/b.cli
// license : MIT; see accompanying LICENSE file
-include <set>;
-
include <libbuild2/common.cli>;
"\section=1"
@@ -328,11 +326,20 @@ namespace build2
version: 1.0.0
src_root: /tmp/libfoo
out_root: /tmp/libfoo
+ subprojects: @tests
project: libbar
version: 2.0.0
src_root: /tmp/libbar
out_root: /tmp/libbar-out
+ subprojects: @tests
+ \
+
+ To omit discovering and printing subprojects information, use the
+ \cb{no_subprojects} parameter, for example:
+
+ \
+ $ b info: libfoo/,no_subprojects
\
To instead print this information in the JSON format, use the
@@ -549,6 +556,22 @@ namespace build2
"Don't display build progress."
}
+ bool --diag-color
+ {
+ "Use color in diagnostics. If printing to a terminal the color is used
+ by default provided the terminal is not dumb. Use \cb{--no-diag-color}
+ to suppress.
+
+ This option affects the diagnostics printed by the build system itself.
+ Some rules may also choose to propagate its value to tools (such as
+ compilers) that they invoke."
+ }
+
+ bool --no-diag-color
+ {
+ "Don't use color in diagnostics."
+ }
+
size_t --jobs|-j
{
"<num>",
@@ -632,7 +655,7 @@ namespace build2
once after each child exits in order to prevent interleaving.
However, this can have side-effects since the child process'
\cb{stderr} is no longer a terminal. Most notably, the use of
- color in diagnostics will be disabled by most programs. On the
+ color in diagnostics may be disabled by some programs. On the
other hand, depending on the platform and programs invoked, the
interleaving diagnostics may not break lines and thus could be
tolerable."
@@ -640,8 +663,18 @@ namespace build2
bool --match-only
{
- "Match the rules but do not execute the operation. This mode is primarily
- useful for profiling."
+ "Match the rules without executing the operation. This mode is primarily
+ useful for profiling and dumping the build system state."
+ }
+
+ bool --load-only
+ {
+ "Match the rules only to \cb{alias{\}} targets ignoring other targets
+ and without executing the operation. In particular, this has the
+ effect of loading all the subdirectory \cb{buildfiles} that are not
+ explicitly included. Note that this option can only be used with the
+ \cb{perform(update)} action on an \cb{alias{\}} target, usually
+ \cb{dir{\}}."
}
bool --no-external-modules
@@ -673,8 +706,8 @@ namespace build2
outer operation is specified in parenthesis. For example:
\
- unchanged perform update(test) /tmp/dir{hello/}
- changed perform test /tmp/hello/exe{test}
+ unchanged perform update(test) /tmp/hello/hello/exe{hello}
+ changed perform test /tmp/hello/hello/exe{hello}
\
If the output format is \cb{json}, then the output is a JSON array of
@@ -685,7 +718,7 @@ namespace build2
struct target_action_result
{
string target;
- string quoted_target;
+ string display_target;
string target_type;
optional<string> target_path;
string meta_operation;
@@ -700,20 +733,20 @@ namespace build2
\
[
{
- \"target\": \"/tmp/dir{hello/}\",
- \"quoted_target\": \"/tmp/dir{hello/}\",
- \"target_type\": \"dir\",
- \"target_path\": \"/tmp/hello\",
+ \"target\": \"/tmp/hello/hello/exe{hello.}\",
+ \"display_target\": \"/tmp/hello/hello/exe{hello}\",
+ \"target_type\": \"exe\",
+ \"target_path\": \"/tmp/hello/hello/hello\",
\"meta_operation\": \"perform\",
\"operation\": \"update\",
\"outer_operation\": \"test\",
\"state\": \"unchanged\"
},
{
- \"target\": \"/tmp/dir{hello/}\",
- \"quoted_target\": \"/tmp/dir{hello/}\",
- \"target_type\": \"dir\",
- \"target_path\": \"/tmp/hello\",
+ \"target\": \"/tmp/hello/hello/exe{hello.}\",
+ \"display_target\": \"/tmp/hello/hello/exe{hello}\",
+ \"target_type\": \"exe\",
+ \"target_path\": \"/tmp/hello/hello/hello\",
\"meta_operation\": \"perform\",
\"operation\": \"test\",
\"state\": \"changed\"
@@ -725,12 +758,14 @@ namespace build2
properties of this format and the semantics of the \cb{struct}
serialization.
- The \cb{target} member is a \"display\" target name, the same as in the
- \cb{lines} format. The \cb{quoted_target} member is a target name that,
- if required, is quoted so that it can be passed back to the driver on
- the command line. The \cb{target_type} member is the type of target.
- The \cb{target_path} member is an absolute path to the target if the
- target type is path-based or \cb{dir}.
+ The \cb{target} member is the target name that is qualified with the
+ extension (if applicable) and, if required, is quoted so that it can be
+ passed back to the build system driver on the command line. The
+ \cb{display_target} member is the unqualified and unquoted \"display\"
+ target name, the same as in the \cb{lines} format. The \cb{target_type}
+ member is the type of target. The \cb{target_path} member is an
+ absolute path to the target if the target type is path-based or
+ \cb{dir}.
"
}
@@ -749,23 +784,67 @@ namespace build2
\cb{--mtime-check} for details."
}
- std::set<string> --dump
+ strings --dump
{
"<phase>",
"Dump the build system state after the specified phase. Valid <phase>
values are \cb{load} (after loading \cb{buildfiles}) and \cb{match}
- (after matching rules to targets). Repeat this option to dump the
- state after multiple phases."
+ (after matching rules to targets). The \cb{match} value also has the
+ \cb{match-pre} and \cb{match-post} variants to dump the state for the
+ pre/post-operations (\cb{match} dumps the main operation only). Repeat
+ this option to dump the state after multiple phases/variants. By
+ default the entire build state is dumped but this behavior can be
+ altered with the \cb{--dump-scope} and \cb{--dump-target} options.
+ See also the \cb{--match-only} and \cb{--load-only} options."
+ }
+
+ string --dump-format
+ {
+ // NOTE: fix all references to json-v0.1, including the manual.
+ //
+ "<format>",
+ "Representation format and output stream to use when dumping the build
+ system state. Valid values for this option are \cb{buildfile} (a
+ human-readable, Buildfile-like format written to \cb{stderr}; this is
+ the default), and \cb{json-v0.1} (machine-readable, JSON-based format
+ written to \cb{stdout}). For details on the \cb{buildfile} format, see
+ \l{b#intro-diag-debug Diagnostics and Debugging}. For details on the
+ \cb{json-v0.1} format, see the JSON OUTPUT section below (overall
+ properties) and \l{b#json-dump JSON Dump Format} (format specifics).
+ Note that the JSON format is currently unstable (thus the temporary
+ \cb{-v0.1} suffix).
+
+ Note that because it's possible to end up with multiple dumps (for
+ example, by specifying the \cb{--dump-scope} and/or \cb{--dump-target}
+ options multiple times), the JSON output is in the \"JSON Lines\" form,
+ that is, without pretty-printing and with the top-level JSON objects
+ delimited by newlines. Note also that if the JSON dump output is
+ combined with \cb{--structured-result=json}, then the structured
+ result is the last line."
+ }
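+
+  // A hypothetical example of the above (illustrative command line only):
+  // dump the whole project state after loading, in the JSON format,
+  // redirected to a file:
+  //
+  //   $ b --load-only --dump load --dump-format json-v0.1 >state.json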
+
+ dir_paths --dump-scope
+ {
+ "<dir>",
+ "Dump the build system state for the specified scope only. Repeat this
+ option to dump the state of multiple scopes."
+ }
+
+ vector<pair<name, optional<name>>> --dump-target
+ {
+ "<target>",
+ "Dump the build system state for the specified target only. Repeat this
+ option to dump the state of multiple targets."
}
- std::vector<name> --trace-match
+ vector<name> --trace-match
{
"<target>",
"Trace rule matching for the specified target. This is primarily useful
during troubleshooting. Repeat this option to trace multiple targets."
}
- std::vector<name> --trace-execute
+ vector<name> --trace-execute
{
"<target>",
"Trace rule execution for the specified target. This is primarily useful
@@ -904,7 +983,7 @@ namespace build2
The order in which default options files are loaded is traced at the
verbosity level 3 (\cb{-V} option) or higher.
- \h|JSON OUTPUT|
+ \h#json-output|JSON OUTPUT|
Commands that support the JSON output specify their formats as a
serialized representation of a C++ \cb{struct} or an array thereof. For
diff --git a/libbuild2/bash/rule.cxx b/libbuild2/bash/rule.cxx
index 29c6a2a..6e96b34 100644
--- a/libbuild2/bash/rule.cxx
+++ b/libbuild2/bash/rule.cxx
@@ -63,10 +63,12 @@ namespace build2
// in_rule
//
bool in_rule::
- match (action a, target& t, const string& hint, match_extra&) const
+ match (action a, target& xt, const string& hint, match_extra&) const
{
tracer trace ("bash::in_rule::match");
+ file& t (xt.as<file> ()); // Only registered for exe{} and bash{}.
+
// Note that for bash{} and for exe{} with hint we match even if the
// target does not depend on any modules (while it could have been
// handled by the in module, that would require loading it).
@@ -89,6 +91,12 @@ namespace build2
l4 ([&]{trace << "no bash module prerequisite or hint for target "
<< t;});
+ // If we match, derive the file name early as recommended by the in
+ // rule.
+ //
+ if (fi && fm)
+ t.derive_path ();
+
return fi && fm;
}
@@ -447,9 +455,9 @@ namespace build2
}
recipe install_rule::
- apply (action a, target& t) const
+ apply (action a, target& t, match_extra& me) const
{
- recipe r (file_rule::apply_impl (a, t));
+ recipe r (file_rule::apply_impl (a, t, me));
if (r == nullptr)
return noop_recipe;
diff --git a/libbuild2/bash/rule.hxx b/libbuild2/bash/rule.hxx
index 444d176..3f9618f 100644
--- a/libbuild2/bash/rule.hxx
+++ b/libbuild2/bash/rule.hxx
@@ -74,7 +74,7 @@ namespace build2
match (action, target&) const override;
virtual recipe
- apply (action, target&) const override;
+ apply (action, target&, match_extra&) const override;
protected:
const in_rule& in_;
diff --git a/libbuild2/bin/def-rule.cxx b/libbuild2/bin/def-rule.cxx
index 0998c89..143cc35 100644
--- a/libbuild2/bin/def-rule.cxx
+++ b/libbuild2/bin/def-rule.cxx
@@ -417,6 +417,8 @@ namespace build2
// we will try to recognize C/C++ identifiers plus the special symbols
// that we need to export (e.g., vtable).
//
+ // Note that it looks like rdata should not be declared DATA. It is
+ // known to break ??_7 (vtable) exporting (see GH issue 315).
//
for (const string& s: syms.r)
{
@@ -424,7 +426,7 @@ namespace build2
(s[0] == '?' && s[1] != '?') || // C++
s.compare (0, 4, "??_7") == 0) // vtable
{
- os << " " << strip (s) << " DATA\n";
+ os << " " << strip (s) << '\n';
}
}
}
@@ -496,6 +498,12 @@ namespace build2
// we will try to recognize C/C++ identifiers plus the special symbols
// that we need to export (e.g., vtable and typeinfo).
//
+ // For the description of GNU binutils .def format, see:
+ //
+ // https://sourceware.org/binutils/docs/binutils/def-file-format.html
+ //
+ // @@ Maybe CONSTANT is more appropriate than DATA?
+ //
for (const string& s: syms.r)
{
if (s.find_first_of (".") != string::npos) // Special (.refptr.*)
diff --git a/libbuild2/bin/init.cxx b/libbuild2/bin/init.cxx
index 78119cb..610082e 100644
--- a/libbuild2/bin/init.cxx
+++ b/libbuild2/bin/init.cxx
@@ -41,16 +41,19 @@ namespace build2
bool
vars_init (scope& rs,
- scope&,
- const location&,
- bool first,
+ scope& bs,
+ const location& loc,
+ bool,
bool,
module_init_extra&)
{
tracer trace ("bin::vars_init");
l5 ([&]{trace << "for " << rs;});
- assert (first);
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "bin.vars module must be loaded in project root";
// Enter variables.
//
@@ -79,6 +82,9 @@ namespace build2
// example, addition of rpaths for prerequisite libraries (see the cc
// module for an example). Default is true.
//
+ // Note also that a rule may need to make rpath relative if
+ // install.relocatable is true.
+ //
vp.insert<dir_paths> ("config.bin.rpath");
vp.insert<bool> ("config.bin.rpath.auto");
@@ -107,12 +113,12 @@ namespace build2
// Link whole archive. Note: with target visibility.
//
// The lookup semantics is as follows: we first look for a prerequisite-
- // specific value, then for a target-specific value in the library being
- // linked, and then for target type/pattern-specific value starting from
- // the scope of the target being linked-to. In that final lookup we do
- // not look in the target being linked-to itself since that is used to
- // indicate how this target should be linked to other targets. For
- // example:
+ // specific value, then for a target-specific value in the prerequisite
+ // library, and then for target type/pattern-specific value starting
+ // from the scope of the target being linked. In that final lookup we do
+ // not look in the target being linked itself since that is used to
+ // indicate how this target should be used as a prerequisite of other
+ // targets. For example:
//
// exe{test}: liba{foo}
// liba{foo}: libua{foo1 foo2}
@@ -153,6 +159,68 @@ namespace build2
return true;
}
+ bool
+ types_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("bin::types_init");
+ l5 ([&]{trace << "for " << rs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "bin.types module must be loaded in project root";
+
+ // Register target types.
+ //
+ // Note that certain platform-specific and toolchain-specific types are
+ // registered in bin and bin.ld.
+ //
+ // Note also that it would make sense to configure their default
+ // "installability" here but that requires the knowledge of the platform
+ // in some cases. So we do it all in bin for now. One way to support
+ // both use-cases would be to detect if we are loaded after bin.guess
+ // and then decide whether to do it here or delay to bin.
+ //
+ // NOTE: remember to update the documentation if changing anything here!
+ //
+ rs.insert_target_type<obj> ();
+ rs.insert_target_type<obje> ();
+ rs.insert_target_type<obja> ();
+ rs.insert_target_type<objs> ();
+
+ rs.insert_target_type<bmi> ();
+ rs.insert_target_type<bmie> ();
+ rs.insert_target_type<bmia> ();
+ rs.insert_target_type<bmis> ();
+
+ rs.insert_target_type<hbmi> ();
+ rs.insert_target_type<hbmie> ();
+ rs.insert_target_type<hbmia> ();
+ rs.insert_target_type<hbmis> ();
+
+ rs.insert_target_type<libul> ();
+ rs.insert_target_type<libue> ();
+ rs.insert_target_type<libua> ();
+ rs.insert_target_type<libus> ();
+
+ rs.insert_target_type<lib> ();
+ rs.insert_target_type<liba> ();
+ rs.insert_target_type<libs> ();
+
+ // Register the def{} target type. Note that we do it here since it is
+ // input and can be specified unconditionally (i.e., not only when
+ // building for Windows).
+ //
+ rs.insert_target_type<def> ();
+
+ return true;
+ }
+
void
functions (function_map&); // functions.cxx
@@ -447,53 +515,22 @@ namespace build2
tracer trace ("bin::init");
l5 ([&]{trace << "for " << bs;});
- // Load bin.config.
+ // Load bin.{config,types}.
//
load_module (rs, rs, "bin.config", loc, extra.hints);
+ load_module (rs, rs, "bin.types", loc);
// Cache some config values we will be needing below.
//
const target_triplet& tgt (cast<target_triplet> (rs["bin.target"]));
- // Register target types and configure their default "installability".
+ // Configure target type default "installability". Also register
+ // additional platform-specific types.
//
bool install_loaded (cast_false<bool> (rs["install.loaded"]));
{
using namespace install;
- if (first)
- {
- rs.insert_target_type<obj> ();
- rs.insert_target_type<obje> ();
- rs.insert_target_type<obja> ();
- rs.insert_target_type<objs> ();
-
- rs.insert_target_type<bmi> ();
- rs.insert_target_type<bmie> ();
- rs.insert_target_type<bmia> ();
- rs.insert_target_type<bmis> ();
-
- rs.insert_target_type<hbmi> ();
- rs.insert_target_type<hbmie> ();
- rs.insert_target_type<hbmia> ();
- rs.insert_target_type<hbmis> ();
-
- rs.insert_target_type<libul> ();
- rs.insert_target_type<libue> ();
- rs.insert_target_type<libua> ();
- rs.insert_target_type<libus> ();
-
- rs.insert_target_type<lib> ();
- rs.insert_target_type<liba> ();
- rs.insert_target_type<libs> ();
-
- // Register the def{} target type. Note that we do it here since it
- // is input and can be specified unconditionally (i.e., not only
- // when building for Windows).
- //
- rs.insert_target_type<def> ();
- }
-
// Note: libu*{} members are not installable.
//
if (install_loaded)
@@ -543,6 +580,8 @@ namespace build2
if (tgt.cpu == "wasm32" || tgt.cpu == "wasm64")
{
+ // @@ TODO: shouldn't this be wrapped in if(first) somehow?
+
const target_type& wasm (
rs.derive_target_type(
target_type {
@@ -553,7 +592,7 @@ namespace build2
nullptr, /* default_extension */
&target_pattern_fix<wasm_ext>,
&target_print_0_ext_verb, // Fixed extension, no use printing.
- &file_search,
+ &target_search, // Note: don't look for an existing file.
target_type::flag::none}));
if (install_loaded)
@@ -939,6 +978,8 @@ namespace build2
if (lid == "msvc")
{
+ // @@ TODO: shouldn't this be wrapped in if(first) somehow?
+
const target_type& pdb (
rs.derive_target_type(
target_type {
@@ -949,7 +990,7 @@ namespace build2
nullptr, /* default_extension */
&target_pattern_fix<pdb_ext>,
&target_print_0_ext_verb, // Fixed extension, no use printing.
- &file_search,
+ &target_search, // Note: don't look for an existing file.
target_type::flag::none}));
if (cast_false<bool> (rs["install.loaded"]))
@@ -1213,8 +1254,8 @@ namespace build2
// changing anything here.
{"bin.vars", nullptr, vars_init},
+ {"bin.types", nullptr, types_init},
{"bin.config", nullptr, config_init},
- {"bin", nullptr, init},
{"bin.ar.config", nullptr, ar_config_init},
{"bin.ar", nullptr, ar_init},
{"bin.ld.config", nullptr, ld_config_init},
@@ -1224,6 +1265,7 @@ namespace build2
{"bin.nm.config", nullptr, nm_config_init},
{"bin.nm", nullptr, nm_init},
{"bin.def", nullptr, def_init},
+ {"bin", nullptr, init},
{nullptr, nullptr, nullptr}
};
diff --git a/libbuild2/bin/init.hxx b/libbuild2/bin/init.hxx
index 4eb0f10..b163bf5 100644
--- a/libbuild2/bin/init.hxx
+++ b/libbuild2/bin/init.hxx
@@ -20,9 +20,11 @@ namespace build2
// Submodules:
//
// `bin.vars` -- registers some variables.
+ // `bin.types` -- registers target types.
// `bin.config` -- loads bin.vars and sets some variables.
- // `bin` -- loads bin.config and registers target types and
- // rules.
+ // `bin` -- loads bin.{types,config} and registers rules and
+ // functions.
+ //
// `bin.ar.config` -- loads bin.config and registers/sets more variables.
// `bin.ar` -- loads bin and bin.ar.config.
//
diff --git a/libbuild2/bin/target.cxx b/libbuild2/bin/target.cxx
index 38572ef..7e4875a 100644
--- a/libbuild2/bin/target.cxx
+++ b/libbuild2/bin/target.cxx
@@ -374,7 +374,7 @@ namespace build2
&target_extension_var<nullptr>,
&target_pattern_var<nullptr>,
nullptr,
- &file_search,
+ &target_search, // Note: not _file(); don't look for an existing file.
target_type::flag::none
};
@@ -387,7 +387,7 @@ namespace build2
&target_extension_var<nullptr>,
&target_pattern_var<nullptr>,
nullptr,
- &file_search,
+ &target_search, // Note: not _file(); don't look for an existing file.
target_type::flag::none
};
@@ -452,7 +452,7 @@ namespace build2
&target_extension_var<nullptr>,
&target_pattern_var<nullptr>,
nullptr,
- &file_search,
+ &target_search, // Note: not _file(); don't look for an existing file.
target_type::flag::none
};
diff --git a/libbuild2/bin/target.hxx b/libbuild2/bin/target.hxx
index 9685e39..8f2a92e 100644
--- a/libbuild2/bin/target.hxx
+++ b/libbuild2/bin/target.hxx
@@ -412,6 +412,21 @@ namespace build2
virtual group_view
group_members (action) const override;
+ // Match options for the install operation on the liba{}/libs{} and
+ // libua{}/libus{} target types (note: not lib{}/libul{} nor libue{}).
+ //
+ // If only the install_runtime option is specified, then only the
+ // runtime files are installed, omitting everything buildtime (headers,
+ // pkg-config files, shared library version-related symlinks, etc.).
+ //
+ // Note that it's either runtime-only or runtime and buildtime (i.e.,
+ // everything), so match with install_all instead of install_buildtime
+ // (the latter is only useful in the rule implementations).
+ //
+ static constexpr uint64_t option_install_runtime = 0x01;
+ static constexpr uint64_t option_install_buildtime = 0x02;
+ static constexpr uint64_t option_install_all = match_extra::all_options;
+
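+ // A hypothetical usage sketch: an install rule that only needs the
+ // runtime files of a library prerequisite pt could request that with
+ // something like match_sync (a, pt, option_install_runtime) (with the
+ // constant suitably qualified), while passing option_install_all would
+ // match everything.
+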
public:
static const target_type static_type;
};
diff --git a/libbuild2/bin/utility.cxx b/libbuild2/bin/utility.cxx
index 2a87bbd..a03ea50 100644
--- a/libbuild2/bin/utility.cxx
+++ b/libbuild2/bin/utility.cxx
@@ -89,7 +89,9 @@ namespace build2
// Make sure group members are resolved.
//
group_view gv (resolve_members (a, l));
- assert (gv.members != nullptr);
+
+ if (gv.members == nullptr)
+ fail << "group " << l << " has no members";
pair<otype, bool> p (
link_member (lmembers {l.a != nullptr, l.s != nullptr}, li.order));
diff --git a/libbuild2/build/script/builtin-options.cxx b/libbuild2/build/script/builtin-options.cxx
index f7ba0e7..dba3c59 100644
--- a/libbuild2/build/script/builtin-options.cxx
+++ b/libbuild2/build/script/builtin-options.cxx
@@ -188,6 +188,56 @@ namespace build2
}
};
+ template <typename K, typename V, typename C>
+ struct parser<std::multimap<K, V, C> >
+ {
+ static void
+ parse (std::multimap<K, V, C>& m, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ std::size_t pos (s.position ());
+ std::string ov (s.next ());
+ std::string::size_type p = ov.find ('=');
+
+ K k = K ();
+ V v = V ();
+ std::string kstr (ov, 0, p);
+ std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (o),
+ 0
+ };
+
+ bool dummy;
+ if (!kstr.empty ())
+ {
+ av[1] = const_cast<char*> (kstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<K>::parse (k, dummy, s);
+ }
+
+ if (!vstr.empty ())
+ {
+ av[1] = const_cast<char*> (vstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<V>::parse (v, dummy, s);
+ }
+
+ m.insert (typename std::multimap<K, V, C>::value_type (k, v));
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
template <typename X, typename T, T X::*M>
void
thunk (X& x, scanner& s)
@@ -239,7 +289,15 @@ namespace build2
adhoc_ (),
cwd_ (),
cwd_specified_ (false),
- drop_cycles_ ()
+ drop_cycles_ (),
+ target_what_ (),
+ target_what_specified_ (false),
+ target_default_type_ (),
+ target_default_type_specified_ (false),
+ target_extension_type_ (),
+ target_extension_type_specified_ (false),
+ target_cwd_ (),
+ target_cwd_specified_ (false)
{
}
@@ -341,6 +399,18 @@ namespace build2
&depdb_dyndep_options::cwd_specified_ >;
_cli_depdb_dyndep_options_map_["--drop-cycles"] =
&::build2::build::cli::thunk< depdb_dyndep_options, &depdb_dyndep_options::drop_cycles_ >;
+ _cli_depdb_dyndep_options_map_["--target-what"] =
+ &::build2::build::cli::thunk< depdb_dyndep_options, string, &depdb_dyndep_options::target_what_,
+ &depdb_dyndep_options::target_what_specified_ >;
+ _cli_depdb_dyndep_options_map_["--target-default-type"] =
+ &::build2::build::cli::thunk< depdb_dyndep_options, string, &depdb_dyndep_options::target_default_type_,
+ &depdb_dyndep_options::target_default_type_specified_ >;
+ _cli_depdb_dyndep_options_map_["--target-extension-type"] =
+ &::build2::build::cli::thunk< depdb_dyndep_options, map<string, string>, &depdb_dyndep_options::target_extension_type_,
+ &depdb_dyndep_options::target_extension_type_specified_ >;
+ _cli_depdb_dyndep_options_map_["--target-cwd"] =
+ &::build2::build::cli::thunk< depdb_dyndep_options, dir_path, &depdb_dyndep_options::target_cwd_,
+ &depdb_dyndep_options::target_cwd_specified_ >;
}
};
diff --git a/libbuild2/build/script/builtin-options.hxx b/libbuild2/build/script/builtin-options.hxx
index 590d3b2..a8c3440 100644
--- a/libbuild2/build/script/builtin-options.hxx
+++ b/libbuild2/build/script/builtin-options.hxx
@@ -174,6 +174,66 @@ namespace build2
void
drop_cycles (const bool&);
+ const string&
+ target_what () const;
+
+ string&
+ target_what ();
+
+ void
+ target_what (const string&);
+
+ bool
+ target_what_specified () const;
+
+ void
+ target_what_specified (bool);
+
+ const string&
+ target_default_type () const;
+
+ string&
+ target_default_type ();
+
+ void
+ target_default_type (const string&);
+
+ bool
+ target_default_type_specified () const;
+
+ void
+ target_default_type_specified (bool);
+
+ const map<string, string>&
+ target_extension_type () const;
+
+ map<string, string>&
+ target_extension_type ();
+
+ void
+ target_extension_type (const map<string, string>&);
+
+ bool
+ target_extension_type_specified () const;
+
+ void
+ target_extension_type_specified (bool);
+
+ const dir_path&
+ target_cwd () const;
+
+ dir_path&
+ target_cwd ();
+
+ void
+ target_cwd (const dir_path&);
+
+ bool
+ target_cwd_specified () const;
+
+ void
+ target_cwd_specified (bool);
+
// Implementation details.
//
protected:
@@ -201,6 +261,14 @@ namespace build2
dir_path cwd_;
bool cwd_specified_;
bool drop_cycles_;
+ string target_what_;
+ bool target_what_specified_;
+ string target_default_type_;
+ bool target_default_type_specified_;
+ map<string, string> target_extension_type_;
+ bool target_extension_type_specified_;
+ dir_path target_cwd_;
+ bool target_cwd_specified_;
};
}
}
diff --git a/libbuild2/build/script/builtin-options.ixx b/libbuild2/build/script/builtin-options.ixx
index ea06a0f..20847c2 100644
--- a/libbuild2/build/script/builtin-options.ixx
+++ b/libbuild2/build/script/builtin-options.ixx
@@ -233,6 +233,126 @@ namespace build2
{
this->drop_cycles_ = x;
}
+
+ inline const string& depdb_dyndep_options::
+ target_what () const
+ {
+ return this->target_what_;
+ }
+
+ inline string& depdb_dyndep_options::
+ target_what ()
+ {
+ return this->target_what_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_what (const string& x)
+ {
+ this->target_what_ = x;
+ }
+
+ inline bool depdb_dyndep_options::
+ target_what_specified () const
+ {
+ return this->target_what_specified_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_what_specified (bool x)
+ {
+ this->target_what_specified_ = x;
+ }
+
+ inline const string& depdb_dyndep_options::
+ target_default_type () const
+ {
+ return this->target_default_type_;
+ }
+
+ inline string& depdb_dyndep_options::
+ target_default_type ()
+ {
+ return this->target_default_type_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_default_type (const string& x)
+ {
+ this->target_default_type_ = x;
+ }
+
+ inline bool depdb_dyndep_options::
+ target_default_type_specified () const
+ {
+ return this->target_default_type_specified_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_default_type_specified (bool x)
+ {
+ this->target_default_type_specified_ = x;
+ }
+
+ inline const map<string, string>& depdb_dyndep_options::
+ target_extension_type () const
+ {
+ return this->target_extension_type_;
+ }
+
+ inline map<string, string>& depdb_dyndep_options::
+ target_extension_type ()
+ {
+ return this->target_extension_type_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_extension_type (const map<string, string>& x)
+ {
+ this->target_extension_type_ = x;
+ }
+
+ inline bool depdb_dyndep_options::
+ target_extension_type_specified () const
+ {
+ return this->target_extension_type_specified_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_extension_type_specified (bool x)
+ {
+ this->target_extension_type_specified_ = x;
+ }
+
+ inline const dir_path& depdb_dyndep_options::
+ target_cwd () const
+ {
+ return this->target_cwd_;
+ }
+
+ inline dir_path& depdb_dyndep_options::
+ target_cwd ()
+ {
+ return this->target_cwd_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_cwd (const dir_path& x)
+ {
+ this->target_cwd_ = x;
+ }
+
+ inline bool depdb_dyndep_options::
+ target_cwd_specified () const
+ {
+ return this->target_cwd_specified_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_cwd_specified (bool x)
+ {
+ this->target_cwd_specified_ = x;
+ }
}
}
}
diff --git a/libbuild2/build/script/builtin.cli b/libbuild2/build/script/builtin.cli
index 7d0936f..5aea034 100644
--- a/libbuild2/build/script/builtin.cli
+++ b/libbuild2/build/script/builtin.cli
@@ -17,8 +17,8 @@ namespace build2
//
class depdb_dyndep_options
{
- // Note that --byproduct, if any, must be the first option and is
- // handled ad hoc, kind of as a sub-command.
+ // Note that --byproduct or --dyn-target, if any, must be the first
+ // option and is handled ad hoc.
//
// Similarly, --update-{include,exclude} are handled ad hoc and must
// be literals, similar to the -- separator. They specify prerequisite
@@ -40,23 +40,48 @@ namespace build2
// with support for generated files (and thus -I) at least in the make
// format where we use relative paths for non-existent files.
//
+ // Currently supported dependency formats (--format) are `make`
+ // (default) and `lines`.
+ //
+ // The `make` format is the make dependency declaration in the
+ // `<target>...: [<prerequisite>...]` form. In the non-byproduct mode
+ // a relative prerequisite path is considered non-existent.
+ //
+ // The `lines` format lists targets and/or prerequisites one per line.
+ // If the --dyn-target option is specified then the target list is
+ // expected to come first separated from the prerequisites list with a
+ // blank line. If there are no prerequisites, then the blank line can
+ // be omitted. If the --dyn-target option is not specified, then all
+ // lines are treated as prerequisites and there should be no blank
+ // lines. In the non-byproduct mode a prerequisite line that starts
+ // with a leading space is considered a non-existent prerequisite.
+ // Currently only relative non-existent prerequisites are supported.
+ // Finally, in this mode, if the prerequisite is syntactically a
+ // directory (that is, it ends with a trailing directory separator),
+ // then it is added as fsdir{}. This can be used to handle situations
+ // where the dynamic targets are placed into subdirectories.
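+ //
+ // For example (hypothetical file names), with --dyn-target the input
+ // could look like this, with the target list first, then a blank line,
+ // then the prerequisites (the leading space marks a non-existent
+ // prerequisite and the trailing directory separator makes it fsdir{}):
+ //
+ //   gen/foo.hxx
+ //   gen/foo.cxx
+ //
+ //   foo.in
+ //    gen/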
+ //
// Note on naming: whenever we (may) have two options, one for target
// and the other for prerequisite, we omit "prerequisite" as that's
// what we extract by default and most commonly. For example:
//
- // --what --what-target
- // --default-type --default-target-type
+ // --what --target-what
+ // --default-type --target-default-type
//
path --file; // Read from file rather than stdin.
- string --format; // Dependency format: make (default).
+ string --format; // Dependency format: `make` (default),
+ // or `lines`.
- string --what; // Dependency kind, e.g., "header".
+ // Dynamic dependency extraction options.
+ //
+ string --what; // Prerequisite kind, e.g., "header".
- dir_paths --include-path|-I; // Search paths for generated files.
+ dir_paths --include-path|-I; // Search paths for generated
+ // prerequisites.
- string --default-type; // Default prerequisite type to use
- // if none could be derived from ext.
+ string --default-type; // Default prerequisite type to use if
+ // none could be derived from extension.
bool --adhoc; // Treat dynamically discovered
// prerequisites as ad hoc (so they
@@ -64,14 +89,39 @@ namespace build2
// normal mode).
dir_path --cwd; // Builtin's working directory used
- // to complete relative paths (only
- // in --byproduct mode).
+ // to complete relative paths of
+ // prerequisites (only in --byproduct
+ // mode, lines format for existing
+ // paths).
bool --drop-cycles; // Drop prerequisites that are also
// targets. Only use if you are sure
// such cycles are harmless, that is,
// the output is not affected by such
// prerequisites' content.
+
+ // Dynamic target extraction options.
+ //
+      // This functionality is enabled with the --dyn-target option and is
+      // supported for both the make and lines formats (see above). The
+      // listed targets are added as ad hoc group members (unless already
+      // specified as static members).
+ // This functionality is not available in the byproduct mode.
+ //
+ string --target-what; // Target kind, e.g., "source".
+
+ string --target-default-type; // Default target type to use if none
+ // could be derived from extension.
+
+ map<string, string> // Extension to target type mapping in
+ --target-extension-type; // the <ext>=<type> form, for example,
+ // h=hxx. This mapping is considered
+ // before attempting to automatically
+ // map the extension and so can be used
+ // to resolve ambiguities.
+
+ dir_path --target-cwd; // Builtin's working directory used to
+ // complete relative paths of targets.
+
};
}
}
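As an illustration of the options documented in builtin.cli above, a recipe's depdb preamble might invoke the builtin along these lines (a hypothetical sketch: the depgen program, its --list-deps option, and all paths are made up; the target declaration and the rest of the recipe are omitted):

  # Extract dynamic targets and prerequisites in the lines format, mapping
  # the h extension to hxx{} to resolve the h{}/hxx{} ambiguity.
  #
  depdb dyndep --dyn-target --format lines --target-extension-type h=hxx \
    -- $depgen --list-deps $path($<[0])

The lines output read from the program's stdout could then look like this (targets first, then a blank line, then prerequisites; the leading space marks a non-existent relative prerequisite and the trailing directory separator adds an fsdir{} prerequisite):

  /project/out/gen/options.hxx
  /project/out/gen/options.cxx

  /project/src/gen/options.cli
   gen/options-extra.cli
  /project/out/gen/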
diff --git a/libbuild2/build/script/parser.cxx b/libbuild2/build/script/parser.cxx
index df0a419..3ecf23d 100644
--- a/libbuild2/build/script/parser.cxx
+++ b/libbuild2/build/script/parser.cxx
@@ -47,7 +47,7 @@ namespace build2
{
path_ = &pn;
- pre_parse_ = true;
+ top_pre_parse_ = pre_parse_ = true;
lexer l (is, *path_, line, lexer_mode::command_line);
set_lexer (&l);
@@ -61,7 +61,7 @@ namespace build2
pbase_ = scope_->src_path_;
- file_based_ = tt.is_a<file> ();
+ file_based_ = tt.is_a<file> () || tt.is_a<group> ();
perform_update_ = find (as.begin (), as.end (), perform_update_id) !=
as.end ();
@@ -158,6 +158,7 @@ namespace build2
{
s.depdb_dyndep = depdb_dyndep_->second;
s.depdb_dyndep_byproduct = depdb_dyndep_byproduct_;
+ s.depdb_dyndep_dyn_target = depdb_dyndep_dyn_target_;
}
s.depdb_preamble = move (depdb_preamble_);
@@ -747,8 +748,8 @@ namespace build2
}
if (!file_based_)
- fail (l) << "'depdb' builtin can only be used for file-based "
- << "targets";
+ fail (l) << "'depdb' builtin can only be used for file- or "
+ << "file group-based targets";
if (!diag_preamble_.empty ())
fail (diag_loc ()) << "'diag' builtin call before 'depdb' call" <<
@@ -830,8 +831,18 @@ namespace build2
fail (l) << "multiple 'depdb dyndep' calls" <<
info (depdb_dyndep_->first) << "previous call is here";
- if (peek () == type::word && peeked ().value == "--byproduct")
- depdb_dyndep_byproduct_ = true;
+ if (peek () == type::word)
+ {
+ const string& v (peeked ().value);
+
+ // Note: --byproduct and --dyn-target are mutually
+ // exclusive.
+ //
+ if (v == "--byproduct")
+ depdb_dyndep_byproduct_ = true;
+ else if (v == "--dyn-target")
+ depdb_dyndep_dyn_target_ = true;
+ }
}
else
{
@@ -962,6 +973,11 @@ namespace build2
if (!skip_diag)
{
+ // Sanity check: we should not be suspending the pre-parse mode
+ // turned on by the base parser.
+ //
+ assert (top_pre_parse_);
+
pre_parse_ = false; // Make parse_names() perform expansions.
pre_parse_suspended_ = true;
}
@@ -1136,6 +1152,8 @@ namespace build2
{
if (!qs)
{
+ // This could be a script from src so search like a prerequisite.
+ //
if (const target* t = search_existing (
ns[0], *scope_, ns[0].pair ? ns[1].dir : empty_dir_path))
{
@@ -1276,10 +1294,11 @@ namespace build2
}
void parser::
- exec_depdb_preamble (action a, const scope& bs, const file& t,
+ exec_depdb_preamble (action a, const scope& bs, const target& t,
environment& e, const script& s, runner& r,
lines_iterator begin, lines_iterator end,
depdb& dd,
+ dynamic_targets* dyn_targets,
bool* update,
optional<timestamp> mt,
bool* deferred_failure,
@@ -1302,18 +1321,23 @@ namespace build2
action a;
const scope& bs;
- const file& t;
+ const target& t;
environment& env;
const script& scr;
depdb& dd;
+ dynamic_targets* dyn_targets;
bool* update;
bool* deferred_failure;
optional<timestamp> mt;
dyndep_byproduct* byp;
- } data {trace, a, bs, t, e, s, dd, update, deferred_failure, mt, byp};
+ } data {
+ trace,
+ a, bs, t,
+ e, s,
+ dd, dyn_targets, update, deferred_failure, mt, byp};
auto exec_cmd = [this, &data] (token& t,
build2::script::token_type& tt,
@@ -1343,8 +1367,9 @@ namespace build2
//
exec_depdb_dyndep (t, tt,
li, ll,
- data.a, data.bs, const_cast<file&> (data.t),
+ data.a, data.bs, const_cast<target&> (data.t),
data.dd,
+ *data.dyn_targets,
*data.update,
*data.mt,
*data.deferred_failure,
@@ -1354,35 +1379,29 @@ namespace build2
{
names ns (exec_special (t, tt, true /* skip <cmd> */));
+ string v;
+ const char* w (nullptr);
if (cmd == "hash")
{
sha256 cs;
for (const name& n: ns)
to_checksum (cs, n);
- if (data.dd.expect (cs.string ()) != nullptr)
- l4 ([&] {
- data.trace (ll)
- << "'depdb hash' argument change forcing update of "
- << data.t;});
+ v = cs.string ();
+ w = "argument";
}
else if (cmd == "string")
{
- string s;
try
{
- s = convert<string> (move (ns));
+ v = convert<string> (move (ns));
}
catch (const invalid_argument& e)
{
fail (ll) << "invalid 'depdb string' argument: " << e;
}
- if (data.dd.expect (s) != nullptr)
- l4 ([&] {
- data.trace (ll)
- << "'depdb string' argument change forcing update of "
- << data.t;});
+ w = "argument";
}
else if (cmd == "env")
{
@@ -1403,14 +1422,32 @@ namespace build2
fail (ll) << pf << e;
}
- if (data.dd.expect (cs.string ()) != nullptr)
- l4 ([&] {
- data.trace (ll)
- << "'depdb env' environment change forcing update of "
- << data.t;});
+ v = cs.string ();
+ w = "environment";
}
else
assert (false);
+
+ // Prefix the value with the type letter. This serves two
+ // purposes:
+ //
+ // 1. It makes sure the result is never a blank line. We use
+ // blank lines as anchors to skip directly to certain entries
+ // (e.g., dynamic targets).
+ //
+ // 2. It allows us to detect the beginning of prerequisites
+ // since an absolute path will be distinguishable from these
+ // entries (in the future we may want to add an explicit
+ // blank after such custom entries to make this easier).
+ //
+ v.insert (0, 1, ' ');
+ v.insert (0, 1, cmd[0]); // `h`, `s`, or `e`
+
+ if (data.dd.expect (v) != nullptr)
+ l4 ([&] {
+ data.trace (ll)
+ << "'depdb " << cmd << "' " << w << " change forcing "
+ << "update of " << data.t;});
}
}
else
@@ -1515,7 +1552,7 @@ namespace build2
{
path_ = nullptr; // Set by replays.
- pre_parse_ = false;
+ top_pre_parse_ = pre_parse_ = false;
set_lexer (nullptr);
@@ -1615,8 +1652,9 @@ namespace build2
void parser::
exec_depdb_dyndep (token& lt, build2::script::token_type& ltt,
size_t li, const location& ll,
- action a, const scope& bs, file& t,
+ action a, const scope& bs, target& t,
depdb& dd,
+ dynamic_targets& dyn_targets,
bool& update,
timestamp mt,
bool& deferred_failure,
@@ -1629,6 +1667,7 @@ namespace build2
depdb_dyndep_options ops;
bool prog (false);
bool byprod (false);
+ bool dyn_tgt (false);
// Prerequisite update filter (--update-*).
//
@@ -1671,11 +1710,9 @@ namespace build2
next (t, tt); // Skip the 'dyndep' command.
- if (tt == type::word && t.value == "--byproduct")
- {
- byprod = true;
+ if (tt == type::word && ((byprod = (t.value == "--byproduct")) ||
+ (dyn_tgt = (t.value == "--dyn-target"))))
next (t, tt);
- }
assert (byprod == (byprod_result != nullptr));
@@ -1894,10 +1931,23 @@ namespace build2
continue;
}
- // Handle --byproduct in the wrong place.
+ // Handle --byproduct and --dyn-target in the wrong place.
//
if (strcmp (a, "--byproduct") == 0)
- fail (ll) << "depdb dyndep: --byproduct must be first option";
+ {
+ fail (ll) << "depdb dyndep: "
+ << (dyn_tgt
+ ? "--byproduct specified with --dyn-target"
+ : "--byproduct must be first option");
+ }
+
+ if (strcmp (a, "--dyn-target") == 0)
+ {
+ fail (ll) << "depdb dyndep: "
+ << (byprod
+ ? "--dyn-target specified with --byproduct"
+ : "--dyn-target must be first option");
+ }
// Handle non-literal --update-*.
//
@@ -1922,29 +1972,31 @@ namespace build2
}
}
- // --what
- //
- const char* what (ops.what_specified ()
- ? ops.what ().c_str ()
- : "file");
-
// --format
//
dyndep_format format (dyndep_format::make);
-
if (ops.format_specified ())
{
const string& f (ops.format ());
- if (f != "make")
+ if (f == "lines") format = dyndep_format::lines;
+ else if (f != "make")
fail (ll) << "depdb dyndep: invalid --format option value '"
<< f << "'";
}
+ // Prerequisite-specific options.
+ //
+
+ // --what
+ //
+ const char* what (ops.what_specified ()
+ ? ops.what ().c_str ()
+ : "file");
+
// --cwd
//
optional<dir_path> cwd;
-
if (ops.cwd_specified ())
{
if (!byprod)
@@ -1964,28 +2016,6 @@ namespace build2
fail (ll) << "depdb dyndep: -I specified with --byproduct";
}
- // --file
- //
- // Note that if --file is specified without a program, then we assume
- // it is one of the static prerequisites.
- //
- optional<path> file;
-
- if (ops.file_specified ())
- {
- file = move (ops.file ());
-
- if (file->relative ())
- {
- if (!cwd)
- fail (ll) << "depdb dyndep: relative path specified with --file";
-
- *file = *cwd / *file;
- }
- }
- else if (!prog)
- fail (ll) << "depdb dyndep: program or --file expected";
-
// --default-type
//
// Get the default prerequisite type falling back to file{} if not
@@ -1997,7 +2027,7 @@ namespace build2
// translation unit would want to make sure it resolves extracted
// system headers to h{} targets analogous to the c module's rule.
//
- const target_type* def_pt;
+ const target_type* def_pt (&file::static_type);
if (ops.default_type_specified ())
{
const string& t (ops.default_type ());
@@ -2005,10 +2035,8 @@ namespace build2
def_pt = bs.find_target_type (t);
if (def_pt == nullptr)
fail (ll) << "depdb dyndep: unknown target type '" << t
- << "' specific with --default-type";
+ << "' specified with --default-type";
}
- else
- def_pt = &file::static_type;
// --adhoc
//
@@ -2018,6 +2046,93 @@ namespace build2
fail (ll) << "depdb dyndep: --adhoc specified with --byproduct";
}
+ // Target-specific options.
+ //
+
+ // --target-what
+ //
+ const char* what_tgt ("file");
+ if (ops.target_what_specified ())
+ {
+ if (!dyn_tgt)
+ fail (ll) << "depdb dyndep: --target-what specified without "
+ << "--dyn-target";
+
+ what_tgt = ops.target_what ().c_str ();
+ }
+
+ // --target-cwd
+ //
+ optional<dir_path> cwd_tgt;
+ if (ops.target_cwd_specified ())
+ {
+ if (!dyn_tgt)
+ fail (ll) << "depdb dyndep: --target-cwd specified without "
+ << "--dyn-target";
+
+ cwd_tgt = move (ops.target_cwd ());
+
+ if (cwd_tgt->relative ())
+ fail (ll) << "depdb dyndep: relative path specified with "
+ << "--target-cwd";
+ }
+
+ // --target-default-type
+ //
+ const target_type* def_tt (&file::static_type);
+ if (ops.target_default_type_specified ())
+ {
+ if (!dyn_tgt)
+ fail (ll) << "depdb dyndep: --target-default-type specified "
+ << "without --dyn-target";
+
+ const string& t (ops.target_default_type ());
+
+ def_tt = bs.find_target_type (t);
+ if (def_tt == nullptr)
+ fail (ll) << "depdb dyndep: unknown target type '" << t
+ << "' specified with --target-default-type";
+ }
+
+ map<string, const target_type*> map_tt;
+ if (ops.target_extension_type_specified ())
+ {
+ if (!dyn_tgt)
+ fail (ll) << "depdb dyndep: --target-extension-type specified "
+ << "without --dyn-target";
+
+ for (pair<const string, string>& p: ops.target_extension_type ())
+ {
+ const target_type* tt (bs.find_target_type (p.second));
+ if (tt == nullptr)
+ fail (ll) << "depdb dyndep: unknown target type '" << p.second
+ << "' specified with --target-extension-type";
+
+ map_tt[p.first] = tt;
+ }
+ }
+
+ // --file (last since need --*cwd)
+ //
+ // Note that if --file is specified without a program, then we assume
+ // it is one of the static prerequisites.
+ //
+ optional<path> file;
+ if (ops.file_specified ())
+ {
+ file = move (ops.file ());
+
+ if (file->relative ())
+ {
+ if (!cwd && !cwd_tgt)
+ fail (ll) << "depdb dyndep: relative path specified with --file";
+
+ *file = (cwd ? *cwd : *cwd_tgt) / *file;
+ }
+ }
+ else if (!prog)
+ fail (ll) << "depdb dyndep: program or --file expected";
+
// Update prerequisite targets.
//
using dyndep = dyndep_rule;
@@ -2148,6 +2263,10 @@ namespace build2
return;
}
+ const scope& rs (*bs.root_scope ());
+
+ group* g (t.is_a<group> ()); // If not group then file.
+
// This code is based on the prior work in the cc module (specifically
// extract_headers()) where you can often find more detailed rationale
// for some of the steps performed.
@@ -2245,9 +2364,29 @@ namespace build2
command_expr cmd;
srcout_map so_map;
+ // Save/restore script cleanups.
+ //
+ struct cleanups
+ {
+ build2::script::cleanups ordinary;
+ paths special;
+ };
+ optional<cleanups> script_cleanups;
+
+ auto cleanups_guard = make_guard (
+ [this, &script_cleanups] ()
+ {
+ if (script_cleanups)
+ {
+ swap (environment_->cleanups, script_cleanups->ordinary);
+ swap (environment_->special_cleanups, script_cleanups->special);
+ }
+ });
+
auto init_run = [this, &ctx,
&lt, &ltt, &ll,
- prog, &file, &ops, &cmd, &so_map] ()
+ prog, &file, &ops,
+ &cmd, &so_map, &script_cleanups] ()
{
// Populate the srcout map with the -I$out_base -I$src_base pairs.
//
@@ -2260,6 +2399,10 @@ namespace build2
if (prog)
{
+ script_cleanups = cleanups {};
+ swap (environment_->cleanups, script_cleanups->ordinary);
+ swap (environment_->special_cleanups, script_cleanups->special);
+
cmd = parse_command_line (lt, static_cast<token_type&> (ltt));
// If the output goes to stdout, then this should be a single
@@ -2275,15 +2418,10 @@ namespace build2
// they include the line index in their names to avoid clashes
// between lines).
//
- // Cleanups are not an issue, they will simply replaced. And
+ // Cleanups are not an issue, they will simply be replaced. And
// overriding the contents of the special files seems harmless and
// consistent with what would happen if the command redirects its
// output to a non-special file.
- //
- if (file)
- environment_->clean (
- {build2::script::cleanup_type::always, *file},
- true /* implicit */);
}
};
@@ -2293,7 +2431,7 @@ namespace build2
size_t skip_count (0);
auto add = [this, &trace, what,
- a, &bs, &t, &pts, pts_n = pts.size (),
+ a, &bs, &t, g, &pts, pts_n = pts.size (),
&ops, &map_ext, def_pt, &pfx_map, &so_map,
&dd, &skip_count] (path fp,
size_t* skip,
@@ -2303,6 +2441,61 @@ namespace build2
bool cache (skip == nullptr);
+ // Handle fsdir{} prerequisite separately.
+ //
+ // Note: inspired by inject_fsdir().
+ //
+ if (fp.to_directory ())
+ {
+ if (!cache)
+ {
+ // Note: already absolute since cannot be non-existent.
+ //
+ fp.normalize ();
+ }
+
+ const fsdir* dt (&search<fsdir> (t,
+ path_cast<dir_path> (fp),
+ dir_path (),
+ string (), nullptr, nullptr));
+
+ // Subset of code for file below.
+ //
+ if (!cache)
+ {
+ for (size_t i (0); i != pts_n; ++i)
+ {
+ const prerequisite_target& p (pts[i]);
+
+ if (const target* pt =
+ (p.target != nullptr ? p.target :
+ p.adhoc () ? reinterpret_cast<target*> (p.data) :
+ nullptr))
+ {
+ if (dt == pt)
+ return false;
+ }
+ }
+
+ if (*skip != 0)
+ {
+ --(*skip);
+ return false;
+ }
+ }
+
+ match_sync (a, *dt);
+ pts.push_back (
+ prerequisite_target (
+ nullptr, true /* adhoc */, reinterpret_cast<uintptr_t> (dt)));
+
+ if (!cache)
+ dd.expect (fp.representation ());
+
+ skip_count++;
+ return false;
+ }
+
// We can only defer the failure if we will be running the recipe
// body.
//
@@ -2357,13 +2550,26 @@ namespace build2
// Skip if this is one of the targets.
//
+ // Note that for dynamic targets this only works if we see the
+ // targets before prerequisites (like in the make dependency
+ // format).
+ //
if (ops.drop_cycles ())
{
- for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ if (g != nullptr)
{
- if (ft == m)
+ auto& ms (g->members);
+ if (find (ms.begin (), ms.end (), ft) != ms.end ())
return false;
}
+ else
+ {
+ for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ {
+ if (ft == m)
+ return false;
+ }
+ }
}
// Skip until where we left off.
@@ -2392,10 +2598,15 @@ namespace build2
{
prerequisite_target& pt (pts.back ());
+ // Note: set the include_target flag for consistency (the
+ // updated_during_match() check does not apply since it's a
+ // dynamic prerequisite).
+ //
if (pt.adhoc ())
{
pt.data = reinterpret_cast<uintptr_t> (pt.target);
pt.target = nullptr;
+ pt.include |= prerequisite_target::include_target;
}
else
pt.data = 1; // Already updated.
@@ -2429,13 +2640,27 @@ namespace build2
<< t;
});
+ // While in the make format targets come before prerequisites, in
+ // depdb we store them after since any change to prerequisites can
+ // invalidate the set of targets. So we save them first and process
+ // later.
+ //
+ // Note also that we need to return them to the caller in case we are
+ // updating.
+
// If nothing so far has invalidated the dependency database, then try
// the cached data before running the program.
//
bool cache (!update);
+ bool skip_blank (false);
for (bool restart (true), first_run (true); restart; cache = false)
{
+ // Clear the state in case we are restarting.
+ //
+ if (dyn_tgt)
+ dyn_targets.clear ();
+
restart = false;
if (cache)
@@ -2444,7 +2669,8 @@ namespace build2
//
assert (skip_count == 0);
- // We should always end with a blank line.
+ // We should always end with a blank line after the list of
+ // dynamic prerequisites.
//
for (;;)
{
@@ -2458,8 +2684,11 @@ namespace build2
break;
}
- if (l->empty ()) // Done, nothing changed.
- return;
+ if (l->empty ()) // Done with prerequisites, nothing changed.
+ {
+ skip_blank = true;
+ break;
+ }
if (optional<bool> r = add (path (move (*l)), nullptr, mt))
{
@@ -2481,6 +2710,52 @@ namespace build2
return;
}
}
+
+ if (!restart) // Nothing changed.
+ {
+ if (dyn_tgt)
+ {
+ // We should always end with a blank line after the list of
+ // dynamic targets.
+ //
+ for (;;)
+ {
+ string* l (dd.read ());
+
+ // If the line is invalid, run the compiler.
+ //
+ if (l == nullptr)
+ {
+ restart = true;
+ break;
+ }
+
+ if (l->empty ()) // Done with targets.
+ break;
+
+ // Split into type and path (see below for background).
+ //
+ size_t p (l->find (' '));
+ if (p == string::npos || // Invalid format.
+ p == 0 || // Empty type.
+ p + 1 == l->size ()) // Empty path.
+ {
+ dd.write (); // Invalidate this line.
+ restart = true;
+ break;
+ }
+
+ string t (*l, 0, p);
+ l->erase (0, p + 1);
+
+ dyn_targets.push_back (
+ dynamic_target {move (t), path (move (*l))});
+ }
+ }
+
+ if (!restart) // Done, nothing changed.
+                  break; // Break early to keep cache=true.
+ }
}
else
{
@@ -2489,9 +2764,16 @@ namespace build2
init_run ();
first_run = false;
}
- else if (!prog)
+ else
{
- fail (ll) << "generated " << what << " without program to retry";
+ if (!prog)
+ fail (ll) << "generated " << what << " without program to retry";
+
+ // Drop dyndep cleanups accumulated on the previous run.
+ //
+ assert (script_cleanups); // Sanity check.
+ environment_->cleanups.clear ();
+ environment_->special_cleanups.clear ();
}
// Save the timestamp just before we run the command. If we depend
@@ -2545,8 +2827,17 @@ namespace build2
iss.exceptions (istream::badbit);
}
else
+ {
build2::script::run (
*environment_, cmd, nullptr /* iteration_index */, li, ll);
+
+ // Note: make it a maybe-cleanup in case the command cleans it
+ // up itself.
+ //
+ environment_->clean (
+ {build2::script::cleanup_type::maybe, *file},
+ true /* implicit */);
+ }
}
ifdstream ifs (ifdstream::badbit);
@@ -2614,32 +2905,72 @@ namespace build2
if (r.second.empty ())
continue;
- // @@ TODO: what should we do about targets?
+ // Skip targets unless requested to extract.
//
- // Note that if we take GCC as an example, things are
+                    // BTW, if you are wondering why we don't extract targets
+ // by default, take GCC as an example, where things are
// quite messed up: by default it ignores -o and just
// takes the source file name and replaces the extension
// with a platform-appropriate object file extension. One
// can specify a custom target (or even multiple targets)
- // with -MT or with -MQ (quoting). Though MinGW GCC still
- // does not quote `:` with -MQ. So in this case it's
+ // with -MT or with -MQ (quoting). So in this case it's
// definitely easier for the user to ignore the targets
// and just specify everything in the buildfile.
//
- // On the other hand, other tools are likely to produce
- // more sensible output (except perhaps for quoting).
- //
- // @@ Maybe in the lax mode we should only recognize `:`
- // if it's separated on at least one side?
- //
- // Alternatively, we could detect Windows drives in
- // paths and "handle" them (I believe this is what GNU
- // make does). Maybe we should have three formats:
- // make-lax, make, make-strict?
- //
if (r.first == make_type::target)
+ {
+ // NOTE: similar code below.
+ //
+ if (dyn_tgt)
+ {
+ path& f (r.second);
+
+ if (f.relative ())
+ {
+ if (!cwd_tgt)
+ fail (il) << "relative " << what_tgt
+ << " target path '" << f
+ << "' in make dependency declaration" <<
+ info << "consider using --target-cwd to specify "
+ << "relative path base";
+
+ f = *cwd_tgt / f;
+ }
+
+ // Note that unlike prerequisites, here we don't need
+ // normalize_external() since we expect the targets to
+ // be within this project.
+ //
+ try
+ {
+ f.normalize ();
+ }
+ catch (const invalid_path&)
+ {
+ fail (il) << "invalid " << what_tgt << " target "
+ << "path '" << f.string () << "'";
+ }
+
+ // The target must be within this project.
+ //
+ if (!f.sub (rs.out_path ()))
+ {
+ fail (il) << what_tgt << " target path " << f
+ << " must be inside project output "
+ << "directory " << rs.out_path ();
+ }
+
+ // Note: type is resolved later.
+ //
+ dyn_targets.push_back (
+ dynamic_target {string (), move (f)});
+ }
+
continue;
+ }
+ // NOTE: similar code below.
+ //
if (optional<bool> u = add (move (r.second), &skip, rmt))
{
restart = *u;
@@ -2667,20 +2998,380 @@ namespace build2
break;
}
- break;
+ break; // case
+ }
+ case dyndep_format::lines:
+ {
+ bool tgt (dyn_tgt); // Reading targets or prerequisites.
+
+ for (string l; !restart; ++il.line) // Reuse the buffer.
+ {
+ if (eof (getline (is, l)))
+ break;
+
+ if (l.empty ())
+ {
+ if (!tgt)
+ fail (il) << "blank line in prerequisites list";
+
+ tgt = false; // Targets/prerequisites separating blank.
+ continue;
+ }
+
+              // See if this line starts with a space to indicate a non-
+ // existent prerequisite. This variable serves both as a
+ // flag and as a position of the beginning of the path.
+ //
+ size_t n (l.front () == ' ' ? 1 : 0);
+
+ if (tgt)
+ {
+ // NOTE: similar code above.
+ //
+ path f;
+ try
+ {
+ // Non-existent target doesn't make sense.
+ //
+ if (n)
+ throw invalid_path ("");
+
+ f = path (l);
+
+ if (f.relative ())
+ {
+ if (!cwd_tgt)
+ fail (il) << "relative " << what_tgt
+ << " target path '" << f
+ << "' in lines dependency declaration" <<
+ info << "consider using --target-cwd to specify "
+ << "relative path base";
+
+ f = *cwd_tgt / f;
+ }
+
+ // Note that unlike prerequisites, here we don't need
+ // normalize_external() since we expect the targets to
+ // be within this project.
+ //
+ f.normalize ();
+ }
+ catch (const invalid_path&)
+ {
+ fail (il) << "invalid " << what_tgt << " target path '"
+ << l << "'";
+ }
+
+ // The target must be within this project.
+ //
+ if (!f.sub (rs.out_path ()))
+ {
+ fail (il) << what_tgt << " target path " << f
+ << " must be inside project output directory "
+ << rs.out_path ();
+ }
+
+ // Note: type is resolved later.
+ //
+ dyn_targets.push_back (
+ dynamic_target {string (), move (f)});
+ }
+ else
+ {
+ path f;
+ try
+ {
+ f = path (l.c_str () + n, l.size () - n);
+
+ if (f.empty () ||
+ (n && f.to_directory ())) // Non-existent fsdir{}.
+ throw invalid_path ("");
+
+ if (f.relative ())
+ {
+ if (!n)
+ {
+ if (!cwd)
+ fail (il) << "relative " << what
+ << " prerequisite path '" << f
+ << "' in lines dependency declaration" <<
+ info << "consider using --cwd to specify "
+ << "relative path base";
+
+ f = *cwd / f;
+ }
+ }
+ else if (n)
+ {
+ // @@ TODO: non-existent absolute paths.
+ //
+ throw invalid_path ("");
+ }
+ }
+ catch (const invalid_path&)
+ {
+ fail (il) << "invalid " << what << " prerequisite path '"
+ << l << "'";
+ }
+
+ // NOTE: similar code above.
+ //
+ if (optional<bool> u = add (move (f), &skip, rmt))
+ {
+ restart = *u;
+
+ if (restart)
+ {
+ update = true;
+ l6 ([&]{trace << "restarting";});
+ }
+ }
+ else
+ {
+ // Trigger recompilation, mark as expected to fail, and
+ // bail out.
+ //
+ update = true;
+ deferred_failure = true;
+ break;
+ }
+ }
+ }
+
+ break; // case
}
}
+ if (file)
+ ifs.close ();
+
// Bail out early if we have deferred a failure.
//
if (deferred_failure)
return;
+
+ // Clean after each depdb-dyndep execution.
+ //
+ if (prog)
+ clean (*environment_, ll);
}
}
- // Add the terminating blank line (we are updating depdb).
+ // Add the dynamic prerequisites terminating blank line if we are
+ // updating depdb and unless it's already there.
+ //
+ if (!cache && !skip_blank)
+ dd.expect ("");
+
+ // Handle dynamic targets.
//
- dd.expect ("");
+ if (dyn_tgt)
+ {
+ if (g != nullptr && g->members_static == 0 && dyn_targets.empty ())
+ fail (ll) << "group " << *g << " has no static or dynamic members";
+
+ // There is one more level (at least that we know of) to this rabbit
+ // hole: if the set of dynamic targets changes between clean and
+ // update and we do a `clean update` batch, then we will end up with
+ // old targets (as entered by clean from old depdb information)
+ // being present during update. So we need to clean them out.
+ //
+ // Optimize this for a first/single batch (common case) by noticing
+ // that there are only real targets to start with.
+ //
+ // Note that this doesn't affect explicit groups where we reset the
+ // members on each update (see adhoc_rule_buildscript::apply()).
+ //
+ optional<vector<const target*>> dts;
+ if (g == nullptr)
+ {
+ for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ {
+ if (m->decl != target_decl::real)
+ dts = vector<const target*> ();
+ }
+ }
+
+ struct map_ext_data
+ {
+ const char* what_tgt;
+ const map<string, const target_type*>& map_tt;
+ const path* f; // Updated on each iteration.
+ } d {what_tgt, map_tt, nullptr};
+
+ function<dyndep::map_extension_func> map_ext (
+ [this, &d] (const scope& bs, const string& n, const string& e)
+ {
+ small_vector<const target_type*, 2> tts;
+
+ // Check the custom mapping first.
+ //
+ auto i (d.map_tt.find (e));
+ if (i != d.map_tt.end ())
+ tts.push_back (i->second);
+ else
+ {
+ tts = dyndep::map_extension (bs, n, e, nullptr);
+
+ // Issue custom diagnostics suggesting --target-extension-type.
+ //
+ if (tts.size () > 1)
+ {
+ diag_record dr (fail);
+
+ dr << "mapping of " << d.what_tgt << " target path " << *d.f
+ << " to target type is ambiguous";
+
+ for (const target_type* tt: tts)
+ dr << info << "can be " << tt->name << "{}";
+
+ dr << info << "use --target-extension-type to provide custom "
+ << "mapping";
+ }
+ }
+
+ return tts;
+ });
+
+ function<dyndep::group_filter_func> filter;
+ if (g != nullptr)
+ {
+ // Skip static/duplicate members in explicit group.
+ //
+ filter = [] (mtime_target& g, const build2::file& m)
+ {
+ auto& ms (g.as<group> ().members);
+ return find (ms.begin (), ms.end (), &m) == ms.end ();
+ };
+ }
+
+ // Unlike for prerequisites, for targets we store in depdb both the
+ // resolved target type and path. The target type is used in clean
+ // (see adhoc_rule_buildscript::apply()) where we cannot easily get
+ // hold of all the dyndep options to map the path to target type.
+ // So the format of the target line is:
+ //
+ // <type> <path>
+ //
+ string l; // Reuse the buffer.
+ for (dynamic_target& dt: dyn_targets)
+ {
+ const path& f (dt.path);
+
+ d.f = &f; // Current file being mapped.
+
+ // Note that this logic should be consistent with what we have in
+ // adhoc_buildscript_rule::apply() for perform_clean.
+ //
+ const build2::file* ft (nullptr);
+ if (g != nullptr)
+ {
+ pair<const build2::file&, bool> r (
+ dyndep::inject_group_member (
+ what_tgt,
+ a, bs, *g,
+ f, // Can't move since need to return dyn_targets.
+ map_ext, *def_tt, filter));
+
+            // Note: no target_decl shenanigans since we reset the members on
+ // each update.
+ //
+ if (!r.second)
+ {
+ dt.type.clear (); // Static indicator.
+ continue;
+ }
+
+ ft = &r.first;
+
+            // Note: we currently only support dynamic file members, so the
+            // newly injected member is file-based.
+ //
+ g->members.push_back (ft);
+ }
+ else
+ {
+ pair<const build2::file&, bool> r (
+ dyndep::inject_adhoc_group_member (
+ what_tgt,
+ a, bs, t,
+ f, // Can't move since need to return dyn_targets.
+ map_ext, *def_tt));
+
+ // Note that we have to track the dynamic target even if it was
+ // already a member (think `b update && b clean update`).
+ //
+ if (!r.second && r.first.decl == target_decl::real)
+ {
+ dt.type.clear (); // Static indicator.
+ continue;
+ }
+
+ ft = &r.first;
+
+ if (dts)
+ dts->push_back (ft);
+ }
+
+ const char* tn (ft->type ().name);
+
+ if (dt.type.empty ())
+ dt.type = tn;
+ else if (dt.type != tn)
+ {
+ // This can, for example, happen if the user changed the
+ // extension to target type mapping. Say swapped extension
+ // variable values of two target types.
+ //
+ fail << "mapping of " << what_tgt << " target path " << f
+ << " to target type has changed" <<
+ info << "previously mapped to " << dt.type << "{}" <<
+ info << "now mapped to " << tn << "{}" <<
+ info << "perform from scratch rebuild of " << t;
+ }
+
+ if (!cache)
+ {
+ l = dt.type;
+ l += ' ';
+ l += f.string ();
+ dd.expect (l);
+ }
+ }
+
+ // Add the dynamic targets terminating blank line.
+ //
+ if (!cache)
+ dd.expect ("");
+
+ // Clean out old dynamic targets (skip the primary member).
+ //
+ if (dts)
+ {
+ assert (g == nullptr);
+
+ for (target* p (&t); p->adhoc_member != nullptr; )
+ {
+ target* m (p->adhoc_member);
+
+ if (m->decl != target_decl::real)
+ {
+ // While there could be quite a few dynamic targets (think
+ // something like Doxygen), this will hopefully be optimized
+ // down to a contiguous memory region scan for an integer and
+ // so should be fast.
+ //
+ if (find (dts->begin (), dts->end (), m) == dts->end ())
+ {
+ p->adhoc_member = m->adhoc_member; // Drop m.
+ continue;
+ }
+ }
+
+ p = m;
+ }
+ }
+ }
// Reload $< and $> to make sure they contain the newly discovered
// prerequisites and targets.
@@ -2711,6 +3402,12 @@ namespace build2
{
lookup r;
+ // Note that pre-parse can be switched on by the base parser even
+ // during execute.
+ //
+ if (!top_pre_parse_)
+ return r;
+
// Add the variable name skipping special variables and suppressing
// duplicates, unless the default variables change tracking is
// canceled with `depdb clear`. While at it, check if the script
@@ -2793,7 +3490,10 @@ namespace build2
void parser::
lookup_function (string&& name, const location& loc)
{
- if (perform_update_ && file_based_ && !impure_func_)
+ // Note that pre-parse can be switched on by the base parser even
+ // during execute.
+ //
+ if (top_pre_parse_ && perform_update_ && file_based_ && !impure_func_)
{
const function_overloads* f (ctx->functions.find (name));
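Taken together, the parser changes above give the depdb file the following rough shape when --dyn-target is used: the type-letter prefixed depdb hash/string/env values, the dynamic prerequisite paths, a terminating blank line, the dynamic targets in the <type> <path> form, and another terminating blank line. A hypothetical excerpt (entries written by the rule itself, such as the script checksum, are omitted and the paths are made up):

  e 5a3c9f1b...
  /project/src/gen/options.cli
  /project/src/gen/options-extra.cli

  hxx /project/out/gen/options.hxx
  cxx /project/out/gen/options.cxx
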
diff --git a/libbuild2/build/script/parser.hxx b/libbuild2/build/script/parser.hxx
index 70e24aa..8f86b24 100644
--- a/libbuild2/build/script/parser.hxx
+++ b/libbuild2/build/script/parser.hxx
@@ -103,8 +103,10 @@ namespace build2
// runner's enter() function is called before the first preamble/body
// command execution and leave() -- after the last command.
//
+ // Note: target must be file or group.
+ //
void
- execute_depdb_preamble (action a, const scope& base, const file& t,
+ execute_depdb_preamble (action a, const scope& base, const target& t,
environment& e, const script& s, runner& r,
depdb& dd)
{
@@ -119,18 +121,28 @@ namespace build2
dd);
}
+ struct dynamic_target
+ {
+ string type; // Target type name (absent if static member).
+ build2::path path;
+ };
+
+ using dynamic_targets = vector<dynamic_target>;
+
void
execute_depdb_preamble_dyndep (
- action a, const scope& base, file& t,
+ action a, const scope& base, target& t,
environment& e, const script& s, runner& r,
- depdb& dd, bool& update, timestamp mt, bool& deferred_failure)
+ depdb& dd,
+ dynamic_targets& dyn_targets,
+ bool& update, timestamp mt, bool& deferred_failure)
{
exec_depdb_preamble (
a, base, t,
e, s, r,
s.depdb_preamble.begin () + *s.depdb_dyndep,
s.depdb_preamble.end (),
- dd, &update, mt, &deferred_failure);
+ dd, &dyn_targets, &update, mt, &deferred_failure);
}
// This version doesn't actually execute the depdb-dyndep builtin (but
@@ -139,7 +151,7 @@ namespace build2
// depdb-dyndep --byproduct logic (which fits better into the rule
// implementation).
//
- enum class dyndep_format {make};
+ enum class dyndep_format {make, lines};
struct dyndep_byproduct
{
@@ -154,14 +166,17 @@ namespace build2
dyndep_byproduct
execute_depdb_preamble_dyndep_byproduct (
- action a, const scope& base, const file& t,
+ action a, const scope& base, const target& t,
environment& e, const script& s, runner& r,
depdb& dd, bool& update, timestamp mt)
{
+ // Dummies.
+ //
// This is getting a bit ugly (we also don't really need to pass
// depdb here). One day we will find a better way...
//
- bool deferred_failure; // Dymmy.
+ dynamic_targets dyn_targets;
+ bool deferred_failure;
dyndep_byproduct v;
exec_depdb_preamble (
@@ -169,7 +184,7 @@ namespace build2
e, s, r,
s.depdb_preamble.begin () + *s.depdb_dyndep,
s.depdb_preamble.end (),
- dd, &update, mt, &deferred_failure, &v);
+ dd, &dyn_targets, &update, mt, &deferred_failure, &v);
return v;
}
@@ -216,21 +231,27 @@ namespace build2
names
exec_special (token&, build2::script::token_type&, bool skip_first);
+ // Note: target must be file or group.
+ //
void
- exec_depdb_preamble (action, const scope& base, const file&,
+ exec_depdb_preamble (action, const scope& base, const target&,
environment&, const script&, runner&,
lines_iterator begin, lines_iterator end,
depdb&,
+ dynamic_targets* dyn_targets = nullptr,
bool* update = nullptr,
optional<timestamp> mt = nullopt,
bool* deferred_failure = nullptr,
dyndep_byproduct* = nullptr);
+ // Note: target must be file or group.
+ //
void
exec_depdb_dyndep (token&, build2::script::token_type&,
size_t line_index, const location&,
- action, const scope& base, file&,
+ action, const scope& base, target&,
depdb&,
+ dynamic_targets& dyn_targets,
bool& update,
timestamp,
bool& deferred_failure,
@@ -271,9 +292,9 @@ namespace build2
script* script_;
const small_vector<action, 1>* actions_; // Non-NULL during pre-parse.
- // True if this script is for file-based targets and performing update
- // is one of the actions, respectively. Only set for the pre-parse
- // mode.
+ // True if this script is for file- or file group-based targets and
+ // performing update is one of the actions, respectively. Only set for
+ // the pre-parse mode.
//
bool file_based_;
bool perform_update_;
@@ -355,6 +376,7 @@ namespace build2
optional<pair<location, size_t>>
depdb_dyndep_; // depdb-dyndep location/position.
bool depdb_dyndep_byproduct_ = false; // --byproduct
+ bool depdb_dyndep_dyn_target_ = false; // --dyn-target
lines depdb_preamble_; // Note: excluding depdb-clear.
// If present, the first impure function called in the body of the
@@ -374,7 +396,12 @@ namespace build2
//
optional<location> computed_var_;
- // True during pre-parsing when the pre-parse mode is temporarily
+ // True if we (rather than the base parser) turned on the pre-parse
+ // mode.
+ //
+ bool top_pre_parse_;
+
+ // True during top-pre-parsing when the pre-parse mode is temporarily
// suspended to perform expansion.
//
bool pre_parse_suspended_ = false;
diff --git a/libbuild2/build/script/runner.cxx b/libbuild2/build/script/runner.cxx
index c52ef66..5d9764b 100644
--- a/libbuild2/build/script/runner.cxx
+++ b/libbuild2/build/script/runner.cxx
@@ -28,12 +28,37 @@ namespace build2
//
for (auto i (env.cleanups.begin ()); i != env.cleanups.end (); )
{
- const target* m (&env.target);
- for (; m != nullptr; m = m->adhoc_member)
+ const target* m (nullptr);
+ if (const group* g = env.target.is_a<group> ())
{
- if (const path_target* pm = m->is_a<path_target> ())
- if (i->path == pm->path ())
- break;
+ for (const target* gm: g->members)
+ {
+ if (const path_target* pm = gm->is_a<path_target> ())
+ {
+ if (i->path == pm->path ())
+ {
+ m = gm;
+ break;
+ }
+ }
+ }
+ }
+ else if (const fsdir* fd = env.target.is_a<fsdir> ())
+ {
+ // Compare ignoring the trailing directory separator.
+ //
+ if (path_traits::compare (i->path.string (),
+ fd->dir.string ()) == 0)
+ m = fd;
+ }
+ else
+ {
+ for (m = &env.target; m != nullptr; m = m->adhoc_member)
+ {
+ if (const path_target* pm = m->is_a<path_target> ())
+ if (i->path == pm->path ())
+ break;
+ }
}
if (m != nullptr)
diff --git a/libbuild2/build/script/script.cxx b/libbuild2/build/script/script.cxx
index 9d9b5a8..0d96cc3 100644
--- a/libbuild2/build/script/script.cxx
+++ b/libbuild2/build/script/script.cxx
@@ -7,6 +7,8 @@
#include <libbuild2/target.hxx>
+#include <libbuild2/adhoc-rule-buildscript.hxx> // include_unmatch*
+
#include <libbuild2/script/timeout.hxx>
#include <libbuild2/build/script/parser.hxx>
@@ -58,11 +60,27 @@ namespace build2
{
// $>
//
+ // What should it contain for an explicit group? While it may seem
+ // that just the members should be enough (and analogous to the ad
+ // hoc case), this won't let us get the group name for diagnostics.
+ // So the group name followed by all the members seems like the
+ // logical choice.
+ //
names ns;
- for (const target_type* m (&target);
- m != nullptr;
- m = m->adhoc_member)
- m->as_name (ns);
+
+ if (const group* g = target.is_a<group> ())
+ {
+ g->as_name (ns);
+ for (const target_type* m: g->members)
+ m->as_name (ns);
+ }
+ else
+ {
+ for (const target_type* m (&target);
+ m != nullptr;
+ m = m->adhoc_member)
+ m->as_name (ns);
+ }
assign (var_ts) = move (ns);
}
@@ -75,13 +93,25 @@ namespace build2
// much sense, they could be handy to exclude certain prerequisites
// from $< while still treating them as such, especially in rule.
//
+ // While initially we treated update=unmatch prerequisites as
+ // implicitly ad hoc, this turned out to be not quite correct, so
+ // now we add them unless they are explicitly marked ad hoc.
+ //
names ns;
- for (const prerequisite_target& pt: target.prerequisite_targets[a])
+ for (const prerequisite_target& p: target.prerequisite_targets[a])
{
// See adhoc_buildscript_rule::execute_update_prerequisites().
//
- if (pt.target != nullptr && !pt.adhoc ())
- pt.target->as_name (ns);
+ if (const target_type* pt =
+ p.target != nullptr ? (p.adhoc () ? nullptr : p.target) :
+ (p.include & adhoc_buildscript_rule::include_unmatch) != 0 &&
+ (p.include & prerequisite_target::include_adhoc) == 0 &&
+ (p.include & adhoc_buildscript_rule::include_unmatch_adhoc) == 0
+ ? reinterpret_cast<target_type*> (p.data)
+ : nullptr)
+ {
+ pt->as_name (ns);
+ }
}
assign (var_ps) = move (ns);
diff --git a/libbuild2/build/script/script.hxx b/libbuild2/build/script/script.hxx
index 57a893e..08f1bf4 100644
--- a/libbuild2/build/script/script.hxx
+++ b/libbuild2/build/script/script.hxx
@@ -82,8 +82,9 @@ namespace build2
bool depdb_value; // String or hash.
optional<size_t> depdb_dyndep; // Pos of first dyndep.
bool depdb_dyndep_byproduct = false; // dyndep --byproduct
+ bool depdb_dyndep_dyn_target = false;// dyndep --dyn-target
lines depdb_preamble; // Note include vars.
- bool depdb_preamble_temp_dir = false; // True if refs $~.
+ bool depdb_preamble_temp_dir = false;// True if refs $~.
location start_loc;
location end_loc;
diff --git a/libbuild2/buildfile b/libbuild2/buildfile
index b4f420c..3518d93 100644
--- a/libbuild2/buildfile
+++ b/libbuild2/buildfile
@@ -4,7 +4,7 @@
# NOTE: remember to update bundled_modules in libbuild2/module.cxx if adding a
# new module.
#
-bundled_modules = bash/ bin/ c/ cc/ cxx/ in/ version/
+bundled_modules = bash/ bin/ c/ cc/ cli/ cxx/ in/ version/
./: lib{build2} $bundled_modules
@@ -59,42 +59,120 @@ lib{build2}: cxx{utility-uninstalled}: for_install = false
libul{build2}: config/{hxx ixx txx cxx}{** -host-config -**.test...} \
config/cxx{host-config}
+# Derive ~host and ~build2 configurations from current configuration.
+#
# This will of course blow up spectacularly if we are cross-compiling. But
# let's wait and enjoy the fireworks (and get a sense of why someone would
# want to cross-compile a build system).
#
-config/cxx{host-config}: config/in{host-config}
+# For the ~host configuration we only want c/cxx/cc and bin that they load.
+# For ~build2 we want to keep everything except dist.
+#
+# We also remove comment lines which could be confused with preprocessor
+# directives by some lesser compilers and blank lines between groups of
+# options which could cause spurious rebuilds when we filter out entire
+# groups.
+#
+# For ~host also filter out config.bin.lib/config.bin.*.lib (static/shared
+# library build/link preferences). In particular, we don't want to force
+# config.bin.lib=shared since that will cause static libraries to link shared
+# versions of their prerequisites (see mysql-client for a case where this can
+# make a difference).
+#
+# For ~build2 also filter out config.install.chroot -- we definitely don't
+# want it carried through. Also filter out variables that control tests
+# execution.
+#
+# Finally, for both ~host and ~build2 we keep config.config.environment
+# but strip config.config.hermetic* (we shouldn't be forcing hermiticity
+# on the users of ~host/~build2; they can decide for themselves if they
+# want it).
+#
+# The *_no_warnings variants are with the suppressed C/C++ compiler warnings
+# (in particular, used for private host configuration in bpkg).
+#
+#
+host_config_lines = [strings]
+build2_config_lines = [strings]
+
+host_config_no_warnings_lines = [strings]
+build2_config_no_warnings_lines = [strings]
+
+for l: $regex.replace_lines( \
+ $config.save(), \
+ '^( *(#|(config\.(test[. ]|dist\.|install\.chroot|config\.hermetic))).*|)$', \
+ [null])
{
- # For the ~host configuration we only want c/cxx/cc and bin that they load.
- # For ~build2 we want to keep everything except dist.
- #
- # We also remove comment lines which could be confused with preprocessor
- # directives by some lesser compilers and blank lines between groups of
- # options which could cause spurious rebuilds when we filter out entire
- # groups.
- #
- # For ~build2 also filter out config.install.chroot -- we definitely don't
- # want it carried through. Also filter out variables that control tests
- # execution.
+ # Note: also preserve config.version.
#
- # Finally, for both ~host and ~build2 we keep config.config.environment
- # but strip config.config.hermetic* (we shouldn't be forcing hermiticity
- # on the users of ~host/~build2; they can decide for themselves if they
- # want it).
- #
- build2_config = $regex.replace_lines( \
- $config.save(), \
- '^( *(#|(config\.(test[. ]|dist\.|install\.chroot|config\.hermetic))).*|)$', \
- [null], \
- return_lines)
+ h = [null]
+ if $regex.match( \
+ $l, \
+ ' *config\.(c[. ]|cxx[. ]|cc[.]|bin[.]|config.environment |version ).*')
+ {
+ if! ($regex.match(\
+ $l, \
+ ' *config\.bin\.(lib|exe\.lib|liba\.lib|libs\.lib)[ =].*'))
+ {
+ # Filter out sanitizer options in ~host. We run the toolchain with
+ # various sanitizers on CI but sanitizers cause issues in some packages.
+ # Note that we can have both -fsanitize and -fno-sanitize forms. For
+ # example:
+ #
+ # -fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all
+ #
+ if $regex.match($l, ' *config\.(c|cxx|cc)\.(coptions|loptions)[ =].*')
+ {
+ h = $regex.replace($l, ' ?-f(no-)?sanitize[=-][^ ]+', '')
+ }
+ else
+ h = $l
+ }
+ }
+
+ if ($h != [null])
+ host_config_lines += $h
- # Also preserve config.version.
+ build2_config_lines += $l
+
+ # Append the warning suppressing option to config.{c,cxx}.coptions rather
+ # than config.cc.coptions since the former could re-enable them.
#
- host_config = $regex.replace_lines( \
- $build2_config, \
- '^ *config\.(c[. ]|cxx[. ]|cc[.]|bin[.]|config.environment |version ).*$', \
- '$&', \
- format_no_copy return_lines)
+ if ($regex.match($l, ' *config\.(c|cxx)\.coptions[ =].*'))
+ {
+ # Note that in MSVC overriding one warning option (say /W3) with another
+ # (say /w) triggers a warning. However, our compile_rule sanitizes the
+ # command line to resolve such overrides (see msvc_sanitize_cl()).
+ #
+ o = ($cxx.class == 'gcc' ? -w : $cxx.class == 'msvc' ? /w : )
+
+ if ($regex.match($l, '[^=]+= *\[null\] *'))
+ {
+ l = $regex.replace($l, '= *\[null\] *$', "= $o")
+ h = $regex.replace($h, '= *\[null\] *$', "= $o")
+ }
+ else
+ {
+ l = $regex.replace($l, '=(.*)$', "=\\1 $o")
+ h = $regex.replace($h, '=(.*)$', "=\\1 $o")
+ }
+ }
+
+ if ($h != [null])
+ host_config_no_warnings_lines += $h
+
+ build2_config_no_warnings_lines += $l
+}
+
+config/cxx{host-config}: config/in{host-config}
+{
+ host_config = $regex.merge($host_config_lines, '(.+)', '\1\n')
+ build2_config = $regex.merge($build2_config_lines, '(.+)', '\1\n')
+
+ host_config_no_warnings = $regex.merge($host_config_no_warnings_lines, \
+ '(.+)', '\1\n')
+ build2_config_no_warnings = $regex.merge($build2_config_no_warnings_lines, \
+ '(.+)', '\1\n')
}
libul{build2}: dist/{hxx ixx txx cxx}{** -**.test...}
@@ -169,14 +247,48 @@ if! $cross
{
{obja objs}{context}: cxx.poptions += \
-DBUILD2_IMPORT_PATH=\"$regex.replace($out_root, '\\', '\\\\')\"
+}
- # While this object file should only be linked when we are installing, it
- # will be compiled even in the uninstalled case.
+# Note that while the -installed object file should only be linked when we
+# are installing, it will be compiled even in the uninstalled case.
+#
+if ($install.root != [null])
+{
+ # Only if installed.
+ #
+ {obja objs}{utility-installed}: cxx.poptions += \
+ -DBUILD2_INSTALL_LIB=\"$regex.replace(\
+ $install.resolve($install.lib), '\\', '\\\\')\"
+
+ # Only if configured.
+ #
+ # Note: strip the last directory component (<project>).
#
- if ($install.root != [null])
- {obja objs}{utility-installed}: cxx.poptions += \
- -DBUILD2_INSTALL_LIB=\"$regex.replace(\
- $install.resolve($install.lib), '\\', '\\\\')\"
+ # @@ TMP drop after 0.16.0 release.
+ #
+ install_buildfile = ($install.buildfile != [null] \
+ ? $directory($install.resolve($install.buildfile)) \
+ :)
+ {obja objs}{utility-installed utility-uninstalled}: cxx.poptions += \
+ -DBUILD2_INSTALL_BUILDFILE=\"$regex.replace($install_buildfile, '\\', '\\\\')\"
+
+ #\
+ {obja objs}{utility-installed utility-uninstalled}: cxx.poptions += \
+ -DBUILD2_INSTALL_BUILDFILE=\"$regex.replace(\
+ $directory($install.resolve($install.buildfile)), '\\', '\\\\')\"
+ #\
+
+ # Data directory or src_root if not installed.
+ #
+ # Note: normalized in both cases.
+ #
+ {obja objs}{utility-installed}: cxx.poptions += \
+ -DBUILD2_INSTALL_DATA=\"$regex.replace(\
+ $install.resolve($install.data), '\\', '\\\\')\"
+
+ {obja objs}{utility-uninstalled}: cxx.poptions += \
+ -DBUILD2_INSTALL_DATA=\"$regex.replace(\
+ $src_root, '\\', '\\\\')\"
}
if ($cxx.target.class != 'windows')
diff --git a/libbuild2/buildspec.cxx b/libbuild2/buildspec.cxx
index bd580ca..2eeaf31 100644
--- a/libbuild2/buildspec.cxx
+++ b/libbuild2/buildspec.cxx
@@ -53,7 +53,7 @@ namespace build2
if (v)
{
names storage;
- os << reverse (v, storage);
+ os << reverse (v, storage, true /* reduce */);
}
else
os << "[null]";
@@ -86,7 +86,7 @@ namespace build2
if (v)
{
names storage;
- os << reverse (v, storage);
+ os << reverse (v, storage, true /* reduce */);
}
else
os << "[null]";
diff --git a/libbuild2/c/init.cxx b/libbuild2/c/init.cxx
index 2dbd534..8bc2f7d 100644
--- a/libbuild2/c/init.cxx
+++ b/libbuild2/c/init.cxx
@@ -6,9 +6,12 @@
#include <libbuild2/scope.hxx>
#include <libbuild2/diagnostics.hxx>
+#include <libbuild2/install/utility.hxx>
+
#include <libbuild2/cc/guess.hxx>
#include <libbuild2/cc/module.hxx>
+#include <libbuild2/cc/target.hxx> // pc*
#include <libbuild2/c/target.hxx>
#ifndef BUILD2_DEFAULT_C
@@ -54,6 +57,26 @@ namespace build2
strings& mode,
const string* v) const
{
+ // The standard is `NN` but can also be `gnuNN`.
+
+ // This helper helps recognize both NN and [cC]NN to avoid an endless
+ // stream of user questions. It can also be used to recognize Nx in
+ // addition to NN (e.g., "23" and "2x").
+ //
+ auto stdcmp = [v] (const char* nn, const char* nx = nullptr)
+ {
+ if (v != nullptr)
+ {
+ const char* s (v->c_str ());
+ if (s[0] == 'c' || s[0] == 'C')
+ s += 1;
+
+ return strcmp (s, nn) == 0 || (nx != nullptr && strcmp (s, nx) == 0);
+ }
+
+ return false;
+ };
+
switch (ci.class_)
{
case compiler_class::msvc:
@@ -78,7 +101,12 @@ namespace build2
// C17/18 is a bug-fix version of C11 so here we assume it is the
// same as C11.
//
- // And it's still early days for C2X.
+ // And it's still early days for C2X. Specifically, there is not
+ // much about C2X in MSVC in the official places and the following
+        // page shows that it's pretty much unimplemented at the time of the
+ // MSVC 17.6 release:
+ //
+ // https://en.cppreference.com/w/c/compiler_support/23
//
// From version 16.8 VC now supports /std:c11 and /std:c17 options
// which enable C11/17 conformance. However, as of version 16.10,
@@ -87,17 +115,17 @@ namespace build2
//
if (v == nullptr)
;
- else if (*v != "90")
+ else if (!stdcmp ("90"))
{
uint64_t cver (ci.version.major);
- if ((*v == "99" && cver < 16) || // Since VS2010/10.0.
- ((*v == "11" ||
- *v == "17" ||
- *v == "18") && cver < 18) ||
- (*v == "2x" ))
+ if ((stdcmp ("99") && cver < 16) || // Since VS2010/10.0.
+ ((stdcmp ("11") ||
+ stdcmp ("17") ||
+ stdcmp ("18")) && cver < 18) || // Since VS????/11.0.
+ (stdcmp ("23", "2x") ))
{
- fail << "C" << *v << " is not supported by " << ci.signature <<
+ fail << "C " << *v << " is not supported by " << ci.signature <<
info << "required by " << project (rs) << '@' << rs;
}
}
@@ -114,12 +142,12 @@ namespace build2
{
string o ("-std=");
- if (*v == "2x") o += "c2x"; // GCC 9, Clang 9 (8?).
- else if (*v == "17" ||
- *v == "18") o += "c17"; // GCC 8, Clang 6.
- else if (*v == "11") o += "c1x";
- else if (*v == "99") o += "c9x";
- else if (*v == "90") o += "c90";
+ if (stdcmp ("23", "2x")) o += "c2x"; // GCC 9, Clang 9 (8?).
+ else if (stdcmp ("17") ||
+ stdcmp ("18")) o += "c17"; // GCC 8, Clang 6.
+ else if (stdcmp ("11")) o += "c1x";
+ else if (stdcmp ("99")) o += "c9x";
+ else if (stdcmp ("90")) o += "c90";
else o += *v; // In case the user specifies `gnuNN` or some such.
mode.insert (mode.begin (), move (o));
@@ -129,6 +157,79 @@ namespace build2
}
}
+ // See cc::data::x_{hdr,inc} for background.
+ //
+ static const target_type* const hdr[] =
+ {
+ &h::static_type,
+ nullptr
+ };
+
+ // Note that we include S{} here because .S files can include each other.
+ // (And maybe from inline assembler instructions?)
+ //
+ static const target_type* const inc[] =
+ {
+ &h::static_type,
+ &c::static_type,
+ &m::static_type,
+ &S::static_type,
+ &c_inc::static_type,
+ nullptr
+ };
+
+ bool
+ types_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("c::types_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "c.types module must be loaded in project root";
+
+ // Register target types and configure their "installability".
+ //
+ using namespace install;
+
+ bool install_loaded (cast_false<bool> (rs["install.loaded"]));
+
+    // Note: not registering m{} or S{} (they are registered separately
+ // by the respective optional .types submodules).
+ //
+ rs.insert_target_type<c> ();
+
+ auto insert_hdr = [&rs, install_loaded] (const target_type& tt)
+ {
+ rs.insert_target_type (tt);
+
+ // Install headers into install.include.
+ //
+ if (install_loaded)
+ install_path (rs, tt, dir_path ("include"));
+ };
+
+ for (const target_type* const* ht (hdr); *ht != nullptr; ++ht)
+ insert_hdr (**ht);
+
+ // @@ PERF: maybe factor this to cc.types?
+ //
+ rs.insert_target_type<cc::pc> ();
+ rs.insert_target_type<cc::pca> ();
+ rs.insert_target_type<cc::pcs> ();
+
+ if (install_loaded)
+ install_path<cc::pc> (rs, dir_path ("pkgconfig"));
+
+ return true;
+ }
+
static const char* const hinters[] = {"cxx", nullptr};
// See cc::module for details on guess_init vs config_init.
@@ -231,8 +332,8 @@ namespace build2
vp["cc.export.libs"],
vp["cc.export.impl_libs"],
- vp["cc.pkconfig.include"],
- vp["cc.pkconfig.lib"],
+ vp["cc.pkgconfig.include"],
+ vp["cc.pkgconfig.lib"],
vp.insert_alias (vp["cc.stdlib"], "c.stdlib"), // Same as cc.stdlib.
@@ -244,6 +345,7 @@ namespace build2
vp["cc.module_name"],
vp["cc.importable"],
vp["cc.reprocess"],
+ vp["cc.serialize"],
vp.insert<string> ("c.preprocessed"), // See cxx.preprocessed.
nullptr, // No __symexport (no modules).
@@ -318,20 +420,6 @@ namespace build2
return true;
}
- static const target_type* const hdr[] =
- {
- &h::static_type,
- nullptr
- };
-
- static const target_type* const inc[] =
- {
- &h::static_type,
- &c::static_type,
- &m::static_type,
- nullptr
- };
-
bool
init (scope& rs,
scope& bs,
@@ -360,8 +448,7 @@ namespace build2
"c.link",
"c.install",
- cm.x_info->id.type,
- cm.x_info->id.variant,
+ cm.x_info->id,
cm.x_info->class_,
cm.x_info->version.major,
cm.x_info->version.minor,
@@ -394,6 +481,7 @@ namespace build2
c::static_type,
nullptr, // No C modules yet.
+ c_inc::static_type,
hdr,
inc
};
@@ -405,6 +493,29 @@ namespace build2
}
bool
+ objc_types_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("c::objc_types_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "c.objc.types module must be loaded in project root";
+
+ // Register the m{} target type.
+ //
+ rs.insert_target_type<m> ();
+
+ return true;
+ }
+
+ bool
objc_init (scope& rs,
scope& bs,
const location& loc,
@@ -433,7 +544,7 @@ namespace build2
//
// Note: see similar code in the cxx module.
//
- rs.insert_target_type<m> ();
+ load_module (rs, rs, "c.objc.types", loc);
// Note that while Objective-C is supported by MinGW GCC, it's unlikely
// Clang supports it when targeting MSVC or Emscripten. But let's keep
@@ -446,16 +557,116 @@ namespace build2
return true;
}
+ bool
+ as_cpp_types_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("c::as_cpp_types_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "c.as-cpp.types module must be loaded in project root";
+
+ // Register the S{} target type.
+ //
+ rs.insert_target_type<S> ();
+
+ return true;
+ }
+
+ bool
+ as_cpp_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("c::as_cpp_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "c.as-cpp module must be loaded in project root";
+
+ module* mod (rs.find_module<module> ("c"));
+
+ if (mod == nullptr)
+ fail (loc) << "c.as-cpp module must be loaded after c module";
+
+ // Register the target type and "enable" it in the module.
+ //
+ // Note that we must register the target type regardless of whether the
+    // C compiler is capable of compiling Assembler with C preprocessor. But
+ // we enable only if it is.
+ //
+ load_module (rs, rs, "c.as-cpp.types", loc);
+
+ if (mod->ctype == compiler_type::gcc ||
+ mod->ctype == compiler_type::clang)
+ mod->x_asp = &S::static_type;
+
+ return true;
+ }
+
+ bool
+ predefs_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("c::predefs_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "c.predefs module must be loaded in project root";
+
+ module* mod (rs.find_module<module> ("c"));
+
+ if (mod == nullptr)
+ fail (loc) << "c.predefs module must be loaded after c module";
+
+ // Register the c.predefs rule.
+ //
+ // Why invent a separate module instead of just always registering it in
+ // the c module? The reason is performance: this rule will be called for
+ // every C header.
+ //
+ cc::predefs_rule& r (*mod);
+
+ rs.insert_rule<h> (perform_update_id, r.rule_name, r);
+ rs.insert_rule<h> (perform_clean_id, r.rule_name, r);
+ rs.insert_rule<h> (configure_update_id, r.rule_name, r);
+
+ return true;
+ }
+
static const module_functions mod_functions[] =
{
// NOTE: don't forget to also update the documentation in init.hxx if
// changing anything here.
- {"c.guess", nullptr, guess_init},
- {"c.config", nullptr, config_init},
- {"c", nullptr, init},
- {"c.objc", nullptr, objc_init},
- {nullptr, nullptr, nullptr}
+ {"c.types", nullptr, types_init},
+ {"c.guess", nullptr, guess_init},
+ {"c.config", nullptr, config_init},
+ {"c.objc.types", nullptr, objc_types_init},
+ {"c.objc", nullptr, objc_init},
+ {"c.as-cpp.types", nullptr, as_cpp_types_init},
+ {"c.as-cpp", nullptr, as_cpp_init},
+ {"c.predefs", nullptr, predefs_init},
+ {"c", nullptr, init},
+ {nullptr, nullptr, nullptr}
};
const module_functions*
diff --git a/libbuild2/c/init.hxx b/libbuild2/c/init.hxx
index f324c31..38515c1 100644
--- a/libbuild2/c/init.hxx
+++ b/libbuild2/c/init.hxx
@@ -19,11 +19,22 @@ namespace build2
//
// Submodules:
//
- // `c.guess` -- registers and sets some variables.
- // `c.config` -- loads c.guess and sets more variables.
- // `c` -- loads c.config and registers target types and rules.
- // `c.objc` -- registers m{} target type and enables Objective-C
- // compilation.
+ // `c.types` -- registers target types.
+ // `c.guess` -- registers and sets some variables.
+ // `c.config` -- loads c.guess and sets more variables.
+ // `c` -- loads c.{types,config} and registers rules and
+ // functions.
+ //
+ // `c.objc.types` -- registers m{} target type.
+ // `c.objc` -- loads c.objc.types and enables Objective-C
+ // compilation. Must be loaded after c.
+ //
+ // `c.as-cpp.types` -- registers S{} target type.
+ // `c.as-cpp` -- loads c.as-cpp.types and enables Assembler with C
+ // preprocessor compilation. Must be loaded after c.
+ //
+ // `c.predefs` -- registers rule for generating a C header with
+ // predefined compiler macros. Must be loaded after c.
//
extern "C" LIBBUILD2_C_SYMEXPORT const module_functions*
build2_c_load ();
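A usage sketch, assuming the standard `using` module-loading directive in root.build: load `c` first and then the desired submodules, for example `using c` followed by `using c.objc` or `using c.predefs`, which satisfies the "must be loaded after c" requirements documented above.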
diff --git a/libbuild2/c/target.hxx b/libbuild2/c/target.hxx
index 308bda9..c9955e3 100644
--- a/libbuild2/c/target.hxx
+++ b/libbuild2/c/target.hxx
@@ -16,6 +16,8 @@ namespace build2
using cc::h;
using cc::c;
using cc::m;
+ using cc::S;
+ using cc::c_inc;
}
}
diff --git a/libbuild2/cc/buildfile b/libbuild2/cc/buildfile
index e090e76..7dcd811 100644
--- a/libbuild2/cc/buildfile
+++ b/libbuild2/cc/buildfile
@@ -18,12 +18,22 @@ intf_libs = ../bin/lib{build2-bin}
./: lib{build2-cc}: libul{build2-cc}: \
{hxx ixx txx cxx}{** -pkgconfig-lib* -**.test...} \
- h{msvc-setup} \
- $intf_libs $impl_libs
+ h{msvc-setup}
libul{build2-cc}: cxx{pkgconfig-libpkgconf}: include = $libpkgconf
libul{build2-cc}: cxx{pkgconfig-libpkg-config}: include = (!$libpkgconf)
+libul{build2-cc}: $intf_libs $impl_libs
+
+# libc++ std module interface translation unit.
+#
+# Hopefully temporary, see llvm-project GH issue #73089.
+#
+# @@ TMP: make sure sync'ed with upstream before release (keep this note).
+#
+lib{build2-cc}: file{std.cppm}
+file{std.cppm}@./: install = data/libbuild2/cc/
+
# Unit tests.
#
exe{*.test}:
diff --git a/libbuild2/cc/common.cxx b/libbuild2/cc/common.cxx
index 2d344f1..2a8bc50 100644
--- a/libbuild2/cc/common.cxx
+++ b/libbuild2/cc/common.cxx
@@ -162,7 +162,12 @@ namespace build2
// Add the library to the chain.
//
if (self && proc_lib)
+ {
+ if (find (chain->begin (), chain->end (), &l) != chain->end ())
+ fail << "dependency cycle detected involving library " << l;
+
chain->push_back (&l);
+ }
// We only lookup public variables so go straight for the public
// variable pool.
@@ -382,9 +387,10 @@ namespace build2
for (const prerequisite_target& pt: l.prerequisite_targets[a])
{
// Note: adhoc prerequisites are not part of the library metadata
- // protocol (and we should check for adhoc first to avoid races).
+ // protocol (and we should check for adhoc first to avoid races
+ // during execute).
//
- if (pt == nullptr || pt.adhoc ())
+ if (pt.adhoc () || pt == nullptr)
continue;
if (marked (pt))
@@ -408,7 +414,7 @@ namespace build2
if (!li) find_linfo ();
process_libraries_impl (a, bs, *li, *sysd,
- g, *f, la, pt.data,
+ g, *f, la, pt.data /* lflags */,
proc_impl, proc_lib, proc_opt,
true /* self */, proc_opt_group,
cache, chain, nullptr);
@@ -642,18 +648,36 @@ namespace build2
<< " dependency " << *t << " is " << w <<
info << "mentioned in *.export." << (impl ? "impl_" : "")
<< "libs of target " << l <<
- info << "is it a prerequisite of " << l << "?";
+ info << "is it a prerequisite of " << l << "?" << endf;
}
// Process it recursively.
//
- // @@ Where can we get the link flags? Should we try to find
- // them in the library's prerequisites? What about
- // installed stuff?
+ bool u;
+ bool la ((u = t->is_a<libux> ()) || t->is_a<liba> ());
+ lflags lf (0);
+
+ // If this is a static library, see if we need to link it
+ // whole.
//
+ if (la && proc_lib)
+ {
+ // Note: go straight for the public variable pool.
+ //
+ const variable& var (t->ctx.var_pool["bin.whole"]);
+
+ // See the link rule for the lookup semantics.
+ //
+ lookup l (
+ t->lookup_original (var, true /* target_only */).first);
+
+ if (l ? cast<bool> (*l) : u)
+ lf |= lflag_whole;
+ }
+
process_libraries_impl (
a, bs, *li, *sysd,
- g, *t, t->is_a<liba> () || t->is_a<libux> (), 0,
+ g, *t, la, lf,
proc_impl, proc_lib, proc_opt,
true /* self */, proc_opt_group,
cache, chain, dedup);
@@ -804,6 +828,8 @@ namespace build2
// always a file. The second half of the returned pair is the group, if
// the member was picked.
//
+ // Note: paths in sysd/usrd are expected to be absolute and normalized.
+ //
// Note: may throw non_existent_library.
//
pair<const mtime_target&, const target*> common::
@@ -911,6 +937,8 @@ namespace build2
// Action should be absent if called during the load phase. Note that pk's
// scope should not be NULL (even if dir is absolute).
//
+ // Note: paths in sysd/usrd are expected to be absolute and normalized.
+ //
// Note: see similar logic in find_system_library().
//
target* common::
@@ -1039,6 +1067,21 @@ namespace build2
{
context& ctx (p.scope->ctx);
+ // Whether to look for a binless variant using the common .pc file
+ // (see below).
+ //
+ // Normally we look for a binless version if the binful one was not
+ // found. However, sometimes we may find what looks like a binful
+ // library but on a closer examination realize that there is something
+ // wrong with it (for example, it's not a Windows import library). In
+ // such cases we want to omit looking for a binless library using the
+ // common .pc file since it most likely corresponds to the binful
+ // library (and we may end up in an infinite loop trying to resolve
+ // itself).
+ //
+ bool ba (true);
+ bool bs (true);
+
timestamp mt;
// libs
@@ -1110,6 +1153,31 @@ namespace build2
s->path_mtime (move (f), mt);
}
}
+ else if (!ext && tsys == "darwin")
+ {
+ // Besides .dylib, Mac OS now also has "text-based stub libraries"
+ // that use the .tbd extension. They appear to be similar to
+ // Windows import libraries and contain information such as the
+ // location of the .dylib library, its symbols, etc. For example,
+ // there is /Library/.../MacOSX13.3.sdk/usr/lib/libsqlite3.tbd
+ // which points to /usr/lib/libsqlite3.dylib (but which itself is
+ // invisible/inaccessible, presumably for security).
+ //
+ // Note that for now we are treating the .tbd library as the
+ // shared library but could probably do the more elaborate dance
+ // with ad hoc members like on Windows if really necessary.
+ //
+ se = string ("tbd");
+ f = f.base (); // Remove .dylib.
+ f += ".tbd";
+ mt = mtime (f);
+
+ if (mt != timestamp_nonexistent)
+ {
+ insert_library (ctx, s, name, d, ld, se, exist, trace);
+ s->path_mtime (move (f), mt);
+ }
+ }
}
// liba
@@ -1139,10 +1207,24 @@ namespace build2
if (tsys == "win32-msvc")
{
if (s == nullptr && !sn.empty ())
- s = msvc_search_shared (ld, d, p, exist);
+ {
+ pair<libs*, bool> r (msvc_search_shared (ld, d, p, exist));
+
+ if (r.first != nullptr)
+ s = r.first;
+ else if (!r.second)
+ bs = false;
+ }
if (a == nullptr && !an.empty ())
- a = msvc_search_static (ld, d, p, exist);
+ {
+ pair<liba*, bool> r (msvc_search_static (ld, d, p, exist));
+
+ if (r.first != nullptr)
+ a = r.first;
+ else if (!r.second)
+ ba = false;
+ }
}
// Look for binary-less libraries via pkg-config .pc files. Note that
@@ -1159,7 +1241,10 @@ namespace build2
// is no binful variant.
//
pair<path, path> r (
- pkgconfig_search (d, p.proj, name, na && ns /* common */));
+ pkgconfig_search (d,
+ p.proj,
+ name,
+ na && ns && ba && bs /* common */));
if (na && !r.first.empty ())
{
@@ -1212,6 +1297,8 @@ namespace build2
// making it the only one to allow things to be overridden (e.g.,
// if build2 was moved or some such).
//
+ // Note: build_install_lib is already normalized.
+ //
usrd->insert (usrd->begin (), build_install_lib);
}
}
@@ -1269,7 +1356,7 @@ namespace build2
// idea is that in .pc files that we generate, we copy those macros (or
// custom ones) from *.export.poptions.
//
- // @@ Should we add .pc files as ad hoc members so pkconfig_save() can
+ // @@ Should we add .pc files as ad hoc members so pkgconfig_save() can
// use their names when deriving -l-names (this would be especially
// helpful for binless libraries to get hold of prefix/suffix, etc).
//
@@ -1390,7 +1477,7 @@ namespace build2
// @@ TODO: we currently always reload pkgconfig for lt (and below).
//
mark_cc (*lt);
- lt->mtime (mt);
+ lt->mtime (mt); // Note: problematic, see below for details.
// We can only load metadata from here since we can only do this
// during the load phase. But it's also possible that a racing match
@@ -1441,6 +1528,9 @@ namespace build2
return l;
};
+ target_lock al (lock (a));
+ target_lock sl (lock (s));
+
target_lock ll (lock (lt));
// Set lib{} group members to indicate what's available. Note that we
@@ -1468,9 +1558,6 @@ namespace build2
ll.unlock ();
}
- target_lock al (lock (a));
- target_lock sl (lock (s));
-
if (!al) a = nullptr;
if (!sl) s = nullptr;
@@ -1504,6 +1591,34 @@ namespace build2
if (s != nullptr) match_rule (sl, file_rule::rule_match);
if (ll)
{
+ // @@ Turns out this has a problem: file_rule won't match/execute
+ // group members. So what happens is that if we have two installed
+ // libraries, say lib{build2} that depends on lib{butl}, then
+ // lib{build2} will have lib{butl} as a prerequisite and file_rule
+ // that matches lib{build2} will update lib{butl} (also matched by
+ // file_rule), but not its members. Later, someone (for example,
+ // the newer() call in append_libraries()) will pick one of the
+ // members assuming it is executed and things will go sideways.
+ //
+ // For now we hacked around the issue but the long term solution is
+ // probably to add to the bin module a special rule that is
+ // registered on the global scope and matches the installed lib{}
+ // targets. This rule will have to both update prerequisites like
+ // the file_rule and group members like the lib_rule (or maybe it
+ // can skip prerequisites since one of the members will do that; in
+ // which case we may be able to reuse lib_rule, maybe with
+ // the "all members" flag or some such). A few additional
+ // notes/thoughts:
+ //
+ // - Will be able to stop inheriting lib{} from mtime_target.
+ //
+ // - Will need to register for perform_update/clean like in context
+ // as well as for configure as in the config module (feels like
+ // shouldn't need to register for dist).
+ //
+ // - Will need to test batches, immediate import thoroughly (this
+ // stuff is notoriously tricky to get right in all situations).
+ //
match_rule (ll, file_rule::rule_match);
// Also bless the library group with a "trust me it exists" timestamp.
@@ -1512,6 +1627,8 @@ namespace build2
// won't match.
//
lt->mtime (mt);
+
+ ll.unlock (); // Unlock group before members, for good measure.
}
return r;
@@ -1553,5 +1670,85 @@ namespace build2
return r;
}
+
+ void common::
+ append_diag_color_options (cstrings& args) const
+ {
+ switch (cclass)
+ {
+ case compiler_class::msvc:
+ {
+ // MSVC has the /diagnostics: option which has an undocumented value
+ // `color`. It's unclear from which version of MSVC this value is
+ // supported, but it works in 17.0, so let's start from there.
+ //
+ // Note that there is currently no way to disable color in the MSVC
+ // diagnostics specifically (the /diagnostics:* option values are
+ // cumulative and there doesn't seem to be a `color-` value). This
+ // is probably not a big deal since one can just disable the color
+ // globally (--no-diag-color).
+ //
+ // Note that clang-cl appears to use -fansi-escape-codes. See GH
+ // issue #312 for background.
+ //
+ if (show_diag_color ())
+ {
+ if (cvariant.empty () &&
+ (cmaj > 19 || (cmaj == 19 && cmin >= 30)))
+ {
+ // Check for the prefix in case /diagnostics:color- gets added
+ // eventually.
+ //
+ if (!find_option_prefixes ({"/diagnostics:color",
+ "-diagnostics:color"}, args))
+ {
+ args.push_back ("/diagnostics:color");
+ }
+ }
+ }
+
+ break;
+ }
+ case compiler_class::gcc:
+ {
+ // Enable/disable diagnostics color unless a custom option is
+ // specified.
+ //
+ // Supported from GCC 4.9 (8.1 on Windows) and (at least) from Clang
+ // 3.5. Clang supports -f[no]color-diagnostics in addition to GCC's
+ // spelling.
+ //
+ if (
+#ifndef _WIN32
+ ctype == compiler_type::gcc ? cmaj > 4 || (cmaj == 4 && cmin >= 9) :
+#else
+ ctype == compiler_type::gcc ? cmaj > 8 || (cmaj == 8 && cmin >= 1) :
+#endif
+ ctype == compiler_type::clang ? cmaj > 3 || (cmaj == 3 && cmin >= 5) :
+ false)
+ {
+ if (!(find_option_prefix ("-fdiagnostics-color", args) ||
+ find_option ("-fno-diagnostics-color", args) ||
+ find_option ("-fdiagnostics-plain-output", args) ||
+ (ctype == compiler_type::clang &&
+ (find_option ("-fcolor-diagnostics", args) ||
+ find_option ("-fno-color-diagnostics", args)))))
+ {
+ // Omit -fno-diagnostics-color if stderr is not a terminal (we
+ // know there will be no color in this case and the option will
+ // just add noise, for example, in build logs).
+ //
+ if (const char* o = (
+ show_diag_color () ? "-fdiagnostics-color" :
+ stderr_term ? "-fno-diagnostics-color" :
+ nullptr))
+ args.push_back (o);
+ }
+ }
+
+ break;
+ }
+ }
+ }
}
}
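A minimal sketch of a call site for the new helper, assuming the usual build2 helpers (cstrings, append_options()) and hypothetical t and src variables; the real compile and link rules assemble their command lines elsewhere:

cstrings args;
args.push_back (cpath.recall_string ()); // Compiler executable.
append_options (args, t, x_coptions);    // User options (may already pin color).
append_diag_color_options (args);        // Appends, e.g., -fdiagnostics-color if appropriate.
args.push_back (src.string ().c_str ()); // Hypothetical source path.
args.push_back (nullptr);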
diff --git a/libbuild2/cc/common.hxx b/libbuild2/cc/common.hxx
index 4ad0e22..cb85632 100644
--- a/libbuild2/cc/common.hxx
+++ b/libbuild2/cc/common.hxx
@@ -117,6 +117,7 @@ namespace build2
const variable& c_module_name; // cc.module_name
const variable& c_importable; // cc.importable
const variable& c_reprocess; // cc.reprocess
+ const variable& c_serialize; // cc.serialize
const variable& x_preprocessed; // x.preprocessed
const variable* x_symexport; // x.features.symexport
@@ -165,6 +166,7 @@ namespace build2
// Cached values for some commonly-used variables/values.
//
+ const compiler_id& cid; // x.id
compiler_type ctype; // x.id.type
const string& cvariant; // x.id.variant
compiler_class cclass; // x.class
@@ -198,26 +200,34 @@ namespace build2
build2::cc::importable_headers* importable_headers;
// The order of sys_*_dirs is the mode entries first, followed by the
- // compiler built-in entries, and finished off with any extra entries
- // (e.g., fallback directories such as /usr/local/*).
+ // extra entries (e.g., /usr/local/*), followed by the compiler built-in
+ // entries.
+ //
+ // Note that even if we wanted to, we wouldn't be able to support extra
+ // trailing (after built-in) directories since we would need a portable
+ // equivalent of -idirafter for both headers and libraries.
//
const dir_paths& sys_lib_dirs; // x.sys_lib_dirs
const dir_paths& sys_hdr_dirs; // x.sys_hdr_dirs
const dir_paths* sys_mod_dirs; // compiler_info::sys_mod_dirs
- size_t sys_lib_dirs_mode; // Number of leading mode entries (0 if none).
+ size_t sys_lib_dirs_mode; // Number of mode entries (0 if none).
size_t sys_hdr_dirs_mode;
size_t sys_mod_dirs_mode;
- size_t sys_lib_dirs_extra; // First trailing extra entry (size if none).
+ size_t sys_lib_dirs_extra; // Number of extra entries (0 if none).
size_t sys_hdr_dirs_extra;
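// An editorial illustration of the resulting layout (made-up directories):
//
//   [0] /opt/toolchain/include   mode entries      (sys_hdr_dirs_mode  == 1)
//   [1] /usr/local/include       extra entries     (sys_hdr_dirs_extra == 1)
//   [2] /usr/include             built-in entries  (the remainder)
//
// That is, the built-in entries start at index
// sys_hdr_dirs_mode + sys_hdr_dirs_extra.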
// Note that x_obj is patched in by the x.objx module. So it stays NULL
- // if Objective-X compilation is not enabled.
+ // if Objective-X compilation is not enabled. Similarly for x_asp except
+ // here we don't have duality and it's purely to signal (by the c.as-cpp
+ // module) that it's enabled.
//
const target_type& x_src; // Source target type (c{}, cxx{}).
const target_type* x_mod; // Module target type (mxx{}), if any.
+ const target_type& x_inc; // Includable base target type (e.g., c_inc{}).
const target_type* x_obj; // Objective-X target type (m{}, mm{}).
+ const target_type* x_asp; // Assembler with CPP target type (S{}).
// Check if an object (target, prerequisite, etc) is an Objective-X
// source.
@@ -229,11 +239,21 @@ namespace build2
return x_obj != nullptr && t.is_a (*x_obj);
}
+ // Check if an object (target, prerequisite, etc) is an Assembler with
+ // C preprocessor source.
+ //
+ template <typename T>
+ bool
+ x_assembler_cpp (const T& t) const
+ {
+ return x_asp != nullptr && t.is_a (*x_asp);
+ }
+
// Array of target types that are considered the X-language headers
// (excluding h{} except for C). Keep them in the most likely to appear
// order with the "real header" first and terminated with NULL.
//
- const target_type* const* x_hdr;
+ const target_type* const* x_hdrs;
// Check if an object (target, prerequisite, etc) is a header.
//
@@ -241,7 +261,7 @@ namespace build2
bool
x_header (const T& t, bool c_hdr = true) const
{
- for (const target_type* const* ht (x_hdr); *ht != nullptr; ++ht)
+ for (const target_type* const* ht (x_hdrs); *ht != nullptr; ++ht)
if (t.is_a (**ht))
return true;
@@ -252,7 +272,7 @@ namespace build2
// extensions to target types. Keep them in the most likely to appear
// order and terminate with NULL.
//
- const target_type* const* x_inc;
+ const target_type* const* x_incs;
// Aggregate-like constructor with from-base support.
//
@@ -260,8 +280,7 @@ namespace build2
const char* compile,
const char* link,
const char* install,
- compiler_type ct,
- const string& cv,
+ const compiler_id& ci,
compiler_class cl,
uint64_t mj, uint64_t mi,
uint64_t vmj, uint64_t vmi,
@@ -280,13 +299,14 @@ namespace build2
size_t sle, size_t she,
const target_type& src,
const target_type* mod,
- const target_type* const* hdr,
- const target_type* const* inc)
+ const target_type& inc,
+ const target_type* const* hdrs,
+ const target_type* const* incs)
: config_data (cd),
x_compile (compile),
x_link (link),
x_install (install),
- ctype (ct), cvariant (cv), cclass (cl),
+ cid (ci), ctype (ci.type), cvariant (ci.variant), cclass (cl),
cmaj (mj), cmin (mi),
cvmaj (vmj), cvmin (vmi),
cpath (path), cmode (mode),
@@ -301,8 +321,9 @@ namespace build2
sys_lib_dirs_mode (slm), sys_hdr_dirs_mode (shm),
sys_mod_dirs_mode (smm),
sys_lib_dirs_extra (sle), sys_hdr_dirs_extra (she),
- x_src (src), x_mod (mod), x_obj (nullptr),
- x_hdr (hdr), x_inc (inc) {}
+ x_src (src), x_mod (mod), x_inc (inc),
+ x_obj (nullptr), x_asp (nullptr),
+ x_hdrs (hdrs), x_incs (incs) {}
};
class LIBBUILD2_CC_SYMEXPORT common: public data
@@ -440,13 +461,16 @@ namespace build2
// Alternative search logic for VC (msvc.cxx).
//
- bin::liba*
+ // The second half is false if we should poison the binless search via
+ // the common .pc file.
+ //
+ pair<bin::liba*, bool>
msvc_search_static (const process_path&,
const dir_path&,
const prerequisite_key&,
bool existing) const;
- bin::libs*
+ pair<bin::libs*, bool>
msvc_search_shared (const process_path&,
const dir_path&,
const prerequisite_key&,
@@ -483,6 +507,11 @@ namespace build2
const dir_paths&,
const dir_paths&,
pair<bool, bool>) const;
+
+ // Append compiler-specific diagnostics color options as necessary.
+ //
+ void
+ append_diag_color_options (cstrings&) const;
};
}
}
diff --git a/libbuild2/cc/common.txx b/libbuild2/cc/common.txx
index f55072c..8c80686 100644
--- a/libbuild2/cc/common.txx
+++ b/libbuild2/cc/common.txx
@@ -19,13 +19,14 @@ namespace build2
bool exist,
tracer& trace)
{
- auto p (ctx.targets.insert_locked (T::static_type,
- move (dir),
- path_cast<dir_path> (out.effect),
- name,
- move (ext),
- target_decl::implied,
- trace));
+ auto p (ctx.targets.insert_locked (
+ T::static_type,
+ move (dir),
+ dir_path (out.effect_string ()).normalize (),
+ name,
+ move (ext),
+ target_decl::implied,
+ trace));
if (exist && p.second)
throw non_existent_library {p.first.template as<mtime_target> ()};
diff --git a/libbuild2/cc/compile-rule.cxx b/libbuild2/cc/compile-rule.cxx
index 51d9b4d..2e4775e 100644
--- a/libbuild2/cc/compile-rule.cxx
+++ b/libbuild2/cc/compile-rule.cxx
@@ -295,24 +295,25 @@ namespace build2
void compile_rule::
append_sys_hdr_options (T& args) const
{
- assert (sys_hdr_dirs_extra <= sys_hdr_dirs.size ());
+ assert (sys_hdr_dirs_mode + sys_hdr_dirs_extra <= sys_hdr_dirs.size ());
// Note that the mode options are added as part of cmode.
//
auto b (sys_hdr_dirs.begin () + sys_hdr_dirs_mode);
- auto m (sys_hdr_dirs.begin () + sys_hdr_dirs_extra);
- auto e (sys_hdr_dirs.end ());
+ auto x (b + sys_hdr_dirs_extra);
+ // Add extras.
+ //
// Note: starting from 16.10, MSVC gained /external:I option though it
// doesn't seem to affect the order, only "system-ness".
//
append_option_values (
args,
- cclass == compiler_class::gcc ? "-idirafter" :
+ cclass == compiler_class::gcc ? "-isystem" :
cclass == compiler_class::msvc ? (isystem (*this)
? "/external:I"
: "/I") : "-I",
- m, e,
+ b, x,
[] (const dir_path& d) {return d.string ().c_str ();});
// For MSVC if we have no INCLUDE environment variable set, then we
@@ -328,7 +329,7 @@ namespace build2
{
append_option_values (
args, "/I",
- b, m,
+ x, sys_hdr_dirs.end (),
[] (const dir_path& d) {return d.string ().c_str ();});
}
}
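To illustrate the effect of the -idirafter to -isystem switch for the GCC class (made-up directory): an extra entry such as /usr/local/include is now passed as -isystem /usr/local/include, which keeps it after any -I directories but ahead of the compiler's built-in directories, whereas -idirafter used to place it after the built-in directories.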
@@ -353,6 +354,35 @@ namespace build2
case lang::c: o1 = "/TC"; break;
case lang::cxx: o1 = "/TP"; break;
}
+
+ // Note: /interface and /internalPartition are in addition to /TP.
+ //
+ switch (md.type)
+ {
+ case unit_type::non_modular:
+ case unit_type::module_impl:
+ {
+ break;
+ }
+ case unit_type::module_intf:
+ case unit_type::module_intf_part:
+ {
+ o2 = "/interface";
+ break;
+ }
+ case unit_type::module_impl_part:
+ {
+ o2 = "/internalPartition";
+ break;
+ }
+ case unit_type::module_header:
+ {
+ //@@ MODHDR TODO: /exportHeader
+ assert (false);
+ break;
+ }
+ }
+
break;
}
case compiler_class::gcc:
@@ -370,14 +400,21 @@ namespace build2
case unit_type::non_modular:
case unit_type::module_impl:
{
- bool obj (x_objective (md.src));
-
o1 = "-x";
- switch (x_lang)
+
+ if (x_assembler_cpp (md.src))
+ o2 = "assembler-with-cpp";
+ else
{
- case lang::c: o2 = obj ? "objective-c" : "c"; break;
- case lang::cxx: o2 = obj ? "objective-c++" : "c++"; break;
+ bool obj (x_objective (md.src));
+
+ switch (x_lang)
+ {
+ case lang::c: o2 = obj ? "objective-c" : "c"; break;
+ case lang::cxx: o2 = obj ? "objective-c++" : "c++"; break;
+ }
}
+
break;
}
case unit_type::module_intf:
@@ -417,9 +454,11 @@ namespace build2
default:
assert (false);
}
+
break;
}
}
+
break;
}
}
@@ -476,9 +515,11 @@ namespace build2
// For a header unit we check the "real header" plus the C header.
//
- if (ut == unit_type::module_header ? p.is_a (**x_hdr) || p.is_a<h> () :
- ut == unit_type::module_intf ? p.is_a (*x_mod) :
- p.is_a (x_src) || (x_obj != nullptr && p.is_a (*x_obj)))
+ if (ut == unit_type::module_header ? p.is_a (**x_hdrs) || p.is_a<h> () :
+ ut == unit_type::module_intf ? p.is_a (*x_mod) :
+ p.is_a (x_src) ||
+ (x_asp != nullptr && p.is_a (*x_asp)) ||
+ (x_obj != nullptr && p.is_a (*x_obj)))
{
// Save in the target's auxiliary storage.
//
@@ -937,7 +978,9 @@ namespace build2
//
// Note: ut is still unrefined.
//
- if (ut == unit_type::module_intf && cast_true<bool> (t[b_binless]))
+ if ((ut == unit_type::module_intf ||
+ ut == unit_type::module_intf_part ||
+ ut == unit_type::module_impl_part) && cast_true<bool> (t[b_binless]))
{
// The module interface unit can be the same as an implementation
// (e.g., foo.mxx and foo.cxx) which means obj*{} targets could
@@ -998,6 +1041,12 @@ namespace build2
// to match it if we may need its modules or importable headers
// (see search_modules(), make_header_sidebuild() for details).
//
+ // Well, that was the case until we've added support for immediate
+ // importation of libraries, which happens during the load phase
+ // and naturally leaves the library unmatched. While we could have
+ // returned from search_library() an indication of whether the
+ // library has been matched, this doesn't seem worth the trouble.
+ //
if (p.proj ())
{
pt = search_library (a,
@@ -1005,8 +1054,10 @@ namespace build2
usr_lib_dirs,
p.prerequisite);
+#if 0
if (pt != nullptr && !modules)
continue;
+#endif
}
if (pt == nullptr)
@@ -1034,7 +1085,8 @@ namespace build2
{
pt = &p.search (t);
- if (a.operation () == clean_id && !pt->dir.sub (rs.out_path ()))
+ if (pt == dir ||
+ (a.operation () == clean_id && !pt->dir.sub (rs.out_path ())))
continue;
}
@@ -1126,12 +1178,14 @@ namespace build2
// this can very well be happening in parallel. But that's not a
// problem since fsdir{}'s update is idempotent.
//
- fsdir_rule::perform_update_direct (a, t);
+ fsdir_rule::perform_update_direct (a, *dir);
}
// Note: the leading '@' is reserved for the module map prefix (see
// extract_modules()) and no other line must start with it.
//
+ // NOTE: see also the predefs rule if changing anything here.
+ //
depdb dd (tp + ".d");
// First should come the rule name/version.
@@ -1351,6 +1405,10 @@ namespace build2
//
if (mt != timestamp_nonexistent)
{
+ // Appended to by to_module_info() below.
+ //
+ tu.module_info.imports.clear ();
+
u = false;
md.touch = true;
}
@@ -1435,24 +1493,6 @@ namespace build2
extract_modules (a, bs, t, li,
tts, src,
md, move (tu.module_info), dd, u);
-
- // Currently in VC module interface units must be compiled from
- // the original source (something to do with having to detect and
- // store header boundaries in the .ifc files).
- //
- // @@ MODHDR MSVC: should we do the same for header units? I guess
- // we will figure it out when MSVC supports header units.
- //
- // @@ TMP: probably outdated. Probably the same for partitions.
- //
- // @@ See also similar check in extract_headers(), existing entry
- // case.
- //
- if (ctype == compiler_type::msvc)
- {
- if (ut == unit_type::module_intf)
- psrc.second = false;
- }
}
}
@@ -1471,7 +1511,7 @@ namespace build2
// to keep re-validating the file on every subsequent dry-run as well
// on the real run).
//
- if (u && dd.reading () && !ctx.dry_run)
+ if (u && dd.reading () && !ctx.dry_run_option)
dd.touch = timestamp_unknown;
dd.close (false /* mtime_check */);
@@ -1926,23 +1966,211 @@ namespace build2
for (size_t i (0); i != batch_n; ++i)
{
string& r (batch[i]);
+ size_t rn (r.size ());
- // @@ TODO: quoting and escaping.
+ // The protocol uses a peculiar quoting/escaping scheme that can be
+ // summarized as follows (see the libcody documentation for details):
+ //
+ // - Words are separated with spaces and/or tabs.
//
- size_t b (0), e (0), n; // Next word.
+ // - Words need not be quoted if they only contain characters from
+ // the [-+_/%.A-Za-z0-9] set.
+ //
+ // - Otherwise words need to be single-quoted.
+ //
+ // - Inside single-quoted words, the \n \t \' and \\ escape sequences
+ // are recognized.
+ //
+ // Note that we currently don't treat abutted quotes (as in a' 'b) as
+ // a single word (it doesn't seem plausible that we will ever receive
+ // something like this).
+ //
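// For illustration (made-up words): the two words `foo` and `bar baz's`
// would appear on the wire as
//
//   foo 'bar baz\'s'
//
// (see also the #if 0 UNQUOTE/QUOTE test cases further below).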
+ size_t b (0), e (0), n; bool q; // Next word.
- auto next = [&r, &b, &e, &n] () -> size_t
+ auto next = [&r, rn, &b, &e, &n, &q] () -> size_t
{
- return (n = next_word (r, b, e, ' ', '\t'));
+ if (b != e)
+ b = e;
+
+ // Skip leading whitespaces.
+ //
+ for (; b != rn && (r[b] == ' ' || r[b] == '\t'); ++b) ;
+
+ if (b != rn)
+ {
+ q = (r[b] == '\'');
+
+ // Find first trailing whitespace or closing quote.
+ //
+ for (e = b + 1; e != rn; ++e)
+ {
+ // Note that we deal with invalid quoting/escaping in unquote().
+ //
+ switch (r[e])
+ {
+ case ' ':
+ case '\t':
+ if (q)
+ continue;
+ else
+ break;
+ case '\'':
+ if (q)
+ {
+ ++e; // Include closing quote (hopefully).
+ break;
+ }
+ else
+ {
+ assert (false); // Abutted quote.
+ break;
+ }
+ case '\\':
+ if (++e != rn) // Skip next character (hopefully).
+ continue;
+ else
+ break;
+ default:
+ continue;
+ }
+
+ break;
+ }
+
+ n = e - b;
+ }
+ else
+ {
+ q = false;
+ e = rn;
+ n = 0;
+ }
+
+ return n;
+ };
+
+ // Unquote into tmp the current word returning false if malformed.
+ //
+ auto unquote = [&r, &b, &n, &q, &tmp] (bool clear = true) -> bool
+ {
+ if (q && n > 1)
+ {
+ size_t e (b + n - 1);
+
+ if (r[b] == '\'' && r[e] == '\'')
+ {
+ if (clear)
+ tmp.clear ();
+
+ size_t i (b + 1);
+ for (; i != e; ++i)
+ {
+ char c (r[i]);
+ if (c == '\\')
+ {
+ if (++i == e)
+ {
+ i = 0;
+ break;
+ }
+
+ c = r[i];
+ if (c == 'n') c = '\n';
+ else if (c == 't') c = '\t';
+ }
+ tmp += c;
+ }
+
+ if (i == e)
+ return true;
+ }
+ }
+
+ return false;
+ };
+
+#if 0
+#define UNQUOTE(x, y) \
+ r = x; rn = r.size (); b = e = 0; \
+ assert (next () && unquote () && tmp == y)
+
+ UNQUOTE ("'foo bar'", "foo bar");
+ UNQUOTE (" 'foo bar' ", "foo bar");
+ UNQUOTE ("'foo\\\\bar'", "foo\\bar");
+ UNQUOTE ("'\\'foo bar'", "'foo bar");
+ UNQUOTE ("'foo bar\\''", "foo bar'");
+ UNQUOTE ("'\\'foo\\\\bar\\''", "'foo\\bar'");
+
+ fail << "all good";
+#endif
+
+ // Escape if necessary the specified string and append to r.
+ //
+ auto escape = [&r] (const string& s)
+ {
+ size_t b (0), e, n (s.size ());
+ while (b != n && (e = s.find_first_of ("\\'\n\t", b)) != string::npos)
+ {
+ r.append (s, b, e - b); // Preceding chunk.
+
+ char c (s[e]);
+ r += '\\';
+ r += (c == '\n' ? 'n' : c == '\t' ? 't' : c);
+ b = e + 1;
+ }
+
+ if (b != n)
+ r.append (s, b, e); // Final chunk.
+ };
+
+ // Quote and escape if necessary the specified string and append to r.
+ //
+ auto quote = [&r, &escape] (const string& s)
+ {
+ if (find_if (s.begin (), s.end (),
+ [] (char c)
+ {
+ return !((c >= 'a' && c <= 'z') ||
+ (c >= '0' && c <= '9') ||
+ (c >= 'A' && c <= 'Z') ||
+ c == '-' || c == '_' || c == '/' ||
+ c == '.' || c == '+' || c == '%');
+ }) == s.end ())
+ {
+ r += s;
+ }
+ else
+ {
+ r += '\'';
+ escape (s);
+ r += '\'';
+ }
};
+#if 0
+#define QUOTE(x, y) \
+ r.clear (); quote (x); \
+ assert (r == y)
+
+ QUOTE ("foo/Bar-7.h", "foo/Bar-7.h");
+
+ QUOTE ("foo bar", "'foo bar'");
+ QUOTE ("foo\\bar", "'foo\\\\bar'");
+ QUOTE ("'foo bar", "'\\'foo bar'");
+ QUOTE ("foo bar'", "'foo bar\\''");
+ QUOTE ("'foo\\bar'", "'\\'foo\\\\bar\\''");
+
+ fail << "all good";
+#endif
+
next (); // Request name.
- auto name = [&r, b, n] (const char* c) -> bool
+ auto name = [&r, b, n, q] (const char* c) -> bool
{
// We can reasonably assume a command will never be quoted.
//
- return (r.compare (b, n, c) == 0 &&
+ return (!q &&
+ r.compare (b, n, c) == 0 &&
(r[n] == ' ' || r[n] == '\t' || r[n] == '\0'));
};
@@ -1991,7 +2219,17 @@ namespace build2
if (next ())
{
- path f (r, b, n);
+ path f;
+ if (!q)
+ f = path (r, b, n);
+ else if (unquote ())
+ f = path (tmp);
+ else
+ {
+ r = "ERROR 'malformed quoting/escaping in request'";
+ continue;
+ }
+
bool exists (true);
// The TU path we pass to the compiler is always absolute so any
@@ -2002,8 +2240,9 @@ namespace build2
//
if (exists && f.relative ())
{
- tmp.assign (r, b, n);
- r = "ERROR 'relative header path "; r += tmp; r += '\'';
+ r = "ERROR 'relative header path ";
+ escape (f.string ());
+ r += '\'';
continue;
}
@@ -2111,7 +2350,7 @@ namespace build2
// Note: if ht is NULL, f is still valid.
//
r = "ERROR 'unable to update header ";
- r += (ht != nullptr ? ht->path () : f).string ();
+ escape ((ht != nullptr ? ht->path () : f).string ());
r += '\'';
continue;
}
@@ -2246,17 +2485,27 @@ namespace build2
// original (which we may need to normalize when we read
// this mapping in extract_headers()).
//
- tmp = "@ "; tmp.append (r, b, n); tmp += ' '; tmp += bp;
+ // @@ This still breaks if the header path contains spaces.
+ // GCC bug 110153.
+ //
+ tmp = "@ ";
+ if (!q) tmp.append (r, b, n);
+ else unquote (false /* clear */); // Can't fail.
+ tmp += ' ';
+ tmp += bp;
+
dd.expect (tmp);
st.header_units++;
}
- r = "PATHNAME "; r += bp;
+ r = "PATHNAME ";
+ quote (bp);
}
catch (const failed&)
{
r = "ERROR 'unable to update header unit for ";
- r += hs; r += '\'';
+ escape (hs);
+ r += '\'';
continue;
}
}
@@ -2282,7 +2531,7 @@ namespace build2
// Truncate the response batch and terminate the communication (see
// also libcody issue #22).
//
- tmp.assign (r, b, n);
+ tmp.assign (r, b, n); // Request name (unquoted).
r = "ERROR '"; r += w; r += ' '; r += tmp; r += '\'';
batch_n = i + 1;
term = true;
@@ -2865,7 +3114,7 @@ namespace build2
// single "version" of a header. Seems reasonable.
//
// Note also that while it would have been nice to have a unified cc
- // cache, the map_extension() call is passed x_inc which is module-
+ // cache, the map_extension() call is passed x_incs which is module-
// specific. In other words, we may end up mapping the same header to
// two different targets depending on whether it is included from, say,
// C or C++ translation unit. We could have used a unified cache for
@@ -2929,7 +3178,7 @@ namespace build2
fp, cache, norm,
[this] (const scope& bs, const string& n, const string& e)
{
- return map_extension (bs, n, e, x_inc);
+ return map_extension (bs, n, e, x_incs);
},
h::static_type,
[this, &d] (action a, const scope& bs, const target& t)
@@ -3020,11 +3269,14 @@ namespace build2
// Preprocessed file extension.
//
- const char* pext (x_objective (src) ? x_obj_pext : x_pext);
+ const char* pext (x_assembler_cpp (src) ? ".Si" :
+ x_objective (src) ? x_obj_pext :
+ x_pext);
// Preprocessor mode that preserves as much information as possible while
// still performing inclusions. Also serves as a flag indicating whether
- // this compiler uses the separate preprocess and compile setup.
+ // this (non-MSVC) compiler uses the separate preprocess and compile
+ // setup.
//
const char* pp (nullptr);
@@ -3035,7 +3287,16 @@ namespace build2
// -fdirectives-only is available since GCC 4.3.0.
//
if (cmaj > 4 || (cmaj == 4 && cmin >= 3))
- pp = "-fdirectives-only";
+ {
+ // Note that for assembler-with-cpp GCC currently forces full
+ // preprocessing in (what appears to be) an attempt to paper over
+ // a deeper issue (see GCC bug 109534). If/when that bug gets
+ // fixed, we can enable this on our side. Note that Clang's
+ // -frewrite-includes also has issues (see below).
+ //
+ if (!x_assembler_cpp (src))
+ pp = "-fdirectives-only";
+ }
break;
}
@@ -3044,7 +3305,16 @@ namespace build2
// -frewrite-includes is available since Clang 3.2.0.
//
if (cmaj > 3 || (cmaj == 3 && cmin >= 2))
- pp = "-frewrite-includes";
+ {
+ // While Clang's -frewrite-includes appears to work, there are
+ // some issues with correctly tracking location information
+ // (manifests itself as wrong line numbers in debug info, for
+ // example). The result also appears to reference the .Si file
+ // instead of the original source file for some reason.
+ //
+ if (!x_assembler_cpp (src))
+ pp = "-frewrite-includes";
+ }
break;
}
@@ -3214,8 +3484,8 @@ namespace build2
// The gen argument to init_args() is in/out. The caller signals whether
// to force the generated header support and on return it signals
- // whether this support is enabled. The first call to init_args is
- // expected to have gen false.
+ // whether this support is enabled. If gen is false, then stderr is
+ // expected to be either discarded or merged with stdout.
//
// Return NULL if the dependency information goes to stdout and a
// pointer to the temporary file path otherwise.
@@ -3366,16 +3636,6 @@ namespace build2
// Some compile options (e.g., -std, -m) affect the preprocessor.
//
- // Currently Clang supports importing "header modules" even when in
- // the TS mode. And "header modules" support macros which means
- // imports have to be resolved during preprocessing. Which poses a
- // bit of a chicken and egg problem for us. For now, the workaround
- // is to remove the -fmodules-ts option when preprocessing. Hopefully
- // there will be a "pure modules" mode at some point.
- //
- // @@ MODHDR Clang: should be solved with the dynamic module mapper
- // if/when Clang supports it?
- //
// Don't treat warnings as errors.
//
@@ -3404,8 +3664,13 @@ namespace build2
append_options (args, cmode);
append_sys_hdr_options (args); // Extra system header dirs (last).
+ // Note that for MSVC stderr is merged with stdout and is then
+ // parsed, so no append_diag_color_options() call.
+
// See perform_update() for details on the choice of options.
//
+ // NOTE: see also the predefs rule if adding anything here.
+ //
{
bool sc (find_option_prefixes (
{"/source-charset:", "-source-charset:"}, args));
@@ -3435,6 +3700,8 @@ namespace build2
!find_option_prefixes ({"/EH", "-EH"}, args))
args.push_back ("/EHsc");
+ // NOTE: see similar code in search_modules().
+ //
if (!find_option_prefixes ({"/MD", "/MT", "-MD", "-MT"}, args))
args.push_back ("/MD");
@@ -3446,7 +3713,7 @@ namespace build2
msvc_sanitize_cl (args);
- psrc = ctx.fcache.create (t.path () + pext, !modules);
+ psrc = ctx.fcache->create (t.path () + pext, !modules);
if (fc)
{
@@ -3465,12 +3732,18 @@ namespace build2
}
case compiler_class::gcc:
{
- append_options (args, cmode,
- cmode.size () - (modules && clang ? 1 : 0));
+ append_options (args, cmode);
append_sys_hdr_options (args); // Extra system header dirs (last).
+ // If not gen, then stderr is discarded.
+ //
+ if (gen)
+ append_diag_color_options (args);
+
// See perform_update() for details on the choice of options.
//
+ // NOTE: see also the predefs rule if adding anything here.
+ //
if (!find_option_prefix ("-finput-charset=", args))
args.push_back ("-finput-charset=UTF-8");
@@ -3482,8 +3755,7 @@ namespace build2
if (ctype == compiler_type::clang && tsys == "win32-msvc")
{
- initializer_list<const char*> os {"-nostdlib", "-nostartfiles"};
- if (!find_options (os, cmode) && !find_options (os, args))
+ if (!find_options ({"-nostdlib", "-nostartfiles"}, args))
{
args.push_back ("-D_MT");
args.push_back ("-D_DLL");
@@ -3593,7 +3865,7 @@ namespace build2
// Preprocessor output.
//
- psrc = ctx.fcache.create (t.path () + pext, !modules);
+ psrc = ctx.fcache->create (t.path () + pext, !modules);
args.push_back ("-o");
args.push_back (psrc.path ().string ().c_str ());
}
@@ -3883,12 +4155,9 @@ namespace build2
// If modules are enabled, then we keep the preprocessed output
// around (see apply() for details).
//
- // See apply() for details on the extra MSVC check.
- //
- if (modules && (ctype != compiler_type::msvc ||
- md.type != unit_type::module_intf))
+ if (modules)
{
- result.first = ctx.fcache.create_existing (t.path () + pext);
+ result.first = ctx.fcache->create_existing (t.path () + pext);
result.second = true;
}
@@ -4835,6 +5104,18 @@ namespace build2
{
tracer trace (x, "compile_rule::parse_unit");
+ // Scanning .S files with our parser is hazardous since such files
+ // sometimes use `#`-style comments. Presumably real compilers just
+ // ignore them in some way, but it doesn't seem worth it to bother in
+ // our case. Also, the checksum calculation over assembler tokens feels
+ // iffy.
+ //
+ if (x_assembler_cpp (src))
+ {
+ tu.type = unit_type::non_modular;
+ return "";
+ }
+
otype ot (li.type);
// If things go wrong give the user a bit extra context. Let's call it
@@ -4913,8 +5194,6 @@ namespace build2
case compiler_class::msvc: werror = "/WX"; break;
}
- bool clang (ctype == compiler_type::clang);
-
append_options (args, t, c_coptions, werror);
append_options (args, t, x_coptions, werror);
@@ -4929,6 +5208,9 @@ namespace build2
append_options (args, cmode);
append_sys_hdr_options (args);
+ // Note: no append_diag_color_options() call since the
+ // diagnostics is discarded.
+
// See perform_update() for details on the choice of options.
//
{
@@ -4974,10 +5256,12 @@ namespace build2
}
case compiler_class::gcc:
{
- append_options (args, cmode,
- cmode.size () - (modules && clang ? 1 : 0));
+ append_options (args, cmode);
append_sys_hdr_options (args);
+ // Note: no append_diag_color_options() call since the
+ // diagnostics is discarded.
+
// See perform_update() for details on the choice of options.
//
if (!find_option_prefix ("-finput-charset=", args))
@@ -4991,8 +5275,7 @@ namespace build2
if (ctype == compiler_type::clang && tsys == "win32-msvc")
{
- initializer_list<const char*> os {"-nostdlib", "-nostartfiles"};
- if (!find_options (os, cmode) && !find_options (os, args))
+ if (!find_options ({"-nostdlib", "-nostartfiles"}, args))
{
args.push_back ("-D_MT");
args.push_back ("-D_DLL");
@@ -5019,12 +5302,36 @@ namespace build2
//
if (ps)
{
- if (ctype == compiler_type::gcc)
+ switch (ctype)
{
- // Note that only these two *plus* -x do the trick.
- //
- args.push_back ("-fpreprocessed");
- args.push_back ("-fdirectives-only");
+ case compiler_type::gcc:
+ {
+ // Note that only these two *plus* -x do the trick.
+ //
+ args.push_back ("-fpreprocessed");
+ args.push_back ("-fdirectives-only");
+ break;
+ }
+ case compiler_type::clang:
+ {
+ // See below for details.
+ //
+ if (ctype == compiler_type::clang &&
+ cmaj >= (cvariant != "apple" ? 15 : 16))
+ {
+ if (find_options ({"-pedantic", "-pedantic-errors",
+ "-Wpedantic", "-Werror=pedantic"},
+ args))
+ {
+ args.push_back ("-Wno-gnu-line-marker");
+ }
+ }
+
+ break;
+ }
+ case compiler_type::msvc:
+ case compiler_type::icc:
+ assert (false);
}
}
@@ -5093,7 +5400,7 @@ namespace build2
fdstream_mode::binary | fdstream_mode::skip);
parser p;
- p.parse (is, path_name (*sp), tu);
+ p.parse (is, path_name (*sp), tu, cid);
is.close ();
@@ -5108,7 +5415,9 @@ namespace build2
if (!modules)
{
if (ut != unit_type::non_modular || !mi.imports.empty ())
- fail << "modules support required by " << src;
+ fail << "modules support required by " << src <<
+ info << "consider enabling modules with "
+ << x << ".features.modules=true in root.build";
}
else
{
@@ -5133,18 +5442,6 @@ namespace build2
ut = md.type;
mi.name = src.path ().string ();
}
-
- // Prior to 15.5 (19.12) VC was not using the 'export module M;'
- // syntax so we use the preprequisite type to distinguish
- // between interface and implementation units.
- //
- // @@ TMP: probably outdated.
- //
- if (ctype == compiler_type::msvc && cmaj == 19 && cmin <= 11)
- {
- if (ut == unit_type::module_impl && src.is_a (*x_mod))
- ut = unit_type::module_intf;
- }
}
// If we were forced to reprocess, assume the checksum is not
@@ -5359,6 +5656,9 @@ namespace build2
{
tracer trace (x, "compile_rule::search_modules");
+ context& ctx (bs.ctx);
+ const scope& rs (*bs.root_scope ());
+
// NOTE: currently we don't see header unit imports (they are handled by
// extract_headers() and are not in imports).
@@ -5394,7 +5694,7 @@ namespace build2
// So, the fuzzy match: the idea is that each match gets a score, the
// number of characters in the module name that got matched. A match
// with the highest score is used. And we use the (length + 1) for a
- // match against an actual module name.
+ // match against an actual (extracted) module name.
//
// Actually, the scoring system is a bit more elaborate than that.
// Consider module name core.window and two files, window.mxx and
@@ -5422,10 +5722,10 @@ namespace build2
// module (or partition) component. Failed that, we will match `format`
// to `print` because the last character (`t`) is the same.
//
- // For std.* modules we only accept non-fuzzy matches (think std.core vs
- // some core.mxx). And if such a module is unresolved, then we assume it
- // is pre-built and will be found by some other means (e.g., VC's
- // IFCPATH).
+ // For std.* modules we only accept non-fuzzy matches (think std.compat
+ // vs some compat.mxx). And if such a module is unresolved, then we
+ // assume it is pre-built and will be found by some other means (e.g.,
+ // VC's IFCPATH).
//
// Note also that we handle module partitions the same as submodules. In
// other words, for matching, `.` and `:` are treated the same.
@@ -5438,7 +5738,7 @@ namespace build2
// PPPPABBBB
//
// Where PPPP is the primary score, A is the A) score, and BBBB is
- // the B) scope described above. Zero signifies no match.
+ // the B) score described above. Zero signifies no match.
//
// We use decimal instead of binary packing to make it easier for the
// human to separate fields in the trace messages, during debugging,
@@ -5544,6 +5844,31 @@ namespace build2
if (!match)
return 0;
+ // Here is another corner case, the module is async_simple:IOExecutor
+ // and the file names are:
+ //
+ // IOExecutor.mxx
+ // SimpleIOExecutor.mxx
+ //
+ // The above implementation treats the latter as better because
+ // `Simple` in SimpleIOExecutor matches `simple` in async_simple. It's
+ // unclear what we can do about it without potentially breaking other
+ // legitimate cases (think Boost_Simple:IOExecutor). Maybe we could
+ // boost the exact partition name match score, similar to the exact
+ // module match, as some sort of heuristic? Let's try.
+ //
+ if (fi == 0 && mi != 0 && m[mi - 1] == ':')
+ {
+ // Pretend we matched one short of the next module component. This
+ // way AsyncSimpleIOExecutor.mxx would still be a better match.
+ //
+ while (--mi != 0 && m[mi - 1] != '.')
+ ;
+
+ msep = (mi != 0); // For uncount logic below.
+ mi++; // One short.
+ }
+
// "Uncount" real separators.
//
if (fsep) fi++;
@@ -5572,6 +5897,20 @@ namespace build2
return ps * 100000 + as * 10000 + bs;
};
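// A worked example of the packing (made-up component scores): a primary
// score of 7, an A) bonus of 1, and a B) bonus of 23 pack as
//
//   7 * 100000 + 1 * 10000 + 23 == 710023
//
// which reads as 7|1|0023 in the trace output.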
+#if 0
+ assert (match ("IOExecutor", "async_simple:IOExecutor") >
+ match ("SimpleIOExecutor", "async_simple:IOExecutor"));
+
+ assert (match ("IOExecutor", "async_simple:IOExecutor") <
+ match ("AsyncSimpleIOExecutor", "async_simple:IOExecutor"));
+
+ assert (match ("IOExecutor", "x.async_simple:IOExecutor") >
+ match ("SimpleIOExecutor", "x.async_simple:IOExecutor"));
+
+ assert (match ("IOExecutor", "x.async_simple:IOExecutor") <
+ match ("AsyncSimpleIOExecutor", "x.async_simple:IOExecutor"));
+#endif
+
auto& pts (t.prerequisite_targets[a]);
size_t start (pts.size ()); // Index of the first to be added.
@@ -5586,7 +5925,7 @@ namespace build2
// promise. It has to do with module re-exporting (export import M;).
// In this case (currently) all implementations simply treat it as a
// shallow (from the BMI's point of view) reference to the module (or an
- // implicit import, if you will). Do you see where it's going? Nowever
+ // implicit import, if you will). Do you see where it's going? Nowhere
// good, that's right. This shallow reference means that the compiler
// should be able to find BMIs for all the re-exported modules,
// recursively. The good news is we are actually in a pretty good shape
@@ -5632,6 +5971,7 @@ namespace build2
// so we actually don't need to pass any extra options (unless things
// get moved) but they still need access to the BMIs (and things will
// most likely have to be done differently for distributed compilation).
+ // @@ Note: no longer the case for Clang either.
//
// So the revised plan: on the off chance that some implementation will
// do it differently we will continue maintaining the imported/re-exported
@@ -5725,6 +6065,8 @@ namespace build2
continue; // Scan the rest to detect if all done.
}
}
+ else
+ assert (name != m.name); // No duplicates.
done = false;
}
@@ -5752,10 +6094,18 @@ namespace build2
//
if (pt->is_a<bmix> ())
{
- const string& n (cast<string> (pt->state[a].vars[c_module_name]));
-
- if (const target** p = check_exact (n))
- *p = pt;
+ // If the extraction of the module information for this BMI failed
+ // and we have deferred failure to compiler diagnostics, then
+ // there will be no module name assigned. It would have been
+ // better to make sure that's the cause, but that won't be easy.
+ //
+ const string* n (cast_null<string> (
+ pt->state[a].vars[c_module_name]));
+ if (n != nullptr)
+ {
+ if (const target** p = check_exact (*n))
+ *p = pt;
+ }
}
else if (pt->is_a (*x_mod))
{
@@ -5764,7 +6114,8 @@ namespace build2
// rule puts them into prerequisite_targets for us).
//
// The module names should be specified but if not assume
- // something else is going on and ignore.
+ // something else is going on (like a deferred failure) and
+ // ignore.
//
// Note also that besides modules, prerequisite_targets may
// contain libraries which are interface dependencies of this
@@ -5777,7 +6128,15 @@ namespace build2
continue;
if (const target** p = check_exact (*n))
- *p = &this->make_module_sidebuild (a, bs, l, *pt, *n); // GCC 4.9
+ {
+ // It seems natural to build a BMI type that corresponds to the
+ // library type. After all, this is where the object file part
+ // of the BMI is going to come from (unless it's a module
+ // interface-only library).
+ //
+ *p = &this->make_module_sidebuild (
+ a, bs, &l, link_type (l).type, *pt, *n).first; // GCC 4.9
+ }
}
// Note that in prerequisite targets we will have the libux{}
// members, not the group.
@@ -5792,112 +6151,295 @@ namespace build2
}
};
- for (prerequisite_member p: group_prerequisite_members (a, t))
+ // Pre-resolve std modules in an ad hoc way for certain compilers.
+ //
+ // @@ TODO: cache x_stdlib value.
+ //
+ if ((ctype == compiler_type::msvc) ||
+ (ctype == compiler_type::clang &&
+ cmaj >= 17 &&
+ cast<string> (rs[x_stdlib]) == "libc++"))
{
- if (include (a, t, p) != include_type::normal) // Excluded/ad hoc.
- continue;
-
- const target* pt (p.load ()); // Should be cached for libraries.
+ // Similar logic to check_exact() above.
+ //
+ done = true;
- if (pt != nullptr)
+ for (size_t i (0); i != n; ++i)
{
- const file* lt (nullptr);
-
- if (const libx* l = pt->is_a<libx> ())
- lt = link_member (*l, a, li);
- else if (pt->is_a<liba> () || pt->is_a<libs> () || pt->is_a<libux> ())
- lt = &pt->as<file> ();
+ module_import& m (imports[i]);
- // If this is a library, check its bmi{}s and mxx{}s.
- //
- if (lt != nullptr)
+ if (m.name == "std" || m.name == "std.compat")
{
- find (*lt, find);
+ otype ot (otype::e);
+ const target* mt (nullptr);
- if (done)
- break;
+ switch (ctype)
+ {
+ case compiler_type::clang:
+ {
+ if (m.name != "std")
+ fail << "module " << m.name << " not yet provided by libc++";
- continue;
- }
+ // Find or insert std.cppm (similar code to pkgconfig.cxx).
+ //
+ // Note: build_install_data is absolute and normalized.
+ //
+ mt = &ctx.targets.insert_locked (
+ *x_mod,
+ (dir_path (build_install_data) /= "libbuild2") /= "cc",
+ dir_path (),
+ "std",
+ string ("cppm"), // For C++14 during bootstrap.
+ target_decl::implied,
+ trace).first;
+
+ // Which output type should we use, static or shared? The
+ // correct way would be to detect whether static or shared
+ // version of libc++ is to be linked and use the corresponding
+ // type. And we could do that by looking for -static-libstdc++
+ // in loption (and no, it's not -static-libc++).
+ //
+ // But, looking at the object file produced from std.cppm, it
+ // only contains one symbol, the static object initializer.
+ // And this is unlikely to change since all other non-inline
+ // or template symbols should be in libc++. So feels like it's
+ // not worth the trouble and one variant should be good enough
+ // for both cases. Let's use the shared one for less
+ // surprising diagnostics (as in, "why are you linking obje{}
+ // to a shared library?")
+ //
+ // (Of course, theoretically, std.cppm could detect via a
+ // macro whether it's being compiled with -fPIC or not and do
+ // things differently, but this seems far-fetched).
+ //
+ ot = otype::s;
- // Fall through.
- }
+ break;
+ }
+ case compiler_type::msvc:
+ {
+ // For MSVC, the source files std.ixx and std.compat.ixx are
+ // found in the modules/ subdirectory which is a sibling of
+ // include/ in the MSVC toolset (and "that is a contract with
+ // customers" to quote one of the developers).
+ //
+ // The problem of course is that there are multiple system
+ // header search directories (for example, as specified in the
+ // INCLUDE environment variable) and which one of them is for
+ // the MSVC toolset is not specified. So what we are going to
+ // do is search for one of the well-known standard C++ headers
+ // and assume that the directory where we found it is the one
+ // we are looking for. Or we could look for something
+ // MSVC-specific like vcruntime.h.
+ //
+ dir_path modules;
+ if (optional<path> p = find_system_header (path ("vcruntime.h")))
+ {
+ p->make_directory (); // Strip vcruntime.h.
+ if (p->leaf () == path ("include")) // Sanity check.
+ {
+ modules = path_cast<dir_path> (move (p->make_directory ()));
+ modules /= "modules";
+ }
+ }
- // While it would have been even better not to search for a target, we
- // need to get hold of the corresponding mxx{} (unlikely but possible
- // for bmi{} to have a different name).
- //
- // While we want to use group_prerequisite_members() below, we cannot
- // call resolve_group() since we will be doing it "speculatively" for
- // modules that we may use but also for modules that may use us. This
- // quickly leads to deadlocks. So instead we are going to perform an
- // ad hoc group resolution.
- //
- const target* pg;
- if (p.is_a<bmi> ())
- {
- pg = pt != nullptr ? pt : &p.search (t);
- pt = &search (t, btt, p.key ()); // Same logic as in picking obj*{}.
- }
- else if (p.is_a (btt))
- {
- pg = &search (t, bmi::static_type, p.key ());
- if (pt == nullptr) pt = &p.search (t);
+ if (modules.empty ())
+ fail << "unable to locate MSVC standard modules directory";
+
+ mt = &ctx.targets.insert_locked (
+ *x_mod,
+ move (modules),
+ dir_path (),
+ m.name,
+ string ("ixx"), // For C++14 during bootstrap.
+ target_decl::implied,
+ trace).first;
+
+ // For MSVC it's easier to detect the runtime being used since
+ // it's specified with the compile options (/MT[d], /MD[d]).
+ //
+ // Similar semantics as in extract_headers() except here we
+ // use options visible from the root scope. Note that
+ // find_option_prefixes() looks in reverse, so look in the
+ // cmode, x_coptions, c_coptions order.
+ //
+ initializer_list<const char*> os {"/MD", "/MT", "-MD", "-MT"};
+
+ const string* o;
+ if ((o = find_option_prefixes (os, cmode)) != nullptr ||
+ (o = find_option_prefixes (os, rs, x_coptions)) != nullptr ||
+ (o = find_option_prefixes (os, rs, c_coptions)) != nullptr)
+ {
+ ot = (*o)[2] == 'D' ? otype::s : otype::a;
+ }
+ else
+ ot = otype::s; // The default is /MD.
+
+ break;
+ }
+ case compiler_type::gcc:
+ case compiler_type::icc:
+ assert (false);
+ };
+
+ pair<target&, ulock> tl (
+ this->make_module_sidebuild ( // GCC 4.9
+ a, bs, nullptr, ot, *mt, m.name));
+
+ if (tl.second.owns_lock ())
+ {
+ // Special compile options for the std modules.
+ //
+ if (ctype == compiler_type::clang)
+ {
+ value& v (tl.first.append_locked (x_coptions));
+
+ if (v.null)
+ v = strings {};
+
+ strings& cops (v.as<strings> ());
+
+ switch (ctype)
+ {
+ case compiler_type::clang:
+ {
+ cops.push_back ("-Wno-reserved-module-identifier");
+ break;
+ }
+ case compiler_type::msvc:
+ // It appears nothing special is needed to compile MSVC
+ // standard modules.
+ case compiler_type::gcc:
+ case compiler_type::icc:
+ assert (false);
+ };
+ }
+
+ tl.second.unlock ();
+ }
+
+ pts[start + i].target = &tl.first;
+ m.score = match_max (m.name) + 1;
+ continue; // Scan the rest to detect if all done.
+ }
+
+ done = false;
}
- else
- continue;
+ }
- // Find the mxx{} prerequisite and extract its "file name" for the
- // fuzzy match unless the user specified the module name explicitly.
- //
- for (prerequisite_member p:
- prerequisite_members (a, t, group_prerequisites (*pt, pg)))
+ // Go over prerequisites and try to resolve imported modules with them.
+ //
+ if (!done)
+ {
+ for (prerequisite_member p: group_prerequisite_members (a, t))
{
if (include (a, t, p) != include_type::normal) // Excluded/ad hoc.
continue;
- if (p.is_a (*x_mod))
+ const target* pt (p.load ()); // Should be cached for libraries.
+
+ if (pt != nullptr)
{
- // Check for an explicit module name. Only look for an existing
- // target (which means the name can only be specified on the
- // target itself, not target type/pattern-spec).
+ const file* lt (nullptr);
+
+ if (const libx* l = pt->is_a<libx> ())
+ lt = link_member (*l, a, li);
+ else if (pt->is_a<liba> () ||
+ pt->is_a<libs> () ||
+ pt->is_a<libux> ())
+ lt = &pt->as<file> ();
+
+ // If this is a library, check its bmi{}s and mxx{}s.
//
- const target* t (p.search_existing ());
- const string* n (t != nullptr
- ? cast_null<string> (t->vars[c_module_name])
- : nullptr);
- if (n != nullptr)
+ if (lt != nullptr)
{
- if (const target** p = check_exact (*n))
- *p = pt;
+ find (*lt, find);
+
+ if (done)
+ break;
+
+ continue;
}
- else
+
+ // Fall through.
+ }
+
+ // While it would have been even better not to search for a target,
+ // we need to get hold of the corresponding mxx{} (unlikely but
+ // possible for bmi{} to have a different name).
+ //
+ // While we want to use group_prerequisite_members() below, we
+ // cannot call resolve_group() since we will be doing it
+ // "speculatively" for modules that we may use but also for modules
+ // that may use us. This quickly leads to deadlocks. So instead we
+ // are going to perform an ad hoc group resolution.
+ //
+ const target* pg;
+ if (p.is_a<bmi> ())
+ {
+ pg = pt != nullptr ? pt : &p.search (t);
+ pt = &search (t, btt, p.key ()); // Same logic as in picking obj*{}.
+ }
+ else if (p.is_a (btt))
+ {
+ pg = &search (t, bmi::static_type, p.key ());
+ if (pt == nullptr) pt = &p.search (t);
+ }
+ else
+ continue;
+
+ // Find the mxx{} prerequisite and extract its "file name" for the
+ // fuzzy match unless the user specified the module name explicitly.
+ //
+ for (prerequisite_member p:
+ prerequisite_members (a, t, group_prerequisites (*pt, pg)))
+ {
+ if (include (a, t, p) != include_type::normal) // Excluded/ad hoc.
+ continue;
+
+ if (p.is_a (*x_mod))
{
- // Fuzzy match.
+ // Check for an explicit module name. Only look for an existing
+ // target (which means the name can only be specified on the
+ // target itself, not target type/pattern-spec).
//
- string f;
+ const target* mt (p.search_existing ());
+ const string* n (mt != nullptr
+ ? cast_null<string> (mt->vars[c_module_name])
+ : nullptr);
+ if (n != nullptr)
+ {
+ if (const target** p = check_exact (*n))
+ *p = pt;
+ }
+ else
+ {
+ // Fuzzy match.
+ //
+ string f;
- // Add the directory part if it is relative. The idea is to
- // include it into the module match, say hello.core vs
- // hello/mxx{core}.
- //
- // @@ MOD: Why not for absolute? Good question. What if it
- // contains special components, say, ../mxx{core}?
- //
- const dir_path& d (p.dir ());
+ // Add the directory part if it is relative. The idea is to
+ // include it into the module match, say hello.core vs
+ // hello/mxx{core}.
+ //
+ // @@ MOD: Why not for absolute? Good question. What if it
+ // contains special components, say, ../mxx{core}?
+ //
+ const dir_path& d (p.dir ());
- if (!d.empty () && d.relative ())
- f = d.representation (); // Includes trailing slash.
+ if (!d.empty () && d.relative ())
+ f = d.representation (); // Includes trailing slash.
- f += p.name ();
- check_fuzzy (pt, f);
+ f += p.name ();
+ check_fuzzy (pt, f);
+ }
+ break;
}
- break;
}
- }
- if (done)
- break;
+ if (done)
+ break;
+ }
}
// Diagnose unresolved modules.
@@ -5967,9 +6509,12 @@ namespace build2
if (m.score <= match_max (in))
{
- const string& mn (cast<string> (bt->state[a].vars[c_module_name]));
+ // As above (deferred failure).
+ //
+ const string* mn (
+ cast_null<string> (bt->state[a].vars[c_module_name]));
- if (in != mn)
+ if (mn != nullptr && in != *mn)
{
// Note: matched, so the group should be resolved.
//
@@ -5983,7 +6528,7 @@ namespace build2
fail (relative (src))
<< "failed to correctly guess module name from " << p <<
info << "guessed: " << in <<
- info << "actual: " << mn <<
+ info << "actual: " << *mn <<
info << "consider adjusting module interface file names or" <<
info << "consider specifying module name with " << x
<< ".module_name";
@@ -6011,12 +6556,15 @@ namespace build2
if (et == nullptr)
continue; // Unresolved (std.*).
- const string& mn (cast<string> (et->state[a].vars[c_module_name]));
+ // As above (deferred failure).
+ //
+ const string* mn (cast_null<string> (et->state[a].vars[c_module_name]));
- if (find_if (imports.begin (), imports.end (),
- [&mn] (const module_import& i)
+ if (mn != nullptr &&
+ find_if (imports.begin (), imports.end (),
+ [mn] (const module_import& i)
{
- return i.name == mn;
+ return i.name == *mn;
}) == imports.end ())
{
pts.push_back (et);
@@ -6027,10 +6575,10 @@ namespace build2
// but it's probably not worth it if we have a small string
// optimization.
//
- import_type t (mn.find (':') != string::npos
+ import_type t (mn->find (':') != string::npos
? import_type::module_part
: import_type::module_intf);
- imports.push_back (module_import {t, mn, true, 0});
+ imports.push_back (module_import {t, *mn, true, 0});
}
}
}
@@ -6050,6 +6598,10 @@ namespace build2
// Find or create a modules sidebuild subproject returning its root
// directory.
//
+ // @@ Could we omit creating a subproject if the sidebuild scope is the
+ // project scope itself? This would speed up simple examples (and
+ // potentially direct compilation that we may support).
+ //
pair<dir_path, const scope&> compile_rule::
find_modules_sidebuild (const scope& rs) const
{
@@ -6154,13 +6706,18 @@ namespace build2
return pair<dir_path, const scope&> (move (pd), *as);
}
- // Synthesize a dependency for building a module binary interface on
- // the side.
+ // Synthesize a dependency for building a module binary interface of a
+ // library on the side. If library is missing, then assume it's some
+ // ad hoc/system library case (in which case we assume it's binless,
+ // for now).
//
- const file& compile_rule::
+ // The return value semantics is as in target_set::insert_locked().
+ //
+ pair<target&, ulock> compile_rule::
make_module_sidebuild (action a,
const scope& bs,
- const file& lt,
+ const file* lt,
+ otype ot,
const target& mt,
const string& mn) const
{
@@ -6181,24 +6738,20 @@ namespace build2
back_inserter (mf),
[] (char c) {return c == '.' ? '-' : c == ':' ? '+' : c;});
- // It seems natural to build a BMI type that corresponds to the library
- // type. After all, this is where the object file part of the BMI is
- // going to come from (unless it's a module interface-only library).
- //
- const target_type& tt (compile_types (link_type (lt).type).bmi);
+ const target_type& tt (compile_types (ot).bmi);
// Store the BMI target in the subproject root. If the target already
// exists then we assume all this is already done (otherwise why would
// someone have created such a target).
//
- if (const file* bt = bs.ctx.targets.find<file> (
+ if (const target* bt = bs.ctx.targets.find (
tt,
pd,
dir_path (), // Always in the out tree.
mf,
nullopt, // Use default extension.
trace))
- return *bt;
+ return pair<target&, ulock> (const_cast<target&> (*bt), ulock ());
prerequisites ps;
ps.push_back (prerequisite (mt));
@@ -6211,19 +6764,22 @@ namespace build2
//
// Note: lt is matched and so the group is resolved.
//
- ps.push_back (prerequisite (lt));
- for (prerequisite_member p: group_prerequisite_members (a, lt))
+ if (lt != nullptr)
{
- // Ignore update=match.
- //
- lookup l;
- if (include (a, lt, p, &l) != include_type::normal) // Excluded/ad hoc.
- continue;
-
- if (p.is_a<libx> () ||
- p.is_a<liba> () || p.is_a<libs> () || p.is_a<libux> ())
+ ps.push_back (prerequisite (*lt));
+ for (prerequisite_member p: group_prerequisite_members (a, *lt))
{
- ps.push_back (p.as_prerequisite ());
+ // Ignore update=match.
+ //
+ lookup l;
+ if (include (a, *lt, p, &l) != include_type::normal) // Excluded/ad hoc.
+ continue;
+
+ if (p.is_a<libx> () ||
+ p.is_a<liba> () || p.is_a<libs> () || p.is_a<libux> ())
+ {
+ ps.push_back (p.as_prerequisite ());
+ }
}
}
@@ -6236,22 +6792,22 @@ namespace build2
target_decl::implied,
trace,
true /* skip_find */));
- file& bt (p.first.as<file> ());
// Note that this is racy and someone might have created this target
// while we were preparing the prerequisite list.
//
if (p.second)
{
- bt.prerequisites (move (ps));
+ p.first.prerequisites (move (ps));
// Unless this is a binless library, we don't need the object file
// (see config_data::b_binless for details).
//
- bt.vars.assign (b_binless) = (lt.mtime () == timestamp_unreal);
+ p.first.vars.assign (b_binless) = (lt == nullptr ||
+ lt->mtime () == timestamp_unreal);
}
- return bt;
+ return p;
}
// Synthesize a dependency for building a header unit binary interface on
@@ -6566,7 +7122,7 @@ namespace build2
// options).
//
void compile_rule::
- append_module_options (environment& env,
+ append_module_options (environment&,
cstrings& args,
small_vector<string, 2>& stor,
action a,
@@ -6577,8 +7133,6 @@ namespace build2
unit_type ut (md.type);
const module_positions& ms (md.modules);
- dir_path stdifc; // See the VC case below.
-
switch (ctype)
{
case compiler_type::gcc:
@@ -6607,15 +7161,12 @@ namespace build2
if (ms.start == 0)
return;
- // Clang embeds module file references so we only need to specify
- // our direct imports.
- //
- // If/when we get the ability to specify the mapping in a file, we
- // will pass the whole list.
+ // If/when we get the ability to specify the mapping in a file.
//
#if 0
// In Clang the module implementation's unit .pcm is special and
- // must be "loaded".
+ // must be "loaded". Note: not anymore, not from Clang 16 and is
+ // deprecated in 17.
//
if (ut == unit_type::module_impl)
{
@@ -6632,10 +7183,7 @@ namespace build2
stor.push_back (move (s));
#else
auto& pts (t.prerequisite_targets[a]);
- for (size_t i (ms.start),
- n (ms.copied != 0 ? ms.copied : pts.size ());
- i != n;
- ++i)
+ for (size_t i (ms.start), n (pts.size ()); i != n; ++i)
{
const target* pt (pts[i]);
@@ -6648,17 +7196,9 @@ namespace build2
const file& f (pt->as<file> ());
string s (relative (f.path ()).string ());
- // In Clang the module implementation's unit .pcm is special and
- // must be "loaded".
- //
- if (ut == unit_type::module_impl && i == ms.start)
- s.insert (0, "-fmodule-file=");
- else
- {
- s.insert (0, 1, '=');
- s.insert (0, cast<string> (f.state[a].vars[c_module_name]));
- s.insert (0, "-fmodule-file=");
- }
+ s.insert (0, 1, '=');
+ s.insert (0, cast<string> (f.state[a].vars[c_module_name]));
+ s.insert (0, "-fmodule-file=");
stor.push_back (move (s));
}
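The string assembled in this loop is Clang's module mapping option of the form -fmodule-file=<module-name>=<path>, for example -fmodule-file=hello.core=../bmi/hello-core.pcm (the path here is illustrative only). A trivial sketch of the same assembly:

#include <string>

static std::string
clang_module_mapping (const std::string& name, const std::string& pcm)
{
  return "-fmodule-file=" + name + '=' + pcm;
}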
@@ -6670,10 +7210,11 @@ namespace build2
if (ms.start == 0)
return;
+ // MSVC requires a transitive set of interfaces, including
+ // implementation partitions.
+ //
auto& pts (t.prerequisite_targets[a]);
- for (size_t i (ms.start), n (pts.size ());
- i != n;
- ++i)
+ for (size_t i (ms.start), n (pts.size ()); i != n; ++i)
{
const target* pt (pts[i]);
@@ -6684,34 +7225,14 @@ namespace build2
// of these are bmi's.
//
const file& f (pt->as<file> ());
+ string s (relative (f.path ()).string ());
- // In VC std.* modules can only come from a single directory
- // specified with the IFCPATH environment variable or the
- // /module:stdIfcDir option.
- //
- if (std_module (cast<string> (f.state[a].vars[c_module_name])))
- {
- dir_path d (f.path ().directory ());
+ s.insert (0, 1, '=');
+ s.insert (0, cast<string> (f.state[a].vars[c_module_name]));
- if (stdifc.empty ())
- {
- // Go one directory up since /module:stdIfcDir will look in
- // either Release or Debug subdirectories. Keeping the result
- // absolute feels right.
- //
- stor.push_back ("/module:stdIfcDir");
- stor.push_back (d.directory ().string ());
- stdifc = move (d);
- }
- else if (d != stdifc) // Absolute and normalized.
- fail << "multiple std.* modules in different directories";
- }
- else
- {
- stor.push_back ("/module:reference");
- stor.push_back (relative (f.path ()).string ());
- }
+ stor.push_back (move (s));
}
+
break;
}
case compiler_type::icc:
@@ -6722,25 +7243,11 @@ namespace build2
// into storage? Because of potential reallocations.
//
for (const string& a: stor)
- args.push_back (a.c_str ());
-
- if (getenv ("IFCPATH"))
{
- // VC's IFCPATH takes precedence over /module:stdIfcDir so unset it if
- // we are using our own std modules. Note: IFCPATH saved in guess.cxx.
- //
- if (!stdifc.empty ())
- env.push_back ("IFCPATH");
- }
- else if (stdifc.empty ())
- {
- // Add the VC's default directory (should be only one).
- //
- if (sys_mod_dirs != nullptr && !sys_mod_dirs->empty ())
- {
- args.push_back ("/module:stdIfcDir");
- args.push_back (sys_mod_dirs->front ().string ().c_str ());
- }
+ if (ctype == compiler_type::msvc)
+ args.push_back ("/reference");
+
+ args.push_back (a.c_str ());
}
}
@@ -6813,7 +7320,8 @@ namespace build2
// If we are building a module interface or partition, then the target
// is bmi*{} and it may have an ad hoc obj*{} member. For header units
// there is no obj*{} (see the corresponding add_adhoc_member() call in
- // apply()).
+ // apply()). For named modules there may be no obj*{} if this is a
+ // sidebuild (obj*{} is already in the library binary).
//
path relm;
path relo;
@@ -6861,9 +7369,6 @@ namespace build2
small_vector<string, 2> header_args; // Header unit options storage.
small_vector<string, 2> module_args; // Module options storage.
- size_t out_i (0); // Index of the -o option.
- //size_t lang_n (0); // Number of lang options. @@ TMP
-
switch (cclass)
{
case compiler_class::msvc:
@@ -6883,6 +7388,10 @@ namespace build2
if (md.pp != preprocessed::all)
append_sys_hdr_options (args); // Extra system header dirs (last).
+ // Note: could be overridden in mode.
+ //
+ append_diag_color_options (args);
+
// Set source/execution charsets to UTF-8 unless a custom charset
// is specified.
//
@@ -6973,9 +7482,8 @@ namespace build2
// Note also that what we are doing here appears to be incompatible
// with PCH (/Y* options) and /Gm (minimal rebuild).
//
- // @@ MOD: TODO deal with absent relo.
- //
- if (find_options ({"/Zi", "/ZI", "-Zi", "-ZI"}, args))
+ if (!relo.empty () &&
+ find_options ({"/Zi", "/ZI", "-Zi", "-ZI"}, args))
{
if (fc)
args.push_back ("/Fd:");
@@ -6988,27 +7496,38 @@ namespace build2
args.push_back (out1.c_str ());
}
- if (fc)
- {
- args.push_back ("/Fo:");
- args.push_back (relo.string ().c_str ());
- }
- else
+ if (ut == unit_type::module_intf ||
+ ut == unit_type::module_intf_part ||
+ ut == unit_type::module_impl_part ||
+ ut == unit_type::module_header)
{
- out = "/Fo" + relo.string ();
- args.push_back (out.c_str ());
- }
+ assert (ut != unit_type::module_header); // @@ MODHDR
- // @@ MODHDR MSVC
- // @@ MODPART MSVC
- //
- if (ut == unit_type::module_intf)
- {
relm = relative (tp);
- args.push_back ("/module:interface");
- args.push_back ("/module:output");
+ args.push_back ("/ifcOutput");
args.push_back (relm.string ().c_str ());
+
+ if (relo.empty ())
+ args.push_back ("/ifcOnly");
+ else
+ {
+ args.push_back ("/Fo:");
+ args.push_back (relo.string ().c_str ());
+ }
+ }
+ else
+ {
+ if (fc)
+ {
+ args.push_back ("/Fo:");
+ args.push_back (relo.string ().c_str ());
+ }
+ else
+ {
+ out = "/Fo" + relo.string ();
+ args.push_back (out.c_str ());
+ }
}
// Note: no way to indicate that the source is already preprocessed.
@@ -7023,9 +7542,53 @@ namespace build2
{
append_options (args, cmode);
+ // Clang 15 introduced the unqualified-std-cast-call warning which
+ // warns about unqualified calls to std::move() and std::forward()
+ // (because they can be "hijacked" via ADL). Surprisingly, this
+ // warning is enabled by default, as opposed to with -Wextra or at
+ // least -Wall. It has also proven to be quite disruptive, causing a
+ // large number of warnings in a large number of packages. So we are
+ // going to "remap" it to -Wextra for now and in the future may
+ // "relax" it to -Wall and potentially to being enabled by default.
+ // See GitHub issue #259 for background and details.
+ //
+ if (x_lang == lang::cxx &&
+ ctype == compiler_type::clang &&
+ cmaj >= 15)
+ {
+ bool w (false); // Seen -W[no-]unqualified-std-cast-call
+ optional<bool> extra; // Seen -W[no-]extra
+
+ for (const char* s: reverse_iterate (args))
+ {
+ if (s != nullptr)
+ {
+ if (strcmp (s, "-Wunqualified-std-cast-call") == 0 ||
+ strcmp (s, "-Wno-unqualified-std-cast-call") == 0)
+ {
+ w = true;
+ break;
+ }
+
+ if (!extra) // Last seen option wins.
+ {
+ if (strcmp (s, "-Wextra") == 0) extra = true;
+ else if (strcmp (s, "-Wno-extra") == 0) extra = false;
+ }
+ }
+ }
+
+ if (!w && (!extra || !*extra))
+ args.push_back ("-Wno-unqualified-std-cast-call");
+ }
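For context, a minimal example of the code this warning targets (the unqualified call can be picked up via ADL, which is why Clang 15+ flags it by default):

#include <string>
#include <vector>
#include <utility>

void
append (std::vector<std::string>& v, std::string s)
{
  v.push_back (move (s));        // warning: unqualified call to 'std::move'
  //v.push_back (std::move (s)); // Ok.
}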
+
if (md.pp != preprocessed::all)
append_sys_hdr_options (args); // Extra system header dirs (last).
+ // Note: could be overridden in mode.
+ //
+ append_diag_color_options (args);
+
// Set the input charset to UTF-8 unless a custom one is specified.
//
// Note that the execution charset (-fexec-charset) is UTF-8 by
@@ -7079,8 +7642,7 @@ namespace build2
// either -nostdlib or -nostartfiles is specified. Let's do
// the same.
//
- initializer_list<const char*> os {"-nostdlib", "-nostartfiles"};
- if (!find_options (os, cmode) && !find_options (os, args))
+ if (!find_options ({"-nostdlib", "-nostartfiles"}, args))
{
args.push_back ("-D_MT");
args.push_back ("-D_DLL");
@@ -7142,10 +7704,6 @@ namespace build2
append_header_options (env, args, header_args, a, t, md, md.dd);
append_module_options (env, args, module_args, a, t, md, md.dd);
- // Note: the order of the following options is relied upon below.
- //
- out_i = args.size (); // Index of the -o option.
-
if (ut == unit_type::module_intf ||
ut == unit_type::module_intf_part ||
ut == unit_type::module_impl_part ||
@@ -7184,21 +7742,35 @@ namespace build2
}
case compiler_type::clang:
{
- // @@ MOD TODO: deal with absent relo.
+ assert (ut != unit_type::module_header); // @@ MODHDR
relm = relative (tp);
- args.push_back ("-o");
- args.push_back (relm.string ().c_str ());
- args.push_back ("--precompile");
-
// Without this option Clang's .pcm will reference source
- // files. In our case this file may be transient (.ii). Plus,
+ // files. In our case this file may be transient (.ii). Plus,
// it won't play nice with distributed compilation.
//
+ // Note that this sort of appears to be the default from Clang
+ // 17, but not quite, see llvm-project issue #72383.
+ //
args.push_back ("-Xclang");
args.push_back ("-fmodules-embed-all-files");
+ if (relo.empty ())
+ {
+ args.push_back ("-o");
+ args.push_back (relm.string ().c_str ());
+ args.push_back ("--precompile");
+ }
+ else
+ {
+ out1 = "-fmodule-output=" + relm.string ();
+ args.push_back (out1.c_str ());
+ args.push_back ("-o");
+ args.push_back (relo.string ().c_str ());
+ args.push_back ("-c");
+ }
+
break;
}
case compiler_type::msvc:
@@ -7213,7 +7785,7 @@ namespace build2
args.push_back ("-c");
}
- /*lang_n = */append_lang_options (args, md); // @@ TMP
+ append_lang_options (args, md);
if (md.pp == preprocessed::all)
{
@@ -7258,6 +7830,14 @@ namespace build2
if (!env.empty ())
env.push_back (nullptr);
+ // We have no choice but to serialize early if we want the command line
+ // printed shortly before actually executing the compiler. Failing that,
+ // it may look like we are still executing in parallel.
+ //
+ scheduler::alloc_guard jobs_ag;
+ if (!ctx.dry_run && cast_false<bool> (t[c_serialize]))
+ jobs_ag = scheduler::alloc_guard (*ctx.sched, phase_unlock (nullptr));
+
// With verbosity level 2 print the command line as if we are compiling
// the source file, not its preprocessed version (so that it's easy to
// copy and re-run, etc). Only at level 3 and above print the real deal.
@@ -7265,7 +7845,13 @@ namespace build2
// @@ TODO: why don't we print env (here and/or below)? Also link rule.
//
if (verb == 1)
- print_diag (x_objective (s) ? x_obj_name : x_name, s, t);
+ {
+ const char* name (x_assembler_cpp (s) ? "as-cpp" :
+ x_objective (s) ? x_obj_name :
+ x_name);
+
+ print_diag (name, s, t);
+ }
else if (verb == 2)
print_process (args);
@@ -7273,7 +7859,7 @@ namespace build2
//
// But we remember the original source/position to restore later.
//
- bool psrc (md.psrc);
+ bool psrc (md.psrc); // Note: false if cc.reprocess.
bool ptmp (psrc && md.psrc.temporary);
pair<size_t, const char*> osrc;
if (psrc)
@@ -7291,36 +7877,40 @@ namespace build2
{
case compiler_type::gcc:
{
- // @@ TMP
-#if 0
- // The -fpreprocessed is implied by .i/.ii. But not when compiling
- // a header unit (there is no .hi/.hii).
- //
- if (ut == unit_type::module_header)
- args.push_back ("-fpreprocessed");
- else
- // Pop -x since it takes precedence over the extension.
- //
- // @@ I wonder why bother and not just add -fpreprocessed? Are
- // we trying to save an option or does something break?
- //
- for (; lang_n != 0; --lang_n)
- args.pop_back ();
-#else
// -fpreprocessed is implied by .i/.ii unless compiling a header
// unit (there is no .hi/.hii). Also, we would need to pop -x
// since it takes precedence over the extension, which would mess
// up our osrc logic. So in the end it feels like always passing
// explicit -fpreprocessed is the way to go.
//
+ // Also note that similarly there is no .Si for .S files.
+ //
args.push_back ("-fpreprocessed");
-#endif
-
args.push_back ("-fdirectives-only");
break;
}
case compiler_type::clang:
{
+ // Clang 15 and later with -pedantic warns about GNU-style line
+ // markers that it wrote itself in the -frewrite-includes output
+ // (llvm-project issue 63284). So we suppress this warning unless
+ // compiling from source.
+ //
+ // In Apple Clang this warning/option is absent in 14.0.3 (which
+ // is said to be based on vanilla Clang 15.0.5) for some reason
+ // (let's hope it's because they patched it out rather than due to
+ // a misleading _LIBCPP_VERSION value).
+ //
+ if (ctype == compiler_type::clang &&
+ cmaj >= (cvariant != "apple" ? 15 : 16))
+ {
+ if (find_options ({"-pedantic", "-pedantic-errors",
+ "-Wpedantic", "-Werror=pedantic"}, args))
+ {
+ args.push_back ("-Wno-gnu-line-marker");
+ }
+ }
+
// Note that without -x Clang will treat .i/.ii as fully
// preprocessed.
//
@@ -7412,6 +8002,8 @@ namespace build2
throw failed ();
}
+ jobs_ag.deallocate ();
+
if (md.deferred_failure)
fail << "expected error exit status from " << x_lang << " compiler";
}
@@ -7421,59 +8013,6 @@ namespace build2
if (ptmp && verb >= 3)
md.psrc.temporary = true;
- // Clang's module compilation requires two separate compiler
- // invocations.
- //
- // @@ MODPART: Clang (all of this is probably outdated).
- //
- if (ctype == compiler_type::clang && ut == unit_type::module_intf)
- {
- // Adjust the command line. First discard everything after -o then
- // build the new "tail".
- //
- args.resize (out_i + 1);
- args.push_back (relo.string ().c_str ()); // Produce .o.
- args.push_back ("-c"); // By compiling .pcm.
- args.push_back ("-Wno-unused-command-line-argument");
- args.push_back (relm.string ().c_str ());
- args.push_back (nullptr);
-
- if (verb >= 2)
- print_process (args);
-
- if (!ctx.dry_run)
- {
- // Remove the target file if this fails. If we don't do that, we
- // will end up with a broken build that is up-to-date.
- //
- auto_rmfile rm (relm);
-
- try
- {
- process pr (cpath,
- args,
- 0, 2, diag_buffer::pipe (ctx),
- nullptr, // CWD
- env.empty () ? nullptr : env.data ());
-
- diag_buffer dbuf (ctx, args[0], pr);
- dbuf.read ();
- run_finish (dbuf, args, pr, 1 /* verbosity */);
- }
- catch (const process_error& e)
- {
- error << "unable to execute " << args[0] << ": " << e;
-
- if (e.child)
- exit (1);
-
- throw failed ();
- }
-
- rm.cancel ();
- }
- }
-
timestamp now (system_clock::now ());
if (!ctx.dry_run)
@@ -7495,11 +8034,13 @@ namespace build2
// Preprocessed file extension.
//
- const char* pext (x_objective (srct) ? x_obj_pext : x_pext);
+ const char* pext (x_assembler_cpp (srct) ? ".Si" :
+ x_objective (srct) ? x_obj_pext :
+ x_pext);
// Compressed preprocessed file extension.
//
- string cpext (t.ctx.fcache.compressed_extension (pext));
+ string cpext (t.ctx.fcache->compressed_extension (pext));
clean_extras extras;
switch (ctype)
diff --git a/libbuild2/cc/compile-rule.hxx b/libbuild2/cc/compile-rule.hxx
index a9a22c4..0886b4b 100644
--- a/libbuild2/cc/compile-rule.hxx
+++ b/libbuild2/cc/compile-rule.hxx
@@ -156,8 +156,9 @@ namespace build2
pair<dir_path, const scope&>
find_modules_sidebuild (const scope&) const;
- const file&
- make_module_sidebuild (action, const scope&, const file&,
+ pair<target&, ulock>
+ make_module_sidebuild (action, const scope&,
+ const file*, otype,
const target&, const string&) const;
const file&
diff --git a/libbuild2/cc/functions.cxx b/libbuild2/cc/functions.cxx
index 94900ee..9d408af 100644
--- a/libbuild2/cc/functions.cxx
+++ b/libbuild2/cc/functions.cxx
@@ -52,7 +52,7 @@ namespace build2
//
if (bs->ctx.phase != run_phase::match &&
bs->ctx.phase != run_phase::execute)
- fail << f.name << " can only be called during execution";
+ fail << f.name << " can only be called from recipe";
const module* m (rs->find_module<module> (d.x));
@@ -131,7 +131,7 @@ namespace build2
if (bs->ctx.phase != run_phase::match && // See above.
bs->ctx.phase != run_phase::execute)
- fail << f.name << " can only be called during execution";
+ fail << f.name << " can only be called from recipe";
const module* m (rs->find_module<module> (d.x));
diff --git a/libbuild2/cc/gcc.cxx b/libbuild2/cc/gcc.cxx
index 755b0d8..286ba10 100644
--- a/libbuild2/cc/gcc.cxx
+++ b/libbuild2/cc/gcc.cxx
@@ -45,6 +45,13 @@ namespace build2
d = dir_path (o, 2, string::npos);
else
continue;
+
+ // Ignore relative paths. Or maybe we should warn?
+ //
+ if (d.relative ())
+ continue;
+
+ d.normalize ();
}
catch (const invalid_path& e)
{
@@ -52,10 +59,7 @@ namespace build2
<< o << "'";
}
- // Ignore relative paths. Or maybe we should warn?
- //
- if (!d.relative ())
- r.push_back (move (d));
+ r.push_back (move (d));
}
}
@@ -78,6 +82,71 @@ namespace build2
}
#endif
+ // Parse colon/semicolon-separated list of search directories (from
+ // -print-search-dirs output, environment variables).
+ //
+ static void
+ parse_search_dirs (const string& v, dir_paths& r,
+ const char* what, const char* what2 = "")
+ {
+ // Now the fun part: figuring out which delimiter is used. Normally it
+ // is ':' but on Windows it is ';' (or can be; who knows for sure). Also
+ // note that these paths are absolute (or should be). So here is what we
+ // are going to do: first look for ';'. If found, then that's the
+ // delimiter. If not found, then there are two cases: it is either a
+ // single Windows path or the delimiter is ':'. To distinguish these two
+ // cases we check if the path starts with a Windows drive.
+ //
+ char d (';');
+ string::size_type e (v.find (d));
+
+ if (e == string::npos &&
+ (v.size () < 2 || v[0] == '/' || v[1] != ':'))
+ {
+ d = ':';
+ e = v.find (d);
+ }
+
+ // Now chop it up. We already have the position of the first delimiter
+ // (if any).
+ //
+ for (string::size_type b (0);; e = v.find (d, (b = e + 1)))
+ {
+ dir_path d;
+ try
+ {
+ string ds (v, b, (e != string::npos ? e - b : e));
+
+ // Skip empty entries (sometimes found in random MinGW toolchains).
+ //
+ if (!ds.empty ())
+ {
+#ifdef _WIN32
+ if (path_traits::is_separator (ds[0]))
+ add_current_drive (ds);
+#endif
+ d = dir_path (move (ds));
+
+ if (d.relative ())
+ throw invalid_path (move (d).string ());
+
+ d.normalize ();
+ }
+ }
+ catch (const invalid_path& e)
+ {
+ fail << "invalid directory '" << e.path << "'" << " in "
+ << what << what2;
+ }
+
+ if (!d.empty () && find (r.begin (), r.end (), d) == r.end ())
+ r.push_back (move (d));
+
+ if (e == string::npos)
+ break;
+ }
+ }
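The delimiter heuristic in isolation (a sketch under the same assumption that entries are absolute, so a ':' at position 1 indicates a single Windows path rather than a separator):

#include <string>

// Return the list delimiter: ';' if present, otherwise ':' unless the value
// looks like a single Windows path (e.g., "C:\..."), in which case there is
// nothing to split.
static char
search_dirs_delimiter (const std::string& v)
{
  if (v.find (';') != std::string::npos)
    return ';';

  if (v.size () >= 2 && v[0] != '/' && v[1] == ':')
    return '\0'; // Single Windows path.

  return ':';
}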
+
// Extract system header search paths from GCC (gcc/g++) or compatible
// (Clang, Intel) using the `-v -E </dev/null` method.
//
@@ -88,14 +157,15 @@ namespace build2
// do this is to run the compiler twice.
//
pair<dir_paths, size_t> config_module::
- gcc_header_search_dirs (const process_path& xc, scope& rs) const
+ gcc_header_search_dirs (const compiler_info& xi, scope& rs) const
{
dir_paths r;
// Note also that any -I and similar that we may specify on the command
- // line are factored into the output.
+ // line are factored into the output. As well as the CPATH, etc.,
+ // environment variable values.
//
- cstrings args {xc.recall_string ()};
+ cstrings args {xi.path.recall_string ()};
append_options (args, rs, x_mode);
// Compile as.
@@ -119,7 +189,7 @@ namespace build2
args.push_back ("-");
args.push_back (nullptr);
- process_env env (xc);
+ process_env env (xi.path);
// For now let's assume that all the platforms other than Windows
// recognize LC_ALL.
@@ -132,6 +202,9 @@ namespace build2
if (verb >= 3)
print_process (env, args);
+ bool found_q (false); // Found `#include "..." ...` marker.
+ bool found_b (false); // Found `#include <...> ...` marker.
+
// Open pipe to stderr, redirect stdin and stdout to /dev/null.
//
process pr (run_start (
@@ -152,7 +225,7 @@ namespace build2
// End of search list.
//
// The exact text depends on the current locale. What we can rely on
- // is the presence of the "#include <...>" substring in the "opening"
+ // is the presence of the "#include <...>" marker in the "opening"
// line and the fact that the paths are indented with a single space
// character, unlike the "closing" line.
//
@@ -166,11 +239,15 @@ namespace build2
// which we fail to normalize or stat. @@ Maybe this is a bit too
// loose, especially compared to gcc_library_search_dirs()?
//
- string s;
- for (bool found (false); getline (is, s); )
+ // Note that when there are no paths (e.g., because of -nostdinc),
+ // then GCC prints both #include markers while Clang -- only "...".
+ //
+ for (string s; getline (is, s); )
{
- if (!found)
- found = s.find ("#include <...>") != string::npos;
+ if (!found_q)
+ found_q = s.find ("#include \"...\"") != string::npos;
+ else if (!found_b)
+ found_b = s.find ("#include <...>") != string::npos;
else
{
if (s[0] != ' ')
@@ -222,10 +299,12 @@ namespace build2
fail << "error reading " << x_lang << " compiler -v -E output";
}
- // It's highly unlikely not to have any system directories. More likely
- // we misinterpreted the compiler output.
+ // Note that it's possible that we will have no system directories, for
+ // example, if the user specified -nostdinc. But we must have still seen
+ // at least one marker. Failing that, we assume we misinterpreted the
+ // compiler output.
//
- if (r.empty ())
+ if (!found_b && !found_q)
fail << "unable to extract " << x_lang << " compiler system header "
<< "search paths";
@@ -236,7 +315,7 @@ namespace build2
// (Clang, Intel) using the -print-search-dirs option.
//
pair<dir_paths, size_t> config_module::
- gcc_library_search_dirs (const process_path& xc, scope& rs) const
+ gcc_library_search_dirs (const compiler_info& xi, scope& rs) const
{
// The output of -print-search-dirs are a bunch of lines that start with
// "<name>: =" where name can be "install", "programs", or "libraries".
@@ -263,12 +342,12 @@ namespace build2
gcc_extract_library_search_dirs (cast<strings> (rs[x_mode]), r);
size_t rn (r.size ());
- cstrings args {xc.recall_string ()};
+ cstrings args {xi.path.recall_string ()};
append_options (args, rs, x_mode);
args.push_back ("-print-search-dirs");
args.push_back (nullptr);
- process_env env (xc);
+ process_env env (xi.path);
// For now let's assume that all the platforms other than Windows
// recognize LC_ALL.
@@ -326,62 +405,16 @@ namespace build2
fail << "unable to extract " << x_lang << " compiler system library "
<< "search paths";
- // Now the fun part: figuring out which delimiter is used. Normally it
- // is ':' but on Windows it is ';' (or can be; who knows for sure). Also
- // note that these paths are absolute (or should be). So here is what we
- // are going to do: first look for ';'. If found, then that's the
- // delimiter. If not found, then there are two cases: it is either a
- // single Windows path or the delimiter is ':'. To distinguish these two
- // cases we check if the path starts with a Windows drive.
- //
- char d (';');
- string::size_type e (l.find (d));
+ parse_search_dirs (l, r, args[0], " -print-search-dirs output");
- if (e == string::npos &&
- (l.size () < 2 || l[0] == '/' || l[1] != ':'))
- {
- d = ':';
- e = l.find (d);
- }
-
- // Now chop it up. We already have the position of the first delimiter
- // (if any).
+ // While GCC incorporates the LIBRARY_PATH environment variable value
+ // into the -print-search-dirs output, Clang does not. Also, unlike GCC,
+ // it appears to consider such paths last.
//
- for (string::size_type b (0);; e = l.find (d, (b = e + 1)))
+ if (xi.id.type == compiler_type::clang)
{
- dir_path d;
- try
- {
- string ds (l, b, (e != string::npos ? e - b : e));
-
- // Skip empty entries (sometimes found in random MinGW toolchains).
- //
- if (!ds.empty ())
- {
-#ifdef _WIN32
- if (path_traits::is_separator (ds[0]))
- add_current_drive (ds);
-#endif
-
- d = dir_path (move (ds));
-
- if (d.relative ())
- throw invalid_path (move (d).string ());
-
- d.normalize ();
- }
- }
- catch (const invalid_path& e)
- {
- fail << "invalid directory '" << e.path << "'" << " in "
- << args[0] << " -print-search-dirs output";
- }
-
- if (!d.empty () && find (r.begin (), r.end (), d) == r.end ())
- r.emplace_back (move (d));
-
- if (e == string::npos)
- break;
+ if (optional<string> v = getenv ("LIBRARY_PATH"))
+ parse_search_dirs (*v, r, "LIBRARY_PATH environment variable");
}
return make_pair (move (r), rn);
diff --git a/libbuild2/cc/guess.cxx b/libbuild2/cc/guess.cxx
index 7a2ede9..d7e9c63 100644
--- a/libbuild2/cc/guess.cxx
+++ b/libbuild2/cc/guess.cxx
@@ -412,6 +412,8 @@ namespace build2
//
// Note that Visual Studio versions prior to 15.0 are not supported.
//
+ // Note also the directories are absolute and normalized.
+ //
struct msvc_info
{
dir_path msvc_dir; // VC tools directory (...\Tools\MSVC\<ver>\).
@@ -759,7 +761,7 @@ namespace build2
//
for (const dir_entry& de:
dir_iterator (r.psdk_dir / dir_path ("Include"),
- false /* ignore_dangling */))
+ dir_iterator::no_follow))
{
if (de.type () == entry_type::directory)
{
@@ -777,6 +779,16 @@ namespace build2
return nullopt;
}
+ try
+ {
+ r.msvc_dir.normalize ();
+ r.psdk_dir.normalize ();
+ }
+ catch (const invalid_path&)
+ {
+ return nullopt;
+ }
+
return r;
}
#endif
@@ -1537,6 +1549,8 @@ namespace build2
msvc_extract_header_search_dirs (mo, r);
size_t rn (r.size ());
+ // Note: the resulting directories are normalized by construction.
+ //
r.push_back (dir_path (mi.msvc_dir) /= "include");
// This path structure only appeared in Platform SDK 10 (if anyone wants
@@ -1586,6 +1600,8 @@ namespace build2
msvc_extract_library_search_dirs (mo, r);
size_t rn (r.size ());
+ // Note: the resulting directories are normalized by construction.
+ //
r.push_back ((dir_path (mi.msvc_dir) /= "lib") /= cpu);
// This path structure only appeared in Platform SDK 10 (if anyone wants
@@ -2349,17 +2365,20 @@ namespace build2
// These are derived from gcc_* plus the sparse documentation (clang(1))
// and source code.
//
+ // Note that for now for Clang targeting MSVC we use msvc_env but should
+ // probably use a combined list.
+ //
// See also the note on environment and caching below if adding any new
// variables.
//
static const char* clang_c_env[] = {
- "CPATH", "C_INCLUDE_PATH",
+ "CPATH", "C_INCLUDE_PATH", "CCC_OVERRIDE_OPTIONS",
"LIBRARY_PATH", "LD_RUN_PATH",
"COMPILER_PATH",
nullptr};
static const char* clang_cxx_env[] = {
- "CPATH", "CPLUS_INCLUDE_PATH",
+ "CPATH", "CPLUS_INCLUDE_PATH", "CCC_OVERRIDE_OPTIONS",
"LIBRARY_PATH", "LD_RUN_PATH",
"COMPILER_PATH",
nullptr};
@@ -2405,6 +2424,12 @@ namespace build2
//
// emcc (...) 2.0.8
//
+ // Pre-releases of the vanilla Clang append `rc` or `git` to the
+ // version, unfortunately without a separator. So we will handle these
+ // ad hoc. For example:
+ //
+ // FreeBSD clang version 18.1.0rc (https://github.com/llvm/llvm-project.git llvmorg-18-init-18361-g22683463740e)
+ //
auto extract_version = [] (const string& s, bool patch, const char* what)
-> compiler_version
{
@@ -2419,8 +2444,28 @@ namespace build2
// end of the word position (first space). In fact, we can just
// check if it is >= e.
//
- if (s.find_first_not_of ("1234567890.", b, 11) >= e)
+ size_t p (s.find_first_not_of ("1234567890.", b, 11));
+ if (p >= e)
break;
+
+ // Handle the unseparated `rc` and `git` suffixes.
+ //
+ if (p != string::npos)
+ {
+ if (p + 2 == e && (e - b) > 2 &&
+ s[p] == 'r' && s[p + 1] == 'c')
+ {
+ e -= 2;
+ break;
+ }
+
+ if (p + 3 == e && (e - b) > 3 &&
+ s[p] == 'g' && s[p + 1] == 'i' && s[p + 2] == 't')
+ {
+ e -= 3;
+ break;
+ }
+ }
}
if (b == e)
@@ -2456,7 +2501,14 @@ namespace build2
ver.patch = next ("patch", patch);
if (e != s.size ())
- ver.build.assign (s, e + 1, string::npos);
+ {
+ // Skip the separator (it could also be unseparated `rc` or `git`).
+ //
+ if (s[e] == ' ' || s[e] == '-')
+ e++;
+
+ ver.build.assign (s, e, string::npos);
+ }
return ver;
};
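A simplified sketch of the pre-release suffix handling (not the actual parsing code, which works on word boundaries within the full version line): strip an unseparated `rc` or `git` tail so that, for example, "18.1.0rc" yields version 18.1.0 with "rc" as the build component.

#include <string>
#include <utility>

static std::pair<std::string, std::string> // (version, build)
split_clang_version (std::string v)
{
  auto strip = [&v] (const std::string& sfx) -> bool
  {
    if (v.size () > sfx.size () &&
        v.compare (v.size () - sfx.size (), sfx.size (), sfx) == 0)
    {
      v.erase (v.size () - sfx.size ());
      return true;
    }
    return false;
  };

  if (strip ("rc"))  return {std::move (v), "rc"};
  if (strip ("git")) return {std::move (v), "git"};
  return {std::move (v), ""};
}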
@@ -2480,7 +2532,10 @@ namespace build2
// Some overrides for testing.
//
+ //string s (xv != nullptr ? *xv : "");
+ //
//s = "clang version 3.7.0 (tags/RELEASE_370/final)";
+ //s = "FreeBSD clang version 18.1.0rc (https://github.com/llvm/llvm-project.git llvmorg-18-init-18361-g22683463740e)";
//
//gr.id.variant = "apple";
//s = "Apple LLVM version 7.3.0 (clang-703.0.16.1)";
@@ -2508,10 +2563,21 @@ namespace build2
//
// Specifically, we now look in the libc++'s __config file for the
// _LIBCPP_VERSION and use the previous version as a conservative
- // estimate (NOTE that there could be multiple __config files with
+ // estimate (NOTE: there could be multiple __config files with
// potentially different versions so compile with -v to see which one
// gets picked up).
//
+ // Also, lately, we started seeing _LIBCPP_VERSION values like 15.0.6
+ // or 16.0.2 which would suggest the base is 15.0.5 or 16.0.1. But
+ // that assumption did not check out with the actual usage. For
+ // example, vanilla Clang 16 should no longer require -fmodules-ts but
+ // the Apple's version (that is presumably based on it) still does. So
+ // the theory here is that Apple upgrades to newer libc++ while
+ // keeping the old compiler. Which means we must be more conservative
+ // and assume something like 15.0.6 is still 14-based. But then you
+ // get -Wunqualified-std-cast-call in 14, which was supposedly only
+ // introduced in Clang 15. So maybe not.
+ //
// Note that this is Apple Clang version and not XCode version.
//
// 4.2 -> 3.2svn
@@ -2532,35 +2598,40 @@ namespace build2
// 12.0.5 -> 10.0 (yes, seriously!)
// 13.0.0 -> 11.0
// 13.1.6 -> 12.0
+ // 14.0.0 -> 12.0 (_LIBCPP_VERSION=130000)
+ // 14.0.3 -> 15.0 (_LIBCPP_VERSION=150006)
+ // 15.0.0 -> 16.0 (_LIBCPP_VERSION=160002)
//
uint64_t mj (var_ver->major);
uint64_t mi (var_ver->minor);
uint64_t pa (var_ver->patch);
- if (mj > 13 || (mj == 13 && mi >= 1)) {mj = 12; mi = 0;}
- else if (mj == 13) {mj = 11; mi = 0;}
- else if (mj == 12 && (mi > 0 || pa >= 5)) {mj = 10; mi = 0;}
- else if (mj == 12) {mj = 9; mi = 0;}
- else if (mj == 11 && (mi > 0 || pa >= 3)) {mj = 8; mi = 0;}
- else if (mj == 11) {mj = 7; mi = 0;}
- else if (mj == 10) {mj = 6; mi = 0;}
- else if (mj == 9 && mi >= 1) {mj = 5; mi = 0;}
- else if (mj == 9) {mj = 4; mi = 0;}
- else if (mj == 8) {mj = 3; mi = 9;}
- else if (mj == 7 && mi >= 3) {mj = 3; mi = 8;}
- else if (mj == 7) {mj = 3; mi = 7;}
- else if (mj == 6 && mi >= 1) {mj = 3; mi = 5;}
- else if (mj == 6) {mj = 3; mi = 4;}
- else if (mj == 5 && mi >= 1) {mj = 3; mi = 3;}
- else if (mj == 5) {mj = 3; mi = 2;}
- else if (mj == 4 && mi >= 2) {mj = 3; mi = 1;}
- else {mj = 3; mi = 0;}
+ if (mj >= 15) {mj = 16; mi = 0; pa = 0;}
+ else if (mj == 14 && (mi > 0 || pa >= 3)) {mj = 15; mi = 0; pa = 0;}
+ else if (mj == 14 || (mj == 13 && mi >= 1)) {mj = 12; mi = 0; pa = 0;}
+ else if (mj == 13) {mj = 11; mi = 0; pa = 0;}
+ else if (mj == 12 && (mi > 0 || pa >= 5)) {mj = 10; mi = 0; pa = 0;}
+ else if (mj == 12) {mj = 9; mi = 0; pa = 0;}
+ else if (mj == 11 && (mi > 0 || pa >= 3)) {mj = 8; mi = 0; pa = 0;}
+ else if (mj == 11) {mj = 7; mi = 0; pa = 0;}
+ else if (mj == 10) {mj = 6; mi = 0; pa = 0;}
+ else if (mj == 9 && mi >= 1) {mj = 5; mi = 0; pa = 0;}
+ else if (mj == 9) {mj = 4; mi = 0; pa = 0;}
+ else if (mj == 8) {mj = 3; mi = 9; pa = 0;}
+ else if (mj == 7 && mi >= 3) {mj = 3; mi = 8; pa = 0;}
+ else if (mj == 7) {mj = 3; mi = 7; pa = 0;}
+ else if (mj == 6 && mi >= 1) {mj = 3; mi = 5; pa = 0;}
+ else if (mj == 6) {mj = 3; mi = 4; pa = 0;}
+ else if (mj == 5 && mi >= 1) {mj = 3; mi = 3; pa = 0;}
+ else if (mj == 5) {mj = 3; mi = 2; pa = 0;}
+ else if (mj == 4 && mi >= 2) {mj = 3; mi = 1; pa = 0;}
+ else {mj = 3; mi = 0; pa = 0;}
ver = compiler_version {
- to_string (mj) + '.' + to_string (mi) + ".0",
+ to_string (mj) + '.' + to_string (mi) + '.' + to_string (pa),
mj,
mi,
- 0,
+ pa,
""};
}
else if (emscr)
@@ -2673,7 +2744,7 @@ namespace build2
const char* cpu (msvc_cpu (tt.cpu));
// Come up with the system library search paths. Ideally we would want
- // to extract this from Clang and -print-search-paths would have been
+ // to extract this from Clang and -print-search-dirs would have been
// the natural way for Clang to report it. But no luck.
//
lib_dirs = msvc_lib (mi, x_mo, cpu);
diff --git a/libbuild2/cc/init.cxx b/libbuild2/cc/init.cxx
index 33a1133..e124450 100644
--- a/libbuild2/cc/init.cxx
+++ b/libbuild2/cc/init.cxx
@@ -100,13 +100,19 @@ namespace build2
vp.insert<strings> ("config.cc.loptions");
vp.insert<strings> ("config.cc.aoptions");
vp.insert<strings> ("config.cc.libs");
- vp.insert<string> ("config.cc.internal.scope");
+
+ vp.insert<string> ("config.cc.internal.scope");
+
+ vp.insert<bool> ("config.cc.reprocess"); // See cc.preprocess below.
+
+ vp.insert<abs_dir_path> ("config.cc.pkgconfig.sysroot");
vp.insert<strings> ("cc.poptions");
vp.insert<strings> ("cc.coptions");
vp.insert<strings> ("cc.loptions");
vp.insert<strings> ("cc.aoptions");
vp.insert<strings> ("cc.libs");
+
vp.insert<string> ("cc.internal.scope");
vp.insert<strings> ("cc.internal.libs");
@@ -120,8 +126,8 @@ namespace build2
// files instead of the default install.{include,lib}. Relative paths
// are resolved as install paths.
//
- vp.insert<dir_paths> ("cc.pkconfig.include");
- vp.insert<dir_paths> ("cc.pkconfig.lib");
+ vp.insert<dir_paths> ("cc.pkgconfig.include");
+ vp.insert<dir_paths> ("cc.pkgconfig.lib");
// Hint variables (not overridable).
//
@@ -177,9 +183,15 @@ namespace build2
// Ability to disable using preprocessed output for compilation.
//
- vp.insert<bool> ("config.cc.reprocess");
vp.insert<bool> ("cc.reprocess");
+ // Execute serially with regards to any other recipe. This is primarily
+ // useful when compiling large translation units or linking large
+ // binaries that require so much memory that doing that in parallel with
+ // other compilation/linking jobs is likely to summon the OOM killer.
+ //
+ vp.insert<bool> ("cc.serialize");
+
// Register scope operation callback.
//
// It feels natural to clean up sidebuilds as a post operation but that
@@ -337,6 +349,15 @@ namespace build2
if (lookup l = lookup_config (rs, "config.cc.reprocess"))
rs.assign ("cc.reprocess") = *l;
+ // config.cc.pkgconfig.sysroot
+ //
+ // Let's look it up instead of just marking for saving to make sure the
+ // path is valid.
+ //
+ // Note: save omitted.
+ //
+ lookup_config (rs, "config.cc.pkgconfig.sysroot");
+
// Load the bin.config module.
//
if (!cast_false<bool> (rs["bin.config.loaded"]))
diff --git a/libbuild2/cc/install-rule.cxx b/libbuild2/cc/install-rule.cxx
index 640612c..6758e03 100644
--- a/libbuild2/cc/install-rule.cxx
+++ b/libbuild2/cc/install-rule.cxx
@@ -18,20 +18,67 @@ namespace build2
{
using namespace bin;
+ using posthoc_prerequisite_target =
+ context::posthoc_target::prerequisite_target;
+
// install_rule
//
install_rule::
install_rule (data&& d, const link_rule& l)
: common (move (d)), link_ (l) {}
- const target* install_rule::
+ // Wrap the file_rule's recipe into a data-carrying recipe.
+ //
+ struct install_match_data
+ {
+ build2::recipe recipe;
+ uint64_t options; // Match options.
+ link_rule::libs_paths libs_paths;
+
+ target_state
+ operator() (action a, const target& t)
+ {
+ return recipe (a, t);
+ }
+ };
+
+ bool install_rule::
+ filter (action a, const target& t, const target& m) const
+ {
+ if (!t.is_a<exe> ())
+ {
+ // If runtime-only, filter out all known buildtime target types.
+ //
+ const auto& md (t.data<install_match_data> (a));
+
+ if ((md.options & lib::option_install_buildtime) == 0)
+ {
+ if (m.is_a<liba> () || // Static library.
+ m.is_a<pc> () || // pkg-config file.
+ m.is_a<libi> ()) // Import library.
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ pair<const target*, uint64_t> install_rule::
filter (const scope* is,
- action a, const target& t, prerequisite_iterator& i) const
+ action a, const target& t, prerequisite_iterator& i,
+ match_extra& me) const
{
// NOTE: see libux_install_rule::filter() if changing anything here.
const prerequisite& p (i->prerequisite);
+ uint64_t options (match_extra::all_options);
+
+ otype ot (link_type (t).type);
+
+ // @@ TMP: drop eventually.
+ //
+#if 0
// If this is a shared library prerequisite, install it as long as it is
// in the installation scope.
//
@@ -43,10 +90,14 @@ namespace build2
//
// Note: we install ad hoc prerequisites by default.
//
- otype ot (link_type (t).type);
+ // Note: at least one must be true since we only register this rule for
+ // exe{}, and lib[as]{} (this makes sure the following if-condition will
+ // always be true for libx{}).
+ //
bool st (t.is_a<exe> () || t.is_a<libs> ()); // Target needs shared.
bool at (t.is_a<liba> () || t.is_a<libs> ()); // Target needs static.
+ assert (st || at);
if ((st && (p.is_a<libx> () || p.is_a<libs> ())) ||
(at && (p.is_a<libx> () || p.is_a<liba> ())))
@@ -59,26 +110,115 @@ namespace build2
if (const libx* l = pt->is_a<libx> ())
pt = link_member (*l, a, link_info (t.base_scope (), ot));
- // Note: not redundant since we are returning a member.
+ // Note: not redundant since we could be returning a member.
//
if ((st && pt->is_a<libs> ()) || (at && pt->is_a<liba> ()))
- return is == nullptr || pt->in (*is) ? pt : nullptr;
+ {
+ // Adjust match options.
+ //
+ if (a.operation () != update_id)
+ {
+ if (t.is_a<exe> ())
+ options = lib::option_install_runtime;
+ else
+ {
+ // This is a library prerequisite of a library target and
+ // runtime-only begets runtime-only.
+ //
+ if (me.cur_options == lib::option_install_runtime)
+ options = lib::option_install_runtime;
+ }
+ }
+
+ return make_pair (is == nullptr || pt->in (*is) ? pt : nullptr,
+ options);
+ }
// See through to libu*{} members. Note that we are always in the same
// project (and thus amalgamation).
//
if (pt->is_a<libux> ())
- return pt;
+ {
+ // Adjust match options (similar to above).
+ //
+ if (a.operation () != update_id && !pt->is_a<libue> ())
+ {
+ if (t.is_a<exe> ())
+ options = lib::option_install_runtime;
+ else
+ {
+ if (me.cur_options == lib::option_install_runtime)
+ options = lib::option_install_runtime;
+ }
+ }
+
+ return make_pair (pt, options);
+ }
}
+#else
+ // Note that at first it may seem like we don't need to install static
+ // library prerequisites of executables. But such libraries may still
+ // have prerequisites that are needed at runtime (say, some data files).
+ // So we install all libraries as long as they are in the installation
+ // scope and deal with runtime vs buildtime distinction using match
+ // options.
+ //
+ // Note: for now we assume these prerequisites never come from see-
+ // through groups.
+ //
+ // Note: we install ad hoc prerequisites by default.
+ //
+ if (p.is_a<libx> () || p.is_a<libs> () || p.is_a<liba> ())
+ {
+ const target* pt (&search (t, p));
+
+ // If this is the lib{}/libu*{} group, pick a member which we would
+ // link. For libu*{} we want the "see through" logic.
+ //
+ if (const libx* l = pt->is_a<libx> ())
+ pt = link_member (*l, a, link_info (t.base_scope (), ot));
+
+ // Adjust match options.
+ //
+ if (a.operation () != update_id)
+ {
+ if (t.is_a<exe> ())
+ options = lib::option_install_runtime;
+ else
+ {
+ // This is a library prerequisite of a library target and
+ // runtime-only begets runtime-only.
+ //
+ if (me.cur_options == lib::option_install_runtime)
+ options = lib::option_install_runtime;
+ }
+ }
+
+ // Note: not redundant since we could be returning a member.
+ //
+ if (pt->is_a<libs> () || pt->is_a<liba> ())
+ {
+ return make_pair (is == nullptr || pt->in (*is) ? pt : nullptr,
+ options);
+ }
+ else // libua{} or libus{}
+ {
+ // See through to libu*{} members. Note that we are always in the
+ // same project (and thus amalgamation).
+ //
+ return make_pair (pt, options);
+ }
+ }
+#endif
// The rest of the tests only succeed if the base filter() succeeds.
//
- const target* pt (file_rule::filter (is, a, t, p));
+ const target* pt (file_rule::filter (is, a, t, p, me).first);
if (pt == nullptr)
- return pt;
+ return make_pair (pt, options);
- // Don't install executable's prerequisite headers and module
- // interfaces.
+ // Don't install executable's or runtime-only library's prerequisite
+ // headers and module interfaces.
//
// Note that if they come from a group, then we assume the entire
// group is not to be installed.
@@ -88,13 +228,18 @@ namespace build2
//
auto header_source = [this] (const auto& p)
{
- return (x_header (p) ||
- p.is_a (x_src) ||
- (x_mod != nullptr && p.is_a (*x_mod)) ||
- (x_obj != nullptr && p.is_a (*x_obj)));
+ return (x_header (p) ||
+ p.is_a (x_src) ||
+ p.is_a (c::static_type) ||
+ p.is_a (S::static_type) ||
+ (x_mod != nullptr && p.is_a (*x_mod)) ||
+ (x_obj != nullptr && (p.is_a (*x_obj) ||
+ p.is_a (m::static_type))));
};
- if (t.is_a<exe> ())
+ if (t.is_a<exe> () ||
+ (a.operation () != update_id &&
+ me.cur_options == lib::option_install_runtime))
{
if (header_source (p))
pt = nullptr;
@@ -109,7 +254,7 @@ namespace build2
}
if (pt == nullptr)
- return pt;
+ return make_pair (pt, options);
}
// Here is a problem: if the user spells the obj*/bmi*{} targets
@@ -139,16 +284,16 @@ namespace build2
{
pt = t.is_a<exe> ()
? nullptr
- : file_rule::filter (is, a, *pt, pm.prerequisite);
+ : file_rule::filter (is, a, *pt, pm.prerequisite, me).first;
break;
}
}
if (pt == nullptr)
- return pt;
+ return make_pair (pt, options);
}
- return pt;
+ return make_pair (pt, options);
}
bool install_rule::
@@ -161,27 +306,34 @@ namespace build2
file_rule::match (a, t);
}
- // Wrap the file_rule's recipe into a data-carrying recipe.
- //
- struct install_match_data
+ recipe install_rule::
+ apply (action a, target& t, match_extra& me) const
{
- build2::recipe recipe;
- link_rule::libs_paths libs_paths;
-
- target_state
- operator() (action a, const target& t)
+ // Handle match options.
+ //
+ // Do it before calling apply_impl() since we need this information
+ // in the filter() callbacks.
+ //
+ if (a.operation () != update_id)
{
- return recipe (a, t);
+ if (!t.is_a<exe> ())
+ {
+ if (me.new_options == 0)
+ me.new_options = lib::option_install_runtime; // Minimum we can do.
+
+ me.cur_options = me.new_options;
+ }
}
- };
- recipe install_rule::
- apply (action a, target& t) const
- {
- recipe r (file_rule::apply_impl (a, t));
+ recipe r (file_rule::apply_impl (
+ a, t, me,
+ me.cur_options != match_extra::all_options /* reapply */));
if (r == nullptr)
+ {
+ me.cur_options = match_extra::all_options; // Noop for all options.
return noop_recipe;
+ }
if (a.operation () == update_id)
{
@@ -203,29 +355,109 @@ namespace build2
}
else // install or uninstall
{
- // Derive shared library paths and cache them in the target's aux
- // storage if we are un/installing (used in the *_extra() functions
- // below).
- //
- if (file* f = t.is_a<libs> ())
+ file* ls;
+ if ((ls = t.is_a<libs> ()) || t.is_a<liba> ())
{
- if (!f->path ().empty ()) // Not binless.
+ // Derive shared library paths and cache them in the target's aux
+ // storage if we are un/installing (used in the *_extra() functions
+ // below).
+ //
+ link_rule::libs_paths lsp;
+ if (ls != nullptr && !ls->path ().empty ()) // Not binless.
{
const string* p (cast_null<string> (t["bin.lib.prefix"]));
const string* s (cast_null<string> (t["bin.lib.suffix"]));
- return install_match_data {
- move (r),
- link_.derive_libs_paths (*f,
- p != nullptr ? p->c_str (): nullptr,
- s != nullptr ? s->c_str (): nullptr)};
+ lsp = link_.derive_libs_paths (*ls,
+ p != nullptr ? p->c_str (): nullptr,
+ s != nullptr ? s->c_str (): nullptr);
}
+
+ return install_match_data {move (r), me.cur_options, move (lsp)};
}
}
return r;
}
+ void install_rule::
+ apply_posthoc (action a, target& t, match_extra& me) const
+ {
+ // Similar semantics to filter() above for shared libraries specified as
+ // post hoc prerequisites (e.g., plugins).
+ //
+ if (a.operation () != update_id)
+ {
+ for (posthoc_prerequisite_target& p: *me.posthoc_prerequisite_targets)
+ {
+ if (p.target != nullptr && p.target->is_a<libs> ())
+ {
+ if (t.is_a<exe> ())
+ p.match_options = lib::option_install_runtime;
+ else
+ {
+ if (me.cur_options == lib::option_install_runtime)
+ p.match_options = lib::option_install_runtime;
+ }
+ }
+ }
+ }
+ }
+
+ void install_rule::
+ reapply (action a, target& t, match_extra& me) const
+ {
+ tracer trace ("cc::install_rule::reapply");
+
+ assert (a.operation () != update_id && !t.is_a<exe> ());
+
+ l6 ([&]{trace << "rematching " << t
+ << ", current options " << me.cur_options
+ << ", new options " << me.new_options;});
+
+ me.cur_options |= me.new_options;
+
+ // We also need to update options in install_match_data.
+ //
+ t.data<install_match_data> (a).options = me.cur_options;
+
+ if ((me.new_options & lib::option_install_buildtime) != 0)
+ {
+ // If we are rematched with the buildtime option, propagate it to our
+ // prerequisite libraries.
+ //
+ for (const target* pt: t.prerequisite_targets[a])
+ {
+ if (pt != nullptr && (pt->is_a<liba> () || pt->is_a<libs> () ||
+ pt->is_a<libua> () || pt->is_a<libus> ()))
+ {
+ // Go for all options instead of just install_buildtime to avoid
+ // any further relocking/reapply (we only support runtime-only or
+ // everything).
+ //
+ rematch_sync (a, *pt, match_extra::all_options);
+ }
+ }
+
+ // Also to post hoc.
+ //
+ if (me.posthoc_prerequisite_targets != nullptr)
+ {
+ for (posthoc_prerequisite_target& p: *me.posthoc_prerequisite_targets)
+ {
+ if (p.target != nullptr && p.target->is_a<libs> ())
+ {
+ p.match_options = match_extra::all_options;
+ }
+ }
+ }
+
+ // Also match any additional prerequisites (e.g., headers).
+ //
+ file_rule::reapply_impl (a, t, me);
+ }
+ }
+
bool install_rule::
install_extra (const file& t, const install_dir& id) const
{
@@ -233,14 +465,19 @@ namespace build2
if (t.is_a<libs> ())
{
+ const auto& md (t.data<install_match_data> (perform_install_id));
+
// Here we may have a bunch of symlinks that we need to install.
//
+ // Note that for runtime-only install we only omit the name that is
+ // used for linking (e.g., libfoo.so).
+ //
const scope& rs (t.root_scope ());
- auto& lp (t.data<install_match_data> (perform_install_id).libs_paths);
+ const link_rule::libs_paths& lp (md.libs_paths);
- auto ln = [&rs, &id] (const path& f, const path& l)
+ auto ln = [&t, &rs, &id] (const path& f, const path& l)
{
- install_l (rs, id, f.leaf (), l.leaf (), 2 /* verbosity */);
+ install_l (rs, id, l.leaf (), t, f.leaf (), 2 /* verbosity */);
return true;
};
@@ -254,7 +491,10 @@ namespace build2
if (!in.empty ()) {r = ln (*f, in) || r; f = &in;}
if (!so.empty ()) {r = ln (*f, so) || r; f = &so;}
if (!ld.empty ()) {r = ln (*f, ld) || r; f = &ld;}
- if (!lk.empty ()) {r = ln (*f, lk) || r; }
+ if ((md.options & lib::option_install_buildtime) != 0)
+ {
+ if (!lk.empty ()) {r = ln (*f, lk) || r;}
+ }
}
return r;
@@ -267,14 +507,16 @@ namespace build2
if (t.is_a<libs> ())
{
+ const auto& md (t.data<install_match_data> (perform_uninstall_id));
+
// Here we may have a bunch of symlinks that we need to uninstall.
//
const scope& rs (t.root_scope ());
- auto& lp (t.data<install_match_data> (perform_uninstall_id).libs_paths);
+ const link_rule::libs_paths& lp (md.libs_paths);
auto rm = [&rs, &id] (const path& f, const path& l)
{
- return uninstall_l (rs, id, f.leaf (), l.leaf (), 2 /* verbosity */);
+ return uninstall_l (rs, id, l.leaf (), f.leaf (), 2 /* verbosity */);
};
const path& lk (lp.link);
@@ -287,7 +529,10 @@ namespace build2
if (!in.empty ()) {r = rm (*f, in) || r; f = &in;}
if (!so.empty ()) {r = rm (*f, so) || r; f = &so;}
if (!ld.empty ()) {r = rm (*f, ld) || r; f = &ld;}
- if (!lk.empty ()) {r = rm (*f, lk) || r; }
+ if ((md.options & lib::option_install_buildtime) != 0)
+ {
+ if (!lk.empty ()) {r = rm (*f, lk) || r;}
+ }
}
return r;
@@ -299,22 +544,30 @@ namespace build2
libux_install_rule (data&& d, const link_rule& l)
: common (move (d)), link_ (l) {}
- const target* libux_install_rule::
+ pair<const target*, uint64_t> libux_install_rule::
filter (const scope* is,
- action a, const target& t, prerequisite_iterator& i) const
+ action a, const target& t, prerequisite_iterator& i,
+ match_extra& me) const
{
using file_rule = install::file_rule;
const prerequisite& p (i->prerequisite);
+ uint64_t options (match_extra::all_options);
+
+ otype ot (link_type (t).type);
+
// The "see through" semantics that should be parallel to install_rule
// above. In particular, here we use libue/libua/libus{} as proxies for
// exe/liba/libs{} there.
//
- otype ot (link_type (t).type);
+ // @@ TMP: drop eventually.
+ //
+#if 0
bool st (t.is_a<libue> () || t.is_a<libus> ()); // Target needs shared.
bool at (t.is_a<libua> () || t.is_a<libus> ()); // Target needs static.
+ assert (st || at);
if ((st && (p.is_a<libx> () || p.is_a<libs> ())) ||
(at && (p.is_a<libx> () || p.is_a<liba> ())))
@@ -325,25 +578,85 @@ namespace build2
pt = link_member (*l, a, link_info (t.base_scope (), ot));
if ((st && pt->is_a<libs> ()) || (at && pt->is_a<liba> ()))
- return is == nullptr || pt->in (*is) ? pt : nullptr;
+ {
+ if (a.operation () != update_id)
+ {
+ if (t.is_a<libue> ())
+ options = lib::option_install_runtime;
+ else
+ {
+ if (me.cur_options == lib::option_install_runtime)
+ options = lib::option_install_runtime;
+ }
+ }
+
+ return make_pair (is == nullptr || pt->in (*is) ? pt : nullptr,
+ options);
+ }
if (pt->is_a<libux> ())
- return pt;
+ {
+ if (a.operation () != update_id && !pt->is_a<libue> ())
+ {
+ if (t.is_a<libue> ())
+ options = lib::option_install_runtime;
+ else
+ {
+ if (me.cur_options == lib::option_install_runtime)
+ options = lib::option_install_runtime;
+ }
+ }
+
+ return make_pair (pt, options);
+ }
}
+#else
+ if (p.is_a<libx> () || p.is_a<libs> () || p.is_a<liba> ())
+ {
+ const target* pt (&search (t, p));
- const target* pt (file_rule::instance.filter (is, a, t, p));
+ if (const libx* l = pt->is_a<libx> ())
+ pt = link_member (*l, a, link_info (t.base_scope (), ot));
+
+ if (a.operation () != update_id)
+ {
+ if (t.is_a<libue> ())
+ options = lib::option_install_runtime;
+ else
+ {
+ if (me.cur_options == lib::option_install_runtime)
+ options = lib::option_install_runtime;
+ }
+ }
+
+ if (pt->is_a<libs> () || pt->is_a<liba> ())
+ {
+ return make_pair (is == nullptr || pt->in (*is) ? pt : nullptr,
+ options);
+ }
+ else
+ return make_pair (pt, options);
+ }
+#endif
+
+ const target* pt (file_rule::instance.filter (is, a, t, p, me).first);
if (pt == nullptr)
- return pt;
+ return make_pair (pt, options);
auto header_source = [this] (const auto& p)
{
- return (x_header (p) ||
- p.is_a (x_src) ||
- (x_mod != nullptr && p.is_a (*x_mod)) ||
- (x_obj != nullptr && p.is_a (*x_obj)));
+ return (x_header (p) ||
+ p.is_a (x_src) ||
+ p.is_a (c::static_type) ||
+ p.is_a (S::static_type) ||
+ (x_mod != nullptr && p.is_a (*x_mod)) ||
+ (x_obj != nullptr && (p.is_a (*x_obj) ||
+ p.is_a (m::static_type))));
};
- if (t.is_a<libue> ())
+ if (t.is_a<libue> () ||
+ (a.operation () != update_id &&
+ me.cur_options == lib::option_install_runtime))
{
if (header_source (p))
pt = nullptr;
@@ -358,7 +671,7 @@ namespace build2
}
if (pt == nullptr)
- return pt;
+ return make_pair (pt, options);
}
bool g (false);
@@ -374,16 +687,17 @@ namespace build2
{
pt = t.is_a<libue> ()
? nullptr
- : file_rule::instance.filter (is, a, *pt, pm.prerequisite);
+ : file_rule::instance.filter (
+ is, a, *pt, pm.prerequisite, me).first;
break;
}
}
if (pt == nullptr)
- return pt;
+ return make_pair (pt, options);
}
- return pt;
+ return make_pair (pt, options);
}
bool libux_install_rule::
@@ -395,5 +709,81 @@ namespace build2
return link_.sub_match (x_link, update_id, a, t, me) &&
alias_rule::match (a, t);
}
+
+ recipe libux_install_rule::
+ apply (action a, target& t, match_extra& me) const
+ {
+ if (a.operation () != update_id)
+ {
+ if (!t.is_a<libue> ())
+ {
+ if (me.new_options == 0)
+ me.new_options = lib::option_install_runtime;
+
+ me.cur_options = me.new_options;
+ }
+ }
+
+ return alias_rule::apply_impl (
+ a, t, me, me.cur_options != match_extra::all_options /* reapply */);
+ }
+
+ void libux_install_rule::
+ apply_posthoc (action a, target& t, match_extra& me) const
+ {
+ if (a.operation () != update_id)
+ {
+ for (posthoc_prerequisite_target& p: *me.posthoc_prerequisite_targets)
+ {
+ if (p.target != nullptr && p.target->is_a<libs> ())
+ {
+ if (t.is_a<libue> ())
+ p.match_options = lib::option_install_runtime;
+ else
+ {
+ if (me.cur_options == lib::option_install_runtime)
+ p.match_options = lib::option_install_runtime;
+ }
+ }
+ }
+ }
+ }
+
+ void libux_install_rule::
+ reapply (action a, target& t, match_extra& me) const
+ {
+ tracer trace ("cc::libux_install_rule::reapply");
+
+ assert (a.operation () != update_id && !t.is_a<libue> ());
+
+ l6 ([&]{trace << "rematching " << t
+ << ", current options " << me.cur_options
+ << ", new options " << me.new_options;});
+
+ me.cur_options |= me.new_options;
+
+ if ((me.new_options & lib::option_install_buildtime) != 0)
+ {
+ for (const target* pt: t.prerequisite_targets[a])
+ {
+ if (pt != nullptr && (pt->is_a<liba> () || pt->is_a<libs> () ||
+ pt->is_a<libua> () || pt->is_a<libus> ()))
+ rematch_sync (a, *pt, match_extra::all_options);
+ }
+
+ if (me.posthoc_prerequisite_targets != nullptr)
+ {
+ for (posthoc_prerequisite_target& p: *me.posthoc_prerequisite_targets)
+ {
+ if (p.target != nullptr && p.target->is_a<libs> ())
+ {
+ p.match_options = match_extra::all_options;
+ }
+ }
+ }
+
+ alias_rule::reapply_impl (a, t, me);
+ }
+ }
}
}
diff --git a/libbuild2/cc/install-rule.hxx b/libbuild2/cc/install-rule.hxx
index 6998d63..771c33b 100644
--- a/libbuild2/cc/install-rule.hxx
+++ b/libbuild2/cc/install-rule.hxx
@@ -20,7 +20,7 @@ namespace build2
{
class link_rule;
- // Installation rule for exe{} and lib*{}. Here we do:
+ // Installation rule for exe{} and lib[as]{}. Here we do:
//
// 1. Signal to the link rule that this is update for install.
//
@@ -28,17 +28,23 @@ namespace build2
//
// 3. Extra un/installation (e.g., libs{} symlinks).
//
+ // 4. Handling runtime/buildtime match options for lib[as]{}.
+ //
class LIBBUILD2_CC_SYMEXPORT install_rule: public install::file_rule,
virtual common
{
public:
install_rule (data&&, const link_rule&);
- virtual const target*
+ virtual bool
+ filter (action, const target&, const target&) const override;
+
+ virtual pair<const target*, uint64_t>
filter (const scope*,
- action, const target&, prerequisite_iterator&) const override;
+ action, const target&, prerequisite_iterator&,
+ match_extra&) const override;
- // Note: rule::match() override.
+ // Note: rule::match() override (with hint and match_extra).
//
virtual bool
match (action, target&, const string&, match_extra&) const override;
@@ -46,7 +52,13 @@ namespace build2
using file_rule::match; // Make Clang happy.
virtual recipe
- apply (action, target&) const override;
+ apply (action, target&, match_extra&) const override;
+
+ virtual void
+ apply_posthoc (action, target&, match_extra&) const override;
+
+ virtual void
+ reapply (action, target&, match_extra&) const override;
virtual bool
install_extra (const file&, const install_dir&) const override;
@@ -58,22 +70,24 @@ namespace build2
const link_rule& link_;
};
- // Installation rule for libu*{}.
+ // Installation rule for libu[eas]{}.
//
// While libu*{} members themselves are not installable, we need to see
// through them in case they depend on stuff that we need to install
// (e.g., headers). Note that we use the alias_rule as a base.
//
- class LIBBUILD2_CC_SYMEXPORT libux_install_rule:
- public install::alias_rule,
- virtual common
+ class LIBBUILD2_CC_SYMEXPORT libux_install_rule: public install::alias_rule,
+ virtual common
{
public:
libux_install_rule (data&&, const link_rule&);
- virtual const target*
+ // Note: utility libraries currently have no ad hoc members.
+
+ virtual pair<const target*, uint64_t>
filter (const scope*,
- action, const target&, prerequisite_iterator&) const override;
+ action, const target&, prerequisite_iterator&,
+ match_extra&) const override;
// Note: rule::match() override.
//
@@ -82,6 +96,15 @@ namespace build2
using alias_rule::match; // Make Clang happy.
+ virtual recipe
+ apply (action, target&, match_extra&) const override;
+
+ virtual void
+ apply_posthoc (action, target&, match_extra&) const override;
+
+ virtual void
+ reapply (action, target&, match_extra&) const override;
+
private:
const link_rule& link_;
};
diff --git a/libbuild2/cc/lexer+comment.test.testscript b/libbuild2/cc/lexer+comment.test.testscript
index 358865c..381e479 100644
--- a/libbuild2/cc/lexer+comment.test.testscript
+++ b/libbuild2/cc/lexer+comment.test.testscript
@@ -16,6 +16,11 @@ four
/**
six /*
*/
+/* */
+/*
+
+*/
+/**/
EOI
: cxx-comment
diff --git a/libbuild2/cc/lexer+raw-string-literal.test.testscript b/libbuild2/cc/lexer+raw-string-literal.test.testscript
index bca489a..a6455eb 100644
--- a/libbuild2/cc/lexer+raw-string-literal.test.testscript
+++ b/libbuild2/cc/lexer+raw-string-literal.test.testscript
@@ -16,6 +16,7 @@ R"X(a
b)X"
R"X(a\
b)X"
+R""(a)""
EOI
<string literal>
<string literal>
@@ -24,6 +25,7 @@ EOI
<string literal>
<string literal>
<string literal>
+<string literal>
EOO
: prefix
diff --git a/libbuild2/cc/lexer.cxx b/libbuild2/cc/lexer.cxx
index beeb970..d20e0dc 100644
--- a/libbuild2/cc/lexer.cxx
+++ b/libbuild2/cc/lexer.cxx
@@ -214,7 +214,7 @@ namespace build2
// #line <integer> [<string literal>] ...
// # <integer> [<string literal>] ...
//
- // Also diagnose #include while at it.
+ // Also diagnose #include while at it if preprocessed.
//
if (!(c >= '0' && c <= '9'))
{
@@ -222,10 +222,13 @@ namespace build2
if (t.type == type::identifier)
{
- if (t.value == "include")
- fail (l) << "unexpected #include directive";
- else if (t.value != "line")
+ if (t.value != "line")
+ {
+ if (preprocessed_ && t.value == "include")
+ fail (l) << "unexpected #include directive";
+
continue;
+ }
}
else
continue;
@@ -734,8 +737,8 @@ namespace build2
// R"<delimiter>(<raw_characters>)<delimiter>"
//
// Where <delimiter> is a potentially-empty character sequence made of
- // any source character but parentheses, backslash and spaces. It can be
- // at most 16 characters long.
+ // any source character but parentheses, backslash, and spaces (in
+ // particular, it can be `"`). It can be at most 16 characters long.
//
// Note that the <raw_characters> are not processed in any way, not even
// for line continuations.
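// For illustration (not part of this change), all of the following are
// valid raw string literals once `"` is allowed in the delimiter:
//
//   R"(plain)"   delimiter is empty
//   R"X(a)X"     delimiter is X
//   R""(a)""     delimiter is `"` (previously rejected by this lexer)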
@@ -750,7 +753,7 @@ namespace build2
{
c = geth ();
- if (eos (c) || c == '\"' || c == ')' || c == '\\' || c == ' ')
+ if (eos (c) || c == ')' || c == '\\' || c == ' ')
fail (l) << "invalid raw string literal";
if (c == '(')
@@ -1108,21 +1111,18 @@ namespace build2
if (eos (c))
fail (p) << "unterminated comment";
- if (c == '*' && (c = peek ()) == '/')
+ if (c == '*')
{
- get (c);
- break;
+ if ((c = peek ()) == '/')
+ {
+ get (c);
+ break;
+ }
}
-
- if (c != '*' && c != '\\')
+ else
{
// Direct buffer scan.
//
- // Note that we should call get() prior to the direct buffer
- // scan (see butl::char_scanner for details).
- //
- get (c);
-
const char* b (gptr_);
const char* e (egptr_);
const char* p (b);
diff --git a/libbuild2/cc/lexer.hxx b/libbuild2/cc/lexer.hxx
index 81e0d97..17d706b 100644
--- a/libbuild2/cc/lexer.hxx
+++ b/libbuild2/cc/lexer.hxx
@@ -12,6 +12,8 @@
#include <libbuild2/diagnostics.hxx>
+#include <libbuild2/cc/export.hxx>
+
namespace build2
{
namespace cc
@@ -20,13 +22,15 @@ namespace build2
// sequence of tokens returned is similar to what a real C/C++ compiler
// would see from its preprocessor.
//
- // The input is a (partially-)preprocessed translation unit that may still
- // contain comments, line continuations, and preprocessor directives such
- // as #line, #pragma, but not #include (which is diagnosed). Currently,
- // all preprocessor directives except #line are ignored and no values are
- // saved from literals. The #line directive (and its shorthand notation)
- // is recognized to provide the logical token location. Note that the
- // modules-related pseudo-directives are not recognized or handled.
+ // The input is a potentially (partially-)preprocessed translation unit
+ // that may still contain comments, line continuations, and preprocessor
+ // directives such as #line and #pragma. If the input is said to be
+ // (partially-)preprocessed then #include directives are diagnosed.
+ // Currently, all preprocessor directives except #line are ignored and no
+ // values are saved from literals. The #line directive (and its shorthand
+ // notation) is recognized to provide the logical token location. Note
+ // that the modules-related pseudo-directives are not recognized or
+ // handled.
//
// While at it we also calculate the checksum of the input ignoring
// comments, whitespaces, etc. This is used to detect changes that do not
@@ -80,15 +84,19 @@ namespace build2
// Output the token value in a format suitable for diagnostics.
//
- ostream&
+ LIBBUILD2_CC_SYMEXPORT ostream&
operator<< (ostream&, const token&);
- class lexer: protected butl::char_scanner<>
+ class LIBBUILD2_CC_SYMEXPORT lexer: protected butl::char_scanner<>
{
public:
- lexer (ifdstream& is, const path_name& name)
+ // If preprocessed is true, then assume the input is at least partially
+ // preprocessed and therefore should not contain #include directives.
+ //
+ lexer (ifdstream& is, const path_name& name, bool preprocessed)
: char_scanner (is, false /* crlf */),
name_ (name),
+ preprocessed_ (preprocessed),
fail ("error", &name_),
log_file_ (name)
{
@@ -173,6 +181,8 @@ namespace build2
private:
const path_name& name_;
+ bool preprocessed_;
+
const fail_mark fail;
// Logical file and line as set by the #line directives. Note that the
diff --git a/libbuild2/cc/lexer.test.cxx b/libbuild2/cc/lexer.test.cxx
index 39e4279..82163fe 100644
--- a/libbuild2/cc/lexer.test.cxx
+++ b/libbuild2/cc/lexer.test.cxx
@@ -65,7 +65,7 @@ namespace build2
is.open (fddup (stdin_fd ()));
}
- lexer l (is, in);
+ lexer l (is, in, true /* preprocessed */);
// No use printing eos since we will either get it or loop forever.
//
diff --git a/libbuild2/cc/link-rule.cxx b/libbuild2/cc/link-rule.cxx
index 4588ce1..08a60b9 100644
--- a/libbuild2/cc/link-rule.cxx
+++ b/libbuild2/cc/link-rule.cxx
@@ -20,6 +20,8 @@
#include <libbuild2/bin/target.hxx>
#include <libbuild2/bin/utility.hxx>
+#include <libbuild2/install/utility.hxx>
+
#include <libbuild2/cc/target.hxx> // c, pc*
#include <libbuild2/cc/utility.hxx>
@@ -94,7 +96,7 @@ namespace build2
return false;
}
- if (const target* t = search_existing (n, bs, dir_path () /* out */))
+ if (const target* t = search_existing (n, bs))
{
// The same logic as in process_libraries().
//
@@ -290,13 +292,14 @@ namespace build2
if (p.is_a (x_src) ||
(x_mod != nullptr && p.is_a (*x_mod)) ||
+ (x_asp != nullptr && p.is_a (*x_asp)) ||
(x_obj != nullptr && p.is_a (*x_obj)) ||
// Header-only X library (or library with C source and X header).
(library && x_header (p, false /* c_hdr */)))
{
r.seen_x = true;
}
- else if (p.is_a<c> () ||
+ else if (p.is_a<c> () || p.is_a<S> () ||
(x_obj != nullptr && p.is_a<m> ()) ||
// Header-only C library.
(library && p.is_a<h> ()))
@@ -433,9 +436,12 @@ namespace build2
r.seen_lib = true;
}
// Some other c-common header/source (say C++ in a C rule) other than
- // a C header (we assume everyone can hanle that).
+ // a C header (we assume everyone can handle that) or some other
+ // #include'able target.
//
- else if (p.is_a<cc> () && !(x_header (p, true /* c_hdr */)))
+ else if (p.is_a<cc> () &&
+ !(x_header (p, true /* c_hdr */)) &&
+ !p.is_a (x_inc) && !p.is_a<c_inc> ())
{
r.seen_cc = true;
break;
@@ -842,6 +848,9 @@ namespace build2
// If not, then we may need the same in recursive-binless logic.
//
#if 0
+ // @@ TMP hm, this hasn't actually been enabled. So may actually
+ // enable and see if it trips up (do git-blame for good measure).
+ //
assert (false); // @@ TMP (remove before 0.16.0 release)
#endif
ux = &link_member (*ul, a, li)->as<libux> ();
@@ -903,7 +912,7 @@ namespace build2
// for binless libraries since there could be other output (e.g., .pc
// files).
//
- inject_fsdir (a, t);
+ const fsdir* dir (inject_fsdir (a, t));
// Process prerequisites, pass 1: search and match prerequisite
// libraries, search obj/bmi{} targets, and search targets we do rule
@@ -999,7 +1008,7 @@ namespace build2
//
#if 1
if (!um)
- um = (p.is_a (x_src) || p.is_a<c> () ||
+ um = (p.is_a (x_src) || p.is_a<c> () || p.is_a<S> () ||
(x_mod != nullptr && p.is_a (*x_mod)) ||
(x_obj != nullptr && (p.is_a (*x_obj) || p.is_a<m> ())) ||
x_header (p, true));
@@ -1030,8 +1039,8 @@ namespace build2
bool mod (x_mod != nullptr && p.is_a (*x_mod));
bool hdr (false);
- if (mod ||
- p.is_a (x_src) || p.is_a<c> () ||
+ if (mod ||
+ p.is_a (x_src) || p.is_a<c> () || p.is_a<S> () ||
(x_obj != nullptr && (p.is_a (*x_obj) || p.is_a<m> ())))
{
binless = binless && (mod ? user_binless : false);
@@ -1192,6 +1201,12 @@ namespace build2
}
pt = &p.search (t);
+
+ if (pt == dir)
+ {
+ pt = nullptr;
+ continue;
+ }
}
if (skip (*pt))
@@ -1686,7 +1701,8 @@ namespace build2
fsdir::static_type,
path_cast<dir_path> (t.path () + ".dlls"),
t.out,
- string () /* name */));
+ string () /* name */,
+ nullopt /* ext */));
// By default our backlinking logic will try to symlink the
// directory and it can even be done on Windows using junctions.
@@ -1900,7 +1916,7 @@ namespace build2
//
if (mod
? p1.is_a (*x_mod)
- : (p1.is_a (x_src) || p1.is_a<c> () ||
+ : (p1.is_a (x_src) || p1.is_a<c> () || p1.is_a<S> () ||
(x_obj != nullptr && (p1.is_a (*x_obj) || p1.is_a<m> ()))))
{
src = true;
@@ -1914,10 +1930,11 @@ namespace build2
p1.is_a<libx> () ||
p1.is_a<liba> () || p1.is_a<libs> () || p1.is_a<libux> () ||
p1.is_a<bmi> () || p1.is_a<bmix> () ||
- ((mod ||
- p.is_a (x_src) ||
+ ((mod ||
+ p.is_a (x_src) ||
+ (x_asp != nullptr && p.is_a (*x_asp)) ||
(x_obj != nullptr && p.is_a (*x_obj))) && x_header (p1)) ||
- ((p.is_a<c> () ||
+ ((p.is_a<c> () || p.is_a<S> () ||
(x_obj != nullptr && p.is_a<m> ())) && p1.is_a<h> ()))
continue;
@@ -1931,7 +1948,7 @@ namespace build2
if (!src)
fail << "synthesized dependency for prerequisite " << p
<< " would be incompatible with existing target " << *pt <<
- info << "no existing c/" << x_lang << " source prerequisite" <<
+ info << "no existing C/" << x_lang << " source prerequisite" <<
info << "specify corresponding " << rtt.name << "{} "
<< "dependency explicitly";
@@ -2060,7 +2077,7 @@ namespace build2
{
if (mod
? p1.is_a (*x_mod)
- : (p1.is_a (x_src) || p1.is_a<c> () ||
+ : (p1.is_a (x_src) || p1.is_a<c> () || p1.is_a<S> () ||
(x_obj != nullptr && (p1.is_a (*x_obj) || p1.is_a<m> ()))))
{
// Searching our own prerequisite is ok, p1 must already be
@@ -2244,17 +2261,47 @@ namespace build2
*type != "cc" &&
type->compare (0, 3, "cc,") != 0)
{
- auto& md (l->data<link_rule::match_data> (d.a));
- assert (md.for_install); // Must have been executed.
+ auto* md (l->try_data<link_rule::match_data> (d.a));
+
+ if (md == nullptr)
+ fail << "library " << *l << " is not built with cc module-based "
+ << "link rule" <<
+ info << "mark it as generic with cc.type=cc target-specific "
+ << "variable";
+
+ assert (md->for_install); // Must have been executed.
// The user will get the target name from the context info.
//
- if (*md.for_install != *d.for_install)
+ if (*md->for_install != *d.for_install)
fail << "incompatible " << *l << " build" <<
- info << "library is built " << (*md.for_install ? "" : "not ")
+ info << "library is built " << (*md->for_install ? "" : "not ")
<< "for install";
}
+ auto newer = [&d, l] ()
+ {
+ // @@ Work around the unexecuted member for installed libraries
+ // issue (see search_library() for details).
+ //
+ // Note that the member may not even be matched, let alone
+ // executed, so we have to go through the group to detect this
+ // case (if the group is not matched, then the member got to be).
+ //
+#if 0
+ return l->newer (d.mt);
+#else
+ const target* g (l->group);
+ target_state s (g != nullptr &&
+ g->matched (d.a, memory_order_acquire) &&
+ g->state[d.a].rule == &file_rule::rule_match
+ ? target_state::unchanged
+ : l->executed_state (d.a));
+
+ return l->newer (d.mt, s);
+#endif
+ };
+
if (d.li.type == otype::a)
{
// Linking a utility library to a static library.
@@ -2282,7 +2329,7 @@ namespace build2
// Check if this library renders us out of date.
//
if (d.update != nullptr)
- *d.update = *d.update || l->newer (d.mt);
+ *d.update = *d.update || newer ();
for (const target* pt: l->prerequisite_targets[d.a])
{
@@ -2321,7 +2368,7 @@ namespace build2
// Check if this library renders us out of date.
//
if (d.update != nullptr)
- *d.update = *d.update || l->newer (d.mt);
+ *d.update = *d.update || newer ();
// On Windows a shared library is a DLL with the import library as
// an ad hoc group member. MinGW though can link directly to DLLs
@@ -2817,7 +2864,7 @@ namespace build2
// (Re)generate pkg-config's .pc file. While the target itself might be
// up-to-date from a previous run, there is no guarantee that .pc exists
// or also up-to-date. So to keep things simple we just regenerate it
- // unconditionally (and avoid doing so on uninstall; see pkconfig_save()
+ // unconditionally (and avoid doing so on uninstall; see pkgconfig_save()
// for details).
//
// Also, if you are wondering why don't we just always produce this .pc,
@@ -2827,7 +2874,7 @@ namespace build2
// There is a further complication: we may have no intention of
// installing the library but still need to update it for install (see
// install_scope() for background). In which case we may still not have
- // the installation directories. We handle this in pkconfig_save() by
+ // the installation directories. We handle this in pkgconfig_save() by
// skipping the generation of .pc files (and letting the install rule
// complain if we do end up trying to install them).
//
@@ -3264,10 +3311,72 @@ namespace build2
rpath_libraries (sargs, bs, a, t, li, for_install /* link */);
lookup l;
-
if ((l = t["bin.rpath"]) && !l->empty ())
+ {
+ // See if we need to make the specified paths relative using the
+ // $ORIGIN (Linux, BSD) or @loader_path (Mac OS) mechanisms.
+ //
+ optional<dir_path> origin;
+ if (for_install && cast_false<bool> (rs["install.relocatable"]))
+ {
+ // Note that both $ORIGIN and @loader_path will be expanded to
+ // the path of the binary that we are building (executable or
+ // shared library) as opposed to top-level executable.
+ //
+ path p (install::resolve_file (t));
+
+ // If the file is not installable then the install.relocatable
+ // semantics does not apply, naturally.
+ //
+ if (!p.empty ())
+ origin = p.directory ();
+ }
+
+ bool origin_used (false);
for (const dir_path& p: cast<dir_paths> (l))
- sargs.push_back ("-Wl,-rpath," + p.string ());
+ {
+ string o ("-Wl,-rpath,");
+
+ // Note that we only rewrite absolute paths so if the user
+ // specified $ORIGIN or @loader_path manually, we will pass it
+ // through as is.
+ //
+ if (origin && p.absolute ())
+ {
+ dir_path l;
+ try
+ {
+ l = p.relative (*origin);
+ }
+ catch (const invalid_path&)
+ {
+ fail << "unable to make rpath " << p << " relative to "
+ << *origin <<
+ info << "required for relocatable installation";
+ }
+
+ o += (tclass == "macos" ? "@loader_path" : "$ORIGIN");
+
+ if (!l.empty ())
+ {
+ o += path_traits::directory_separator;
+ o += l.string ();
+ }
+
+ origin_used = true;
+ }
+ else
+ o += p.string ();
+
+ sargs.push_back (move (o));
+ }
+
+ // According to the Internet, `-Wl,-z,origin` is not needed except
+ // potentially for older BSDs.
+ //
+ if (origin_used && tclass == "bsd")
+ sargs.push_back ("-Wl,-z,origin");
+ }
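// Illustration (hypothetical install layout, not from this change): with
// install.relocatable=true, a shared library installed into /opt/app/lib
// and bin.rpath containing /opt/app/lib/plugins would get
// -Wl,-rpath,$ORIGIN/plugins on Linux/BSD and
// -Wl,-rpath,@loader_path/plugins on Mac OS, while relative or manually
// specified $ORIGIN/@loader_path entries are passed through as is.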
if ((l = t["bin.rpath_link"]) && !l->empty ())
{
@@ -3301,25 +3410,24 @@ namespace build2
// Extra system library dirs (last).
//
- assert (sys_lib_dirs_extra <= sys_lib_dirs.size ());
+ assert (sys_lib_dirs_mode + sys_lib_dirs_extra <= sys_lib_dirs.size ());
+
+ // Note that the mode options are added as part of cmode.
+ //
+ auto b (sys_lib_dirs.begin () + sys_lib_dirs_mode);
+ auto x (b + sys_lib_dirs_extra);
if (tsys == "win32-msvc")
{
// If we have no LIB environment variable set, then we add all of
// them. But we want extras to come first.
//
- // Note that the mode options are added as part of cmode.
- //
- auto b (sys_lib_dirs.begin () + sys_lib_dirs_mode);
- auto m (sys_lib_dirs.begin () + sys_lib_dirs_extra);
- auto e (sys_lib_dirs.end ());
-
- for (auto i (m); i != e; ++i)
+ for (auto i (b); i != x; ++i)
sargs1.push_back ("/LIBPATH:" + i->string ());
if (!getenv ("LIB"))
{
- for (auto i (b); i != m; ++i)
+ for (auto i (x), e (sys_lib_dirs.end ()); i != e; ++i)
sargs1.push_back ("/LIBPATH:" + i->string ());
}
@@ -3330,7 +3438,7 @@ namespace build2
append_option_values (
args,
"-L",
- sys_lib_dirs.begin () + sys_lib_dirs_extra, sys_lib_dirs.end (),
+ b, x,
[] (const dir_path& d) {return d.string ().c_str ();});
}
}
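// Illustration (hypothetical layout, not from this change): if sys_lib_dirs
// is [M, E, B1, B2] with one mode dir M, one extra dir E (e.g.,
// /usr/local/lib), and built-in dirs B1, B2, then only E is emitted here as
// -L or /LIBPATH; M is already passed as part of cmode and B1, B2 are
// appended for MSVC only when the LIB environment variable is unset.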
@@ -3426,7 +3534,7 @@ namespace build2
&cs, &update, mt,
bs, a, *f, la, p.data, li,
for_install, true, true, &lc);
- f = nullptr; // Timestamp checked by hash_libraries().
+ f = nullptr; // Timestamp checked by append_libraries().
}
else
{
@@ -3687,6 +3795,8 @@ namespace build2
{
ld = &cpath;
+ append_diag_color_options (args);
+
// Add the option that triggers building a shared library and
// take care of any extras (e.g., import library).
//
@@ -3859,6 +3969,14 @@ namespace build2
try_rmfile (relt, true);
}
+ // We have no choice but to serialize early if we want the command line
+ // printed shortly before actually executing the linker. Failing that, it
+ // may look like we are still executing in parallel.
+ //
+ scheduler::alloc_guard jobs_ag;
+ if (!ctx.dry_run && cast_false<bool> (t[c_serialize]))
+ jobs_ag = scheduler::alloc_guard (*ctx.sched, phase_unlock (nullptr));
+
if (verb == 1)
print_diag (lt.static_library () ? "ar" : "ld", t);
else if (verb == 2)
@@ -3879,10 +3997,15 @@ namespace build2
//
// Note that we are not going to bother with oargs for this.
//
+ // Note also that we now have scheduler::serialize() which allows us to
+ // block until full parallelism is available (this mode can currently
+ // be forced with cc.serialize=true; maybe we should invent something
+ // like config.cc.link_serialize or some such which can be used when
+ // LTO is enabled).
+ //
string jobs_arg;
- scheduler::alloc_guard jobs_extra;
- if (!lt.static_library ())
+ if (!ctx.dry_run && !lt.static_library ())
{
switch (ctype)
{
@@ -3898,8 +4021,10 @@ namespace build2
auto i (find_option_prefix ("-flto", args.rbegin (), args.rend ()));
if (i != args.rend () && strcmp (*i, "-flto=auto") == 0)
{
- jobs_extra = scheduler::alloc_guard (ctx.sched, 0);
- jobs_arg = "-flto=" + to_string (1 + jobs_extra.n);
+ if (jobs_ag.n == 0) // Might already have (see above).
+ jobs_ag = scheduler::alloc_guard (*ctx.sched, 0);
+
+ jobs_arg = "-flto=" + to_string (1 + jobs_ag.n);
*i = jobs_arg.c_str ();
}
break;
@@ -3917,8 +4042,10 @@ namespace build2
strcmp (*i, "-flto=thin") == 0 &&
!find_option_prefix ("-flto-jobs=", args))
{
- jobs_extra = scheduler::alloc_guard (ctx.sched, 0);
- jobs_arg = "-flto-jobs=" + to_string (1 + jobs_extra.n);
+ if (jobs_ag.n == 0) // Might already have (see above).
+ jobs_ag = scheduler::alloc_guard (*ctx.sched, 0);
+
+ jobs_arg = "-flto-jobs=" + to_string (1 + jobs_ag.n);
args.insert (i.base (), jobs_arg.c_str ()); // After -flto=thin.
}
break;
@@ -4090,8 +4217,6 @@ namespace build2
if (!e)
throw failed ();
}
-
- jobs_extra.deallocate ();
}
catch (const process_error& e)
{
@@ -4171,6 +4296,8 @@ namespace build2
}
}
+ jobs_ag.deallocate ();
+
// For Windows generate (or clean up) rpath-emulating assembly.
//
if (tclass == "windows")
diff --git a/libbuild2/cc/module.cxx b/libbuild2/cc/module.cxx
index aa9a526..cf6c6e4 100644
--- a/libbuild2/cc/module.cxx
+++ b/libbuild2/cc/module.cxx
@@ -11,10 +11,7 @@
#include <libbuild2/bin/target.hxx>
-#include <libbuild2/cc/target.hxx> // pc*
-
#include <libbuild2/config/utility.hxx>
-#include <libbuild2/install/utility.hxx>
#include <libbuild2/cc/guess.hxx>
@@ -60,7 +57,7 @@ namespace build2
// config.x
//
- strings mode;
+ strings omode; // Original mode.
{
// Normally we will have a persistent configuration and computing the
// default value every time will be a waste. So try without a default
@@ -144,19 +141,31 @@ namespace build2
fail << "invalid path '" << s << "' in " << config_x;
}
- mode.assign (++v.begin (), v.end ());
+ omode.assign (++v.begin (), v.end ());
// Save original path/mode in *.config.path/mode.
//
rs.assign (x_c_path) = xc;
- rs.assign (x_c_mode) = mode;
+ rs.assign (x_c_mode) = omode;
+
+ // Merge the configured mode options into user-specified (which must
+ // be done before loading the *.guess module).
+ //
+ // In particular, this ability to specify the compiler mode in a
+ // buildfile is useful in embedded development where the project may
+ // need to hardcode things like -target, -nostdinc, etc.
+ //
+ const strings& mode (cast<strings> (rs.assign (x_mode) += omode));
// Figure out which compiler we are dealing with, its target, etc.
//
// Note that we could allow guess() to modify mode to support
// imaginary options (such as /MACHINE for cl.exe). Though it's not
// clear what cc.mode would contain (original or modified). Note that
- // we are now folding *.std options into mode options.
+ // we are now adding *.std options into mode options.
+ //
+ // @@ But can't the language standard options alter things like search
+ // directories?
//
x_info = &build2::cc::guess (
ctx,
@@ -225,9 +234,10 @@ namespace build2
// Assign values to variables that describe the compiler.
//
+ // Note: x_mode is dealt with above.
+ //
rs.assign (x_path) = process_path_ex (
xi.path, x_name, xi.checksum, env_checksum);
- const strings& xm (cast<strings> (rs.assign (x_mode) = move (mode)));
rs.assign (x_id) = xi.id.string ();
rs.assign (x_id_type) = to_string (xi.id.type);
@@ -285,8 +295,8 @@ namespace build2
if (!xi.pattern.empty ())
h.assign ("config.cc.pattern") = xi.pattern;
- if (!xm.empty ())
- h.assign ("config.cc.mode") = xm;
+ if (!omode.empty ())
+ h.assign ("config.cc.mode") = move (omode);
h.assign (c_runtime) = xi.runtime;
h.assign (c_stdlib) = xi.c_stdlib;
@@ -357,6 +367,8 @@ namespace build2
# ifdef __APPLE__
static const dir_path a_usr_inc (
"/Library/Developer/CommandLineTools/SDKs/MacOSX*.sdk/usr/include");
+ static const dir_path a_usr_lib (
+ "/Library/Developer/CommandLineTools/SDKs/MacOSX*.sdk/usr/lib");
# endif
#endif
@@ -611,10 +623,10 @@ namespace build2
switch (xi.class_)
{
case compiler_class::gcc:
- lib_dirs = gcc_library_search_dirs (xi.path, rs);
+ lib_dirs = gcc_library_search_dirs (xi, rs);
break;
case compiler_class::msvc:
- lib_dirs = msvc_library_search_dirs (xi.path, rs);
+ lib_dirs = msvc_library_search_dirs (xi, rs);
break;
}
}
@@ -628,10 +640,10 @@ namespace build2
switch (xi.class_)
{
case compiler_class::gcc:
- hdr_dirs = gcc_header_search_dirs (xi.path, rs);
+ hdr_dirs = gcc_header_search_dirs (xi, rs);
break;
case compiler_class::msvc:
- hdr_dirs = msvc_header_search_dirs (xi.path, rs);
+ hdr_dirs = msvc_header_search_dirs (xi, rs);
break;
}
}
@@ -649,8 +661,8 @@ namespace build2
sys_hdr_dirs_mode = hdr_dirs.second;
sys_mod_dirs_mode = mod_dirs ? mod_dirs->second : 0;
- sys_lib_dirs_extra = lib_dirs.first.size ();
- sys_hdr_dirs_extra = hdr_dirs.first.size ();
+ sys_lib_dirs_extra = 0;
+ sys_hdr_dirs_extra = 0;
#ifndef _WIN32
// Add /usr/local/{include,lib}. We definitely shouldn't do this if we
@@ -666,11 +678,11 @@ namespace build2
// on the next invocation.
//
{
- auto& is (hdr_dirs.first);
+ auto& hs (hdr_dirs.first);
auto& ls (lib_dirs.first);
- bool ui (find (is.begin (), is.end (), usr_inc) != is.end ());
- bool uli (find (is.begin (), is.end (), usr_loc_inc) != is.end ());
+ bool ui (find (hs.begin (), hs.end (), usr_inc) != hs.end ());
+ bool uli (find (hs.begin (), hs.end (), usr_loc_inc) != hs.end ());
#ifdef __APPLE__
// On Mac OS starting from 10.14 there is no longer /usr/include.
@@ -693,15 +705,28 @@ namespace build2
//
// Is Apple's /usr/include.
//
- if (!ui && !uli)
+ // Also, it appears neither Clang nor GCC report MacOSX*.sdk/usr/lib
+ // with -print-search-dirs but they do search in there. So we add it
+ // to our list if we see MacOSX*.sdk/usr/include.
+ //
+ auto aui (find_if (hs.begin (), hs.end (),
+ [] (const dir_path& d)
+ {
+ return path_match (d, a_usr_inc);
+ }));
+
+ if (aui != hs.end ())
{
- for (const dir_path& d: is)
+ if (!ui)
+ ui = true;
+
+ if (find_if (ls.begin (), ls.end (),
+ [] (const dir_path& d)
+ {
+ return path_match (d, a_usr_lib);
+ }) == ls.end ())
{
- if (path_match (d, a_usr_inc))
- {
- ui = true;
- break;
- }
+ ls.push_back (aui->directory () /= "lib");
}
}
#endif
@@ -709,18 +734,29 @@ namespace build2
{
bool ull (find (ls.begin (), ls.end (), usr_loc_lib) != ls.end ());
- // Many platforms don't search in /usr/local/lib by default (but do
- // for headers in /usr/local/include). So add it as the last option.
+ // Many platforms don't search in /usr/local/lib by default but do
+ // for headers in /usr/local/include.
+ //
+ // Note that customarily /usr/local/include is searched before
+ // /usr/include so we add /usr/local/lib before built-in entries
+ // (there isn't really a way to add it after since all we can do is
+ // specify it with -L).
//
if (!ull && exists (usr_loc_lib, true /* ignore_error */))
- ls.push_back (usr_loc_lib);
+ {
+ ls.insert (ls.begin () + sys_lib_dirs_mode, usr_loc_lib);
+ ++sys_lib_dirs_extra;
+ }
// FreeBSD is at least consistent: it searches in neither. Quoting
// its wiki: "FreeBSD can't even find libraries that it installed."
// So let's help it a bit.
//
if (!uli && exists (usr_loc_inc, true /* ignore_error */))
- is.push_back (usr_loc_inc);
+ {
+ hs.insert (hs.begin () + sys_hdr_dirs_mode, usr_loc_inc);
+ ++sys_hdr_dirs_extra;
+ }
}
}
#endif
@@ -824,8 +860,11 @@ namespace build2
dr << "\n hdr dirs";
for (size_t i (0); i != incs.size (); ++i)
{
- if (i == sys_hdr_dirs_extra)
+ if ((sys_hdr_dirs_mode != 0 && i == sys_hdr_dirs_mode) ||
+ (sys_hdr_dirs_extra != 0 &&
+ i == sys_hdr_dirs_extra + sys_hdr_dirs_mode))
dr << "\n --";
+
dr << "\n " << incs[i];
}
}
@@ -835,8 +874,11 @@ namespace build2
dr << "\n lib dirs";
for (size_t i (0); i != libs.size (); ++i)
{
- if (i == sys_lib_dirs_extra)
+ if ((sys_lib_dirs_mode != 0 && i == sys_lib_dirs_mode) ||
+ (sys_lib_dirs_extra != 0 &&
+ i == sys_lib_dirs_extra + sys_lib_dirs_mode))
dr << "\n --";
+
dr << "\n " << libs[i];
}
}
@@ -957,43 +999,7 @@ namespace build2
// Register target types and configure their "installability".
//
- bool install_loaded (cast_false<bool> (rs["install.loaded"]));
-
- {
- using namespace install;
-
- // Note: not registering x_obj (it's registered seperately by the
- // x.objx module).
- //
- rs.insert_target_type (x_src);
-
- auto insert_hdr = [&rs, install_loaded] (const target_type& tt)
- {
- rs.insert_target_type (tt);
-
- // Install headers into install.include.
- //
- if (install_loaded)
- install_path (rs, tt, dir_path ("include"));
- };
-
- // Note: module (x_mod) is in x_hdr.
- //
- for (const target_type* const* ht (x_hdr); *ht != nullptr; ++ht)
- insert_hdr (**ht);
-
- // Also register the C header for C-derived languages.
- //
- if (*x_hdr != &h::static_type)
- insert_hdr (h::static_type);
-
- rs.insert_target_type<pc> ();
- rs.insert_target_type<pca> ();
- rs.insert_target_type<pcs> ();
-
- if (install_loaded)
- install_path<pc> (rs, dir_path ("pkgconfig"));
- }
+ load_module (rs, rs, (string (x) += ".types"), loc);
// Register rules.
//
@@ -1091,8 +1097,11 @@ namespace build2
// them in case they depend on stuff that we need to install (see the
// install rule implementations for details).
//
- if (install_loaded)
+ if (cast_false<bool> (rs["install.loaded"]))
{
+ // Note: these rule implementations rely quite heavily on the fact that
+ // these are the only target types they are registered for.
+
const install_rule& ir (*this);
r.insert<exe> (perform_install_id, x_install, ir);
diff --git a/libbuild2/cc/module.hxx b/libbuild2/cc/module.hxx
index 2a8611b..4213516 100644
--- a/libbuild2/cc/module.hxx
+++ b/libbuild2/cc/module.hxx
@@ -17,6 +17,7 @@
#include <libbuild2/cc/compile-rule.hxx>
#include <libbuild2/cc/link-rule.hxx>
#include <libbuild2/cc/install-rule.hxx>
+#include <libbuild2/cc/predefs-rule.hxx>
#include <libbuild2/cc/export.hxx>
@@ -115,18 +116,18 @@ namespace build2
// Defined in gcc.cxx.
//
pair<dir_paths, size_t>
- gcc_header_search_dirs (const process_path&, scope&) const;
+ gcc_header_search_dirs (const compiler_info&, scope&) const;
pair<dir_paths, size_t>
- gcc_library_search_dirs (const process_path&, scope&) const;
+ gcc_library_search_dirs (const compiler_info&, scope&) const;
// Defined in msvc.cxx.
//
pair<dir_paths, size_t>
- msvc_header_search_dirs (const process_path&, scope&) const;
+ msvc_header_search_dirs (const compiler_info&, scope&) const;
pair<dir_paths, size_t>
- msvc_library_search_dirs (const process_path&, scope&) const;
+ msvc_library_search_dirs (const compiler_info&, scope&) const;
};
class LIBBUILD2_CC_SYMEXPORT module: public build2::module,
@@ -134,7 +135,8 @@ namespace build2
public link_rule,
public compile_rule,
public install_rule,
- public libux_install_rule
+ public libux_install_rule,
+ public predefs_rule
{
public:
explicit
@@ -143,7 +145,8 @@ namespace build2
link_rule (move (d)),
compile_rule (move (d), rs),
install_rule (move (d), *this),
- libux_install_rule (move (d), *this) {}
+ libux_install_rule (move (d), *this),
+ predefs_rule (move (d)) {}
void
init (scope&,
diff --git a/libbuild2/cc/msvc.cxx b/libbuild2/cc/msvc.cxx
index 8fcbb0b..d21969c 100644
--- a/libbuild2/cc/msvc.cxx
+++ b/libbuild2/cc/msvc.cxx
@@ -264,6 +264,13 @@ namespace build2
}
else
continue;
+
+ // Ignore relative paths. Or maybe we should warn?
+ //
+ if (d.relative ())
+ continue;
+
+ d.normalize ();
}
catch (const invalid_path& e)
{
@@ -271,10 +278,7 @@ namespace build2
<< o << "'";
}
- // Ignore relative paths. Or maybe we should warn?
- //
- if (!d.relative ())
- r.push_back (move (d));
+ r.push_back (move (d));
}
}
@@ -295,6 +299,13 @@ namespace build2
d = dir_path (o, 9, string::npos);
else
continue;
+
+ // Ignore relative paths. Or maybe we should warn?
+ //
+ if (d.relative ())
+ continue;
+
+ d.normalize ();
}
catch (const invalid_path& e)
{
@@ -302,10 +313,7 @@ namespace build2
<< o << "'";
}
- // Ignore relative paths. Or maybe we should warn?
- //
- if (!d.relative ())
- r.push_back (move (d));
+ r.push_back (move (d));
}
}
@@ -324,7 +332,7 @@ namespace build2
{
try
{
- r.push_back (dir_path (move (d)));
+ r.push_back (dir_path (move (d)).normalize ());
}
catch (const invalid_path&)
{
@@ -337,7 +345,7 @@ namespace build2
// Extract system header search paths from MSVC.
//
pair<dir_paths, size_t> config_module::
- msvc_header_search_dirs (const process_path&, scope& rs) const
+ msvc_header_search_dirs (const compiler_info&, scope& rs) const
{
// MSVC doesn't have any built-in paths and all of them either come from
// the INCLUDE environment variable or are specified explicitly on the
@@ -365,7 +373,7 @@ namespace build2
// Extract system library search paths from MSVC.
//
pair<dir_paths, size_t> config_module::
- msvc_library_search_dirs (const process_path&, scope& rs) const
+ msvc_library_search_dirs (const compiler_info&, scope& rs) const
{
// MSVC doesn't seem to have any built-in paths and all of them either
// come from the LIB environment variable or are specified explicitly on
@@ -390,9 +398,22 @@ namespace build2
// Inspect the file and determine if it is static or import library.
// Return otype::e if it is neither (which we quietly ignore).
//
+ static global_cache<otype> library_type_cache;
+
static otype
library_type (const process_path& ld, const path& l)
{
+ string key;
+ {
+ sha256 cs;
+ cs.append (ld.effect_string ());
+ cs.append (l.string ());
+ key = cs.string ();
+
+ if (const otype* r = library_type_cache.find (key))
+ return *r;
+ }
+
// There are several reasonably reliable methods to tell whether it is a
// static or import library. One is lib.exe /LIST -- if there aren't any
// .obj members, then it is most likely an import library (it can also
@@ -458,14 +479,11 @@ namespace build2
// libhello\hello.lib.obj
// hello-0.1.0-a.0.19700101000000.dll
//
- // Archive member name at 746: [...]hello.dll[/][ ]*
- // Archive member name at 8C70: [...]hello.lib.obj[/][ ]*
- //
size_t n (s.size ());
for (; n != 0 && s[n - 1] == ' '; --n) ; // Skip trailing spaces.
- if (n >= 7) // At least ": X.obj" or ": X.dll".
+ if (n >= 5) // At least "X.obj" or "X.dll".
{
n -= 4; // Beginning of extension.
@@ -500,23 +518,25 @@ namespace build2
return otype::e;
}
- if (obj && dll)
+ otype r;
+ if (obj != dll)
+ r = obj ? otype::a : otype::s;
+ else
{
- warn << l << " looks like hybrid static/import library, ignoring";
- return otype::e;
- }
+ if (obj && dll)
+ warn << l << " looks like hybrid static/import library, ignoring";
- if (!obj && !dll)
- {
- warn << l << " looks like empty static or import library, ignoring";
- return otype::e;
+ if (!obj && !dll)
+ warn << l << " looks like empty static or import library, ignoring";
+
+ r = otype::e;
}
- return obj ? otype::a : otype::s;
+ return library_type_cache.insert (move (key), r);
}
template <typename T>
- static T*
+ static pair<T*, bool>
msvc_search_library (const process_path& ld,
const dir_path& d,
const prerequisite_key& p,
@@ -562,20 +582,26 @@ namespace build2
//
timestamp mt (mtime (f));
- if (mt != timestamp_nonexistent && library_type (ld, f) == lt)
+ pair<T*, bool> r (nullptr, true);
+
+ if (mt != timestamp_nonexistent)
{
- // Enter the target.
- //
- T* t;
- common::insert_library (p.scope->ctx, t, name, d, ld, e, exist, trace);
- t->path_mtime (move (f), mt);
- return t;
+ if (library_type (ld, f) == lt)
+ {
+ // Enter the target.
+ //
+ common::insert_library (
+ p.scope->ctx, r.first, name, d, ld, e, exist, trace);
+ r.first->path_mtime (move (f), mt);
+ }
+ else
+ r.second = false; // Don't search for binless.
}
- return nullptr;
+ return r;
}
- liba* common::
+ pair<bin::liba*, bool> common::
msvc_search_static (const process_path& ld,
const dir_path& d,
const prerequisite_key& p,
@@ -583,14 +609,21 @@ namespace build2
{
tracer trace (x, "msvc_search_static");
- liba* r (nullptr);
+ liba* a (nullptr);
+ bool b (true);
- auto search = [&r, &ld, &d, &p, exist, &trace] (
+ auto search = [&a, &b, &ld, &d, &p, exist, &trace] (
const char* pf, const char* sf) -> bool
{
- r = msvc_search_library<liba> (
- ld, d, p, otype::a, pf, sf, exist, trace);
- return r != nullptr;
+ pair<liba*, bool> r (msvc_search_library<liba> (
+ ld, d, p, otype::a, pf, sf, exist, trace));
+
+ if (r.first != nullptr)
+ a = r.first;
+ else if (!r.second)
+ b = false;
+
+ return a != nullptr;
};
// Try:
@@ -603,10 +636,10 @@ namespace build2
search ("", "") ||
search ("lib", "") ||
search ("", "lib") ||
- search ("", "_static") ? r : nullptr;
+ search ("", "_static") ? make_pair (a, true) : make_pair (nullptr, b);
}
- libs* common::
+ pair<bin::libs*, bool> common::
msvc_search_shared (const process_path& ld,
const dir_path& d,
const prerequisite_key& pk,
@@ -617,12 +650,14 @@ namespace build2
assert (pk.scope != nullptr);
libs* s (nullptr);
+ bool b (true);
- auto search = [&s, &ld, &d, &pk, exist, &trace] (
+ auto search = [&s, &b, &ld, &d, &pk, exist, &trace] (
const char* pf, const char* sf) -> bool
{
- if (libi* i = msvc_search_library<libi> (
- ld, d, pk, otype::s, pf, sf, exist, trace))
+ pair<libi*, bool> r (msvc_search_library<libi> (
+ ld, d, pk, otype::s, pf, sf, exist, trace));
+ if (r.first != nullptr)
{
ulock l (
insert_library (
@@ -630,6 +665,8 @@ namespace build2
if (!exist)
{
+ libi* i (r.first);
+
if (l.owns_lock ())
{
s->adhoc_member = i; // We are first.
@@ -643,6 +680,8 @@ namespace build2
s->path_mtime (path (), i->mtime ());
}
}
+ else if (!r.second)
+ b = false;
return s != nullptr;
};
@@ -655,7 +694,7 @@ namespace build2
return
search ("", "") ||
search ("lib", "") ||
- search ("", "dll") ? s : nullptr;
+ search ("", "dll") ? make_pair (s, true) : make_pair (nullptr, b);
}
}
}
diff --git a/libbuild2/cc/parser.cxx b/libbuild2/cc/parser.cxx
index dc5093f..f62847e 100644
--- a/libbuild2/cc/parser.cxx
+++ b/libbuild2/cc/parser.cxx
@@ -15,9 +15,11 @@ namespace build2
using type = token_type;
void parser::
- parse (ifdstream& is, const path_name& in, unit& u)
+ parse (ifdstream& is, const path_name& in, unit& u, const compiler_id& cid)
{
- lexer l (is, in);
+ cid_ = &cid;
+
+ lexer l (is, in, true /* preprocessed */);
l_ = &l;
u_ = &u;
@@ -82,6 +84,12 @@ namespace build2
// to call it __import) or it can have a special attribute (GCC
// currently marks it with [[__translated]]).
//
+ // Similarly, MSVC drops the `module;` marker and replaces all
+ // other `module` keywords with `__preprocessed_module`.
+ //
+ // Clang doesn't appear to rewrite anything, at least as of
+ // version 18.
+ //
if (bb == 0 && t.first)
{
const string& id (t.value); // Note: tracks t.
@@ -102,7 +110,9 @@ namespace build2
// Fall through.
}
- if (id == "module")
+ if (id == "module" ||
+ (cid_->type == compiler_type::msvc &&
+ id == "__preprocessed_module"))
{
location_value l (get_location (t));
l_->next (t);
@@ -113,7 +123,9 @@ namespace build2
else
n = false;
}
- else if (id == "import" /*|| id == "__import"*/)
+ else if (id == "import" /* ||
+ (cid_->type == compiler_type::gcc &&
+ id == "__import")*/)
{
l_->next (t);
@@ -181,7 +193,7 @@ namespace build2
//
pair<string, bool> np (parse_module_name (t, true /* partition */));
- // Should be {}-balanced.
+ // Skip attributes (should be {}-balanced).
//
for (;
t.type != type::eos && t.type != type::semi && !t.first;
@@ -262,7 +274,7 @@ namespace build2
return;
}
- // Should be {}-balanced.
+ // Skip attributes (should be {}-balanced).
//
for (;
t.type != type::eos && t.type != type::semi && !t.first;
diff --git a/libbuild2/cc/parser.hxx b/libbuild2/cc/parser.hxx
index 1fbf1a3..0c2eb2d 100644
--- a/libbuild2/cc/parser.hxx
+++ b/libbuild2/cc/parser.hxx
@@ -10,6 +10,7 @@
#include <libbuild2/diagnostics.hxx>
#include <libbuild2/cc/types.hxx>
+#include <libbuild2/cc/guess.hxx> // compiler_id
namespace build2
{
@@ -23,16 +24,19 @@ namespace build2
class parser
{
public:
+ // The compiler_id argument should identify the compiler that has done
+ // the preprocessing.
+ //
unit
- parse (ifdstream& is, const path_name& n)
+ parse (ifdstream& is, const path_name& n, const compiler_id& cid)
{
unit r;
- parse (is, n, r);
+ parse (is, n, r, cid);
return r;
}
void
- parse (ifdstream&, const path_name&, unit&);
+ parse (ifdstream&, const path_name&, unit&, const compiler_id&);
private:
void
@@ -54,6 +58,7 @@ namespace build2
string checksum; // Translation unit checksum.
private:
+ const compiler_id* cid_;
lexer* l_;
unit* u_;
diff --git a/libbuild2/cc/parser.test.cxx b/libbuild2/cc/parser.test.cxx
index 1d5930a..2270d32 100644
--- a/libbuild2/cc/parser.test.cxx
+++ b/libbuild2/cc/parser.test.cxx
@@ -44,7 +44,7 @@ namespace build2
}
parser p;
- unit u (p.parse (is, in));
+ unit u (p.parse (is, in, compiler_id (compiler_type::gcc, "")));
switch (u.type)
{
diff --git a/libbuild2/cc/pkgconfig-libpkgconf.cxx b/libbuild2/cc/pkgconfig-libpkgconf.cxx
index 81a96c3..f3754d3 100644
--- a/libbuild2/cc/pkgconfig-libpkgconf.cxx
+++ b/libbuild2/cc/pkgconfig-libpkgconf.cxx
@@ -81,10 +81,17 @@ namespace build2
#endif
;
+#if defined(LIBPKGCONF_VERSION) && LIBPKGCONF_VERSION >= 10900
+ static bool
+ pkgconf_error_handler (const char* msg,
+ const pkgconf_client_t*,
+ void*)
+#else
static bool
pkgconf_error_handler (const char* msg,
const pkgconf_client_t*,
const void*)
+#endif
{
error << runtime_error (msg); // Sanitize the message (trailing dot).
return true;
diff --git a/libbuild2/cc/pkgconfig.cxx b/libbuild2/cc/pkgconfig.cxx
index eae328e..046fbc8 100644
--- a/libbuild2/cc/pkgconfig.cxx
+++ b/libbuild2/cc/pkgconfig.cxx
@@ -34,6 +34,9 @@ namespace build2
//
// @@ TODO: handle empty values (save as ''?)
//
+ // Note: may contain variable expansions (e.g., ${pcfiledir}) so it is
+ // unclear whether quoting can be used.
+ //
static string
escape (const string& s)
{
@@ -313,23 +316,120 @@ namespace build2
assert (!ap.empty () || !sp.empty ());
- // Extract --cflags and set them as lib?{}:export.poptions..
+ const scope& rs (*s.root_scope ());
+
+ const dir_path* sysroot (
+ cast_null<abs_dir_path> (rs["config.cc.pkgconfig.sysroot"]));
+
+ // Append -I<dir> or -L<dir> option suppressing duplicates. Also handle
+ // the sysroot rewrite.
+ //
+ auto append_dir = [sysroot] (strings& ops, string&& o)
+ {
+ char c (o[1]);
+
+ // @@ Should we normalize the path for good measure? But on the other
+ // hand, most of the time when it's not normalized, it will likely
+ // be "consistently-relative", e.g., something like
+ // ${prefix}/lib/../include. I guess let's wait and see for some
+ // real-world examples.
+ //
+ // Well, we now support generating relocatable .pc files that have
+ // a bunch of -I${pcfiledir}/../../include and -L${pcfiledir}/.. .
+ //
+ // On the other hand, there could be symlinks involved and just
+ // normalize() may not be correct.
+ //
+ // Note that we do normalize -L paths in the usrd logic later
+ // (but not when setting as *.export.loptions).
+
+ if (sysroot != nullptr)
+ {
+ // Notes:
+ //
+ // - The path might not be absolute (we only rewrite absolute ones).
+ //
+ // - Do this before duplicate suppression since options in ops
+ // already have the sysroot rewritten.
+ //
+ // - Check if the path already starts with sysroot since some .pc
+ // files might already be in a good shape (e.g., because they use
+ // ${pcfiledir} to support relocation properly).
+ //
+ const char* op (o.c_str () + 2);
+ size_t on (o.size () - 2);
+
+ if (path_traits::absolute (op, on))
+ {
+ const string& s (sysroot->string ());
+
+ const char* sp (s.c_str ());
+ size_t sn (s.size ());
+
+ if (!path_traits::sub (op, on, sp, sn)) // Already in sysroot.
+ {
+ // Find the first directory separator that separates the root
+ // component from the rest of the path (think /usr/include,
+ // c:\install\include). We need to replace the root component
+ // with sysroot. If there is no separator (say, -Ic:) or the
+ // path after the separator is empty (say, -I/), then we replace
+ // the entire path.
+ //
+ size_t p (path_traits::find_separator (o, 2));
+ if (p == string::npos || p + 1 == o.size ())
+ p = o.size ();
+
+ o.replace (2, p - 2, s);
+ }
+ }
+ }
+
+ for (const string& x: ops)
+ {
+ if (x.size () > 2 && x[0] == '-' && x[1] == c)
+ {
+ if (path_traits::compare (x.c_str () + 2, x.size () - 2,
+ o.c_str () + 2, o.size () - 2) == 0)
+ return; // Duplicate.
+ }
+ }
+
+ ops.push_back (move (o));
+ };
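// Illustration (hypothetical values, not from this change): with
// config.cc.pkgconfig.sysroot=/opt/sysroot, -I/usr/include is rewritten to
// -I/opt/sysroot/usr/include, a path already under /opt/sysroot is kept as
// is, and a second -I for the same directory is dropped as a duplicate.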
+
+ // Extract --cflags and set them as lib?{}:export.poptions returning the
+ // pointer to the set value. If [as]pops are not NULL, then only keep
+ // options that are present in both.
//
- auto parse_cflags = [&trace, this] (target& t,
- const pkgconfig& pc,
- bool la)
+ auto parse_cflags = [&trace,
+ this,
+ &append_dir] (target& t,
+ const pkgconfig& pc,
+ bool la,
+ const strings* apops = nullptr,
+ const strings* spops = nullptr)
+ -> const strings*
{
+ // Note that we normalize `-[IDU] <arg>` to `-[IDU]<arg>`.
+ //
strings pops;
- bool arg (false);
- for (auto& o: pc.cflags (la))
+ char arg ('\0'); // Option with pending argument.
+ for (string& o: pc.cflags (la))
{
if (arg)
{
// Can only be an argument for -I, -D, -U options.
//
- pops.push_back (move (o));
- arg = false;
+ o.insert (0, 1, arg);
+ o.insert (0, 1, '-');
+
+ if (arg == 'I')
+ append_dir (pops, move (o));
+ else
+ pops.push_back (move (o));
+
+ arg = '\0';
continue;
}
@@ -340,8 +440,15 @@ namespace build2
if (n >= 2 &&
o[0] == '-' && (o[1] == 'I' || o[1] == 'D' || o[1] == 'U'))
{
- pops.push_back (move (o));
- arg = (n == 2);
+ if (n > 2)
+ {
+ if (o[1] == 'I')
+ append_dir (pops, move (o));
+ else
+ pops.push_back (move (o));
+ }
+ else
+ arg = o[1];
continue;
}
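// For example (illustrative, not from this change): the --cflags sequence
// `-I /usr/include -DFOO -I/usr/include` ends up as
// `-I/usr/include -DFOO` (the separate argument is folded into the option
// and the duplicate -I is suppressed by append_dir).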
@@ -350,7 +457,7 @@ namespace build2
}
if (arg)
- fail << "argument expected after " << pops.back () <<
+ fail << "argument expected after -" << arg <<
info << "while parsing pkg-config --cflags " << pc.path;
if (!pops.empty ())
@@ -363,19 +470,45 @@ namespace build2
// export stub and we shouldn't touch them.
//
if (p.second)
+ {
+ // If required, only keep common stuff. While removing the entries
+ // is not the most efficient way, it is simple.
+ //
+ if (apops != nullptr || spops != nullptr)
+ {
+ for (auto i (pops.begin ()); i != pops.end (); )
+ {
+ if ((apops != nullptr && find (
+ apops->begin (), apops->end (), *i) == apops->end ()) ||
+ (spops != nullptr && find (
+ spops->begin (), spops->end (), *i) == spops->end ()))
+ i = pops.erase (i);
+ else
+ ++i;
+ }
+ }
+
p.first = move (pops);
+ return &p.first.as<strings> ();
+ }
}
+
+ return nullptr;
};
// Parse --libs into loptions/libs (interface and implementation). If
// ps is not NULL, add each resolved library target as a prerequisite.
//
- auto parse_libs = [this, act, &s, top_sysd] (target& t,
- bool binless,
- const pkgconfig& pc,
- bool la,
- prerequisites* ps)
+ auto parse_libs = [this,
+ &append_dir,
+ act, &s, top_sysd] (target& t,
+ bool binless,
+ const pkgconfig& pc,
+ bool la,
+ prerequisites* ps)
{
+ // Note that we normalize `-L <arg>` to `-L<arg>`.
+ //
strings lops;
vector<name> libs;
@@ -392,15 +525,21 @@ namespace build2
// library. What we do at the moment is stop recognizing just library
// names (without -l) after seeing an unknown option.
//
- bool arg (false), first (true), known (true), have_L;
- for (auto& o: pc.libs (la))
+ bool first (true), known (true), have_L (false);
+
+ string self; // The library itself (-l or just name/path).
+
+ char arg ('\0'); // Option with pending argument.
+ for (string& o: pc.libs (la))
{
if (arg)
{
- // Can only be an argument for an loption.
+ // Can only be an argument for an -L option.
//
- lops.push_back (move (o));
- arg = false;
+ o.insert (0, 1, arg);
+ o.insert (0, 1, '-');
+ append_dir (lops, move (o));
+ arg = '\0';
continue;
}
@@ -410,15 +549,17 @@ namespace build2
//
if (n >= 2 && o[0] == '-' && o[1] == 'L')
{
+ if (n > 2)
+ append_dir (lops, move (o));
+ else
+ arg = o[1];
have_L = true;
- lops.push_back (move (o));
- arg = (n == 2);
continue;
}
// See if that's -l, -pthread, or just the library name/path.
//
- if ((known && o[0] != '-') ||
+ if ((known && n != 0 && o[0] != '-') ||
(n > 2 && o[0] == '-' && (o[1] == 'l' || o == "-pthread")))
{
// Unless binless, the first one is the library itself, which we
@@ -426,19 +567,11 @@ namespace build2
// be some other library, but we haven't encountered such a beast
// yet.
//
- if (first)
- {
- first = false;
-
- if (!binless)
- continue;
- }
-
- // @@ If for some reason this is the library itself (doesn't go
- // first or libpkg-config parsed libs in some bizarre way) we
- // will have a dependency cycle by trying to lock its target
- // inside search_library() as by now it is already locked. To
- // be safe we probably shouldn't rely on the position and
+ // What we have encountered (e.g., in the Magick++ library) is the
+ // library itself repeated in Libs.private. So now we save it and
+ // filter all its subsequent occurrences.
+ //
+ // @@ To be safe we probably shouldn't rely on the position and
// filter out all occurrences of the library itself (by name?)
// and complain if none were encountered.
//
@@ -448,6 +581,22 @@ namespace build2
// frame around the call to search_library() to help diagnose
// such situations.
//
+ if (first)
+ {
+ first = false;
+
+ if (!binless)
+ {
+ self = move (o);
+ continue;
+ }
+ }
+ else
+ {
+ if (!binless && o == self)
+ continue;
+ }
+
libs.push_back (name (move (o)));
continue;
}
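// For example (illustrative): for Magick++ the library itself appears both
// in Libs and again in Libs.private; the first occurrence is saved in
// `self` above and later occurrences are skipped rather than resolved as
// dependencies.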
@@ -459,7 +608,7 @@ namespace build2
}
if (arg)
- fail << "argument expected after " << lops.back () <<
+ fail << "argument expected after -" << arg <<
info << "while parsing pkg-config --libs " << pc.path;
// Space-separated list of escaped library flags.
@@ -467,7 +616,7 @@ namespace build2
auto lflags = [&pc, la] () -> string
{
string r;
- for (const auto& o: pc.libs (la))
+ for (const string& o: pc.libs (la))
{
if (!r.empty ())
r += ' ';
@@ -476,7 +625,7 @@ namespace build2
return r;
};
- if (first && !binless)
+ if (!binless && self.empty ())
fail << "library expected in '" << lflags () << "'" <<
info << "while parsing pkg-config --libs " << pc.path;
@@ -517,12 +666,15 @@ namespace build2
if (l[0] != '-') // e.g., just shell32.lib
continue;
else if (cmp ("advapi32") ||
+ cmp ("authz") ||
cmp ("bcrypt") ||
+ cmp ("comdlg32") ||
cmp ("crypt32") ||
- cmp ("dbgeng") ||
- cmp ("dbghelp") ||
cmp ("d2d1") ||
cmp ("d3d", 3) || // d3d*
+ cmp ("dbgeng") ||
+ cmp ("dbghelp") ||
+ cmp ("dnsapi") ||
cmp ("dwmapi") ||
cmp ("dwrite") ||
cmp ("dxgi") ||
@@ -535,6 +687,7 @@ namespace build2
cmp ("kernel32") ||
cmp ("mincore") ||
cmp ("mpr") ||
+ cmp ("msimg32") ||
cmp ("mswsock") ||
cmp ("msxml", 5) || // msxml*
cmp ("netapi32") ||
@@ -547,6 +700,7 @@ namespace build2
cmp ("psapi") ||
cmp ("rpcrt4") ||
cmp ("secur32") ||
+ cmp ("setupapi") ||
cmp ("shell32") ||
cmp ("shlwapi") ||
cmp ("synchronization") ||
@@ -554,6 +708,7 @@ namespace build2
cmp ("userenv") ||
cmp ("uuid") ||
cmp ("version") ||
+ cmp ("windowscodecs") ||
cmp ("winhttp") ||
cmp ("winmm") ||
cmp ("winspool") ||
@@ -604,7 +759,11 @@ namespace build2
}
else if (tclass == "macos")
{
- if (l == "-lSystem")
+ // Note that Mac OS has libiconv in /usr/lib/ which only comes
+ // in the shared variant. So we treat it as system.
+ //
+ if (l == "-lSystem" ||
+ l == "-liconv")
continue;
}
else if (tclass == "bsd")
@@ -621,18 +780,13 @@ namespace build2
{
usrd = dir_paths ();
- for (auto i (lops.begin ()); i != lops.end (); ++i)
+ for (const string& o: lops)
{
- const string& o (*i);
-
- if (o.size () >= 2 && o[0] == '-' && o[1] == 'L')
+ // Note: always in the -L<dir> form (see above).
+ //
+ if (o.size () > 2 && o[0] == '-' && o[1] == 'L')
{
- string p;
-
- if (o.size () == 2)
- p = *++i; // We've verified it's there.
- else
- p = string (o, 2);
+ string p (o, 2);
try
{
@@ -643,6 +797,7 @@ namespace build2
<< lflags () << "'" <<
info << "while parsing pkg-config --libs " << pc.path;
+ d.normalize ();
usrd->push_back (move (d));
}
catch (const invalid_path& e)
@@ -722,24 +877,16 @@ namespace build2
{
// Translate -L to /LIBPATH.
//
- for (auto i (lops.begin ()); i != lops.end (); )
+ for (string& o: lops)
{
- string& o (*i);
size_t n (o.size ());
- if (n >= 2 && o[0] == '-' && o[1] == 'L')
+ // Note: always in the -L<dir> form (see above).
+ //
+ if (n > 2 && o[0] == '-' && o[1] == 'L')
{
o.replace (0, 2, "/LIBPATH:");
-
- if (n == 2)
- {
- o += *++i; // We've verified it's there.
- i = lops.erase (i);
- continue;
- }
}
-
- ++i;
}
}
@@ -933,6 +1080,14 @@ namespace build2
string mn (m, 0, p);
path mp (m, p + 1, string::npos);
+
+ // Must be absolute but may not be normalized due to a relocatable
+ // .pc file. We assume there are no symlink shenanigans that would
+ // require realize().
+ //
+ if (!mp.normalized ())
+ mp.normalize ();
+
path mf (mp.leaf ());
// Extract module properties, if any.
@@ -957,7 +1112,7 @@ namespace build2
target_decl::implied,
trace));
- target& mt (tl.first);
+ file& mt (tl.first.as<file> ());
// If the target already exists, then setting its variables is not
// MT-safe. So currently we only do it if we have the lock (and thus
@@ -975,6 +1130,7 @@ namespace build2
//
if (tl.second.owns_lock ())
{
+ mt.path (move (mp));
mt.vars.assign (c_module_name) = move (mn);
// Set module properties. Note that if unspecified we should still
@@ -1025,6 +1181,14 @@ namespace build2
for (size_t b (0), e (0); !(h = next (*val, b, e)).empty (); )
{
path hp (move (h));
+
+ // Must be absolute but may not be normalized due to a relocatable
+ // .pc file. We assume there are no symlink shenanigans that would
+ // require realize().
+ //
+ if (!hp.normalized ())
+ hp.normalize ();
+
path hf (hp.leaf ());
auto tl (
@@ -1037,7 +1201,7 @@ namespace build2
target_decl::implied,
trace));
- target& ht (tl.first);
+ file& ht (tl.first.as<file> ());
// If the target already exists, then setting its variables is not
// MT-safe. So currently we only do it if we have the lock (and thus
@@ -1046,6 +1210,7 @@ namespace build2
//
if (tl.second.owns_lock ())
{
+ ht.path (move (hp));
ht.vars.assign (c_importable) = true;
tl.second.unlock ();
}
@@ -1209,14 +1374,26 @@ namespace build2
false,
&prs);
+ const strings* apops (nullptr);
if (pa)
{
- parse_cflags (*at, apc, true);
+ apops = parse_cflags (*at, apc, true);
parse_libs (*at, at->path ().empty (), apc, true, nullptr);
}
+ const strings* spops (nullptr);
if (ps)
- parse_cflags (*st, spc, false);
+ spops = parse_cflags (*st, spc, false);
+
+ // Also set common poptions for the group. In particular, this makes
+ // sure $lib_poptions() in the "common interface" mode works for the
+ // installed libraries.
+ //
+ // Note that if there are no poptions set for either, then we cannot
+ // possibly have a common subset.
+ //
+ if (apops != nullptr || spops != nullptr)
+ parse_cflags (lt, ipc, false, apops, spops);
// @@ TODO: we can now load cc.type if there is metadata (but need to
// return this rather than set, see search_library() for
@@ -1248,7 +1425,7 @@ namespace build2
// We treat headers outside of any project as C headers (see
// enter_header() for details).
//
- parse_headers (ipc, h::static_type /* **x_hdr */, x, prs);
+ parse_headers (ipc, h::static_type /* **x_hdrs */, x, prs);
parse_headers (ipc, h::static_type, "c", prs);
}
@@ -1368,7 +1545,7 @@ namespace build2
{
bool f (ldirs.empty ());
- ldirs.push_back (resolve_dir (g, d, !f /* fail_unknown */));
+ ldirs.push_back (resolve_dir (g, d, {}, !f /* fail_unknown */));
if (f && ldirs.back ().empty ())
break;
@@ -1377,6 +1554,7 @@ namespace build2
else
ldirs.push_back (resolve_dir (g,
cast<dir_path> (g["install.lib"]),
+ {},
false /* fail_unknown */));
if (!ldirs.empty () && ldirs.front ().empty ())
@@ -1407,6 +1585,71 @@ namespace build2
if (ctx.dry_run)
return;
+ // See if we should be generating a relocatable .pc file and if so get
+ // its installation location. The plan is to make all absolute paths
+ // that we write relative to this location and prefix them with the
+ // built-in ${pcfiledir} variable (which is supported by everybody: the
+ // original pkg-config, pkgconf, and our libpkg-config library).
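+ //
+ // For example (a purely hypothetical layout): with the .pc file installed
+ // into /usr/local/lib/pkgconfig/, a header search directory such as
+ // /usr/local/include would be written as ${pcfiledir}/../../include.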
+ //
+ dir_path rel_base;
+ if (cast_false<bool> (rs["install.relocatable"]))
+ {
+ path f (install::resolve_file (*t));
+ if (!f.empty ()) // Shouldn't happen but who knows.
+ rel_base = f.directory ();
+ }
+
+ // Note: reloc_*path() expect absolute and normalized paths.
+ //
+ // Note also that reloc_path() can be used on dir_path to get the path
+ // without the trailing slash.
+ //
+ auto reloc_path = [&rel_base,
+ s = string ()] (const path& p,
+ const char* what) mutable
+ -> const string&
+ {
+ if (rel_base.empty ())
+ return p.string ();
+
+ try
+ {
+ s = p.relative (rel_base).string ();
+ }
+ catch (const invalid_path&)
+ {
+ fail << "unable to make " << what << " path " << p << " relative to "
+ << rel_base;
+ }
+
+ if (!s.empty ()) s.insert (0, 1, path_traits::directory_separator);
+ s.insert (0, "${pcfiledir}");
+ return s;
+ };
+
+ auto reloc_dir_path = [&rel_base,
+ s = string ()] (const dir_path& p,
+ const char* what) mutable
+ -> const string&
+ {
+ if (rel_base.empty ())
+ return (s = p.representation ());
+
+ try
+ {
+ s = p.relative (rel_base).representation ();
+ }
+ catch (const invalid_path&)
+ {
+ fail << "unable to make " << what << " path " << p << " relative to "
+ << rel_base;
+ }
+
+ if (!s.empty ()) s.insert (0, 1, path_traits::directory_separator);
+ s.insert (0, "${pcfiledir}");
+ return s;
+ };
+
auto_rmfile arm (p);
try
@@ -1558,7 +1801,7 @@ namespace build2
//
os << "Cflags:";
for (const dir_path& d: idirs)
- os << " -I" << escape (d.string ());
+ os << " -I" << escape (reloc_path (d, "header search"));
save_poptions (x_export_poptions);
save_poptions (c_export_poptions);
os << endl;
@@ -1578,7 +1821,7 @@ namespace build2
// necessary to resolve its binful dependencies.
//
for (const dir_path& d: ldirs)
- os << " -L" << escape (d.string ());
+ os << " -L" << escape (reloc_path (d, "library search"));
// Now process ourselves as if we were being linked to something (so
// pretty similar to link_rule::append_libraries()). We also reuse
@@ -1671,7 +1914,7 @@ namespace build2
//@@ TODO: should we filter -L similar to -I?
//@@ TODO: how will the Libs/Libs.private work?
- //@@ TODO: remember to use escape()
+ //@@ TODO: remember to use reloc_*() and escape().
if (d.pls != nullptr && d.pls->find (l) != nullptr)
return true;
@@ -1765,7 +2008,8 @@ namespace build2
}
catch (const invalid_argument& e)
{
- fail << "invalid metadata version in library " << g << ": " << e;
+ fail << "invalid metadata version in library " << g << ": " << e
+ << endf;
}
if (ver != 1)
@@ -1923,16 +2167,43 @@ namespace build2
const value& val (*b.val);
names ns;
- names_view nv (reverse (val, ns));
+ names_view nv (reverse (val, ns, true /* reduce */));
os << *b.name << " =";
- auto append = [&l, &var, &s] (const name& v)
+ auto append = [&rel_base,
+ &reloc_path,
+ &reloc_dir_path,
+ &l, &var, &val, &s] (const name& v)
{
+ // If this is absolute path or dir_path, then attempt to
+ // relocate. Without that the result will not be relocatable.
+ //
if (v.simple ())
- s += v.value;
+ {
+ path p;
+ if (!rel_base.empty () &&
+ val.type != nullptr &&
+ (val.type->is_a<path> () || val.type->is_a<paths> ()) &&
+ (p = path (v.value)).absolute ())
+ {
+ p.normalize ();
+ s += reloc_path (p, var.name.c_str ());
+ }
+ else
+ s += v.value;
+ }
else if (v.directory ())
- s += v.dir.representation ();
+ {
+ if (!rel_base.empty () && v.dir.absolute ())
+ {
+ dir_path p (v.dir);
+ p.normalize ();
+ s += reloc_dir_path (p, var.name.c_str ());
+ }
+ else
+ s += v.dir.representation ();
+ }
else
// It seems like we shouldn't end up here due to the type
// check but let's keep it for good measure.
@@ -2090,7 +2361,7 @@ namespace build2
move (pp),
symexport});
}
- else if (pt->is_a (**x_hdr) || pt->is_a<h> ())
+ else if (pt->is_a (**this->x_hdrs) || pt->is_a<h> ())
{
if (cast_false<bool> ((*pt)[c_importable]))
{
@@ -2133,7 +2404,8 @@ namespace build2
// Module names shouldn't require escaping.
//
os << (n != 1 ? " \\\n" : " ")
- << m.name << '=' << escape (m.file.string ());
+ << m.name << '='
+ << escape (reloc_path (m.file, "module interface"));
}
os << endl;
@@ -2159,7 +2431,8 @@ namespace build2
<< "c.importable_headers =";
for (const path& h: c_hdrs)
- os << (n != 1 ? " \\\n" : " ") << escape (h.string ());
+ os << (n != 1 ? " \\\n" : " ")
+ << escape (reloc_path (h, "header unit"));
os << endl;
}
@@ -2170,7 +2443,8 @@ namespace build2
<< x << ".importable_headers =";
for (const path& h: x_hdrs)
- os << (n != 1 ? " \\\n" : " ") << escape (h.string ());
+ os << (n != 1 ? " \\\n" : " ")
+ << escape (reloc_path (h, "header unit"));
os << endl;
}
diff --git a/libbuild2/cc/pkgconfig.hxx b/libbuild2/cc/pkgconfig.hxx
index 7959da1..a1bcdee 100644
--- a/libbuild2/cc/pkgconfig.hxx
+++ b/libbuild2/cc/pkgconfig.hxx
@@ -56,8 +56,8 @@ namespace build2
// Movable-only type.
//
- pkgconfig (pkgconfig&&);
- pkgconfig& operator= (pkgconfig&&);
+ pkgconfig (pkgconfig&&) noexcept;
+ pkgconfig& operator= (pkgconfig&&) noexcept;
pkgconfig (const pkgconfig&) = delete;
pkgconfig& operator= (const pkgconfig&) = delete;
@@ -95,7 +95,7 @@ namespace build2
}
inline pkgconfig::
- pkgconfig (pkgconfig&& p)
+ pkgconfig (pkgconfig&& p) noexcept
: path (move (p.path)),
client_ (p.client_),
pkg_ (p.pkg_)
@@ -105,7 +105,7 @@ namespace build2
}
inline pkgconfig& pkgconfig::
- operator= (pkgconfig&& p)
+ operator= (pkgconfig&& p) noexcept
{
if (this != &p)
{
diff --git a/libbuild2/cc/predefs-rule.cxx b/libbuild2/cc/predefs-rule.cxx
new file mode 100644
index 0000000..e74192d
--- /dev/null
+++ b/libbuild2/cc/predefs-rule.cxx
@@ -0,0 +1,379 @@
+// file : libbuild2/cc/predefs-rule.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <libbuild2/cc/predefs-rule.hxx>
+
+#include <libbuild2/depdb.hxx>
+#include <libbuild2/target.hxx>
+#include <libbuild2/context.hxx>
+#include <libbuild2/algorithm.hxx>
+#include <libbuild2/filesystem.hxx>
+#include <libbuild2/diagnostics.hxx>
+
+namespace build2
+{
+ namespace cc
+ {
+ predefs_rule::
+ predefs_rule (data&& d)
+ : common (move (d)),
+ rule_name (string (x) += ".predefs"),
+ rule_id (rule_name + " 1")
+ {
+ }
+
+ bool predefs_rule::
+ match (action, target&, const string& hint, match_extra&) const
+ {
+ tracer trace (x, "predefs_rule::match");
+
+ // We only match with an explicit hint (failing that, we would turn every
+ // header into predefs).
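+ //
+ // For example (assuming the rule_hint attribute and C++; the target name
+ // is just an illustration), a buildfile could request this rule as:
+ //
+ //   [rule_hint=cxx.predefs] hxx{predefs}: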
+ //
+ if (hint == rule_name)
+ {
+ // Don't match if unsupported compiler. In particular, this allows the
+ // user to provide a fallback rule.
+ //
+ switch (cclass)
+ {
+ case compiler_class::gcc: return true;
+ case compiler_class::msvc:
+ {
+ // Only MSVC 19.20 or later. Not tested with clang-cl.
+ //
+ if (cvariant.empty () && (cmaj > 19 || (cmaj == 19 && cmin >= 20)))
+ return true;
+
+ l4 ([&]{trace << "unsupported compiler/version";});
+ break;
+ }
+ }
+ }
+
+ return false;
+ }
+
+ recipe predefs_rule::
+ apply (action a, target& xt, match_extra&) const
+ {
+ file& t (xt.as<file> ());
+ t.derive_path ();
+
+ // Inject dependency on the output directory.
+ //
+ inject_fsdir (a, t);
+
+ if (a == perform_update_id)
+ {
+ return [this] (action a, const target& xt)
+ {
+ return perform_update (a, xt);
+ };
+ }
+ else if (a == perform_clean_id)
+ {
+ return [] (action a, const target& t)
+ {
+ // Also remove the temporary input source file in case it wasn't
+ // removed at the end of the update.
+ //
+ return perform_clean_extra (a, t.as<file> (), {".d", ".t"});
+ };
+ }
+ else
+ return noop_recipe; // Configure update.
+ }
+
+ // Filter noise, sanitize options (msvc.cxx).
+ //
+ void
+ msvc_filter_cl (diag_buffer&, const path& src);
+
+ void
+ msvc_sanitize_cl (cstrings&);
+
+ target_state predefs_rule::
+ perform_update (action a, const target& xt) const
+ {
+ tracer trace (x, "predefs_rule::perform_update");
+
+ const file& t (xt.as<file> ());
+ const path& tp (t.path ());
+
+ context& ctx (t.ctx);
+
+ const scope& rs (t.root_scope ());
+
+ // Execute prerequisites (the output directory being the only one, thus
+ // no mtime checking).
+ //
+ execute_prerequisites (a, t);
+
+ // Use depdb to track changes to options, compiler, etc (similar to
+ // the compile_rule).
+ //
+ depdb dd (tp + ".d");
+ {
+ // First should come the rule name/version.
+ //
+ if (dd.expect (rule_id) != nullptr)
+ l4 ([&]{trace << "rule mismatch forcing update of " << t;});
+
+ // Then the compiler checksum.
+ //
+ if (dd.expect (cast<string> (rs[x_checksum])) != nullptr)
+ l4 ([&]{trace << "compiler mismatch forcing update of " << t;});
+
+ // Then the compiler environment checksum.
+ //
+ if (dd.expect (env_checksum) != nullptr)
+ l4 ([&]{trace << "environment mismatch forcing update of " << t;});
+
+ // Finally the options checksum (as below).
+ //
+ {
+ sha256 cs;
+ append_options (cs, t, c_coptions);
+ append_options (cs, t, x_coptions);
+ append_options (cs, cmode);
+
+ if (dd.expect (cs.string ()) != nullptr)
+ l4 ([&]{trace << "options mismatch forcing update of " << t;});
+ }
+ }
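+
+ // A mismatch in any of the above (rule name/version, compiler,
+ // environment, or options) thus forces an update of the target below.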
+
+ // Update if depdb mismatch.
+ //
+ bool update (dd.writing () || dd.mtime > t.load_mtime ());
+
+ dd.close ();
+
+ if (!update)
+ return target_state::unchanged; // No mtime-based prerequisites.
+
+ // Prepare the compiler command-line.
+ //
+ cstrings args {cpath.recall_string ()};
+
+ // Append compile options.
+ //
+ // Note that any command line macros that we specify with -D will end up
+ // in the predefs, which is something we don't want. So no poptions.
+ //
+ append_options (args, t, c_coptions);
+ append_options (args, t, x_coptions);
+ append_options (args, cmode);
+
+ // The output and input paths, relative to the working directory for
+ // easier-to-read diagnostics.
+ //
+ path relo (relative (tp));
+ path reli;
+
+ // Add compiler-specific command-line arguments.
+ //
+ switch (cclass)
+ {
+ case compiler_class::gcc:
+ {
+ // Add implied options which may affect predefs, similar to the
+ // compile rule.
+ //
+ if (!find_option_prefix ("-finput-charset=", args))
+ args.push_back ("-finput-charset=UTF-8");
+
+ if (ctype == compiler_type::clang && tsys == "win32-msvc")
+ {
+ if (!find_options ({"-nostdlib", "-nostartfiles"}, args))
+ {
+ args.push_back ("-D_MT");
+ args.push_back ("-D_DLL");
+ }
+ }
+
+ if (ctype == compiler_type::clang && cvariant == "emscripten")
+ {
+ if (x_lang == lang::cxx)
+ {
+ if (!find_option_prefix ("DISABLE_EXCEPTION_CATCHING=", args))
+ {
+ args.push_back ("-s");
+ args.push_back ("DISABLE_EXCEPTION_CATCHING=0");
+ }
+ }
+ }
+
+ args.push_back ("-E"); // Stop after the preprocessing stage.
+ args.push_back ("-dM"); // Generate #define directives.
+
+ // Output.
+ //
+ args.push_back ("-o");
+ args.push_back (relo.string ().c_str ());
+
+ // Input.
+ //
+ args.push_back ("-x");
+ switch (x_lang)
+ {
+ case lang::c: args.push_back ("c"); break;
+ case lang::cxx: args.push_back ("c++"); break;
+ }
+
+ // With GCC and Clang we can compile /dev/null as stdin by
+ // specifying `-` and thus omitting the temporary file.
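+ //
+ // The resulting command line is then along these lines (the output name
+ // here is just an illustration):
+ //
+ //   g++ <coptions/mode> -E -dM -o predefs.hxx -x c++ -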
+ //
+ args.push_back ("-");
+
+ break;
+ }
+ case compiler_class::msvc:
+ {
+ // Add implied options which may affect predefs, similar to the
+ // compile rule.
+ //
+ {
+ // Note: these affect the _MSVC_EXECUTION_CHARACTER_SET, _UTF8
+ // macros.
+ //
+ bool sc (find_option_prefixes (
+ {"/source-charset:", "-source-charset:"}, args));
+ bool ec (find_option_prefixes (
+ {"/execution-charset:", "-execution-charset:"}, args));
+
+ if (!sc && !ec)
+ args.push_back ("/utf-8");
+ else
+ {
+ if (!sc)
+ args.push_back ("/source-charset:UTF-8");
+
+ if (!ec)
+ args.push_back ("/execution-charset:UTF-8");
+ }
+ }
+
+ if (x_lang == lang::cxx)
+ {
+ if (!find_option_prefixes ({"/EH", "-EH"}, args))
+ args.push_back ("/EHsc");
+ }
+
+ if (!find_option_prefixes ({"/MD", "/MT", "-MD", "-MT"}, args))
+ args.push_back ("/MD");
+
+ msvc_sanitize_cl (args);
+
+ args.push_back ("/nologo");
+
+ // /EP may seem like it contradicts /P but it's the recommended
+ // way to suppress `#line`s from the output of the /P option (see
+ // /P in the "MSVC Compiler Options" documentation).
+ //
+ args.push_back ("/P"); // Write preprocessor output to a file.
+ args.push_back ("/EP"); // Preprocess to stdout without `#line`s.
+
+ args.push_back ("/PD"); // Print all macro definitions.
+ args.push_back ("/Zc:preprocessor"); // Preproc. conformance mode.
+
+ // Output (note that while the /Fi: variant is only available
+ // starting with VS2013, /Zc:preprocessor is only available
+ // starting from VS2019).
+ //
+ args.push_back ("/Fi:");
+ args.push_back (relo.string ().c_str ());
+
+ // Input.
+ //
+ switch (x_lang)
+ {
+ case lang::c: args.push_back ("/TC"); break;
+ case lang::cxx: args.push_back ("/TP"); break;
+ }
+
+ // Input path.
+ //
+ // Note that with MSVC we have to use a temporary file. In
+ // particular compiling `nul` does not work.
+ //
+ reli = relo + ".t";
+ args.push_back (reli.string ().c_str ());
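+
+ // Altogether the command line looks roughly like this (again, the
+ // options and names are illustrative):
+ //
+ //   cl <coptions/mode> /nologo /P /EP /PD /Zc:preprocessor
+ //      /Fi: predefs.hxx /TP predefs.hxx.t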
+
+ break;
+ }
+ }
+
+ args.push_back (nullptr);
+
+ // Run the compiler.
+ //
+ if (verb >= 2)
+ print_process (args);
+ else if (verb)
+ print_diag ((string (x_name) + "-predefs").c_str (), t);
+
+ if (!ctx.dry_run)
+ {
+ // Create an empty temporary input source file, if necessary.
+ //
+ auto_rmfile rmi;
+ if (!reli.empty ())
+ {
+ rmi = auto_rmfile (reli);
+
+ if (exists (reli, false /* follow_symlinks */))
+ rmfile (ctx, reli, 3 /* verbosity */);
+
+ touch (ctx, reli, true /* create */, 3 /* verbosity */);
+ }
+
+ try
+ {
+ // VC cl.exe sends diagnostics to stdout. It also prints the file
+ // name being compiled as the first line. So for cl.exe we filter
+ // that noise out.
+ //
+ // For other compilers also redirect stdout to stderr, in case any
+ // of them tries to pull off something similar. For sane compilers
+ // this should be harmless.
+ //
+ // We also redirect stdin to /dev/null in case that's used instead
+ // of the temporary file.
+ //
+ // Note: similar logic as in compile_rule.
+ //
+ bool filter (ctype == compiler_type::msvc);
+
+ process pr (cpath,
+ args,
+ -2, /* stdin */
+ 2, /* stdout */
+ diag_buffer::pipe (ctx, filter /* force */) /* stderr */);
+
+ diag_buffer dbuf (ctx, args[0], pr);
+
+ if (filter)
+ msvc_filter_cl (dbuf, reli);
+
+ dbuf.read ();
+
+ run_finish (dbuf, args, pr, 1 /* verbosity */);
+ dd.check_mtime (tp);
+ }
+ catch (const process_error& e)
+ {
+ error << "unable to execute " << args[0] << ": " << e;
+
+ if (e.child)
+ exit (1);
+
+ throw failed ();
+ }
+ }
+
+ t.mtime (system_clock::now ());
+ return target_state::changed;
+ }
+ }
+}
diff --git a/libbuild2/cc/predefs-rule.hxx b/libbuild2/cc/predefs-rule.hxx
new file mode 100644
index 0000000..60aa063
--- /dev/null
+++ b/libbuild2/cc/predefs-rule.hxx
@@ -0,0 +1,45 @@
+// file : libbuild2/cc/predefs-rule.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef LIBBUILD2_CC_PREDEFS_RULE_HXX
+#define LIBBUILD2_CC_PREDEFS_RULE_HXX
+
+#include <libbuild2/types.hxx>
+#include <libbuild2/utility.hxx>
+
+#include <libbuild2/rule.hxx>
+
+#include <libbuild2/cc/types.hxx>
+#include <libbuild2/cc/common.hxx>
+
+#include <libbuild2/cc/export.hxx>
+
+namespace build2
+{
+ namespace cc
+ {
+ class LIBBUILD2_CC_SYMEXPORT predefs_rule: public rule,
+ virtual common
+ {
+ public:
+ const string rule_name;
+
+ explicit
+ predefs_rule (data&&);
+
+ virtual bool
+ match (action, target&, const string&, match_extra&) const override;
+
+ virtual recipe
+ apply (action, target&, match_extra&) const override;
+
+ target_state
+ perform_update (action, const target&) const;
+
+ private:
+ const string rule_id;
+ };
+ }
+}
+
+#endif // LIBBUILD2_CC_PREDEFS_RULE_HXX
diff --git a/libbuild2/cc/std.cppm b/libbuild2/cc/std.cppm
new file mode 100644
index 0000000..5368d1c
--- /dev/null
+++ b/libbuild2/cc/std.cppm
@@ -0,0 +1,6781 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// WARNING, this entire header is generated by
+// utils/generate_std_cppm_in.py
+// DO NOT MODIFY!
+
+module;
+
+#include <__config>
+
+#if _LIBCPP_VERSION < 170000
+#error libc++ version 17.0.0 or later required
+#endif
+
+// The headers of Table 24: C++ library headers [tab:headers.cpp]
+// and the headers of Table 25: C++ headers for C library facilities [tab:headers.cpp.c]
+#include <algorithm>
+#include <any>
+#include <array>
+#if !defined(_LIBCPP_HAS_NO_ATOMIC_HEADER)
+# include <atomic>
+#endif
+#if !defined(_LIBCPP_HAS_NO_THREADS)
+# include <barrier>
+#endif
+#include <bit>
+#include <bitset>
+#include <cassert>
+#include <cctype>
+#include <cerrno>
+#include <cfenv>
+#include <cfloat>
+#include <charconv>
+#include <chrono>
+#include <cinttypes>
+#include <climits>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <clocale>
+#endif
+#include <cmath>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <codecvt>
+#endif
+#include <compare>
+#include <complex>
+#include <concepts>
+#include <condition_variable>
+#include <coroutine>
+#include <csetjmp>
+#include <csignal>
+#include <cstdarg>
+#include <cstddef>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <ctime>
+#include <cuchar>
+#if !defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS)
+# include <cwchar>
+#endif
+#if !defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS)
+# include <cwctype>
+#endif
+#include <deque>
+#include <exception>
+#include <execution>
+#include <expected>
+#include <filesystem>
+#include <format>
+#include <forward_list>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <fstream>
+#endif
+#include <functional>
+#if !defined(_LIBCPP_HAS_NO_THREADS)
+# include <future>
+#endif
+#include <initializer_list>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <iomanip>
+#endif
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <ios>
+#endif
+#include <iosfwd>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <iostream>
+#endif
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <istream>
+#endif
+#include <iterator>
+#if !defined(_LIBCPP_HAS_NO_THREADS)
+# include <latch>
+#endif
+#include <limits>
+#include <list>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <locale>
+#endif
+#include <map>
+#include <mdspan>
+#include <memory>
+#include <memory_resource>
+#include <mutex>
+#include <new>
+#include <numbers>
+#include <numeric>
+#include <optional>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <ostream>
+#endif
+#include <print>
+#include <queue>
+#include <random>
+#include <ranges>
+#include <ratio>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <regex>
+#endif
+#include <scoped_allocator>
+#if !defined(_LIBCPP_HAS_NO_THREADS)
+# include <semaphore>
+#endif
+#include <set>
+#if !defined(_LIBCPP_HAS_NO_THREADS)
+# include <shared_mutex>
+#endif
+#include <source_location>
+#include <span>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <sstream>
+#endif
+#include <stack>
+#include <stdexcept>
+#if !defined(_LIBCPP_HAS_NO_THREADS)
+# include <stop_token>
+#endif
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <streambuf>
+#endif
+#include <string>
+#include <string_view>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <strstream>
+#endif
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+#if __has_include(<syncstream>)
+# define _LIPCPP_HAS_YES_SYNCSTREAM
+# include <syncstream>
+#endif
+#endif
+#include <system_error>
+#if !defined(_LIBCPP_HAS_NO_THREADS)
+# include <thread>
+#endif
+#include <tuple>
+#include <type_traits>
+#include <typeindex>
+#include <typeinfo>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+#include <valarray>
+#include <variant>
+#include <vector>
+#include <version>
+
+#if 0
+// *** Headers not yet available ***
+#if __has_include(<debugging>)
+# error "update the header information for <debugging> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<debugging>)
+#if __has_include(<flat_map>)
+# error "update the header information for <flat_map> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<flat_map>)
+#if __has_include(<flat_set>)
+# error "update the header information for <flat_set> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<flat_set>)
+#if __has_include(<generator>)
+# error "update the header information for <generator> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<generator>)
+#if __has_include(<hazard_pointer>)
+# error "update the header information for <hazard_pointer> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<hazard_pointer>)
+#if __has_include(<linalg>)
+# error "update the header information for <linalg> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<linalg>)
+#if __has_include(<rcu>)
+# error "update the header information for <rcu> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<rcu>)
+#if __has_include(<spanstream>)
+# error "update the header information for <spanstream> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<spanstream>)
+#if __has_include(<stacktrace>)
+# error "update the header information for <stacktrace> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<stacktrace>)
+#if __has_include(<stdfloat>)
+# error "update the header information for <stdfloat> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<stdfloat>)
+#if __has_include(<text_encoding>)
+# error "update the header information for <text_encoding> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<text_encoding>)
+#endif
+
+export module std;
+
+// algorithm.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ namespace ranges {
+ // [algorithms.results], algorithm result types
+ using std::ranges::in_found_result;
+ using std::ranges::in_fun_result;
+ using std::ranges::in_in_out_result;
+ using std::ranges::in_in_result;
+ using std::ranges::in_out_out_result;
+ using std::ranges::in_out_result;
+ // using std::ranges::in_value_result;
+ using std::ranges::min_max_result;
+ // using std::ranges::out_value_result;
+ } // namespace ranges
+
+ // [alg.nonmodifying], non-modifying sequence operations
+ // [alg.all.of], all of
+ using std::all_of;
+ namespace ranges {
+ using std::ranges::all_of;
+ }
+
+ // [alg.any.of], any of
+ using std::any_of;
+ namespace ranges {
+ using std::ranges::any_of;
+ }
+
+ // [alg.none.of], none of
+ using std::none_of;
+ namespace ranges {
+ using std::ranges::none_of;
+ }
+
+ // [alg.contains], contains
+#if 0
+ namespace ranges {
+ using std::ranges::contains;
+ using std::ranges::contains_subrange;
+ } // namespace ranges
+#endif
+
+ // [alg.foreach], for each
+ using std::for_each;
+
+ namespace ranges {
+ using std::ranges::for_each;
+ using std::ranges::for_each_result;
+ } // namespace ranges
+
+ using std::for_each_n;
+
+ namespace ranges {
+ using std::ranges::for_each_n_result;
+
+ using std::ranges::for_each_n;
+ } // namespace ranges
+
+ // [alg.find], find
+ using std::find;
+ using std::find_if;
+ using std::find_if_not;
+
+ namespace ranges {
+ using std::ranges::find;
+ using std::ranges::find_if;
+ using std::ranges::find_if_not;
+ } // namespace ranges
+
+ namespace ranges {
+#if 0
+ using std::ranges::find_last;
+ using std::ranges::find_last_if;
+ using std::ranges::find_last_if_not;
+#endif
+ } // namespace ranges
+
+ // [alg.find.end], find end
+ using std::find_end;
+
+ namespace ranges {
+ using std::ranges::find_end;
+ }
+
+ // [alg.find.first.of], find first
+ using std::find_first_of;
+
+ namespace ranges {
+ using std::ranges::find_first_of;
+ }
+
+ // [alg.adjacent.find], adjacent find
+ using std::adjacent_find;
+
+ namespace ranges {
+ using std::ranges::adjacent_find;
+ }
+
+ // [alg.count], count
+ using std::count;
+ using std::count_if;
+
+ namespace ranges {
+ using std::ranges::count;
+ using std::ranges::count_if;
+ } // namespace ranges
+
+ // [mismatch], mismatch
+ using std::mismatch;
+
+ namespace ranges {
+ using std::ranges::mismatch_result;
+
+ using std::ranges::mismatch;
+ } // namespace ranges
+
+ // [alg.equal], equal
+ using std::equal;
+
+ namespace ranges {
+ using std::ranges::equal;
+ }
+
+ // [alg.is.permutation], is permutation
+ using std::is_permutation;
+
+ namespace ranges {
+ using std::ranges::is_permutation;
+ }
+
+ // [alg.search], search
+ using std::search;
+
+ namespace ranges {
+ using std::ranges::search;
+ }
+
+ using std::search_n;
+
+ namespace ranges {
+ using std::ranges::search_n;
+ }
+
+ namespace ranges {
+#if _LIBCPP_STD_VER >= 23
+ // [alg.starts.with], starts with
+ using std::ranges::starts_with;
+
+#if _LIBCPP_VERSION >= 180000
+ // [alg.ends.with], ends with
+ using std::ranges::ends_with;
+#endif
+
+# if 0
+ // [alg.fold], fold
+ using std::ranges::fold_left;
+ using std::ranges::fold_left_first;
+ using std::ranges::fold_right;
+ using std::ranges::fold_right_last;
+ using std::ranges::fold_left_with_iter;
+ using std::ranges::fold_left_with_iter_result;
+ using std::ranges::fold_left_with_iter;
+ using std::ranges::fold_left_first_with_iter;
+ using std::ranges::fold_left_first_with_iter;
+# endif
+#endif // _LIBCPP_STD_VER >= 23
+ } // namespace ranges
+
+ // [alg.modifying.operations], mutating sequence operations
+ // [alg.copy], copy
+ using std::copy;
+
+ namespace ranges {
+ using std::ranges::copy;
+ using std::ranges::copy_result;
+ } // namespace ranges
+
+ using std::copy_n;
+
+ namespace ranges {
+ using std::ranges::copy_n;
+ using std::ranges::copy_n_result;
+ } // namespace ranges
+
+ using std::copy_if;
+
+ namespace ranges {
+ using std::ranges::copy_if;
+ using std::ranges::copy_if_result;
+ } // namespace ranges
+
+ using std::copy_backward;
+
+ namespace ranges {
+ using std::ranges::copy_backward;
+ using std::ranges::copy_backward_result;
+ } // namespace ranges
+
+ // [alg.move], move
+ using std::move;
+
+ namespace ranges {
+ using std::ranges::move;
+ using std::ranges::move_result;
+ } // namespace ranges
+
+ using std::move_backward;
+
+ namespace ranges {
+ using std::ranges::move_backward;
+ using std::ranges::move_backward_result;
+ } // namespace ranges
+
+ // [alg.swap], swap
+ using std::swap_ranges;
+
+ namespace ranges {
+ using std::ranges::swap_ranges;
+ using std::ranges::swap_ranges_result;
+ } // namespace ranges
+
+ using std::iter_swap;
+
+ // [alg.transform], transform
+ using std::transform;
+
+ namespace ranges {
+ using std::ranges::binary_transform_result;
+ using std::ranges::unary_transform_result;
+
+ using std::ranges::transform;
+
+ } // namespace ranges
+
+ using std::replace;
+ using std::replace_if;
+
+ namespace ranges {
+ using std::ranges::replace;
+ using std::ranges::replace_if;
+ } // namespace ranges
+
+ using std::replace_copy;
+ using std::replace_copy_if;
+
+ namespace ranges {
+ using std::ranges::replace_copy;
+ using std::ranges::replace_copy_if;
+ using std::ranges::replace_copy_if_result;
+ using std::ranges::replace_copy_result;
+ } // namespace ranges
+
+ // [alg.fill], fill
+ using std::fill;
+ using std::fill_n;
+
+ namespace ranges {
+ using std::ranges::fill;
+ using std::ranges::fill_n;
+ } // namespace ranges
+
+ // [alg.generate], generate
+ using std::generate;
+ using std::generate_n;
+
+ namespace ranges {
+ using std::ranges::generate;
+ using std::ranges::generate_n;
+ } // namespace ranges
+
+ // [alg.remove], remove
+ using std::remove;
+ using std::remove_if;
+
+ namespace ranges {
+ using std::ranges::remove;
+ using std::ranges::remove_if;
+ } // namespace ranges
+
+ using std::remove_copy;
+ using std::remove_copy_if;
+ namespace ranges {
+ using std::ranges::remove_copy;
+ using std::ranges::remove_copy_if;
+ using std::ranges::remove_copy_if_result;
+ using std::ranges::remove_copy_result;
+ } // namespace ranges
+
+ // [alg.unique], unique
+ using std::unique;
+
+ namespace ranges {
+ using std::ranges::unique;
+ }
+
+ using std::unique_copy;
+
+ namespace ranges {
+ using std::ranges::unique_copy;
+ using std::ranges::unique_copy_result;
+ } // namespace ranges
+
+ // [alg.reverse], reverse
+ using std::reverse;
+
+ namespace ranges {
+ using std::ranges::reverse;
+ }
+
+ using std::reverse_copy;
+
+ namespace ranges {
+ using std::ranges::reverse_copy;
+ using std::ranges::reverse_copy_result;
+ } // namespace ranges
+
+ // [alg.rotate], rotate
+ using std::rotate;
+
+ namespace ranges {
+ using std::ranges::rotate;
+ }
+
+ using std::rotate_copy;
+
+ namespace ranges {
+ using std::ranges::rotate_copy;
+ using std::ranges::rotate_copy_result;
+ } // namespace ranges
+
+ // [alg.random.sample], sample
+ using std::sample;
+
+ namespace ranges {
+ using std::ranges::sample;
+ }
+
+ // [alg.random.shuffle], shuffle
+ using std::shuffle;
+
+ namespace ranges {
+ using std::ranges::shuffle;
+ }
+
+ // [alg.shift], shift
+ using std::shift_left;
+
+ namespace ranges {
+ // using std::ranges::shift_left;
+ }
+
+ using std::shift_right;
+
+ namespace ranges {
+ // using std::ranges::shift_right;
+ }
+
+ // [alg.sorting], sorting and related operations
+ // [alg.sort], sorting
+ using std::sort;
+
+ namespace ranges {
+ using std::ranges::sort;
+ }
+
+ using std::stable_sort;
+
+ namespace ranges {
+ using std::ranges::stable_sort;
+ }
+
+ using std::partial_sort;
+
+ namespace ranges {
+ using std::ranges::partial_sort;
+ }
+ using std::partial_sort_copy;
+
+ namespace ranges {
+ using std::ranges::partial_sort_copy;
+ using std::ranges::partial_sort_copy_result;
+ } // namespace ranges
+
+ using std::is_sorted;
+ using std::is_sorted_until;
+
+ namespace ranges {
+ using std::ranges::is_sorted;
+ using std::ranges::is_sorted_until;
+ } // namespace ranges
+
+ // [alg.nth.element], Nth element
+ using std::nth_element;
+
+ namespace ranges {
+ using std::ranges::nth_element;
+ }
+
+ // [alg.binary.search], binary search
+ using std::lower_bound;
+
+ namespace ranges {
+ using std::ranges::lower_bound;
+ }
+
+ using std::upper_bound;
+
+ namespace ranges {
+ using std::ranges::upper_bound;
+ }
+
+ using std::equal_range;
+
+ namespace ranges {
+ using std::ranges::equal_range;
+ }
+
+ using std::binary_search;
+
+ namespace ranges {
+ using std::ranges::binary_search;
+ }
+
+ // [alg.partitions], partitions
+ using std::is_partitioned;
+
+ namespace ranges {
+ using std::ranges::is_partitioned;
+ }
+
+ using std::partition;
+
+ namespace ranges {
+ using std::ranges::partition;
+ }
+
+ using std::stable_partition;
+
+ namespace ranges {
+ using std::ranges::stable_partition;
+ }
+
+ using std::partition_copy;
+
+ namespace ranges {
+ using std::ranges::partition_copy;
+ using std::ranges::partition_copy_result;
+ } // namespace ranges
+
+ using std::partition_point;
+
+ namespace ranges {
+ using std::ranges::partition_point;
+ }
+ // [alg.merge], merge
+ using std::merge;
+ namespace ranges {
+ using std::ranges::merge;
+ using std::ranges::merge_result;
+ } // namespace ranges
+
+ using std::inplace_merge;
+
+ namespace ranges {
+ using std::ranges::inplace_merge;
+ }
+
+ // [alg.set.operations], set operations
+ using std::includes;
+ namespace ranges {
+ using std::ranges::includes;
+ }
+
+ using std::set_union;
+
+ namespace ranges {
+ using std::ranges::set_union;
+ using std::ranges::set_union_result;
+ } // namespace ranges
+
+ using std::set_intersection;
+ namespace ranges {
+ using std::ranges::set_intersection;
+ using std::ranges::set_intersection_result;
+ } // namespace ranges
+
+ using std::set_difference;
+
+ namespace ranges {
+ using std::ranges::set_difference;
+ using std::ranges::set_difference_result;
+ } // namespace ranges
+
+ using std::set_symmetric_difference;
+
+ namespace ranges {
+ using std::ranges::set_symmetric_difference_result;
+
+ using std::ranges::set_symmetric_difference;
+ } // namespace ranges
+
+ // [alg.heap.operations], heap operations
+ using std::push_heap;
+
+ namespace ranges {
+ using std::ranges::push_heap;
+ }
+
+ using std::pop_heap;
+
+ namespace ranges {
+ using std::ranges::pop_heap;
+ }
+
+ using std::make_heap;
+
+ namespace ranges {
+ using std::ranges::make_heap;
+ }
+
+ using std::sort_heap;
+
+ namespace ranges {
+ using std::ranges::sort_heap;
+ }
+
+ using std::is_heap;
+
+ namespace ranges {
+ using std::ranges::is_heap;
+ }
+
+ using std::is_heap_until;
+
+ namespace ranges {
+ using std::ranges::is_heap_until;
+ }
+
+ // [alg.min.max], minimum and maximum
+ using std::min;
+
+ namespace ranges {
+ using std::ranges::min;
+ }
+
+ using std::max;
+
+ namespace ranges {
+ using std::ranges::max;
+ }
+
+ using std::minmax;
+
+ namespace ranges {
+ using std::ranges::minmax_result;
+
+ using std::ranges::minmax;
+ } // namespace ranges
+
+ using std::min_element;
+
+ namespace ranges {
+ using std::ranges::min_element;
+ }
+
+ using std::max_element;
+
+ namespace ranges {
+ using std::ranges::max_element;
+ }
+
+ using std::minmax_element;
+
+ namespace ranges {
+ using std::ranges::minmax_element_result;
+
+ using std::ranges::minmax_element;
+ } // namespace ranges
+ // [alg.clamp], bounded value
+ using std::clamp;
+
+ namespace ranges {
+ using std::ranges::clamp;
+ }
+
+ // [alg.lex.comparison], lexicographical comparison
+ using std::lexicographical_compare;
+
+ namespace ranges {
+ using std::ranges::lexicographical_compare;
+ }
+
+ // [alg.three.way], three-way comparison algorithms
+ using std::lexicographical_compare_three_way;
+
+ // [alg.permutation.generators], permutations
+ using std::next_permutation;
+
+ namespace ranges {
+ using std::ranges::next_permutation_result;
+
+ using std::ranges::next_permutation;
+ } // namespace ranges
+
+ using std::prev_permutation;
+
+ namespace ranges {
+ using std::ranges::prev_permutation_result;
+
+ using std::ranges::prev_permutation;
+ } // namespace ranges
+
+} // namespace std
+
+// any.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+
+ // [any.bad.any.cast], class bad_any_cast
+ using std::bad_any_cast;
+
+ // [any.class], class any
+ using std::any;
+
+ // [any.nonmembers], non-member functions
+ using std::any_cast;
+ using std::make_any;
+ using std::swap;
+
+} // namespace std
+
+// array.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+
+ // [array], class template array
+ using std::array;
+
+ using std::operator==;
+ using std::operator<=>;
+
+ // [array.special], specialized algorithms
+ using std::swap;
+
+ // [array.creation], array creation functions
+ using std::to_array;
+
+ // [array.tuple], tuple interface
+ using std::get;
+ using std::tuple_element;
+ using std::tuple_size;
+
+} // namespace std
+
+// atomic.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+
+ // [atomics.order], order and consistency
+ using std::memory_order;
+ using std::memory_order_acq_rel;
+ using std::memory_order_acquire;
+ using std::memory_order_consume;
+ using std::memory_order_relaxed;
+ using std::memory_order_release;
+ using std::memory_order_seq_cst;
+
+ using std::kill_dependency;
+
+ // [atomics.ref.generic], class template atomic_ref
+ // [atomics.ref.pointer], partial specialization for pointers
+ // using std::atomic_ref;
+
+ // [atomics.types.generic], class template atomic
+ using std::atomic;
+
+ // [atomics.nonmembers], non-member functions
+ using std::atomic_compare_exchange_strong;
+ using std::atomic_compare_exchange_strong_explicit;
+ using std::atomic_compare_exchange_weak;
+ using std::atomic_compare_exchange_weak_explicit;
+ using std::atomic_exchange;
+ using std::atomic_exchange_explicit;
+ using std::atomic_is_lock_free;
+ using std::atomic_load;
+ using std::atomic_load_explicit;
+ using std::atomic_store;
+ using std::atomic_store_explicit;
+
+ using std::atomic_fetch_add;
+ using std::atomic_fetch_add_explicit;
+ using std::atomic_fetch_and;
+ using std::atomic_fetch_and_explicit;
+ using std::atomic_fetch_or;
+ using std::atomic_fetch_or_explicit;
+ using std::atomic_fetch_sub;
+ using std::atomic_fetch_sub_explicit;
+ using std::atomic_fetch_xor;
+ using std::atomic_fetch_xor_explicit;
+ using std::atomic_notify_all;
+ using std::atomic_notify_one;
+ using std::atomic_wait;
+ using std::atomic_wait_explicit;
+
+ // [atomics.alias], type aliases
+ using std::atomic_bool;
+ using std::atomic_char;
+ using std::atomic_char16_t;
+ using std::atomic_char32_t;
+ using std::atomic_char8_t;
+ using std::atomic_int;
+ using std::atomic_llong;
+ using std::atomic_long;
+ using std::atomic_schar;
+ using std::atomic_short;
+ using std::atomic_uchar;
+ using std::atomic_uint;
+ using std::atomic_ullong;
+ using std::atomic_ulong;
+ using std::atomic_ushort;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::atomic_wchar_t;
+#endif
+
+ using std::atomic_int16_t;
+ using std::atomic_int32_t;
+ using std::atomic_int64_t;
+ using std::atomic_int8_t;
+ using std::atomic_uint16_t;
+ using std::atomic_uint32_t;
+ using std::atomic_uint64_t;
+ using std::atomic_uint8_t;
+
+ using std::atomic_int_least16_t;
+ using std::atomic_int_least32_t;
+ using std::atomic_int_least64_t;
+ using std::atomic_int_least8_t;
+ using std::atomic_uint_least16_t;
+ using std::atomic_uint_least32_t;
+ using std::atomic_uint_least64_t;
+ using std::atomic_uint_least8_t;
+
+ using std::atomic_int_fast16_t;
+ using std::atomic_int_fast32_t;
+ using std::atomic_int_fast64_t;
+ using std::atomic_int_fast8_t;
+ using std::atomic_uint_fast16_t;
+ using std::atomic_uint_fast32_t;
+ using std::atomic_uint_fast64_t;
+ using std::atomic_uint_fast8_t;
+
+ using std::atomic_intmax_t;
+ using std::atomic_intptr_t;
+ using std::atomic_ptrdiff_t;
+ using std::atomic_size_t;
+ using std::atomic_uintmax_t;
+ using std::atomic_uintptr_t;
+
+ using std::atomic_signed_lock_free;
+ using std::atomic_unsigned_lock_free;
+
+ // [atomics.flag], flag type and operations
+ using std::atomic_flag;
+
+ using std::atomic_flag_clear;
+ using std::atomic_flag_clear_explicit;
+ using std::atomic_flag_test;
+ using std::atomic_flag_test_and_set;
+ using std::atomic_flag_test_and_set_explicit;
+ using std::atomic_flag_test_explicit;
+
+ using std::atomic_flag_notify_all;
+ using std::atomic_flag_notify_one;
+ using std::atomic_flag_wait;
+ using std::atomic_flag_wait_explicit;
+
+ // [atomics.fences], fences
+ using std::atomic_signal_fence;
+ using std::atomic_thread_fence;
+
+ // [depr.atomics.nonmembers]
+ using std::atomic_init;
+
+} // namespace std
+
+// barrier.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_THREADS
+ using std::barrier;
+#endif // _LIBCPP_HAS_NO_THREADS
+} // namespace std
+
+// bit.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [bit.cast], bit_cast
+ using std::bit_cast;
+
+#if _LIBCPP_STD_VER >= 23
+ // [bit.byteswap], byteswap
+ using std::byteswap;
+#endif
+
+ // [bit.pow.two], integral powers of 2
+ using std::bit_ceil;
+ using std::bit_floor;
+ using std::bit_width;
+ using std::has_single_bit;
+
+ // [bit.rotate], rotating
+ using std::rotl;
+ using std::rotr;
+
+ // [bit.count], counting
+ using std::countl_one;
+ using std::countl_zero;
+ using std::countr_one;
+ using std::countr_zero;
+ using std::popcount;
+
+ // [bit.endian], endian
+ using std::endian;
+} // namespace std
+
+// bitset.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::bitset;
+
+ // [bitset.operators], bitset operators
+ using std::operator&;
+ using std::operator|;
+ using std::operator^;
+ using std::operator>>;
+ using std::operator<<;
+
+ // [bitset.hash], hash support
+ using std::hash;
+
+} // namespace std
+
+// cassert.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // This module exports nothing.
+} // namespace std
+
+// cctype.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::isalnum;
+ using std::isalpha;
+ using std::isblank;
+ using std::iscntrl;
+ using std::isdigit;
+ using std::isgraph;
+ using std::islower;
+ using std::isprint;
+ using std::ispunct;
+ using std::isspace;
+ using std::isupper;
+ using std::isxdigit;
+ using std::tolower;
+ using std::toupper;
+} // namespace std
+
+// cerrno.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // This module exports nothing.
+} // namespace std
+
+// cfenv.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // types
+ using std::fenv_t;
+ using std::fexcept_t;
+
+ // functions
+ using std::feclearexcept;
+ using std::fegetexceptflag;
+ using std::feraiseexcept;
+ using std::fesetexceptflag;
+ using std::fetestexcept;
+
+ using std::fegetround;
+ using std::fesetround;
+
+ using std::fegetenv;
+ using std::feholdexcept;
+ using std::fesetenv;
+ using std::feupdateenv;
+
+} // namespace std
+
+// cfloat.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // This module exports nothing.
+} // namespace std
+
+// charconv.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+
+ // floating-point format for primitive numerical conversion
+ using std::chars_format;
+
+ // chars_format is a bitmask type.
+ // [bitmask.types] specified operators
+ using std::operator&;
+ using std::operator&=;
+ using std::operator^;
+ using std::operator^=;
+ using std::operator|;
+ using std::operator|=;
+ using std::operator~;
+
+ // [charconv.to.chars], primitive numerical output conversion
+ using std::to_chars_result;
+
+ using std::to_chars;
+
+ // [charconv.from.chars], primitive numerical input conversion
+ using std::from_chars_result;
+
+ using std::from_chars;
+} // namespace std
+
+// chrono.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+
+ namespace chrono {
+ using std::chrono::duration;
+ using std::chrono::time_point;
+
+ } // namespace chrono
+
+ using std::common_type;
+
+ namespace chrono {
+
+ // [time.traits], customization traits
+ using std::chrono::treat_as_floating_point;
+ using std::chrono::treat_as_floating_point_v;
+
+ using std::chrono::duration_values;
+
+ // using std::chrono::is_clock;
+ // using std::chrono::is_clock_v;
+
+ // [time.duration.nonmember], duration arithmetic
+ using std::chrono::operator+;
+ using std::chrono::operator-;
+ using std::chrono::operator*;
+ using std::chrono::operator/;
+ using std::chrono::operator%;
+
+ // [time.duration.comparisons], duration comparisons
+ using std::chrono::operator==;
+ using std::chrono::operator!=;
+ using std::chrono::operator<;
+ using std::chrono::operator>;
+ using std::chrono::operator<=;
+ using std::chrono::operator>=;
+ using std::chrono::operator<=>;
+
+ // [time.duration.cast], conversions
+ using std::chrono::ceil;
+ using std::chrono::duration_cast;
+ using std::chrono::floor;
+ using std::chrono::round;
+
+ // [time.duration.io], duration I/O
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::chrono::operator<<;
+#endif
+ // using std::chrono::from_stream;
+
+ // convenience typedefs
+ using std::chrono::days;
+ using std::chrono::hours;
+ using std::chrono::microseconds;
+ using std::chrono::milliseconds;
+ using std::chrono::minutes;
+ using std::chrono::months;
+ using std::chrono::nanoseconds;
+ using std::chrono::seconds;
+ using std::chrono::weeks;
+ using std::chrono::years;
+
+ // [time.point.nonmember], time_point arithmetic
+
+ // [time.point.comparisons], time_point comparisons
+
+ // [time.point.cast], conversions
+ using std::chrono::time_point_cast;
+
+ // [time.duration.alg], specialized algorithms
+ using std::chrono::abs;
+
+ // [time.clock.system], class system_clock
+ using std::chrono::system_clock;
+
+ using std::chrono::sys_days;
+ using std::chrono::sys_seconds;
+ using std::chrono::sys_time;
+
+#if 0
+ // [time.clock.utc], class utc_clock
+ using std::chrono::utc_clock;
+
+ using std::chrono::utc_seconds;
+ using std::chrono::utc_time;
+
+ using std::chrono::leap_second_info;
+
+ using std::chrono::get_leap_second_info;
+ // [time.clock.tai], class tai_clock
+ using std::chrono::tai_clock;
+
+ using std::chrono::tai_seconds;
+ using std::chrono::tai_time;
+
+ // [time.clock.gps], class gps_clock
+ using std::chrono::gps_clock;
+
+ using std::chrono::gps_seconds;
+ using std::chrono::gps_time;
+#endif
+ // [time.clock.file], type file_clock
+ using std::chrono::file_clock;
+
+ using std::chrono::file_time;
+
+#ifndef _LIBCPP_HAS_NO_MONOTONIC_CLOCK
+ // [time.clock.steady], class steady_clock
+ using std::chrono::steady_clock;
+#endif
+
+ // [time.clock.hires], class high_resolution_clock
+ using std::chrono::high_resolution_clock;
+
+ // [time.clock.local], local time
+ using std::chrono::local_days;
+ using std::chrono::local_seconds;
+ using std::chrono::local_t;
+ using std::chrono::local_time;
+
+ // [time.clock.cast], time_point conversions
+ // using std::chrono::clock_time_conversion;
+
+ // using std::chrono::clock_cast;
+
+ // [time.cal.last], class last_spec
+ using std::chrono::last_spec;
+
+ // [time.cal.day], class day
+ using std::chrono::day;
+
+ // [time.cal.month], class month
+ using std::chrono::month;
+
+ // [time.cal.year], class year
+ using std::chrono::year;
+
+ // [time.cal.wd], class weekday
+ using std::chrono::weekday;
+
+ // [time.cal.wdidx], class weekday_indexed
+ using std::chrono::weekday_indexed;
+
+ // [time.cal.wdlast], class weekday_last
+ using std::chrono::weekday_last;
+
+ // [time.cal.md], class month_day
+ using std::chrono::month_day;
+
+ // [time.cal.mdlast], class month_day_last
+ using std::chrono::month_day_last;
+
+ // [time.cal.mwd], class month_weekday
+ using std::chrono::month_weekday;
+
+ // [time.cal.mwdlast], class month_weekday_last
+ using std::chrono::month_weekday_last;
+
+ // [time.cal.ym], class year_month
+ using std::chrono::year_month;
+
+ // [time.cal.ymd], class year_month_day
+ using std::chrono::year_month_day;
+
+ // [time.cal.ymdlast], class year_month_day_last
+ using std::chrono::year_month_day_last;
+
+ // [time.cal.ymwd], class year_month_weekday
+ using std::chrono::year_month_weekday;
+
+ // [time.cal.ymwdlast], class year_month_weekday_last
+ using std::chrono::year_month_weekday_last;
+
+ // [time.cal.operators], civil calendar conventional syntax operators
+
+ // [time.hms], class template hh_mm_ss
+ using std::chrono::hh_mm_ss;
+
+ // [time.12], 12/24 hour functions
+ using std::chrono::is_am;
+ using std::chrono::is_pm;
+ using std::chrono::make12;
+ using std::chrono::make24;
+
+#if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \
+ !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+
+# ifdef _LIBCPP_ENABLE_EXPERIMENTAL
+ // [time.zone.db], time zone database
+ using std::chrono::tzdb;
+ using std::chrono::tzdb_list;
+
+ // [time.zone.db.access], time zone database access
+ // using std::chrono::current_zone;
+ using std::chrono::get_tzdb;
+ using std::chrono::get_tzdb_list;
+ // using std::chrono::locate_zone;
+
+ // [time.zone.db.remote], remote time zone database support
+ using std::chrono::reload_tzdb;
+ using std::chrono::remote_version;
+
+# endif // _LIBCPP_ENABLE_EXPERIMENTAL
+
+# if 0
+ // [time.zone.exception], exception classes
+ using std::chrono::ambiguous_local_time;
+ using std::chrono::nonexistent_local_time;
+
+ // [time.zone.info], information classes
+ using std::chrono::sys_info;
+
+ // [time.zone.timezone], class time_zone
+ using std::chrono::choose;
+ using std::chrono::time_zone;
+
+ // [time.zone.zonedtraits], class template zoned_traits
+ using std::chrono::zoned_traits;
+
+ // [time.zone.zonedtime], class template zoned_time
+ using std::chrono::zoned_time;
+
+ using std::chrono::zoned_seconds;
+
+ // [time.zone.leap], leap second support
+ using std::chrono::leap_second;
+
+ // [time.zone.link], class time_zone_link
+ using std::chrono::time_zone_link;
+
+ // [time.format], formatting
+ using std::chrono::local_time_format;
+# endif
+#endif // !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) &&
+       // !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+ } // namespace chrono
+
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::formatter;
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+
+ namespace chrono {
+ // using std::chrono::parse;
+
+ // calendrical constants
+ using std::chrono::last;
+
+ using std::chrono::Friday;
+ using std::chrono::Monday;
+ using std::chrono::Saturday;
+ using std::chrono::Sunday;
+ using std::chrono::Thursday;
+ using std::chrono::Tuesday;
+ using std::chrono::Wednesday;
+
+ using std::chrono::April;
+ using std::chrono::August;
+ using std::chrono::December;
+ using std::chrono::February;
+ using std::chrono::January;
+ using std::chrono::July;
+ using std::chrono::June;
+ using std::chrono::March;
+ using std::chrono::May;
+ using std::chrono::November;
+ using std::chrono::October;
+ using std::chrono::September;
+
+ } // namespace chrono
+
+} // namespace std
+export namespace std::inline literals::inline chrono_literals {
+ // [time.duration.literals], suffixes for duration literals
+ using std::literals::chrono_literals::operator""h;
+ using std::literals::chrono_literals::operator""min;
+ using std::literals::chrono_literals::operator""s;
+ using std::literals::chrono_literals::operator""ms;
+ using std::literals::chrono_literals::operator""us;
+ using std::literals::chrono_literals::operator""ns;
+
+  // [time.cal.day.nonmembers], non-member functions
+ using std::literals::chrono_literals::operator""d;
+
+  // [time.cal.year.nonmembers], non-member functions
+ using std::literals::chrono_literals::operator""y;
+} // namespace std::inline literals::inline chrono_literals
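+
+// A minimal usage sketch (assuming a translation unit that does `import std;`):
+// the literal operators re-exported above enable the usual chrono shorthand,
+// for example:
+//
+//   using namespace std::chrono_literals;
+//   auto timeout = 250ms + 2s;   // a std::chrono::duration
+//   auto d = 29d;                // std::chrono::day
+//   auto y = 2024y;              // std::chrono::year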
+
+// cinttypes.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::imaxdiv_t;
+
+ using std::imaxabs;
+ using std::imaxdiv;
+ using std::strtoimax;
+ using std::strtoumax;
+ using std::wcstoimax;
+ using std::wcstoumax;
+
+  // abs is conditionally here, but always present in cmath.cppm. To avoid
+  // conflicting declarations the using-declaration is omitted here.
+
+  // div is conditionally here, but always present in cstdlib.cppm. To avoid
+  // conflicting declarations the using-declaration is omitted here.
+} // namespace std
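+
+// A minimal usage sketch (assuming `import std;`): the functions exported above
+// are usable directly, while the generic std::abs and std::div overload sets
+// come from the cmath.inc and cstdlib.inc portions of this interface below,
+// for example:
+//
+//   std::imaxdiv_t qr = std::imaxdiv(std::intmax_t{7}, std::intmax_t{2});
+//   std::intmax_t  m  = std::imaxabs(std::intmax_t{-42});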
+
+// climits.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // This module exports nothing.
+} // namespace std
+
+// clocale.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::lconv;
+
+ using std::localeconv;
+ using std::setlocale;
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// cmath.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+
+ using std::double_t;
+ using std::float_t;
+
+ using std::acos;
+ using std::acosf;
+ using std::acosl;
+
+ using std::asin;
+ using std::asinf;
+ using std::asinl;
+
+ using std::atan;
+ using std::atanf;
+ using std::atanl;
+
+ using std::atan2;
+ using std::atan2f;
+ using std::atan2l;
+
+ using std::cos;
+ using std::cosf;
+ using std::cosl;
+
+ using std::sin;
+ using std::sinf;
+ using std::sinl;
+
+ using std::tan;
+ using std::tanf;
+ using std::tanl;
+
+ using std::acosh;
+ using std::acoshf;
+ using std::acoshl;
+
+ using std::asinh;
+ using std::asinhf;
+ using std::asinhl;
+
+ using std::atanh;
+ using std::atanhf;
+ using std::atanhl;
+
+ using std::cosh;
+ using std::coshf;
+ using std::coshl;
+
+ using std::sinh;
+ using std::sinhf;
+ using std::sinhl;
+
+ using std::tanh;
+ using std::tanhf;
+ using std::tanhl;
+
+ using std::exp;
+ using std::expf;
+ using std::expl;
+
+ using std::exp2;
+ using std::exp2f;
+ using std::exp2l;
+
+ using std::expm1;
+ using std::expm1f;
+ using std::expm1l;
+
+ using std::frexp;
+ using std::frexpf;
+ using std::frexpl;
+
+ using std::ilogb;
+ using std::ilogbf;
+ using std::ilogbl;
+
+ using std::ldexp;
+ using std::ldexpf;
+ using std::ldexpl;
+
+ using std::log;
+ using std::logf;
+ using std::logl;
+
+ using std::log10;
+ using std::log10f;
+ using std::log10l;
+
+ using std::log1p;
+ using std::log1pf;
+ using std::log1pl;
+
+ using std::log2;
+ using std::log2f;
+ using std::log2l;
+
+ using std::logb;
+ using std::logbf;
+ using std::logbl;
+
+ using std::modf;
+ using std::modff;
+ using std::modfl;
+
+ using std::scalbn;
+ using std::scalbnf;
+ using std::scalbnl;
+
+ using std::scalbln;
+ using std::scalblnf;
+ using std::scalblnl;
+
+ using std::cbrt;
+ using std::cbrtf;
+ using std::cbrtl;
+
+ // [c.math.abs], absolute values
+ using std::abs;
+
+ using std::fabs;
+ using std::fabsf;
+ using std::fabsl;
+
+ using std::hypot;
+ using std::hypotf;
+ using std::hypotl;
+
+ // [c.math.hypot3], three-dimensional hypotenuse
+
+ using std::pow;
+ using std::powf;
+ using std::powl;
+
+ using std::sqrt;
+ using std::sqrtf;
+ using std::sqrtl;
+
+ using std::erf;
+ using std::erff;
+ using std::erfl;
+
+ using std::erfc;
+ using std::erfcf;
+ using std::erfcl;
+
+ using std::lgamma;
+ using std::lgammaf;
+ using std::lgammal;
+
+ using std::tgamma;
+ using std::tgammaf;
+ using std::tgammal;
+
+ using std::ceil;
+ using std::ceilf;
+ using std::ceill;
+
+ using std::floor;
+ using std::floorf;
+ using std::floorl;
+
+ using std::nearbyint;
+ using std::nearbyintf;
+ using std::nearbyintl;
+
+ using std::rint;
+ using std::rintf;
+ using std::rintl;
+
+ using std::lrint;
+ using std::lrintf;
+ using std::lrintl;
+
+ using std::llrint;
+ using std::llrintf;
+ using std::llrintl;
+
+ using std::round;
+ using std::roundf;
+ using std::roundl;
+
+ using std::lround;
+ using std::lroundf;
+ using std::lroundl;
+
+ using std::llround;
+ using std::llroundf;
+ using std::llroundl;
+
+ using std::trunc;
+ using std::truncf;
+ using std::truncl;
+
+ using std::fmod;
+ using std::fmodf;
+ using std::fmodl;
+
+ using std::remainder;
+ using std::remainderf;
+ using std::remainderl;
+
+ using std::remquo;
+ using std::remquof;
+ using std::remquol;
+
+ using std::copysign;
+ using std::copysignf;
+ using std::copysignl;
+
+ using std::nan;
+ using std::nanf;
+ using std::nanl;
+
+ using std::nextafter;
+ using std::nextafterf;
+ using std::nextafterl;
+
+ using std::nexttoward;
+ using std::nexttowardf;
+ using std::nexttowardl;
+
+ using std::fdim;
+ using std::fdimf;
+ using std::fdiml;
+
+ using std::fmax;
+ using std::fmaxf;
+ using std::fmaxl;
+
+ using std::fmin;
+ using std::fminf;
+ using std::fminl;
+
+ using std::fma;
+ using std::fmaf;
+ using std::fmal;
+
+ // [c.math.lerp], linear interpolation
+ using std::lerp;
+
+ // [c.math.fpclass], classification / comparison functions
+ using std::fpclassify;
+ using std::isfinite;
+ using std::isgreater;
+ using std::isgreaterequal;
+ using std::isinf;
+ using std::isless;
+ using std::islessequal;
+ using std::islessgreater;
+ using std::isnan;
+ using std::isnormal;
+ using std::isunordered;
+ using std::signbit;
+
+ // [sf.cmath], mathematical special functions
+#if 0
+ // [sf.cmath.assoc.laguerre], associated Laguerre polynomials
+ using std::assoc_laguerre;
+ using std::assoc_laguerref;
+ using std::assoc_laguerrel;
+
+ // [sf.cmath.assoc.legendre], associated Legendre functions
+ using std::assoc_legendre;
+ using std::assoc_legendref;
+ using std::assoc_legendrel;
+
+ // [sf.cmath.beta], beta function
+ using std::beta;
+ using std::betaf;
+ using std::betal;
+
+ // [sf.cmath.comp.ellint.1], complete elliptic integral of the first kind
+ using std::comp_ellint_1;
+ using std::comp_ellint_1f;
+ using std::comp_ellint_1l;
+
+ // [sf.cmath.comp.ellint.2], complete elliptic integral of the second kind
+ using std::comp_ellint_2;
+ using std::comp_ellint_2f;
+ using std::comp_ellint_2l;
+
+ // [sf.cmath.comp.ellint.3], complete elliptic integral of the third kind
+ using std::comp_ellint_3;
+ using std::comp_ellint_3f;
+ using std::comp_ellint_3l;
+
+ // [sf.cmath.cyl.bessel.i], regular modified cylindrical Bessel functions
+ using std::cyl_bessel_i;
+ using std::cyl_bessel_if;
+ using std::cyl_bessel_il;
+
+ // [sf.cmath.cyl.bessel.j], cylindrical Bessel functions of the first kind
+ using std::cyl_bessel_j;
+ using std::cyl_bessel_jf;
+ using std::cyl_bessel_jl;
+
+ // [sf.cmath.cyl.bessel.k], irregular modified cylindrical Bessel functions
+ using std::cyl_bessel_k;
+ using std::cyl_bessel_kf;
+ using std::cyl_bessel_kl;
+
+ // [sf.cmath.cyl.neumann], cylindrical Neumann functions
+ // cylindrical Bessel functions of the second kind
+ using std::cyl_neumann;
+ using std::cyl_neumannf;
+ using std::cyl_neumannl;
+
+ // [sf.cmath.ellint.1], incomplete elliptic integral of the first kind
+ using std::ellint_1;
+ using std::ellint_1f;
+ using std::ellint_1l;
+
+ // [sf.cmath.ellint.2], incomplete elliptic integral of the second kind
+ using std::ellint_2;
+ using std::ellint_2f;
+ using std::ellint_2l;
+
+ // [sf.cmath.ellint.3], incomplete elliptic integral of the third kind
+ using std::ellint_3;
+ using std::ellint_3f;
+ using std::ellint_3l;
+
+ // [sf.cmath.expint], exponential integral
+ using std::expint;
+ using std::expintf;
+ using std::expintl;
+
+ // [sf.cmath.hermite], Hermite polynomials
+ using std::hermite;
+ using std::hermitef;
+ using std::hermitel;
+
+ // [sf.cmath.laguerre], Laguerre polynomials
+ using std::laguerre;
+ using std::laguerref;
+ using std::laguerrel;
+
+ // [sf.cmath.legendre], Legendre polynomials
+ using std::legendre;
+ using std::legendref;
+ using std::legendrel;
+
+ // [sf.cmath.riemann.zeta], Riemann zeta function
+ using std::riemann_zeta;
+ using std::riemann_zetaf;
+ using std::riemann_zetal;
+
+ // [sf.cmath.sph.bessel], spherical Bessel functions of the first kind
+ using std::sph_bessel;
+ using std::sph_besself;
+ using std::sph_bessell;
+
+ // [sf.cmath.sph.legendre], spherical associated Legendre functions
+ using std::sph_legendre;
+ using std::sph_legendref;
+ using std::sph_legendrel;
+
+ // [sf.cmath.sph.neumann], spherical Neumann functions;
+ // spherical Bessel functions of the second kind
+ using std::sph_neumann;
+ using std::sph_neumannf;
+ using std::sph_neumannl;
+#endif
+} // namespace std
+
+// codecvt.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::codecvt_mode;
+
+ using std::codecvt_utf16;
+ using std::codecvt_utf8;
+ using std::codecvt_utf8_utf16;
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// compare.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+
+ // [cmp.categories], comparison category types
+ using std::partial_ordering;
+ using std::strong_ordering;
+ using std::weak_ordering;
+
+ // named comparison functions
+ using std::is_eq;
+ using std::is_gt;
+ using std::is_gteq;
+ using std::is_lt;
+ using std::is_lteq;
+ using std::is_neq;
+
+ // [cmp.common], common comparison category type
+ using std::common_comparison_category;
+ using std::common_comparison_category_t;
+
+ // [cmp.concept], concept three_way_comparable
+ using std::three_way_comparable;
+ using std::three_way_comparable_with;
+
+ // [cmp.result], result of three-way comparison
+ using std::compare_three_way_result;
+
+ using std::compare_three_way_result_t;
+
+ // [comparisons.three.way], class compare_three_way
+ using std::compare_three_way;
+
+ // [cmp.alg], comparison algorithms
+ inline namespace __cpo {
+ using std::__cpo::compare_partial_order_fallback;
+ using std::__cpo::compare_strong_order_fallback;
+ using std::__cpo::compare_weak_order_fallback;
+ using std::__cpo::partial_order;
+ using std::__cpo::strong_order;
+ using std::__cpo::weak_order;
+ } // namespace __cpo
+
+} // namespace std
+
+// complex.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+
+ // [complex], class template complex
+ using std::complex;
+
+ // [complex.ops], operators
+ using std::operator+;
+ using std::operator-;
+ using std::operator*;
+ using std::operator/;
+
+ using std::operator==;
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::operator>>;
+ using std::operator<<;
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+
+ // [complex.value.ops], values
+ using std::imag;
+ using std::real;
+
+ using std::abs;
+ using std::arg;
+ using std::norm;
+
+ using std::conj;
+ using std::polar;
+ using std::proj;
+
+ // [complex.transcendentals], transcendentals
+ using std::acos;
+ using std::asin;
+ using std::atan;
+
+ using std::acosh;
+ using std::asinh;
+ using std::atanh;
+
+ using std::cos;
+ using std::cosh;
+ using std::exp;
+ using std::log;
+ using std::log10;
+
+ using std::pow;
+
+ using std::sin;
+ using std::sinh;
+ using std::sqrt;
+ using std::tan;
+ using std::tanh;
+
+ // [complex.literals], complex literals
+ inline namespace literals {
+ inline namespace complex_literals {
+ using std::operator""il;
+ using std::operator""i;
+ using std::operator""if;
+ } // namespace complex_literals
+ } // namespace literals
+
+} // namespace std
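+
+// A minimal usage sketch (assuming `import std;`): the complex literal
+// operators exported above enable, for example:
+//
+//   using namespace std::complex_literals;
+//   std::complex<double> z = 1.0 + 2.0i;   // operator""i
+//   auto zf = 0.5if;                        // std::complex<float>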
+
+// concepts.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+
+ // [concepts.lang], language-related concepts
+ // [concept.same], concept same_as
+ using std::same_as;
+
+ // [concept.derived], concept derived_from
+ using std::derived_from;
+
+ // [concept.convertible], concept convertible_to
+ using std::convertible_to;
+
+ // [concept.commonref], concept common_reference_with
+ using std::common_reference_with;
+
+ // [concept.common], concept common_with
+ using std::common_with;
+
+ // [concepts.arithmetic], arithmetic concepts
+ using std::floating_point;
+ using std::integral;
+ using std::signed_integral;
+ using std::unsigned_integral;
+
+ // [concept.assignable], concept assignable_from
+ using std::assignable_from;
+
+ // [concept.swappable], concept swappable
+ namespace ranges {
+ inline namespace __cpo {
+ using std::ranges::__cpo::swap;
+ }
+ } // namespace ranges
+
+ using std::swappable;
+ using std::swappable_with;
+
+ // [concept.destructible], concept destructible
+ using std::destructible;
+
+ // [concept.constructible], concept constructible_from
+ using std::constructible_from;
+
+ // [concept.default.init], concept default_initializable
+ using std::default_initializable;
+
+ // [concept.moveconstructible], concept move_constructible
+ using std::move_constructible;
+
+ // [concept.copyconstructible], concept copy_constructible
+ using std::copy_constructible;
+
+ // [concepts.compare], comparison concepts
+ // [concept.equalitycomparable], concept equality_comparable
+ using std::equality_comparable;
+ using std::equality_comparable_with;
+
+ // [concept.totallyordered], concept totally_ordered
+ using std::totally_ordered;
+ using std::totally_ordered_with;
+
+ // [concepts.object], object concepts
+ using std::copyable;
+ using std::movable;
+ using std::regular;
+ using std::semiregular;
+
+ // [concepts.callable], callable concepts
+ // [concept.invocable], concept invocable
+ using std::invocable;
+
+ // [concept.regularinvocable], concept regular_invocable
+ using std::regular_invocable;
+
+ // [concept.predicate], concept predicate
+ using std::predicate;
+
+ // [concept.relation], concept relation
+ using std::relation;
+
+ // [concept.equiv], concept equivalence_relation
+ using std::equivalence_relation;
+
+ // [concept.strictweakorder], concept strict_weak_order
+ using std::strict_weak_order;
+
+} // namespace std
+
+// condition_variable.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_THREADS
+ // [thread.condition.condvar], class condition_variable
+ using std::condition_variable;
+ // [thread.condition.condvarany], class condition_variable_any
+ using std::condition_variable_any;
+
+ // [thread.condition.nonmember], non-member functions
+ using std::notify_all_at_thread_exit;
+
+ using std::cv_status;
+#endif // _LIBCPP_HAS_NO_THREADS
+} // namespace std
+
+// coroutine.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+
+ // [coroutine.traits], coroutine traits
+ using std::coroutine_traits;
+
+ // [coroutine.handle], coroutine handle
+ using std::coroutine_handle;
+
+ // [coroutine.handle.compare], comparison operators
+ using std::operator==;
+ using std::operator<=>;
+
+ // [coroutine.handle.hash], hash support
+ using std::hash;
+
+ // [coroutine.noop], no-op coroutines
+ using std::noop_coroutine;
+ using std::noop_coroutine_handle;
+ using std::noop_coroutine_promise;
+
+ // [coroutine.trivial.awaitables], trivial awaitables
+ using std::suspend_always;
+ using std::suspend_never;
+} // namespace std
+
+// csetjmp.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::jmp_buf;
+ using std::longjmp;
+} // namespace std
+
+// csignal.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::sig_atomic_t;
+
+ // [support.signal], signal handlers
+ using std::signal;
+
+ using std::raise;
+
+} // namespace std
+
+// cstdarg.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::va_list;
+} // namespace std
+
+// cstddef.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::max_align_t;
+ using std::nullptr_t;
+ using std::ptrdiff_t;
+ using std::size_t;
+
+ using std::byte;
+
+ // [support.types.byteops], byte type operations
+ using std::operator<<=;
+ using std::operator<<;
+ using std::operator>>=;
+ using std::operator>>;
+ using std::operator|=;
+ using std::operator|;
+ using std::operator&=;
+ using std::operator&;
+ using std::operator^=;
+ using std::operator^;
+ using std::operator~;
+ using std::to_integer;
+} // namespace std
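+
+// A minimal usage sketch (assuming `import std;`): std::byte together with the
+// [support.types.byteops] operators exported above can be used, for example, as:
+//
+//   std::byte b{0x0F};
+//   b <<= 4;                                            // operator<<=
+//   int i = std::to_integer<int>(b & std::byte{0xF0});  // 0xF0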
+
+// cstdint.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // signed
+ using std::int8_t _LIBCPP_USING_IF_EXISTS;
+ using std::int16_t _LIBCPP_USING_IF_EXISTS;
+ using std::int32_t _LIBCPP_USING_IF_EXISTS;
+ using std::int64_t _LIBCPP_USING_IF_EXISTS;
+
+ using std::int_fast16_t;
+ using std::int_fast32_t;
+ using std::int_fast64_t;
+ using std::int_fast8_t;
+
+ using std::int_least16_t;
+ using std::int_least32_t;
+ using std::int_least64_t;
+ using std::int_least8_t;
+
+ using std::intmax_t;
+
+ using std::intptr_t _LIBCPP_USING_IF_EXISTS;
+
+ // unsigned
+ using std::uint8_t _LIBCPP_USING_IF_EXISTS;
+ using std::uint16_t _LIBCPP_USING_IF_EXISTS;
+ using std::uint32_t _LIBCPP_USING_IF_EXISTS;
+ using std::uint64_t _LIBCPP_USING_IF_EXISTS;
+
+ using std::uint_fast16_t;
+ using std::uint_fast32_t;
+ using std::uint_fast64_t;
+ using std::uint_fast8_t;
+
+ using std::uint_least16_t;
+ using std::uint_least32_t;
+ using std::uint_least64_t;
+ using std::uint_least8_t;
+
+ using std::uintmax_t;
+
+ using std::uintptr_t _LIBCPP_USING_IF_EXISTS;
+} // namespace std
+
+// cstdio.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::FILE;
+ using std::fpos_t;
+ using std::size_t;
+
+ using std::clearerr;
+ using std::fclose;
+ using std::feof;
+ using std::ferror;
+ using std::fflush;
+ using std::fgetc;
+ using std::fgetpos;
+ using std::fgets;
+ using std::fopen;
+ using std::fprintf;
+ using std::fputc;
+ using std::fputs;
+ using std::fread;
+ using std::freopen;
+ using std::fscanf;
+ using std::fseek;
+ using std::fsetpos;
+ using std::ftell;
+ using std::fwrite;
+ using std::getc;
+ using std::getchar;
+ using std::perror;
+ using std::printf;
+ using std::putc;
+ using std::putchar;
+ using std::puts;
+ using std::remove;
+ using std::rename;
+ using std::rewind;
+ using std::scanf;
+ using std::setbuf;
+ using std::setvbuf;
+ using std::snprintf;
+ using std::sprintf;
+ using std::sscanf;
+ using std::tmpfile;
+ using std::tmpnam;
+ using std::ungetc;
+ using std::vfprintf;
+ using std::vfscanf;
+ using std::vprintf;
+ using std::vscanf;
+ using std::vsnprintf;
+ using std::vsprintf;
+ using std::vsscanf;
+} // namespace std
+
+// cstdlib.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::div_t;
+ using std::ldiv_t;
+ using std::lldiv_t;
+ using std::size_t;
+
+ // [support.start.term], start and termination
+ using std::_Exit;
+ using std::abort;
+ using std::at_quick_exit;
+ using std::atexit;
+ using std::exit;
+ using std::quick_exit;
+
+ using std::getenv;
+ using std::system;
+
+ // [c.malloc], C library memory allocation
+ using std::aligned_alloc;
+ using std::calloc;
+ using std::free;
+ using std::malloc;
+ using std::realloc;
+
+ using std::atof;
+ using std::atoi;
+ using std::atol;
+ using std::atoll;
+ using std::strtod;
+ using std::strtof;
+ using std::strtol;
+ using std::strtold;
+ using std::strtoll;
+ using std::strtoul;
+ using std::strtoull;
+
+ // [c.mb.wcs], multibyte / wide string and character conversion functions
+ using std::mblen;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::mbstowcs;
+ using std::mbtowc;
+ using std::wcstombs;
+ using std::wctomb;
+#endif
+ // [alg.c.library], C standard library algorithms
+ using std::bsearch;
+ using std::qsort;
+
+ // [c.math.rand], low-quality random number generation
+ using std::rand;
+ using std::srand;
+
+ // [c.math.abs], absolute values
+ using std::abs;
+
+ using std::labs;
+ using std::llabs;
+
+ using std::div;
+ using std::ldiv;
+ using std::lldiv;
+} // namespace std
+
+// cstring.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::size_t;
+
+ using std::memchr;
+ using std::memcmp;
+ using std::memcpy;
+ using std::memmove;
+ using std::memset;
+ using std::strcat;
+ using std::strchr;
+ using std::strcmp;
+ using std::strcoll;
+ using std::strcpy;
+ using std::strcspn;
+ using std::strerror;
+ using std::strlen;
+ using std::strncat;
+ using std::strncmp;
+ using std::strncpy;
+ using std::strpbrk;
+ using std::strrchr;
+ using std::strspn;
+ using std::strstr;
+ using std::strtok;
+ using std::strxfrm;
+} // namespace std
+
+// ctime.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::clock_t;
+ using std::size_t;
+ using std::time_t;
+
+ using std::timespec;
+ using std::tm;
+
+ using std::asctime;
+ using std::clock;
+ using std::ctime;
+ using std::difftime;
+ using std::gmtime;
+ using std::localtime;
+ using std::mktime;
+ using std::strftime;
+ using std::time;
+ using std::timespec_get;
+} // namespace std
+
+// cuchar.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+  // Note: the Standard does not mark these symbols as optional, but libc++'s
+  // header does, so strictly speaking this is not conforming.
+
+  // mbstate_t is conditionally here, but always present in cwchar.cppm. To avoid
+  // conflicting declarations the using-declaration is omitted here.
+
+  // size_t is conditionally here, but always present in cstddef.cppm. To avoid
+  // conflicting declarations the using-declaration is omitted here.
+
+#if !defined(_LIBCPP_HAS_NO_C8RTOMB_MBRTOC8)
+ using std::mbrtoc8 _LIBCPP_USING_IF_EXISTS;
+ using std::c8rtomb _LIBCPP_USING_IF_EXISTS;
+#endif
+ using std::mbrtoc16 _LIBCPP_USING_IF_EXISTS;
+ using std::c16rtomb _LIBCPP_USING_IF_EXISTS;
+ using std::mbrtoc32 _LIBCPP_USING_IF_EXISTS;
+ using std::c32rtomb _LIBCPP_USING_IF_EXISTS;
+} // namespace std
+
+// cwchar.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::mbstate_t;
+ using std::size_t;
+ using std::wint_t;
+
+ using std::tm;
+
+ using std::btowc;
+ using std::fgetwc;
+ using std::fgetws;
+ using std::fputwc;
+ using std::fputws;
+ using std::fwide;
+ using std::fwprintf;
+ using std::fwscanf;
+ using std::getwc;
+ using std::getwchar;
+ using std::putwc;
+ using std::putwchar;
+ using std::swprintf;
+ using std::swscanf;
+ using std::ungetwc;
+ using std::vfwprintf;
+ using std::vfwscanf;
+ using std::vswprintf;
+ using std::vswscanf;
+ using std::vwprintf;
+ using std::vwscanf;
+ using std::wcscat;
+ using std::wcschr;
+ using std::wcscmp;
+ using std::wcscoll;
+ using std::wcscpy;
+ using std::wcscspn;
+ using std::wcsftime;
+ using std::wcslen;
+ using std::wcsncat;
+ using std::wcsncmp;
+ using std::wcsncpy;
+ using std::wcspbrk;
+ using std::wcsrchr;
+ using std::wcsspn;
+ using std::wcsstr;
+ using std::wcstod;
+ using std::wcstof;
+ using std::wcstok;
+ using std::wcstol;
+ using std::wcstold;
+ using std::wcstoll;
+ using std::wcstoul;
+ using std::wcstoull;
+ using std::wcsxfrm;
+ using std::wctob;
+ using std::wmemchr;
+ using std::wmemcmp;
+ using std::wmemcpy;
+ using std::wmemmove;
+ using std::wmemset;
+ using std::wprintf;
+ using std::wscanf;
+
+ // [c.mb.wcs], multibyte / wide string and character conversion functions
+ using std::mbrlen;
+ using std::mbrtowc;
+ using std::mbsinit;
+ using std::mbsrtowcs;
+ using std::wcrtomb;
+ using std::wcsrtombs;
+#endif // _LIBCPP_HAS_NO_WIDE_CHARACTERS
+} // namespace std
+
+// cwctype.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wctrans_t;
+ using std::wctype_t;
+ using std::wint_t;
+
+ using std::iswalnum;
+ using std::iswalpha;
+ using std::iswblank;
+ using std::iswcntrl;
+ using std::iswctype;
+ using std::iswdigit;
+ using std::iswgraph;
+ using std::iswlower;
+ using std::iswprint;
+ using std::iswpunct;
+ using std::iswspace;
+ using std::iswupper;
+ using std::iswxdigit;
+ using std::towctrans;
+ using std::towlower;
+ using std::towupper;
+ using std::wctrans;
+ using std::wctype;
+#endif // _LIBCPP_HAS_NO_WIDE_CHARACTERS
+} // namespace std
+
+// deque.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [deque], class template deque
+ using std::deque;
+
+ using std::operator==;
+ using std::operator<=>;
+
+ using std::swap;
+
+ // [deque.erasure], erasure
+ using std::erase;
+ using std::erase_if;
+
+ namespace pmr {
+ using std::pmr::deque;
+ }
+} // namespace std
+
+// exception.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::bad_exception;
+ using std::current_exception;
+ using std::exception;
+ using std::exception_ptr;
+ using std::get_terminate;
+ using std::make_exception_ptr;
+ using std::nested_exception;
+ using std::rethrow_exception;
+ using std::rethrow_if_nested;
+ using std::set_terminate;
+ using std::terminate;
+ using std::terminate_handler;
+ using std::throw_with_nested;
+ using std::uncaught_exception;
+ using std::uncaught_exceptions;
+} // namespace std
+
+// execution.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifdef _LIBCPP_ENABLE_EXPERIMENTAL
+export namespace std {
+ // [execpol.type], execution policy type trait
+ using std::is_execution_policy;
+ using std::is_execution_policy_v;
+} // namespace std
+
+export namespace std::execution {
+ // [execpol.seq], sequenced execution policy
+ using std::execution::sequenced_policy;
+
+ // [execpol.par], parallel execution policy
+ using std::execution::parallel_policy;
+
+ // [execpol.parunseq], parallel and unsequenced execution policy
+ using std::execution::parallel_unsequenced_policy;
+
+ // [execpol.unseq], unsequenced execution policy
+ using std::execution::unsequenced_policy;
+
+ // [execpol.objects], execution policy objects
+ using std::execution::par;
+ using std::execution::par_unseq;
+ using std::execution::seq;
+ using std::execution::unseq;
+} // namespace std::execution
+#endif // _LIBCPP_ENABLE_EXPERIMENTAL
+
+// expected.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if _LIBCPP_STD_VER >= 23
+ // [expected.unexpected], class template unexpected
+ using std::unexpected;
+
+ // [expected.bad], class template bad_expected_access
+ using std::bad_expected_access;
+
+ // in-place construction of unexpected values
+ using std::unexpect;
+ using std::unexpect_t;
+
+ // [expected.expected], class template expected
+ using std::expected;
+#endif // _LIBCPP_STD_VER >= 23
+} // namespace std
+
+// filesystem.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std::filesystem {
+ // [fs.class.path], paths
+ using std::filesystem::path;
+
+ // [fs.path.nonmember], path non-member functions
+ using std::filesystem::hash_value;
+ using std::filesystem::swap;
+
+ // [fs.class.filesystem.error], filesystem errors
+ using std::filesystem::filesystem_error;
+
+#ifndef _LIBCPP_HAS_NO_FILESYSTEM
+ // [fs.class.directory.entry], directory entries
+ using std::filesystem::directory_entry;
+
+ // [fs.class.directory.iterator], directory iterators
+ using std::filesystem::directory_iterator;
+
+ // [fs.dir.itr.nonmembers], range access for directory iterators
+ using std::filesystem::begin;
+ using std::filesystem::end;
+
+ // [fs.class.rec.dir.itr], recursive directory iterators
+ using std::filesystem::recursive_directory_iterator;
+#endif // _LIBCPP_HAS_NO_FILESYSTEM
+
+ // [fs.rec.dir.itr.nonmembers], range access for recursive directory iterators
+
+ // [fs.class.file.status], file status
+ using std::filesystem::file_status;
+ using std::filesystem::space_info;
+
+ // [fs.enum], enumerations
+ using std::filesystem::copy_options;
+ using std::filesystem::directory_options;
+ using std::filesystem::file_type;
+ using std::filesystem::perm_options;
+ using std::filesystem::perms;
+
+ using std::filesystem::file_time_type;
+
+  // Several of these enums are bitmask types.
+ // [bitmask.types] specified operators
+ using std::filesystem::operator&;
+ using std::filesystem::operator&=;
+ using std::filesystem::operator^;
+ using std::filesystem::operator^=;
+ using std::filesystem::operator|;
+ using std::filesystem::operator|=;
+ using std::filesystem::operator~;
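+
+  // A minimal usage sketch (assuming `import std;`): these operators make the
+  // bitmask enums composable in the usual way, for example:
+  //
+  //   auto opts = std::filesystem::copy_options::recursive |
+  //               std::filesystem::copy_options::skip_symlinks;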
+
+#ifndef _LIBCPP_HAS_NO_FILESYSTEM
+ // [fs.op.funcs], filesystem operations
+ using std::filesystem::absolute;
+ using std::filesystem::canonical;
+ using std::filesystem::copy;
+ using std::filesystem::copy_file;
+ using std::filesystem::copy_symlink;
+ using std::filesystem::create_directories;
+ using std::filesystem::create_directory;
+ using std::filesystem::create_directory_symlink;
+ using std::filesystem::create_hard_link;
+ using std::filesystem::create_symlink;
+ using std::filesystem::current_path;
+ using std::filesystem::equivalent;
+ using std::filesystem::exists;
+ using std::filesystem::file_size;
+ using std::filesystem::hard_link_count;
+
+ using std::filesystem::is_block_file;
+ using std::filesystem::is_character_file;
+ using std::filesystem::is_directory;
+ using std::filesystem::is_empty;
+ using std::filesystem::is_fifo;
+ using std::filesystem::is_other;
+ using std::filesystem::is_regular_file;
+ using std::filesystem::is_socket;
+ using std::filesystem::is_symlink;
+
+ using std::filesystem::last_write_time;
+ using std::filesystem::permissions;
+ using std::filesystem::proximate;
+ using std::filesystem::read_symlink;
+ using std::filesystem::relative;
+ using std::filesystem::remove;
+
+ using std::filesystem::remove_all;
+ using std::filesystem::rename;
+ using std::filesystem::resize_file;
+ using std::filesystem::space;
+ using std::filesystem::status;
+ using std::filesystem::status_known;
+ using std::filesystem::symlink_status;
+ using std::filesystem::temp_directory_path;
+ using std::filesystem::weakly_canonical;
+#endif // _LIBCPP_HAS_NO_FILESYSTEM
+
+ // [depr.fs.path.factory]
+ using std::filesystem::u8path;
+} // namespace std::filesystem
+
+// [fs.path.hash], hash support
+export namespace std {
+ using std::hash;
+}
+
+export namespace std::ranges {
+#ifndef _LIBCPP_HAS_NO_FILESYSTEM
+ using std::ranges::enable_borrowed_range;
+ using std::ranges::enable_view;
+#endif // _LIBCPP_HAS_NO_FILESYSTEM
+} // namespace std::ranges
+
+// flat_map.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if 0
+  // [flat.map], class template flat_map
+ using std::flat_map;
+
+ using std::sorted_unique;
+ using std::sorted_unique_t;
+
+ using std::uses_allocator;
+
+  // [flat.map.erasure], erasure for flat_map
+ using std::erase_if;
+
+  // [flat.multimap], class template flat_multimap
+ using std::flat_multimap;
+
+ using std::sorted_equivalent;
+ using std::sorted_equivalent_t;
+
+  // [flat.multimap.erasure], erasure for flat_multimap
+#endif
+} // namespace std
+
+// flat_set.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if 0
+  // [flat.set], class template flat_set
+ using std::flat_set;
+
+ using std::sorted_unique;
+ using std::sorted_unique_t;
+
+ using std::uses_allocator;
+
+  // [flat.set.erasure], erasure for flat_set
+ using std::erase_if;
+
+  // [flat.multiset], class template flat_multiset
+ using std::flat_multiset;
+
+ using std::sorted_equivalent;
+ using std::sorted_equivalent_t;
+#endif
+} // namespace std
+
+// format.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [format.context], class template basic_format_context
+ using std::basic_format_context;
+ using std::format_context;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wformat_context;
+#endif
+
+ // [format.args], class template basic_format_args
+ using std::basic_format_args;
+ using std::format_args;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wformat_args;
+#endif
+
+ // [format.fmt.string], class template basic_format_string
+ using std::basic_format_string;
+ using std::format_string;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wformat_string;
+#endif
+
+ // [format.functions], formatting functions
+ using std::format;
+ using std::format_to;
+ using std::vformat;
+ using std::vformat_to;
+
+ using std::format_to_n;
+ using std::format_to_n_result;
+ using std::formatted_size;
+
+ // [format.formatter], formatter
+ using std::formatter;
+
+#if _LIBCPP_STD_VER >= 23
+ // [format.formattable], concept formattable
+ using std::formattable;
+#endif
+
+ // [format.parse.ctx], class template basic_format_parse_context
+ using std::basic_format_parse_context;
+ using std::format_parse_context;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wformat_parse_context;
+#endif
+
+#if _LIBCPP_STD_VER >= 23
+ // [format.range], formatting of ranges
+ // [format.range.fmtkind], variable template format_kind
+ using std::format_kind;
+ using std::range_format;
+
+ // [format.range.formatter], class template range_formatter
+ using std::range_formatter;
+#endif // _LIBCPP_STD_VER >= 23
+
+ // [format.arg], class template basic_format_arg
+ using std::basic_format_arg;
+ using std::visit_format_arg;
+
+ // [format.arg.store], class template format-arg-store
+ using std::make_format_args;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::make_wformat_args;
+#endif
+
+ // [format.error], class format_error
+ using std::format_error;
+} // namespace std
+
+// forward_list.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [forward.list], class template forward_list
+ using std::forward_list;
+
+ using std::operator==;
+ using std::operator<=>;
+
+ using std::swap;
+
+ // [forward.list.erasure], erasure
+ using std::erase;
+ using std::erase_if;
+
+ namespace pmr {
+ using std::pmr::forward_list;
+ }
+} // namespace std
+
+// fstream.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::basic_filebuf;
+
+# ifndef _LIBCPP_HAS_NO_FILESYSTEM
+ using std::swap;
+# endif
+
+ using std::filebuf;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wfilebuf;
+# endif
+
+ using std::basic_ifstream;
+
+ using std::ifstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wifstream;
+# endif
+
+ using std::basic_ofstream;
+
+ using std::ofstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wofstream;
+# endif
+
+ using std::basic_fstream;
+
+ using std::fstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wfstream;
+# endif
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// functional.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [func.invoke], invoke
+ using std::invoke;
+#if _LIBCPP_STD_VER >= 23
+ using std::invoke_r;
+#endif
+
+ // [refwrap], reference_wrapper
+ using std::reference_wrapper;
+
+ using std::cref;
+ using std::ref;
+
+ // [arithmetic.operations], arithmetic operations
+ using std::divides;
+ using std::minus;
+ using std::modulus;
+ using std::multiplies;
+ using std::negate;
+ using std::plus;
+ // [comparisons], comparisons
+ using std::equal_to;
+ using std::greater;
+ using std::greater_equal;
+ using std::less;
+ using std::less_equal;
+ using std::not_equal_to;
+
+ // [comparisons.three.way], class compare_three_way
+ using std::compare_three_way;
+
+ // [logical.operations], logical operations
+ using std::logical_and;
+ using std::logical_not;
+ using std::logical_or;
+
+ // [bitwise.operations], bitwise operations
+ using std::bit_and;
+ using std::bit_not;
+ using std::bit_or;
+ using std::bit_xor;
+
+ // [func.identity], identity
+ using std::identity;
+
+ // [func.not.fn], function template not_fn
+ using std::not_fn;
+
+ // [func.bind.partial], function templates bind_front and bind_back
+ // using std::bind_back;
+ using std::bind_front;
+
+ // [func.bind], bind
+ using std::is_bind_expression;
+ using std::is_bind_expression_v;
+ using std::is_placeholder;
+ using std::is_placeholder_v;
+
+ using std::bind;
+
+ namespace placeholders {
+ // M is the implementation-defined number of placeholders
+ using std::placeholders::_1;
+ using std::placeholders::_10;
+ using std::placeholders::_2;
+ using std::placeholders::_3;
+ using std::placeholders::_4;
+ using std::placeholders::_5;
+ using std::placeholders::_6;
+ using std::placeholders::_7;
+ using std::placeholders::_8;
+ using std::placeholders::_9;
+ } // namespace placeholders
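+
+  // A minimal usage sketch (assuming `import std;`): std::bind together with
+  // the exported placeholders, for example:
+  //
+  //   auto plus_one = std::bind(std::plus<int>{}, std::placeholders::_1, 1);
+  //   int r = plus_one(41);   // r == 42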
+
+ // [func.memfn], member function adaptors
+ using std::mem_fn;
+
+ // [func.wrap], polymorphic function wrappers
+ using std::bad_function_call;
+
+ using std::function;
+
+ using std::swap;
+
+ using std::operator==;
+
+ // [func.wrap.move], move only wrapper
+ // using std::move_only_function;
+
+ // [func.search], searchers
+ using std::default_searcher;
+
+ using std::boyer_moore_searcher;
+
+ using std::boyer_moore_horspool_searcher;
+
+ // [unord.hash], class template hash
+ using std::hash;
+
+ namespace ranges {
+ // [range.cmp], concept-constrained comparisons
+ using std::ranges::equal_to;
+ using std::ranges::greater;
+ using std::ranges::greater_equal;
+ using std::ranges::less;
+ using std::ranges::less_equal;
+ using std::ranges::not_equal_to;
+ } // namespace ranges
+} // namespace std
+
+// future.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_THREADS
+ using std::future_errc;
+ using std::future_status;
+ using std::launch;
+
+ // launch is a bitmask type.
+ // [bitmask.types] specified operators
+ using std::operator&;
+ using std::operator&=;
+ using std::operator^;
+ using std::operator^=;
+ using std::operator|;
+ using std::operator|=;
+ using std::operator~;
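+
+  // A minimal usage sketch (assuming `import std;` and a configuration with
+  // threads enabled): the launch bitmask composes with these operators,
+  // for example:
+  //
+  //   auto fut = std::async(std::launch::async | std::launch::deferred,
+  //                         [] { return 42; });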
+
+ // [futures.errors], error handling
+ using std::is_error_code_enum;
+ using std::make_error_code;
+ using std::make_error_condition;
+
+ using std::future_category;
+
+ // [futures.future.error], class future_error
+ using std::future_error;
+
+ // [futures.promise], class template promise
+ using std::promise;
+
+ using std::swap;
+
+ using std::uses_allocator;
+
+ // [futures.unique.future], class template future
+ using std::future;
+
+ // [futures.shared.future], class template shared_future
+ using std::shared_future;
+
+ // [futures.task], class template packaged_task
+ using std::packaged_task;
+
+ // [futures.async], function template async
+ using std::async;
+#endif // _LIBCPP_HAS_NO_THREADS
+} // namespace std
+
+// generator.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if 0
+ using std::generator;
+#endif
+} // namespace std
+
+// hazard_pointer.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if 0
+# if _LIBCPP_STD_VER >= 23
+ // 4.1.3, class template hazard_pointer_obj_base
+ using std::hazard_pointer_obj_base;
+ // 4.1.4, class hazard_pointer
+ using std::hazard_pointer;
+ // 4.1.5, Construct non-empty hazard_pointer
+ using std::make_hazard_pointer;
+ // 4.1.6, Hazard pointer swap
+ using std::swap;
+# endif // _LIBCPP_STD_VER >= 23
+#endif
+} // namespace std
+
+// initializer_list.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::initializer_list;
+
+ // [support.initlist.range], initializer list range access
+ using std::begin;
+ using std::end;
+} // namespace std
+
+// iomanip.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::get_money;
+ using std::get_time;
+ using std::put_money;
+ using std::put_time;
+ using std::resetiosflags;
+ using std::setbase;
+ using std::setfill;
+ using std::setiosflags;
+ using std::setprecision;
+ using std::setw;
+
+ using std::quoted;
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// ios.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::fpos;
+ // based on [tab:fpos.operations]
+ using std::operator!=; // Note not affected by P1614, seems like a bug.
+ using std::operator-;
+ using std::operator==;
+
+ using std::streamoff;
+ using std::streamsize;
+
+ using std::basic_ios;
+ using std::ios_base;
+
+ // [std.ios.manip], manipulators
+ using std::boolalpha;
+ using std::noboolalpha;
+
+ using std::noshowbase;
+ using std::showbase;
+
+ using std::noshowpoint;
+ using std::showpoint;
+
+ using std::noshowpos;
+ using std::showpos;
+
+ using std::noskipws;
+ using std::skipws;
+
+ using std::nouppercase;
+ using std::uppercase;
+
+ using std::nounitbuf;
+ using std::unitbuf;
+
+ // [adjustfield.manip], adjustfield
+ using std::internal;
+ using std::left;
+ using std::right;
+
+ // [basefield.manip], basefield
+ using std::dec;
+ using std::hex;
+ using std::oct;
+
+ // [floatfield.manip], floatfield
+ using std::defaultfloat;
+ using std::fixed;
+ using std::hexfloat;
+ using std::scientific;
+
+ // [error.reporting], error reporting
+ using std::io_errc;
+
+ using std::iostream_category;
+ using std::is_error_code_enum;
+ using std::make_error_code;
+ using std::make_error_condition;
+
+ // [iosfwd.syn]
+ using std::ios;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wios;
+# endif
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// iosfwd.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::streampos;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wstreampos;
+#endif
+ using std::u16streampos;
+ using std::u32streampos;
+ using std::u8streampos;
+
+#ifdef _LIBCPP_HAS_YES_SYNCSTREAM
+ using std::basic_osyncstream;
+ using std::basic_syncbuf;
+#endif
+
+ using std::istreambuf_iterator;
+ using std::ostreambuf_iterator;
+
+#ifdef _LIBCPP_HAS_YES_SYNCSTREAM
+ using std::osyncstream;
+ using std::syncbuf;
+#endif
+
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+#ifdef _LIBCPP_HAS_YES_SYNCSTREAM
+ using std::wosyncstream;
+ using std::wsyncbuf;
+#endif
+#endif
+
+ using std::fpos;
+} // namespace std
+
+// iostream.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::cerr;
+ using std::cin;
+ using std::clog;
+ using std::cout;
+
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wcerr;
+ using std::wcin;
+ using std::wclog;
+ using std::wcout;
+# endif
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// istream.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::basic_istream;
+
+ using std::istream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wistream;
+# endif
+
+ using std::basic_iostream;
+
+ using std::iostream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wiostream;
+# endif
+
+ using std::ws;
+
+ using std::operator>>;
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// iterator.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [iterator.assoc.types], associated types
+ // [incrementable.traits], incrementable traits
+ using std::incrementable_traits;
+ using std::iter_difference_t;
+
+ using std::indirectly_readable_traits;
+ using std::iter_value_t;
+
+ // [iterator.traits], iterator traits
+ using std::iterator_traits;
+
+ using std::iter_reference_t;
+
+ namespace ranges {
+ // [iterator.cust], customization point objects
+ inline namespace __cpo {
+ // [iterator.cust.move], ranges::iter_move
+ using std::ranges::__cpo::iter_move;
+
+ // [iterator.cust.swap], ranges::iter_swap
+ using std::ranges::__cpo::iter_swap;
+ } // namespace __cpo
+ } // namespace ranges
+
+ using std::iter_rvalue_reference_t;
+
+ // [iterator.concepts], iterator concepts
+ // [iterator.concept.readable], concept indirectly_readable
+ using std::indirectly_readable;
+
+ using std::iter_common_reference_t;
+
+ // [iterator.concept.writable], concept indirectly_writable
+ using std::indirectly_writable;
+
+ // [iterator.concept.winc], concept weakly_incrementable
+ using std::weakly_incrementable;
+
+ // [iterator.concept.inc], concept incrementable
+ using std::incrementable;
+
+ // [iterator.concept.iterator], concept input_or_output_iterator
+ using std::input_or_output_iterator;
+
+ // [iterator.concept.sentinel], concept sentinel_for
+ using std::sentinel_for;
+
+ // [iterator.concept.sizedsentinel], concept sized_sentinel_for
+ using std::disable_sized_sentinel_for;
+
+ using std::sized_sentinel_for;
+
+ // [iterator.concept.input], concept input_iterator
+ using std::input_iterator;
+
+ // [iterator.concept.output], concept output_iterator
+ using std::output_iterator;
+
+ // [iterator.concept.forward], concept forward_iterator
+ using std::forward_iterator;
+
+ // [iterator.concept.bidir], concept bidirectional_iterator
+ using std::bidirectional_iterator;
+
+ // [iterator.concept.random.access], concept random_access_iterator
+ using std::random_access_iterator;
+
+ // [iterator.concept.contiguous], concept contiguous_iterator
+ using std::contiguous_iterator;
+
+ // [indirectcallable], indirect callable requirements
+ // [indirectcallable.indirectinvocable], indirect callables
+ using std::indirectly_unary_invocable;
+
+ using std::indirectly_regular_unary_invocable;
+
+ using std::indirect_unary_predicate;
+
+ using std::indirect_binary_predicate;
+
+ using std::indirect_equivalence_relation;
+
+ using std::indirect_strict_weak_order;
+
+ using std::indirect_result_t;
+
+ // [projected], projected
+ using std::projected;
+
+ // [alg.req], common algorithm requirements
+ // [alg.req.ind.move], concept indirectly_movable
+ using std::indirectly_movable;
+
+ using std::indirectly_movable_storable;
+
+ // [alg.req.ind.copy], concept indirectly_copyable
+ using std::indirectly_copyable;
+
+ using std::indirectly_copyable_storable;
+
+ // [alg.req.ind.swap], concept indirectly_swappable
+ using std::indirectly_swappable;
+
+ // [alg.req.ind.cmp], concept indirectly_comparable
+ using std::indirectly_comparable;
+
+ // [alg.req.permutable], concept permutable
+ using std::permutable;
+
+ // [alg.req.mergeable], concept mergeable
+ using std::mergeable;
+
+ // [alg.req.sortable], concept sortable
+ using std::sortable;
+
+ // [iterator.primitives], primitives
+ // [std.iterator.tags], iterator tags
+ using std::bidirectional_iterator_tag;
+ using std::contiguous_iterator_tag;
+ using std::forward_iterator_tag;
+ using std::input_iterator_tag;
+ using std::output_iterator_tag;
+ using std::random_access_iterator_tag;
+
+ // [iterator.operations], iterator operations
+ using std::advance;
+ using std::distance;
+ using std::next;
+ using std::prev;
+
+ // [range.iter.ops], range iterator operations
+ namespace ranges {
+    // [range.iter.op.advance], ranges::advance
+ using std::ranges::advance;
+
+    // [range.iter.op.distance], ranges::distance
+ using std::ranges::distance;
+
+    // [range.iter.op.next], ranges::next
+ using std::ranges::next;
+
+    // [range.iter.op.prev], ranges::prev
+ using std::ranges::prev;
+ } // namespace ranges
+
+ // [predef.iterators], predefined iterators and sentinels
+ // [reverse.iterators], reverse iterators
+ using std::reverse_iterator;
+
+ using std::operator==;
+ using std::operator!=;
+ using std::operator<;
+ using std::operator>;
+ using std::operator<=;
+ using std::operator>=;
+ using std::operator<=>;
+
+ using std::operator-;
+ using std::operator+;
+
+ using std::make_reverse_iterator;
+
+ // using std::disable_sized_sentinel_for;
+
+ // [insert.iterators], insert iterators
+ using std::back_insert_iterator;
+ using std::back_inserter;
+
+ using std::front_insert_iterator;
+ using std::front_inserter;
+
+ using std::insert_iterator;
+ using std::inserter;
+
+ // [const.iterators], constant iterators and sentinels
+ // [const.iterators.alias], alias templates
+ // using std::const_iterator;
+ // using std::const_sentinel;
+ // using std::iter_const_reference_t;
+
+ // [const.iterators.iterator], class template basic_const_iterator
+ // using std::basic_const_iterator;
+
+ // using std::common_type;
+
+ // using std::make_const_iterator;
+
+ // [move.iterators], move iterators and sentinels
+ using std::move_iterator;
+
+ using std::make_move_iterator;
+
+ using std::move_sentinel;
+
+ using std::common_iterator;
+
+ // [default.sentinel], default sentinel
+ using std::default_sentinel;
+ using std::default_sentinel_t;
+
+ // [iterators.counted], counted iterators
+ using std::counted_iterator;
+
+ // [unreachable.sentinel], unreachable sentinel
+ using std::unreachable_sentinel;
+ using std::unreachable_sentinel_t;
+
+ // [stream.iterators], stream iterators
+ using std::istream_iterator;
+
+ using std::ostream_iterator;
+
+ using std::istreambuf_iterator;
+ using std::ostreambuf_iterator;
+
+ // [iterator.range], range access
+ using std::begin;
+ using std::cbegin;
+ using std::cend;
+ using std::crbegin;
+ using std::crend;
+ using std::end;
+ using std::rbegin;
+ using std::rend;
+
+ using std::empty;
+ using std::size;
+ using std::ssize;
+
+ using std::data;
+
+ // [depr.iterator]
+ using std::iterator;
+} // namespace std
+
+// latch.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_THREADS
+ using std::latch;
+#endif // _LIBCPP_HAS_NO_THREADS
+} // namespace std
+
+// limits.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [fp.style], floating-point type properties
+ using std::float_denorm_style;
+ using std::float_round_style;
+
+  // [numeric.limits], class template numeric_limits
+ using std::numeric_limits;
+} // namespace std
+
+// list.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [list], class template list
+ using std::list;
+
+ using std::operator==;
+ using std::operator<=>;
+
+ using std::swap;
+
+ // [list.erasure], erasure
+ using std::erase;
+ using std::erase_if;
+
+ namespace pmr {
+ using std::pmr::list;
+ }
+} // namespace std
+
+// locale.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ // [locale], locale
+ using std::has_facet;
+ using std::locale;
+ using std::use_facet;
+
+ // [locale.convenience], convenience interfaces
+ using std::isalnum;
+ using std::isalpha;
+ using std::isblank;
+ using std::iscntrl;
+ using std::isdigit;
+ using std::isgraph;
+ using std::islower;
+ using std::isprint;
+ using std::ispunct;
+ using std::isspace;
+ using std::isupper;
+ using std::isxdigit;
+ using std::tolower;
+ using std::toupper;
+
+ // [category.ctype], ctype
+ using std::codecvt;
+ using std::codecvt_base;
+ using std::codecvt_byname;
+ using std::ctype;
+ using std::ctype_base;
+ using std::ctype_byname;
+
+ // [category.numeric], numeric
+ using std::num_get;
+ using std::num_put;
+ using std::numpunct;
+ using std::numpunct_byname;
+
+ // [category.collate], collation
+ using std::collate;
+ using std::collate_byname;
+
+ // [category.time], date and time
+ using std::time_base;
+ using std::time_get;
+ using std::time_get_byname;
+ using std::time_put;
+ using std::time_put_byname;
+
+ // [category.monetary], money
+ using std::money_base;
+ using std::money_get;
+ using std::money_put;
+ using std::moneypunct;
+ using std::moneypunct_byname;
+
+ // [category.messages], message retrieval
+ using std::messages;
+ using std::messages_base;
+ using std::messages_byname;
+
+ // [depr.conversions.buffer]
+ using std::wbuffer_convert;
+
+ // [depr.conversions.string]
+ using std::wstring_convert;
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// map.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [map], class template map
+ using std::map;
+
+ using std::operator==;
+ using std::operator<=>;
+
+ using std::swap;
+
+ // [map.erasure], erasure for map
+ using std::erase_if;
+
+ // [multimap], class template multimap
+ using std::multimap;
+
+ namespace pmr {
+ using std::pmr::map;
+ using std::pmr::multimap;
+ } // namespace pmr
+} // namespace std
+
+// mdspan.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if _LIBCPP_STD_VER >= 23
+ // [mdspan.extents], class template extents
+ using std::extents;
+
+ // [mdspan.extents.dextents], alias template dextents
+ using std::dextents;
+
+ // [mdspan.layout], layout mapping
+ using std::layout_left;
+ using std::layout_right;
+#if _LIBCPP_VERSION >= 180000
+ using std::layout_stride;
+#endif
+
+ // [mdspan.accessor.default], class template default_accessor
+ using std::default_accessor;
+
+ // [mdspan.mdspan], class template mdspan
+ using std::mdspan;
+#endif // _LIBCPP_STD_VER >= 23
+} // namespace std
+
+// memory.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [pointer.traits], pointer traits
+ using std::pointer_traits;
+
+ // [pointer.conversion], pointer conversion
+ using std::to_address;
+
+ // [ptr.align], pointer alignment
+ using std::align;
+ using std::assume_aligned;
+
+ // [obj.lifetime], explicit lifetime management
+ // using std::start_lifetime_as;
+ // using std::start_lifetime_as_array;
+
+ // [allocator.tag], allocator argument tag
+ using std::allocator_arg;
+ using std::allocator_arg_t;
+
+ // [allocator.uses], uses_allocator
+ using std::uses_allocator;
+
+ // [allocator.uses.trait], uses_allocator
+ using std::uses_allocator_v;
+
+ // [allocator.uses.construction], uses-allocator construction
+ using std::uses_allocator_construction_args;
+
+ using std::make_obj_using_allocator;
+ using std::uninitialized_construct_using_allocator;
+
+ // [allocator.traits], allocator traits
+ using std::allocator_traits;
+
+#if _LIBCPP_STD_VER >= 23
+ using std::allocation_result;
+
+ using std::allocate_at_least;
+#endif
+
+ // [default.allocator], the default allocator
+ using std::allocator;
+ using std::operator==;
+
+ // [specialized.addressof], addressof
+ using std::addressof;
+
+ // [specialized.algorithms], specialized algorithms
+ // [special.mem.concepts], special memory concepts
+
+ using std::uninitialized_default_construct;
+ using std::uninitialized_default_construct_n;
+
+ namespace ranges {
+ using std::ranges::uninitialized_default_construct;
+ using std::ranges::uninitialized_default_construct_n;
+ } // namespace ranges
+
+ using std::uninitialized_value_construct;
+ using std::uninitialized_value_construct_n;
+
+ namespace ranges {
+ using std::ranges::uninitialized_value_construct;
+ using std::ranges::uninitialized_value_construct_n;
+ } // namespace ranges
+
+ using std::uninitialized_copy;
+ using std::uninitialized_copy_n;
+
+ namespace ranges {
+ using std::ranges::uninitialized_copy;
+ using std::ranges::uninitialized_copy_result;
+
+ using std::ranges::uninitialized_copy_n;
+ using std::ranges::uninitialized_copy_n_result;
+ } // namespace ranges
+
+ using std::uninitialized_move;
+ using std::uninitialized_move_n;
+
+ namespace ranges {
+ using std::ranges::uninitialized_move;
+ using std::ranges::uninitialized_move_result;
+
+ using std::ranges::uninitialized_move_n;
+ using std::ranges::uninitialized_move_n_result;
+ } // namespace ranges
+
+ using std::uninitialized_fill;
+ using std::uninitialized_fill_n;
+
+ namespace ranges {
+ using std::ranges::uninitialized_fill;
+ using std::ranges::uninitialized_fill_n;
+ } // namespace ranges
+
+ // [specialized.construct], construct_at
+ using std::construct_at;
+
+ namespace ranges {
+ using std::ranges::construct_at;
+ }
+ // [specialized.destroy], destroy
+ using std::destroy;
+ using std::destroy_at;
+ using std::destroy_n;
+
+ namespace ranges {
+ using std::ranges::destroy;
+ using std::ranges::destroy_at;
+ using std::ranges::destroy_n;
+ } // namespace ranges
+
+ // [unique.ptr], class template unique_ptr
+ using std::default_delete;
+ using std::unique_ptr;
+
+ using std::make_unique;
+ using std::make_unique_for_overwrite;
+
+ using std::operator<;
+ using std::operator>;
+ using std::operator<=;
+ using std::operator>=;
+ using std::operator<=>;
+
+ using std::operator<<;
+
+ // [util.smartptr.weak.bad], class bad_weak_ptr
+ using std::bad_weak_ptr;
+
+ // [util.smartptr.shared], class template shared_ptr
+ using std::shared_ptr;
+
+ // [util.smartptr.shared.create], shared_ptr creation
+ using std::allocate_shared;
+ using std::allocate_shared_for_overwrite;
+ using std::make_shared;
+ using std::make_shared_for_overwrite;
+
+ // [util.smartptr.shared.spec], shared_ptr specialized algorithms
+ using std::swap;
+
+ // [util.smartptr.shared.cast], shared_ptr casts
+ using std::const_pointer_cast;
+ using std::dynamic_pointer_cast;
+ using std::reinterpret_pointer_cast;
+ using std::static_pointer_cast;
+
+ using std::get_deleter;
+
+ // [util.smartptr.shared.io], shared_ptr I/O
+
+ // [util.smartptr.weak], class template weak_ptr
+ using std::weak_ptr;
+
+ // [util.smartptr.weak.spec], weak_ptr specialized algorithms
+
+ // [util.smartptr.ownerless], class template owner_less
+ using std::owner_less;
+
+ // [util.smartptr.enab], class template enable_shared_from_this
+ using std::enable_shared_from_this;
+
+ // [util.smartptr.hash], hash support
+ using std::hash;
+
+ // [util.smartptr.atomic], atomic smart pointers
+ // using std::atomic;
+
+ // [out.ptr.t], class template out_ptr_t
+ // using std::out_ptr_t;
+
+ // [out.ptr], function template out_ptr
+ // using std::out_ptr;
+
+ // [inout.ptr.t], class template inout_ptr_t
+ // using std::inout_ptr_t;
+
+ // [inout.ptr], function template inout_ptr
+ // using std::inout_ptr;
+
+#ifndef _LIBCPP_HAS_NO_THREADS
+ // [depr.util.smartptr.shared.atomic]
+ using std::atomic_is_lock_free;
+
+ using std::atomic_load;
+ using std::atomic_load_explicit;
+
+ using std::atomic_store;
+ using std::atomic_store_explicit;
+
+ using std::atomic_exchange;
+ using std::atomic_exchange_explicit;
+
+ using std::atomic_compare_exchange_strong;
+ using std::atomic_compare_exchange_strong_explicit;
+ using std::atomic_compare_exchange_weak;
+ using std::atomic_compare_exchange_weak_explicit;
+#endif // _LIBCPP_HAS_NO_THREADS
+} // namespace std
+
+// memory_resource.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std::pmr {
+ // [mem.res.class], class memory_resource
+ using std::pmr::memory_resource;
+
+ using std::pmr::operator==;
+
+ // [mem.poly.allocator.class], class template polymorphic_allocator
+ using std::pmr::polymorphic_allocator;
+
+ // [mem.res.global], global memory resources
+ using std::pmr::get_default_resource;
+ using std::pmr::new_delete_resource;
+ using std::pmr::null_memory_resource;
+ using std::pmr::set_default_resource;
+
+ // [mem.res.pool], pool resource classes
+ using std::pmr::monotonic_buffer_resource;
+ using std::pmr::pool_options;
+ using std::pmr::synchronized_pool_resource;
+ using std::pmr::unsynchronized_pool_resource;
+} // namespace std::pmr
+
+// mutex.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_THREADS
+ // [thread.mutex.class], class mutex
+ using std::mutex;
+ // [thread.mutex.recursive], class recursive_mutex
+ using std::recursive_mutex;
+ // [thread.timedmutex.class] class timed_mutex
+ using std::timed_mutex;
+ // [thread.timedmutex.recursive], class recursive_timed_mutex
+ using std::recursive_timed_mutex;
+
+ using std::adopt_lock_t;
+ using std::defer_lock_t;
+ using std::try_to_lock_t;
+
+ using std::adopt_lock;
+ using std::defer_lock;
+ using std::try_to_lock;
+
+ // [thread.lock], locks
+ using std::lock_guard;
+ using std::scoped_lock;
+ using std::unique_lock;
+
+ using std::swap;
+
+ // [thread.lock.algorithm], generic locking algorithms
+ using std::lock;
+ using std::try_lock;
+#endif // _LIBCPP_HAS_NO_THREADS
+
+ using std::once_flag;
+
+ using std::call_once;
+} // namespace std
+
+// new.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [alloc.errors], storage allocation errors
+ using std::bad_alloc;
+ using std::bad_array_new_length;
+
+ using std::destroying_delete;
+ using std::destroying_delete_t;
+
+ // global operator new control
+ using std::align_val_t;
+
+ using std::nothrow;
+ using std::nothrow_t;
+
+ using std::get_new_handler;
+ using std::new_handler;
+ using std::set_new_handler;
+
+ // [ptr.launder], pointer optimization barrier
+ using std::launder;
+#if 0
+ // [hardware.interference], hardware interference size
+ using std::hardware_constructive_interference_size;
+ using std::hardware_destructive_interference_size;
+#endif
+} // namespace std
+
+export {
+ using ::operator new;
+ using ::operator delete;
+ using ::operator new[];
+ using ::operator delete[];
+} // export
+
+// numbers.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std::numbers {
+ using std::numbers::e_v;
+ using std::numbers::egamma_v;
+ using std::numbers::inv_pi_v;
+ using std::numbers::inv_sqrt3_v;
+ using std::numbers::inv_sqrtpi_v;
+ using std::numbers::ln10_v;
+ using std::numbers::ln2_v;
+ using std::numbers::log10e_v;
+ using std::numbers::log2e_v;
+ using std::numbers::phi_v;
+ using std::numbers::pi_v;
+ using std::numbers::sqrt2_v;
+ using std::numbers::sqrt3_v;
+
+ using std::numbers::e;
+ using std::numbers::egamma;
+ using std::numbers::inv_pi;
+ using std::numbers::inv_sqrt3;
+ using std::numbers::inv_sqrtpi;
+ using std::numbers::ln10;
+ using std::numbers::ln2;
+ using std::numbers::log10e;
+ using std::numbers::log2e;
+ using std::numbers::phi;
+ using std::numbers::pi;
+ using std::numbers::sqrt2;
+ using std::numbers::sqrt3;
+} // namespace std::numbers
+
+// numeric.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [accumulate], accumulate
+ using std::accumulate;
+
+ // [reduce], reduce
+ using std::reduce;
+
+ // [inner.product], inner product
+ using std::inner_product;
+
+ // [transform.reduce], transform reduce
+ using std::transform_reduce;
+
+ // [partial.sum], partial sum
+ using std::partial_sum;
+
+ // [exclusive.scan], exclusive scan
+ using std::exclusive_scan;
+
+ // [inclusive.scan], inclusive scan
+ using std::inclusive_scan;
+
+ // [transform.exclusive.scan], transform exclusive scan
+ using std::transform_exclusive_scan;
+
+ // [transform.inclusive.scan], transform inclusive scan
+ using std::transform_inclusive_scan;
+
+ // [adjacent.difference], adjacent difference
+ using std::adjacent_difference;
+
+ // [numeric.iota], iota
+ using std::iota;
+
+ namespace ranges {
+ // using std::ranges::iota_result;
+ // using std::ranges::iota;
+ } // namespace ranges
+
+ // [numeric.ops.gcd], greatest common divisor
+ using std::gcd;
+
+ // [numeric.ops.lcm], least common multiple
+ using std::lcm;
+
+ // [numeric.ops.midpoint], midpoint
+ using std::midpoint;
+} // namespace std
+
+// optional.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [optional.optional], class template optional
+ using std::optional;
+
+ // [optional.nullopt], no-value state indicator
+ using std::nullopt;
+ using std::nullopt_t;
+
+ // [optional.bad.access], class bad_optional_access
+ using std::bad_optional_access;
+
+ // [optional.relops], relational operators
+ using std::operator==;
+ using std::operator!=;
+ using std::operator<;
+ using std::operator>;
+ using std::operator<=;
+ using std::operator>=;
+ using std::operator<=>;
+
+ // [optional.specalg], specialized algorithms
+ using std::swap;
+
+ using std::make_optional;
+
+ // [optional.hash], hash support
+ using std::hash;
+} // namespace std
+
+// ostream.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::basic_ostream;
+
+ using std::ostream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wostream;
+# endif
+
+ using std::endl;
+ using std::ends;
+ using std::flush;
+
+# if 0
+ using std::emit_on_flush;
+ using std::flush_emit;
+ using std::noemit_on_flush;
+# endif
+ using std::operator<<;
+
+# if 0
+ // [ostream.formatted.print], print functions
+ using std::print;
+ using std::println;
+
+ using std::vprint_nonunicode;
+ using std::vprint_unicode;
+# endif
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// print.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if _LIBCPP_STD_VER >= 23
+ // [print.fun], print functions
+ using std::print;
+ using std::println;
+
+ using std::vprint_nonunicode;
+# ifndef _LIBCPP_HAS_NO_UNICODE
+ using std::vprint_unicode;
+# endif // _LIBCPP_HAS_NO_UNICODE
+#endif // _LIBCPP_STD_VER >= 23
+} // namespace std
+
+// queue.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [queue], class template queue
+ using std::queue;
+
+ using std::operator==;
+ using std::operator!=;
+ using std::operator<;
+ using std::operator>;
+ using std::operator<=;
+ using std::operator>=;
+ using std::operator<=>;
+
+ using std::swap;
+ using std::uses_allocator;
+
+ // [priority.queue], class template priority_queue
+ using std::priority_queue;
+} // namespace std
+
+// random.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [rand.req.urng], uniform random bit generator requirements
+ using std::uniform_random_bit_generator;
+
+ // [rand.eng.lcong], class template linear_congruential_engine
+ using std::linear_congruential_engine;
+
+ // [rand.eng.mers], class template mersenne_twister_engine
+ using std::mersenne_twister_engine;
+
+ // [rand.eng.sub], class template subtract_with_carry_engine
+ using std::subtract_with_carry_engine;
+
+ // [rand.adapt.disc], class template discard_block_engine
+ using std::discard_block_engine;
+
+ // [rand.adapt.ibits], class template independent_bits_engine
+ using std::independent_bits_engine;
+
+ // [rand.adapt.shuf], class template shuffle_order_engine
+ using std::shuffle_order_engine;
+
+ // [rand.predef], engines and engine adaptors with predefined parameters
+ using std::knuth_b;
+ using std::minstd_rand;
+ using std::minstd_rand0;
+ using std::mt19937;
+ using std::mt19937_64;
+ using std::ranlux24;
+ using std::ranlux24_base;
+ using std::ranlux48;
+ using std::ranlux48_base;
+
+ using std::default_random_engine;
+
+#ifndef _LIBCPP_HAS_NO_RANDOM_DEVICE
+ // [rand.device], class random_device
+ using std::random_device;
+#endif
+
+ // [rand.util.seedseq], class seed_seq
+ using std::seed_seq;
+
+ // [rand.util.canonical], function template generate_canonical
+ using std::generate_canonical;
+
+ // [rand.dist.uni.int], class template uniform_int_distribution
+ using std::uniform_int_distribution;
+
+ // [rand.dist.uni.real], class template uniform_real_distribution
+ using std::uniform_real_distribution;
+
+ // [rand.dist.bern.bernoulli], class bernoulli_distribution
+ using std::bernoulli_distribution;
+
+ // [rand.dist.bern.bin], class template binomial_distribution
+ using std::binomial_distribution;
+
+ // [rand.dist.bern.geo], class template geometric_distribution
+ using std::geometric_distribution;
+
+ // [rand.dist.bern.negbin], class template negative_binomial_distribution
+ using std::negative_binomial_distribution;
+
+ // [rand.dist.pois.poisson], class template poisson_distribution
+ using std::poisson_distribution;
+
+ // [rand.dist.pois.exp], class template exponential_distribution
+ using std::exponential_distribution;
+
+ // [rand.dist.pois.gamma], class template gamma_distribution
+ using std::gamma_distribution;
+
+ // [rand.dist.pois.weibull], class template weibull_distribution
+ using std::weibull_distribution;
+
+ // [rand.dist.pois.extreme], class template extreme_value_distribution
+ using std::extreme_value_distribution;
+
+ // [rand.dist.norm.normal], class template normal_distribution
+ using std::normal_distribution;
+
+ // [rand.dist.norm.lognormal], class template lognormal_distribution
+ using std::lognormal_distribution;
+
+ // [rand.dist.norm.chisq], class template chi_squared_distribution
+ using std::chi_squared_distribution;
+
+ // [rand.dist.norm.cauchy], class template cauchy_distribution
+ using std::cauchy_distribution;
+
+ // [rand.dist.norm.f], class template fisher_f_distribution
+ using std::fisher_f_distribution;
+
+ // [rand.dist.norm.t], class template student_t_distribution
+ using std::student_t_distribution;
+
+ // [rand.dist.samp.discrete], class template discrete_distribution
+ using std::discrete_distribution;
+
+ // [rand.dist.samp.pconst], class template piecewise_constant_distribution
+ using std::piecewise_constant_distribution;
+
+ // [rand.dist.samp.plinear], class template piecewise_linear_distribution
+ using std::piecewise_linear_distribution;
+} // namespace std
+
+// ranges.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ namespace ranges {
+ inline namespace __cpo {
+ // [range.access], range access
+ using std::ranges::__cpo::begin;
+ using std::ranges::__cpo::cbegin;
+ using std::ranges::__cpo::cend;
+ using std::ranges::__cpo::crbegin;
+ using std::ranges::__cpo::crend;
+ using std::ranges::__cpo::end;
+ using std::ranges::__cpo::rbegin;
+ using std::ranges::__cpo::rend;
+
+ using std::ranges::__cpo::cdata;
+ using std::ranges::__cpo::data;
+ using std::ranges::__cpo::empty;
+ using std::ranges::__cpo::size;
+ using std::ranges::__cpo::ssize;
+ } // namespace __cpo
+
+ // [range.range], ranges
+ using std::ranges::range;
+
+ using std::ranges::enable_borrowed_range;
+
+ using std::ranges::borrowed_range;
+
+ // using std::ranges::const_iterator_t;
+ // using std::ranges::const_sentinel_t;
+ using std::ranges::iterator_t;
+ // using std::ranges::range_const_reference_t;
+ using std::ranges::range_common_reference_t;
+ using std::ranges::range_difference_t;
+ using std::ranges::range_reference_t;
+ using std::ranges::range_rvalue_reference_t;
+ using std::ranges::range_size_t;
+ using std::ranges::range_value_t;
+ using std::ranges::sentinel_t;
+
+ // [range.sized], sized ranges
+ using std::ranges::disable_sized_range;
+ using std::ranges::sized_range;
+
+ // [range.view], views
+ using std::ranges::enable_view;
+ using std::ranges::view;
+ using std::ranges::view_base;
+
+ // [range.refinements], other range refinements
+ using std::ranges::bidirectional_range;
+ using std::ranges::common_range;
+ // using std::ranges::constant_range;
+ using std::ranges::contiguous_range;
+ using std::ranges::forward_range;
+ using std::ranges::input_range;
+ using std::ranges::output_range;
+ using std::ranges::random_access_range;
+ using std::ranges::viewable_range;
+
+    // [view.interface], class template view_interface
+ using std::ranges::view_interface;
+
+ // [range.subrange], sub-ranges
+ using std::ranges::subrange;
+ using std::ranges::subrange_kind;
+
+ using std::ranges::get;
+ } // namespace ranges
+
+ using std::ranges::get;
+
+ namespace ranges {
+
+ // [range.dangling], dangling iterator handling
+ using std::ranges::dangling;
+
+    // [range.elementsof], class template elements_of
+ // using std::ranges::elements_of;
+
+ using std::ranges::borrowed_iterator_t;
+
+ using std::ranges::borrowed_subrange_t;
+
+#if _LIBCPP_STD_VER >= 23
+ // [range.utility.conv], range conversions
+ using std::ranges::to;
+#endif
+
+ // [range.empty], empty view
+ using std::ranges::empty_view;
+
+ namespace views {
+ using std::ranges::views::empty;
+ }
+
+ // [range.single], single view
+ using std::ranges::single_view;
+
+ namespace views {
+ using std::ranges::views::single;
+ } // namespace views
+
+ // [range.iota], iota view
+ using std::ranges::iota_view;
+
+ namespace views {
+ using std::ranges::views::iota;
+ } // namespace views
+
+#if _LIBCPP_STD_VER >= 23
+ // [range.repeat], repeat view
+ using std::ranges::repeat_view;
+
+ namespace views {
+ using std::ranges::views::repeat;
+ } // namespace views
+#endif // _LIBCPP_STD_VER >= 23
+
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ // [range.istream], istream view
+ using std::ranges::basic_istream_view;
+ using std::ranges::istream_view;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::ranges::wistream_view;
+# endif
+
+ namespace views {
+ using std::ranges::views::istream;
+ }
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+
+ // [range.adaptor.object], range adaptor objects
+ // using std::ranges::range_adaptor_closure;
+
+ // [range.all], all view
+ namespace views {
+ using std::ranges::views::all;
+ using std::ranges::views::all_t;
+ } // namespace views
+
+ // [range.ref.view], ref view
+ using std::ranges::ref_view;
+
+ // [range.owning.view], owning view
+ using std::ranges::owning_view;
+
+#if _LIBCPP_STD_VER >= 23
+ // [range.as.rvalue], as rvalue view
+ using std::ranges::as_rvalue_view;
+
+ namespace views {
+ using std::ranges::views::as_rvalue;
+ } // namespace views
+#endif // _LIBCPP_STD_VER >= 23
+
+ // [range.filter], filter view
+ using std::ranges::filter_view;
+
+ namespace views {
+ using std::ranges::views::filter;
+ } // namespace views
+
+ // [range.transform], transform view
+ using std::ranges::transform_view;
+
+ namespace views {
+ using std::ranges::views::transform;
+ } // namespace views
+
+ // [range.take], take view
+ using std::ranges::take_view;
+
+ namespace views {
+ using std::ranges::views::take;
+ } // namespace views
+
+ // [range.take.while], take while view
+ using std::ranges::take_while_view;
+
+ namespace views {
+ using std::ranges::views::take_while;
+ } // namespace views
+
+ // [range.drop], drop view
+ using std::ranges::drop_view;
+
+ namespace views {
+ using std::ranges::views::drop;
+ } // namespace views
+
+ // [range.drop.while], drop while view
+ using std::ranges::drop_while_view;
+
+ namespace views {
+ using std::ranges::views::drop_while;
+ } // namespace views
+
+#ifdef _LIBCPP_ENABLE_EXPERIMENTAL
+ using std::ranges::join_view;
+
+ namespace views {
+ using std::ranges::views::join;
+ } // namespace views
+#endif // _LIBCPP_ENABLE_EXPERIMENTAL
+#if 0
+ using std::ranges::join_with_view;
+
+ namespace views {
+ using std::ranges::views::join_with;
+ } // namespace views
+#endif
+ using std::ranges::lazy_split_view;
+
+ // [range.split], split view
+ using std::ranges::split_view;
+
+ namespace views {
+ using std::ranges::views::lazy_split;
+ using std::ranges::views::split;
+ } // namespace views
+
+ // [range.counted], counted view
+ namespace views {
+ using std::ranges::views::counted;
+ } // namespace views
+
+ // [range.common], common view
+ using std::ranges::common_view;
+
+ namespace views {
+ using std::ranges::views::common;
+ } // namespace views
+
+ // [range.reverse], reverse view
+ using std::ranges::reverse_view;
+
+ namespace views {
+ using std::ranges::views::reverse;
+ } // namespace views
+
+ // [range.as.const], as const view
+#if 0
+ using std::ranges::as_const_view;
+
+ namespace views {
+ using std::ranges::views::as_const;
+ } // namespace views
+#endif
+ // [range.elements], elements view
+ using std::ranges::elements_view;
+
+ using std::ranges::keys_view;
+ using std::ranges::values_view;
+
+ namespace views {
+ using std::ranges::views::elements;
+ using std::ranges::views::keys;
+ using std::ranges::views::values;
+ } // namespace views
+
+#if _LIBCPP_STD_VER >= 23
+ // [range.zip], zip view
+ using std::ranges::zip_view;
+
+ namespace views {
+ using std::ranges::views::zip;
+ } // namespace views
+#endif // _LIBCPP_STD_VER >= 23
+
+#if 0
+ // [range.zip.transform], zip transform view
+ using std::ranges::zip_transform_view;
+
+ namespace views {
+ using std::ranges::views::zip_transform;
+ }
+
+ using std::ranges::adjacent_view;
+
+ namespace views {
+ using std::ranges::views::adjacent;
+ using std::ranges::views::pairwise;
+ } // namespace views
+
+ using std::ranges::adjacent_transform_view;
+
+ namespace views {
+ using std::ranges::views::adjacent_transform;
+ using std::ranges::views::pairwise_transform;
+ } // namespace views
+
+ using std::ranges::chunk_view;
+
+ using std::ranges::chunk_view<V>;
+
+ namespace views {
+ using std::ranges::views::chunk;
+ }
+
+ using std::ranges::slide_view;
+
+ namespace views {
+ using std::ranges::views::slide;
+ }
+#endif
+
+#if _LIBCPP_STD_VER >= 23
+#if _LIBCPP_VERSION >= 180000
+ // [range.chunk.by], chunk by view
+ using std::ranges::chunk_by_view;
+
+ namespace views {
+ using std::ranges::views::chunk_by;
+ }
+#endif
+#endif // _LIBCPP_STD_VER >= 23
+
+#if 0
+ // [range.stride], stride view
+ using std::ranges::stride_view;
+
+ namespace views {
+ using std::ranges::views::stride;
+ }
+
+ using std::ranges::cartesian_product_view;
+
+ namespace views {
+ using std::ranges::views::cartesian_product;
+ }
+#endif
+ } // namespace ranges
+
+ namespace views = ranges::views;
+
+ using std::tuple_element;
+ using std::tuple_size;
+
+#if _LIBCPP_STD_VER >= 23
+ using std::from_range;
+ using std::from_range_t;
+#endif // _LIBCPP_STD_VER >= 23
+} // namespace std
+
+// ratio.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [ratio.ratio], class template ratio
+ using std::ratio;
+
+ // [ratio.arithmetic], ratio arithmetic
+ using std::ratio_add;
+ using std::ratio_divide;
+ using std::ratio_multiply;
+ using std::ratio_subtract;
+
+ // [ratio.comparison], ratio comparison
+ using std::ratio_equal;
+ using std::ratio_greater;
+ using std::ratio_greater_equal;
+ using std::ratio_less;
+ using std::ratio_less_equal;
+ using std::ratio_not_equal;
+
+ using std::ratio_equal_v;
+ using std::ratio_greater_equal_v;
+ using std::ratio_greater_v;
+ using std::ratio_less_equal_v;
+ using std::ratio_less_v;
+ using std::ratio_not_equal_v;
+
+ // [ratio.si], convenience SI typedefs
+ using std::atto;
+ using std::centi;
+ using std::deca;
+ using std::deci;
+ using std::exa;
+ using std::femto;
+ using std::giga;
+ using std::hecto;
+ using std::kilo;
+ using std::mega;
+ using std::micro;
+ using std::milli;
+ using std::nano;
+ using std::peta;
+ using std::pico;
+ using std::tera;
+
+ // These are not supported by libc++, due to the range of intmax_t
+ // using std::yocto;
+ // using std::yotta;
+ // using std::zepto;
+ // using std::zetta
+} // namespace std
+
+// rcu.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if 0
+# if _LIBCPP_STD_VER >= 23
+  // 2.2.3, class template rcu_obj_base
+  using std::rcu_obj_base;
+  // 2.2.4, class rcu_domain
+  using std::rcu_domain;
+  using std::rcu_default_domain;
+ using std::rcu_barrier;
+ using std::rcu_retire;
+ using std::rcu_synchronize;
+# endif // _LIBCPP_STD_VER >= 23
+#endif
+} // namespace std
+
+// regex.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ // [re.const], regex constants
+ namespace regex_constants {
+ using std::regex_constants::error_type;
+ using std::regex_constants::match_flag_type;
+ using std::regex_constants::syntax_option_type;
+
+ // regex_constants is a bitmask type.
+ // [bitmask.types] specified operators
+ using std::regex_constants::operator&;
+ using std::regex_constants::operator&=;
+ using std::regex_constants::operator^;
+ using std::regex_constants::operator^=;
+ using std::regex_constants::operator|;
+ using std::regex_constants::operator|=;
+ using std::regex_constants::operator~;
+
+ } // namespace regex_constants
+
+ // [re.badexp], class regex_error
+ using std::regex_error;
+
+ // [re.traits], class template regex_traits
+ using std::regex_traits;
+
+ // [re.regex], class template basic_regex
+ using std::basic_regex;
+
+ using std::regex;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wregex;
+# endif
+
+ // [re.regex.swap], basic_regex swap
+ using std::swap;
+
+ // [re.submatch], class template sub_match
+ using std::sub_match;
+
+ using std::csub_match;
+ using std::ssub_match;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wcsub_match;
+ using std::wssub_match;
+# endif
+
+ // [re.submatch.op], sub_match non-member operators
+ using std::operator==;
+ using std::operator<=>;
+
+ using std::operator<<;
+
+ // [re.results], class template match_results
+ using std::match_results;
+
+ using std::cmatch;
+ using std::smatch;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wcmatch;
+ using std::wsmatch;
+# endif
+
+ // match_results comparisons
+
+ // [re.results.swap], match_results swap
+
+ // [re.alg.match], function template regex_match
+ using std::regex_match;
+
+ // [re.alg.search], function template regex_search
+ using std::regex_search;
+
+ // [re.alg.replace], function template regex_replace
+ using std::regex_replace;
+
+ // [re.regiter], class template regex_iterator
+ using std::regex_iterator;
+
+ using std::cregex_iterator;
+ using std::sregex_iterator;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wcregex_iterator;
+ using std::wsregex_iterator;
+# endif
+
+ // [re.tokiter], class template regex_token_iterator
+ using std::regex_token_iterator;
+
+ using std::cregex_token_iterator;
+ using std::sregex_token_iterator;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wcregex_token_iterator;
+ using std::wsregex_token_iterator;
+# endif
+
+ namespace pmr {
+ using std::pmr::match_results;
+
+ using std::pmr::cmatch;
+ using std::pmr::smatch;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::pmr::wcmatch;
+ using std::pmr::wsmatch;
+# endif
+ } // namespace pmr
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// scoped_allocator.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // class template scoped_allocator_adaptor
+ using std::scoped_allocator_adaptor;
+
+ // [scoped.adaptor.operators], scoped allocator operators
+ using std::operator==;
+
+} // namespace std
+
+// semaphore.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_THREADS
+ // [thread.sema.cnt], class template counting_semaphore
+ using std::counting_semaphore;
+
+ using std::binary_semaphore;
+#endif // _LIBCPP_HAS_NO_THREADS
+} // namespace std
+
+// set.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [set], class template set
+ using std::set;
+
+ using std::operator==;
+ using std::operator<=>;
+
+ using std::swap;
+
+ // [set.erasure], erasure for set
+ using std::erase_if;
+
+ // [multiset], class template multiset
+ using std::multiset;
+
+ namespace pmr {
+ using std::pmr::multiset;
+ using std::pmr::set;
+ } // namespace pmr
+} // namespace std
+
+// shared_mutex.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_THREADS
+  // [thread.sharedmutex.class], class shared_mutex
+ using std::shared_mutex;
+  // [thread.sharedtimedmutex.class], class shared_timed_mutex
+ using std::shared_timed_mutex;
+  // [thread.lock.shared], class template shared_lock
+ using std::shared_lock;
+ using std::swap;
+#endif // _LIBCPP_HAS_NO_THREADS
+} // namespace std
+
+// source_location.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::source_location;
+} // namespace std
+
+// span.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // constants
+ using std::dynamic_extent;
+
+ // [views.span], class template span
+ using std::span;
+
+ namespace ranges {
+ using std::ranges::enable_borrowed_range;
+ using std::ranges::enable_view;
+ } // namespace ranges
+
+ // [span.objectrep], views of object representation
+ using std::as_bytes;
+
+ using std::as_writable_bytes;
+} // namespace std
+
+// spanstream.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if 0
+ using std::basic_spanbuf;
+
+ using std::swap;
+
+ using std::spanbuf;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wspanbuf;
+# endif
+
+ using std::basic_ispanstream;
+
+ using std::ispanstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wispanstream;
+# endif
+
+ using std::basic_ospanstream;
+
+ using std::ospanstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wospanstream;
+# endif
+
+ using std::basic_spanstream;
+
+ using std::spanstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wspanstream;
+# endif
+#endif
+} // namespace std
+
+// sstream.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::basic_stringbuf;
+
+ using std::swap;
+
+ using std::stringbuf;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wstringbuf;
+# endif
+
+ using std::basic_istringstream;
+
+ using std::istringstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wistringstream;
+# endif
+
+ using std::basic_ostringstream;
+
+ using std::ostringstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wostringstream;
+# endif
+
+ using std::basic_stringstream;
+
+ using std::stringstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wstringstream;
+# endif
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// stack.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [stack], class template stack
+ using std::stack;
+
+ using std::operator==;
+ using std::operator!=;
+ using std::operator<;
+ using std::operator>;
+ using std::operator<=;
+ using std::operator>=;
+ using std::operator<=>;
+
+ using std::swap;
+ using std::uses_allocator;
+} // namespace std
+
+// stacktrace.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if 0
+ // [stacktrace.entry], class stacktrace_entry
+ using std::stacktrace_entry;
+
+ // [stacktrace.basic], class template basic_stacktrace
+ using std::basic_stacktrace;
+
+ // basic_stacktrace typedef-names
+ using std::stacktrace;
+
+ // [stacktrace.basic.nonmem], non-member functions
+ using std::swap;
+
+ using std::to_string;
+
+ using std::operator<<;
+
+ namespace pmr {
+ using std::pmr::stacktrace;
+ }
+
+ // [stacktrace.basic.hash], hash support
+ using std::hash;
+#endif
+} // namespace std
+
+// stdexcept.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::domain_error;
+ using std::invalid_argument;
+ using std::length_error;
+ using std::logic_error;
+ using std::out_of_range;
+ using std::overflow_error;
+ using std::range_error;
+ using std::runtime_error;
+ using std::underflow_error;
+} // namespace std
+
+// stdfloat.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if defined(__STDCPP_FLOAT16_T__)
+ using std::float16_t;
+#endif
+#if defined(__STDCPP_FLOAT32_T__)
+ using std::float32_t;
+#endif
+#if defined(__STDCPP_FLOAT64_T__)
+ using std::float64_t;
+#endif
+#if defined(__STDCPP_FLOAT128_T__)
+ using std::float128_t;
+#endif
+#if defined(__STDCPP_BFLOAT16_T__)
+ using std::bfloat16_t;
+#endif
+} // namespace std
+
+// stop_token.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_THREADS
+# ifdef _LIBCPP_ENABLE_EXPERIMENTAL
+ // [stoptoken], class stop_token
+ using std::stop_token;
+
+ // [stopsource], class stop_source
+ using std::stop_source;
+
+ // no-shared-stop-state indicator
+ using std::nostopstate;
+ using std::nostopstate_t;
+
+ // [stopcallback], class template stop_callback
+ using std::stop_callback;
+# endif // _LIBCPP_ENABLE_EXPERIMENTAL
+#endif // _LIBCPP_HAS_NO_THREADS
+} // namespace std
+
+// streambuf.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::basic_streambuf;
+ using std::streambuf;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wstreambuf;
+# endif
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// string.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [char.traits], character traits
+ using std::char_traits;
+
+ // [basic.string], basic_string
+ using std::basic_string;
+
+ using std::operator+;
+ using std::operator==;
+ using std::operator<=>;
+
+ // [string.special], swap
+ using std::swap;
+
+ // [string.io], inserters and extractors
+ using std::operator>>;
+ using std::operator<<;
+ using std::getline;
+
+ // [string.erasure], erasure
+ using std::erase;
+ using std::erase_if;
+
+ // basic_string typedef-names
+ using std::string;
+ using std::u16string;
+ using std::u32string;
+ using std::u8string;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wstring;
+#endif
+
+ // [string.conversions], numeric conversions
+ using std::stod;
+ using std::stof;
+ using std::stoi;
+ using std::stol;
+ using std::stold;
+ using std::stoll;
+ using std::stoul;
+ using std::stoull;
+ using std::to_string;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::to_wstring;
+#endif
+
+ namespace pmr {
+ using std::pmr::basic_string;
+ using std::pmr::string;
+ using std::pmr::u16string;
+ using std::pmr::u32string;
+ using std::pmr::u8string;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::pmr::wstring;
+#endif
+ } // namespace pmr
+
+ // [basic.string.hash], hash support
+ using std::hash;
+
+ // TODO MODULES is this a bug?
+#if _LIBCPP_STD_VER >= 23
+ using std::operator""s;
+#else
+ inline namespace literals {
+ inline namespace string_literals {
+ // [basic.string.literals], suffix for basic_string literals
+ using std::literals::string_literals::operator""s;
+ } // namespace string_literals
+ } // namespace literals
+#endif
+} // namespace std
+
+// string_view.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [string.view.template], class template basic_string_view
+ using std::basic_string_view;
+
+ namespace ranges {
+ using std::ranges::enable_borrowed_range;
+ using std::ranges::enable_view;
+ } // namespace ranges
+
+ // [string.view.comparison], non-member comparison functions
+ using std::operator==;
+ using std::operator<=>;
+
+ // [string.view.io], inserters and extractors
+ using std::operator<<;
+
+ // basic_string_view typedef-names
+ using std::string_view;
+ using std::u16string_view;
+ using std::u32string_view;
+ using std::u8string_view;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wstring_view;
+#endif
+
+ // [string.view.hash], hash support
+ using std::hash;
+
+ inline namespace literals {
+ inline namespace string_view_literals {
+ // [string.view.literals], suffix for basic_string_view literals
+ using std::literals::string_view_literals::operator""sv;
+ } // namespace string_view_literals
+ } // namespace literals
+} // namespace std
+
+// strstream.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::istrstream;
+ using std::ostrstream;
+ using std::strstream;
+ using std::strstreambuf;
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// syncstream.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifdef _LIBCPP_HAS_YES_SYNCSTREAM
+
+export namespace std {
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION) && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_SYNCSTREAM)
+ using std::basic_syncbuf;
+
+ // [syncstream.syncbuf.special], specialized algorithms
+ using std::swap;
+
+ using std::syncbuf;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wsyncbuf;
+# endif
+ using std::basic_osyncstream;
+
+ using std::osyncstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wosyncstream;
+# endif
+#endif // !defined(_LIBCPP_HAS_NO_LOCALIZATION) && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_SYNCSTREAM)
+} // namespace std
+
+#endif
+
+// system_error.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::error_category;
+ using std::generic_category;
+ using std::system_category;
+
+ using std::error_code;
+ using std::error_condition;
+ using std::system_error;
+
+ using std::is_error_code_enum;
+ using std::is_error_condition_enum;
+
+ using std::errc;
+
+ // [syserr.errcode.nonmembers], non-member functions
+ using std::make_error_code;
+
+ using std::operator<<;
+
+ // [syserr.errcondition.nonmembers], non-member functions
+ using std::make_error_condition;
+
+ // [syserr.compare], comparison operator functions
+ using std::operator==;
+ using std::operator<=>;
+
+ // [syserr.hash], hash support
+ using std::hash;
+
+ // [syserr], system error support
+ using std::is_error_code_enum_v;
+ using std::is_error_condition_enum_v;
+} // namespace std
+
+// text_encoding.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if 0
+# if _LIBCPP_STD_VER >= 23
+ using std::text_encoding;
+
+ // hash support
+ using std::hash;
+# endif // _LIBCPP_STD_VER >= 23
+#endif
+} // namespace std
+
+// thread.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_THREADS
+ // [thread.thread.class], class thread
+ using std::thread;
+
+ using std::swap;
+
+ // [thread.jthread.class], class jthread
+# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN)
+ using std::jthread;
+# endif
+
+ // [thread.thread.this], namespace this_thread
+ namespace this_thread {
+ using std::this_thread::get_id;
+
+ using std::this_thread::sleep_for;
+ using std::this_thread::sleep_until;
+ using std::this_thread::yield;
+ } // namespace this_thread
+
+ // [thread.thread.id]
+ using std::operator==;
+ using std::operator<=>;
+# ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::operator<<;
+# endif // _LIBCPP_HAS_NO_LOCALIZATION
+
+# if _LIBCPP_STD_VER >= 23
+ using std::formatter;
+# endif
+
+ using std::hash;
+#endif // _LIBCPP_HAS_NO_THREADS
+} // namespace std
+
+// tuple.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [tuple.tuple], class template tuple
+ using std::tuple;
+
+ // [tuple.like], concept tuple-like
+
+#if _LIBCPP_STD_VER >= 23
+ // [tuple.common.ref], common_reference related specializations
+ using std::basic_common_reference;
+ using std::common_type;
+#endif
+
+ // [tuple.creation], tuple creation functions
+ using std::ignore;
+
+ using std::forward_as_tuple;
+ using std::make_tuple;
+ using std::tie;
+ using std::tuple_cat;
+
+ // [tuple.apply], calling a function with a tuple of arguments
+ using std::apply;
+
+ using std::make_from_tuple;
+
+ // [tuple.helper], tuple helper classes
+ using std::tuple_element;
+ using std::tuple_size;
+
+ // [tuple.elem], element access
+ using std::get;
+ using std::tuple_element_t;
+
+ // [tuple.rel], relational operators
+ using std::operator==;
+ using std::operator<=>;
+
+ // [tuple.traits], allocator-related traits
+ using std::uses_allocator;
+
+ // [tuple.special], specialized algorithms
+ using std::swap;
+
+ // [tuple.helper], tuple helper classes
+ using std::tuple_size_v;
+} // namespace std
+
+// type_traits.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [meta.help], helper class
+ using std::integral_constant;
+
+ using std::bool_constant;
+ using std::false_type;
+ using std::true_type;
+
+ // [meta.unary.cat], primary type categories
+ using std::is_array;
+ using std::is_class;
+ using std::is_enum;
+ using std::is_floating_point;
+ using std::is_function;
+ using std::is_integral;
+ using std::is_lvalue_reference;
+ using std::is_member_function_pointer;
+ using std::is_member_object_pointer;
+ using std::is_null_pointer;
+ using std::is_pointer;
+ using std::is_rvalue_reference;
+ using std::is_union;
+ using std::is_void;
+
+ // [meta.unary.comp], composite type categories
+ using std::is_arithmetic;
+ using std::is_compound;
+ using std::is_fundamental;
+ using std::is_member_pointer;
+ using std::is_object;
+ using std::is_reference;
+ using std::is_scalar;
+
+ // [meta.unary.prop], type properties
+ using std::is_abstract;
+ using std::is_aggregate;
+ using std::is_const;
+ using std::is_empty;
+ using std::is_final;
+ using std::is_polymorphic;
+ using std::is_standard_layout;
+ using std::is_trivial;
+ using std::is_trivially_copyable;
+ using std::is_volatile;
+
+ using std::is_bounded_array;
+#if _LIBCPP_STD_VER >= 23
+ using std::is_scoped_enum;
+#endif
+ using std::is_signed;
+ using std::is_unbounded_array;
+ using std::is_unsigned;
+
+ using std::is_constructible;
+ using std::is_copy_constructible;
+ using std::is_default_constructible;
+ using std::is_move_constructible;
+
+ using std::is_assignable;
+ using std::is_copy_assignable;
+ using std::is_move_assignable;
+
+ using std::is_swappable;
+ using std::is_swappable_with;
+
+ using std::is_destructible;
+
+ using std::is_trivially_constructible;
+ using std::is_trivially_copy_constructible;
+ using std::is_trivially_default_constructible;
+ using std::is_trivially_move_constructible;
+
+ using std::is_trivially_assignable;
+ using std::is_trivially_copy_assignable;
+ using std::is_trivially_destructible;
+ using std::is_trivially_move_assignable;
+
+ using std::is_nothrow_constructible;
+ using std::is_nothrow_copy_constructible;
+ using std::is_nothrow_default_constructible;
+ using std::is_nothrow_move_constructible;
+
+ using std::is_nothrow_assignable;
+ using std::is_nothrow_copy_assignable;
+ using std::is_nothrow_move_assignable;
+
+ using std::is_nothrow_swappable;
+ using std::is_nothrow_swappable_with;
+
+ using std::is_nothrow_destructible;
+
+ // using std::is_implicit_lifetime;
+
+ using std::has_virtual_destructor;
+
+ using std::has_unique_object_representations;
+
+ // using std::reference_constructs_from_temporary;
+ // using std::reference_converts_from_temporary;
+
+ // [meta.unary.prop.query], type property queries
+ using std::alignment_of;
+ using std::extent;
+ using std::rank;
+
+ // [meta.rel], type relations
+ using std::is_base_of;
+ using std::is_convertible;
+ // using std::is_layout_compatible;
+ using std::is_nothrow_convertible;
+ // using std::is_pointer_interconvertible_base_of;
+ using std::is_same;
+
+ using std::is_invocable;
+ using std::is_invocable_r;
+
+ using std::is_nothrow_invocable;
+ using std::is_nothrow_invocable_r;
+
+ // [meta.trans.cv], const-volatile modifications
+ using std::add_const;
+ using std::add_cv;
+ using std::add_volatile;
+ using std::remove_const;
+ using std::remove_cv;
+ using std::remove_volatile;
+
+ using std::add_const_t;
+ using std::add_cv_t;
+ using std::add_volatile_t;
+ using std::remove_const_t;
+ using std::remove_cv_t;
+ using std::remove_volatile_t;
+
+ // [meta.trans.ref], reference modifications
+ using std::add_lvalue_reference;
+ using std::add_rvalue_reference;
+ using std::remove_reference;
+
+ using std::add_lvalue_reference_t;
+ using std::add_rvalue_reference_t;
+ using std::remove_reference_t;
+
+ // [meta.trans.sign], sign modifications
+ using std::make_signed;
+ using std::make_unsigned;
+
+ using std::make_signed_t;
+ using std::make_unsigned_t;
+
+ // [meta.trans.arr], array modifications
+ using std::remove_all_extents;
+ using std::remove_extent;
+
+ using std::remove_all_extents_t;
+ using std::remove_extent_t;
+
+ // [meta.trans.ptr], pointer modifications
+ using std::add_pointer;
+ using std::remove_pointer;
+
+ using std::add_pointer_t;
+ using std::remove_pointer_t;
+
+ // [meta.trans.other], other transformations
+ using std::basic_common_reference;
+ using std::common_reference;
+ using std::common_type;
+ using std::conditional;
+ using std::decay;
+ using std::enable_if;
+ using std::invoke_result;
+ using std::remove_cvref;
+ using std::type_identity;
+ using std::underlying_type;
+ using std::unwrap_ref_decay;
+ using std::unwrap_reference;
+
+ using std::common_reference_t;
+ using std::common_type_t;
+ using std::conditional_t;
+ using std::decay_t;
+ using std::enable_if_t;
+ using std::invoke_result_t;
+ using std::remove_cvref_t;
+ using std::type_identity_t;
+ using std::underlying_type_t;
+ using std::unwrap_ref_decay_t;
+ using std::unwrap_reference_t;
+ using std::void_t;
+
+ // [meta.logical], logical operator traits
+ using std::conjunction;
+ using std::disjunction;
+ using std::negation;
+
+ // [meta.unary.cat], primary type categories
+ using std::is_array_v;
+ using std::is_class_v;
+ using std::is_enum_v;
+ using std::is_floating_point_v;
+ using std::is_function_v;
+ using std::is_integral_v;
+ using std::is_lvalue_reference_v;
+ using std::is_member_function_pointer_v;
+ using std::is_member_object_pointer_v;
+ using std::is_null_pointer_v;
+ using std::is_pointer_v;
+ using std::is_rvalue_reference_v;
+ using std::is_union_v;
+ using std::is_void_v;
+
+ // [meta.unary.comp], composite type categories
+ using std::is_arithmetic_v;
+ using std::is_compound_v;
+ using std::is_fundamental_v;
+ using std::is_member_pointer_v;
+ using std::is_object_v;
+ using std::is_reference_v;
+ using std::is_scalar_v;
+
+ // [meta.unary.prop], type properties
+ using std::has_unique_object_representations_v;
+ using std::has_virtual_destructor_v;
+ using std::is_abstract_v;
+ using std::is_aggregate_v;
+ using std::is_assignable_v;
+ using std::is_bounded_array_v;
+ using std::is_const_v;
+ using std::is_constructible_v;
+ using std::is_copy_assignable_v;
+ using std::is_copy_constructible_v;
+ using std::is_default_constructible_v;
+ using std::is_destructible_v;
+ using std::is_empty_v;
+ using std::is_final_v;
+ // using std::is_implicit_lifetime_v;
+ using std::is_move_assignable_v;
+ using std::is_move_constructible_v;
+ using std::is_nothrow_assignable_v;
+ using std::is_nothrow_constructible_v;
+ using std::is_nothrow_copy_assignable_v;
+ using std::is_nothrow_copy_constructible_v;
+ using std::is_nothrow_default_constructible_v;
+ using std::is_nothrow_destructible_v;
+ using std::is_nothrow_move_assignable_v;
+ using std::is_nothrow_move_constructible_v;
+ using std::is_nothrow_swappable_v;
+ using std::is_nothrow_swappable_with_v;
+ using std::is_polymorphic_v;
+#if _LIBCPP_STD_VER >= 23
+ using std::is_scoped_enum_v;
+#endif
+ using std::is_signed_v;
+ using std::is_standard_layout_v;
+ using std::is_swappable_v;
+ using std::is_swappable_with_v;
+ using std::is_trivial_v;
+ using std::is_trivially_assignable_v;
+ using std::is_trivially_constructible_v;
+ using std::is_trivially_copy_assignable_v;
+ using std::is_trivially_copy_constructible_v;
+ using std::is_trivially_copyable_v;
+ using std::is_trivially_default_constructible_v;
+ using std::is_trivially_destructible_v;
+ using std::is_trivially_move_assignable_v;
+ using std::is_trivially_move_constructible_v;
+ using std::is_unbounded_array_v;
+ using std::is_unsigned_v;
+ using std::is_volatile_v;
+ // using std::reference_constructs_from_temporary_v;
+ // using std::reference_converts_from_temporary_v;
+
+ // [meta.unary.prop.query], type property queries
+ using std::alignment_of_v;
+ using std::extent_v;
+ using std::rank_v;
+
+ // [meta.rel], type relations
+ using std::is_base_of_v;
+ using std::is_convertible_v;
+ using std::is_invocable_r_v;
+ using std::is_invocable_v;
+ // using std::is_layout_compatible_v;
+ using std::is_nothrow_convertible_v;
+ using std::is_nothrow_invocable_r_v;
+ using std::is_nothrow_invocable_v;
+ // using std::is_pointer_interconvertible_base_of_v;
+ using std::is_same_v;
+
+ // [meta.logical], logical operator traits
+ using std::conjunction_v;
+ using std::disjunction_v;
+ using std::negation_v;
+
+ // [meta.member], member relationships
+ // using std::is_corresponding_member;
+ // using std::is_pointer_interconvertible_with_class;
+
+ // [meta.const.eval], constant evaluation context
+ using std::is_constant_evaluated;
+
+ // [depr.meta.types]
+ using std::aligned_storage;
+ using std::aligned_storage_t;
+ using std::aligned_union;
+ using std::aligned_union_t;
+ using std::is_pod;
+ using std::is_pod_v;
+} // namespace std
+
+// typeindex.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::hash;
+ using std::type_index;
+} // namespace std
+
+// typeinfo.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::bad_cast;
+ using std::bad_typeid;
+ using std::type_info;
+} // namespace std
+
+// unordered_map.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [unord.map], class template unordered_map
+ using std::unordered_map;
+
+ // [unord.multimap], class template unordered_multimap
+ using std::unordered_multimap;
+
+ using std::operator==;
+
+ using std::swap;
+
+ // [unord.map.erasure], erasure for unordered_map
+ using std::erase_if;
+
+ namespace pmr {
+ using std::pmr::unordered_map;
+ using std::pmr::unordered_multimap;
+ } // namespace pmr
+} // namespace std
+
+// unordered_set.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [unord.set], class template unordered_set
+ using std::unordered_set;
+
+ // [unord.multiset], class template unordered_multiset
+ using std::unordered_multiset;
+
+ using std::operator==;
+
+ using std::swap;
+
+ // [unord.set.erasure], erasure for unordered_set
+ using std::erase_if;
+
+ namespace pmr {
+ using std::pmr::unordered_multiset;
+ using std::pmr::unordered_set;
+ } // namespace pmr
+} // namespace std
+
+// utility.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [utility.swap], swap
+ using std::swap;
+
+ // [utility.exchange], exchange
+ using std::exchange;
+
+ // [forward], forward/move
+ using std::forward;
+#if _LIBCPP_STD_VER >= 23
+ using std::forward_like;
+#endif
+ using std::move;
+ using std::move_if_noexcept;
+
+ // [utility.as.const], as_const
+ using std::as_const;
+
+ // [declval], declval
+ using std::declval;
+
+ // [utility.intcmp], integer comparison functions
+ using std::cmp_equal;
+ using std::cmp_not_equal;
+
+ using std::cmp_greater;
+ using std::cmp_greater_equal;
+ using std::cmp_less;
+ using std::cmp_less_equal;
+
+ using std::in_range;
+
+#if _LIBCPP_STD_VER >= 23
+ // [utility.underlying], to_underlying
+ using std::to_underlying;
+
+ // [utility.unreachable], unreachable
+ using std::unreachable;
+#endif // _LIBCPP_STD_VER >= 23
+
+ // [intseq], compile-time integer sequences
+ using std::index_sequence;
+ using std::integer_sequence;
+
+ using std::make_index_sequence;
+ using std::make_integer_sequence;
+
+ using std::index_sequence_for;
+
+ // [pairs], class template pair
+ using std::pair;
+
+#if _LIBCPP_STD_VER >= 23
+ using std::basic_common_reference;
+ using std::common_type;
+#endif
+ // [pairs.spec], pair specialized algorithms
+ using std::operator==;
+ using std::operator<=>;
+
+ using std::make_pair;
+
+ // [pair.astuple], tuple-like access to pair
+ using std::tuple_element;
+ using std::tuple_size;
+
+ using std::get;
+
+ // [pair.piecewise], pair piecewise construction
+ using std::piecewise_construct;
+ using std::piecewise_construct_t;
+
+ // in-place construction
+ using std::in_place;
+ using std::in_place_t;
+
+ using std::in_place_type;
+ using std::in_place_type_t;
+
+ using std::in_place_index;
+ using std::in_place_index_t;
+
+ // [depr.relops]
+ namespace rel_ops {
+ using rel_ops::operator!=;
+ using rel_ops::operator>;
+ using rel_ops::operator<=;
+ using rel_ops::operator>=;
+ } // namespace rel_ops
+} // namespace std
+
+// valarray.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::gslice;
+ using std::gslice_array;
+ using std::indirect_array;
+ using std::mask_array;
+ using std::slice;
+ using std::slice_array;
+ using std::valarray;
+
+ using std::swap;
+
+ using std::operator*;
+ using std::operator/;
+ using std::operator%;
+ using std::operator+;
+ using std::operator-;
+
+ using std::operator^;
+ using std::operator&;
+ using std::operator|;
+
+ using std::operator<<;
+ using std::operator>>;
+
+ using std::operator&&;
+ using std::operator||;
+
+ using std::operator==;
+ using std::operator!=;
+
+ using std::operator<;
+ using std::operator>;
+ using std::operator<=;
+ using std::operator>=;
+
+ using std::abs;
+ using std::acos;
+ using std::asin;
+ using std::atan;
+
+ using std::atan2;
+
+ using std::cos;
+ using std::cosh;
+ using std::exp;
+ using std::log;
+ using std::log10;
+
+ using std::pow;
+
+ using std::sin;
+ using std::sinh;
+ using std::sqrt;
+ using std::tan;
+ using std::tanh;
+
+ using std::begin;
+ using std::end;
+} // namespace std
+
+// variant.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [variant.variant], class template variant
+ using std::variant;
+
+ // [variant.helper], variant helper classes
+ using std::variant_alternative;
+ using std::variant_npos;
+ using std::variant_size;
+ using std::variant_size_v;
+
+ // [variant.get], value access
+ using std::get;
+ using std::get_if;
+ using std::holds_alternative;
+ using std::variant_alternative_t;
+
+ // [variant.relops], relational operators
+ using std::operator==;
+ using std::operator!=;
+ using std::operator<;
+ using std::operator>;
+ using std::operator<=;
+ using std::operator>=;
+ using std::operator<=>;
+
+ // [variant.visit], visitation
+ using std::visit;
+
+ // [variant.monostate], class monostate
+ using std::monostate;
+
+ // [variant.specalg], specialized algorithms
+ using std::swap;
+
+ // [variant.bad.access], class bad_variant_access
+ using std::bad_variant_access;
+
+ // [variant.hash], hash support
+ using std::hash;
+} // namespace std
+
+// vector.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [vector], class template vector
+ using std::vector;
+
+ using std::operator==;
+ using std::operator<=>;
+
+ using std::swap;
+
+ // [vector.erasure], erasure
+ using std::erase;
+ using std::erase_if;
+
+ namespace pmr {
+ using std::pmr::vector;
+ }
+
+ // hash support
+ using std::hash;
+
+#if _LIBCPP_STD_VER >= 23
+ // [vector.bool.fmt], formatter specialization for vector<bool>
+ using std::formatter;
+#endif
+} // namespace std
+
+// version.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // This module exports nothing.
+} // namespace std
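For orientation, a minimal consumer of the std module interface above could look as follows (a hypothetical sketch, not part of this patch; it assumes a compiler and build configuration in which this std.cppm is compiled and mapped as the std module):

  // consumer.cxx: hypothetical example of importing the std module.
  import std;

  int main ()
  {
    std::vector<std::string> v {"hello", "modules"};

    for (const std::string& s: v)
      std::cout << s << '\n';
  }
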
diff --git a/libbuild2/cc/target.cxx b/libbuild2/cc/target.cxx
index d743752..6a518dd 100644
--- a/libbuild2/cc/target.cxx
+++ b/libbuild2/cc/target.cxx
@@ -66,6 +66,33 @@ namespace build2
target_type::flag::none
};
+ extern const char S_ext_def[] = "S";
+ const target_type S::static_type
+ {
+ "S",
+ &cc::static_type,
+ &target_factory<S>,
+ nullptr, /* fixed_extension */
+ &target_extension_var<S_ext_def>,
+ &target_pattern_var<S_ext_def>,
+ nullptr,
+ &file_search,
+ target_type::flag::none
+ };
+
+ const target_type c_inc::static_type
+ {
+ "c_inc",
+ &cc::static_type,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ &target_search,
+ target_type::flag::none
+ };
+
extern const char pc_ext[] = "pc"; // VC14 rejects constexpr.
const target_type pc::static_type
{
diff --git a/libbuild2/cc/target.hxx b/libbuild2/cc/target.hxx
index 87df326..01f2d6e 100644
--- a/libbuild2/cc/target.hxx
+++ b/libbuild2/cc/target.hxx
@@ -84,6 +84,41 @@ namespace build2
static const target_type static_type;
};
+ // Assembler with C preprocessor source file (the same rationale for
+ // having it here as for c{} above).
+ //
+ class LIBBUILD2_CC_SYMEXPORT S: public cc
+ {
+ public:
+ S (context& c, dir_path d, dir_path o, string n)
+ : cc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
+
+ public:
+ static const target_type static_type;
+ };
+
+ // This is an abstract base target for deriving additional targets that
+ // can be #include'd in C translation units (the same rationale for having
+ // it here as for c{} above). In particular, only such targets will be
+ // considered to reverse-lookup extensions to target types (see
+ // dyndep_rule::map_extension() for background).
+ //
+ class LIBBUILD2_CC_SYMEXPORT c_inc: public cc
+ {
+ public:
+ c_inc (context& c, dir_path d, dir_path o, string n)
+ : cc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
+
+ public:
+ static const target_type static_type;
+ };
+
// pkg-config file targets.
//
class LIBBUILD2_CC_SYMEXPORT pc: public file // .pc (common)
diff --git a/libbuild2/cc/types.cxx b/libbuild2/cc/types.cxx
index 8ee4fa9..c6cfae9 100644
--- a/libbuild2/cc/types.cxx
+++ b/libbuild2/cc/types.cxx
@@ -6,6 +6,7 @@
#include <libbuild2/cc/utility.hxx>
using namespace std;
+using namespace butl;
namespace build2
{
@@ -123,6 +124,8 @@ namespace build2
size_t importable_headers::
insert_angle_pattern (const dir_paths& sys_hdr_dirs, const string& pat)
{
+ tracer trace ("importable_headers::insert_angle_pattern");
+
assert (pat.front () == '<' && pat.back () == '>' && path_pattern (pat));
// First see if it has already been inserted.
@@ -172,7 +175,17 @@ namespace build2
try
{
- path_search (f, process, dir);
+ path_search (
+ f,
+ process,
+ dir,
+ path_match_flags::follow_symlinks,
+ [&trace] (const dir_entry& de)
+ {
+ l5 ([&]{trace << "skipping inaccessible/dangling entry "
+ << de.base () / de.path ();});
+ return true;
+ });
}
catch (const system_error& e)
{
diff --git a/libbuild2/cc/windows-rpath.cxx b/libbuild2/cc/windows-rpath.cxx
index 9387078..eb62ad1 100644
--- a/libbuild2/cc/windows-rpath.cxx
+++ b/libbuild2/cc/windows-rpath.cxx
@@ -45,6 +45,8 @@ namespace build2
// Return the greatest (newest) timestamp of all the DLLs that we will be
// adding to the assembly or timestamp_nonexistent if there aren't any.
//
+ // Note: called during the execute phase.
+ //
timestamp link_rule::
windows_rpath_timestamp (const file& t,
const scope& bs,
@@ -88,7 +90,18 @@ namespace build2
//
if (l->is_a<libs> () && !l->path ().empty ()) // Also covers binless.
{
- timestamp t (l->load_mtime ());
+ // Handle the case where the library is a member of a group (for
+ // example, people are trying to hack something up with pre-built
+ // libraries; see GH issue #366).
+ //
+ timestamp t;
+ if (l->group_state (action () /* inner */))
+ {
+ t = l->group->is_a<mtime_target> ()->mtime ();
+ assert (t != timestamp_unknown);
+ }
+ else
+ t = l->load_mtime ();
if (t > r)
r = t;
@@ -128,7 +141,9 @@ namespace build2
library_cache lib_cache;
for (const prerequisite_target& pt: t.prerequisite_targets[a])
{
- if (pt == nullptr || pt.adhoc ())
+ // Note: during execute so check for ad hoc first to avoid data races.
+ //
+ if (pt.adhoc () || pt == nullptr)
continue;
bool la;
@@ -255,7 +270,9 @@ namespace build2
library_cache lib_cache;
for (const prerequisite_target& pt: t.prerequisite_targets[a])
{
- if (pt == nullptr || pt.adhoc ())
+ // Note: during execute so check for ad hoc first to avoid data races.
+ //
+ if (pt.adhoc () || pt == nullptr)
continue;
bool la;
diff --git a/libbuild2/cli/buildfile b/libbuild2/cli/buildfile
new file mode 100644
index 0000000..9b6e4eb
--- /dev/null
+++ b/libbuild2/cli/buildfile
@@ -0,0 +1,71 @@
+# file : libbuild2/cli/buildfile
+# license : MIT; see accompanying LICENSE file
+
+# NOTE: shared imports should go into root.build.
+#
+include ../
+impl_libs = ../lib{build2} # Implied interface dependency.
+
+include ../cxx/
+intf_libs = ../cxx/lib{build2-cxx}
+
+./: lib{build2-cli}: libul{build2-cli}: {hxx ixx txx cxx}{** -**.test...} \
+ $intf_libs $impl_libs
+
+# Unit tests.
+#
+exe{*.test}:
+{
+ test = true
+ install = false
+}
+
+for t: cxx{**.test...}
+{
+ d = $directory($t)
+ n = $name($t)...
+
+ ./: $d/exe{$n}: $t $d/{hxx ixx txx}{+$n} $d/testscript{+$n}
+ $d/exe{$n}: libul{build2-cli}: bin.whole = false
+}
+
+# Build options.
+#
+obja{*}: cxx.poptions += -DLIBBUILD2_CLI_STATIC_BUILD
+objs{*}: cxx.poptions += -DLIBBUILD2_CLI_SHARED_BUILD
+
+# Export options.
+#
+lib{build2-cli}:
+{
+ cxx.export.poptions = "-I$out_root" "-I$src_root"
+ cxx.export.libs = $intf_libs
+}
+
+liba{build2-cli}: cxx.export.poptions += -DLIBBUILD2_CLI_STATIC
+libs{build2-cli}: cxx.export.poptions += -DLIBBUILD2_CLI_SHARED
+
+# For pre-releases use the complete version to make sure they cannot be used
+# in place of another pre-release or the final version. See the version module
+# for details on the version.* variable values.
+#
+# And because this is a build system module, we also embed the same value as
+# the interface version (note that we cannot use build.version.interface for
+# bundled modules because we could be built with a different version of the
+# build system).
+#
+ver = ($version.pre_release \
+ ? "$version.project_id" \
+ : "$version.major.$version.minor")
+
+lib{build2-cli}: bin.lib.version = @"-$ver"
+libs{build2-cli}: bin.lib.load_suffix = "-$ver"
+
+# Install into the libbuild2/cli/ subdirectory of, say, /usr/include/
+# recreating subdirectories.
+#
+{hxx ixx txx}{*}:
+{
+ install = include/libbuild2/cli/
+ install.subdirs = true
+}
diff --git a/libbuild2/cli/export.hxx b/libbuild2/cli/export.hxx
new file mode 100644
index 0000000..67c1eb9
--- /dev/null
+++ b/libbuild2/cli/export.hxx
@@ -0,0 +1,37 @@
+// file : libbuild2/cli/export.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#pragma once
+
+// Normally we don't export class templates (but do complete specializations),
+// inline functions, and classes with only inline member functions. Exporting
+// classes that inherit from non-exported/imported bases (e.g., std::string)
+// will end up badly. The only known workarounds are to not inherit or to not
+// export. Also, MinGW GCC doesn't like seeing non-exported functions being
+// used before their inline definition. The workaround is to reorder code. In
+// the end it's all trial and error.
+
+#if defined(LIBBUILD2_CLI_STATIC) // Using static.
+# define LIBBUILD2_CLI_SYMEXPORT
+#elif defined(LIBBUILD2_CLI_STATIC_BUILD) // Building static.
+# define LIBBUILD2_CLI_SYMEXPORT
+#elif defined(LIBBUILD2_CLI_SHARED) // Using shared.
+# ifdef _WIN32
+# define LIBBUILD2_CLI_SYMEXPORT __declspec(dllimport)
+# else
+# define LIBBUILD2_CLI_SYMEXPORT
+# endif
+#elif defined(LIBBUILD2_CLI_SHARED_BUILD) // Building shared.
+# ifdef _WIN32
+# define LIBBUILD2_CLI_SYMEXPORT __declspec(dllexport)
+# else
+# define LIBBUILD2_CLI_SYMEXPORT
+# endif
+#else
+// If none of the above macros are defined, then we assume we are being used
+// by some third-party build system that cannot/doesn't signal the library
+// type. Note that this fallback works for both static and shared but in case
+// of shared will be sub-optimal compared to having dllimport.
+//
+# define LIBBUILD2_CLI_SYMEXPORT // Using static or shared.
+#endif
diff --git a/build2/cli/init.cxx b/libbuild2/cli/init.cxx
index d7d8251..581fdaf 100644
--- a/build2/cli/init.cxx
+++ b/libbuild2/cli/init.cxx
@@ -1,7 +1,7 @@
-// file : build2/cli/init.cxx -*- C++ -*-
+// file : libbuild2/cli/init.cxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#include <build2/cli/init.hxx>
+#include <libbuild2/cli/init.hxx>
#include <libbuild2/file.hxx>
#include <libbuild2/scope.hxx>
@@ -13,9 +13,9 @@
#include <libbuild2/cxx/target.hxx>
-#include <build2/cli/rule.hxx>
-#include <build2/cli/module.hxx>
-#include <build2/cli/target.hxx>
+#include <libbuild2/cli/rule.hxx>
+#include <libbuild2/cli/module.hxx>
+#include <libbuild2/cli/target.hxx>
namespace build2
{
diff --git a/build2/cli/init.hxx b/libbuild2/cli/init.hxx
index 1c54316..6d23795 100644
--- a/build2/cli/init.hxx
+++ b/libbuild2/cli/init.hxx
@@ -1,14 +1,16 @@
-// file : build2/cli/init.hxx -*- C++ -*-
+// file : libbuild2/cli/init.hxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#ifndef BUILD2_CLI_INIT_HXX
-#define BUILD2_CLI_INIT_HXX
+#ifndef LIBBUILD2_CLI_INIT_HXX
+#define LIBBUILD2_CLI_INIT_HXX
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
#include <libbuild2/module.hxx>
+#include <libbuild2/cli/export.hxx>
+
namespace build2
{
namespace cli
@@ -21,9 +23,9 @@ namespace build2
// `cli.config` -- load `cli.guess` and set the rest of the variables.
// `cli` -- load `cli.config` and register targets and rules.
//
- extern "C" const module_functions*
+ extern "C" LIBBUILD2_CLI_SYMEXPORT const module_functions*
build2_cli_load ();
}
}
-#endif // BUILD2_CLI_INIT_HXX
+#endif // LIBBUILD2_CLI_INIT_HXX
diff --git a/build2/cli/module.hxx b/libbuild2/cli/module.hxx
index 70f6ba8..ba10540 100644
--- a/build2/cli/module.hxx
+++ b/libbuild2/cli/module.hxx
@@ -1,15 +1,15 @@
-// file : build2/cli/module.hxx -*- C++ -*-
+// file : libbuild2/cli/module.hxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#ifndef BUILD2_CLI_MODULE_HXX
-#define BUILD2_CLI_MODULE_HXX
+#ifndef LIBBUILD2_CLI_MODULE_HXX
+#define LIBBUILD2_CLI_MODULE_HXX
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
#include <libbuild2/module.hxx>
-#include <build2/cli/rule.hxx>
+#include <libbuild2/cli/rule.hxx>
namespace build2
{
@@ -27,4 +27,4 @@ namespace build2
}
}
-#endif // BUILD2_CLI_MODULE_HXX
+#endif // LIBBUILD2_CLI_MODULE_HXX
diff --git a/build2/cli/rule.cxx b/libbuild2/cli/rule.cxx
index 7f610c9..996ca51 100644
--- a/build2/cli/rule.cxx
+++ b/libbuild2/cli/rule.cxx
@@ -1,7 +1,7 @@
-// file : build2/cli/rule.cxx -*- C++ -*-
+// file : libbuild2/cli/rule.cxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#include <build2/cli/rule.hxx>
+#include <libbuild2/cli/rule.hxx>
#include <libbuild2/depdb.hxx>
#include <libbuild2/scope.hxx>
@@ -11,7 +11,7 @@
#include <libbuild2/filesystem.hxx>
#include <libbuild2/diagnostics.hxx>
-#include <build2/cli/target.hxx>
+#include <libbuild2/cli/target.hxx>
namespace build2
{
diff --git a/build2/cli/rule.hxx b/libbuild2/cli/rule.hxx
index 0538c57..0132b44 100644
--- a/build2/cli/rule.hxx
+++ b/libbuild2/cli/rule.hxx
@@ -1,14 +1,16 @@
-// file : build2/cli/rule.hxx -*- C++ -*-
+// file : libbuild2/cli/rule.hxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#ifndef BUILD2_CLI_RULE_HXX
-#define BUILD2_CLI_RULE_HXX
+#ifndef LIBBUILD2_CLI_RULE_HXX
+#define LIBBUILD2_CLI_RULE_HXX
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
#include <libbuild2/rule.hxx>
+#include <libbuild2/cli/export.hxx>
+
namespace build2
{
namespace cli
@@ -23,7 +25,8 @@ namespace build2
// @@ Redo as two separate rules?
//
- class compile_rule: public simple_rule, virtual data
+ class LIBBUILD2_CLI_SYMEXPORT compile_rule: public simple_rule,
+ private virtual data
{
public:
compile_rule (data&& d): data (move (d)) {}
@@ -40,4 +43,4 @@ namespace build2
}
}
-#endif // BUILD2_CLI_RULE_HXX
+#endif // LIBBUILD2_CLI_RULE_HXX
diff --git a/build2/cli/target.cxx b/libbuild2/cli/target.cxx
index 37eee97..22ae75c 100644
--- a/build2/cli/target.cxx
+++ b/libbuild2/cli/target.cxx
@@ -1,7 +1,7 @@
-// file : build2/cli/target.cxx -*- C++ -*-
+// file : libbuild2/cli/target.cxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#include <build2/cli/target.hxx>
+#include <libbuild2/cli/target.hxx>
#include <libbuild2/context.hxx>
diff --git a/build2/cli/target.hxx b/libbuild2/cli/target.hxx
index f27ee89..8efb837 100644
--- a/build2/cli/target.hxx
+++ b/libbuild2/cli/target.hxx
@@ -1,8 +1,8 @@
-// file : build2/cli/target.hxx -*- C++ -*-
+// file : libbuild2/cli/target.hxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#ifndef BUILD2_CLI_TARGET_HXX
-#define BUILD2_CLI_TARGET_HXX
+#ifndef LIBBUILD2_CLI_TARGET_HXX
+#define LIBBUILD2_CLI_TARGET_HXX
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
@@ -11,11 +11,13 @@
#include <libbuild2/cxx/target.hxx>
+#include <libbuild2/cli/export.hxx>
+
namespace build2
{
namespace cli
{
- class cli: public file
+ class LIBBUILD2_CLI_SYMEXPORT cli: public file
{
public:
cli (context& c, dir_path d, dir_path o, string n)
@@ -37,7 +39,8 @@ namespace build2
const cxx::ixx* i = nullptr;
};
- class cli_cxx: public mtime_target, public cli_cxx_members
+ class LIBBUILD2_CLI_SYMEXPORT cli_cxx: public mtime_target,
+ public cli_cxx_members
{
public:
cli_cxx (context& c, dir_path d, dir_path o, string n)
@@ -55,4 +58,4 @@ namespace build2
}
}
-#endif // BUILD2_CLI_TARGET_HXX
+#endif // LIBBUILD2_CLI_TARGET_HXX
diff --git a/libbuild2/common-options.cxx b/libbuild2/common-options.cxx
index 4be0289..03e7e60 100644
--- a/libbuild2/common-options.cxx
+++ b/libbuild2/common-options.cxx
@@ -30,7 +30,7 @@ namespace build2
// unknown_option
//
unknown_option::
- ~unknown_option () throw ()
+ ~unknown_option () noexcept
{
}
@@ -41,7 +41,7 @@ namespace build2
}
const char* unknown_option::
- what () const throw ()
+ what () const noexcept
{
return "unknown option";
}
@@ -49,7 +49,7 @@ namespace build2
// unknown_argument
//
unknown_argument::
- ~unknown_argument () throw ()
+ ~unknown_argument () noexcept
{
}
@@ -60,7 +60,7 @@ namespace build2
}
const char* unknown_argument::
- what () const throw ()
+ what () const noexcept
{
return "unknown argument";
}
@@ -68,7 +68,7 @@ namespace build2
// missing_value
//
missing_value::
- ~missing_value () throw ()
+ ~missing_value () noexcept
{
}
@@ -79,7 +79,7 @@ namespace build2
}
const char* missing_value::
- what () const throw ()
+ what () const noexcept
{
return "missing option value";
}
@@ -87,7 +87,7 @@ namespace build2
// invalid_value
//
invalid_value::
- ~invalid_value () throw ()
+ ~invalid_value () noexcept
{
}
@@ -102,7 +102,7 @@ namespace build2
}
const char* invalid_value::
- what () const throw ()
+ what () const noexcept
{
return "invalid option value";
}
@@ -116,7 +116,7 @@ namespace build2
}
const char* eos_reached::
- what () const throw ()
+ what () const noexcept
{
return "end of argument stream reached";
}
@@ -124,7 +124,7 @@ namespace build2
// file_io_failure
//
file_io_failure::
- ~file_io_failure () throw ()
+ ~file_io_failure () noexcept
{
}
@@ -135,7 +135,7 @@ namespace build2
}
const char* file_io_failure::
- what () const throw ()
+ what () const noexcept
{
return "unable to open file or read failure";
}
@@ -143,7 +143,7 @@ namespace build2
// unmatched_quote
//
unmatched_quote::
- ~unmatched_quote () throw ()
+ ~unmatched_quote () noexcept
{
}
@@ -154,7 +154,7 @@ namespace build2
}
const char* unmatched_quote::
- what () const throw ()
+ what () const noexcept
{
return "unmatched quote";
}
@@ -721,6 +721,56 @@ namespace build2
}
};
+ template <typename K, typename V, typename C>
+ struct parser<std::multimap<K, V, C> >
+ {
+ static void
+ parse (std::multimap<K, V, C>& m, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ std::size_t pos (s.position ());
+ std::string ov (s.next ());
+ std::string::size_type p = ov.find ('=');
+
+ K k = K ();
+ V v = V ();
+ std::string kstr (ov, 0, p);
+ std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (o),
+ 0
+ };
+
+ bool dummy;
+ if (!kstr.empty ())
+ {
+ av[1] = const_cast<char*> (kstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<K>::parse (k, dummy, s);
+ }
+
+ if (!vstr.empty ())
+ {
+ av[1] = const_cast<char*> (vstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<V>::parse (v, dummy, s);
+ }
+
+ m.insert (typename std::multimap<K, V, C>::value_type (k, v));
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
template <typename X, typename T, T X::*M>
void
thunk (X& x, scanner& s)
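As a rough illustration of the behavior of the std::multimap parser added above (a self-contained sketch, not build2 code; the sample key=value strings are made up): each option value is split at the first '=', the two halves are parsed as the key and the value, and repeated occurrences accumulate in the multimap instead of overwriting each other.

  // Hypothetical sketch of the key=value splitting and accumulation.
  #include <map>
  #include <string>
  #include <iostream>

  int main ()
  {
    std::multimap<std::string, std::string> m;

    for (std::string ov: {"a=1", "b=2", "b=3"})
    {
      std::string::size_type p (ov.find ('='));

      std::string k (ov, 0, p);
      std::string v (ov, p != std::string::npos ? p + 1 : ov.size ());

      m.emplace (std::move (k), std::move (v));
    }

    // Prints: a = 1, b = 2, b = 3 (one entry per line).
    for (const auto& kv: m)
      std::cout << kv.first << " = " << kv.second << '\n';
  }
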
diff --git a/libbuild2/common-options.hxx b/libbuild2/common-options.hxx
index f52fc3c..f90f563 100644
--- a/libbuild2/common-options.hxx
+++ b/libbuild2/common-options.hxx
@@ -94,7 +94,7 @@ namespace build2
{
public:
virtual
- ~unknown_option () throw ();
+ ~unknown_option () noexcept;
unknown_option (const std::string& option);
@@ -105,7 +105,7 @@ namespace build2
print (::std::ostream&) const;
virtual const char*
- what () const throw ();
+ what () const noexcept;
private:
std::string option_;
@@ -115,7 +115,7 @@ namespace build2
{
public:
virtual
- ~unknown_argument () throw ();
+ ~unknown_argument () noexcept;
unknown_argument (const std::string& argument);
@@ -126,7 +126,7 @@ namespace build2
print (::std::ostream&) const;
virtual const char*
- what () const throw ();
+ what () const noexcept;
private:
std::string argument_;
@@ -136,7 +136,7 @@ namespace build2
{
public:
virtual
- ~missing_value () throw ();
+ ~missing_value () noexcept;
missing_value (const std::string& option);
@@ -147,7 +147,7 @@ namespace build2
print (::std::ostream&) const;
virtual const char*
- what () const throw ();
+ what () const noexcept;
private:
std::string option_;
@@ -157,7 +157,7 @@ namespace build2
{
public:
virtual
- ~invalid_value () throw ();
+ ~invalid_value () noexcept;
invalid_value (const std::string& option,
const std::string& value,
@@ -176,7 +176,7 @@ namespace build2
print (::std::ostream&) const;
virtual const char*
- what () const throw ();
+ what () const noexcept;
private:
std::string option_;
@@ -191,14 +191,14 @@ namespace build2
print (::std::ostream&) const;
virtual const char*
- what () const throw ();
+ what () const noexcept;
};
class LIBBUILD2_SYMEXPORT file_io_failure: public exception
{
public:
virtual
- ~file_io_failure () throw ();
+ ~file_io_failure () noexcept;
file_io_failure (const std::string& file);
@@ -209,7 +209,7 @@ namespace build2
print (::std::ostream&) const;
virtual const char*
- what () const throw ();
+ what () const noexcept;
private:
std::string file_;
@@ -219,7 +219,7 @@ namespace build2
{
public:
virtual
- ~unmatched_quote () throw ();
+ ~unmatched_quote () noexcept;
unmatched_quote (const std::string& argument);
@@ -230,7 +230,7 @@ namespace build2
print (::std::ostream&) const;
virtual const char*
- what () const throw ();
+ what () const noexcept;
private:
std::string argument_;
diff --git a/libbuild2/config/functions.cxx b/libbuild2/config/functions.cxx
index 84c1b03..b1a61a2 100644
--- a/libbuild2/config/functions.cxx
+++ b/libbuild2/config/functions.cxx
@@ -95,7 +95,7 @@ namespace build2
// See save_config() for details.
//
assert (s->ctx.phase == run_phase::load);
- module* mod (s->rw ().find_module<module> (module::name));
+ const module* mod (s->find_module<module> (module::name));
if (mod == nullptr)
fail << "config.save() called without config module";
diff --git a/libbuild2/config/host-config.cxx.in b/libbuild2/config/host-config.cxx.in
index 9e3e0c2..6b1ce77 100644
--- a/libbuild2/config/host-config.cxx.in
+++ b/libbuild2/config/host-config.cxx.in
@@ -9,5 +9,8 @@ namespace build2
//
extern const char host_config[] = R"###($host_config$)###";
extern const char build2_config[] = R"###($build2_config$)###";
+
+ extern const char host_config_no_warnings[] = R"###($host_config_no_warnings$)###";
+ extern const char build2_config_no_warnings[] = R"###($build2_config_no_warnings$)###";
}
}
diff --git a/libbuild2/config/init.cxx b/libbuild2/config/init.cxx
index 7035ce5..2f134c4 100644
--- a/libbuild2/config/init.cxx
+++ b/libbuild2/config/init.cxx
@@ -26,6 +26,8 @@ namespace build2
{
namespace config
{
+ static const file_rule file_rule_ (true /* check_type */);
+
void
functions (function_map&); // functions.cxx
@@ -39,7 +41,7 @@ namespace build2
save_environment (const value& d, const value* b, names& storage)
{
if (b == nullptr)
- return make_pair (reverse (d, storage), "=");
+ return make_pair (reverse (d, storage, true /* reduce */), "=");
// The plan is to iterate over environment variables adding those that
// are not in base to storage. There is, however, a complication: we may
@@ -208,6 +210,9 @@ namespace build2
#ifndef BUILD2_BOOTSTRAP
extern const char host_config[];
extern const char build2_config[];
+
+ extern const char host_config_no_warnings[];
+ extern const char build2_config_no_warnings[];
#endif
bool
@@ -429,8 +434,15 @@ namespace build2
auto load_config_file = [&load_config] (const path& f, const location& l)
{
path_name fn (f);
- ifdstream ifs;
- load_config (open_file_or_stdin (fn, ifs), fn, l);
+ try
+ {
+ ifdstream ifs;
+ load_config (open_file_or_stdin (fn, ifs), fn, l);
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to read buildfile " << fn << ": " << e;
+ }
};
// Load config.build unless requested not to.
@@ -475,14 +487,23 @@ namespace build2
const string& s (f.string ());
- if (s[0] != '~')
+ if (s.empty ())
+ fail << "empty path in config.config.load";
+ else if (s[0] != '~')
load_config_file (f, l);
- else if (s == "~host" || s == "~build2")
+ else if (s == "~host" || s == "~host-no-warnings" ||
+ s == "~build2" || s == "~build2-no-warnings")
{
#ifdef BUILD2_BOOTSTRAP
assert (false);
#else
- istringstream is (s[1] == 'h' ? host_config : build2_config);
+ istringstream is (s[1] == 'h'
+ ? (s.size () == 5
+ ? host_config
+ : host_config_no_warnings)
+ : (s.size () == 7
+ ? build2_config
+ : build2_config_no_warnings));
load_config (is, path_name (s), l);
#endif
}
@@ -705,21 +726,23 @@ namespace build2
// Register alias and fallback rule for the configure meta-operation.
//
+ rs.insert_rule<alias> (configure_id, 0, "config.alias", alias_rule::instance);
+
+ // This allows a custom configure rule while doing nothing by default.
+ //
+ rs.insert_rule<target> (configure_id, 0, "config.noop", noop_rule::instance);
+
// We need this rule for out-of-any-project dependencies (for example,
// libraries imported from /usr/lib). We are registering it on the
// global scope similar to builtin rules.
//
- // See a similar rule in the dist module.
+ // Note: use target instead of anything more specific (such as
+ // mtime_target) in order not to take precedence over the rules above.
//
- rs.global_scope ().insert_rule<mtime_target> (
- configure_id, 0, "config.file", file_rule::instance);
-
- rs.insert_rule<alias> (configure_id, 0, "config.alias", alias_rule::instance);
-
- // This allows a custom configure rule while doing nothing by default.
+ // See a similar rule in the dist module.
//
- rs.insert_rule<target> (configure_id, 0, "config.noop", noop_rule::instance);
- rs.insert_rule<file> (configure_id, 0, "config.noop", noop_rule::instance);
+ rs.global_scope ().insert_rule<target> (
+ configure_id, 0, "config.file", file_rule_);
return true;
}
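For reference, the character counts in the config.config.load handling added above encode this mapping: "~host" is 5 characters and "~build2" is 7, so the longer *-no-warnings spellings select the corresponding no-warnings configuration. A minimal sketch of that selection (select() and main() are illustrative only, not build2 code):

  // Hypothetical illustration of the special name -> embedded config mapping.
  #include <cassert>
  #include <string>

  static const char*
  select (const std::string& s)
  {
    return s[1] == 'h'
      ? (s.size () == 5 ? "host_config" : "host_config_no_warnings")
      : (s.size () == 7 ? "build2_config" : "build2_config_no_warnings");
  }

  int main ()
  {
    assert (std::string (select ("~host")) == "host_config");
    assert (std::string (select ("~host-no-warnings")) == "host_config_no_warnings");
    assert (std::string (select ("~build2")) == "build2_config");
    assert (std::string (select ("~build2-no-warnings")) == "build2_config_no_warnings");
  }
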
diff --git a/libbuild2/config/module.hxx b/libbuild2/config/module.hxx
index 82b79be..8d3ff67 100644
--- a/libbuild2/config/module.hxx
+++ b/libbuild2/config/module.hxx
@@ -160,7 +160,7 @@ namespace build2
save_module (scope&, const char*, int);
const saved_variable*
- find_variable (const variable& var)
+ find_variable (const variable& var) const
{
auto i (saved_modules.find_sup (var.name));
if (i != saved_modules.end ())
diff --git a/libbuild2/config/operation.cxx b/libbuild2/config/operation.cxx
index 34ed402..150bf1a 100644
--- a/libbuild2/config/operation.cxx
+++ b/libbuild2/config/operation.cxx
@@ -134,7 +134,8 @@ namespace build2
bool r;
if (c.compare (p, 4 , "save") == 0) r = true;
else if (c.compare (p, 4 , "drop") == 0) r = false;
- else fail << "invalid config.config.persist action '" << c << "'";
+ else fail << "invalid config.config.persist action '" << c << "'"
+ << endf;
bool w (false);
if ((p += 4) != c.size ())
@@ -163,11 +164,18 @@ namespace build2
// and this function can be called from a buildfile (probably only
// during serial execution but still).
//
+ // We could also be configuring multiple projects (including from
+ // pkg_configure() in bpkg) but it feels like we should be ok since we
+ // only modify this project's root scope data, which should not affect
+ // any other project.
+ //
+ // See also save_environment() for a similar issue.
+ //
void
save_config (const scope& rs,
ostream& os, const path_name& on,
bool inherit,
- module& mod,
+ const module& mod,
const project_set& projects)
{
context& ctx (rs.ctx);
@@ -181,7 +189,7 @@ namespace build2
if (v)
{
storage.clear ();
- dr << "'" << reverse (v, storage) << "'";
+ dr << "'" << reverse (v, storage, true /* reduce */) << "'";
}
else
dr << "[null]";
@@ -251,6 +259,24 @@ namespace build2
continue;
}
+ // A common reason behind an unused config.import.* value is an
+ // unused dependency. That is, there is a depends in the manifest but no
+ // import in the buildfile (or the import could be conditional, in which
+ // case the depends should also be conditional). So let's suggest this
+ // possibility. Note that the project name may have been sanitized
+ // to a variable name. Oh, well, better than nothing.
+ //
+ auto info_import = [] (diag_record& dr, const string& var)
+ {
+ if (var.compare (0, 14, "config.import.") == 0)
+ {
+ size_t p (var.find ('.', 14));
+
+ dr << info << "potentially unused dependency on "
+ << string (var, 14, p == string::npos ? p : p - 14);
+ }
+ };
+
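
To make the substring arithmetic in info_import() above concrete, here is a small self-contained sketch using a hypothetical variable name (not taken from the patch): skipping the 14-character "config.import." prefix and cutting at the next dot recovers the (possibly sanitized) project name.

  #include <cassert>
  #include <string>

  int main ()
  {
    // Hypothetical config.import.* variable name.
    //
    std::string var ("config.import.libhello.lib.hello");

    // Same arithmetic as in info_import() above.
    //
    std::size_t p (var.find ('.', 14));
    std::string prj (var, 14, p == std::string::npos ? p : p - 14);

    assert (prj == "libhello");
  }
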
const value& v (p.first->second);
pair<bool, bool> r (save_config_variable (*var,
@@ -259,7 +285,7 @@ namespace build2
true /* unused */));
if (r.first) // save
{
- mod.save_variable (*var, 0);
+ const_cast<module&> (mod).save_variable (*var, 0);
if (r.second) // warn
{
@@ -278,6 +304,7 @@ namespace build2
diag_record dr;
dr << warn (on) << "saving no longer used variable " << *var;
+ info_import (dr, var->name);
if (verb >= 2)
info_value (dr, v);
}
@@ -288,6 +315,7 @@ namespace build2
{
diag_record dr;
dr << warn (on) << "dropping no longer used variable " << *var;
+ info_import (dr, var->name);
info_value (dr, v);
}
}
@@ -531,7 +559,7 @@ namespace build2
pair<names_view, const char*> p (
sv.save != nullptr
? sv.save (v, base, storage)
- : make_pair (reverse (v, storage), "="));
+ : make_pair (reverse (v, storage, true /* reduce */), "="));
// Might become empty after a custom save function has had at it.
//
@@ -560,7 +588,7 @@ namespace build2
save_config (const scope& rs,
const path& f,
bool inherit,
- module& mod,
+ const module& mod,
const project_set& projects)
{
path_name fn (f);
@@ -588,6 +616,9 @@ namespace build2
// Update config.config.environment value for a hermetic configuration.
//
+ // @@ We are modifying the module. See also save_config() for a similar
+ // issue.
+ //
static void
save_environment (scope& rs, module& mod)
{
@@ -660,9 +691,9 @@ namespace build2
static void
configure_project (action a,
- scope& rs,
+ const scope& rs,
const variable* c_s, // config.config.save
- module& mod,
+ const module& mod,
project_set& projects)
{
tracer trace ("configure_project");
@@ -696,7 +727,7 @@ namespace build2
// for the other half of this logic).
//
if (cast_false<bool> (rs["config.config.hermetic"]))
- save_environment (rs, mod);
+ save_environment (const_cast<scope&> (rs), const_cast<module&> (mod));
// Save src-root.build unless out_root is the same as src.
//
@@ -728,6 +759,11 @@ namespace build2
lookup l (rs[*c_s]);
if (l && (l.belongs (rs) || l.belongs (ctx.global_scope)))
{
+ const path& f (cast<path> (l));
+
+ if (f.empty ())
+ fail << "empty path in " << *c_s;
+
// While writing the complete configuration seems like a natural
// default, there might be a desire to take inheritance into
// account (if, say, we are exporting at multiple levels). One can
@@ -735,8 +771,7 @@ namespace build2
// still want to support this mode somehow in the future (it seems
// like an override of config.config.persist should do the trick).
//
- save_config (
- rs, cast<path> (l), false /* inherit */, mod, projects);
+ save_config (rs, f, false /* inherit */, mod, projects);
}
}
}
@@ -759,14 +794,14 @@ namespace build2
{
const dir_path& pd (p.second);
dir_path out_nroot (out_root / pd);
- scope& nrs (ctx.scopes.find_out (out_nroot).rw ());
+ const scope& nrs (ctx.scopes.find_out (out_nroot));
// Skip this subproject if it is not loaded or doesn't use the
// config module.
//
if (nrs.out_path () == out_nroot)
{
- if (module* m = nrs.find_module<module> (module::name))
+ if (const module* m = nrs.find_module<module> (module::name))
{
configure_project (a, nrs, c_s, *m, projects);
}
@@ -819,6 +854,8 @@ namespace build2
// Don't translate default to update. In our case unspecified
// means configure everything.
//
+ // Note: see pkg_configure() in bpkg if changing anything here.
+ //
return o;
}
@@ -855,6 +892,8 @@ namespace build2
static void
configure_pre (context&, const values& params, const location& l)
{
+ // Note: see pkg_configure() in bpkg if changing anything here.
+ //
forward (params, "configure", l); // Validate.
}
@@ -976,20 +1015,28 @@ namespace build2
ctx.current_operation (*oif);
+ if (oif->operation_pre != nullptr)
+ oif->operation_pre (ctx, {}, true /* inner */, location ());
+
phase_lock pl (ctx, run_phase::match);
match_sync (action (configure_id, id), t);
+
+ if (oif->operation_post != nullptr)
+ oif->operation_post (ctx, {}, true /* inner */);
}
}
configure_project (a,
- rs->rw (),
+ *rs,
c_s,
- *rs->rw ().find_module<module> (module::name),
+ *rs->find_module<module> (module::name),
projects);
}
}
}
+ // NOTE: see pkg_configure() in bpkg if changing anything here.
+ //
const meta_operation_info mo_configure {
configure_id,
"configure",
diff --git a/libbuild2/config/operation.hxx b/libbuild2/config/operation.hxx
index 9e2a91e..1662941 100644
--- a/libbuild2/config/operation.hxx
+++ b/libbuild2/config/operation.hxx
@@ -15,8 +15,8 @@ namespace build2
{
class module;
- extern const meta_operation_info mo_configure;
- extern const meta_operation_info mo_disfigure;
+ LIBBUILD2_SYMEXPORT extern const meta_operation_info mo_configure;
+ LIBBUILD2_SYMEXPORT extern const meta_operation_info mo_disfigure;
const string&
preprocess_create (context&,
@@ -37,7 +37,7 @@ namespace build2
save_config (const scope& rs,
ostream&, const path_name&,
bool inherit,
- module&,
+ const module&,
const project_set&);
// See config.config.hermetic.environment.
diff --git a/libbuild2/context.cxx b/libbuild2/context.cxx
index 967577f..6e4fd6f 100644
--- a/libbuild2/context.cxx
+++ b/libbuild2/context.cxx
@@ -72,11 +72,225 @@ namespace build2
data_->var_pool.map_.reserve (res.variables);
}
+ pair<char, variable_override> context::
+ parse_variable_override (const string& s, size_t i, bool buildspec)
+ {
+ istringstream is (s);
+ is.exceptions (istringstream::failbit | istringstream::badbit);
+
+ // Similar to buildspec we do "effective escaping" of the special `'"\$(`
+ // characters (basically what's escapable inside a double-quoted literal
+ // plus the single quote; note, however, that we exclude line
+ // continuations and `)` since they would make directory paths on Windows
+ // unusable).
+ //
+ path_name in ("<cmdline>");
+ lexer l (is, in, 1 /* line */, "\'\"\\$(");
+
+ // At the buildfile level the scope-specific variable should be separated
+ // from the directory with a whitespace, for example:
+ //
+ // ./ foo=$bar
+ //
+ // However, requiring this for command line variables would be too
+ // inconvenient so we support both.
+ //
+ // We also have the optional visibility modifier as a first character of
+ // the variable name:
+ //
+ // ! - global
+ // % - project
+ // / - scope
+ //
+ // The last one clashes a bit with the directory prefix:
+ //
+ // ./ /foo=bar
+ // .//foo=bar
+ //
+ // But that's probably ok (the need for a scope-qualified override with
+ // scope visibility should be pretty rare). Note also that to set the
+ // value on the global scope we use !.
+ //
+ // And so the first token should be a word which can be either a variable
+ // name (potentially with the directory qualification) or just the
+ // directory, in which case it should be followed by another word
+ // (unqualified variable name). To avoid treating any of the visibility
+ // modifiers as special we use the cmdvar mode.
+ //
+ l.mode (lexer_mode::cmdvar);
+ token t (l.next ());
+
+ optional<dir_path> dir;
+ if (t.type == token_type::word)
+ {
+ string& v (t.value);
+ size_t p (path::traits_type::rfind_separator (v));
+
+ if (p != string::npos && p != 0) // If first then visibility.
+ {
+ if (p == v.size () - 1)
+ {
+ // Separate directory.
+ //
+ dir = dir_path (move (v));
+ t = l.next ();
+
+ // Target-specific overrides are not yet supported (and probably
+ // never will be; the beast is already complex enough).
+ //
+ if (t.type == token_type::colon)
+ {
+ diag_record dr (fail);
+
+ dr << "'" << s << "' is a target-specific override";
+
+ if (buildspec)
+ dr << info << "use double '--' to treat this argument as "
+ << "buildspec";
+ }
+ }
+ else
+ {
+ // Combined directory.
+ //
+ // If double separator (visibility marker), then keep the first in
+ // name.
+ //
+ if (p != 0 && path::traits_type::is_separator (v[p - 1]))
+ --p;
+
+ dir = dir_path (t.value, 0, p + 1); // Include the separator.
+ t.value.erase (0, p + 1); // Erase the separator.
+ }
+
+ if (dir->relative ())
+ {
+ // Handle the special relative to base scope case (.../).
+ //
+ auto i (dir->begin ());
+
+ if (*i == "...")
+ dir = dir_path (++i, dir->end ()); // Note: can become empty.
+ else
+ dir->complete (); // Relative to CWD.
+ }
+
+ if (dir->absolute ())
+ dir->normalize ();
+ }
+ }
+
+ token_type tt (l.next ().type);
+
+ // The token should be the variable name followed by =, +=, or =+.
+ //
+ if (t.type != token_type::word || t.value.empty () ||
+ (tt != token_type::assign &&
+ tt != token_type::prepend &&
+ tt != token_type::append))
+ {
+ diag_record dr (fail);
+
+ dr << "expected variable assignment instead of '" << s << "'";
+
+ if (buildspec)
+ dr << info << "use double '--' to treat this argument as buildspec";
+ }
+
+ // Take care of the visibility. Note that here we rely on the fact that
+ // none of these characters are lexer's name separators.
+ //
+ char c (t.value[0]);
+
+ if (path::traits_type::is_separator (c))
+ c = '/'; // Normalize.
+
+ string n (t.value, c == '!' || c == '%' || c == '/' ? 1 : 0);
+
+ // Make sure it is qualified.
+ //
+ // We can support overridable public unqualified variables (which must
+ // all be pre-entered by the end of this constructor) but we will need
+ // to detect their names here in an ad hoc manner (we cannot enter them
+ // before this logic because of the "untyped override" requirement).
+ //
+ // Note: issue the same diagnostics as in variable_pool::update().
+ //
+ if (n.find ('.') == string::npos)
+ fail << "variable " << n << " cannot be overridden";
+
+ if (c == '!' && dir)
+ fail << "scope-qualified global override of variable " << n;
+
+ // Pre-enter the main variable. Note that we rely on all the overridable
+ // variables with global visibility to be known (either entered or
+ // handled via a pattern) at this stage.
+ //
+ variable_pool& vp (data_->var_pool);
+ variable& var (
+ const_cast<variable&> (vp.insert (n, true /* overridable */)));
+
+ const variable* o;
+ {
+ variable_visibility v (c == '/' ? variable_visibility::scope :
+ c == '%' ? variable_visibility::project :
+ variable_visibility::global);
+
+ const char* k (tt == token_type::assign ? "__override" :
+ tt == token_type::append ? "__suffix" : "__prefix");
+
+ unique_ptr<variable> p (
+ new variable {
+ n + '.' + to_string (i + 1) + '.' + k,
+ &vp /* owner */,
+ nullptr /* aliases */,
+ nullptr /* type */,
+ nullptr /* overrides */,
+ v});
+
+ // Back link.
+ //
+ p->aliases = p.get ();
+ if (var.overrides != nullptr)
+ swap (p->aliases,
+ const_cast<variable*> (var.overrides.get ())->aliases);
+
+ // Forward link.
+ //
+ p->overrides = move (var.overrides);
+ var.overrides = move (p);
+
+ o = var.overrides.get ();
+ }
+
+ // Currently we expand project overrides in the global scope to keep
+ // things simple. Pass original variable for diagnostics. Use current
+ // working directory as pattern base.
+ //
+ scope& gs (global_scope.rw ());
+
+ parser p (*this);
+ pair<value, token> r (p.parse_variable_value (l, gs, &work, var));
+
+ if (r.second.type != token_type::eos)
+ fail << "unexpected " << r.second << " in variable assignment "
+ << "'" << s << "'";
+
+ // Make sure the value is not typed.
+ //
+ if (r.first.type != nullptr)
+ fail << "typed override of variable " << n;
+
+ return make_pair (
+ c,
+ variable_override {var, *o, move (dir), move (r.first)});
+ }
+
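
As a side note on the combined directory form mentioned in the comments above (e.g. .//foo=bar, where the second separator acts as the scope visibility marker), the split happens on the last separator with the marker kept in the name. A rough, self-contained sketch of just that splitting step, with std::string standing in for the path machinery (illustration only):

  #include <cassert>
  #include <string>

  int main ()
  {
    // The first word of the ".//foo=bar" override as seen by the lexer.
    //
    std::string v (".//foo");

    std::size_t p (v.rfind ('/'));     // rfind_separator ()

    // Double separator: the second one is the visibility marker and
    // stays with the variable name.
    //
    if (p != 0 && v[p - 1] == '/')
      --p;

    std::string dir (v, 0, p + 1);     // Include the separator.
    std::string name (v.substr (p + 1));

    assert (dir == "./" && name == "/foo");
  }
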
context::
context (scheduler& s,
global_mutexes& ms,
file_cache& fc,
- bool mo,
+ optional<match_only_level> mo,
bool nem,
bool dr,
bool ndb,
@@ -84,11 +298,12 @@ namespace build2
const strings& cmd_vars,
reserves res,
optional<context*> mc,
- const loaded_modules_lock* ml)
+ const module_libraries_lock* ml,
+ const function<var_override_function>& var_ovr_func)
: data_ (new data (*this)),
- sched (s),
- mutexes (ms),
- fcache (fc),
+ sched (&s),
+ mutexes (&ms),
+ fcache (&fc),
match_only (mo),
no_external_modules (nem),
dry_run_option (dr),
@@ -111,6 +326,8 @@ namespace build2
? optional<unique_ptr<context>> (nullptr)
: nullopt)
{
+ // NOTE: see also the bare minimum version below if adding anything here.
+
tracer trace ("context");
l6 ([&]{trace << "initializing build state";});
@@ -128,7 +345,7 @@ namespace build2
//
meta_operation_table.insert ("noop");
meta_operation_table.insert ("perform");
- meta_operation_table.insert ("configure");
+ meta_operation_table.insert ("configure"); // bpkg assumes no process.
meta_operation_table.insert ("disfigure");
if (config_preprocess_create != nullptr)
@@ -204,10 +421,10 @@ namespace build2
//
set ("build.verbosity", uint64_t (verb));
- // Build system progress diagnostics.
+ // Build system diagnostics progress and color.
//
- // Note that it can be true, false, or NULL if progress was neither
- // requested nor suppressed.
+ // Note that these can be true, false, or NULL if neither requested nor
+ // suppressed explicitly.
//
{
value& v (gs.assign (vp.insert<bool> ("build.progress", v_g)));
@@ -215,6 +432,18 @@ namespace build2
v = *diag_progress_option;
}
+ {
+ value& v (gs.assign (vp.insert<bool> ("build.diag_color", v_g)));
+ if (diag_color_option)
+ v = *diag_color_option;
+ }
+
+ // These are the "effective" values that incorporate a suitable default
+ // if neither requested nor suppressed explicitly.
+ //
+ set ("build.show_progress", show_progress (verb_never));
+ set ("build.show_diag_color", show_diag_color ());
+
// Build system version (similar to what we do in the version module
// except here we don't include package epoch/revision).
//
@@ -318,6 +547,7 @@ namespace build2
t.insert<path_target> ();
t.insert<file> ();
+ t.insert<group> ();
t.insert<alias> ();
t.insert<dir> ();
t.insert<fsdir> ();
@@ -359,221 +589,44 @@ namespace build2
// marked as such first. Then, as we enter variables, we can verify that
// the override is allowed.
//
- for (size_t i (0); i != cmd_vars.size (); ++i)
{
- const string& s (cmd_vars[i]);
-
- istringstream is (s);
- is.exceptions (istringstream::failbit | istringstream::badbit);
-
- // Similar to buildspec we do "effective escaping" and only for ['"\$(]
- // (basically what's necessary inside a double-quoted literal plus the
- // single quote).
- //
- path_name in ("<cmdline>");
- lexer l (is, in, 1 /* line */, "\'\"\\$(");
+ size_t i (0);
+ for (; i != cmd_vars.size (); ++i)
+ {
+ const string& s (cmd_vars[i]);
- // At the buildfile level the scope-specific variable should be
- // separated from the directory with a whitespace, for example:
- //
- // ./ foo=$bar
- //
- // However, requiring this for command line variables would be too
- // inconvinient so we support both.
- //
- // We also have the optional visibility modifier as a first character of
- // the variable name:
- //
- // ! - global
- // % - project
- // / - scope
- //
- // The last one clashes a bit with the directory prefix:
- //
- // ./ /foo=bar
- // .//foo=bar
- //
- // But that's probably ok (the need for a scope-qualified override with
- // scope visibility should be pretty rare). Note also that to set the
- // value on the global scope we use !.
- //
- // And so the first token should be a word which can be either a
- // variable name (potentially with the directory qualification) or just
- // the directory, in which case it should be followed by another word
- // (unqualified variable name). To avoid treating any of the visibility
- // modifiers as special we use the cmdvar mode.
- //
- l.mode (lexer_mode::cmdvar);
- token t (l.next ());
+ pair<char, variable_override> p (
+ parse_variable_override (s, i, true /* buildspec */));
- optional<dir_path> dir;
- if (t.type == token_type::word)
- {
- string& v (t.value);
- size_t p (path::traits_type::rfind_separator (v));
+ char c (p.first);
+ variable_override& vo (p.second);
- if (p != string::npos && p != 0) // If first then visibility.
+ // Global and absolute scope overrides we can enter directly. Project
+ // and relative scope ones will be entered later for each project.
+ //
+ if (c == '!' || (vo.dir && vo.dir->absolute ()))
{
- if (p == v.size () - 1)
- {
- // Separate directory.
- //
- dir = dir_path (move (v));
- t = l.next ();
-
- // Target-specific overrides are not yet supported (and probably
- // never will be; the beast is already complex enough).
- //
- if (t.type == token_type::colon)
- fail << "'" << s << "' is a target-specific override" <<
- info << "use double '--' to treat this argument as buildspec";
- }
- else
- {
- // Combined directory.
- //
- // If double separator (visibility marker), then keep the first in
- // name.
- //
- if (p != 0 && path::traits_type::is_separator (v[p - 1]))
- --p;
-
- dir = dir_path (t.value, 0, p + 1); // Include the separator.
- t.value.erase (0, p + 1); // Erase the separator.
- }
+ scope& s (c == '!' ? gs : *sm.insert_out (*vo.dir)->second.front ());
- if (dir->relative ())
- {
- // Handle the special relative to base scope case (.../).
- //
- auto i (dir->begin ());
-
- if (*i == "...")
- dir = dir_path (++i, dir->end ()); // Note: can become empty.
- else
- dir->complete (); // Relative to CWD.
- }
+ auto p (s.vars.insert (vo.ovr));
+ assert (p.second); // Variable name is unique.
- if (dir->absolute ())
- dir->normalize ();
+ value& v (p.first);
+ v = move (vo.val);
}
- }
-
- token_type tt (l.next ().type);
-
- // The token should be the variable name followed by =, +=, or =+.
- //
- if (t.type != token_type::word || t.value.empty () ||
- (tt != token_type::assign &&
- tt != token_type::prepend &&
- tt != token_type::append))
- {
- fail << "expected variable assignment instead of '" << s << "'" <<
- info << "use double '--' to treat this argument as buildspec";
- }
+ else
+ data_->var_overrides.push_back (move (vo));
- // Take care of the visibility. Note that here we rely on the fact that
- // none of these characters are lexer's name separators.
- //
- char c (t.value[0]);
-
- if (path::traits_type::is_separator (c))
- c = '/'; // Normalize.
-
- string n (t.value, c == '!' || c == '%' || c == '/' ? 1 : 0);
-
- // Make sure it is qualified.
- //
- // We can support overridable public unqualified variables (which must
- // all be pre-entered by the end of this constructor) but we will need
- // to detect their names here in an ad hoc manner (we cannot enter them
- // before this logic because of the "untyped override" requirement).
- //
- // Note: issue the same diagnostics as in variable_pool::update().
- //
- if (n.find ('.') == string::npos)
- fail << "variable " << n << " cannot be overridden";
-
- if (c == '!' && dir)
- fail << "scope-qualified global override of variable " << n;
-
- // Pre-enter the main variable. Note that we rely on all the overridable
- // variables with global visibility to be known (either entered or
- // handled via a pettern) at this stage.
- //
- variable& var (
- const_cast<variable&> (vp.insert (n, true /* overridable */)));
-
- const variable* o;
- {
- variable_visibility v (c == '/' ? variable_visibility::scope :
- c == '%' ? variable_visibility::project :
- variable_visibility::global);
-
- const char* k (tt == token_type::assign ? "__override" :
- tt == token_type::append ? "__suffix" : "__prefix");
-
- unique_ptr<variable> p (
- new variable {
- n + '.' + to_string (i + 1) + '.' + k,
- &vp /* owner */,
- nullptr /* aliases */,
- nullptr /* type */,
- nullptr /* overrides */,
- v});
-
- // Back link.
+ // Save global overrides for nested contexts.
//
- p->aliases = p.get ();
- if (var.overrides != nullptr)
- swap (p->aliases,
- const_cast<variable*> (var.overrides.get ())->aliases);
-
- // Forward link.
- //
- p->overrides = move (var.overrides);
- var.overrides = move (p);
-
- o = var.overrides.get ();
+ if (c == '!')
+ data_->global_var_overrides.push_back (s);
}
- // Currently we expand project overrides in the global scope to keep
- // things simple. Pass original variable for diagnostics. Use current
- // working directory as pattern base.
- //
- parser p (*this);
- pair<value, token> r (p.parse_variable_value (l, gs, &work, var));
-
- if (r.second.type != token_type::eos)
- fail << "unexpected " << r.second << " in variable assignment "
- << "'" << s << "'";
-
- // Make sure the value is not typed.
- //
- if (r.first.type != nullptr)
- fail << "typed override of variable " << n;
-
- // Global and absolute scope overrides we can enter directly. Project
- // and relative scope ones will be entered later for each project.
+ // Parse any ad hoc project-wide overrides.
//
- if (c == '!' || (dir && dir->absolute ()))
- {
- scope& s (c == '!' ? gs : *sm.insert_out (*dir)->second.front ());
-
- auto p (s.vars.insert (*o));
- assert (p.second); // Variable name is unique.
-
- value& v (p.first);
- v = move (r.first);
- }
- else
- data_->var_overrides.push_back (
- variable_override {var, *o, move (dir), move (r.first)});
-
- // Save global overrides for nested contexts.
- //
- if (c == '!')
- data_->global_var_overrides.push_back (s);
+ if (var_ovr_func != nullptr)
+ var_ovr_func (*this, i);
}
// Enter remaining variable patterns and builtin variables.
@@ -659,6 +712,42 @@ namespace build2
}
context::
+ context ()
+ : data_ (new data (*this)),
+ sched (nullptr),
+ mutexes (nullptr),
+ fcache (nullptr),
+ match_only (nullopt),
+ no_external_modules (true),
+ dry_run_option (false),
+ no_diag_buffer (false),
+ keep_going (false),
+ phase_mutex (*this),
+ scopes (data_->scopes),
+ targets (data_->targets),
+ var_pool (data_->var_pool),
+ var_patterns (data_->var_patterns),
+ var_overrides (data_->var_overrides),
+ functions (data_->functions),
+ global_scope (create_global_scope (data_->scopes)),
+ global_target_types (data_->global_target_types),
+ global_override_cache (data_->global_override_cache),
+ global_var_overrides (data_->global_var_overrides),
+ modules_lock (nullptr),
+ module_context (nullptr)
+ {
+ variable_pool& vp (data_->var_pool);
+
+ var_src_root = &vp.insert<dir_path> ("src_root");
+ var_out_root = &vp.insert<dir_path> ("out_root");
+
+ var_project = &vp.insert<project_name> ("project");
+ var_amalgamation = &vp.insert<dir_path> ("amalgamation");
+
+ load_generation = 1;
+ }
+
+ context::
~context ()
{
// Cannot be inline since context::data is undefined.
@@ -667,7 +756,8 @@ namespace build2
void context::
enter_project_overrides (scope& rs,
const dir_path& out_base,
- const variable_overrides& ovrs)
+ const variable_overrides& ovrs,
+ scope* as)
{
// The mildly tricky part here is to distinguish the situation where we
// are bootstrapping the same project multiple times. The first override
@@ -692,7 +782,7 @@ namespace build2
scope& s (
o.dir
? *sm.insert_out ((out_base / *o.dir).normalize ())->second.front ()
- : *rs.weak_scope ());
+ : *(as != nullptr ? as : (as = rs.weak_scope ())));
auto p (s.vars.insert (o.ovr));
@@ -735,6 +825,7 @@ namespace build2
}
current_mif = &mif;
+ current_mdata = current_data_ptr (nullptr, null_current_data_deleter);
current_on = 0; // Reset.
}
@@ -748,6 +839,8 @@ namespace build2
current_oname = oif.name;
current_inner_oif = &inner_oif;
current_outer_oif = outer_oif;
+ current_inner_odata = current_data_ptr (nullptr, null_current_data_deleter);
+ current_outer_odata = current_data_ptr (nullptr, null_current_data_deleter);
current_on++;
current_mode = inner_oif.mode;
current_diag_noise = diag_noise;
@@ -757,6 +850,7 @@ namespace build2
dependency_count.store (0, memory_order_relaxed);
target_count.store (0, memory_order_relaxed);
skip_count.store (0, memory_order_relaxed);
+ resolve_count.store (0, memory_order_relaxed);
// Clear accumulated targets with post hoc prerequisites.
//
@@ -795,11 +889,11 @@ namespace build2
{
++contention; // Protected by m_.
- ctx_.sched.deactivate (false /* external */);
+ ctx_.sched->deactivate (false /* external */);
for (; ctx_.phase != n; v->wait (l)) ;
r = !fail_;
l.unlock (); // Important: activate() can block.
- ctx_.sched.activate (false /* external */);
+ ctx_.sched->activate (false /* external */);
}
else
r = !fail_;
@@ -811,9 +905,9 @@ namespace build2
{
if (!lm_.try_lock ())
{
- ctx_.sched.deactivate (false /* external */);
+ ctx_.sched->deactivate (false /* external */);
lm_.lock ();
- ctx_.sched.activate (false /* external */);
+ ctx_.sched->activate (false /* external */);
++contention_load; // Protected by lm_.
}
@@ -863,9 +957,9 @@ namespace build2
// relock().
//
if (o == run_phase::match && n == run_phase::execute)
- ctx_.sched.push_phase ();
+ ctx_.sched->push_phase ();
else if (o == run_phase::execute && n == run_phase::match)
- ctx_.sched.pop_phase ();
+ ctx_.sched->pop_phase ();
if (v != nullptr)
{
@@ -920,9 +1014,9 @@ namespace build2
// unlock().
//
if (o == run_phase::match && n == run_phase::execute)
- ctx_.sched.push_phase ();
+ ctx_.sched->push_phase ();
else if (o == run_phase::execute && n == run_phase::match)
- ctx_.sched.pop_phase ();
+ ctx_.sched->pop_phase ();
// Notify others that could be waiting for this phase.
//
@@ -936,11 +1030,11 @@ namespace build2
{
++contention; // Protected by m_.
- ctx_.sched.deactivate (false /* external */);
+ ctx_.sched->deactivate (false /* external */);
for (; ctx_.phase != n; v->wait (l)) ;
r = !fail_;
l.unlock (); // Important: activate() can block.
- ctx_.sched.activate (false /* external */);
+ ctx_.sched->activate (false /* external */);
}
}
@@ -955,9 +1049,9 @@ namespace build2
//
s = false;
- ctx_.sched.deactivate (false /* external */);
+ ctx_.sched->deactivate (false /* external */);
lm_.lock ();
- ctx_.sched.activate (false /* external */);
+ ctx_.sched->activate (false /* external */);
++contention_load; // Protected by lm_.
}
@@ -1030,35 +1124,35 @@ namespace build2
// phase_unlock
//
phase_unlock::
- phase_unlock (context& c, bool u, bool d)
- : ctx (u ? &c : nullptr), lock (nullptr)
+ phase_unlock (context* c, bool d)
+ : ctx (c), lock_ (nullptr)
{
- if (u && !d)
+ if (ctx != nullptr && !d)
unlock ();
}
void phase_unlock::
unlock ()
{
- if (ctx != nullptr && lock == nullptr)
+ if (ctx != nullptr && lock_ == nullptr)
{
- lock = phase_lock_instance;
- assert (&lock->ctx == ctx);
+ lock_ = phase_lock_instance;
+ assert (&lock_->ctx == ctx);
phase_lock_instance = nullptr; // Note: not lock->prev.
- ctx->phase_mutex.unlock (lock->phase);
+ ctx->phase_mutex.unlock (lock_->phase);
- //text << this_thread::get_id () << " phase unlock " << lock->phase;
+ //text << this_thread::get_id () << " phase unlock " << lock_->phase;
}
}
- phase_unlock::
- ~phase_unlock () noexcept (false)
+ void phase_unlock::
+ lock ()
{
- if (lock != nullptr)
+ if (lock_ != nullptr)
{
- bool r (ctx->phase_mutex.lock (lock->phase));
- phase_lock_instance = lock;
+ bool r (ctx->phase_mutex.lock (lock_->phase));
+ phase_lock_instance = lock_;
// Fail unless we are already failing. Note that we keep the phase
// locked since there will be phase_lock down the stack to unlock it.
@@ -1066,10 +1160,16 @@ namespace build2
if (!r && !uncaught_exception ())
throw failed ();
- //text << this_thread::get_id () << " phase lock " << lock->phase;
+ //text << this_thread::get_id () << " phase lock " << lock_->phase;
}
}
+ phase_unlock::
+ ~phase_unlock () noexcept (false)
+ {
+ lock ();
+ }
+
// phase_switch
//
phase_switch::
diff --git a/libbuild2/context.hxx b/libbuild2/context.hxx
index 27c3cc0..33fc892 100644
--- a/libbuild2/context.hxx
+++ b/libbuild2/context.hxx
@@ -21,7 +21,7 @@
namespace build2
{
class file_cache;
- class loaded_modules_lock;
+ class module_libraries_lock;
class LIBBUILD2_SYMEXPORT run_phase_mutex
{
@@ -120,6 +120,16 @@ namespace build2
}
};
+ // Match-only level.
+ //
+ // See the --match-only and --load-only options for background.
+ //
+ enum class match_only_level
+ {
+ alias, // Match only alias{} targets.
+ all // Match all targets.
+ };
+
// A build context encapsulates the state of a build. It is possible to have
// multiple build contexts provided they are non-overlapping, that is, they
// don't try to build the same projects (note that this is currently not
@@ -142,9 +152,9 @@ namespace build2
// instead go the multiple communicating schedulers route, a la the job
// server).
//
- // The loaded_modules state (module.hxx) is shared among all the contexts
+ // The module_libraries state (module.hxx) is shared among all the contexts
// (there is no way to have multiple shared library loading "contexts") and
- // is protected by loaded_modules_lock. A nested context should normally
+ // is protected by module_libraries_lock. A nested context should normally
// inherit this lock value from its outer context.
//
// Note also that any given thread should not participate in multiple
@@ -211,13 +221,15 @@ namespace build2
unique_ptr<data> data_;
public:
- scheduler& sched;
- global_mutexes& mutexes;
- file_cache& fcache;
+ // These are only NULL for the "bare minimum" context (see below).
+ //
+ scheduler* sched;
+ global_mutexes* mutexes;
+ file_cache* fcache;
- // Match only flag (see --match-only but also dist).
+ // Match only flag/level (see --{load,match}-only but also dist).
//
- bool match_only;
+ optional<match_only_level> match_only;
// Skip booting external modules flag (see --no-external-modules).
//
@@ -339,6 +351,22 @@ namespace build2
(current_mname.empty () && current_oname == mo));
};
+ // Meta/operation-specific context-global auxiliary data storage.
+ //
+ // Note: cleared by current_[meta_]operation() below. Normally set by
+ // meta/operation-specific callbacks from [mate_]operation_info.
+ //
+ // Note also: watch out for MT-safety in the data itself.
+ //
+ static void
+ null_current_data_deleter (void* p) { assert (p == nullptr); }
+
+ using current_data_ptr = unique_ptr<void, void (*) (void*)>;
+
+ current_data_ptr current_mdata = {nullptr, null_current_data_deleter};
+ current_data_ptr current_inner_odata = {nullptr, null_current_data_deleter};
+ current_data_ptr current_outer_odata = {nullptr, null_current_data_deleter};
+
// Current operation number (1-based) in the meta-operation batch.
//
size_t current_on;
@@ -377,11 +405,14 @@ namespace build2
// decremented after such recipe has been executed. If such a recipe has
// skipped executing the operation, then it should increment the skip
// count. These two counters are used for progress monitoring and
- // diagnostics.
+ // diagnostics. The resolve count keeps track of the number of targets
+ // matched but not executed as a result of the resolve_members() calls
+ // (see also target::resolve_counted).
//
atomic_count dependency_count;
atomic_count target_count;
atomic_count skip_count;
+ atomic_count resolve_count;
// Build state (scopes, targets, variables, etc).
//
@@ -401,9 +432,15 @@ namespace build2
//
struct posthoc_target
{
+ struct prerequisite_target
+ {
+ const build2::target* target;
+ uint64_t match_options;
+ };
+
build2::action action;
reference_wrapper<const build2::target> target;
- vector<const build2::target*> prerequisite_targets;
+ vector<prerequisite_target> prerequisite_targets;
};
list<posthoc_target> current_posthoc_targets;
@@ -630,9 +667,9 @@ namespace build2
dir_path old_src_root;
dir_path new_src_root;
- // NULL if this context hasn't already locked the loaded_modules state.
+ // NULL if this context hasn't already locked the module_libraries state.
//
- const loaded_modules_lock* modules_lock;
+ const module_libraries_lock* modules_lock;
// Nested context for updating build system modules and ad hoc recipes.
//
@@ -649,6 +686,11 @@ namespace build2
// properly setup context (including, normally, a self-reference in
// modules_context).
//
+ // The var_override_function callback can be used to parse ad hoc project-
+ // wide variable overrides (see parse_variable_override()). This has to
+ // happen at a specific point during context construction (see the
+ // implementation for details).
+ //
// Note: see also the trace_* data members that, if needed, must be set
// separately, after construction.
//
@@ -661,11 +703,12 @@ namespace build2
reserves (size_t t, size_t v): targets (t), variables (v) {}
};
- explicit
+ using var_override_function = void (context&, size_t&);
+
context (scheduler&,
global_mutexes&,
file_cache&,
- bool match_only = false,
+ optional<match_only_level> match_only = nullopt,
bool no_external_modules = false,
bool dry_run = false,
bool no_diag_buffer = false,
@@ -673,7 +716,16 @@ namespace build2
const strings& cmd_vars = {},
reserves = {0, 160},
optional<context*> module_context = nullptr,
- const loaded_modules_lock* inherited_mudules_lock = nullptr);
+ const module_libraries_lock* inherited_modules_lock = nullptr,
+ const function<var_override_function>& = nullptr);
+
+ // Special context with bare minimum of initializations. It is only
+ // guaranteed to be sufficiently initialized to call extract_variable().
+ //
+ // Note that for this purpose you may omit calls to init_diag() and
+ // init().
+ //
+ context ();
// Reserve elements in containers to avoid re-allocation/re-hashing. Zero
// values are ignored (that is, the corresponding container reserve()
@@ -682,12 +734,28 @@ namespace build2
void
reserve (reserves);
+ // Parse a variable override returning its type in the first half of the
+ // pair. Index is the variable index (used to derive unique name) and if
+ // buildspec is true then assume `--` is used as a separator between
+ // variables and buildspec and issue appropriate diagnostics.
+ //
+ // Note: should only be called from the var_override_function constructor
+ // callback.
+ //
+ pair<char, variable_override>
+ parse_variable_override (const string& var, size_t index, bool buildspec);
+
// Enter project-wide (as opposed to global) variable overrides.
//
+ // If the amalgamation scope is specified, then use it instead of
+ // rs.weak_scope() to set overrides with global visibility (make sure you
+ // understand the implications before doing this).
+ //
void
enter_project_overrides (scope& rs,
const dir_path& out_base,
- const variable_overrides&);
+ const variable_overrides&,
+ scope* amalgamation = nullptr);
// Set current meta-operation and operation.
//
@@ -781,14 +849,20 @@ namespace build2
//
struct LIBBUILD2_SYMEXPORT phase_unlock
{
- phase_unlock (context&, bool unlock = true, bool delay = false);
+ explicit phase_unlock (context*, bool delay = false);
+ explicit phase_unlock (context& ctx, bool delay = false)
+ : phase_unlock (&ctx, delay) {}
+
~phase_unlock () noexcept (false);
void
unlock ();
+ void
+ lock ();
+
context* ctx;
- phase_lock* lock;
+ phase_lock* lock_;
};
// Assuming we have a lock on the current phase, temporarily switch to a
@@ -834,8 +908,8 @@ namespace build2
// Note: move-assignable to empty only.
//
- wait_guard (wait_guard&&);
- wait_guard& operator= (wait_guard&&);
+ wait_guard (wait_guard&&) noexcept;
+ wait_guard& operator= (wait_guard&&) noexcept;
wait_guard (const wait_guard&) = delete;
wait_guard& operator= (const wait_guard&) = delete;
diff --git a/libbuild2/context.ixx b/libbuild2/context.ixx
index 4f86c28..6c8c428 100644
--- a/libbuild2/context.ixx
+++ b/libbuild2/context.ixx
@@ -31,7 +31,7 @@ namespace build2
}
inline wait_guard::
- wait_guard (wait_guard&& x)
+ wait_guard (wait_guard&& x) noexcept
: ctx (x.ctx),
start_count (x.start_count),
task_count (x.task_count),
@@ -41,7 +41,7 @@ namespace build2
}
inline wait_guard& wait_guard::
- operator= (wait_guard&& x)
+ operator= (wait_guard&& x) noexcept
{
if (&x != this)
{
@@ -56,8 +56,8 @@ namespace build2
inline void wait_guard::
wait ()
{
- phase_unlock u (*ctx, phase, true /* delay */);
- ctx->sched.wait (start_count, *task_count, u);
+ phase_unlock u (phase ? ctx : nullptr, true /* delay */);
+ ctx->sched->wait (start_count, *task_count, u);
task_count = nullptr;
}
}
diff --git a/libbuild2/cxx/init.cxx b/libbuild2/cxx/init.cxx
index fd6d04c..8159d18 100644
--- a/libbuild2/cxx/init.cxx
+++ b/libbuild2/cxx/init.cxx
@@ -7,10 +7,12 @@
#include <libbuild2/diagnostics.hxx>
#include <libbuild2/config/utility.hxx>
+#include <libbuild2/install/utility.hxx>
#include <libbuild2/cc/guess.hxx>
#include <libbuild2/cc/module.hxx>
+#include <libbuild2/cc/target.hxx> // pc*
#include <libbuild2/cxx/target.hxx>
#ifndef BUILD2_DEFAULT_CXX
@@ -62,8 +64,8 @@ namespace build2
uint64_t mi (ci.version.minor);
uint64_t p (ci.version.patch);
- // Besides various `c++NN` we have two special values: `latest` and
- // `experimental`.
+ // Besides various `NN` we have two special values: `latest` and
+ // `experimental`. It can also be `gnu++NN`.
//
// The semantics of the `latest` value is the latest available standard
// that is not necessarily complete or final but is practically usable.
@@ -91,6 +93,24 @@ namespace build2
bool latest (v != nullptr && *v == "latest");
bool experimental (v != nullptr && *v == "experimental");
+ // This helper recognizes both NN and [cC]++NN to avoid an endless stream
+ // of user questions. It can also be used to recognize Nx in addition to
+ // NN (e.g., "14" and "1y").
+ //
+ auto stdcmp = [v] (const char* nn, const char* nx = nullptr)
+ {
+ if (v != nullptr)
+ {
+ const char* s (v->c_str ());
+ if ((s[0] == 'c' || s[0] == 'C') && s[1] == '+' && s[2] == '+')
+ s += 3;
+
+ return strcmp (s, nn) == 0 || (nx != nullptr && strcmp (s, nx) == 0);
+ }
+
+ return false;
+ };
+
// Feature flags.
//
auto& vp (rs.var_pool (true /* public */)); // All qualified.
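
As a rough illustration of the stdcmp() lambda introduced above, here is the same normalization extracted into a free function so it can be compiled standalone (illustration only; names are made up):

  #include <cassert>
  #include <cstring>

  // Strip an optional c++/C++ prefix and compare against NN or the Nx
  // alias, mirroring stdcmp().
  //
  static bool
  stdcmp_like (const char* v, const char* nn, const char* nx = nullptr)
  {
    const char* s (v);

    if ((s[0] == 'c' || s[0] == 'C') && s[1] == '+' && s[2] == '+')
      s += 3;

    return std::strcmp (s, nn) == 0 ||
           (nx != nullptr && std::strcmp (s, nx) == 0);
  }

  int main ()
  {
    assert (stdcmp_like ("14",    "14", "1y"));
    assert (stdcmp_like ("c++14", "14", "1y"));
    assert (stdcmp_like ("C++1y", "14", "1y"));
    assert (!stdcmp_like ("17",   "14", "1y"));
  }
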
@@ -157,6 +177,10 @@ namespace build2
i = mode.insert (i, move (o)) + 1;
};
+ // Derive approximate __cplusplus value from the standard if possible.
+ //
+ optional<uint32_t> cplusplus;
+
switch (cl)
{
case compiler_class::msvc:
@@ -186,6 +210,26 @@ namespace build2
{
if (v14_3)
o = "/std:c++latest";
+
+ // According to the documentation:
+ //
+ // "The value of __cplusplus with the /std:c++latest option
+ // depends on the version of Visual Studio. It's always at least
+ // one higher than the highest supported __cplusplus standard
+ // value supported by your version of Visual Studio."
+ //
+ if (v16_11)
+ cplusplus = 202002 + 1;
+ else if (v16_0)
+ cplusplus = 201703 + 1;
+ else if (v14_3)
+ cplusplus = 201402 + 1;
+ else if (mj >= 19)
+ cplusplus = 201402;
+ else if (mj >= 16)
+ cplusplus = 201103;
+ else
+ cplusplus = 199711;
}
else if (latest)
{
@@ -194,55 +238,78 @@ namespace build2
// for this mode. So starting from 16 we only enable it in
// `experimental`.
//
+ // Note: no /std:c++23 yet as of MSVC 17.6.
+ //
if (v16_11)
o = "/std:c++20";
else if (v16_0)
o = "/std:c++17";
else if (v14_3)
o = "/std:c++latest";
+
+ if (v16_11)
+ cplusplus = 202002;
+ else if (v16_0)
+ cplusplus = 201703;
+ else if (v14_3)
+ cplusplus = 201402 + 1;
+ else if (mj >= 19)
+ cplusplus = 201402;
+ else if (mj >= 16)
+ cplusplus = 201103;
+ else
+ cplusplus = 199711;
}
else if (v == nullptr)
- ;
- else if (*v != "98" && *v != "03")
+ {
+ // @@ TODO: map defaults to cplusplus for each version.
+ }
+ else if (!stdcmp ("98") && !stdcmp ("03"))
{
bool sup (false);
- if (*v == "11") // C++11 since VS2010/10.0.
+ if (stdcmp ("11", "0x")) // C++11 since VS2010/10.0.
{
sup = mj >= 16;
+ cplusplus = 201103;
}
- else if (*v == "14") // C++14 since VS2015/14.0.
+ else if (stdcmp ("14", "1y")) // C++14 since VS2015/14.0.
{
sup = mj >= 19;
+ cplusplus = 201402;
}
- else if (*v == "17") // C++17 since VS2015/14.0u2.
+ else if (stdcmp ("17", "1z")) // C++17 since VS2015/14.0u2.
{
// Note: the VC15 compiler version is 19.10.
//
sup = (mj > 19 ||
(mj == 19 && (mi > 0 || (mi == 0 && p >= 23918))));
+ cplusplus = 201703;
}
- else if (*v == "20") // C++20 since VS2019/16.11.
+ else if (stdcmp ("20", "2a")) // C++20 since VS2019/16.11.
{
sup = v16_11;
+ cplusplus = 202002;
}
if (!sup)
- fail << "C++" << *v << " is not supported by " << ci.signature <<
+ fail << "C++ " << *v << " is not supported by " << ci.signature <<
info << "required by " << project (rs) << '@' << rs;
if (v15_3)
{
- if (*v == "20") o = "/std:c++20";
- else if (*v == "17") o = "/std:c++17";
- else if (*v == "14") o = "/std:c++14";
+ if (stdcmp ("20", "2a")) o = "/std:c++20";
+ else if (stdcmp ("17", "1z")) o = "/std:c++17";
+ else if (stdcmp ("14", "1y")) o = "/std:c++14";
}
else if (v14_3)
{
- if (*v == "14") o = "/std:c++14";
- else if (*v == "17") o = "/std:c++latest";
+ if (stdcmp ("14", "1y")) o = "/std:c++14";
+ else if (stdcmp ("17", "1z")) o = "/std:c++latest";
}
}
+ else
+ cplusplus = 199711;
if (!o.empty ())
prepend (move (o));
@@ -268,11 +335,33 @@ namespace build2
{
case compiler_type::gcc:
{
- if (mj >= 11) o = "-std=c++23"; // 23
- else if (mj >= 8) o = "-std=c++2a"; // 20
- else if (mj >= 5) o = "-std=c++1z"; // 17
- else if (mj == 4 && mi >= 8) o = "-std=c++1y"; // 14
- else if (mj == 4 && mi >= 4) o = "-std=c++0x"; // 11
+ if (mj >= 11)
+ {
+ o = "-std=c++23";
+ cplusplus = 202302;
+ }
+ else if (mj >= 8)
+ {
+ o = "-std=c++2a";
+ cplusplus = 202002;
+ }
+ else if (mj >= 5)
+ {
+ o = "-std=c++1z";
+ cplusplus = 201703;
+ }
+ else if (mj == 4 && mi >= 8)
+ {
+ o = "-std=c++1y";
+ cplusplus = 201402;
+ }
+ else if (mj == 4 && mi >= 4)
+ {
+ o = "-std=c++0x";
+ cplusplus = 201103;
+ }
+ else
+ cplusplus = 199711;
break;
}
@@ -290,21 +379,56 @@ namespace build2
// MSVC.
//
- if (mj >= 13) o = "-std=c++2b";
- else if (mj == 10 &&
- latest && tt.system == "win32-msvc") o = "-std=c++17";
- else if (mj >= 5) o = "-std=c++2a";
- else if (mj > 3 || (mj == 3 && mi >= 5)) o = "-std=c++1z";
- else if (mj == 3 && mi >= 4) o = "-std=c++1y";
- else /* ??? */ o = "-std=c++0x";
+ if (mj >= 13)
+ {
+ o = "-std=c++2b";
+ cplusplus = 202302;
+ }
+ else if (mj == 10 && latest && tt.system == "win32-msvc")
+ {
+ o = "-std=c++17";
+ cplusplus = 201703;
+ }
+ else if (mj >= 5)
+ {
+ o = "-std=c++2a";
+ cplusplus = 202002;
+ }
+ else if (mj > 3 || (mj == 3 && mi >= 5))
+ {
+ o = "-std=c++1z";
+ cplusplus = 201703;
+ }
+ else if (mj == 3 && mi >= 4)
+ {
+ o = "-std=c++1y";
+ cplusplus = 201402;
+ }
+ else /* ??? */
+ {
+ o = "-std=c++0x";
+ cplusplus = 201103;
+ }
break;
}
case compiler_type::icc:
{
- if (mj >= 17) o = "-std=c++1z";
- else if (mj > 15 || (mj == 15 && p >= 3)) o = "-std=c++1y";
- else /* ??? */ o = "-std=c++0x";
+ if (mj >= 17)
+ {
+ o = "-std=c++1z";
+ cplusplus = 201703;
+ }
+ else if (mj > 15 || (mj == 15 && p >= 3))
+ {
+ o = "-std=c++1y";
+ cplusplus = 201402;
+ }
+ else /* ??? */
+ {
+ o = "-std=c++0x";
+ cplusplus = 201103;
+ }
break;
}
@@ -313,22 +437,33 @@ namespace build2
}
}
else if (v == nullptr)
- ;
+ {
+ // @@ TODO: map defaults to cplusplus for each version.
+ }
else
{
- // Translate 11 to 0x, 14 to 1y, 17 to 1z, 20 to 2a, and 23 to 2b
- // for compatibility with older versions of the compilers.
+ // Translate 11 to 0x, 14 to 1y, 17 to 1z, 20 to 2a, 23 to 2b, and
+ // 26 to 2c for compatibility with older versions of the
+ // compilers.
+ //
+ // @@ TMP: update C++26 __cplusplus value once known.
//
o = "-std=";
- if (*v == "23") o += "c++2b";
- else if (*v == "20") o += "c++2a";
- else if (*v == "17") o += "c++1z";
- else if (*v == "14") o += "c++1y";
- else if (*v == "11") o += "c++0x";
- else if (*v == "03") o += "c++03";
- else if (*v == "98") o += "c++98";
- else o += *v; // In case the user specifies `gnu++NN` or some such.
+ if (stdcmp ("26", "2c")) {o += "c++2c"; cplusplus = 202400;}
+ else if (stdcmp ("23", "2b")) {o += "c++2b"; cplusplus = 202302;}
+ else if (stdcmp ("20", "2a")) {o += "c++2a"; cplusplus = 202002;}
+ else if (stdcmp ("17", "1z")) {o += "c++1z"; cplusplus = 201703;}
+ else if (stdcmp ("14", "1y")) {o += "c++1y"; cplusplus = 201402;}
+ else if (stdcmp ("11", "0x")) {o += "c++0x"; cplusplus = 201103;}
+ else if (stdcmp ("03") ) {o += "c++03"; cplusplus = 199711;}
+ else if (stdcmp ("98") ) {o += "c++98"; cplusplus = 199711;}
+ else
+ {
+ o += *v; // In case the user specifies `gnu++NN` or some such.
+
+ // @@ TODO: can we still try to derive cplusplus value?
+ }
}
if (!o.empty ())
@@ -338,6 +473,8 @@ namespace build2
}
}
+ // Additional experimental options.
+ //
if (experimental)
{
switch (ct)
@@ -357,85 +494,124 @@ namespace build2
default:
break;
}
+ }
- // Unless disabled by the user, try to enable C++ modules.
- //
- if (!modules.value || *modules.value)
+ // Unless disabled by the user, try to enable C++ modules.
+ //
+ // NOTE: see also diagnostics about modules support required in compile
+ // rule.
+ //
+ if (!modules.value || *modules.value)
+ {
+ switch (ct)
{
- switch (ct)
+ case compiler_type::msvc:
{
- case compiler_type::msvc:
+ // Modules are enabled by default in /std:c++20 and
+ // /std:c++latest with both defining __cpp_modules to 201907
+ // (final C++20 modules), at least as of 17.6 (LTS).
+ //
+ // @@ Should we enable modules by default? There are still some
+ // serious bugs, like inability to both `import std;` and
+ // `#include <string>` in the same translation unit (see Visual
+ // Studio issue #10541166).
+ //
+ if (modules.value)
{
- // While modules are supported in VC 15.0 (19.10), there is a
- // bug in the separate interface/implementation unit support
- // which makes them pretty much unusable. This has been fixed in
- // 15.3 (19.11). And 15.5 (19.12) supports the `export module
- // M;` syntax. And 16.4 (19.24) supports the global module
- // fragment. And in 16.8 all the modules-related options have
- // been changed. Seeing that the whole thing is unusable anyway,
- // we disable it for 16.8 or later for now.
- //
- if ((mj > 19 || (mj == 19 && mi >= (modules.value ? 10 : 12))) &&
- (mj < 19 || (mj == 19 && mi < 28) || modules.value))
+ if (cplusplus && *cplusplus < 202002)
{
- prepend (
- mj > 19 || mi >= 24 ?
- "/D__cpp_modules=201810" : // p1103 (merged modules)
- mj == 19 || mi >= 12 ?
- "/D__cpp_modules=201704" : // p0629r0 (export module M;)
- "/D__cpp_modules=201703"); // n4647 ( module M;)
-
- prepend ("/experimental:module");
- modules = true;
+ fail << "support for C++ modules requires C++20 or later" <<
+ info << "standard in use is " << *cplusplus <<
+ info << "required by " << project (rs) << '@' << rs;
}
- break;
- }
- case compiler_type::gcc:
- {
- // We use the module mapper support which is only available
- // since GCC 11. And since we are not yet capable of supporting
- // generated headers via the mapper, we require the user to
- // explicitly request modules.
- //
- if (mj >= 11 && modules.value)
+
+ if (mj < 19 || (mj == 19 && mi < 36))
{
- // Defines __cpp_modules:
- //
- // 11 -- 201810
- //
- prepend ("-fmodules-ts");
- modules = true;
+ fail << "support for C++ modules requires MSVC 17.6 or later" <<
+ info << "C++ compiler is " << ci.signature <<
+ info << "required by " << project (rs) << '@' << rs;
}
- break;
+ modules = true;
}
- case compiler_type::clang:
+
+ break;
+ }
+ case compiler_type::gcc:
+ {
+ // We use the module mapper support which is only available since
+ // GCC 11. And since we are not yet capable of supporting
+ // generated headers via the mapper, we require the user to
+ // explicitly request modules.
+ //
+ // @@ Actually, now that we pre-generate headers by default, this
+ // is probably no longer the reason. But GCC modules being
+ // unusable due to bugs is still a reason.
+ //
+ if (modules.value)
{
- // At the time of this writing, support for C++20 modules in
- // Clang is incomplete. And starting with Clang 9 (Apple Clang
- // 11.0.3), they are enabled by default in the C++2a mode which
- // breaks the way we set things up for partial preprocessing;
- // see this post for details:
- //
- // http://lists.llvm.org/pipermail/cfe-dev/2019-October/063637.html
- //
- // As a result, for now, we only enable modules if forced with
- // explicit cxx.features.modules=true.
+ if (cplusplus && *cplusplus < 202002)
+ {
+ fail << "support for C++ modules requires C++20 or later" <<
+ info << "standard in use is " << *cplusplus <<
+ info << "required by " << project (rs) << '@' << rs;
+ }
+
+ if (mj < 11)
+ {
+ fail << "support for C++ modules requires GCC 11 or later" <<
+ info << "C++ compiler is " << ci.signature <<
+ info << "required by " << project (rs) << '@' << rs;
+ }
+
+ // Defines __cpp_modules:
//
- // Also see Clang modules support hack in cc::compile.
+ // 11 -- 201810
//
- if (modules.value)
+ prepend ("-fmodules-ts");
+ modules = true;
+ }
+
+ break;
+ }
+ case compiler_type::clang:
+ {
+ // Things (command line options, semantics) changed quite a bit
+ // around Clang 16 so we don't support anything earlier than
+ // that (it's not practically usable anyway).
+ //
+ // Clang enables modules by default in C++20 or later but doesn't
+ // yet (as of Clang 18) define __cpp_modules. When it does, we can
+ // consider enabling modules by default on our side.
+ // For now, we only enable modules if forced with explicit
+ // cxx.features.modules=true.
+ //
+ if (modules.value)
+ {
+ if (cplusplus && *cplusplus < 202002)
+ {
+ fail << "support for C++ modules requires C++20 or later" <<
+ info << "standard in use is " << *cplusplus <<
+ info << "required by " << project (rs) << '@' << rs;
+ }
+
+ if (mj < 16)
{
- prepend ("-D__cpp_modules=201704"); // p0629r0
- mode.push_back ("-fmodules-ts"); // For the hack to work.
- modules = true;
+ fail << "support for C++ modules requires Clang 16 or later" <<
+ info << "C++ compiler is " << ci.signature <<
+ info << "required by " << project (rs) << '@' << rs;
}
- break;
+ // See https://github.com/llvm/llvm-project/issues/71364
+ //
+ prepend ("-D__cpp_modules=201907L");
+ modules = true;
}
- case compiler_type::icc:
- break; // No modules support yet.
+
+ break;
}
+ case compiler_type::icc:
+ break; // No modules support yet.
}
}
@@ -443,6 +619,95 @@ namespace build2
//set_feature (concepts);
}
+ // See cc::data::x_{hdr,inc} for background.
+ //
+ static const target_type* const hdr[] =
+ {
+ &hxx::static_type,
+ &ixx::static_type,
+ &txx::static_type,
+ &mxx::static_type,
+ nullptr
+ };
+
+ // Note that we don't include S{} here because none of the files we
+ // compile can plausibly want to include .S. (Maybe in inline assembler
+ // instructions?)
+ //
+ static const target_type* const inc[] =
+ {
+ &hxx::static_type,
+ &h::static_type,
+ &ixx::static_type,
+ &txx::static_type,
+ &mxx::static_type,
+ &cxx::static_type,
+ &c::static_type,
+ &mm::static_type,
+ &m::static_type,
+ &cxx_inc::static_type,
+ &cc::c_inc::static_type,
+ nullptr
+ };
+
+ bool
+ types_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("cxx::types_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "cxx.types module must be loaded in project root";
+
+ // Register target types and configure their "installability".
+ //
+ using namespace install;
+
+ bool install_loaded (cast_false<bool> (rs["install.loaded"]));
+
+ // Note: not registering mm{} (it is registered separately by the
+ // respective optional .types submodule).
+ //
+ // Note: mxx{} is in hdr. @@ But maybe it shouldn't be...
+ //
+ rs.insert_target_type<cxx> ();
+
+ auto insert_hdr = [&rs, install_loaded] (const target_type& tt)
+ {
+ rs.insert_target_type (tt);
+
+ // Install headers into install.include.
+ //
+ if (install_loaded)
+ install_path (rs, tt, dir_path ("include"));
+ };
+
+ for (const target_type* const* ht (hdr); *ht != nullptr; ++ht)
+ insert_hdr (**ht);
+
+ // Also register the C header for C-derived languages.
+ //
+ insert_hdr (h::static_type);
+
+ // @@ PERF: maybe factor this to cc.types?
+ //
+ rs.insert_target_type<cc::pc> ();
+ rs.insert_target_type<cc::pca> ();
+ rs.insert_target_type<cc::pcs> ();
+
+ if (install_loaded)
+ install_path<cc::pc> (rs, dir_path ("pkgconfig"));
+
+ return true;
+ }
+
static const char* const hinters[] = {"c", nullptr};
// See cc::module for details on guess_init vs config_init.
@@ -673,8 +938,8 @@ namespace build2
vp["cc.export.libs"],
vp["cc.export.impl_libs"],
- vp["cc.pkconfig.include"],
- vp["cc.pkconfig.lib"],
+ vp["cc.pkgconfig.include"],
+ vp["cc.pkgconfig.lib"],
vp.insert<string> ("cxx.stdlib"),
@@ -686,6 +951,7 @@ namespace build2
vp["cc.module_name"],
vp["cc.importable"],
vp["cc.reprocess"],
+ vp["cc.serialize"],
// Ability to signal that source is already (partially) preprocessed.
// Valid values are 'none' (not preprocessed), 'includes' (no #include
@@ -774,29 +1040,6 @@ namespace build2
return true;
}
- static const target_type* const hdr[] =
- {
- &hxx::static_type,
- &ixx::static_type,
- &txx::static_type,
- &mxx::static_type,
- nullptr
- };
-
- static const target_type* const inc[] =
- {
- &hxx::static_type,
- &h::static_type,
- &ixx::static_type,
- &txx::static_type,
- &mxx::static_type,
- &cxx::static_type,
- &c::static_type,
- &mm::static_type,
- &m::static_type,
- nullptr
- };
-
bool
init (scope& rs,
scope& bs,
@@ -837,8 +1080,7 @@ namespace build2
"cxx.link",
"cxx.install",
- cm.x_info->id.type,
- cm.x_info->id.variant,
+ cm.x_info->id,
cm.x_info->class_,
cm.x_info->version.major,
cm.x_info->version.minor,
@@ -871,6 +1113,7 @@ namespace build2
cxx::static_type,
modules ? &mxx::static_type : nullptr,
+ cxx_inc::static_type,
hdr,
inc
};
@@ -882,12 +1125,35 @@ namespace build2
}
bool
+ objcxx_types_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("cxx::objcxx_types_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "cxx.objcxx.types module must be loaded in project root";
+
+ // Register the mm{} target type.
+ //
+ rs.insert_target_type<mm> ();
+
+ return true;
+ }
+
+ bool
objcxx_init (scope& rs,
scope& bs,
const location& loc,
bool,
bool,
- module_init_extra&)
+ module_init_extra&)
{
tracer trace ("cxx::objcxx_init");
l5 ([&]{trace << "for " << bs;});
@@ -910,7 +1176,7 @@ namespace build2
//
// Note: see similar code in the c module.
//
- rs.insert_target_type<mm> ();
+ load_module (rs, rs, "cxx.objcxx.types", loc);
// Note that while Objective-C++ is supported by MinGW GCC, it's
// unlikely Clang supports it when targeting MSVC or Emscripten. But
@@ -923,16 +1189,55 @@ namespace build2
return true;
}
+ bool
+ predefs_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("cxx::predefs_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "cxx.predefs module must be loaded in project root";
+
+ module* mod (rs.find_module<module> ("cxx"));
+
+ if (mod == nullptr)
+ fail (loc) << "cxx.predefs module must be loaded after cxx module";
+
+ // Register the cxx.predefs rule.
+ //
+ // Why invent a separate module instead of just always registering it in
+ // the cxx module? The reason is performance: this rule will be called
+ // for every C++ header.
+ //
+ cc::predefs_rule& r (*mod);
+
+ rs.insert_rule<hxx> (perform_update_id, r.rule_name, r);
+ rs.insert_rule<hxx> (perform_clean_id, r.rule_name, r);
+ rs.insert_rule<hxx> (configure_update_id, r.rule_name, r);
+
+ return true;
+ }
+
static const module_functions mod_functions[] =
{
// NOTE: don't forget to also update the documentation in init.hxx if
// changing anything here.
- {"cxx.guess", nullptr, guess_init},
- {"cxx.config", nullptr, config_init},
- {"cxx", nullptr, init},
- {"cxx.objcxx", nullptr, objcxx_init},
- {nullptr, nullptr, nullptr}
+ {"cxx.types", nullptr, types_init},
+ {"cxx.guess", nullptr, guess_init},
+ {"cxx.config", nullptr, config_init},
+ {"cxx.objcxx.types", nullptr, objcxx_types_init},
+ {"cxx.objcxx", nullptr, objcxx_init},
+ {"cxx.predefs", nullptr, predefs_init},
+ {"cxx", nullptr, init},
+ {nullptr, nullptr, nullptr}
};
const module_functions*
diff --git a/libbuild2/cxx/init.hxx b/libbuild2/cxx/init.hxx
index 0e42cbe..a193e74 100644
--- a/libbuild2/cxx/init.hxx
+++ b/libbuild2/cxx/init.hxx
@@ -19,11 +19,19 @@ namespace build2
//
// Submodules:
//
- // `cxx.guess` -- registers and sets some variables.
- // `cxx.config` -- loads cxx.guess and sets more variables.
- // `cxx` -- loads cxx.config and registers target types and rules.
- // `cxx.objcxx` -- registers mm{} target type and enables Objective-C++
- // compilation.
+ // `cxx.types` -- registers target types.
+ // `cxx.guess` -- registers and sets some variables.
+ // `cxx.config` -- loads cxx.guess and sets more variables.
+ // `cxx` -- loads cxx.{types,config} and registers rules
+ // and functions.
+ //
+ // `cxx.objcxx.types` -- registers mm{} target type.
+ // `cxx.objcxx` -- loads cxx.objcxx and enables Objective-C++
+ // compilation.
+ //
+ // `cxx.predefs` -- registers rule for generating a C++ header with
+ // predefined compiler macros. Must be loaded after
+ // cxx.
//
extern "C" LIBBUILD2_CXX_SYMEXPORT const module_functions*
build2_cxx_load ();
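A minimal sketch of the types-submodule pattern listed above: a types-only init function that does nothing but register target types, plus a main init that pulls it in via load_module() before registering rules. It assumes a hypothetical mylang module with a single myl{} target type (the names are illustrative only); the structure mirrors the objcxx_types_init()/objcxx_init() code in the init.cxx diff above.

    bool
    mylang_types_init (scope& rs,
                       scope& bs,
                       const location& loc,
                       bool, bool,
                       module_init_extra&)
    {
      // Only support loading from the project root, same as cxx.objcxx.types.
      //
      if (rs != bs)
        fail (loc) << "mylang.types module must be loaded in project root";

      // Register the myl{} target type (assumed to be defined elsewhere).
      //
      rs.insert_target_type<myl> ();
      return true;
    }

    bool
    mylang_init (scope& rs,
                 scope& bs,
                 const location& loc,
                 bool, bool,
                 module_init_extra&)
    {
      // Load the types submodule first so that the target types are
      // available, then register rules, variables, and so on.
      //
      load_module (rs, rs, "mylang.types", loc);

      // ... rule and variable registration would go here ...

      return true;
    }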
diff --git a/libbuild2/cxx/target.cxx b/libbuild2/cxx/target.cxx
index 5ead620..37096c3 100644
--- a/libbuild2/cxx/target.cxx
+++ b/libbuild2/cxx/target.cxx
@@ -90,5 +90,18 @@ namespace build2
&file_search,
target_type::flag::none
};
+
+ const target_type cxx_inc::static_type
+ {
+ "cxx_inc",
+ &cc::static_type,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ &target_search,
+ target_type::flag::none
+ };
}
}
diff --git a/libbuild2/cxx/target.hxx b/libbuild2/cxx/target.hxx
index fc85f75..06e8a67 100644
--- a/libbuild2/cxx/target.hxx
+++ b/libbuild2/cxx/target.hxx
@@ -7,7 +7,6 @@
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
-#include <libbuild2/target.hxx>
#include <libbuild2/cc/target.hxx>
#include <libbuild2/cxx/export.hxx>
@@ -104,6 +103,25 @@ namespace build2
public:
static const target_type static_type;
};
+
+ // This is an abstract base target for deriving additional targets (for
+ // example, Qt moc{}) that can be #include'd in C++ translation units. In
+ // particular, only such targets will be considered when reverse-looking up
+ // extensions to target types (see dyndep_rule::map_extension() for
+ // background).
+ //
+ class LIBBUILD2_CXX_SYMEXPORT cxx_inc: public cc::cc
+ {
+ public:
+ cxx_inc (context& c, dir_path d, dir_path o, string n)
+ : cc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
+
+ public:
+ static const target_type static_type;
+ };
}
}
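As a concrete illustration of the comment above (deriving additional #include-able target types from cxx_inc, for example for Qt moc output), here is a hypothetical sketch of such a derived type for a tool-generated header. The gen_hxx name and the factory/extension/search hooks are assumptions for illustration, and the code is assumed to live in the same build2::cxx namespace; the boilerplate follows the cxx_inc definitions in this diff.

    class gen_hxx: public cxx_inc
    {
    public:
      gen_hxx (context& c, dir_path d, dir_path o, string n)
        : cxx_inc (c, move (d), move (o), move (n))
      {
        dynamic_type = &static_type;
      }

    public:
      static const target_type static_type;
    };

    const target_type gen_hxx::static_type
    {
      "gen_hxx",
      &cxx_inc::static_type, // Derived from cxx_inc so that this type is
                             // considered in extension reverse-lookup.
      &target_factory<gen_hxx>,
      nullptr, // fixed_extension
      nullptr, // default_extension
      nullptr, // pattern
      nullptr, // print
      &file_search,
      target_type::flag::none
    };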
diff --git a/libbuild2/diagnostics.cxx b/libbuild2/diagnostics.cxx
index d91150b..4a46756 100644
--- a/libbuild2/diagnostics.cxx
+++ b/libbuild2/diagnostics.cxx
@@ -4,7 +4,9 @@
#include <libbuild2/diagnostics.hxx>
#include <cstring> // strchr(), memcpy()
+#include <cstdlib> // getenv()
+#include <libbutl/fdstream.hxx> // fdterm_color()
#include <libbutl/process-io.hxx>
#include <libbuild2/scope.hxx>
@@ -17,30 +19,70 @@ using namespace butl;
namespace build2
{
- // Diagnostics state (verbosity level, progress, etc). Keep disabled until
- // set from options.
+ // Diagnostics state (verbosity level, progress, etc). Keep default/disabled
+ // until set from options.
//
- uint16_t verb = 0;
- bool silent = true;
+ uint16_t verb = 1;
+ bool silent = false;
optional<bool> diag_progress_option;
+ optional<bool> diag_color_option;
bool diag_no_line = false;
bool diag_no_column = false;
bool stderr_term = false;
+ bool stderr_term_color = false;
void
- init_diag (uint16_t v, bool s, optional<bool> p, bool nl, bool nc, bool st)
+ init_diag (uint16_t v,
+ bool s,
+ optional<bool> p,
+ optional<bool> c,
+ bool nl,
+ bool nc,
+ bool st)
{
assert (!s || v == 0);
verb = v;
silent = s;
diag_progress_option = p;
+ diag_color_option = c;
diag_no_line = nl;
diag_no_column = nc;
stderr_term = st;
+
+ if (st)
+ {
+ // @@ TMP: eventually we want to enable on Windows by default.
+ //
+#ifdef _WIN32
+ if (c && *c)
+ {
+#endif
+ stderr_term_color = fdterm_color (stderr_fd (), !c || *c /* enable */);
+
+ // If the user specified --diag-color on POSIX, we will trust that color
+ // is supported (e.g., despite a wrong TERM value, etc).
+ //
+ if (!stderr_term_color && c && *c)
+ {
+#ifdef _WIN32
+ fail << "unable to enable diagnostics color support for stderr";
+#else
+ stderr_term_color = true;
+#endif
+ }
+
+#ifdef _WIN32
+ }
+ else
+ stderr_term_color = false;
+#endif
+ }
+ else
+ stderr_term_color = false;
}
// Stream verbosity.
@@ -589,7 +631,7 @@ namespace build2
int diag_buffer::
pipe (context& ctx, bool force)
{
- return (ctx.sched.serial () || ctx.no_diag_buffer) && !force ? 2 : -1;
+ return (ctx.sched->serial () || ctx.no_diag_buffer) && !force ? 2 : -1;
}
void diag_buffer::
@@ -597,7 +639,7 @@ namespace build2
{
assert (state_ == state::closed && args0 != nullptr);
- serial = ctx_.sched.serial ();
+ serial = ctx_.sched->serial ();
nobuf = !serial && ctx_.no_diag_buffer;
if (fd != nullfd)
@@ -621,7 +663,7 @@ namespace build2
{
assert (state_ == state::closed && args0 != nullptr);
- serial = ctx_.sched.serial ();
+ serial = ctx_.sched->serial ();
nobuf = !serial && ctx_.no_diag_buffer;
this->args0 = args0;
state_ = state::eof;
diff --git a/libbuild2/diagnostics.hxx b/libbuild2/diagnostics.hxx
index c048d5b..ef41f22 100644
--- a/libbuild2/diagnostics.hxx
+++ b/libbuild2/diagnostics.hxx
@@ -401,15 +401,25 @@ namespace build2
using butl::diag_progress_lock;
// Return true if progress is to be shown. The max_verb argument is the
- // maximum verbosity level that this type of progress should be shown by
- // default.
+ // maximum verbosity level that this type of progress should be shown at by
+ // default. If it is verb_never, then both min and max verbosity checks are
+ // omitted, assuming the caller takes care of that themselves.
//
inline bool
show_progress (uint16_t max_verb)
{
return diag_progress_option
? *diag_progress_option
- : stderr_term && verb >= 1 && verb <= max_verb;
+ : stderr_term && (max_verb == verb_never ||
+ (verb >= 1 && verb <= max_verb));
+ }
+
+ // Diagnostics color.
+ //
+ inline bool
+ show_diag_color ()
+ {
+ return diag_color_option ? *diag_color_option : stderr_term_color;
}
// Diagnostic facility.
@@ -507,6 +517,8 @@ namespace build2
}
};
+ // Note: diag frames are not applied to text/trace diagnostics.
+ //
template <typename F>
struct diag_frame_impl: diag_frame
{
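A minimal usage sketch for the two inline helpers above, assuming the caller sits inside libbuild2 (the ANSI escape sequences and the progress handling are illustrative, not something this commit introduces):

    void
    report (const char* msg, bool failed)
    {
      // Emit color only if --diag-color/auto-detection says the terminal
      // supports it.
      //
      const char* b (show_diag_color () ? (failed ? "\033[31m" : "\033[32m") : "");
      const char* e (*b != '\0' ? "\033[0m" : "");

      text << b << msg << e;
    }

    void
    report_progress (const string& line)
    {
      // Passing verb_never skips the default min/max verbosity checks: only
      // --[no-]progress and terminal detection are consulted, so the caller
      // is expected to apply its own verbosity policy.
      //
      if (show_progress (verb_never))
      {
        diag_progress_lock pl; // Assumed: butl's progress string/lock as
        diag_progress = line;  // re-exported by libbuild2 diagnostics.
      }
    }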
diff --git a/libbuild2/dist/init.cxx b/libbuild2/dist/init.cxx
index 26ff86d..48a3e15 100644
--- a/libbuild2/dist/init.cxx
+++ b/libbuild2/dist/init.cxx
@@ -22,6 +22,7 @@ namespace build2
namespace dist
{
static const rule rule_;
+ static const file_rule file_rule_ (true /* check_type */);
void
boot (scope& rs, const location&, module_boot_extra& extra)
@@ -133,7 +134,7 @@ namespace build2
//
bool s (specified_config (rs, "dist", {"bootstrap"}));
- // dist.root
+ // config.dist.root
//
{
value& v (rs.assign ("dist.root"));
@@ -145,22 +146,24 @@ namespace build2
}
}
- // dist.cmd
+ // config.dist.cmd
+ //
+ // By default we use in-process code for creating directories and
+ // copying files (for performance, especially on Windows). But an
+ // external program (normally install) can be used if configured.
//
{
- value& v (rs.assign<process_path> ("dist.cmd"));
+ value& v (rs.assign<process_path> ("dist.cmd")); // NULL
if (s)
{
- if (lookup l = lookup_config (rs,
- "config.dist.cmd",
- path ("install")))
+ if (lookup l = lookup_config (rs, "config.dist.cmd", nullptr))
v = run_search (cast<path> (l), true);
}
}
- // dist.archives
- // dist.checksums
+ // config.dist.archives
+ // config.dist.checksums
//
{
value& a (rs.assign ("dist.archives"));
@@ -183,7 +186,7 @@ namespace build2
}
}
- // dist.uncommitted
+ // config.dist.uncommitted
//
// Omit it from the configuration unless specified.
//
@@ -220,10 +223,14 @@ namespace build2
// executables imported from /usr/bin, etc). We are registering it on
// the global scope similar to builtin rules.
//
+ // Note: use target instead of anything more specific (such as
+ // mtime_target) in order not to take precedence over the "dist" rule
+ // above.
+ //
// See a similar rule in the config module.
//
- rs.global_scope ().insert_rule<mtime_target> (
- dist_id, 0, "dist.file", file_rule::instance);
+ rs.global_scope ().insert_rule<target> (
+ dist_id, 0, "dist.file", file_rule_);
// Configuration.
//
diff --git a/libbuild2/dist/operation.cxx b/libbuild2/dist/operation.cxx
index af7b40b..cfc90cf 100644
--- a/libbuild2/dist/operation.cxx
+++ b/libbuild2/dist/operation.cxx
@@ -6,6 +6,8 @@
#include <libbutl/sha1.hxx>
#include <libbutl/sha256.hxx>
+#include <libbutl/filesystem.hxx> // try_mkdir_p(), cpfile()
+
#include <libbuild2/file.hxx>
#include <libbuild2/dump.hxx>
#include <libbuild2/scope.hxx>
@@ -29,14 +31,14 @@ namespace build2
// install -d <dir>
//
static void
- install (const process_path&, context&, const dir_path&);
+ install (const process_path*, context&, const dir_path&);
// install <file> <dir>[/<name>]
//
// Return the destination file path.
//
static path
- install (const process_path&, const file&, const dir_path&, const path&);
+ install (const process_path*, const file&, const dir_path&, const path&);
// tar|zip ... <dir>/<pkg>.<ext> <pkg>
//
@@ -108,9 +110,7 @@ namespace build2
// Figure out if we need out.
//
- dir_path out (rs.src_path () != rs.out_path ()
- ? out_src (d, rs)
- : dir_path ());
+ dir_path out (!rs.out_eq_src () ? out_src (d, rs) : dir_path ());
const T& t (rs.ctx.targets.insert<T> (
move (d),
@@ -152,11 +152,11 @@ namespace build2
try
{
- for (const dir_entry& e: dir_iterator (d, false /* ignore_dangling */))
+ for (const dir_entry& e: dir_iterator (d, dir_iterator::no_follow))
{
const path& n (e.path ());
- if (n.string ()[0] != '.')
+ if (!n.empty () && n.string ().front () != '.')
try
{
if (e.type () == entry_type::directory) // Can throw.
@@ -241,7 +241,8 @@ namespace build2
const module& mod (*rs.find_module<module> (module::name));
const string& dist_package (cast<string> (l));
- const process_path& dist_cmd (cast<process_path> (rs.vars["dist.cmd"]));
+ const process_path* dist_cmd (
+ cast_null<process_path> (rs.vars["dist.cmd"]));
dir_path td (dist_root / dir_path (dist_package));
@@ -273,7 +274,6 @@ namespace build2
// Note that we are not calling operation_pre/post() callbacks here
// since the meta operation is dist and we know what we are doing.
//
- values params;
path_name pn ("<dist>");
const location loc (pn); // Dummy location.
action_targets ts {tgt};
@@ -303,8 +303,8 @@ namespace build2
}
};
- auto mog = make_guard ([&ctx] () {ctx.match_only = false;});
- ctx.match_only = true;
+ auto mog = make_guard ([&ctx] () {ctx.match_only = nullopt;});
+ ctx.match_only = match_only_level::all;
const operations& ops (rs.root_extra->operations);
for (operations::size_type id (default_id + 1); // Skip default_id.
@@ -326,39 +326,72 @@ namespace build2
//
if (auto pp = oif->pre_operation)
{
- if (operation_id pid = pp (ctx, params, dist_id, loc))
+ if (operation_id pid = pp (ctx, {}, dist_id, loc))
{
const operation_info* poif (ops[pid]);
ctx.current_operation (*poif, oif, false /* diag_noise */);
+
+ if (oif->operation_pre != nullptr)
+ oif->operation_pre (ctx, {}, false /* inner */, loc);
+
+ if (poif->operation_pre != nullptr)
+ poif->operation_pre (ctx, {}, true /* inner */, loc);
+
action a (dist_id, poif->id, oif->id);
mod.postponed.list.clear ();
- perform_match (params, a, ts,
+ perform_match ({}, a, ts,
1 /* diag (failures only) */,
false /* progress */);
process_postponed ();
+
+ if (poif->operation_post != nullptr)
+ poif->operation_post (ctx, {}, true /* inner */);
+
+ if (oif->operation_post != nullptr)
+ oif->operation_post (ctx, {}, false /* inner */);
}
}
ctx.current_operation (*oif, nullptr, false /* diag_noise */);
+
+ if (oif->operation_pre != nullptr)
+ oif->operation_pre (ctx, {}, true /* inner */, loc);
+
action a (dist_id, oif->id);
mod.postponed.list.clear ();
- perform_match (params, a, ts,
+ perform_match ({}, a, ts,
1 /* diag (failures only) */,
false /* progress */);
process_postponed ();
+ if (oif->operation_post != nullptr)
+ oif->operation_post (ctx, {}, true /* inner */);
+
if (auto po = oif->post_operation)
{
- if (operation_id pid = po (ctx, params, dist_id))
+ if (operation_id pid = po (ctx, {}, dist_id))
{
const operation_info* poif (ops[pid]);
ctx.current_operation (*poif, oif, false /* diag_noise */);
+
+ if (oif->operation_pre != nullptr)
+ oif->operation_pre (ctx, {}, false /* inner */, loc);
+
+ if (poif->operation_pre != nullptr)
+ poif->operation_pre (ctx, {}, true /* inner */, loc);
+
action a (dist_id, poif->id, oif->id);
mod.postponed.list.clear ();
- perform_match (params, a, ts,
+ perform_match ({}, a, ts,
1 /* diag (failures only) */,
false /* progress */);
process_postponed ();
+
+ if (poif->operation_post != nullptr)
+ poif->operation_post (ctx, {}, true /* inner */);
+
+ if (oif->operation_post != nullptr)
+ oif->operation_post (ctx, {}, false /* inner */);
}
}
}
@@ -367,7 +400,7 @@ namespace build2
// Add ad hoc files and buildfiles that are not normally loaded as
// part of the project, for example, the export stub. They will still
// be ignored on the next step if the user explicitly marked them
- // dist=false.
+ // with dist=false.
//
auto add_adhoc = [] (const scope& rs)
{
@@ -430,48 +463,96 @@ namespace build2
// Note that we are not showing progress here (e.g., "N targets to
// distribute") since it will be useless (too fast).
//
- for (const auto& pt: ctx.targets)
+ auto see_through = [] (const target& t)
{
- file* ft (pt->is_a<file> ());
-
- if (ft == nullptr) // Not a file.
- continue;
+ return ((t.type ().flags & target_type::flag::see_through) ==
+ target_type::flag::see_through);
+ };
- if (ft->dir.sub (src_root))
+ auto collect = [&trace, &dist_var,
+ &src_root, &out_root] (const file& ft)
+ {
+ if (ft.dir.sub (src_root))
{
// Include unless explicitly excluded.
//
- if (const path* v = cast_null<path> ((*ft)[dist_var]))
+ if (const path* v = cast_null<path> (ft[dist_var]))
{
if (v->string () == "false")
{
- l5 ([&]{trace << "excluding " << *ft;});
- continue;
+ l5 ([&]{trace << "excluding " << ft;});
+ return false;
}
}
- files.push_back (ft);
+ return true;
}
- else if (ft->dir.sub (out_root))
+ else if (ft.dir.sub (out_root))
{
// Exclude unless explicitly included.
//
- if (const path* v = cast_null<path> ((*ft)[dist_var]))
+ if (const path* v = cast_null<path> (ft[dist_var]))
{
if (v->string () != "false")
{
- l5 ([&]{trace << "including " << *ft;});
- files.push_back (ft);
+ l5 ([&]{trace << "including " << ft;});
+ return true;
}
}
+
+ return false;
}
+ else
+ return false; // Out of project.
+ };
+
+ for (const auto& pt: ctx.targets)
+ {
+ // Collect see-through groups if they are marked with dist=true.
+ //
+ // Note that while it's possible that only certain of their members are
+ // marked as such (e.g., via a pattern), we will still require
+ // dist=true on the group itself (and potentially dist=false on some
+ // of its members) for such cases because we don't want to update
+ // every see-through group only to discover that most of them don't
+ // have anything to distribute.
+ //
+ if (see_through (*pt))
+ {
+ if (const path* v = cast_null<path> ((*pt)[dist_var]))
+ {
+ if (v->string () != "false")
+ {
+ l5 ([&]{trace << "including group " << *pt;});
+ files.push_back (pt.get ());
+ }
+ }
+
+ continue;
+ }
+
+ file* ft (pt->is_a<file> ());
+
+ if (ft == nullptr) // Not a file.
+ continue;
+
+ // Skip members of see-through groups since after dist_* their list
+ // can be incomplete (or even bogus, e.g., the "representative
+ // sample"). Instead, we will collect them during perform_update
+ // below.
+ //
+ if (ft->group != nullptr && see_through (*ft->group))
+ continue;
+
+ if (collect (*ft))
+ files.push_back (ft);
}
// Make sure what we need to distribute is up to date.
//
{
if (mo_perform.meta_operation_pre != nullptr)
- mo_perform.meta_operation_pre (ctx, params, loc);
+ mo_perform.meta_operation_pre (ctx, {}, loc);
// This is a hack since according to the rules we need to completely
// reset the state. We could have done that (i.e., saved target
@@ -487,25 +568,75 @@ namespace build2
ctx.current_on = on + 1;
if (mo_perform.operation_pre != nullptr)
- mo_perform.operation_pre (ctx, params, update_id);
+ mo_perform.operation_pre (ctx, {}, update_id);
ctx.current_operation (op_update, nullptr, false /* diag_noise */);
+ if (op_update.operation_pre != nullptr)
+ op_update.operation_pre (ctx, {}, true /* inner */, loc);
+
action a (perform_update_id);
- mo_perform.match (params, a, files,
+ mo_perform.match ({}, a, files,
1 /* diag (failures only) */,
prog /* progress */);
- mo_perform.execute (params, a, files,
+ mo_perform.execute ({}, a, files,
1 /* diag (failures only) */,
prog /* progress */);
+ // Replace see-through groups (which now should have their members
+ // resolved) with members.
+ //
+ for (auto i (files.begin ()); i != files.end (); )
+ {
+ const target& t (i->as<target> ());
+ if (see_through (t))
+ {
+ group_view gv (t.group_members (a)); // Go directly.
+
+ if (gv.members == nullptr)
+ fail << "unable to resolve see-through group " << t
+ << " members";
+
+ i = files.erase (i); // Drop the group itself.
+
+ for (size_t j (0); j != gv.count; ++j)
+ {
+ if (const target* m = gv.members[j])
+ {
+ if (const file* ft = m->is_a<file> ())
+ {
+ // Note that a rule may only link up its members to groups
+ // if/when matched (for example, the cli.cxx{} group). It
+ // feels harmless for us to do the linking here.
+ //
+ if (ft->group == nullptr)
+ const_cast<file*> (ft)->group = &t;
+ else
+ assert (ft->group == &t); // Sanity check.
+
+ if (collect (*ft))
+ {
+ i = files.insert (i, ft); // Insert instead of the group.
+ i++; // Stay after the group.
+ }
+ }
+ }
+ }
+ }
+ else
+ ++i;
+ }
+
+ if (op_update.operation_post != nullptr)
+ op_update.operation_post (ctx, {}, true /* inner */);
+
if (mo_perform.operation_post != nullptr)
- mo_perform.operation_post (ctx, params, update_id);
+ mo_perform.operation_post (ctx, {}, update_id);
if (mo_perform.meta_operation_post != nullptr)
- mo_perform.meta_operation_post (ctx, params);
+ mo_perform.meta_operation_post (ctx, {});
}
}
else
@@ -546,7 +677,7 @@ namespace build2
for (size_t i (0), n (files.size ()); i != n; ++i)
{
- const file& t (*files[i].as<target> ().is_a<file> ());
+ const file& t (files[i].as<target> ().as<file> ()); // Only files.
// Figure out where this file is inside the target directory.
//
@@ -758,66 +889,131 @@ namespace build2
// install -d <dir>
//
static void
- install (const process_path& cmd, context& ctx, const dir_path& d)
+ install (const process_path* cmd, context& ctx, const dir_path& d)
{
- path reld (relative (d));
+ path reld;
+ cstrings args;
- cstrings args {cmd.recall_string (), "-d"};
+ if (cmd != nullptr || verb >= 2)
+ {
+ reld = relative (d);
- args.push_back ("-m");
- args.push_back ("755");
- args.push_back (reld.string ().c_str ());
- args.push_back (nullptr);
+ args.push_back (cmd != nullptr ? cmd->recall_string () : "install");
+ args.push_back ("-d");
+ args.push_back ("-m");
+ args.push_back ("755");
+ args.push_back (reld.string ().c_str ());
+ args.push_back (nullptr);
- if (verb >= 2)
- print_process (args);
+ if (verb >= 2)
+ print_process (args);
+ }
- run (ctx, cmd, args, 1 /* finish_verbosity */);
+ if (cmd != nullptr)
+ run (ctx, *cmd, args, 1 /* finish_verbosity */);
+ else
+ {
+ try
+ {
+ // Note that mode has no effect on Windows, which is probably for
+ // the best.
+ //
+ try_mkdir_p (d, 0755);
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to create directory " << d << ": " << e;
+ }
+ }
}
// install <file> <dir>[/<name>]
//
static path
- install (const process_path& cmd,
+ install (const process_path* cmd,
const file& t,
const dir_path& d,
const path& n)
{
- path reld (relative (d));
- path relf (relative (t.path ()));
+ const path& f (t.path ());
+ path r (d / (n.empty () ? f.leaf () : n));
- if (!n.empty ())
- reld /= n.string ();
+ // Assume the file is executable if the owner has execute permission,
+ // in which case we make it executable for everyone.
+ //
+ bool exe ((path_perms (f) & permissions::xu) == permissions::xu);
- cstrings args {cmd.recall_string ()};
+ path relf, reld;
+ cstrings args;
- // Preserve timestamps. This could becomes important if, for
- // example, we have pre-generated sources. Note that the
- // install-sh script doesn't support this option, while both
- // Linux and BSD install's do.
- //
- args.push_back ("-p");
+ if (cmd != nullptr || verb >= 2)
+ {
+ relf = relative (f);
+ reld = relative (d);
- // Assume the file is executable if the owner has execute
- // permission, in which case we make it executable for
- // everyone.
- //
- args.push_back ("-m");
- args.push_back (
- (path_perms (t.path ()) & permissions::xu) == permissions::xu
- ? "755"
- : "644");
+ if (!n.empty ()) // Leave as just directory if no custom name.
+ reld /= n;
- args.push_back (relf.string ().c_str ());
- args.push_back (reld.string ().c_str ());
- args.push_back (nullptr);
+ args.push_back (cmd != nullptr ? cmd->recall_string () : "install");
- if (verb >= 2)
- print_process (args);
+ // Preserve timestamps. This could become important if, for example,
+ // we have pre-generated sources. Note that the install-sh script
+ // doesn't support this option, while both Linux and BSD installs do.
+ //
+ args.push_back ("-p");
- run (t.ctx, cmd, args, 1 /* finish_verbosity */);
+ // Assume the file is executable if the owner has execute permission,
+ // in which case we make it executable for everyone.
+ //
+ args.push_back ("-m");
+ args.push_back (exe ? "755" : "644");
+ args.push_back (relf.string ().c_str ());
+ args.push_back (reld.string ().c_str ());
+ args.push_back (nullptr);
- return d / (n.empty () ? relf.leaf () : n);
+ if (verb >= 2)
+ print_process (args);
+ }
+
+ if (cmd != nullptr)
+ run (t.ctx, *cmd, args, 1 /* finish_verbosity */);
+ else
+ {
+ permissions perm (permissions::ru | permissions::wu |
+ permissions::rg |
+ permissions::ro); // 644
+ if (exe)
+ perm |= permissions::xu | permissions::xg | permissions::xo; // 755
+
+ try
+ {
+ // Note that we don't pass cpflags::overwrite_content which means
+ // this will fail if the file already exists. Since we clean up the
+ // destination directory, this will detect cases where we have
+ // multiple source files with the same distribution destination.
+ //
+ cpfile (f,
+ r,
+ cpflags::overwrite_permissions | cpflags::copy_timestamps,
+ perm);
+ }
+ catch (const system_error& e)
+ {
+ if (e.code ().category () == generic_category () &&
+ e.code ().value () == EEXIST)
+ {
+ // @@ TMP (added in 0.16.0).
+ //
+ warn << "multiple files are distributed as " << r <<
+ info << "second file is " << f <<
+ info << "this warning will become an error in the future";
+ }
+ else
+ fail << "unable to copy " << f << " to " << r << ": " << e;
+ }
+ }
+
+ return r;
}
static path
@@ -827,6 +1023,8 @@ namespace build2
const dir_path& dir,
const string& e)
{
+ // NOTE: similar code in bpkg (system-package-manager-archive.cxx).
+
path an (pkg + '.' + e);
// Delete old archive for good measure.
@@ -849,7 +1047,7 @@ namespace build2
if (e == "zip")
{
- // On Windows we use libarchive's bsdtar (zip is an MSYS executabales).
+ // On Windows we use libarchive's bsdtar (zip is an MSYS executable).
//
// While not explicitly stated, the compression-level option works
// for zip archives.
@@ -968,7 +1166,7 @@ namespace build2
process apr;
process cpr;
- // Change the archiver's working directory to dist_root.
+ // Change the archiver's working directory to root.
//
// Note: this function is called during serial execution and so no
// diagnostics buffering is needed (here and below).
diff --git a/libbuild2/dist/rule.cxx b/libbuild2/dist/rule.cxx
index 736490e..c63f7f3 100644
--- a/libbuild2/dist/rule.cxx
+++ b/libbuild2/dist/rule.cxx
@@ -30,10 +30,21 @@ namespace build2
const dir_path& src_root (rs.src_path ());
const dir_path& out_root (rs.out_path ());
- // If we can, go inside see-through groups.
+ // Note that we don't go inside see-through groups since the members for
+ // dist_* may be incomplete (or even bogus, e.g., the "representative
+ // sample"). Instead, for see-through groups our plan is as follows:
//
- for (prerequisite_member pm:
- group_prerequisite_members (a, t, members_mode::maybe))
+ // 1. Here we match them as groups (so that we still match all their
+ // prerequisites).
+ //
+ // 2. In dist_project() we collect them along with files after dist_*
+ // but before perform_update. Here we also skip files that are
+ // members of see-through groups (which we may still get).
+ //
+ // 3. During perform_update we collect all the see-through group
+ // members, similar to files on step (2).
+ //
+ for (const prerequisite& p: group_prerequisites (t))
{
// Note: no exclusion tests, we want all of them (and see also the
// dist_include() override). But if we don't ignore post hoc ones
@@ -41,12 +52,12 @@ namespace build2
// by the post-pass).
//
lookup l; // Ignore any operation-specific values.
- if (include (a, t, pm, &l) == include_type::posthoc)
+ if (include (a, t, p, &l) == include_type::posthoc)
continue;
// Skip prerequisites imported from other projects.
//
- if (pm.proj ())
+ if (p.proj)
continue;
// We used to always search and match but that resulted in the
@@ -65,20 +76,18 @@ namespace build2
// @@ Note that this is still an issue in a custom dist rule.
//
const target* pt (nullptr);
- if (pm.is_a<file> ())
+ if (p.is_a<file> ())
{
- pt = pm.load ();
+ pt = p.target.load ();
if (pt == nullptr)
{
- const prerequisite& p (pm.prerequisite);
-
// Search for an existing target or existing file in src.
//
// Note: see also similar code in match_postponed() below.
//
const prerequisite_key& k (p.key ());
- pt = k.tk.type->search (t, k);
+ pt = k.tk.type->search (t.ctx, &t, k);
if (pt == nullptr)
{
@@ -106,7 +115,7 @@ namespace build2
}
}
else
- pt = &pm.search (t);
+ pt = &search (t, p);
// Don't match targets that are outside of our project.
//
@@ -125,7 +134,7 @@ namespace build2
const prerequisite& p (pp.prereq);
const prerequisite_key& k (p.key ());
- const target* pt (k.tk.type->search (t, k));
+ const target* pt (k.tk.type->search (t.ctx, &t, k));
if (pt == nullptr)
{
diff --git a/libbuild2/dump.cxx b/libbuild2/dump.cxx
index c28dd31..9b7f5b1 100644
--- a/libbuild2/dump.cxx
+++ b/libbuild2/dump.cxx
@@ -3,6 +3,11 @@
#include <libbuild2/dump.hxx>
+#ifndef BUILD2_BOOTSTRAP
+# include <iostream> // cout
+# include <unordered_map>
+#endif
+
#include <libbuild2/rule.hxx>
#include <libbuild2/scope.hxx>
#include <libbuild2/target.hxx>
@@ -11,6 +16,7 @@
#include <libbuild2/diagnostics.hxx>
using namespace std;
+using namespace butl;
namespace build2
{
@@ -49,9 +55,320 @@ namespace build2
if (v)
{
names storage;
- os << (a ? " " : "") << reverse (v, storage);
+ os << (a ? " " : "") << reverse (v, storage, true /* reduce */);
+ }
+ }
+
+#ifndef BUILD2_BOOTSTRAP
+
+ static string
+ quoted_target_name (const names_view& ns, bool rel)
+ {
+ ostringstream os;
+ stream_verb (os, stream_verbosity (rel ? 0 : 1, 0));
+ to_stream (os, ns, quote_mode::effective, '@');
+ return os.str ();
+ }
+
+ static void
+ dump_quoted_target_name (json::stream_serializer& j,
+ const names_view& ns,
+ bool rel)
+ {
+ j.value (quoted_target_name (ns, rel));
+ }
+
+ static string
+ quoted_target_name (const target& t, bool rel)
+ {
+ names ns (t.as_name ()); // Note: potentially adds an extension.
+
+ // Don't print target names relative if the target is in src and out!=src.
+ // Failing that, we will end up with pointless ../../../... paths.
+ //
+ // It may also seem that we can omit @-qualification in this case, since
+ // it is implied by the containing scope. However, keep in mind that the
+ // target may not be directly in this scope. We could make it relative,
+ // though.
+ //
+ if (rel && !t.out.empty ())
+ {
+ // Make the out relative ourselves and then disable relative for src.
+ //
+ dir_path& o (ns.back ().dir);
+ o = relative (o); // Note: may return empty path.
+ if (o.empty ())
+ o = dir_path (".");
+
+ rel = false;
+ }
+
+ return quoted_target_name (ns, rel);
+ }
+
+ void
+ dump_quoted_target_name (json::stream_serializer& j,
+ const target& t,
+ bool rel)
+ {
+ j.value (quoted_target_name (t, rel));
+ }
+
+ using target_name_cache = unordered_map<const target*, string>;
+
+ static void
+ dump_quoted_target_name (json::stream_serializer& j,
+ const target& t,
+ target_name_cache& tc)
+ {
+ auto i (tc.find (&t));
+ if (i == tc.end ())
+ i = tc.emplace (&t, quoted_target_name (t, false /* relative */)).first;
+
+ j.value (i->second);
+ }
+
+ void
+ dump_display_target_name (json::stream_serializer& j,
+ const target& t,
+ bool rel)
+ {
+ // Note: see the quoted version above for details.
+
+ target_key tk (t.key ());
+
+ dir_path o;
+ if (rel && !tk.out->empty ())
+ {
+ o = relative (*tk.out);
+ if (o.empty ())
+ o = dir_path (".");
+ tk.out = &o;
+
+ rel = false;
+ }
+
+ // Change the stream verbosity to print relative if requested and omit
+ // extension.
+ //
+ ostringstream os;
+ stream_verb (os, stream_verbosity (rel ? 0 : 1, 0));
+ os << tk;
+ j.value (os.str ());
+ }
+
+ static void
+ dump_value (json::stream_serializer& j, const value& v)
+ {
+ // Hints.
+ //
+ // Note that the pair hint should only be used for simple names.
+ //
+ optional<bool> h_array;
+ optional<bool> h_pair; // true/false - second/first is optional.
+
+ if (v.null)
+ {
+ j.value (nullptr);
+ return;
+ }
+ else if (v.type != nullptr)
+ {
+ const value_type& t (*v.type);
+
+ auto s_array = [&j] (const auto& vs)
+ {
+ j.begin_array ();
+ for (const auto& v: vs) j.value (v);
+ j.end_array ();
+ };
+
+ auto s_array_string = [&j] (const auto& vs)
+ {
+ j.begin_array ();
+ for (const auto& v: vs) j.value (v.string ());
+ j.end_array ();
+ };
+
+ // Note: check in the derived-first order.
+ //
+ if (t.is_a<bool> ()) j.value (v.as<bool> ());
+ else if (t.is_a<int64_t> ()) j.value (v.as<int64_t> ());
+ else if (t.is_a<uint64_t> ()) j.value (v.as<uint64_t> ());
+ else if (t.is_a<string> ()) j.value (v.as<string> ());
+ else if (t.is_a<path> ()) j.value (v.as<path> ().string ());
+ else if (t.is_a<dir_path> ()) j.value (v.as<dir_path> ().string ());
+ else if (t.is_a<target_triplet> ()) j.value (v.as<target_triplet> ().string ());
+ else if (t.is_a<project_name> ()) j.value (v.as<project_name> ().string ());
+ else if (t.is_a<int64s> ()) s_array (v.as<int64s> ());
+ else if (t.is_a<uint64s> ()) s_array (v.as<uint64s> ());
+ else if (t.is_a<strings> ()) s_array (v.as<strings> ());
+ else if (t.is_a<paths> ()) s_array_string (v.as<paths> ());
+ else if (t.is_a<dir_paths> ()) s_array_string (v.as<dir_paths> ());
+ else
+ {
+ // Note: check in the derived-first order.
+ //
+ if (t.is_a<name> ()) h_array = false;
+ else if (t.is_a<name_pair> ())
+ {
+ h_array = false;
+ h_pair = true;
+ }
+ else if (t.is_a<process_path_ex> ())
+ {
+ // Decide on array dynamically.
+ h_pair = true;
+ }
+ else if (t.is_a<process_path> ())
+ {
+ h_array = false;
+ h_pair = true;
+ }
+ else if (t.is_a<cmdline> () ||
+ t.is_a<vector<name>> ())
+ {
+ h_array = true;
+ }
+ else if (t.is_a<vector<pair<string, string>>> () ||
+ t.is_a<vector<pair<string, optional<string>>>> () ||
+ t.is_a<vector<pair<string, optional<bool>>>> () ||
+ t.is_a<map<string, string>> () ||
+ t.is_a<map<string, optional<string>>> () ||
+ t.is_a<map<string, optional<bool>>> () ||
+ t.is_a<map<project_name, dir_path>> ())
+ {
+ h_array = true;
+ h_pair = true;
+ }
+ else if (t.is_a<map<optional<string>, string>> () ||
+ t.is_a<vector<pair<optional<string>, string>>> ())
+ {
+ h_array = true;
+ h_pair = false;
+ }
+
+ goto fall_through;
+ }
+
+ return;
+
+ fall_through:
+ ;
+ }
+
+ names storage;
+ names_view ns (reverse (v, storage, true /* reduce */));
+
+ if (ns.empty ())
+ {
+ // When it comes to representing an empty value, our options are: empty
+ // array ([]), empty object ({}), or an absent member. The latter feels
+ // closer to null than empty, so that's out. After some experimentation,
+ // it feels like the best choice is to use an array unless we know for sure
+ // it is not one, in which case we use an object if it's a pair and an empty
+ // string otherwise (the empty string makes sense because we serialize
+ // complex names as target names; see below).
+ //
+ if (!h_array || *h_array)
+ {
+ j.begin_array ();
+ j.end_array ();
+ }
+ else
+ {
+ if (h_pair)
+ {
+ j.begin_object ();
+ j.end_object ();
+ }
+ else
+ j.value ("");
+ }
+ }
+ else
+ {
+ if (!h_array)
+ h_array = ns.size () > 2 || (ns.size () == 2 && !ns.front ().pair);
+
+ if (*h_array)
+ j.begin_array ();
+
+ // While it may be tempting to try to provide a heterogeneous array
+ // (i.e., all strings, all objects, all pairs), in case of pairs we
+ // actually don't know whether a non-pair element is first or second
+ // (it's up to interpretation; though we do hint which one is optional
+ // for typed values above). So we serialize each name in its most
+ // appropriate form.
+ //
+ auto simple = [] (const name& n)
+ {
+ return n.simple () || n.directory () || n.file ();
+ };
+
+ auto s_simple = [&j] (const name& n)
+ {
+ if (n.simple ())
+ j.value (n.value);
+ else if (n.directory ())
+ j.value (n.dir.string ());
+ else if (n.file ())
+ {
+ // Note: both must be present due to earlier checks.
+ //
+ j.value ((n.dir / n.value).string ());
+ }
+ else
+ return false;
+
+ return true;
+ };
+
+ for (auto i (ns.begin ()), e (ns.end ()); i != e; )
+ {
+ const name& l (*i++);
+ const name* r (l.pair ? &*i++ : nullptr);
+
+ optional<bool> hp (h_pair);
+
+ if (!hp && r != nullptr && simple (l) && simple (*r))
+ hp = true;
+
+ if (hp)
+ {
+ // Pair of simple names.
+ //
+ j.begin_object ();
+
+ if (r != nullptr)
+ {
+ j.member_name ("first"); s_simple (l);
+ j.member_name ("second"); s_simple (*r);
+ }
+ else
+ {
+ j.member_name (*hp ? "first" : "second"); s_simple (l);
+ }
+
+ j.end_object ();
+ }
+ else if (r == nullptr && s_simple (l))
+ ;
+ else
+ {
+ // If complex name (or pair thereof), then assume a target name.
+ //
+ dump_quoted_target_name (j,
+ names_view (&l, r != nullptr ? 2 : 1),
+ false /* relative */);
+ }
+ }
+
+ if (*h_array)
+ j.end_array ();
}
}
+#endif
enum class variable_kind {scope, tt_pat, target, rule, prerequisite};
@@ -127,6 +444,68 @@ namespace build2
}
}
+#ifndef BUILD2_BOOTSTRAP
+ static void
+ dump_variable (json::stream_serializer& j,
+ const variable_map& vm,
+ const variable_map::const_iterator& vi,
+ const scope& s,
+ variable_kind k)
+ {
+ // Note: see the buildfile version above for comments.
+
+ assert (k != variable_kind::tt_pat); // TODO
+
+ const auto& p (*vi);
+ const variable& var (p.first);
+ const value& v (p.second);
+
+ lookup l (v, var, vm);
+ if (k != variable_kind::prerequisite)
+ {
+ if (var.override ())
+ return; // Ignore.
+
+ if (var.overrides != nullptr)
+ {
+ l = s.lookup_override (
+ var,
+ make_pair (l, 1),
+ k == variable_kind::target || k == variable_kind::rule,
+ k == variable_kind::rule).first;
+
+ assert (l.defined ()); // We at least have the original.
+ }
+ }
+
+ // Note that we do not distinguish between variable/value type.
+ //
+ // An empty value of a non-array type is represented as an empty object
+ // ({}).
+ //
+#if 0
+ struct variable
+ {
+ string name;
+ optional<string> type;
+ json_value value; // string|number|boolean|null|object|array
+ };
+#endif
+
+ j.begin_object ();
+
+ j.member ("name", var.name);
+
+ if (l->type != nullptr)
+ j.member ("type", l->type->name);
+
+ j.member_name ("value");
+ dump_value (j, *l);
+
+ j.end_object ();
+ }
+#endif
+
static void
dump_variables (ostream& os,
string& ind,
@@ -143,6 +522,20 @@ namespace build2
}
}
+#ifndef BUILD2_BOOTSTRAP
+ static void
+ dump_variables (json::stream_serializer& j,
+ const variable_map& vars,
+ const scope& s,
+ variable_kind k)
+ {
+ for (auto i (vars.begin ()), e (vars.end ()); i != e; ++i)
+ {
+ dump_variable (j, vars, i, s, k);
+ }
+ }
+#endif
+
// Dump target type/pattern-specific variables.
//
static void
@@ -248,10 +641,27 @@ namespace build2
}
}
+ // Similar to target::matched() but for the load phase.
+ //
+ static inline bool
+ matched (const target& t, action a)
+ {
+ // Note: running serial and task_count is 0 before any operation has
+ // started.
+ //
+ if (size_t c = t[a].task_count.load (memory_order_relaxed))
+ {
+ if (c == t.ctx.count_applied () || c == t.ctx.count_executed ())
+ return true;
+ }
+
+ return false;
+ }
+
static void
- dump_target (optional<action> a,
- ostream& os,
+ dump_target (ostream& os,
string& ind,
+ optional<action> a,
const target& t,
const scope& s,
bool rel)
@@ -260,6 +670,9 @@ namespace build2
// scope. To achieve this we are going to temporarily lower the stream
// path verbosity to level 0.
//
+ // @@ Not if in src and out != src? Otherwise end up with ../../../...
+ // See JSON version for the state of the art.
+ //
stream_verbosity osv, nsv;
if (rel)
{
@@ -321,32 +734,26 @@ namespace build2
// If the target has been matched to a rule, we also print resolved
// prerequisite targets.
//
- // Note: running serial and task_count is 0 before any operation has
- // started.
- //
const prerequisite_targets* pts (nullptr);
{
action inner; // @@ Only for the inner part of the action currently.
- if (size_t c = t[inner].task_count.load (memory_order_relaxed))
+ if (matched (t, inner))
{
- if (c == t.ctx.count_applied () || c == t.ctx.count_executed ())
- {
- pts = &t.prerequisite_targets[inner];
+ pts = &t.prerequisite_targets[inner];
- bool f (false);
- for (const target* pt: *pts)
+ bool f (false);
+ for (const target* pt: *pts)
+ {
+ if (pt != nullptr)
{
- if (pt != nullptr)
- {
- f = true;
- break;
- }
+ f = true;
+ break;
}
-
- if (!f)
- pts = nullptr;
}
+
+ if (!f)
+ pts = nullptr;
}
}
@@ -510,10 +917,318 @@ namespace build2
stream_verb (os, osv);
}
+#ifndef BUILD2_BOOTSTRAP
+ static void
+ dump_target (json::stream_serializer& j,
+ optional<action> a,
+ const target& t,
+ const scope& s,
+ bool rel,
+ target_name_cache& tcache)
+ {
+ // Note: see the buildfile version above for comments.
+
+ // Note that the target name (and display_name) are relative to the
+ // containing scope (if any).
+ //
+#if 0
+ struct prerequisite
+ {
+ string name; // Quoted/qualified name.
+ string type;
+ vector<variable> variables; // Prerequisite variables.
+ };
+
+ struct loaded_target
+ {
+ string name; // Quoted/qualified name.
+ string display_name;
+ string type; // Target type.
+ //string declaration;
+ optional<string> group; // Quoted/qualified group target name.
+
+ vector<variable> variables; // Target variables.
+
+ vector<prerequisite> prerequisites;
+ };
+
+ // @@ TODO: target attributes (rule_hint)
+
+ struct prerequisite_target
+ {
+ string name; // Target name (always absolute).
+ string type;
+ bool adhoc;
+ };
+
+ struct operation_state
+ {
+ string rule; // null if direct recipe match
+
+ optional<string> state; // unchanged|changed|group
+
+ vector<variable> variables; // Rule variables.
+
+ vector<prerequisite_target> prerequisite_targets;
+ };
+
+ struct matched_target
+ {
+ string name;
+ string display_name;
+ string type;
+ //string declaration;
+ optional<string> group;
+
+ optional<path> path; // Absent if not path-based target, not assigned.
+
+ vector<variable> variables;
+
+ optional<operation_state> outer_operation; // null if not matched.
+ operation_state inner_operation; // null if not matched.
+ };
+#endif
+
+ j.begin_object ();
+
+ j.member_name ("name");
+ dump_quoted_target_name (j, t, rel /* relative */);
+
+ j.member_name ("display_name");
+ dump_display_target_name (j, t, rel /* relative */);
+
+ j.member ("type", t.type ().name);
+
+ // @@ This value currently doesn't make much sense:
+ //
+ // - why are all the system headers prereq-new?
+ //
+ // - why is synthesized obje{} prereq-new?
+ //
+#if 0
+ {
+ const char* v (nullptr);
+ switch (t.decl)
+ {
+ case target_decl::prereq_new: v = "prerequisite-new"; break;
+ case target_decl::prereq_file: v = "prerequisite-file"; break;
+ case target_decl::implied: v = "implied"; break;
+ case target_decl::real: v = "real"; break;
+ }
+ j.member ("declaration", v);
+ }
+#endif
+
+ if (t.group != nullptr)
+ {
+ j.member_name ("group");
+ dump_quoted_target_name (j, *t.group, tcache);
+ }
+
+ if (a)
+ {
+ const string* v (nullptr);
+
+ if (t.is_a<dir> () || t.is_a<fsdir> ())
+ {
+ v = &t.dir.string ();
+ }
+ else if (const auto* pt = t.is_a<path_target> ())
+ {
+ const path& p (pt->path ());
+
+ if (!p.empty ())
+ v = &p.string ();
+ }
+
+ if (v != nullptr)
+ j.member ("path", *v);
+ }
+
+ // Target variables.
+ //
+ if (!t.vars.empty ())
+ {
+ j.member_begin_array ("variables");
+ dump_variables (j, t.vars, s, variable_kind::target);
+ j.end_array ();
+ }
+
+ // Prerequisites.
+ //
+ if (!a)
+ {
+ const prerequisites& ps (t.prerequisites ());
+
+ if (!ps.empty ())
+ {
+ j.member_begin_array ("prerequisites");
+
+ for (const prerequisite& p: ps)
+ {
+ j.begin_object ();
+
+ {
+ // Cobble together an equivalent of dump_quoted_target_name().
+ //
+ prerequisite_key pk (p.key ());
+ target_key& tk (pk.tk);
+
+ // It's possible that the containing scope differs from
+ // prerequisite's. This, for example, happens when we copy the
+ // prerequisite for a synthesized obj{} dependency that happens to
+ // be in a subdirectory, as in exe{foo}:src/cxx{foo}. In this
+ // case, we need to rebase relative paths to the containing scope.
+ //
+ dir_path d, o;
+ if (p.scope != s)
+ {
+ if (tk.out->empty ())
+ {
+ if (tk.dir->relative ())
+ {
+ d = (p.scope.out_path () / *tk.dir).relative (s.out_path ());
+ tk.dir = &d;
+ }
+ }
+ else
+ {
+ if (tk.dir->relative ())
+ {
+ d = (p.scope.src_path () / *tk.dir).relative (s.src_path ());
+ tk.dir = &d;
+ }
+
+ if (tk.out->relative ())
+ {
+ o = (p.scope.out_path () / *tk.out).relative (s.out_path ());
+ if (o.empty ())
+ o = dir_path (".");
+ tk.out = &o;
+ }
+ }
+ }
+
+ // If prerequisite paths are absolute, keep them absolute.
+ //
+ ostringstream os;
+ stream_verb (os, stream_verbosity (1, 0));
+
+ if (pk.proj)
+ os << *pk.proj << '%';
+
+ to_stream (os, pk.tk.as_name (), quote_mode::effective, '@');
+
+ j.member ("name", os.str ());
+ }
+
+ j.member ("type", p.type.name);
+
+ if (!p.vars.empty ())
+ {
+ j.member_begin_array ("variables");
+ dump_variables (j, p.vars, s, variable_kind::prerequisite);
+ j.end_array ();
+ }
+
+ j.end_object ();
+ }
+
+ j.end_array ();
+ }
+ }
+ else
+ {
+ // Matched rules and their state (prerequisite_targets, vars, etc).
+ //
+ auto dump_opstate = [&tcache, &j, &s, &t] (action a)
+ {
+ const target::opstate& o (t[a]);
+
+ j.begin_object ();
+
+ j.member ("rule", o.rule != nullptr ? o.rule->first.c_str () : nullptr);
+
+ // It feels natural to omit the unknown state, as if it corresponded
+ // to absent in optional<target_state>.
+ //
+ if (o.state != target_state::unknown)
+ {
+ assert (o.state == target_state::unchanged ||
+ o.state == target_state::changed ||
+ o.state == target_state::group);
+
+ j.member ("state", to_string (o.state));
+ }
+
+ if (!o.vars.empty ())
+ {
+ j.member_begin_array ("variables");
+ dump_variables (j, o.vars, s, variable_kind::rule);
+ j.end_array ();
+ }
+
+ {
+ bool first (true);
+ for (const prerequisite_target& pt: t.prerequisite_targets[a])
+ {
+ if (pt.target == nullptr)
+ continue;
+
+ if (first)
+ {
+ j.member_begin_array ("prerequisite_targets");
+ first = false;
+ }
+
+ j.begin_object ();
+
+ j.member_name ("name");
+ dump_quoted_target_name (j, *pt.target, tcache);
+
+ j.member ("type", pt.target->type ().name);
+
+ if (pt.adhoc ())
+ j.member ("adhoc", true);
+
+ j.end_object ();
+ }
+
+ if (!first)
+ j.end_array ();
+ }
+
+ j.end_object ();
+ };
+
+ if (a->outer ())
+ {
+ j.member_name ("outer_operation");
+ if (matched (t, *a))
+ dump_opstate (*a);
+ else
+ j.value (nullptr);
+ }
+
+ {
+ action ia (a->inner_action ());
+
+ j.member_name ("inner_operation");
+ if (matched (t, ia))
+ dump_opstate (ia);
+ else
+ j.value (nullptr);
+ }
+ }
+
+ j.end_object ();
+ }
+#endif
+
static void
- dump_scope (optional<action> a,
- ostream& os,
+ dump_scope (ostream& os,
string& ind,
+ optional<action> a,
scope_map::const_iterator& i,
bool rel)
{
@@ -588,21 +1303,25 @@ namespace build2
// disabled amalgamation will be printed directly inside the global
// scope).
//
- for (auto e (p.ctx.scopes.end ());
- (i != e &&
- i->second.front () != nullptr &&
- i->second.front ()->parent_scope () == &p); )
+ for (auto e (p.ctx.scopes.end ()); i != e; )
{
- if (vb || rb || sb)
+ if (i->second.front () == nullptr)
+ ++i; // Skip over src paths.
+ else if (i->second.front ()->parent_scope () != &p)
+ break; // Moved past our parent.
+ else
{
- os << endl;
- vb = rb = false;
- }
+ if (vb || rb || sb)
+ {
+ os << endl;
+ vb = rb = false;
+ }
- os << endl; // Extra newline between scope blocks.
+ os << endl; // Extra newline between scope blocks.
- dump_scope (a, os, ind, i, true /* relative */);
- sb = true;
+ dump_scope (os, ind, a, i, true /* relative */);
+ sb = true;
+ }
}
// Targets.
@@ -624,7 +1343,7 @@ namespace build2
}
os << endl; // Extra newline between targets.
- dump_target (a, os, ind, t, p, true /* relative */);
+ dump_target (os, ind, a, t, p, true /* relative */);
tb = true;
}
@@ -635,45 +1354,245 @@ namespace build2
<< ind << '}';
}
+#ifndef BUILD2_BOOTSTRAP
+ static void
+ dump_scope (json::stream_serializer& j,
+ optional<action> a,
+ scope_map::const_iterator& i,
+ bool rel,
+ target_name_cache& tcache)
+ {
+ // Note: see the buildfile version above for additional comments.
+
+ const scope& p (*i->second.front ());
+ const dir_path& d (i->first);
+ ++i;
+
+#if 0
+ struct scope
+ {
+ // The out_path member is relative to the parent scope. It is empty for
+ // the special global scope. The src_path member is absent if the same
+ // as out_path (in-source build or scope outside of project).
+ //
+ string out_path;
+ optional<string> src_path;
+
+ vector<variable> variables; // Non-type/pattern scope variables.
+
+ vector<scope> scopes; // Immediate children.
+
+ vector<loaded_target|matched_target> targets;
+ };
+#endif
+
+ j.begin_object ();
+
+ if (d.empty ())
+ j.member ("out_path", ""); // Global scope.
+ else
+ {
+ const dir_path& rd (rel ? relative (d) : d);
+ j.member ("out_path", rd.empty () ? string (".") : rd.string ());
+
+ if (!p.out_eq_src ())
+ j.member ("src_path", p.src_path ().string ());
+ }
+
+ const dir_path* orb (relative_base);
+ relative_base = &d;
+
+ // Scope variables.
+ //
+ if (!p.vars.empty ())
+ {
+ j.member_begin_array ("variables");
+ dump_variables (j, p.vars, p, variable_kind::scope);
+ j.end_array ();
+ }
+
+ // Nested scopes of which we are an immediate parent.
+ //
+ {
+ bool first (true);
+ for (auto e (p.ctx.scopes.end ()); i != e; )
+ {
+ if (i->second.front () == nullptr)
+ ++i;
+ else if (i->second.front ()->parent_scope () != &p)
+ break;
+ else
+ {
+ if (first)
+ {
+ j.member_begin_array ("scopes");
+ first = false;
+ }
+
+ dump_scope (j, a, i, true /* relative */, tcache);
+ }
+ }
+
+ if (!first)
+ j.end_array ();
+ }
+
+ // Targets.
+ //
+ {
+ bool first (true);
+ for (const auto& pt: p.ctx.targets)
+ {
+ const target& t (*pt);
+
+ if (&p != &t.base_scope ()) // @@ PERF
+ continue;
+
+ // Skip targets that haven't been matched for this action.
+ //
+ if (a)
+ {
+ if (!(matched (t, a->inner_action ()) ||
+ (a->outer () && matched (t, *a))))
+ continue;
+ }
+
+ if (first)
+ {
+ j.member_begin_array ("targets");
+ first = false;
+ }
+
+ dump_target (j, a, t, p, true /* relative */, tcache);
+ }
+
+ if (!first)
+ j.end_array ();
+ }
+
+ relative_base = orb;
+ j.end_object ();
+ }
+#endif
+
void
- dump (const context& c, optional<action> a)
+ dump (const context& c, optional<action> a, dump_format fmt)
{
auto i (c.scopes.begin ());
assert (i->second.front () == &c.global_scope);
- // We don't lock diag_stream here as dump() is supposed to be called from
- // the main thread prior/after to any other threads being spawned.
- //
- string ind;
- ostream& os (*diag_stream);
- dump_scope (a, os, ind, i, false /* relative */);
- os << endl;
+ switch (fmt)
+ {
+ case dump_format::buildfile:
+ {
+ // We don't lock diag_stream here as dump() is supposed to be called
+ // from the main thread prior/after to any other threads being
+ // spawned.
+ //
+ string ind;
+ ostream& os (*diag_stream);
+ dump_scope (os, ind, a, i, false /* relative */);
+ os << endl;
+ break;
+ }
+ case dump_format::json:
+ {
+#ifndef BUILD2_BOOTSTRAP
+ target_name_cache tc;
+ json::stream_serializer j (cout, 0 /* indent */);
+ dump_scope (j, a, i, false /* relative */, tc);
+ cout << endl;
+#else
+ assert (false);
+#endif
+ break;
+ }
+ }
}
void
- dump (const scope& s, const char* cind)
+ dump (const scope* s, optional<action> a, dump_format fmt, const char* cind)
{
- const scope_map& m (s.ctx.scopes);
- auto i (m.find_exact (s.out_path ()));
- assert (i != m.end () && i->second.front () == &s);
+ scope_map::const_iterator i;
+ if (s != nullptr)
+ {
+ const scope_map& m (s->ctx.scopes);
+ i = m.find_exact (s->out_path ());
+ assert (i != m.end () && i->second.front () == s);
+ }
- string ind (cind);
- ostream& os (*diag_stream);
- dump_scope (nullopt /* action */, os, ind, i, false /* relative */);
- os << endl;
+ switch (fmt)
+ {
+ case dump_format::buildfile:
+ {
+ string ind (cind);
+ ostream& os (*diag_stream);
+
+ if (s != nullptr)
+ dump_scope (os, ind, a, i, false /* relative */);
+ else
+ os << ind << "<no known scope to dump>";
+
+ os << endl;
+ break;
+ }
+ case dump_format::json:
+ {
+#ifndef BUILD2_BOOTSTRAP
+ target_name_cache tc;
+ json::stream_serializer j (cout, 0 /* indent */);
+
+ if (s != nullptr)
+ dump_scope (j, a, i, false /* relative */, tc);
+ else
+ j.value (nullptr);
+
+ cout << endl;
+#else
+ assert (false);
+#endif
+ break;
+ }
+ }
}
void
- dump (const target& t, const char* cind)
+ dump (const target* t, optional<action> a, dump_format fmt, const char* cind)
{
- string ind (cind);
- ostream& os (*diag_stream);
- dump_target (nullopt /* action */,
- os,
- ind,
- t,
- t.base_scope (),
- false /* relative */);
- os << endl;
+ const scope* bs (t != nullptr ? &t->base_scope () : nullptr);
+
+ switch (fmt)
+ {
+ case dump_format::buildfile:
+ {
+ string ind (cind);
+ ostream& os (*diag_stream);
+
+ if (t != nullptr)
+ dump_target (os, ind, a, *t, *bs, false /* relative */);
+ else
+ os << ind << "<no known target to dump>";
+
+ os << endl;
+ break;
+ }
+ case dump_format::json:
+ {
+#ifndef BUILD2_BOOTSTRAP
+ target_name_cache tc;
+ json::stream_serializer j (cout, 0 /* indent */);
+
+ if (t != nullptr)
+ dump_target (j, a, *t, *bs, false /* relative */, tc);
+ else
+ j.value (nullptr);
+
+ cout << endl;
+#else
+ assert (false);
+#endif
+ break;
+ }
+ }
}
}
diff --git a/libbuild2/dump.hxx b/libbuild2/dump.hxx
index 6ec6944..1a1a080 100644
--- a/libbuild2/dump.hxx
+++ b/libbuild2/dump.hxx
@@ -4,6 +4,10 @@
#ifndef LIBBUILD2_DUMP_HXX
#define LIBBUILD2_DUMP_HXX
+#ifndef BUILD2_BOOTSTRAP
+# include <libbutl/json/serializer.hxx>
+#endif
+
#include <libbuild2/types.hxx>
#include <libbuild2/forward.hxx>
#include <libbuild2/utility.hxx>
@@ -14,18 +18,40 @@
namespace build2
{
+ enum class dump_format {buildfile, json};
+
// Dump the build state to diag_stream. If action is specified, then assume
// rules have been matched for this action and dump action-specific
// information (like rule-specific variables).
//
+ // If scope or target is NULL, then assume not found and write a format-
+ // appropriate indication.
+ //
+ LIBBUILD2_SYMEXPORT void
+ dump (const context&, optional<action>, dump_format);
+
LIBBUILD2_SYMEXPORT void
- dump (const context&, optional<action> = nullopt);
+ dump (const scope*, optional<action>, dump_format, const char* ind = "");
LIBBUILD2_SYMEXPORT void
- dump (const scope&, const char* ind = "");
+ dump (const target*, optional<action>, dump_format, const char* ind = "");
+#ifndef BUILD2_BOOTSTRAP
+ // Dump (effectively) quoted target name, optionally relative (to the out
+ // tree).
+ //
+ LIBBUILD2_SYMEXPORT void
+ dump_quoted_target_name (butl::json::stream_serializer&,
+ const target&,
+ bool relative = false);
+
+ // Dump display target name, optionally relative (to the out tree).
+ //
LIBBUILD2_SYMEXPORT void
- dump (const target&, const char* ind = "");
+ dump_display_target_name (butl::json::stream_serializer&,
+ const target&,
+ bool relative = false);
+#endif
}
#endif // LIBBUILD2_DUMP_HXX
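A short usage sketch of the reworked dump interface above (the wrapper function and the choice of a null action are illustrative):

    #include <libbuild2/dump.hxx>

    namespace build2
    {
      void
      debug_dump (const target* t, const scope* s)
      {
        // Dump a target in the buildfile format without action-specific
        // state (a format-appropriate "not found" indication is written if
        // the pointer is NULL)...
        //
        dump (t, nullopt /* action */, dump_format::buildfile);

        // ... and a scope in the JSON format (only available in
        // non-bootstrap builds).
        //
    #ifndef BUILD2_BOOTSTRAP
        dump (s, nullopt /* action */, dump_format::json);
    #endif
      }
    }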
diff --git a/libbuild2/dyndep.cxx b/libbuild2/dyndep.cxx
index ace901b..dbeb47e 100644
--- a/libbuild2/dyndep.cxx
+++ b/libbuild2/dyndep.cxx
@@ -5,6 +5,7 @@
#include <libbuild2/scope.hxx>
#include <libbuild2/target.hxx>
+#include <libbuild2/search.hxx>
#include <libbuild2/context.hxx>
#include <libbuild2/algorithm.hxx>
#include <libbuild2/filesystem.hxx>
@@ -69,19 +70,21 @@ namespace build2
{
const prerequisite_target& p (pts[i]);
- // @@ This currently doesn't cover adhoc targets if matched with
- // buildscript (it stores them in p.data). Probably need to redo
- // things there (see adhoc_buildscript_rule::apply()).
+ // If include_target flag is specified, then p.data contains the
+ // target pointer.
//
- if (p.target != nullptr)
+ if (const target* xt =
+ (p.target != nullptr ? p.target :
+ ((p.include & prerequisite_target::include_target) != 0
+ ? reinterpret_cast<target*> (p.data)
+ : nullptr)))
{
- if (p.target == &pt &&
- (p.include & prerequisite_target::include_udm) != 0)
+ if (xt == &pt && (p.include & prerequisite_target::include_udm) != 0)
return true;
- if (size_t n = p.target->prerequisite_targets[a].size ())
+ if (size_t n = xt->prerequisite_targets[a].size ())
{
- if (updated_during_match (a, *p.target, n, pt))
+ if (updated_during_match (a, *xt, n, pt))
return true;
}
}
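For context, a sketch of the producing side that the updated check above consumes: a rule that does not expose a prerequisite target via prerequisite_target::target can stash the pointer in data and flag the entry with include_target so that updated_during_match() still sees it. The surrounding names (t, a, at) and the assumption that the rule has already pushed the entry are illustrative; only the target/include/data members and the include_target flag come from libbuild2, and data is assumed to be a uintptr_t, matching the reinterpret_cast in the code above.

      // Record the ad hoc target at against target t for action a, assuming
      // the corresponding prerequisite_target entry was just pushed.
      //
      prerequisite_target& p (t.prerequisite_targets[a].back ());
      p.target = nullptr;                               // Not exposed directly.
      p.data = reinterpret_cast<uintptr_t> (&at);       // Stash the pointer.
      p.include |= prerequisite_target::include_target; // Make dyndep see it.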
@@ -115,7 +118,7 @@ namespace build2
recipe_function* const* rf (pt[a].recipe.target<recipe_function*> ());
if (rf == nullptr || *rf != &noop_action)
{
- if (!updated_during_match (a, t, pts_n, pt))
+ if (pts_n == 0 || !updated_during_match (a, t, pts_n, pt))
{
fail << what << ' ' << pt << " has non-noop recipe" <<
info << "consider listing it as static prerequisite of " << t;
@@ -138,12 +141,12 @@ namespace build2
{
diag_record dr;
- if (pt.matched (a))
+ if (pt.matched (a, memory_order_acquire))
{
recipe_function* const* rf (pt[a].recipe.target<recipe_function*> ());
if (rf == nullptr || *rf != &noop_action)
{
- if (!updated_during_match (a, t, pts_n, pt))
+ if (pts_n == 0 || !updated_during_match (a, t, pts_n, pt))
{
dr << fail << what << ' ' << pt << " has non-noop recipe";
}
@@ -162,12 +165,6 @@ namespace build2
dr << info << "consider listing it as static prerequisite of " << t;
}
- // Reverse-lookup target type(s) from file name/extension.
- //
- // If the list of base target types is specified, then only these types and
- // those derived from them are considered. Otherwise, any file-based type is
- // considered but not the file type itself.
- //
small_vector<const target_type*, 2> dyndep_rule::
map_extension (const scope& bs,
const string& n, const string& e,
@@ -436,6 +433,8 @@ namespace build2
// NOTE: see enter_header() caching logic if changing anything here with
// regards to the target and base scope usage.
+ assert (!insert || t.ctx.phase == run_phase::match);
+
// Find or maybe insert the target.
//
// If insert is false, then don't consider dynamically-created targets
@@ -443,7 +442,7 @@ namespace build2
// which case return the target that would have been inserted.
//
// The directory is only moved from if insert is true. Note that it must
- // be normalized.
+ // be absolute and normalized.
//
auto find = [&trace, what, &bs, &t,
&map_extension,
@@ -452,6 +451,8 @@ namespace build2
bool insert,
bool dynamic = false) -> const file*
{
+ context& ctx (t.ctx);
+
// Split the file into its name part and extension. Here we can assume
// the name part is a valid filesystem name.
//
@@ -510,7 +511,7 @@ namespace build2
}
else
{
- const scope& bs (**t.ctx.scopes.find (d).first);
+ const scope& bs (**ctx.scopes.find (d).first);
if (const scope* rs = bs.root_scope ())
{
if (map_extension != nullptr)
@@ -564,7 +565,7 @@ namespace build2
{
const target_type& tt (*tts[i]);
- if (const target* x = t.ctx.targets.find (tt, d, out, n, e, trace))
+ if (const target* x = ctx.targets.find (tt, d, out, n, e, trace))
{
// What would be the harm in reusing a dynamically-inserted target
// if there is no buildfile-mentioned one? Probably none (since it
@@ -625,10 +626,29 @@ namespace build2
r = f;
}
- // @@ OPT: move d, out, n
- //
if (r == nullptr && insert)
+ {
+ // Like search(t, pk) but don't fail if the target is in src.
+ //
+ // While it may seem like there is not much difference, the caller may
+ // actually do more than just issue more specific diagnostics. For
+ // example, it may defer the failure to the tool diagnostics.
+ //
+#if 0
r = &search (t, *tts[0], d, out, n, &e, s);
+#else
+ prerequisite_key pk {nullopt, {tts[0], &d, &out, &n, move (e)}, s};
+
+ r = pk.tk.type->search (ctx, &t, pk);
+
+ if (r == nullptr && pk.tk.out->empty ())
+ {
+ auto p (ctx.scopes.find (d, false));
+ if (*p.first != nullptr || ++p.first == p.second)
+ r = &create_new_target (ctx, pk);
+ }
+#endif
+ }
return static_cast<const file*> (r);
};
@@ -833,48 +853,260 @@ namespace build2
map_ext, fallback, pfx_map, so_map);
}
- const file& dyndep_rule::
- inject_group_member (action a, const scope& bs, mtime_target& g,
- path p, const target_type& tt)
+ static pair<const file&, bool>
+ inject_group_member_impl (action a, const scope& bs, mtime_target& g,
+ path f, string n, string e,
+ const target_type& tt,
+ const function<dyndep_rule::group_filter_func>& fl)
{
- path n (p.leaf ());
- string e (n.extension ());
+ // NOTE: see adhoc_rule_regex_pattern::apply_group_members() for a variant
+ // of the same code.
- // Assume nobody else can insert these members (seems reasonable seeing
- // that their names are dynamically discovered).
+ // Note that we used to directly match such a member with group_recipe.
+ // But that messes up our dependency counts since we don't really know
+ // whether someone will execute such a member.
+ //
+ // So instead we now just link the member up to the group and rely on the
+ // special semantics in match_rule_impl() for groups with the dyn_members
+ // flag.
+ //
+ assert ((g.type ().flags & target_type::flag::dyn_members) ==
+ target_type::flag::dyn_members);
+
+ // We expect that nobody else can insert these members (seems reasonable
+ // seeing that their names are dynamically discovered).
//
auto l (search_new_locked (
bs.ctx,
tt,
- p.directory (),
+ f.directory (),
dir_path (), // Always in out.
- move (n.make_base ()).string (),
+ move (n),
&e,
&bs));
const file& t (l.first.as<file> ()); // Note: non-const only if have lock.
+ // We don't need to match the group recipe directly from ad hoc
+ // recipes/rules due to the special semantics for explicit group members
+ // in match_rule_impl(). This is what skip_match is for.
+ //
if (l.second)
{
l.first.group = &g;
l.second.unlock ();
- t.path (move (p)); // Only do this once.
+ t.path (move (f));
+ return pair<const file&, bool> (t, true);
}
else
- // Must have been already done (e.g., on previous operation in a
- // batch).
+ {
+ if (fl != nullptr && !fl (g, t))
+ return pair<const file&, bool> (t, false);
+ }
+
+ // Check if we already belong to this group. Note that this is not a mere
+ // optimization since we may be in the member->group->member chain and
+ // trying to lock the member the second time would deadlock (this can be
+ // triggered, for example, by dist, which sort of depends on such members
+ // directly... which was not quite correct and is now fixed).
+ //
+ if (t.group == &g) // Note: atomic.
+ t.path (move (f));
+ else
+ {
+ // This shouldn't normally fail since we are the only ones that should
+ // know about this target (otherwise why is it dynamically discovered).
+ // However, nothing prevents the user from depending on such a target,
+ // however misguided.
//
- assert (t.group == &g);
+ target_lock tl (lock (a, t));
+
+ if (!tl)
+ fail << "group " << g << " member " << t << " is already matched" <<
+ info << "dynamically extracted group members cannot be used as "
+ << "prerequisites directly, only via group";
+
+ if (t.group == nullptr)
+ tl.target->group = &g;
+ else if (t.group != &g)
+ fail << "group " << g << " member " << t
+ << " is already member of group " << *t.group;
+
+ t.path (move (f));
+ }
+
+ return pair<const file&, bool> (t, true);
+ }
+
+ pair<const file&, bool> dyndep_rule::
+ inject_group_member (action a, const scope& bs, mtime_target& g,
+ path f,
+ const target_type& tt,
+ const function<group_filter_func>& filter)
+ {
+ path n (f.leaf ());
+ string e (n.extension ());
+ n.make_base ();
+
+ return inject_group_member_impl (a, bs, g,
+ move (f), move (n).string (), move (e),
+ tt,
+ filter);
+ }
+
+ static const target_type&
+ map_target_type (const char* what,
+ const scope& bs,
+ const path& f, const string& n, const string& e,
+ const function<dyndep_rule::map_extension_func>& map_ext,
+ const target_type& fallback)
+ {
+ // Map extension to the target type, falling back to the fallback type.
+ //
+ small_vector<const target_type*, 2> tts;
+ if (map_ext != nullptr)
+ tts = map_ext (bs, n, e);
+
+ // Not sure what else we can do in this case.
+ //
+ if (tts.size () > 1)
+ {
+ diag_record dr (fail);
+
+ dr << "mapping of " << what << " target path " << f
+ << " to target type is ambiguous";
+
+ for (const target_type* tt: tts)
+ dr << info << "can be " << tt->name << "{}";
+ }
+
+ const target_type& tt (tts.empty () ? fallback : *tts.front ());
+
+ if (!tt.is_a<file> ())
+ {
+ fail << what << " target path " << f << " mapped to non-file-based "
+ << "target type " << tt.name << "{}";
+ }
+
+ return tt;
+ }
+
+ pair<const file&, bool> dyndep_rule::
+ inject_group_member (const char* what,
+ action a, const scope& bs, mtime_target& g,
+ path f,
+ const function<map_extension_func>& map_ext,
+ const target_type& fallback,
+ const function<group_filter_func>& filter)
+ {
+ path n (f.leaf ());
+ string e (n.extension ());
+ n.make_base ();
+
+ // Map extension to the target type, falling back to the fallback type.
+ //
+ const target_type& tt (
+ map_target_type (what, bs, f, n.string (), e, map_ext, fallback));
+
+ return inject_group_member_impl (a, bs, g,
+ move (f), move (n).string (), move (e),
+ tt,
+ filter);
+ }
+
+ pair<const file&, bool>
+ inject_adhoc_group_member_impl (action, const scope& bs, target& t,
+ path f, string n, string e,
+ const target_type& tt)
+ {
+ // Assume nobody else can insert these members (seems reasonable seeing
+ // that their names are dynamically discovered).
+ //
+ auto l (search_new_locked (
+ bs.ctx,
+ tt,
+ f.directory (),
+ dir_path (), // Always in out.
+ move (n),
+ &e,
+ &bs));
+
+ file* ft (&l.first.as<file> ()); // Note: non-const only if locked.
+
+ // Skip if this is one of the static targets (or a duplicate of the
+ // dynamic target).
+ //
+ // In particular, we expect to skip all the targets that we could not lock
+ // (e.g., in case all of this has already been done for the previous
+ // operation in a batch; make sure to test `update update update` and
+ // `update clean update ...` batches if changing anything here).
+ //
+ // While at it also find the ad hoc members list tail.
+ //
+ const_ptr<target>* tail (&t.adhoc_member);
+ for (target* m (&t); m != nullptr; m = m->adhoc_member)
+ {
+ if (ft == m)
+ {
+ tail = nullptr;
+ break;
+ }
+
+ tail = &m->adhoc_member;
+ }
+
+ if (tail == nullptr)
+ return pair<const file&, bool> (*ft, false);
+
+ if (!l.second)
+ fail << "dynamic target " << *ft << " already exists and cannot be "
+ << "made ad hoc member of group " << t;
+
+ ft->group = &t;
+ l.second.unlock ();
+
+ // We need to be able to distinguish static targets from dynamic (see the
+ // static set hashing in adhoc_buildscript_rule::apply() for details).
+ //
+ assert (ft->decl != target_decl::real);
+
+ *tail = ft;
+ ft->path (move (f));
+
+ return pair<const file&, bool> (*ft, true);
+ }
+
+ pair<const file&, bool> dyndep_rule::
+ inject_adhoc_group_member (action a, const scope& bs, target& t,
+ path f,
+ const target_type& tt)
+ {
+ path n (f.leaf ());
+ string e (n.extension ());
+ n.make_base ();
+
+ return inject_adhoc_group_member_impl (
+ a, bs, t, move (f), move (n).string (), move (e), tt);
+ }
+
+ pair<const file&, bool> dyndep_rule::
+ inject_adhoc_group_member (const char* what,
+ action a, const scope& bs, target& t,
+ path f,
+ const function<map_extension_func>& map_ext,
+ const target_type& fallback)
+ {
+ path n (f.leaf ());
+ string e (n.extension ());
+ n.make_base ();
- // This shouldn't fail since we are the only ones that should be matching
- // this target.
+ // Map extension to the target type, falling back to the fallback type.
//
- target_lock tl (lock (a, t));
- assert (tl);
+ const target_type& tt (
+ map_target_type (what, bs, f, n.string (), e, map_ext, fallback));
- match_inc_dependents (a, g);
- match_recipe (tl, group_recipe);
- return t;
+ return inject_adhoc_group_member_impl (
+ a, bs, t, move (f), move (n).string (), move (e), tt);
}
}
diff --git a/libbuild2/dyndep.hxx b/libbuild2/dyndep.hxx
index a6f800e..a0949c4 100644
--- a/libbuild2/dyndep.hxx
+++ b/libbuild2/dyndep.hxx
@@ -36,7 +36,7 @@ namespace build2
// target.
//
// Return the indication of whether it has changed or, if the passed
- // timestamp is not timestamp_unknown, is older than this timestamp. If
+ // timestamp is not timestamp_unknown, is newer than this timestamp. If
// the prerequisite target does not exist nor can be generated (no rule),
// then issue diagnostics and fail if the fail argument is true and return
// nullopt otherwise.
@@ -57,8 +57,10 @@ namespace build2
bool adhoc = false,
uintptr_t data = 0);
- // As above but verify the file is matched with noop_recipe and issue
- // diagnostics and fail otherwise (regardless of the fail flag).
+ // As above but verify the file is matched with noop_recipe or was updated
+ // during match and issue diagnostics and fail otherwise (regardless of
+ // the fail flag). Pass 0 for pts_n if you don't want the "was updated during
+ // match" part.
//
// This version (together with verify_existing_file() below) is primarily
// useful for handling dynamic dependencies that are produced as a
@@ -77,9 +79,11 @@ namespace build2
bool adhoc = false,
uintptr_t data = 0);
- // Verify the file is matched with noop_recipe and issue diagnostics and
- // fail otherwise. If the file is not matched, then fail if the target is
- // not implied (that is, declared in a buildfile).
+ // Verify the file is matched with noop_recipe or was updated during match
+ // and issue diagnostics and fail otherwise. If the file is not matched,
+ // then fail if the target is not implied (that is, declared in a
+ // buildfile). Pass 0 for pts_n if you don't want the "was updated during
+ // match" part.
//
// Note: can only be called in the execute phase.
//
@@ -94,6 +98,10 @@ namespace build2
// and those derived from them are considered. Otherwise, any file-based
// type is considered but not the file type itself.
//
+ // It's possible the extension-to-target type mapping is ambiguous (for
+ // example, because both C and C++-language headers use the same .h
+ // extension). So this function can return multiple target types.
+ //
static small_vector<const target_type*, 2>
map_extension (const scope& base,
const string& name, const string& ext,
@@ -224,26 +232,72 @@ namespace build2
const srcout_map& = {});
// Find or insert a target file path as a target of the specified type,
- // make it a member of the specified (non-ad hoc) mtime target group,
- // set its path, and match it with group_recipe.
+ // make it a member of the specified (non-ad hoc) mtime target group and
+ // set its path. Return the target and an indication of whether it was
+ // made a member (can only be false if a filter is provided; see below).
//
// The file path must be absolute and normalized. Note that this function
- // assumes that this member can only be matched via this group.
+ // assumes that this member can only be matched via this group. The group
+ // type must have the target_type::flag::dyn_members flag.
//
- // Note: we can split this function into {enter,match}_group_member()
- // if necessary.
+ // If specified, the group_filter function is called on the target before
+ // making it a group member, skipping it if this function returns false.
+ // Note that the filter is skipped if the target is newly inserted (the
+ // filter is meant to be used to skip duplicates).
//
- static const file&
+ using group_filter_func = bool (mtime_target& g, const file&);
+
+ static pair<const file&, bool>
inject_group_member (action, const scope& base, mtime_target&,
- path, const target_type&);
+ path,
+ const target_type&,
+ const function<group_filter_func>& = nullptr);
template <typename T>
- static const T&
- inject_group_member (action a, const scope& bs, mtime_target& g, path p)
+ static pair<const T&, bool>
+ inject_group_member (action a, const scope& bs, mtime_target& g,
+ path f,
+ const function<group_filter_func>& filter = nullptr)
{
- return inject_group_member (
- a, bs, g, move (p), T::static_type).template as<T> ();
+ auto p (inject_group_member (a, bs, g, move (f), T::static_type, filter));
+ return pair<const T&, bool> (p.first.template as<T> (), p.second);
}
+
+ // As above but the target type is determined using the map_extension
+ // function if specified, falling back to the fallback type if unable to
+ // (the what argument is used for diagnostics during this process).
+ //
+ static pair<const file&, bool>
+ inject_group_member (const char* what,
+ action, const scope& base, mtime_target& g,
+ path,
+ const function<map_extension_func>&,
+ const target_type& fallback,
+ const function<group_filter_func>& = nullptr);
+
+
+ // Find or insert a target file path as a target, make it a member of the
+ // specified ad hoc group unless it already is, and set its path. Return
+ // the target and an indication of whether it was added as a member.
+ //
+ // The file path must be absolute and normalized. Note that this function
+ // assumes that this target can only be known as a member of this group.
+ //
+ static pair<const file&, bool>
+ inject_adhoc_group_member (action, const scope& base, target& g,
+ path,
+ const target_type&);
+
+ // As above but the target type is determined using the map_extension
+ // function if specified, falling back to the fallback type if unable to
+ // (the what argument is used for diagnostics during this process).
+ //
+ static pair<const file&, bool>
+ inject_adhoc_group_member (const char* what,
+ action, const scope& base, target& g,
+ path,
+ const function<map_extension_func>&,
+ const target_type& fallback);
};
}
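The new inject_group_member() overloads return a pair<const file&, bool> and take an optional group_filter_func, so a rule can both tell whether the discovered file was actually linked up as a member and skip duplicates. A minimal sketch of a call site, assuming the surrounding rule context (the a, bs, g, and mp variables and the filter logic are illustrative, not part of this commit):

  // Assumed context: inside a rule that discovers group members dynamically,
  // where `a` is the action, `bs` the base scope, `g` an mtime_target group
  // whose type has the dyn_members flag, and `mp` an absolute, normalized
  // path of the discovered member.
  //
  auto filter = [] (mtime_target&, const file& m)
  {
    // Hypothetical filter: only link up targets that were not declared
    // statically in a buildfile (i.e., skip duplicates of static members).
    //
    return m.decl != target_decl::real;
  };

  auto r (dyndep_rule::inject_group_member (
            a, bs, g, move (mp), file::static_type, filter));

  const file& m (r.first);

  if (r.second)
  {
    // m was newly linked up as a group member and its path is now set.
  }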
diff --git a/libbuild2/file-cache.hxx b/libbuild2/file-cache.hxx
index e31517e..98c2b67 100644
--- a/libbuild2/file-cache.hxx
+++ b/libbuild2/file-cache.hxx
@@ -119,9 +119,9 @@ namespace build2
// Move-to-NULL-only type.
//
- write (write&&);
+ write (write&&) noexcept;
write (const write&) = delete;
- write& operator= (write&&);
+ write& operator= (write&&) noexcept;
write& operator= (const write&) = delete;
~write ();
@@ -145,9 +145,9 @@ namespace build2
// Move-to-NULL-only type.
//
- read (read&&);
+ read (read&&) noexcept;
read (const read&) = delete;
- read& operator= (read&&);
+ read& operator= (read&&) noexcept;
read& operator= (const read&) = delete;
~read ();
diff --git a/libbuild2/file-cache.ixx b/libbuild2/file-cache.ixx
index 026f3fd..99be5ad 100644
--- a/libbuild2/file-cache.ixx
+++ b/libbuild2/file-cache.ixx
@@ -109,14 +109,14 @@ namespace build2
}
inline file_cache::write::
- write (write&& e)
+ write (write&& e) noexcept
: entry_ (e.entry_)
{
e.entry_ = nullptr;
}
inline file_cache::write& file_cache::write::
- operator= (write&& e)
+ operator= (write&& e) noexcept
{
if (this != &e)
{
@@ -136,14 +136,14 @@ namespace build2
}
inline file_cache::read::
- read (read&& e)
+ read (read&& e) noexcept
: entry_ (e.entry_)
{
e.entry_ = nullptr;
}
inline file_cache::read& file_cache::read::
- operator= (read&& e)
+ operator= (read&& e) noexcept
{
if (this != &e)
{
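Both file_cache handles are now movable with the noexcept guarantee, which lets containers and callers relocate them without falling back to (deleted) copies. The underlying idiom is the usual move-to-NULL-only type; here is a self-contained sketch of it with a simplified entry pointer (not build2 code):

  // A "move-to-NULL-only" handle: moving steals the entry pointer and resets
  // the source, copying is disabled, and both move operations are noexcept.
  //
  struct handle
  {
    explicit handle (void* e = nullptr): entry_ (e) {}

    handle (handle&& h) noexcept: entry_ (h.entry_) {h.entry_ = nullptr;}

    handle& operator= (handle&& h) noexcept
    {
      if (this != &h)
      {
        entry_ = h.entry_;  // Would release the current entry_ first.
        h.entry_ = nullptr;
      }
      return *this;
    }

    handle (const handle&) = delete;
    handle& operator= (const handle&) = delete;

    ~handle () {} // Would release entry_ here if it is not NULL.

    void* entry_;
  };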
diff --git a/libbuild2/file.cxx b/libbuild2/file.cxx
index b93a20a..18147a2 100644
--- a/libbuild2/file.cxx
+++ b/libbuild2/file.cxx
@@ -4,6 +4,7 @@
#include <libbuild2/file.hxx>
#include <cerrno>
+#include <cstring> // strlen()
#include <iomanip> // left, setw()
#include <sstream>
@@ -29,6 +30,8 @@ namespace build2
{
// Standard and alternative build file/directory naming schemes.
//
+ extern const dir_path std_export_dir;
+ extern const dir_path alt_export_dir;
// build:
@@ -36,6 +39,7 @@ namespace build2
const dir_path std_root_dir (dir_path (std_build_dir) /= "root");
const dir_path std_bootstrap_dir (dir_path (std_build_dir) /= "bootstrap");
const dir_path std_build_build_dir (dir_path (std_build_dir) /= "build");
+ const dir_path std_export_dir (dir_path (std_build_dir) /= "export");
const path std_root_file (std_build_dir / "root.build");
const path std_bootstrap_file (std_build_dir / "bootstrap.build");
@@ -53,6 +57,7 @@ namespace build2
const dir_path alt_root_dir (dir_path (alt_build_dir) /= "root");
const dir_path alt_bootstrap_dir (dir_path (alt_build_dir) /= "bootstrap");
const dir_path alt_build_build_dir (dir_path (alt_build_dir) /= "build");
+ const dir_path alt_export_dir (dir_path (alt_build_dir) /= "export");
const path alt_root_file (alt_build_dir / "root.build2");
const path alt_bootstrap_file (alt_build_dir / "bootstrap.build2");
@@ -219,7 +224,7 @@ namespace build2
// Checking for plausibility feels expensive since we have to recursively
// traverse the directory tree. Note, however, that if the answer is
// positive, then shortly after we will be traversing this tree anyway and
- // presumably this time getting the data from the cash (we don't really
+ // presumably this time getting the data from the cache (we don't really
// care about the negative answer since this is a degenerate case).
//
optional<path> bf;
@@ -358,7 +363,7 @@ namespace build2
//
try
{
- for (const dir_entry& de: dir_iterator (d, false /* ignore_dangling */))
+ for (const dir_entry& de: dir_iterator (d, dir_iterator::no_follow))
{
// If this is a link, then type() will try to stat() it. And if the
// link is dangling or points to something inaccessible, it will fail.
@@ -523,10 +528,14 @@ namespace build2
pair<scope&, scope*>
switch_scope (scope& root, const dir_path& out_base, bool proj)
{
+ context& ctx (root.ctx);
+
+ assert (ctx.phase == run_phase::load);
+
// First, enter the scope into the map and see if it is in any project. If
// it is not, then there is nothing else to do.
//
- auto i (root.ctx.scopes.rw (root).insert_out (out_base));
+ auto i (ctx.scopes.rw (root).insert_out (out_base));
scope& base (*i->second.front ());
scope* rs (nullptr);
@@ -843,12 +852,35 @@ namespace build2
try
{
- for (const dir_entry& de: dir_iterator (d, true /* ignore_dangling */))
+ // It's quite possible for a subproject to be a symlink with the link
+ // target, for example, residing in a git submodule. Considering that, it
+ // makes sense to warn about dangling symlinks.
+ //
+ for (const dir_entry& de:
+ dir_iterator (d, dir_iterator::detect_dangling))
{
+ const path& n (de.path ());
+
+ // Skip hidden entries.
+ //
+ if (n.empty () || n.string ().front () == '.')
+ continue;
+
if (de.type () != entry_type::directory)
+ {
+ if (de.type () == entry_type::unknown)
+ {
+ bool sl (de.ltype () == entry_type::symlink);
+
+ warn << "skipping "
+ << (sl ? "dangling symlink" : "inaccessible entry") << ' '
+ << d / n;
+ }
+
continue;
+ }
- dir_path sd (d / path_cast<dir_path> (de.path ()));
+ dir_path sd (d / path_cast<dir_path> (n));
bool src (false);
optional<bool> altn;
@@ -947,7 +979,16 @@ namespace build2
rs.root_extra->amalgamation = nullptr;
rs.root_extra->subprojects = nullptr;
+ // See GH issue #322.
+ //
+#if 0
assert (!aovr || aovr->empty ());
+#else
+ if (!(!aovr || aovr->empty ()))
+ fail << "amalgamation directory " << *aovr << " specified for simple "
+ << "project " << src_root <<
+ info << "see https://github.com/build2/build2/issues/322 for details";
+#endif
}
// We assume that bootstrap out cannot load this file explicitly. It
// feels wrong to allow this since that makes the whole bootstrap
@@ -1317,9 +1358,9 @@ namespace build2
// Call module's post-boot functions.
//
- for (size_t i (0); i != root.root_extra->modules.size (); ++i)
+ for (size_t i (0); i != root.root_extra->loaded_modules.size (); ++i)
{
- module_state& s (root.root_extra->modules[i]);
+ module_state& s (root.root_extra->loaded_modules[i]);
if (s.boot_post != nullptr)
boot_post_module (root, s);
@@ -1536,11 +1577,11 @@ namespace build2
// Note that init() can load additional modules invalidating iterators.
//
auto init_modules =
- [&root, n = root.root_extra->modules.size ()] (module_boot_init v)
+ [&root, n = root.root_extra->loaded_modules.size ()] (module_boot_init v)
{
for (size_t i (0); i != n; ++i)
{
- module_state& s (root.root_extra->modules[i]);
+ module_state& s (root.root_extra->loaded_modules[i]);
if (s.boot_init && *s.boot_init == v)
init_module (root, root, s.name, s.loc);
@@ -1603,12 +1644,19 @@ namespace build2
init_modules (module_boot_init::after);
}
- // Print the project configuration report, similar to how we do it in
+ // Print the project configuration report(s), similar to how we do it in
// build system modules.
//
- if (!p.config_report.empty () && verb >= (p.config_report_new ? 2 : 3))
+ using config_report = parser::config_report;
+
+ const project_name* proj (nullptr); // Resolve lazily.
+ for (const config_report& cr: p.config_reports)
{
- const project_name& proj (named_project (root)); // Can be empty.
+ if (verb < (cr.new_value ? 2 : 3))
+ continue;
+
+ if (proj == nullptr)
+ proj = &named_project (root); // Can be empty.
// @@ TODO/MAYBE:
//
@@ -1626,46 +1674,74 @@ namespace build2
// config @/tmp/tests
// libhello.tests.remote true
//
- string stem (!proj.empty () ? '.' + proj.variable () + '.' : string ());
+ // If the module name is not empty then it means the config variables
+ // are from the imported project and so we use that for <project>.
+ //
+ string stem (!cr.module.empty ()
+ ? '.' + cr.module.variable () + '.'
+ : (!proj->empty ()
+ ? '.' + proj->variable () + '.'
+ : string ()));
- // Calculate max name length.
+ // Return the variable name for printing.
//
- size_t pad (10);
- for (const pair<lookup, string>& lf: p.config_report)
+ auto name = [&stem] (const config_report::value& cv) -> const char*
{
- lookup l (lf.first);
+ lookup l (cv.val);
- size_t n;
if (l.value == nullptr)
{
- n = l.var->name.size ();
+ if (cv.org.empty ())
+ return l.var->name.c_str ();
+
+ // This case may or may not have the prefix.
+ //
+ size_t p, n (
+ !stem.empty ()
+ ? (p = cv.org.find (stem)) != string::npos ? p + stem.size () : 0
+ : cv.org.compare (0, 7, "config.") == 0 ? 7 : 0);
+
+ return cv.org.c_str () + n;
}
else
{
+ assert (cv.org.empty ()); // Sanity check.
+
size_t p (!stem.empty ()
? l.var->name.find (stem) + stem.size ()
: 7); // "config."
- n = l.var->name.size () - p;
+
+ return l.var->name.c_str () + p;
}
+ };
+
+ // Calculate max name length.
+ //
+ size_t pad (10);
+ for (const config_report::value& cv: cr.values)
+ {
+ size_t n (strlen (name (cv)));
if (n > pad)
pad = n;
}
// Use the special `config` module name (which doesn't have its own
- // report) for project configuration.
+ // report) for project's own configuration.
//
diag_record dr (text);
- dr << "config " << proj << '@' << root;
+ dr << (cr.module.empty () ? "config" : cr.module.string ().c_str ())
+ << ' ' << *proj << '@' << root;
names storage;
- for (const pair<lookup, string>& lf: p.config_report)
+ for (const config_report::value& cv: cr.values)
{
- lookup l (lf.first);
- const string& f (lf.second);
+ lookup l (cv.val);
+ const string& f (cv.fmt);
// If the report variable has been overridden, now is the time to
- // lookup its value.
+ // lookup its value. Note: see also the name() lambda above if
+ // changing anything here.
//
string n;
if (l.value == nullptr)
@@ -1681,24 +1757,26 @@ namespace build2
n = string (l.var->name, p);
}
+ const char* pn (name (cv)); // Print name.
+
dr << "\n ";
- if (const value& v = *l)
+ if (l)
{
storage.clear ();
- auto ns (reverse (v, storage));
+ auto ns (reverse (*l, storage, true /* reduce */));
if (f == "multiline")
{
- dr << n;
+ dr << pn;
for (auto& n: ns)
dr << "\n " << n;
}
else
- dr << left << setw (static_cast<int> (pad)) << n << ' ' << ns;
+ dr << left << setw (static_cast<int> (pad)) << pn << ' ' << ns;
}
else
- dr << left << setw (static_cast<int> (pad)) << n << " [null]";
+ dr << left << setw (static_cast<int> (pad)) << pn << " [null]";
}
}
@@ -2017,16 +2095,13 @@ namespace build2
&t);
}
- // Suggest appropriate ways to import the specified target (as type and
- // name) from the specified project.
- //
- static void
+ void
import_suggest (const diag_record& dr,
const project_name& pn,
- const target_type& tt,
+ const target_type* tt,
const string& tn,
bool rule_hint,
- const char* qual = nullptr)
+ const char* qual)
{
string pv (pn.variable ());
@@ -2038,11 +2113,11 @@ namespace build2
// Suggest ad hoc import but only if it's a path-based target (doing it
// for lib{} is very confusing).
//
- if (tt.is_a<path_target> ())
+ if (tt != nullptr && tt->is_a<path_target> ())
{
- string v (tt.is_a<exe> () && (pv == tn || pn == tn)
+ string v (tt->is_a<exe> () && (pv == tn || pn == tn)
? "config." + pv
- : "config.import." + pv + '.' + tn + '.' + tt.name);
+ : "config.import." + pv + '.' + tn + '.' + tt->name);
dr << info << "or use " << v << " configuration variable to specify "
<< "its " << (qual != nullptr ? qual : "") << "path";
@@ -2065,6 +2140,9 @@ namespace build2
// Return empty name if an ad hoc import resulted in a NULL target (only
// allowed if optional is true).
//
+ // Note that this function has a side effect of potentially marking some
+ // config.import.* variables as used.
+ //
pair<name, optional<dir_path>>
import_search (bool& new_value,
scope& ibase,
@@ -2096,6 +2174,9 @@ namespace build2
//
// 4. Normal import.
//
+ // @@ PERF: in quite a few places (local, subproject) we could have
+ // returned the scope and save on bootstrap in import_load().
+ //
if (tgt.unqualified ())
{
if (tgt.directory () && tgt.relative ())
@@ -2103,6 +2184,8 @@ namespace build2
if (tgt.absolute ())
{
+ // Ad hoc import.
+ //
// Actualize the directory to be analogous to the config.import.<proj>
// case (which is of abs_dir_path type).
//
@@ -2119,7 +2202,7 @@ namespace build2
fail (loc) << "project-local importation of target " << tgt
<< " from an unnamed project";
- tgt.proj = pn;
+ tgt.proj = pn; // Reduce to normal import.
return make_pair (move (tgt), optional<dir_path> (iroot.out_path ()));
}
@@ -2324,7 +2407,7 @@ namespace build2
[&proj, tt, &on] (const diag_record& dr)
{
import_suggest (
- dr, proj, *tt, on, false, "alternative ");
+ dr, proj, tt, on, false, "alternative ");
});
md = extract_metadata (e->process_path (),
@@ -2647,6 +2730,68 @@ namespace build2
fail (loc) << out_root << " is not out_root for " << *proj;
}
+ // Buildfile importation is quite different so handle it separately.
+ //
+ // Note that we don't need to load the project in this case.
+ //
+ // @@ For now we don't out-qualify the resulting target to be able to
+ // re-import it ad hoc (there is currently no support for out-qualified
+ // ad hoc import). Feels like this should be harmless since it's just a
+ // glorified path to a static file that nobody is actually going to use
+ // as a target (e.g., to depend upon).
+ //
+ if (tgt.type == "buildfile")
+ {
+ auto add_ext = [&altn] (string& n)
+ {
+ if (path_traits::find_extension (n) == string::npos)
+ {
+ if (n != (*altn ? alt_buildfile_file : std_buildfile_file).string ())
+ {
+ n += ".";
+ n += *altn ? alt_build_ext : std_build_ext;
+ }
+ }
+ };
+
+ if (proj)
+ {
+ name n;
+
+ if (src_root.empty ())
+ src_root = root->src_path ();
+
+ n.dir = move (src_root);
+ n.dir /= *altn ? alt_export_dir : std_export_dir;
+ if (!tgt.dir.empty ())
+ {
+ n.dir /= tgt.dir;
+ n.dir.normalize ();
+ }
+
+ n.type = tgt.type;
+ n.value = tgt.value;
+ add_ext (n.value);
+
+ pair<names, const scope&> r (names {move (n)}, *root);
+
+ // Cache.
+ //
+ if (cache_out_root.empty ())
+ cache_out_root = move (out_root);
+
+ ctx.import_cache.emplace (
+ import_key {move (cache_out_root), move (tgt), metav}, r);
+
+ return r;
+ }
+ else
+ {
+ add_ext (tgt.value);
+ return pair<names, const scope&> (names {move (tgt)}, *root);
+ }
+ }
+
// Load the imported root scope.
//
if (!root->root_extra->loaded)
@@ -2668,6 +2813,9 @@ namespace build2
if (cache_out_root.empty ())
cache_out_root = out_root;
+ if (src_root.empty ())
+ src_root = root->src_path ();
+
ts.assign (ctx.var_out_root) = move (out_root);
ts.assign (ctx.var_src_root) = move (src_root);
@@ -2698,7 +2846,8 @@ namespace build2
l5 ([&]{trace << "importing " << es;});
// @@ Should we verify these are all unqualified names? Or maybe there
- // is a use-case for the export stub to return a qualified name?
+ // is a use-case for the export stub to return a qualified name? E.g.,
+ // re-export?
//
names v;
{
@@ -2709,7 +2858,7 @@ namespace build2
});
parser p (ctx);
- v = p.parse_export_stub (ifs, path_name (es), gs, ts);
+ v = p.parse_export_stub (ifs, path_name (es), *root, gs, ts);
}
// If there were no export directive executed in an export stub,
@@ -2783,7 +2932,36 @@ namespace build2
}
}
- pair<names, import_kind>
+ const target_type&
+ import_target_type (scope& root,
+ const scope& iroot, const string& n,
+ const location& l)
+ {
+ // NOTE: see similar code in parser::parse_define().
+
+ const target_type* tt (iroot.find_target_type (n));
+ if (tt == nullptr)
+ fail (l) << "unknown imported target type " << n << " in project "
+ << iroot;
+
+ auto p (root.root_extra->target_types.insert (*tt));
+
+ if (!p.second && &p.first.get () != tt)
+ fail (l) << "imported target type " << n << " already defined in project "
+ << root;
+
+ return *tt;
+ }
+
+ static names
+ import2_buildfile (context&, names&&, bool, const location&);
+
+ static const target*
+ import2 (context&, const scope&, names&,
+ const string&, bool, const optional<string>&, bool,
+ const location&);
+
+ import_result<scope>
import (scope& base,
name tgt,
const optional<string>& ph2,
@@ -2813,7 +2991,10 @@ namespace build2
import_result<target> r (
import_direct (base, move (tgt), ph2, opt, metadata, loc));
- return make_pair (move (r.name), r.kind);
+ return import_result<scope> {
+ r.target != nullptr ? r.target->base_scope ().root_scope () : nullptr,
+ move (r.name),
+ r.kind};
}
pair<name, optional<dir_path>> r (
@@ -2829,6 +3010,7 @@ namespace build2
if (!r.second || r.second->empty ())
{
names ns;
+ const target* t (nullptr);
if (r.first.empty ())
{
@@ -2844,18 +3026,25 @@ namespace build2
//
if (ns.back ().qualified ())
{
- if (ph2)
+ if (ns.back ().type == "buildfile")
+ {
+ assert (ph2);
+ ns = import2_buildfile (ctx, move (ns), opt && !r.second, loc);
+ }
+ else if (ph2)
{
// This is tricky: we only want the optional semantics for the
// fallback case.
//
- if (const target* t = import (ctx,
- base.find_prerequisite_key (ns, loc),
- *ph2,
- opt && !r.second /* optional */,
- nullopt /* metadata */,
- false /* existing */,
- loc))
+ t = import2 (ctx,
+ base, ns,
+ *ph2,
+ opt && !r.second /* optional */,
+ nullopt /* metadata */,
+ false /* existing */,
+ loc);
+
+ if (t != nullptr)
{
// Note that here r.first was still project-qualified and we
// have no choice but to call as_name(). This shouldn't cause
@@ -2871,30 +3060,32 @@ namespace build2
}
}
- return make_pair (
+ return import_result<scope> {
+ t != nullptr ? t->base_scope ().root_scope () : nullptr,
move (ns),
- r.second.has_value () ? import_kind::adhoc : import_kind::fallback);
+ r.second.has_value () ? import_kind::adhoc : import_kind::fallback};
}
import_kind k (r.first.absolute ()
? import_kind::adhoc
: import_kind::normal);
- return make_pair (
- import_load (base.ctx, move (r), false /* metadata */, loc).first,
- k);
+ pair<names, const scope&> p (
+ import_load (base.ctx, move (r), false /* metadata */, loc));
+
+ return import_result<scope> {&p.second, move (p.first), k};
}
const target*
- import (context& ctx,
- const prerequisite_key& pk,
- const string& hint,
- bool opt,
- const optional<string>& meta,
- bool exist,
- const location& loc)
+ import2 (context& ctx,
+ const prerequisite_key& pk,
+ const string& hint,
+ bool opt,
+ const optional<string>& meta,
+ bool exist,
+ const location& loc)
{
- tracer trace ("import");
+ tracer trace ("import2");
// Neither hint nor metadata can be requested for existing.
//
@@ -2913,7 +3104,7 @@ namespace build2
{
assert (pk.scope != nullptr);
- // Note: similar to/inspired by match_rule().
+ // Note: similar to/inspired by match_rule_impl().
//
// Search scopes outwards, stopping at the project root.
//
@@ -3014,7 +3205,7 @@ namespace build2
auto df = make_diag_frame (
[&proj, &tt, &tk] (const diag_record& dr)
{
- import_suggest (dr, proj, tt, *tk.name, false, "alternative ");
+ import_suggest (dr, proj, &tt, *tk.name, false, "alternative ");
});
if (!(md = extract_metadata (pp, *meta, opt, loc)))
@@ -3047,6 +3238,8 @@ namespace build2
return *t;
}
+ // NOTE: see similar code in import2() below if changing anything here.
+
if (opt || exist)
return nullptr;
@@ -3059,7 +3252,135 @@ namespace build2
else
// Use metadata as proxy for immediate import.
//
- import_suggest (dr, proj, tt, *tk.name, meta && hint.empty ());
+ import_suggest (dr, proj, &tt, *tk.name, meta && hint.empty ());
+
+ dr << endf;
+ }
+
+ // As above but with scope/ns instead of pk. This version deals with the
+ // unknown target type case.
+ //
+ static const target*
+ import2 (context& ctx,
+ const scope& base, names& ns,
+ const string& hint,
+ bool opt,
+ const optional<string>& meta,
+ bool exist,
+ const location& loc)
+ {
+ // If we have a rule hint, then it's natural to expect this target type is
+ // known to the importing project. Ditto for project-less import.
+ //
+ const target_type* tt (nullptr);
+ if (hint.empty ())
+ {
+ size_t n;
+ if ((n = ns.size ()) != 0 && n == (ns[0].pair ? 2 : 1))
+ {
+ const name& n (ns.front ());
+
+ if (n.typed () && !n.proj->empty ())
+ {
+ tt = base.find_target_type (n.type);
+
+ if (tt == nullptr)
+ {
+ // A subset of code in the above version of import2().
+ //
+ if (opt || exist)
+ return nullptr;
+
+ diag_record dr;
+ dr << fail (loc) << "unable to import target " << ns;
+ import_suggest (dr, *n.proj, nullptr, string (), meta.has_value ());
+ }
+ }
+ }
+ }
+
+ return import2 (ctx,
+ base.find_prerequisite_key (ns, loc, tt),
+ hint,
+ opt,
+ meta,
+ exist,
+ loc);
+ }
+
+ static names
+ import2_buildfile (context&, names&& ns, bool opt, const location& loc)
+ {
+ tracer trace ("import2_buildfile");
+
+ assert (ns.size () == 1);
+ name n (move (ns.front ()));
+
+ // Our approach doesn't work for targets without a project so let's fail
+ // hard, even if optional.
+ //
+ if (!n.proj || n.proj->empty ())
+ fail (loc) << "unable to import target " << n << " without project name";
+
+ while (!build_install_buildfile.empty ()) // Breakout loop.
+ {
+ path f (build_install_buildfile /
+ dir_path (n.proj->string ()) /
+ n.dir /
+ n.value);
+
+ // See if we need to try with extensions.
+ //
+ bool ext (path_traits::find_extension (n.value) == string::npos &&
+ n.value != std_buildfile_file.string () &&
+ n.value != alt_buildfile_file.string ());
+
+ if (ext)
+ {
+ f += '.';
+ f += std_build_ext;
+ }
+
+ if (!exists (f))
+ {
+ l6 ([&]{trace << "tried " << f;});
+
+ if (ext)
+ {
+ f.make_base ();
+ f += '.';
+ f += alt_build_ext;
+
+ if (!exists (f))
+ {
+ l6 ([&]{trace << "tried " << f;});
+ break;
+ }
+ }
+ else
+ break;
+ }
+
+ // Split the path into the target.
+ //
+ ns = {name (f.directory (), move (n.type), f.leaf ().string ())};
+ return move (ns);
+ }
+
+ if (opt)
+ return names {};
+
+ diag_record dr;
+ dr << fail (loc) << "unable to import target " << n;
+
+ import_suggest (dr, *n.proj, nullptr /* tt */, n.value, false);
+
+ if (build_install_buildfile.empty ())
+ dr << info << "no exported buildfile installation location is "
+ << "configured in build2";
+ else
+ dr << info << "exported buildfile installation location is "
+ << build_install_buildfile;
dr << endf;
}
@@ -3081,11 +3402,13 @@ namespace build2
l5 ([&]{trace << tgt << " from " << base << " for " << what;});
- assert ((!opt || ph2) && (!metadata || ph2));
+ assert ((!opt || ph2) && (!metadata || ph2) && tgt.type != "buildfile");
context& ctx (base.ctx);
assert (ctx.phase == run_phase::load);
+ scope& root (*base.root_scope ());
+
// Use the original target name as metadata key.
//
auto meta (metadata ? optional<string> (tgt.value) : nullopt);
@@ -3093,6 +3416,12 @@ namespace build2
names ns, rns;
import_kind k;
const target* pt (nullptr);
+ const scope* iroot (nullptr); // Imported root scope.
+
+ // Original project/name as imported for diagnostics.
+ //
+ string oname (meta ? tgt.value : string ());
+ project_name oproj (meta && tgt.proj ? *tgt.proj : project_name ());
pair<name, optional<dir_path>> r (
import_search (new_value,
@@ -3124,13 +3453,13 @@ namespace build2
// This is tricky: we only want the optional semantics for the
// fallback case.
//
- pt = import (ctx,
- base.find_prerequisite_key (ns, loc),
- *ph2,
- opt && !r.second,
- meta,
- false /* existing */,
- loc);
+ pt = import2 (ctx,
+ base, ns,
+ *ph2,
+ opt && !r.second,
+ meta,
+ false /* existing */,
+ loc);
}
if (pt == nullptr)
@@ -3147,6 +3476,8 @@ namespace build2
// It's a bit fuzzy in which cases we end up here. So for now we keep
// the original if it's absolute and call as_name() otherwise.
//
+ // @@ TODO: resolve iroot or assume target type should be known?
+ //
if (r.first.absolute ())
rns.push_back (r.first);
@@ -3156,14 +3487,30 @@ namespace build2
else
{
k = r.first.absolute () ? import_kind::adhoc : import_kind::normal;
- rns = ns = import_load (base.ctx, move (r), metadata, loc).first;
+
+ pair<names, const scope&> p (
+ import_load (base.ctx, move (r), metadata, loc));
+
+ rns = ns = move (p.first);
+ iroot = &p.second;
}
if (pt == nullptr)
{
+ // Import (more precisely, alias) the target type into this project
+ // if not known.
+ //
+ const target_type* tt (nullptr);
+ if (iroot != nullptr && !ns.empty ())
+ {
+ const name& n (ns.front ());
+ if (n.typed ())
+ tt = &import_target_type (root, *iroot, n.type, loc);
+ }
+
// Similar logic to perform's search(). Note: modifies ns.
//
- target_key tk (base.find_target_key (ns, loc));
+ target_key tk (base.find_target_key (ns, loc, tt));
pt = ctx.targets.find (tk, trace);
if (pt == nullptr)
fail (loc) << "unknown imported target " << tk;
@@ -3180,6 +3527,13 @@ namespace build2
//
if (meta)
{
+ auto df = make_diag_frame (
+ [&oproj, &oname, &t] (const diag_record& dr)
+ {
+ if (!oproj.empty ())
+ import_suggest (dr, oproj, &t.type (), oname, false, "alternative ");
+ });
+
// The export.metadata value should start with the version followed by
// the metadata variable prefix.
//
@@ -3204,7 +3558,7 @@ namespace build2
catch (const invalid_argument& e)
{
fail (loc) << "invalid metadata version in imported target " << t
- << ": " << e;
+ << ": " << e << endf;
}
if (ver != 1)
@@ -3238,10 +3592,8 @@ namespace build2
//
if (const auto* e = cast_null<strings> (t.vars[pfx + ".environment"]))
{
- scope& rs (*base.root_scope ());
-
for (const string& v: *e)
- config::save_environment (rs, v);
+ config::save_environment (root, v);
}
}
else
@@ -3251,6 +3603,31 @@ namespace build2
return import_result<target> {pt, move (rns), k};
}
+ path
+ import_buildfile (scope& bs, name n, bool opt, const location& loc)
+ {
+ names r (import (bs,
+ move (n),
+ string () /* phase2 */,
+ opt,
+ false /* metadata */,
+ loc).name);
+
+ path p;
+ if (!r.empty ()) // Optional not found.
+ {
+ // Note: see also parse_import().
+ //
+ assert (r.size () == 1); // See import_load() for details.
+ name& n (r.front ());
+ p = n.dir / n.value; // Should already include extension.
+ }
+ else
+ assert (opt);
+
+ return p;
+ }
+
ostream&
operator<< (ostream& o, const import_result<exe>& r)
{
diff --git a/libbuild2/file.hxx b/libbuild2/file.hxx
index 5847498..36e4c00 100644
--- a/libbuild2/file.hxx
+++ b/libbuild2/file.hxx
@@ -19,6 +19,28 @@ namespace build2
class lexer;
class parser;
+ // The following filesystem entries in the build/ subdirectory are reserved
+ // by the build2 core:
+ //
+ // build/ -- build2 core-internal build state (e.g., recipes)
+ // bootstrap/ -- bootstrap state and hooks
+ // bootstrap.build -- bootstrap buildfile
+ // root/ -- root load hooks
+ // root.build -- root buildfile
+ // export.build -- export stub
+ // export/ -- exported buildfiles
+ //
+ // The build/, bootstrap/, root/, and config.build entries are in .gitignore
+ // as generated by bdep-new.
+ //
+ // The rest of the filesystem entries are shared between the project and the
+ // modules that it loads. In particular, if a project loads a module named
+ // <mod>, then the <mod>.build, <mod>/, *.<mod> entries (spelled in any
+ // case) are reserved to this module and should not be used by the project
+ // unless explicitly allowed by the module. By convention, <mod>/build/ is
+ // for module-internal build state (e.g., C++ modules side-build) and is
+ // .gitignore'ed.
+ //
LIBBUILD2_SYMEXPORT extern const dir_path std_build_dir; // build/
// build/root.build
@@ -339,7 +361,9 @@ namespace build2
// Note also that we return names rather than a single name: while normally
// it will be a single target name, it can be an out-qualified pair (if
// someone wants to return a source target) but it can also be a non-target
- // since we don't restrict what users can import/export.
+ // since we don't restrict what users can import/export. If name has
+ // buildfile type, then the result is an absolute buildfile target to be
+ // included (once) at the point of importation.
//
// Finally, note that import is (and should be kept) idempotent or, more
// precisely, "accumulatively idempotent" in that additional steps may be
@@ -347,7 +371,18 @@ namespace build2
//
enum class import_kind {adhoc, normal, fallback};
- LIBBUILD2_SYMEXPORT pair<names, import_kind>
+ template <typename T>
+ struct import_result
+ {
+ const T* target; // Note: T can be imported target or imported scope.
+ names name;
+ import_kind kind;
+ };
+
+ // Note that import_result<scope>::target may be NULL even if name is not
+ // empty (e.g., an out-of-project target imported via phase 2).
+ //
+ LIBBUILD2_SYMEXPORT import_result<scope>
import (scope& base,
name,
const optional<string>& phase2,
@@ -358,7 +393,7 @@ namespace build2
// Import phase 2.
//
const target&
- import (context&, const prerequisite_key&);
+ import2 (context&, const prerequisite_key&);
// As above but import the target "here and now" without waiting for phase 2
// (and thus omitting any rule-specific logic). This version of import is,
@@ -383,13 +418,15 @@ namespace build2
// target::as_name() for details) as well as the kind of import that was
// performed.
//
- template <typename T>
- struct import_result
- {
- const T* target;
- names name;
- import_kind kind;
- };
+ // Note: cannot be used to import buildfile targets (use import_buildfile()
+ // instead).
+
+ // Print import_direct<exe>() result either as a target for a normal import
+ // or as a process path for ad hoc and fallback imports. Normally used in
+ // build system modules to print the configuration report.
+ //
+ LIBBUILD2_SYMEXPORT ostream&
+ operator<< (ostream&, const import_result<exe>&);
import_result<target>
import_direct (scope& base,
@@ -433,12 +470,16 @@ namespace build2
bool, bool, bool,
const location&, const char* = "import");
- // Print import_direct<exe>() result either as a target for a normal import
- // or as a process path for ad hoc and fallback imports. Normally used in
- // build system modules to print the configuration report.
+ // The import_direct() equivalent for importing buildfile targets. Return
+ // empty name if optional and not found. Note that the returned file path is
+ // not necessarily checked for existence so sourcing it may still fail.
//
- LIBBUILD2_SYMEXPORT ostream&
- operator<< (ostream&, const import_result<exe>&);
+ // Note also that this function can be used for an ad hoc import by passing
+ // an absolute target name as would be returned by the normal import (can be
+ // useful for importing own buildfiles).
+ //
+ LIBBUILD2_SYMEXPORT path
+ import_buildfile (scope& base, name, bool optional, const location&);
// As import phase 2 but only imports as an already existing target. But
// unlike it, this function can be called during the load and execute
@@ -480,6 +521,27 @@ namespace build2
bool metadata,
const location&);
+ // Import (more precisely, alias as if using the `define =` syntax) the
+ // target type from imported project (iroot) into this project (root). If
+ // the target type with this name is already defined in this project, then
+ // make sure it is the same as in the imported project.
+ //
+ LIBBUILD2_SYMEXPORT const target_type&
+ import_target_type (scope& root,
+ const scope& iroot, const string&,
+ const location&);
+
+ // Suggest appropriate ways to import the specified target (as type and
+ // name) from the specified project.
+ //
+ void
+ import_suggest (const diag_record&,
+ const project_name&,
+ const target_type*,
+ const string& name,
+ bool rule_hint,
+ const char* qual = nullptr);
+
// Create a build system project in the specified directory.
//
LIBBUILD2_SYMEXPORT void
diff --git a/libbuild2/file.ixx b/libbuild2/file.ixx
index 43c46c9..dc39bcb 100644
--- a/libbuild2/file.ixx
+++ b/libbuild2/file.ixx
@@ -22,16 +22,16 @@ namespace build2
}
LIBBUILD2_SYMEXPORT const target*
- import (context&,
- const prerequisite_key&,
- const string& hint,
- bool optional_,
- const optional<string>& metadata, // False or metadata key.
- bool existing,
- const location&);
+ import2 (context&,
+ const prerequisite_key&,
+ const string& hint,
+ bool optional_,
+ const optional<string>& metadata, // False or metadata key.
+ bool existing,
+ const location&);
inline const target&
- import (context& ctx, const prerequisite_key& pk)
+ import2 (context& ctx, const prerequisite_key& pk)
{
assert (ctx.phase == run_phase::match);
@@ -40,7 +40,7 @@ namespace build2
// Looks like the only way to do this is to keep location in name and
// then in prerequisite. Perhaps one day...
//
- return *import (ctx, pk, string (), false, nullopt, false, location ());
+ return *import2 (ctx, pk, string (), false, nullopt, false, location ());
}
inline import_result<target>
@@ -98,6 +98,6 @@ namespace build2
inline const target*
import_existing (context& ctx, const prerequisite_key& pk)
{
- return import (ctx, pk, string (), false, nullopt, true, location ());
+ return import2 (ctx, pk, string (), false, nullopt, true, location ());
}
}
diff --git a/libbuild2/filesystem.cxx b/libbuild2/filesystem.cxx
index 196d9bd..f340dd7 100644
--- a/libbuild2/filesystem.cxx
+++ b/libbuild2/filesystem.cxx
@@ -291,7 +291,7 @@ namespace build2
{
try
{
- for (const dir_entry& de: dir_iterator (d, false /* ignore_dangling */))
+ for (const dir_entry& de: dir_iterator (d, dir_iterator::no_follow))
{
// The .buildignore filesystem entry should be of the regular file
// type.
diff --git a/libbuild2/filesystem.hxx b/libbuild2/filesystem.hxx
index 2998cec..7b45a08 100644
--- a/libbuild2/filesystem.hxx
+++ b/libbuild2/filesystem.hxx
@@ -22,6 +22,8 @@
//
namespace build2
{
+ using butl::entry_type;
+
using butl::auto_rmfile;
using butl::auto_rmdir;
diff --git a/libbuild2/function.cxx b/libbuild2/function.cxx
index ae69730..3110547 100644
--- a/libbuild2/function.cxx
+++ b/libbuild2/function.cxx
@@ -213,7 +213,7 @@ namespace build2
if (f->arg_types[i] &&
*f->arg_types[i] == nullptr &&
args[i].type != nullptr)
- untypify (args[i]);
+ untypify (args[i], true /* reduce */);
}
}
@@ -348,20 +348,25 @@ namespace build2
// Static-initialize the function map and populate with builtin functions.
//
+ // NOTE: remember to also arrange for automatic documentation extraction in
+ // doc/buildfile!
void bool_functions (function_map&); // functions-bool.cxx
void builtin_functions (function_map&); // functions-builtin.cxx
void filesystem_functions (function_map&); // functions-filesystem.cxx
void integer_functions (function_map&); // functions-integer.cxx
+ void json_functions (function_map&); // functions-json.cxx
void name_functions (function_map&); // functions-name.cxx
void path_functions (function_map&); // functions-path.cxx
void process_functions (function_map&); // functions-process.cxx
void process_path_functions (function_map&); // functions-process-path.cxx
void regex_functions (function_map&); // functions-regex.cxx
void string_functions (function_map&); // functions-string.cxx
+ void target_functions (function_map&); // functions-target.cxx
void target_triplet_functions (function_map&); // functions-target-triplet.cxx
void project_name_functions (function_map&); // functions-project-name.cxx
+
void
insert_builtin_functions (function_map& m)
{
@@ -369,12 +374,14 @@ namespace build2
builtin_functions (m);
filesystem_functions (m);
integer_functions (m);
+ json_functions (m);
name_functions (m);
path_functions (m);
process_functions (m);
process_path_functions (m);
regex_functions (m);
string_functions (m);
+ target_functions (m);
target_triplet_functions (m);
project_name_functions (m);
}
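Every functions-*.cxx translation unit exposes a single <family>_functions(function_map&) entry point which insert_builtin_functions() calls, and the new json and target families plug into the same scheme. A hypothetical functions-foo.cxx added the same way could look like this (the foo family and $foo.twice() are made up for illustration; a real addition would also need to be listed in insert_builtin_functions() above and, per the NOTE, in doc/buildfile):

  #include <libbuild2/function.hxx>

  namespace build2
  {
    void
    foo_functions (function_map& m)
    {
      function_family f (m, "foo");

      // $foo.twice(<uint64>)
      //
      // Return the argument multiplied by two.
      //
      f["twice"] += [] (uint64_t i) {return i * 2;};
    }
  }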
diff --git a/libbuild2/function.hxx b/libbuild2/function.hxx
index 323ac41..cda856a 100644
--- a/libbuild2/function.hxx
+++ b/libbuild2/function.hxx
@@ -4,8 +4,9 @@
#ifndef LIBBUILD2_FUNCTION_HXX
#define LIBBUILD2_FUNCTION_HXX
-#include <utility> // index_sequence
-#include <type_traits> // aligned_storage
+#include <cstddef> // max_align_t
+#include <utility> // index_sequence
+#include <type_traits> // is_*
#include <libbuild2/types.hxx>
#include <libbuild2/forward.hxx>
@@ -133,8 +134,8 @@ namespace build2
// Auxiliary data storage. Note that it is expected to be trivially
// copyable and destructible.
//
- std::aligned_storage<sizeof (void*) * 3>::type data;
- static const size_t data_size = sizeof (decltype (data));
+ static const size_t data_size = sizeof (void*) * 3;
+ alignas (std::max_align_t) unsigned char data[data_size];
function_overload (const char* an,
size_t mi, size_t ma, types ts,
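The auxiliary data buffer switches from std::aligned_storage (deprecated in C++23) to a plain alignas-ed byte array of the same size, which is maximally aligned and therefore suitable for any small trivially-copyable payload placed into it. A standalone sketch of the idiom (the slot and payload names are illustrative, not build2's types):

  #include <cstddef> // std::size_t, std::max_align_t
  #include <new>     // placement new

  struct slot
  {
    // Raw storage for a small, trivially copyable and destructible payload.
    //
    static const std::size_t data_size = sizeof (void*) * 3;
    alignas (std::max_align_t) unsigned char data[data_size];
  };

  int main ()
  {
    struct payload {void* a; void* b;};
    static_assert (sizeof (payload) <= slot::data_size, "payload too large");

    slot s;
    new (s.data) payload {nullptr, nullptr}; // Trivial: no destructor call needed.
  }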
diff --git a/libbuild2/function.test.cxx b/libbuild2/function.test.cxx
index 7ce7ad3..37ed5ff 100644
--- a/libbuild2/function.test.cxx
+++ b/libbuild2/function.test.cxx
@@ -115,7 +115,7 @@ namespace build2
else if (!a.empty ())
{
names storage;
- cout << reverse (a, storage);
+ cout << reverse (a, storage, true /* reduce */);
}
cout << endl;
}
diff --git a/libbuild2/functions-bool.cxx b/libbuild2/functions-bool.cxx
index 1ae89d2..bb2fd3f 100644
--- a/libbuild2/functions-bool.cxx
+++ b/libbuild2/functions-bool.cxx
@@ -15,6 +15,12 @@ namespace build2
// $string(<bool>)
//
+ // Convert a boolean value to a string literal `true` or `false`.
+ //
+
+ // Note that we don't handle NULL values for this type since it has no
+ // empty representation.
+ //
f["string"] += [](bool b) {return b ? "true" : "false";};
}
}
diff --git a/libbuild2/functions-builtin.cxx b/libbuild2/functions-builtin.cxx
index 6bf2264..e24ff8e 100644
--- a/libbuild2/functions-builtin.cxx
+++ b/libbuild2/functions-builtin.cxx
@@ -37,11 +37,17 @@ namespace build2
{
function_family f (m, "builtin");
- // Note that we may want to extend the scope argument to a more general
- // notion of "lookup context" (scope, target, prerequisite).
+ // $defined(<variable>)
+ //
+ // Return true if the specified variable is defined in the calling scope or
+ // any outer scopes.
//
// Note that this function is not pure.
//
+
+ // Note that we may want to extend the scope argument to a more general
+ // notion of "lookup context" (scope, target, prerequisite).
+ //
f.insert ("defined", false) += [](const scope* s, names name)
{
if (s == nullptr)
@@ -50,7 +56,17 @@ namespace build2
return (*s)[convert<string> (move (name))].defined ();
};
- // Return variable visibility if it has been entered and NULL otherwise.
+ // $visibility(<variable>)
+ //
+ // Return variable visibility if it is known and `null` otherwise.
+ //
+ // Possible visibility values are:
+ //
+ // global -- all outer scopes
+ // project -- this project (no outer projects)
+ // scope -- this scope (no outer scopes)
+ // target -- target and target type/pattern-specific
+ // prereq -- prerequisite-specific
//
// Note that this function is not pure.
//
@@ -67,23 +83,101 @@ namespace build2
: nullopt);
};
+ // $type(<value>)
+ //
+ // Return the type name of the value or empty string if untyped.
+ //
f["type"] += [](value* v) {return v->type != nullptr ? v->type->name : "";};
+
+ // $null(<value>)
+ //
+ // Return true if the value is `null`.
+ //
f["null"] += [](value* v) {return v->null;};
+
+ // $empty(<value>)
+ //
+ // Return true if the value is empty.
+ //
f["empty"] += [](value* v) {return v->null || v->empty ();};
+
+ // $first(<value>[, <not_pair>])
+ // $second(<value>[, <not_pair>])
+ //
+ // Return the first or the second half of a pair, respectively. If a value
+ // is not a pair, then return `null` unless the <not_pair> argument is
+ // `true`, in which case return the non-pair value.
+ //
+ // If multiple pairs are specified, then return the list of first/second
+ // halves. If an element is not a pair, then omit it from the resulting
+ // list unless the <not_pair> argument is `true`, in which case add the
+ // non-pair element to the list.
+ //
+ f["first"] += [] (names ns, optional<value> not_pair)
+ {
+ // @@ TODO: would be nice to return typed half if passed typed value.
+
+ bool np (not_pair && convert<bool> (move (*not_pair)));
+
+ names r;
+ for (auto i (ns.begin ()), e (ns.end ()); i != e; )
+ {
+ name& f (*i++);
+ name* s (f.pair ? &*i++ : nullptr);
+
+ if (s != nullptr || np)
+ {
+ f.pair = '\0';
+ r.push_back (move (f));
+ }
+ else if (ns.size () == 1)
+ return value (nullptr); // Single non-pair.
+ }
+
+ return value (move (r));
+ };
+
+ f["second"] += [] (names ns, optional<value> not_pair)
+ {
+ bool np (not_pair && convert<bool> (move (*not_pair)));
+
+ names r;
+ for (auto i (ns.begin ()), e (ns.end ()); i != e; )
+ {
+ name& f (*i++);
+ name* s (f.pair ? &*i++ : nullptr);
+
+ if (s != nullptr)
+ r.push_back (move (*s));
+ else if (np)
+ r.push_back (move (f));
+ else if (ns.size () == 1)
+ return value (nullptr); // Single non-pair.
+ }
+
+ return value (move (r));
+ };
+
+ // Leave this one undocumented for now since it's unclear why anyone would
+ // want to use it currently (we don't yet have any function composition
+ // facilities).
+ //
f["identity"] += [](value* v) {return move (*v);};
- // Quote a value returning its string representation. If escape is true,
- // then also escape (with a backslash) the quote characters being added
- // (this is useful if the result will be re-parsed, for example as a
- // Testscript command line).
+ // $quote(<value>[, <escape>])
+ //
+ // Quote the value returning its string representation. If <escape> is
+ // `true`, then also escape (with a backslash) the quote characters being
+ // added (this is useful if the result will be re-parsed, for example as a
+ // script command line).
//
f["quote"] += [](value* v, optional<value> escape)
{
if (v->null)
return string ();
- untypify (*v); // Reverse to names.
+ untypify (*v, true /* reduce */); // Reverse to names.
ostringstream os;
to_stream (os,
@@ -94,13 +188,13 @@ namespace build2
return os.str ();
};
- // getenv
+ // $getenv(<name>)
//
- // Return NULL if the environment variable is not set, untyped value
- // otherwise.
+ // Get the value of the environment variable. Return `null` if the
+ // environment variable is not set.
//
// Note that if the build result can be affected by the variable being
- // queried, then it should be reported with the config.environment
+ // queried, then it should be reported with the `config.environment`
// directive.
//
// Note that this function is not pure.
diff --git a/libbuild2/functions-filesystem.cxx b/libbuild2/functions-filesystem.cxx
index ef7bfc5..665a0f3 100644
--- a/libbuild2/functions-filesystem.cxx
+++ b/libbuild2/functions-filesystem.cxx
@@ -7,6 +7,7 @@
#include <libbuild2/variable.hxx>
using namespace std;
+using namespace butl;
namespace build2
{
@@ -29,12 +30,27 @@ namespace build2
return true;
};
+ auto dangling = [] (const dir_entry& de)
+ {
+ bool sl (de.ltype () == entry_type::symlink);
+
+ warn << "skipping "
+ << (sl ? "dangling symlink" : "inaccessible entry") << ' '
+ << de.base () / de.path ();
+
+ return true;
+ };
+
// Print paths "as is" in the diagnostics.
//
try
{
if (pattern.absolute ())
- path_search (pattern, add);
+ path_search (pattern,
+ add,
+ dir_path () /* start */,
+ path_match_flags::follow_symlinks,
+ dangling);
else
{
// An absolute start directory must be specified for the relative
@@ -54,7 +70,11 @@ namespace build2
<< "' is relative";
}
- path_search (pattern, add, *start);
+ path_search (pattern,
+ add,
+ *start,
+ path_match_flags::follow_symlinks,
+ dangling);
}
}
catch (const system_error& e)
@@ -83,14 +103,19 @@ namespace build2
function_family f (m, "filesystem");
- // path_search
+ // $path_search(<pattern>[, <start-dir>])
//
- // Return filesystem paths that match the pattern. If the pattern is an
- // absolute path, then the start directory is ignored (if present).
- // Otherwise, the start directory must be specified and be absolute.
+ // Return filesystem paths that match the shell-like wildcard pattern. If
+ // the pattern is an absolute path, then the start directory is ignored
+ // (if present). Otherwise, the start directory must be specified and be
+ // absolute.
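+ //
+ // For example (illustrative):
+ //
+ //   $path_search('*.txt', $src_base)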
//
// Note that this function is not pure.
//
+
+ // @@ In the future we may want to add a flag that controls the
+ // dangling/inaccessible treatment.
+ //
{
auto e (f.insert ("path_search", false));
@@ -115,6 +140,5 @@ namespace build2
convert<dir_path> (move (start)));
};
}
-
}
}
diff --git a/libbuild2/functions-integer.cxx b/libbuild2/functions-integer.cxx
index ddfc250..8f9e2cf 100644
--- a/libbuild2/functions-integer.cxx
+++ b/libbuild2/functions-integer.cxx
@@ -11,54 +11,18 @@ namespace build2
extern bool
functions_sort_flags (optional<names>); // functions-builtin.cxx
- static const char hex_digits[] = "0123456789abcdef";
-
static string
to_string (uint64_t i, optional<value> base, optional<value> width)
{
- uint64_t b (base ? convert<uint64_t> (move (*base)) : 10);
+ int b (base ?
+ static_cast<int> (convert<uint64_t> (move (*base)))
+ : 10);
+
size_t w (width
? static_cast<size_t> (convert<uint64_t> (move (*width)))
: 0);
- // One day we can switch to C++17 std::to_chars().
- //
- string r;
- switch (b)
- {
- case 10:
- {
- r = to_string (i);
- if (w > r.size ())
- r.insert (0, w - r.size (), '0');
- break;
- }
- case 16:
- {
- r.reserve (18);
- r += "0x";
-
- for (size_t j (64); j != 0; )
- {
- j -= 4;
- size_t d ((i >> j) & 0x0f);
-
- // Omit leading zeros but watch out for the i==0 corner case.
- //
- if (d != 0 || r.size () != 2 || j == 0)
- r += hex_digits[d];
- }
-
- if (w > r.size () - 2)
- r.insert (2, w - (r.size () - 2), '0');
-
- break;
- }
- default:
- throw invalid_argument ("unsupported base");
- }
-
- return r;
+ return (to_string (i, b, w));
}
void
@@ -69,6 +33,19 @@ namespace build2
// $string(<int64>)
// $string(<uint64>[, <base>[, <width>]])
//
+ // Convert an integer to a string. For unsigned integers we can specify
+ // the desired base and width. For example:
+ //
+ // x = [uint64] 0x0000ffff
+ //
+ // c.poptions += "-DOFFSET=$x" # -DOFFSET=65535
+ // c.poptions += "-DOFFSET=$string($x, 16)" # -DOFFSET=0xffff
+ // c.poptions += "-DOFFSET=$string($x, 16, 8)" # -DOFFSET=0x0000ffff
+ //
+
+ // Note that we don't handle NULL values for these types since they have no
+ // empty representation.
+ //
f["string"] += [](int64_t i) {return to_string (i);};
f["string"] += [](uint64_t i, optional<value> base, optional<value> width)
@@ -79,9 +56,10 @@ namespace build2
// $integer_sequence(<begin>, <end>[, <step>])
//
// Return the list of uint64 integers starting from <begin> (including) to
- // <end> (excluding) with the specified <step> or 1 if unspecified. If
+ // <end> (excluding) with the specified <step> or `1` if unspecified. If
// <begin> is greater than <end>, empty list is returned.
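+ //
+ // For example:
+ //
+ //   $integer_sequence(1, 5)     # 1 2 3 4
+ //   $integer_sequence(0, 10, 3) # 0 3 6 9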
//
+
// Note that currently negative numbers are not supported but this could
// be handled if required (e.g., by returning int64s in this case).
//
@@ -120,7 +98,7 @@ namespace build2
//
// The following flags are supported:
//
- // dedup - in addition to sorting also remove duplicates
+ // dedup - in addition to sorting also remove duplicates
//
f["sort"] += [](int64s v, optional<names> fs)
{
@@ -161,7 +139,7 @@ namespace build2
// $find_index(<ints>, <int>)
//
// Return the index of the first element in the integer sequence that is
- // equal to the specified integer or $size(<ints>) if none is found.
+ // equal to the specified integer or `$size(ints)` if none is found.
//
f["find_index"] += [](int64s vs, value v)
{
diff --git a/libbuild2/functions-json.cxx b/libbuild2/functions-json.cxx
new file mode 100644
index 0000000..e06d9a5
--- /dev/null
+++ b/libbuild2/functions-json.cxx
@@ -0,0 +1,335 @@
+// file : libbuild2/functions-json.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <libbuild2/function.hxx>
+#include <libbuild2/variable.hxx>
+
+#ifndef BUILD2_BOOTSTRAP
+# include <libbutl/json/parser.hxx>
+# include <libbutl/json/serializer.hxx>
+#endif
+
+using namespace std;
+
+namespace build2
+{
+ static size_t
+ array_find_index (const json_value& a, value v)
+ {
+ if (a.type != json_type::array)
+ fail << "expected json array instead of " << to_string (a.type)
+ << " as first argument";
+
+ auto b (a.array.begin ()), e (a.array.end ());
+ auto i (find (b, e, convert<json_value> (move (v))));
+ return i != e ? i - b : a.array.size ();
+ };
+
+ void
+ json_functions (function_map& m)
+ {
+ function_family f (m, "json");
+
+ // $value_type(<json>[, <distinguish_numbers>])
+ //
+ // Return the type of a JSON value: `null`, `boolean`, `number`, `string`,
+ // `array`, or `object`. If the <distinguish_numbers> argument is `true`,
+ // then instead of `number` return `signed number`, `unsigned number`, or
+ // `hexadecimal number`.
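+ //
+ // For example (illustrative):
+ //
+ //   $value_type($json.parse('true'))    # boolean
+ //   $value_type($json.parse('[1, 2]'))  # array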
+ //
+ f["value_type"] += [] (json_value v, optional<value> distinguish_numbers)
+ {
+ bool dn (distinguish_numbers &&
+ convert<bool> (move (*distinguish_numbers)));
+
+ return to_string (v.type, dn);
+ };
+
+ // $value_size(<json>)
+ //
+ // Return the size of a JSON value.
+ //
+ // The size of a `null` value is `0`. The size of a simple value
+ // (`boolean`, `number`, or `string`) is `1`. The size of `array` and
+ // `object` values is the number of elements and members, respectively.
+ //
+ // Note that the size of a `string` JSON value is not the length of the
+ // string. To get the length, cast the JSON value to the `string` value
+ // type and call `$string.size()` instead.
+ //
+ f["value_size"] += [] (json_value v) -> size_t
+ {
+ // Note: should be consistent with value_traits<json_value>::empty(),
+ // json_subscript().
+ //
+ switch (v.type)
+ {
+ case json_type::null: return 0;
+ case json_type::boolean:
+ case json_type::signed_number:
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ case json_type::string: break;
+ case json_type::array: return v.array.size ();
+ case json_type::object: return v.object.size ();
+ }
+
+ return 1;
+ };
+
+ // $member_name(<json-member>)
+ //
+ // Return the name of a JSON object member.
+ //
+ f["member_name"] += [] (json_value v)
+ {
+ // A member becomes an object with a single member (see json_reverse()
+ // for details).
+ //
+ if (v.type == json_type::object && v.object.size () == 1)
+ return move (v.object.front ().name);
+
+ fail << "json object member expected instead of " << v.type << endf;
+ };
+
+ // $member_value(<json-member>)
+ //
+ // Return the value of a JSON object member.
+ //
+ f["member_value"] += [] (json_value v)
+ {
+ // A member becomes an object with a single member (see json_reverse()
+ // for details).
+ //
+ if (v.type == json_type::object && v.object.size () == 1)
+ {
+ // Reverse simple JSON values to the corresponding fundamental type
+ // values for consistency with subscript/iteration (see
+ // json_subscript_impl() for background).
+ //
+ json_value& jr (v.object.front ().value);
+
+ switch (jr.type)
+ {
+#if 0
+ case json_type::null: return value (names {});
+#else
+ case json_type::null: return value ();
+#endif
+ case json_type::boolean: return value (jr.boolean);
+ case json_type::signed_number: return value (jr.signed_number);
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number: return value (jr.unsigned_number);
+ case json_type::string: return value (move (jr.string));
+ case json_type::array:
+ case json_type::object: return value (move (jr));
+ }
+ }
+
+ fail << "json object member expected instead of " << v.type << endf;
+ };
+
+ // $object_names(<json-object>)
+ //
+ // Return the list of names in the JSON object. If the JSON `null` is
+ // passed instead, assume it is a missing object and return an empty list.
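+ //
+ // For example (illustrative):
+ //
+ //   $object_names($json.parse('{"a": 1, "b": 2}'))  # a b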
+ //
+ f["object_names"] += [] (json_value o)
+ {
+ names ns;
+
+ if (o.type == json_type::null)
+ ;
+ else if (o.type == json_type::object)
+ {
+ ns.reserve (o.object.size ());
+
+ for (json_member& m: o.object)
+ ns.push_back (name (move (m.name)));
+ }
+ else
+ fail << "expected json object instead of " << to_string (o.type);
+
+ return ns;
+ };
+
+ // $array_size(<json-array>)
+ //
+ // Return the number of elements in the JSON array. If the JSON `null`
+ // value is passed instead, assume it is a missing array and return `0`.
+ //
+ f["array_size"] += [] (json_value a) -> size_t
+ {
+ if (a.type == json_type::null)
+ return 0;
+
+ if (a.type == json_type::array)
+ return a.array.size ();
+
+ fail << "expected json array instead of " << to_string (a.type) << endf;
+ };
+
+ // $array_find(<json-array>, <json>)
+ //
+ // Return true if the JSON array contains the specified JSON value. If the
+ // JSON `null` value is passed instead, assume it is a missing array and
+ // return `false`.
+ //
+ f["array_find"] += [] (json_value a, value v)
+ {
+ if (a.type == json_type::null)
+ return false;
+
+ size_t i (array_find_index (a, move (v)));
+ return i != a.array.size (); // We now know it's an array.
+ };
+
+ // $array_find_index(<json-array>, <json>)
+ //
+ // Return the index of the first element in the JSON array that is equal
+ // to the specified JSON value or `$array_size(<json-array>)` if none is
+ // found. If the JSON `null` value is passed instead, assume it is a
+ // missing array and return `0`.
+ //
+ f["array_find_index"] += [](json_value a, value v) -> size_t
+ {
+ if (a.type == json_type::null)
+ return 0;
+
+ return array_find_index (a, move (v));
+ };
+
+#ifndef BUILD2_BOOTSTRAP
+
+ // @@ Flag to support multi-value (returning it as JSON array)? Then
+ // probably also in $serialize().
+ //
+ // @@ Flag to override duplicates instead of failing?
+
+ // $json.load(<path>)
+ //
+ // Parse the contents of the specified file as JSON input text and return
+ // the result as a value of the `json` type.
+ //
+ // See also `$json.parse()`.
+ //
+ // Note that this function is not pure.
+ //
+ f.insert (".load", false) += [] (names xf)
+ {
+ path f (convert<path> (move (xf)));
+
+ try
+ {
+ ifdstream is (f);
+ json_parser p (is, f.string ());
+ return json_value (p);
+ }
+ catch (const invalid_json_input& e)
+ {
+ fail (location (f, e.line, e.column)) << "invalid json input: " << e <<
+ info << "byte offset " << e.position << endf;
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to read from " << f << ": " << e << endf;
+ }
+ };
+
+ // $json.parse(<text>)
+ //
+ // Parse the specified JSON input text and return the result as a value of
+ // the `json` type.
+ //
+ // See also `$json.load()` and `$json.serialize()`.
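+ //
+ // For example (illustrative):
+ //
+ //   j = $json.parse('{"x": 1, "y": [2, 3]}')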
+ //
+ f[".parse"] += [] (names text)
+ {
+ string t (convert<string> (move (text)));
+
+ try
+ {
+ json_parser p (t, nullptr /* name */);
+ return json_value (p);
+ }
+ catch (const invalid_json_input& e)
+ {
+ fail << "invalid json input: " << e <<
+ info << "line " << e.line
+ << ", column " << e.column
+ << ", byte offset " << e.position << endf;
+ }
+ };
+
+ // $serialize(<json>[, <indentation>])
+ //
+ // Serialize the specified JSON value and return the resulting JSON output
+ // text.
+ //
+ // The optional <indentation> argument specifies the number of indentation
+ // spaces that should be used for pretty-printing. If `0` is passed, then
+ // no pretty-printing is performed. The default is `2` spaces.
+ //
+ // See also `$json.parse()`.
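+ //
+ // For example (illustrative; `j` is assumed to hold a `json` value):
+ //
+ //   $serialize($j)     # Pretty-printed with 2-space indentation.
+ //   $serialize($j, 0)  # Single line, no pretty-printing.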
+ //
+ f["serialize"] += [] (json_value v, optional<value> indentation)
+ {
+ uint64_t i (indentation ? convert<uint64_t> (*indentation) : 2);
+
+ try
+ {
+ // For the diagnostics test.
+ //
+#if 0
+ if (v.type == json_type::string && v.string == "deadbeef")
+ {
+ v.string[4] = 0xe0;
+ v.string[5] = 0xe0;
+ }
+#endif
+
+ string o;
+ json_buffer_serializer s (o, i);
+ v.serialize (s);
+ return o;
+ }
+ catch (const invalid_json_output& e)
+ {
+ diag_record dr;
+ dr << fail << "invalid json value: " << e;
+
+ if (e.event)
+ dr << info << "while serializing " << to_string (*e.event);
+
+ if (e.offset != string::npos)
+ dr << info << "offending byte offset " << e.offset;
+
+ dr << endf;
+ }
+ };
+#endif
+
+ // $size(<json-set>)
+ // $size(<json-map>)
+ //
+ // Return the number of elements in the sequence.
+ //
+ f["size"] += [] (set<json_value> v) {return v.size ();};
+ f["size"] += [] (map<json_value, json_value> v) {return v.size ();};
+
+ // $keys(<json-map>)
+ //
+ // Return the list of keys in a json map as a json array.
+ //
+ // Note that the result is sorted in ascending order.
+ //
+ f["keys"] += [](map<json_value, json_value> v)
+ {
+ json_value r (json_type::array);
+ r.array.reserve (v.size ());
+ for (pair<const json_value, json_value>& p: v)
+ r.array.push_back (p.first); // @@ PERF: use C++17 map::extract() to steal.
+ return r;
+ };
+ }
+}
diff --git a/libbuild2/functions-name.cxx b/libbuild2/functions-name.cxx
index 9011cc0..456f85b 100644
--- a/libbuild2/functions-name.cxx
+++ b/libbuild2/functions-name.cxx
@@ -39,18 +39,40 @@ namespace build2
if (rp.first != nullptr)
n.type = rp.first->name;
+ if (n.value.empty () && (n.type == "dir" || n.type == "fsdir"))
+ {
+ n.value = n.dir.leaf ().string ();
+ n.dir.make_directory ();
+ }
+
return make_pair (move (n), move (rp.second));
}
const target&
to_target (const scope& s, name&& n, name&& o)
{
+ // Note: help the user out and search in both out and src like a
+ // prerequisite.
+ //
if (const target* r = search_existing (n, s, o.dir))
return *r;
- fail << "target "
- << (n.pair ? names {move (n), move (o)} : names {move (n)})
- << " not found" << endf;
+ // Inside recipes we don't treat `{}` as special so a literal target name
+ // will have no type and won't be found, which is confusing as hell.
+ //
+ bool typed (n.typed ());
+
+ diag_record dr (fail);
+
+ dr << "target "
+ << (n.pair ? names {move (n), move (o)} : names {move (n)})
+ << " not found";
+
+ if (!typed)
+ dr << info << "wrap it in ([names] ...) if this is a literal target "
+ << "name specified inside a recipe";
+
+ dr << endf;
}
const target&
@@ -75,7 +97,18 @@ namespace build2
const target_type* ntt (to_target_type (s, n, o).first);
if (ntt == nullptr)
+ {
+ // If this is an imported target and the target type is unknown, then
+ // it cannot possibly match one of the known types. We handle it like
+ // this instead of failing because the later failure (e.g., as a
+ // result of this target listed as prerequisite) will have more
+ // accurate diagnostics. See also filter() below.
+ //
+ if (n.proj)
+ return false;
+
fail << "unknown target type " << n.type << " in " << n;
+ }
return ntt->is_a (*tt);
}
@@ -109,15 +142,35 @@ namespace build2
name& n (*i);
bool p (n.pair);
- const target_type* ntt (to_target_type (s, n, p ? *++i : name ()).first);
+ // to_target_type() splits the name into the target name and extension.
+ // While we could try to reconstitute it with combine_name(), there are
+ // murky corner cases (see the default_extension argument) which won't
+ // be easy to handle. So let's just make a copy. Looking at the
+ // implementation of scope::find_target_type(), we can optimize for the
+ // (common) typed case by only copying the type.
+ //
+ name c (n.typed () ? name (n.type, "") : n);
+
+ const target_type* ntt (to_target_type (s, c, p ? *++i : name ()).first);
if (ntt == nullptr)
- fail << "unknown target type " << n.type << " in " << n;
+ {
+ // If this is an imported target and the target type is unknown, then
+ // it cannot possibly match one of the known types. We handle it like
+ // this instead of failing because the later failure (e.g., as a
+ // result of this target listed as prerequisite) will have more
+ // accurate diagnostics. See also is_a() above.
+ //
+ if (!n.proj)
+ fail << "unknown target type " << n.type << " in " << n;
+ }
- if ((find_if (tts.begin (), tts.end (),
- [ntt] (const target_type* tt)
- {
- return ntt->is_a (*tt);
- }) != tts.end ()) != out)
+ if (ntt != nullptr
+ ? (find_if (tts.begin (), tts.end (),
+ [ntt] (const target_type* tt)
+ {
+ return ntt->is_a (*tt);
+ }) != tts.end ()) != out
+ : out)
{
r.push_back (move (n));
if (p)
@@ -138,17 +191,30 @@ namespace build2
// on prerequisite names. They also won't always return the same result as
// if we were interrogating an actual target (e.g., the directory may be
// relative). Plus we now have functions that can only be called on
- // targets (see below).
+ // targets (see functions-target.cxx).
//
- function_family fn (m, "name");
+ function_family f (m, "name");
- fn["string"] += [](name n) {return to_string (n);};
+ // Note: let's leave this undocumented for now since it's not often needed
+ // and is a can of worms.
+ //
+ // Note that we must handle NULL values (relied upon by the parser
+ // to provide conversion semantics consistent with untyped values).
+ //
+ f["string"] += [](name* n)
+ {
+ return n != nullptr ? to_string (move (*n)) : string ();
+ };
- fn["name"] += [](const scope* s, name n)
+ // $name(<names>)
+ //
+ // Return the name of a target (or a list of names for a list of targets).
+ //
+ f["name"] += [](const scope* s, name n)
{
return to_target_name (s, move (n)).first.value;
};
- fn["name"] += [](const scope* s, names ns)
+ f["name"] += [](const scope* s, names ns)
{
small_vector<string, 1> r;
@@ -166,14 +232,18 @@ namespace build2
make_move_iterator (r.end ())));
};
- // Note: returns NULL if extension is unspecified (default) and empty if
- // specified as no extension.
+ // $extension(<name>)
//
- fn["extension"] += [](const scope* s, name n)
+ // Return the extension of a target.
+ //
+ // Note that this function returns `null` if the extension is unspecified
+ // (default) and an empty string if it's specified as no extension.
+ //
+ f["extension"] += [](const scope* s, name n)
{
return to_target_name (s, move (n)).second;
};
- fn["extension"] += [](const scope* s, names ns)
+ f["extension"] += [](const scope* s, names ns)
{
// Note: can't do multiple due to NULL semantics.
//
@@ -188,11 +258,16 @@ namespace build2
return to_target_name (s, move (n), o).second;
};
- fn["directory"] += [](const scope* s, name n)
+ // $directory(<names>)
+ //
+ // Return the directory of a target (or a list of directories for a list
+ // of targets).
+ //
+ f["directory"] += [](const scope* s, name n)
{
return to_target_name (s, move (n)).first.dir;
};
- fn["directory"] += [](const scope* s, names ns)
+ f["directory"] += [](const scope* s, names ns)
{
small_vector<dir_path, 1> r;
@@ -210,11 +285,16 @@ namespace build2
make_move_iterator (r.end ())));
};
- fn["target_type"] += [](const scope* s, name n)
+ // $target_type(<names>)
+ //
+ // Return the target type name of a target (or a list of target type names
+ // for a list of targets).
+ //
+ f["target_type"] += [](const scope* s, name n)
{
return to_target_name (s, move (n)).first.type;
};
- fn["target_type"] += [](const scope* s, names ns)
+ f["target_type"] += [](const scope* s, names ns)
{
small_vector<string, 1> r;
@@ -232,13 +312,15 @@ namespace build2
make_move_iterator (r.end ())));
};
- // Note: returns NULL if no project specified.
+ // $project(<name>)
+ //
+ // Return the project of a target or `null` if not project-qualified.
//
- fn["project"] += [](const scope* s, name n)
+ f["project"] += [](const scope* s, name n)
{
return to_target_name (s, move (n)).first.proj;
};
- fn["project"] += [](const scope* s, names ns)
+ f["project"] += [](const scope* s, names ns)
{
// Note: can't do multiple due to NULL semantics.
//
@@ -259,11 +341,11 @@ namespace build2
// this is a dynamic type check that takes into account target type
// inheritance.
//
- fn["is_a"] += [](const scope* s, name n, names t)
+ f["is_a"] += [](const scope* s, name n, names t)
{
return is_a (s, move (n), name (), move (t));
};
- fn["is_a"] += [](const scope* s, names ns, names t)
+ f["is_a"] += [](const scope* s, names ns, names t)
{
auto i (ns.begin ());
@@ -279,15 +361,15 @@ namespace build2
// $filter(<names>, <target-types>)
// $filter_out(<names>, <target-types>)
//
- // Return names with target types which are-a (filter) or not are-a
- // (filter_out) one of <target-types>. See $is_a() for background.
+ // Return names with target types which are-a (`filter`) or not are-a
+ // (`filter_out`) one of <target-types>. See `$is_a()` for background.
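+ //
+ // For example (illustrative):
+ //
+ //   $filter(cxx{foo} hxx{foo} lib{bar}, cxx)      # cxx{foo}
+ //   $filter_out(cxx{foo} hxx{foo} lib{bar}, cxx)  # hxx{foo} lib{bar}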
//
- fn["filter"] += [](const scope* s, names ns, names ts)
+ f["filter"] += [](const scope* s, names ns, names ts)
{
return filter (s, move (ns), move (ts), false /* out */);
};
- fn["filter_out"] += [](const scope* s, names ns, names ts)
+ f["filter_out"] += [](const scope* s, names ns, names ts)
{
return filter (s, move (ns), move (ts), true /* out */);
};
@@ -296,7 +378,7 @@ namespace build2
//
// Return the number of elements in the sequence.
//
- fn["size"] += [] (names ns)
+ f["size"] += [] (names ns)
{
size_t n (0);
@@ -310,22 +392,22 @@ namespace build2
return n;
};
- // $sort(<names> [, <flags>])
+ // $sort(<names>[, <flags>])
//
// Sort names in ascending order.
//
// The following flags are supported:
//
- // dedup - in addition to sorting also remove duplicates
+ // dedup - in addition to sorting also remove duplicates
//
- fn["sort"] += [] (names ns, optional<names> fs)
+ f["sort"] += [] (names ns, optional<names> fs)
{
//@@ TODO: shouldn't we do this in a pair-aware manner?
sort (ns.begin (), ns.end ());
if (functions_sort_flags (move (fs)))
- ns.erase (unique (ns.begin(), ns.end()), ns.end ());
+ ns.erase (unique (ns.begin (), ns.end ()), ns.end ());
return ns;
};
@@ -334,7 +416,7 @@ namespace build2
//
// Return true if the name sequence contains the specified name.
//
- fn["find"] += [](names vs, names v)
+ f["find"] += [](names vs, names v)
{
//@@ TODO: shouldn't we do this in a pair-aware manner?
@@ -345,9 +427,9 @@ namespace build2
// $find_index(<names>, <name>)
//
// Return the index of the first element in the name sequence that is
- // equal to the specified name or $size(<names>) if none is found.
+ // equal to the specified name or `$size(names)` if none is found.
//
- fn["find_index"] += [](names vs, names v)
+ f["find_index"] += [](names vs, names v)
{
//@@ TODO: shouldn't we do this in a pair-aware manner?
@@ -355,89 +437,15 @@ namespace build2
return i != vs.end () ? i - vs.begin () : vs.size ();
};
- // Functions that can be called only on real targets.
- //
- function_family ft (m, "target");
-
- // Note that while this function is not technically pure, we don't mark it
- // as such since it can only be called (normally form a recipe) after the
- // target has been matched, meaning that this target is a prerequisite and
- // therefore this impurity has been accounted for.
- //
- ft["path"] += [](const scope* s, names ns)
- {
- if (s == nullptr)
- fail << "target.path() called out of scope";
-
- // Most of the time we will have a single target so optimize for that.
- //
- small_vector<path, 1> r;
-
- for (auto i (ns.begin ()); i != ns.end (); ++i)
- {
- name& n (*i), o;
- const target& t (to_target (*s, move (n), move (n.pair ? *++i : o)));
-
- if (const auto* pt = t.is_a<path_target> ())
- {
- const path& p (pt->path ());
-
- if (&p != &empty_path)
- r.push_back (p);
- else
- fail << "target " << t << " path is not assigned";
- }
- else
- fail << "target " << t << " is not path-based";
- }
-
- // We want the result to be path if we were given a single target and
- // paths if multiple (or zero). The problem is, we cannot distinguish it
- // based on the argument type (e.g., name vs names) since passing an
- // out-qualified single target requires two names.
- //
- if (r.size () == 1)
- return value (move (r[0]));
-
- return value (paths (make_move_iterator (r.begin ()),
- make_move_iterator (r.end ())));
- };
-
- // This one can only be called on a single target since we don't support
- // containers of process_path's (though we probably could).
- //
- // Note that while this function is not technically pure, we don't mark it
- // as such for the same reasons as $path() above.
- //
- ft["process_path"] += [](const scope* s, names ns)
- {
- if (s == nullptr)
- fail << "target.process_path() called out of scope";
-
- if (ns.empty () || ns.size () != (ns[0].pair ? 2 : 1))
- fail << "target.process_path() expects single target";
-
- name o;
- const target& t (
- to_target (*s, move (ns[0]), move (ns[0].pair ? ns[1] : o)));
-
- if (const auto* et = t.is_a<exe> ())
- {
- process_path r (et->process_path ());
-
- if (r.empty ())
- fail << "target " << t << " path is not assigned";
-
- return r;
- }
- else
- fail << "target " << t << " is not process_path-based" << endf;
- };
-
// Name-specific overloads from builtins.
//
function_family fb (m, "builtin");
+ // Note that while we should normally handle NULL values (relied upon by
+ // the parser to provide concatenation semantics consistent with untyped
+ // values), the result is unlikely to be what the user expects. So for now
+ // we keep it a bit tighter.
+ //
fb[".concat"] += [](dir_path d, name n)
{
d /= n.dir;
diff --git a/libbuild2/functions-path.cxx b/libbuild2/functions-path.cxx
index 0c9b57f..4b114f5 100644
--- a/libbuild2/functions-path.cxx
+++ b/libbuild2/functions-path.cxx
@@ -154,14 +154,66 @@ namespace build2
return path_match (entry, pattern, *start);
}
+ // Don't fail for absolute paths on Windows; for example, just return
+ // c:/foo for c:\foo.
+ //
+ template <typename P>
+ static inline string
+ posix_string (P&& p)
+ {
+#ifndef _WIN32
+ return move (p).posix_string ();
+#else
+ if (p.relative ())
+ return move (p).posix_string ();
+
+ // Note: also handles root directories.
+ //
+ dir_path d (p.root_directory ());
+ return d.string () + '/' + p.leaf (d).posix_string ();
+#endif
+ }
+
+ // Similar to the above, don't fail for absolute paths on Windows.
+ //
+ template <typename P>
+ static inline string
+ posix_representation (P&& p)
+ {
+#ifndef _WIN32
+ return move (p).posix_representation ();
+#else
+ if (p.relative ())
+ return move (p).posix_representation ();
+
+ // Note: also handles root directories.
+ //
+ dir_path d (p.root_directory ());
+ return d.string () + '/' + p.leaf (d).posix_representation ();
+#endif
+ }
+
void
path_functions (function_map& m)
{
function_family f (m, "path", &path_thunk);
- // string
+ // $string(<paths>)
+ //
+ // Return the traditional string representation of a path (or a list of
+ // string representations for a list of paths). In particular, for
+ // directory paths, the traditional representation does not include the
+ // trailing directory separator (except for the POSIX root directory). See
+ // `$representation()` below for the precise string representation.
+ //
+
+ // Note that we must handle NULL values (relied upon by the parser
+ // to provide conversion semantics consistent with untyped values).
//
- f["string"] += [](path p) {return move (p).string ();};
+ f["string"] += [](path* p)
+ {
+ return p != nullptr ? move (*p).string () : string ();
+ };
f["string"] += [](paths v)
{
@@ -179,7 +231,53 @@ namespace build2
return r;
};
- // representation
+ // $posix_string(<paths>)
+ // $path.posix_string(<untyped>)
+ //
+ // Return the traditional string representation of a path (or a list of
+ // string representations for a list of paths) using the POSIX directory
+ // separators (forward slashes).
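+ //
+ // For example (illustrative; a Windows-style path is assumed):
+ //
+ //   $posix_string([dir_path] 'C:\projects\hello')  # C:/projects/hello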
+ //
+ f["posix_string"] += [](path p) {return posix_string (move (p));};
+ f["posix_string"] += [](dir_path p) {return posix_string (move (p));};
+
+ f["posix_string"] += [](paths v)
+ {
+ strings r;
+ for (auto& p: v)
+ r.push_back (posix_string (move (p)));
+ return r;
+ };
+
+ f["posix_string"] += [](dir_paths v)
+ {
+ strings r;
+ for (auto& p: v)
+ r.push_back (posix_string (move (p)));
+ return r;
+ };
+
+ f[".posix_string"] += [](names ns)
+ {
+ // For each path decide based on the presence of a trailing slash
+ // whether it is a directory. Return as untyped list of strings.
+ //
+ for (name& n: ns)
+ {
+ n = n.directory ()
+ ? posix_string (move (n.dir))
+ : posix_string (convert<path> (move (n)));
+ }
+ return ns;
+ };
+
+ // $representation(<paths>)
+ //
+ // Return the precise string representation of a path (or a list of string
+ // representations for a list of paths). In particular, for directory
+ // paths, the precise representation includes the trailing directory
+ // separator. See `$string()` above for the traditional string
+ // representation.
//
f["representation"] += [](path p) {return move (p).representation ();};
@@ -199,8 +297,61 @@ namespace build2
return r;
};
- // canonicalize
+ // $posix_representation(<paths>)
+ // $path.posix_representation(<untyped>)
+ //
+ // Return the precise string representation of a path (or a list of string
+ // representations for a list of paths) using the POSIX directory
+ // separators (forward slashes).
//
+ f["posix_representation"] += [](path p)
+ {
+ return posix_representation (move (p));
+ };
+
+ f["posix_representation"] += [](dir_path p)
+ {
+ return posix_representation (move (p));
+ };
+
+ f["posix_representation"] += [](paths v)
+ {
+ strings r;
+ for (auto& p: v)
+ r.push_back (posix_representation (move (p)));
+ return r;
+ };
+
+ f["posix_representation"] += [](dir_paths v)
+ {
+ strings r;
+ for (auto& p: v)
+ r.push_back (posix_representation (move (p)));
+ return r;
+ };
+
+ f[".posix_representation"] += [](names ns)
+ {
+ // For each path decide based on the presence of a trailing slash
+ // whether it is a directory. Return as untyped list of strings.
+ //
+ for (name& n: ns)
+ {
+ n = n.directory ()
+ ? posix_representation (move (n.dir))
+ : posix_representation (convert<path> (move (n)));
+ }
+ return ns;
+ };
+
+ // $canonicalize(<paths>)
+ // $path.canonicalize(<untyped>)
+ //
+ // Canonicalize the path (or list of paths) by converting all the
+ // directory separators to the canonical form for the host platform. Note
+ // that multiple directory separators are not collapsed.
+ //
+
// @@ TODO: add ability to specify alternative separator.
//
f["canonicalize"] += [](path p) {p.canonicalize (); return p;};
@@ -236,7 +387,13 @@ namespace build2
return ns;
};
- // normalize
+ // $normalize(<paths>)
+ // $path.normalize(<untyped>)
+ //
+ // Normalize the path (or list of paths) by collapsing the `.` and `..`
+ // components if possible, collapsing multiple directory separators, and
+ // converting all the directory separators to the canonical form for the
+ // host platform.
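+ //
+ // For example (illustrative):
+ //
+ //   $normalize([path] 'a/b/../c/./d')  # a/c/d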
//
f["normalize"] += [](path p) {p.normalize (); return p;};
f["normalize"] += [](dir_path p) {p.normalize (); return p;};
@@ -271,7 +428,16 @@ namespace build2
return ns;
};
- // actualize
+ // $actualize(<paths>)
+ // $path.actualize(<untyped>)
+ //
+ // Actualize the path (or list of paths) by first normalizing it and then
+ // for host platforms with case-insensitive filesystems obtaining the
+ // actual spelling of the path.
+ //
+ // Note that only an absolute path can be actualized. If a path component
+ // does not exist, then its (and all subsequent) spelling is
+ // unchanged. This is a potentially expensive operation.
//
// Note that this function is not pure.
//
@@ -312,11 +478,12 @@ namespace build2
return ns;
};
- // $directory(<path>)
// $directory(<paths>)
+ // $path.directory(<untyped>)
//
- // Return the directory part of the path or empty path if there is no
- // directory. Directory of a root directory is an empty path.
+ // Return the directory part of a path (or a list of directory parts for a
+ // list of paths) or an empty path if there is no directory. The directory
+ // part of a root directory is an empty path.
//
f["directory"] += &path::directory;
@@ -350,11 +517,12 @@ namespace build2
return ns;
};
- // $root_directory(<path>)
// $root_directory(<paths>)
+ // $path.root_directory(<untyped>)
//
- // Return the root directory of the path or empty path if the directory is
- // not absolute.
+ // Return the root directory of a path (or a list of root directories for
+ // a list of paths) or an empty path if the specified path is not
+ // absolute.
//
f["root_directory"] += &path::root_directory;
@@ -388,17 +556,22 @@ namespace build2
return ns;
};
- // $leaf(<path>)
- //
- f["leaf"] += &path::leaf;
-
- // $leaf(<path>, <dir-path>)
+ // $leaf(<paths>)
+ // $path.leaf(<untyped>)
// $leaf(<paths>, <dir-path>)
+ // $path.leaf(<untyped>, <dir-path>)
//
- // Return the path without the specified directory part. Return empty path
- // if the paths are the same. Issue diagnostics and fail if the directory
- // is not a prefix of the path. Note: expects both paths to be normalized.
+ // First form (one argument): return the last component of a path (or a
+ // list of last components for a list of paths).
//
+ // Second form (two arguments): return a path without the specified
+ // directory part (or a list of paths without the directory part for a
+ // list of paths). Return an empty path if the paths are the same. Issue
+ // diagnostics and fail if the directory is not a prefix of the
+ // path. Note: expects both paths to be normalized.
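+ //
+ // For example (illustrative):
+ //
+ //   $leaf([path] a/b/c.txt)                # c.txt
+ //   $leaf([path] a/b/c.txt, [dir_path] a)  # b/c.txt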
+ //
+ f["leaf"] += &path::leaf;
+
f["leaf"] += [](path p, dir_path d)
{
return leaf (p, move (d));
@@ -434,13 +607,13 @@ namespace build2
return ns;
};
- // $relative(<path>, <dir-path>)
// $relative(<paths>, <dir-path>)
+ // $path.relative(<untyped>, <dir-path>)
//
- // Return a path relative to the specified directory that is equivalent to
- // the specified path. Issue diagnostics and fail if a relative path
- // cannot be derived (for example, paths are on different drives on
- // Windows).
+ // Return the path relative to the specified directory that is equivalent
+ // to the specified path (or a list of relative paths for a list of
+ // specified paths). Issue diagnostics and fail if a relative path cannot
+ // be derived (for example, paths are on different drives on Windows).
//
f["relative"] += [](path p, dir_path d)
{
@@ -477,7 +650,11 @@ namespace build2
return ns;
};
- // base
+ // $base(<paths>)
+ // $path.base(<untyped>)
+ //
+ // Return the base part (without the extension) of a path (or a list of
+ // base parts for a list of paths).
//
f["base"] += &path::base;
@@ -511,7 +688,11 @@ namespace build2
return ns;
};
- // extension
+ // $extension(<path>)
+ // $path.extension(<untyped>)
+ //
+ // Return the extension part (without the dot) of a path or an empty
+ // string if there is no extension.
//
f["extension"] += &extension;
@@ -521,32 +702,29 @@ namespace build2
};
// $size(<paths>)
- // $size(<dir_paths>)
+ // $size(<path>)
+ //
+ // First form: return the number of elements in the paths sequence.
+ //
+ // Second form: return the number of characters (bytes) in the path. Note
+ // that for `dir_path` the result does not include the trailing directory
+ // separator (except for the POSIX root directory).
//
- // Return the number of elements in the sequence.
//
f["size"] += [] (paths v) {return v.size ();};
f["size"] += [] (dir_paths v) {return v.size ();};
- // $size(<path>)
- // $size(<dir_path>)
- //
- // Return the number of characters (bytes) in the path. Note that for
- // dir_path the result does not include the trailing directory separator
- // (except for the POSIX root directory).
- //
f["size"] += [] (path v) {return v.size ();};
f["size"] += [] (dir_path v) {return v.size ();};
- // $sort(<paths> [, <flags>])
- // $sort(<dir_paths> [, <flags>])
+ // $sort(<paths>[, <flags>])
//
- // Sort paths in ascending order. Note that on hosts with a case-
- // insensitive filesystem the order is case-insensitive.
+ // Sort paths in ascending order. Note that on host platforms with a
+ // case-insensitive filesystem the order is case-insensitive.
//
// The following flags are supported:
//
- // dedup - in addition to sorting also remove duplicates
+ // dedup - in addition to sorting also remove duplicates
//
f["sort"] += [](paths v, optional<names> fs)
{
@@ -569,11 +747,10 @@ namespace build2
};
// $find(<paths>, <path>)
- // $find(<dir_paths>, <dir_path>)
//
- // Return true if the path sequence contains the specified path. Note that
- // on hosts with a case-insensitive filesystem the comparison is
- // case-insensitive.
+ // Return true if the paths sequence contains the specified path. Note
+ // that on host platforms with a case-insensitive filesystem the
+ // comparison is case-insensitive.
//
f["find"] += [](paths vs, value v)
{
@@ -588,12 +765,11 @@ namespace build2
};
// $find_index(<paths>, <path>)
- // $find_index(<dir_paths>, <dir_path>)
//
- // Return the index of the first element in the path sequence that is
- // equal to the specified path or $size(<paths>) if none is found. Note
- // that on hosts with a case-insensitive filesystem the comparison is
- // case-insensitive.
+ // Return the index of the first element in the paths sequence that is
+ // equal to the specified path or `$size(paths)` if none is found. Note
+ // that on host platforms with a case-insensitive filesystem the
+ // comparison is case-insensitive.
//
f["find_index"] += [](paths vs, value v)
{
@@ -607,34 +783,36 @@ namespace build2
return i != vs.end () ? i - vs.begin () : vs.size ();
};
- // $path.match(<val>, <pat> [, <start>])
+ // $path.match(<entry>, <pattern>[, <start-dir>])
//
- // Match a filesystem entry name against a name pattern (both are strings),
- // or a filesystem entry path against a path pattern. For the latter case
- // the start directory may also be required (see below). The semantics of
- // the pattern and name/entry arguments is determined according to the
+ // Match a filesystem entry name against a name pattern (both are
+ // strings), or a filesystem entry path against a path pattern. For the
+ // latter case the start directory may also be required (see below). The
+ // pattern is a shell-like wildcard pattern. The semantics of the
+ // <pattern> and <entry> arguments is determined according to the
// following rules:
//
- // - The arguments must be of the string or path types, or be untyped.
+ // 1. The arguments must be of the string or path types, or be untyped.
//
- // - If one of the arguments is typed, then the other one must be of the
- // same type or be untyped. In the later case, an untyped argument is
- // converted to the type of the other argument.
+ // 2. If one of the arguments is typed, then the other one must be of the
+ //      same type or be untyped. In the latter case, an untyped argument is
+ // converted to the type of the other argument.
//
- // - If both arguments are untyped and the start directory is specified,
- // then the arguments are converted to the path type.
+ // 3. If both arguments are untyped and the start directory is specified,
+ // then the arguments are converted to the path type.
//
- // - If both arguments are untyped and the start directory is not
- // specified, then, if one of the arguments is syntactically a path (the
- // value contains a directory separator), convert them to the path type,
- // otherwise to the string type (match as names).
+ //   4. If both arguments are untyped and the start directory is not
+ //      specified, then, if one of the arguments is syntactically a path
+ //      (the value contains a directory separator), they are converted to
+ //      the path type, otherwise to the string type (match as names).
//
- // If pattern and entry paths are both either absolute or relative and
- // non-empty, and the first pattern component is not a self-matching
- // wildcard (doesn't contain ***), then the start directory is not
- // required, and is ignored if specified. Otherwise, the start directory
- // must be specified and be an absolute path.
+ // If pattern and entry paths are both either absolute or relative and not
+ // empty, and the first pattern component is not a self-matching wildcard
+ // (doesn't contain `***`), then the start directory is not required, and
+ // is ignored if specified. Otherwise, the start directory must be
+ // specified and be an absolute path.
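+ //
+ // For example (illustrative):
+ //
+ //   $path.match(foo.txt, '*.txt')  # true (matched as names)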
//
+
// Name matching.
//
f[".match"] += [](string name, string pattern)
@@ -694,6 +872,11 @@ namespace build2
//
function_family b (m, "builtin", &path_thunk);
+ // Note that while we should normally handle NULL values (relied upon by
+ // the parser to provide concatenation semantics consistent with untyped
+ // values), the result is unlikely to be what the user expects, especially
+ // if the NULL value is on the LHS. So for now we keep it a bit tighter.
+ //
b[".concat"] += &concat_path_string;
b[".concat"] += &concat_dir_path_string;
diff --git a/libbuild2/functions-process-path.cxx b/libbuild2/functions-process-path.cxx
index 486a806..6746623 100644
--- a/libbuild2/functions-process-path.cxx
+++ b/libbuild2/functions-process-path.cxx
@@ -11,24 +11,47 @@ namespace build2
void
process_path_functions (function_map& m)
{
- {
- function_family f (m, "process_path");
+ function_family f (m, "process_path");
+
+ // $recall(<process-path>)
+ //
+ // Return the recall path of an executable, that is, a path that is not
+ // necessarily absolute but which nevertheless can be used to re-run the
+ // executable in the current environment. This path, for example, could be
+ // used in diagnostics when printing the failing command line.
+ //
+
+ // As discussed in value_traits<process_path>, we always have recall.
+ //
+ f["recall"] += &process_path::recall;
- // As discussed in value_traits<process_path>, we always have recall.
- //
- f["recall"] += &process_path::recall;
- f["effect"] += [](process_path p)
- {
- return move (p.effect.empty () ? p.recall : p.effect);
- };
- }
+ // $effect(<process-path>)
+ //
+ // Return the effective path of an executable, that is, the absolute path
+ // to the executable that will also include any omitted extensions, etc.
+ //
+ f["effect"] += [] (process_path p)
{
- function_family f (m, "process_path_ex");
+ return move (p.effect.empty () ? p.recall : p.effect);
+ };
+
+ // $name(<process-path-ex>)
+ //
+ // Return the stable process name for diagnostics.
+ //
+ f["name"] += &process_path_ex::name;
+
+ // $checksum(<process-path-ex>)
+ //
+ // Return the executable checksum for change tracking.
+ //
+ f["checksum"] += &process_path_ex::checksum;
- f["name"] += &process_path_ex::name;
- f["checksum"] += &process_path_ex::checksum;
- f["env_checksum"] += &process_path_ex::env_checksum;
- }
+ // $env_checksum(<process-path-ex>)
+ //
+ // Return the environment checksum for change tracking.
+ //
+ f["env_checksum"] += &process_path_ex::env_checksum;
}
}
diff --git a/libbuild2/functions-process.cxx b/libbuild2/functions-process.cxx
index bbcbbab..6faa798 100644
--- a/libbuild2/functions-process.cxx
+++ b/libbuild2/functions-process.cxx
@@ -450,12 +450,12 @@ namespace build2
// $process.run(<prog>[ <args>...])
//
- // Run builtin or external program and return trimmed stdout.
+ // Run builtin or external program and return trimmed `stdout` output.
//
// Note that if the result of executing the program can be affected by
// environment variables and this result can in turn affect the build
// result, then such variables should be reported with the
- // config.environment directive.
+ // `config.environment` directive.
//
// Note that this function is not pure and can only be called during the
// load phase.
@@ -470,19 +470,20 @@ namespace build2
return run_process (s, pp, strings ());
};
- // $process.run_regex(<prog>[ <args>...], <pat> [, <fmt>])
+ // $process.run_regex(<prog>[ <args>...], <pat>[, <fmt>])
//
- // Run builtin or external program and return stdout lines matched and
- // optionally processed with regex.
+ // Run builtin or external program and return `stdout` output lines
+ // matched and optionally processed with a regular expression.
//
// Each line of stdout (including the customary trailing blank) is matched
// (as a whole) against <pat> and, if successful, returned, optionally
- // processed with <fmt>, as an element of a list.
+ // processed with <fmt>, as an element of a list. See the `$regex.*()`
+ // function family for details on regular expressions and format strings.
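+ //
+ // For example (illustrative; the manifest file name is assumed):
+ //
+ //   $process.run_regex(cat manifest, 'version: .+')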
//
// Note that if the result of executing the program can be affected by
// environment variables and this result can in turn affect the build
// result, then such variables should be reported with the
- // config.environment directive.
+ // `config.environment` directive.
//
// Note that this function is not pure and can only be called during the
// load phase.
diff --git a/libbuild2/functions-project-name.cxx b/libbuild2/functions-project-name.cxx
index 145e62c..23523f0 100644
--- a/libbuild2/functions-project-name.cxx
+++ b/libbuild2/functions-project-name.cxx
@@ -13,8 +13,28 @@ namespace build2
{
function_family f (m, "project_name");
- f["string"] += [](project_name p) {return move (p).string ();};
+ // $string(<project-name>)
+ //
+ // Return the string representation of a project name. See also the
+ // `$variable()` function below.
+ //
+
+ // Note that we must handle NULL values (relied upon by the parser
+ // to provide conversion semantics consistent with untyped values).
+ //
+ f["string"] += [](project_name* p)
+ {
+ return p != nullptr ? move (*p).string () : string ();
+ };
+ // $base(<project-name>[, <extension>])
+ //
+ // Return the base part (without the extension) of a project name.
+ //
+ // If <extension> is specified, then only remove that extension. Note that
+ // <extension> should not include the dot and the comparison is always
+ // case-insensitive.
+ //
f["base"] += [](project_name p, optional<string> ext)
{
return ext ? p.base (ext->c_str ()) : p.base ();
@@ -25,13 +45,30 @@ namespace build2
return p.base (convert<string> (move (ext)).c_str ());
};
+ // $extension(<project-name>)
+ //
+ // Return the extension part (without the dot) of a project name or an
+ // empty string if there is no extension.
+ //
f["extension"] += &project_name::extension;
+
+ // $variable(<project-name>)
+ //
+ // Return the string representation of a project name that is sanitized to
+ // be usable as a variable name. Specifically, `.`, `-`, and `+` are
+ // replaced with `_`.
+ //
f["variable"] += &project_name::variable;
// Project name-specific overloads from builtins.
//
function_family b (m, "builtin");
+ // Note that while we should normally handle NULL values (relied upon by
+ // the parser to provide concatenation semantics consistent with untyped
+ // values), the result is unlikely to be what the user expects. So for now
+ // we keep it a bit tighter.
+ //
b[".concat"] += [](project_name n, string s)
{
string r (move (n).string ());
diff --git a/libbuild2/functions-regex.cxx b/libbuild2/functions-regex.cxx
index f0fee8d..cf3ffd0 100644
--- a/libbuild2/functions-regex.cxx
+++ b/libbuild2/functions-regex.cxx
@@ -21,7 +21,7 @@ namespace build2
// Optimize for the string value type.
//
if (v.type != &value_traits<string>::value_type)
- untypify (v);
+ untypify (v, true /* reduce */);
return convert<string> (move (v));
}
@@ -92,10 +92,7 @@ namespace build2
names r;
for (size_t i (1); i != m.size (); ++i)
- {
- if (m[i].matched)
- r.emplace_back (m.str (i));
- }
+ r.emplace_back (m[i].matched ? m.str (i) : string ());
return value (move (r));
}
@@ -161,10 +158,7 @@ namespace build2
if (subs)
{
for (size_t i (1); i != m.size (); ++i)
- {
- if (m[i].matched)
- r.emplace_back (m.str (i));
- }
+ r.emplace_back (m[i].matched ? m.str (i) : string ());
}
return value (move (r));
@@ -174,7 +168,9 @@ namespace build2
}
static pair<regex::flag_type, regex_constants::match_flag_type>
- parse_replacement_flags (optional<names>&& flags, bool first_only = true)
+ parse_replacement_flags (optional<names>&& flags,
+ bool first_only = true,
+ bool* copy_empty = nullptr)
{
regex::flag_type rf (regex::ECMAScript);
regex_constants::match_flag_type mf (regex_constants::match_default);
@@ -191,6 +187,8 @@ namespace build2
mf |= regex_constants::format_first_only;
else if (s == "format_no_copy")
mf |= regex_constants::format_no_copy;
+ else if (copy_empty != nullptr && s == "format_copy_empty")
+ *copy_empty = true;
else
throw invalid_argument ("invalid flag '" + s + '\'');
}
@@ -334,7 +332,10 @@ namespace build2
const string& fmt,
optional<names>&& flags)
{
- auto fl (parse_replacement_flags (move (flags), false));
+ bool copy_empty (false);
+ auto fl (parse_replacement_flags (move (flags),
+ false /* first_only */,
+ &copy_empty));
regex rge (parse_regex (re, fl.first));
names r;
@@ -342,10 +343,10 @@ namespace build2
try
{
regex_replace_search (to_string (move (v)), rge, fmt,
- [&r] (string::const_iterator b,
- string::const_iterator e)
+ [copy_empty, &r] (string::const_iterator b,
+ string::const_iterator e)
{
- if (b != e)
+ if (copy_empty || b != e)
r.emplace_back (string (b, e));
},
fl.second);
@@ -364,26 +365,29 @@ namespace build2
// apply() overloads (below) for details.
//
static names
- apply (names&& s,
+ apply (names&& ns,
const string& re,
const string& fmt,
optional<names>&& flags)
{
- auto fl (parse_replacement_flags (move (flags)));
+ bool copy_empty (false);
+ auto fl (parse_replacement_flags (move (flags),
+ true /* first_only */,
+ &copy_empty));
regex rge (parse_regex (re, fl.first));
names r;
try
{
- for (auto& v: s)
+ for (auto& n: ns)
{
- string s (regex_replace_search (convert<string> (move (v)),
+ string s (regex_replace_search (convert<string> (move (n)),
rge,
fmt,
fl.second).first);
- if (!s.empty ())
+ if (copy_empty || !s.empty ())
r.emplace_back (move (s));
}
}
@@ -422,67 +426,141 @@ namespace build2
// See find_match() overloads (below) for details.
//
static bool
- find_match (names&& s, const string& re, optional<names>&& flags)
+ find_match (names&& ns, const string& re, optional<names>&& flags)
{
regex::flag_type fl (parse_find_flags (move (flags)));
regex rge (parse_regex (re, fl));
- for (auto& v: s)
+ for (auto& n: ns)
{
- if (regex_match (convert<string> (move (v)), rge))
+ if (regex_match (convert<string> (move (n)), rge))
return true;
}
return false;
}
+ // Return a list of elements that match (matching is true) or don't match
+ // (matching is false) the regular expression. See filter_match() and
+ // filter_out_match() overloads (below) for details.
+ //
+ static names
+ filter_match (names&& ns,
+ const string& re,
+ optional<names>&& flags,
+ bool matching)
+ {
+ regex::flag_type fl (parse_find_flags (move (flags)));
+ regex rge (parse_regex (re, fl));
+
+ names r;
+
+ for (name& n: ns)
+ {
+ // Note that we need to preserve the element while converting it to
+ // string since we may add it to the resulting list. But let's optimize
+ // this for the simple value case by round-tripping it through the
+ // string.
+ //
+ bool s (n.simple ());
+ string v (convert<string> (s ? move (n) : name (n)));
+
+ if (regex_match (v, rge) == matching)
+ r.emplace_back (s ? name (move (v)) : move (n));
+ }
+
+ return r;
+ }
+
// Return true if a part of any of the list elements matches the regular
// expression. See find_search() overloads (below) for details.
//
static bool
- find_search (names&& s, const string& re, optional<names>&& flags)
+ find_search (names&& ns, const string& re, optional<names>&& flags)
{
regex::flag_type fl (parse_find_flags (move (flags)));
regex rge (parse_regex (re, fl));
- for (auto& v: s)
+ for (auto& n: ns)
{
- if (regex_search (convert<string> (move (v)), rge))
+ if (regex_search (convert<string> (move (n)), rge))
return true;
}
return false;
}
+ // Return those elements of a list which have a match (matching is true) or
+ // have no match (matching is false) between the regular expression and
+ // some/any part of the element. See filter_search() and filter_out_search()
+ // overloads (below) for details.
+ //
+ static names
+ filter_search (names&& ns,
+ const string& re,
+ optional<names>&& flags,
+ bool matching)
+ {
+ regex::flag_type fl (parse_find_flags (move (flags)));
+ regex rge (parse_regex (re, fl));
+
+ names r;
+
+ for (auto& n: ns)
+ {
+ // Note that we need to preserve the element while converting it to
+ // string since we may add it to the resulting list. But let's optimize
+ // this for the simple value case by round-tripping it through the
+ // string.
+ //
+ bool s (n.simple ());
+ string v (convert<string> (s ? move (n) : name (n)));
+
+ if (regex_search (v, rge) == matching)
+ r.emplace_back (s ? name (move (v)) : move (n));
+ }
+
+ return r;
+ }
+
// Replace matched parts of list elements using the format string and
// concatenate the transformed elements. See merge() overloads (below) for
// details.
//
static names
- merge (names&& s,
+ merge (names&& ns,
const string& re,
const string& fmt,
optional<string>&& delim,
optional<names>&& flags)
{
- auto fl (parse_replacement_flags (move (flags)));
+ bool copy_empty (false);
+ auto fl (parse_replacement_flags (move (flags),
+ true /* first_only */,
+ &copy_empty));
regex rge (parse_regex (re, fl.first));
string rs;
try
{
- for (auto& v: s)
+ bool first (true);
+ for (auto& n: ns)
{
- string s (regex_replace_search (convert<string> (move (v)),
+ string s (regex_replace_search (convert<string> (move (n)),
rge,
fmt,
fl.second).first);
- if (!s.empty ())
+ if (copy_empty || !s.empty ())
{
- if (!rs.empty () && delim)
- rs.append (*delim);
+ if (delim)
+ {
+ if (first)
+ first = false;
+ else
+ rs.append (*delim);
+ }
rs.append (s);
}
@@ -510,129 +588,203 @@ namespace build2
//
// Match a value of an arbitrary type against the regular expression.
// Convert the value to string prior to matching. Return the boolean value
- // unless return_subs flag is specified (see below), in which case return
- // names (NULL if no match).
+ // unless `return_subs` flag is specified (see below), in which case
+ // return names (or `null` if no match).
//
// The following flags are supported:
//
- // icase - match ignoring case
+ // icase - match ignoring case
//
- // return_subs - return names (rather than boolean), that contain
- // sub-strings that match the marked sub-expressions and
- // NULL if no match
+ // return_subs - return names (rather than boolean), that contain
+ // sub-strings that match the marked sub-expressions
+ // and null if no match
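+ //
+ // For example (illustrative):
+ //
+ //   $regex.match(foo.txt, '(.+)\.txt', return_subs)  # foo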
//
- f[".match"] += [](value s, string re, optional<names> flags)
+ f[".match"] += [](value v, string re, optional<names> flags)
{
- return match (move (s), re, move (flags));
+ return match (move (v), re, move (flags));
};
- f[".match"] += [](value s, names re, optional<names> flags)
+ f[".match"] += [](value v, names re, optional<names> flags)
{
- return match (move (s), convert<string> (move (re)), move (flags));
+ return match (move (v), convert<string> (move (re)), move (flags));
};
// $regex.find_match(<vals>, <pat> [, <flags>])
//
// Match list elements against the regular expression and return true if
- // the match is found. Convert the elements to string prior to matching.
+ // the match is found. Convert the elements to strings prior to matching.
+ //
+ // The following flags are supported:
+ //
+ // icase - match ignoring case
+ //
+ f[".find_match"] += [](names ns, string re, optional<names> flags)
+ {
+ return find_match (move (ns), re, move (flags));
+ };
+
+ f[".find_match"] += [](names ns, names re, optional<names> flags)
+ {
+ return find_match (move (ns), convert<string> (move (re)), move (flags));
+ };
+
+ // $regex.filter_match(<vals>, <pat> [, <flags>])
+ // $regex.filter_out_match(<vals>, <pat> [, <flags>])
+ //
+ // Return elements of a list that match (`filter`) or do not match
+ // (`filter_out`) the regular expression. Convert the elements to strings
+ // prior to matching.
//
// The following flags are supported:
//
- // icase - match ignoring case
+ // icase - match ignoring case
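+ //
+ // For example (illustrative):
+ //
+ //   $regex.filter_match(foo.c bar.h baz.c, '.+\.c')  # foo.c baz.c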
//
- f[".find_match"] += [](names s, string re, optional<names> flags)
+ f[".filter_match"] += [](names ns, string re, optional<names> flags)
+ {
+ return filter_match (move (ns), re, move (flags), true /* matching */);
+ };
+
+ f[".filter_match"] += [](names ns, names re, optional<names> flags)
{
- return find_match (move (s), re, move (flags));
+ return filter_match (move (ns),
+ convert<string> (move (re)),
+ move (flags),
+ true /* matching */);
};
- f[".find_match"] += [](names s, names re, optional<names> flags)
+ f[".filter_out_match"] += [](names s, string re, optional<names> flags)
{
- return find_match (move (s), convert<string> (move (re)), move (flags));
+ return filter_match (move (s), re, move (flags), false /* matching */);
+ };
+
+ f[".filter_out_match"] += [](names ns, names re, optional<names> flags)
+ {
+ return filter_match (move (ns),
+ convert<string> (move (re)),
+ move (flags),
+ false /* matching */);
};
// $regex.search(<val>, <pat> [, <flags>])
//
// Determine if there is a match between the regular expression and some
// part of a value of an arbitrary type. Convert the value to string prior
- // to searching. Return the boolean value unless return_match or
- // return_subs flag is specified (see below) in which case return names
- // (NULL if no match).
+ // to searching. Return the boolean value unless the `return_match` or
+ // `return_subs` flag is specified (see below), in which case return names
+ // (`null` if no match).
//
// The following flags are supported:
//
- // icase - match ignoring case
+ // icase - match ignoring case
//
- // return_match - return names (rather than boolean), that contain a
- // sub-string that matches the whole regular expression and
- // NULL if no match
+ // return_match - return names (rather than boolean) that contain a
+ // sub-string that matches the whole regular expression
+ // and null if no match
//
- // return_subs - return names (rather than boolean), that contain
- // sub-strings that match the marked sub-expressions and
- // NULL if no match
+ // return_subs - return names (rather than boolean) that contain
+ // sub-strings that match the marked sub-expressions
+ // and null if no match
//
- // If both return_match and return_subs flags are specified then the
+ // If both `return_match` and `return_subs` flags are specified then the
// sub-string that matches the whole regular expression comes first.
//
- f[".search"] += [](value s, string re, optional<names> flags)
+ f[".search"] += [](value v, string re, optional<names> flags)
{
- return search (move (s), re, move (flags));
+ return search (move (v), re, move (flags));
};
- f[".search"] += [](value s, names re, optional<names> flags)
+ f[".search"] += [](value v, names re, optional<names> flags)
{
- return search (move (s), convert<string> (move (re)), move (flags));
+ return search (move (v), convert<string> (move (re)), move (flags));
};
// $regex.find_search(<vals>, <pat> [, <flags>])
//
// Determine if there is a match between the regular expression and some
- // part of any of the list elements. Convert the elements to string prior
+ // part of any of the list elements. Convert the elements to strings prior
// to matching.
//
// The following flags are supported:
//
- // icase - match ignoring case
+ // icase - match ignoring case
//
- f[".find_search"] += [](names s, string re, optional<names> flags)
+ f[".find_search"] += [](names ns, string re, optional<names> flags)
{
- return find_search (move (s), re, move (flags));
+ return find_search (move (ns), re, move (flags));
};
- f[".find_search"] += [](names s, names re, optional<names> flags)
+ f[".find_search"] += [](names ns, names re, optional<names> flags)
{
- return find_search (move (s),
+ return find_search (move (ns),
convert<string> (move (re)),
move (flags));
};
+ // $regex.filter_search(<vals>, <pat> [, <flags>])
+ // $regex.filter_out_search(<vals>, <pat> [, <flags>])
+ //
+ // Return elements of a list for which there is a match (`filter`) or no
+ // match (`filter_out`) between the regular expression and some part of
+ // the element. Convert the elements to strings prior to matching.
+ //
+ // The following flags are supported:
+ //
+ // icase - match ignoring case
+ //
+ f[".filter_search"] += [](names ns, string re, optional<names> flags)
+ {
+ return filter_search (move (ns), re, move (flags), true /* matching */);
+ };
+
+ f[".filter_search"] += [](names ns, names re, optional<names> flags)
+ {
+ return filter_search (move (ns),
+ convert<string> (move (re)),
+ move (flags),
+ true /* matching */);
+ };
+
+ f[".filter_out_search"] += [](names ns, string re, optional<names> flags)
+ {
+ return filter_search (move (ns), re, move (flags), false /* matching */);
+ };
+
+ f[".filter_out_search"] += [](names ns, names re, optional<names> flags)
+ {
+ return filter_search (move (ns),
+ convert<string> (move (re)),
+ move (flags),
+ false /* matching */);
+ };
+
// $regex.replace(<val>, <pat>, <fmt> [, <flags>])
//
// Replace matched parts in a value of an arbitrary type, using the format
// string. Convert the value to string prior to matching. The result value
// is always untyped, regardless of the argument type.
//
- // Substitution escape sequences are extended with a subset of Perl
- // sequences (see libbutl/regex.hxx for details).
- //
// The following flags are supported:
//
- // icase - match ignoring case
+ // icase - match ignoring case
//
- // format_first_only - only replace the first match
+ // format_first_only - only replace the first match
//
- // format_no_copy - do not copy unmatched value parts into the result
+ // format_no_copy - do not copy unmatched value parts into the
+ // result
//
- // If both format_first_only and format_no_copy flags are specified then
- // the result will only contain the replacement of the first match.
+ // If both `format_first_only` and `format_no_copy` flags are specified
+ // then the result will only contain the replacement of the first match.
//
- f[".replace"] += [](value s, string re, string fmt, optional<names> flags)
+ // See also `$string.replace()`.
+ //
+ f[".replace"] += [](value v, string re, string fmt, optional<names> flags)
{
- return replace (move (s), re, fmt, move (flags));
+ return replace (move (v), re, fmt, move (flags));
};
- f[".replace"] += [](value s, names re, names fmt, optional<names> flags)
+ f[".replace"] += [](value v, names re, names fmt, optional<names> flags)
{
- return replace (move (s),
+ return replace (move (v),
convert<string> (move (re)),
convert<string> (move (fmt)),
move (flags));
@@ -641,38 +793,38 @@ namespace build2
// $regex.replace_lines(<val>, <pat>, <fmt> [, <flags>])
//
// Convert the value to string, parse it into lines and for each line
- // apply the $regex.replace() function with the specified pattern, format,
- // and flags. If the format argument is NULL, omit the "all-NULL"
- // replacements for the matched lines from the result. Return unmatched
- // lines and line replacements as a name list unless return_lines flag is
- // specified (see below), in which case return a single multi-line simple
- // name value.
+ // apply the `$regex.replace()` function with the specified pattern,
+ // format, and flags. If the format argument is `null`, omit the
+ // "all-`null`" replacements for the matched lines from the result. Return
+ // unmatched lines and line replacements as a `name` list unless
+ // `return_lines` flag is specified (see below), in which case return a
+ // single multi-line simple `name` value.
//
- // The following flags are supported in addition to the $regex.replace()
- // function flags:
+ // The following flags are supported in addition to the `$regex.replace()`
+ // function's flags:
//
- // return_lines - return the simple name (rather than a name list)
- // containing the unmatched lines and line replacements
- // separated with newlines.
+ // return_lines - return the simple name (rather than a name list)
+ // containing the unmatched lines and line replacements
+ // separated with newlines.
//
- // Note that if format_no_copy is specified, unmatched lines are not
+ // Note that if `format_no_copy` is specified, unmatched lines are not
// copied either.
//
- f[".replace_lines"] += [](value s,
- string re,
- string fmt,
- optional<names> flags)
+ f[".replace_lines"] += [](value v,
+ string re,
+ string fmt,
+ optional<names> flags)
{
- return replace_lines (move (s), re, move (fmt), move (flags));
+ return replace_lines (move (v), re, move (fmt), move (flags));
};
- f[".replace_lines"] += [](value s,
- names re,
- names* fmt,
- optional<names> flags)
+ f[".replace_lines"] += [](value v,
+ names re,
+ names* fmt,
+ optional<names> flags)
{
return replace_lines (
- move (s),
+ move (v),
convert<string> (move (re)),
(fmt != nullptr
? optional<string> (convert<string> (move (*fmt)))
@@ -683,26 +835,27 @@ namespace build2
// $regex.split(<val>, <pat>, <fmt> [, <flags>])
//
// Split a value of an arbitrary type into a list of unmatched value parts
- // and replacements of the matched parts, omitting empty ones. Convert the
- // value to string prior to matching.
- //
- // Substitution escape sequences are extended with a subset of Perl
- // sequences (see libbutl/regex.hxx for details).
+ // and replacements of the matched parts, omitting empty ones (unless the
+ // `format_copy_empty` flag is specified). Convert the value to string
+ // prior to matching.
//
// The following flags are supported:
//
- // icase - match ignoring case
+ // icase - match ignoring case
+ //
+ // format_no_copy - do not copy unmatched value parts into the
+ // result
//
- // format_no_copy - do not copy unmatched value parts into the result
+ // format_copy_empty - copy empty elements into the result
//
- f[".split"] += [](value s, string re, string fmt, optional<names> flags)
+ f[".split"] += [](value v, string re, string fmt, optional<names> flags)
{
- return split (move (s), re, fmt, move (flags));
+ return split (move (v), re, fmt, move (flags));
};
- f[".split"] += [](value s, names re, names fmt, optional<names> flags)
+ f[".split"] += [](value v, names re, names fmt, optional<names> flags)
{
- return split (move (s),
+ return split (move (v),
convert<string> (move (re)),
convert<string> (move (fmt)),
move (flags));
@@ -711,45 +864,52 @@ namespace build2
// $regex.merge(<vals>, <pat>, <fmt> [, <delim> [, <flags>]])
//
// Replace matched parts in a list of elements using the regex format
- // string. Convert the elements to string prior to matching. The result
+ // string. Convert the elements to strings prior to matching. The result
// value is untyped and contains concatenation of transformed non-empty
- // elements optionally separated with a delimiter.
- //
- // Substitution escape sequences are extended with a subset of Perl
- // sequences (see libbutl/regex.hxx for details).
+ // elements (unless the `format_copy_empty` flag is specified) optionally
+ // separated with a delimiter.
//
// The following flags are supported:
//
- // icase - match ignoring case
+ // icase - match ignoring case
+ //
+ // format_first_only - only replace the first match
//
- // format_first_only - only replace the first match
+ // format_no_copy - do not copy unmatched value parts into the
+ // result
//
- // format_no_copy - do not copy unmatched value parts into the result
+ // format_copy_empty - copy empty elements into the result
//
- // If both format_first_only and format_no_copy flags are specified then
- // the result will be a concatenation of only the first match
+ // If both `format_first_only` and `format_no_copy` flags are specified
+ // then the result will be a concatenation of only the first match
// replacements.
//
- f[".merge"] += [](names s,
- string re,
- string fmt,
- optional<string> delim,
- optional<names> flags)
- {
- return merge (move (s), re, fmt, move (delim), move (flags));
+ f[".merge"] += [](names ns,
+ string re,
+ string fmt,
+ optional<string*> delim,
+ optional<names> flags)
+ {
+ return merge (move (ns),
+ re,
+ fmt,
+ delim && *delim != nullptr
+ ? move (**delim)
+ : optional<string> (),
+ move (flags));
};
- f[".merge"] += [](names s,
- names re,
- names fmt,
- optional<names> delim,
- optional<names> flags)
+ f[".merge"] += [](names ns,
+ names re,
+ names fmt,
+ optional<names*> delim,
+ optional<names> flags)
{
- return merge (move (s),
+ return merge (move (ns),
convert<string> (move (re)),
convert<string> (move (fmt)),
- delim
- ? convert<string> (move (*delim))
+ delim && *delim != nullptr
+ ? convert<string> (move (**delim))
: optional<string> (),
move (flags));
};
@@ -757,32 +917,33 @@ namespace build2
// $regex.apply(<vals>, <pat>, <fmt> [, <flags>])
//
// Replace matched parts of each element in a list using the regex format
- // string. Convert the elements to string prior to matching. Return a list
- // of transformed elements, omitting the empty ones.
- //
- // Substitution escape sequences are extended with a subset of Perl
- // sequences (see libbutl/regex.hxx for details).
+ // string. Convert the elements to strings prior to matching. Return a
+ // list of transformed elements, omitting the empty ones (unless the
+ // `format_copy_empty` flag is specified).
//
// The following flags are supported:
//
- // icase - match ignoring case
+ // icase - match ignoring case
+ //
+ // format_first_only - only replace the first match
//
- // format_first_only - only replace the first match
+ // format_no_copy - do not copy unmatched value parts into the
+ // result
//
- // format_no_copy - do not copy unmatched value parts into the result
+ // format_copy_empty - copy empty elements into the result
//
- // If both format_first_only and format_no_copy flags are specified then
- // the result elements will only contain the replacement of the first
+ // If both `format_first_only` and `format_no_copy` flags are specified
+ // then the result elements will only contain the replacement of the first
// match.
//
- f[".apply"] += [](names s, string re, string fmt, optional<names> flags)
+ f[".apply"] += [](names ns, string re, string fmt, optional<names> flags)
{
- return apply (move (s), re, fmt, move (flags));
+ return apply (move (ns), re, fmt, move (flags));
};
- f[".apply"] += [](names s, names re, names fmt, optional<names> flags)
+ f[".apply"] += [](names ns, names re, names fmt, optional<names> flags)
{
- return apply (move (s),
+ return apply (move (ns),
convert<string> (move (re)),
convert<string> (move (fmt)),
move (flags));
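As a rough standalone sketch of the keep/drop semantics that the new
$regex.filter_match()/$regex.filter_out_match() (and, analogously,
$regex.filter_search()/$regex.filter_out_search()) functions expose, not the
build2 implementation (which forwards to the filter_match()/filter_search()
helpers earlier in this file), the logic amounts to the following; the names
used here are illustrative only:

#include <iostream>
#include <regex>
#include <string>
#include <vector>

// Keep (matching == true) or drop (matching == false) list elements based on
// whether the whole element matches the regular expression. The `icase`
// function flag corresponds to std::regex::icase here.
static std::vector<std::string>
filter_match (const std::vector<std::string>& ns,
              const std::string& re,
              bool icase,
              bool matching)
{
  std::regex::flag_type fl (std::regex::ECMAScript);
  if (icase)
    fl |= std::regex::icase;

  std::regex rge (re, fl);

  std::vector<std::string> r;
  for (const std::string& n: ns)
    if (std::regex_match (n, rge) == matching)
      r.push_back (n);

  return r;
}

int main ()
{
  // Keep only the .cxx entries; filter_out_match would invert the condition
  // by passing matching == false.
  for (const std::string& s:
         filter_match ({"foo.hxx", "bar.cxx", "baz.cxx"}, ".+\\.cxx", false, true))
    std::cout << s << '\n';
}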
diff --git a/libbuild2/functions-string.cxx b/libbuild2/functions-string.cxx
index f5d7cb1..b7e0a17 100644
--- a/libbuild2/functions-string.cxx
+++ b/libbuild2/functions-string.cxx
@@ -8,6 +8,103 @@ using namespace std;
namespace build2
{
+ static string
+ replace (string&& s, value&& fv, value&& tv, optional<names>&& fs)
+ {
+ bool ic (false), fo (false), lo (false);
+ if (fs)
+ {
+ for (name& f: *fs)
+ {
+ string s (convert<string> (move (f)));
+
+ if (s == "icase")
+ ic = true;
+ else if (s == "first_only")
+ fo = true;
+ else if (s == "last_only")
+ lo = true;
+ else
+ throw invalid_argument ("invalid flag '" + s + '\'');
+ }
+ }
+
+ string f (convert<string> (move (fv)));
+ string t (convert<string> (move (tv)));
+
+ if (f.empty ())
+ throw invalid_argument ("empty <from> substring");
+
+ if (!s.empty ())
+ {
+ // Note that we don't cache s.size () since the string size will be
+ // changing as we are replacing. In fact, we may end up with an empty
+ // string after a replacement.
+
+ size_t fn (f.size ());
+
+ // Look for the substring forward in the [p, n) range.
+ //
+ auto find = [&s, &f, fn, ic] (size_t p) -> size_t
+ {
+ for (size_t n (s.size ()); p != n; ++p)
+ {
+ if (n - p >= fn &&
+ (ic
+ ? icasecmp (f, s.c_str () + p, fn)
+ : s.compare (p, fn, f)) == 0)
+ return p;
+ }
+
+ return string::npos;
+ };
+
+ // Look for the substring backward in the [0, n) range.
+ //
+ auto rfind = [&s, &f, fn, ic] (size_t n) -> size_t
+ {
+ if (n >= fn)
+ {
+ n -= fn; // Don't consider characters out of range.
+
+ for (size_t p (n);; )
+ {
+ if ((ic
+ ? icasecmp (f, s.c_str () + p, fn)
+ : s.compare (p, fn, f)) == 0)
+ return p;
+
+ if (p-- == 0)
+ break;
+ }
+ }
+
+ return string::npos;
+ };
+
+ if (fo || lo)
+ {
+ size_t p (lo ? rfind (s.size ()) : find (0));
+
+ if (fo && lo && p != string::npos)
+ {
+ if (p != find (0))
+ p = string::npos;
+ }
+
+ if (p != string::npos)
+ s.replace (p, fn, t);
+ }
+ else
+ {
+ for (size_t p (0); (p = find (p)) != string::npos; p += t.size ())
+ s.replace (p, fn, t);
+ }
+ }
+
+ return move (s);
+ }
+
static size_t
find_index (const strings& vs, value&& v, optional<names>&& fs)
{
@@ -32,20 +129,27 @@ namespace build2
}));
return i != vs.end () ? i - vs.begin () : vs.size ();
- };
+ }
void
string_functions (function_map& m)
{
function_family f (m, "string");
- f["string"] += [](string s) {return s;};
-
- // @@ Shouldn't it concatenate elements into the single string?
- // @@ Doesn't seem to be used so far. Can consider removing.
+ // Note: leave undocumented since there is no good reason for the user to
+ // call this function (which would be converting string to string).
+ //
+ // Note that we must handle NULL values (relied upon by the parser
+ // to provide conversion semantics consistent with untyped values).
//
- // f["string"] += [](strings v) {return v;};
+ f["string"] += [](string* s)
+ {
+ return s != nullptr ? move (*s) : string ();
+ };
+ // $string.icasecmp(<untyped>, <untyped>)
+ // $icasecmp(<string>, <string>)
+ //
// Compare ASCII strings ignoring case and returning the boolean value.
//
f["icasecmp"] += [](string x, string y)
@@ -69,7 +173,43 @@ namespace build2
convert<string> (move (y))) == 0;
};
- // Trim.
+ // $string.replace(<untyped>, <from>, <to> [, <flags>])
+ // $replace(<string>, <from>, <to> [, <flags>])
+ //
+ // Replace occurrences of the substring <from> with <to> in a string. The
+ // <from> substring must not be empty.
+ //
+ // The following flags are supported:
+ //
+ // icase - compare ignoring case
+ //
+ // first_only - only replace the first match
+ //
+ // last_only - only replace the last match
+ //
+ // If both `first_only` and `last_only` flags are specified, then <from>
+ // is replaced only if it occurs in the string once.
+ //
+ // See also `$regex.replace()`.
+ //
+ f["replace"] += [](string s, value f, value t, optional<names> fs)
+ {
+ return replace (move (s), move (f), move (t), move (fs));
+ };
+
+ f[".replace"] += [](names s, value f, value t, optional<names> fs)
+ {
+ return names {
+ name (
+ replace (
+ convert<string> (move (s)), move (f), move (t), move (fs)))};
+ };
+
+ // $string.trim(<untyped>)
+ // $trim(<string>)
+ //
+ // Trim leading and trailing whitespace in a string.
//
f["trim"] += [](string s)
{
@@ -81,7 +221,12 @@ namespace build2
return names {name (trim (convert<string> (move (s))))};
};
- // Convert ASCII strings into lower/upper case.
+ // $string.lcase(<untyped>)
+ // $string.ucase(<untyped>)
+ // $lcase(<string>)
+ // $ucase(<string>)
+ //
+ // Convert an ASCII string into lower/upper case.
//
f["lcase"] += [](string s)
{
@@ -104,16 +249,18 @@ namespace build2
};
// $size(<strings>)
- //
- // Return the number of elements in the sequence.
- //
- f["size"] += [] (strings v) {return v.size ();};
-
+ // $size(<string-set>)
+ // $size(<string-map>)
// $size(<string>)
//
- // Return the number of characters (bytes) in the string.
+ // First three forms: return the number of elements in the sequence.
+ //
+ // Fourth form: return the number of characters (bytes) in the string.
//
- f["size"] += [] (string v) {return v.size ();};
+ f["size"] += [] (strings v) {return v.size ();};
+ f["size"] += [] (set<string> v) {return v.size ();};
+ f["size"] += [] (map<string, string> v) {return v.size ();};
+ f["size"] += [] (string v) {return v.size ();};
// $sort(<strings> [, <flags>])
//
@@ -121,9 +268,9 @@ namespace build2
//
// The following flags are supported:
//
- // icase - sort ignoring case
+ // icase - sort ignoring case
//
- // dedup - in addition to sorting also remove duplicates
+ // dedup - in addition to sorting also remove duplicates
//
f["sort"] += [](strings v, optional<names> fs)
{
@@ -167,9 +314,9 @@ namespace build2
//
// The following flags are supported:
//
- // icase - compare ignoring case
+ // icase - compare ignoring case
//
- // See also $regex.find_{match,search}().
+ // See also `$regex.find_match()` and `$regex.find_search()`.
//
f["find"] += [](strings vs, value v, optional<names> fs)
{
@@ -179,35 +326,57 @@ namespace build2
// $find_index(<strings>, <string>[, <flags>])
//
// Return the index of the first element in the string sequence that
- // is equal to the specified string or $size(<strings>) if none is
+ // is equal to the specified string or `$size(<strings>)` if none is
// found.
//
// The following flags are supported:
//
- // icase - compare ignoring case
+ // icase - compare ignoring case
//
f["find_index"] += [](strings vs, value v, optional<names> fs)
{
return find_index (vs, move (v), move (fs));
};
+ // $keys(<string-map>)
+ //
+ // Return the list of keys in a string map.
+ //
+ // Note that the result is sorted in ascending order.
+ //
+ f["keys"] += [](map<string, string> v)
+ {
+ strings r;
+ r.reserve (v.size ());
+ for (pair<const string, string>& p: v)
+ r.push_back (p.first); // @@ PERF: use C++17 map::extract() to steal.
+ return r;
+ };
+
// String-specific overloads from builtins.
//
function_family b (m, "builtin");
- b[".concat"] += [](string l, string r) {l += r; return l;};
+ // Note that we must handle NULL values (relied upon by the parser to
+ // provide concatenation semantics consistent with untyped values).
+ //
+ b[".concat"] += [](string* l, string* r)
+ {
+ return l != nullptr
+ ? r != nullptr ? move (*l += *r) : move (*l)
+ : r != nullptr ? move (*r) : string ();
+ };
- b[".concat"] += [](string l, names ur)
+ b[".concat"] += [](string* l, names* ur)
{
- l += convert<string> (move (ur));
- return l;
+ string r (ur != nullptr ? convert<string> (move (*ur)) : string ());
+ return l != nullptr ? move (*l += r) : move (r);
};
- b[".concat"] += [](names ul, string r)
+ b[".concat"] += [](names* ul, string* r)
{
- string l (convert<string> (move (ul)));
- l += r;
- return l;
+ string l (ul != nullptr ? convert<string> (move (*ul)) : string ());
+ return r != nullptr ? move (l += *r) : move (l);
};
}
}
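The new $string.replace() above implements plain (non-regex) substring
replacement, with the icase/first_only/last_only flags handled by the
hand-rolled find/rfind scans. A minimal sketch of the default replace-all
behavior using std::string::find (case-sensitive only, names illustrative);
note how the search resumes after the inserted text so that a <to> value
containing <from> cannot cause re-matching:

#include <cassert>
#include <string>

static std::string
replace_all (std::string s, const std::string& f, const std::string& t)
{
  for (std::string::size_type p (0);
       (p = s.find (f, p)) != std::string::npos;
       p += t.size ()) // Resume after the inserted text.
    s.replace (p, f.size (), t);

  return s;
}

int main ()
{
  assert (replace_all ("a-b-c", "-", "::") == "a::b::c");
  assert (replace_all ("aaa", "a", "aa") == "aaaaaa"); // No re-matching loop.
}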
diff --git a/libbuild2/functions-target-triplet.cxx b/libbuild2/functions-target-triplet.cxx
index 4b0ec02..6e12c97 100644
--- a/libbuild2/functions-target-triplet.cxx
+++ b/libbuild2/functions-target-triplet.cxx
@@ -13,13 +13,39 @@ namespace build2
{
function_family f (m, "target_triplet");
- f["string"] += [](target_triplet t) {return t.string ();};
- f["representation"] += [](target_triplet t) {return t.representation ();};
+ // $string(<target-triplet>)
+ //
+ // Return the canonical (that is, without the `unknown` vendor component)
+ // target triplet string.
+ //
+
+ // Note that we must handle NULL values (relied upon by the parser
+ // to provide conversion semantics consistent with untyped values).
+ //
+ f["string"] += [](target_triplet* t)
+ {
+ return t != nullptr ? t->string () : string ();
+ };
+
+ // $representation(<target-triplet>)
+ //
+ // Return the complete target triplet string that always contains the
+ // vendor component.
+ //
+ f["representation"] += [](target_triplet t)
+ {
+ return t.representation ();
+ };
// Target triplet-specific overloads from builtins.
//
function_family b (m, "builtin");
+ // Note that while we should normally handle NULL values (relied upon by
+ // the parser to provide concatenation semantics consistent with untyped
+ // values), the result is unlikely to be what the user expects. So for now
+ // we keep it a bit tighter.
+ //
b[".concat"] += [](target_triplet l, string sr) {return l.string () + sr;};
b[".concat"] += [](string sl, target_triplet r) {return sl + r.string ();};
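A toy illustration of the difference documented above between the canonical
target triplet string and its full representation (the canonical form drops
an `unknown` vendor component); this is a simplification and not how the
actual target_triplet type is implemented:

#include <cassert>
#include <string>

static std::string
canonical (std::string t)
{
  std::string::size_type p (t.find ("-unknown-"));
  if (p != std::string::npos)
    t.erase (p, 8); // Remove "-unknown", keep the following '-'.

  return t;
}

int main ()
{
  assert (canonical ("x86_64-unknown-linux-gnu") == "x86_64-linux-gnu");
  assert (canonical ("x86_64-w64-mingw32") == "x86_64-w64-mingw32");
}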
diff --git a/libbuild2/functions-target.cxx b/libbuild2/functions-target.cxx
new file mode 100644
index 0000000..d564aa2
--- /dev/null
+++ b/libbuild2/functions-target.cxx
@@ -0,0 +1,108 @@
+// file : libbuild2/functions-target.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <libbuild2/functions-name.hxx> // to_target()
+
+#include <libbuild2/scope.hxx>
+#include <libbuild2/target.hxx>
+#include <libbuild2/function.hxx>
+#include <libbuild2/variable.hxx>
+
+using namespace std;
+
+namespace build2
+{
+ void
+ target_functions (function_map& m)
+ {
+ // Functions that can be called only on real targets.
+ //
+ function_family f (m, "target");
+
+ // $path(<names>)
+ //
+ // Return the path of a target (or a list of paths for a list of
+ // targets). The path must be assigned, which normally happens during
+ // match. As a result, this function is normally called from a recipe.
+ //
+ // Note that while this function is technically not pure, we don't mark it
+ // as such since it can only be called (normally from a recipe) after the
+ // target has been matched, meaning that this target is a prerequisite and
+ // therefore this impurity has been accounted for.
+ //
+ f["path"] += [](const scope* s, names ns)
+ {
+ if (s == nullptr)
+ fail << "target.path() called out of scope";
+
+ // Most of the time we will have a single target so optimize for that.
+ //
+ small_vector<path, 1> r;
+
+ for (auto i (ns.begin ()); i != ns.end (); ++i)
+ {
+ name& n (*i), o;
+ const target& t (to_target (*s, move (n), move (n.pair ? *++i : o)));
+
+ if (const auto* pt = t.is_a<path_target> ())
+ {
+ const path& p (pt->path ());
+
+ if (&p != &empty_path)
+ r.push_back (p);
+ else
+ fail << "target " << t << " path is not assigned";
+ }
+ else
+ fail << "target " << t << " is not path-based";
+ }
+
+ // We want the result to be path if we were given a single target and
+ // paths if multiple (or zero). The problem is, we cannot distinguish it
+ // based on the argument type (e.g., name vs names) since passing an
+ // out-qualified single target requires two names.
+ //
+ if (r.size () == 1)
+ return value (move (r[0]));
+
+ return value (paths (make_move_iterator (r.begin ()),
+ make_move_iterator (r.end ())));
+ };
+
+ // $process_path(<name>)
+ //
+ // Return the process path of an executable target.
+ //
+ // Note that while this function is not technically pure, we don't mark it
+ // as such for the same reasons as for `$path()` above.
+ //
+
+ // This one can only be called on a single target since we don't support
+ // containers of process_path's (though we probably could).
+ //
+ f["process_path"] += [](const scope* s, names ns)
+ {
+ if (s == nullptr)
+ fail << "target.process_path() called out of scope";
+
+ if (ns.empty () || ns.size () != (ns[0].pair ? 2 : 1))
+ fail << "target.process_path() expects single target";
+
+ name o;
+ const target& t (
+ to_target (*s, move (ns[0]), move (ns[0].pair ? ns[1] : o)));
+
+ if (const auto* et = t.is_a<exe> ())
+ {
+ process_path r (et->process_path ());
+
+ if (r.empty ())
+ fail << "target " << t << " path is not assigned";
+
+ return r;
+ }
+ else
+ fail << "target " << t << " is not executable-based" << endf;
+ };
+ }
+}
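In the new $path() above, an out-qualified target occupies two consecutive
name elements (a pair), which is also why a single-target call cannot be
distinguished from a multi-target one by argument type alone. A toy sketch of
that pair-aware iteration pattern, with the name type reduced to the two
fields the loop actually relies on (the values below are made up):

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

struct name
{
  std::string value;
  bool pair = false; // True if the next element completes this one.
};

int main ()
{
  // Two targets expressed as three elements: the first target is
  // out-qualified and takes two slots.
  std::vector<name> ns {
    {"exe{hello}", true}, {"../out/", false}, {"file{README}", false}};

  std::size_t targets (0);
  for (std::size_t i (0); i != ns.size (); ++i)
  {
    std::cout << ns[i].value;

    if (ns[i].pair)                  // Consume the second half of the pair.
      std::cout << '@' << ns[++i].value;

    std::cout << '\n';
    ++targets;
  }

  std::cout << targets << " target(s)\n"; // Prints 2, not 3.
}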
diff --git a/libbuild2/in/rule.cxx b/libbuild2/in/rule.cxx
index 74bc2a7..31a9d94 100644
--- a/libbuild2/in/rule.cxx
+++ b/libbuild2/in/rule.cxx
@@ -47,6 +47,13 @@ namespace build2
if (!fi)
l5 ([&]{trace << "no in file prerequisite for target " << t;});
+ // If we match, derive the file name here instead of in apply() to make
+ // it available early for the in{} prerequisite search (see
+ // install::file_rule::apply_impl() for background).
+ //
+ if (fi)
+ t.derive_path ();
+
return fi;
}
@@ -55,9 +62,9 @@ namespace build2
{
file& t (xt.as<file> ());
- // Derive the file name.
+ // Make sure derived rules assign the path in match().
//
- t.derive_path ();
+ assert (!t.path ().empty ());
// Inject dependency on the output directory.
//
diff --git a/libbuild2/in/rule.hxx b/libbuild2/in/rule.hxx
index 369fd93..67c2509 100644
--- a/libbuild2/in/rule.hxx
+++ b/libbuild2/in/rule.hxx
@@ -22,6 +22,11 @@ namespace build2
// cache data (e.g., in match() or apply()) to be used in substitute() and
// lookup() calls.
//
+ // A derived rule is also required to derive the target file name in
+ // match() instead of apply() to make it available early for the in{}
+ // prerequisite search (see install::file_rule::apply_impl() for
+ // background).
+ //
// Note also that currently this rule ignores the dry-run mode (see
// perform_update() for the rationale).
//
diff --git a/libbuild2/in/target.cxx b/libbuild2/in/target.cxx
index 54130ff..d664e3a 100644
--- a/libbuild2/in/target.cxx
+++ b/libbuild2/in/target.cxx
@@ -10,7 +10,7 @@ namespace build2
namespace in
{
static const target*
- in_search (const target& xt, const prerequisite_key& cpk)
+ in_search (context& ctx, const target* xt, const prerequisite_key& cpk)
{
// If we have no extension then derive it from our target. Then delegate
// to file_search().
@@ -18,7 +18,7 @@ namespace build2
prerequisite_key pk (cpk);
optional<string>& e (pk.tk.ext);
- if (!e)
+ if (!e && xt != nullptr)
{
// Why is the extension, say, .h.in and not .in (with .h being in the
// name)? While this is mostly academic (in this case things will work
@@ -28,16 +28,16 @@ namespace build2
//
// See also the low verbosity tidying up code in the rule.
//
- if (const file* t = xt.is_a<file> ())
+ if (const file* t = xt->is_a<file> ())
{
const string& te (t->derive_extension ());
e = te + (te.empty () ? "" : ".") + "in";
}
else
- fail << "prerequisite " << pk << " for a non-file target " << xt;
+ fail << "prerequisite " << pk << " for a non-file target " << *xt;
}
- return file_search (xt, pk);
+ return file_search (ctx, xt, pk);
}
static bool
diff --git a/libbuild2/install/functions.cxx b/libbuild2/install/functions.cxx
index 5668efe..1de4d3e 100644
--- a/libbuild2/install/functions.cxx
+++ b/libbuild2/install/functions.cxx
@@ -15,17 +15,125 @@ namespace build2
{
function_family f (m, "install");
- // Resolve potentially relative install.* value to an absolute directory
- // based on (other) install.* values visible from the calling scope.
+ // $install.resolve(<dir>[, <rel_base>])
+ //
+ // @@ TODO: add overload to call resolve_file().
+ //
+ // Resolve potentially relative install.* value to an absolute and
+ // normalized directory based on (other) install.* values visible from
+ // the calling scope.
+ //
+ // If rel_base is specified and is not empty, then make the resulting
+ // directory relative to it. If rel_base itself is relative, first
+ // resolve it to an absolute and normalized directory based on install.*
+ // values. Note that this argument is mandatory if this function is
+ // called during relocatable installation (install.relocatable is true).
+ // While you can pass an empty directory to suppress this functionality,
+ // make sure this does not render the result non-relocatable.
+ //
+ // As an example, consider an executable that supports loading plugins
+ // and requires the plugin installation directory to be embedded into
+ // the executable during the build. The common way to support
+ // relocatable installations for such cases is to embed a path relative
+ // to the executable and complete it at runtime. If you would like to
+ // always use the relative path, regardless of whether the installation
+ // is relocatable or not, then you can simply always pass rel_base, for
+ // example:
+ //
+ // plugin_dir = $install.resolve($install.lib, $install.bin)
+ //
+ // Alternatively, if you would like to continue using absolute paths for
+ // non-relocatable installations, then you can use something like this:
+ //
+ // plugin_dir = $install.resolve($install.lib, ($install.relocatable ? $install.bin : [dir_path] ))
+ //
+ // Finally, if you are unable to support relocatable installations, the
+ // correct way to handle this is NOT to always pass an empty path for
+ // rel_base but rather assert in root.build that your project does not
+ // support relocatable installations, for example:
+ //
+ // assert (!$install.relocatable) 'relocatable installation not supported'
//
// Note that this function is not pure.
//
- f.insert (".resolve", false) += [] (const scope* s, dir_path d)
+ f.insert (".resolve", false) += [] (const scope* s,
+ dir_path dir,
+ optional<dir_path> rel_base)
{
if (s == nullptr)
fail << "install.resolve() called out of scope" << endf;
- return resolve_dir (*s, move (d));
+ if (!rel_base)
+ {
+ const scope& rs (*s->root_scope ());
+
+ if (cast_false<bool> (rs["install.relocatable"]))
+ {
+ fail << "relocatable installation requires relative base "
+ << "directory" <<
+ info << "pass empty relative base directory if this call does "
+ << "not affect installation relocatability" <<
+ info << "or add `assert (!$install.relocatable) 'relocatable "
+ << "installation not supported'` before the call";
+ }
+ }
+
+ return resolve_dir (*s,
+ move (dir),
+ rel_base ? move (*rel_base) : dir_path ());
+ };
+
+ // @@ TODO: add $install.chroot().
+
+ // $install.filter(<path>[, <type>])
+ //
+ // Apply filters from config.install.filter and return true if the
+ // specified filesystem entry should be installed/uninstalled. Note that
+ // the entry is specified as an absolute and normalized installation
+ // path (so not $path($>) but $install.resolve($>)).
+ //
+ // The type argument can be one of `regular`, `directory`, or `symlink`.
+ // If unspecified, either `directory` or `regular` is assumed, based on
+ // whether the path is syntactically a directory (ends with a directory
+ // separator).
+ //
+ // Note that this function is not pure.
+ //
+ f.insert (".filter", false) += [] (const scope* s,
+ path p,
+ optional<names> ot)
+ {
+ if (s == nullptr)
+ fail << "install.filter() called out of scope" << endf;
+
+ entry_type t;
+ if (ot)
+ {
+ string v (convert<string> (move (*ot)));
+
+ if (v == "regular") t = entry_type::regular;
+ else if (v == "directory") t = entry_type::directory;
+ else if (v == "symlink") t = entry_type::symlink;
+ else throw invalid_argument ("unknown type '" + v + '\'');
+ }
+ else
+ t = p.to_directory () ? entry_type::directory : entry_type::regular;
+
+ // Split into directory and leaf.
+ //
+ dir_path d;
+ if (t == entry_type::directory)
+ {
+ d = path_cast<dir_path> (move (p));
+ p = path (); // No leaf.
+ }
+ else
+ {
+ d = p.directory ();
+ p.make_leaf ();
+ }
+
+ return filter_entry (*s->root_scope (), d, p, t);
};
}
}
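The new $install.filter() above infers the entry type from a trailing
directory separator and then splits the path into a directory part and a leaf
before handing both to filter_entry(). A string-only sketch of that
classification and split (build2 uses its path/dir_path types and knows about
platform separators; this toy version only understands '/'):

#include <cassert>
#include <string>
#include <utility>

// Return (directory, leaf); a trailing '/' means the whole path is a
// directory and there is no leaf.
static std::pair<std::string, std::string>
split (const std::string& p)
{
  if (!p.empty () && p.back () == '/')
    return {p, ""};

  std::string::size_type s (p.find_last_of ('/'));

  // If there is no '/', s + 1 wraps to 0 and the directory part is empty.
  return {p.substr (0, s + 1), p.substr (s + 1)};
}

int main ()
{
  assert (split ("include/hello/").second.empty ());
  assert (split ("include/hello/hello.hxx") ==
          std::make_pair (std::string ("include/hello/"),
                          std::string ("hello.hxx")));
  assert (split ("README").first.empty ());
}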
diff --git a/libbuild2/install/init.cxx b/libbuild2/install/init.cxx
index 1a6ee0f..3df912f 100644
--- a/libbuild2/install/init.cxx
+++ b/libbuild2/install/init.cxx
@@ -260,7 +260,7 @@ namespace build2
// way we distinguish between the two is via the presence/absence of
// the trailing directory separator.
//
- // Plus it can have the special true/false values when acting as a
+ // Plus it can have the special true/false values when acting as an
// operation variable.
//
auto& ovar (rs.var_pool ().insert<path> ("install",
@@ -285,24 +285,26 @@ namespace build2
//
#define DIR(N, V) static const dir_path dir_##N (V)
- DIR (data_root, dir_path ("root"));
- DIR (exec_root, dir_path ("root"));
+ DIR (data_root, dir_path ("root"));
+ DIR (exec_root, dir_path ("root"));
- DIR (sbin, dir_path ("exec_root") /= "sbin");
- DIR (bin, dir_path ("exec_root") /= "bin");
- DIR (lib, (dir_path ("exec_root") /= "lib") /= "<private>");
- DIR (libexec, ((dir_path ("exec_root") /= "libexec") /= "<private>") /= "<project>");
- DIR (pkgconfig, dir_path ("lib") /= "pkgconfig");
+ DIR (sbin, dir_path ("exec_root") /= "sbin");
+ DIR (bin, dir_path ("exec_root") /= "bin");
+ DIR (lib, (dir_path ("exec_root") /= "lib") /= "<private>");
+ DIR (libexec, ((dir_path ("exec_root") /= "libexec") /= "<private>") /= "<project>");
+ DIR (pkgconfig, dir_path ("lib") /= "pkgconfig");
- DIR (etc, dir_path ("data_root") /= "etc");
- DIR (include, (dir_path ("data_root") /= "include") /= "<private>");
- DIR (share, dir_path ("data_root") /= "share");
- DIR (data, (dir_path ("share") /= "<private>") /= "<project>");
+ DIR (etc, dir_path ("data_root") /= "etc");
+ DIR (include, (dir_path ("data_root") /= "include") /= "<private>");
+ DIR (include_arch, dir_path ("include"));
+ DIR (share, dir_path ("data_root") /= "share");
+ DIR (data, (dir_path ("share") /= "<private>") /= "<project>");
+ DIR (buildfile, ((dir_path ("share") /= "build2") /= "export") /= "<project>");
- DIR (doc, ((dir_path ("share") /= "doc") /= "<private>") /= "<project>");
- DIR (legal, dir_path ("doc"));
- DIR (man, dir_path ("share") /= "man");
- DIR (man1, dir_path ("man") /= "man1");
+ DIR (doc, ((dir_path ("share") /= "doc") /= "<private>") /= "<project>");
+ DIR (legal, dir_path ("doc"));
+ DIR (man, dir_path ("share") /= "man");
+ DIR (man1, dir_path ("man") /= "man1");
#undef DIR
@@ -394,6 +396,9 @@ namespace build2
// Note: use mtime_target (instead of target) to take precedence over
// the fallback file rules below.
//
+ // @@ We could fix this by checking the target type in file_rule,
+ // similar to build2::file_rule.
+ //
bs.insert_rule<mtime_target> (perform_install_id, "install.group", gr);
bs.insert_rule<mtime_target> (perform_uninstall_id, "install.group", gr);
@@ -401,7 +406,7 @@ namespace build2
// operation, similar to update.
//
// @@ Hm, it's a bit fuzzy why we would be updating-for-install
- // something outside of any project..?
+ // something outside of any project?
//
scope& gs (rs.global_scope ());
@@ -420,9 +425,9 @@ namespace build2
using config::lookup_config;
using config::specified_config;
- // Note: ignore config.install.scope (see below).
+ // Note: ignore config.install.{scope,manifest} (see below).
//
- bool s (specified_config (rs, "install", {"scope"}));
+ bool s (specified_config (rs, "install", {"scope", "manifest"}));
// Adjust module priority so that the (numerous) config.install.*
// values are saved at the end of config.build.
@@ -459,6 +464,123 @@ namespace build2
config::unsave_variable (rs, v);
}
+ // config.install.manifest
+ //
+ // Installation manifest. Valid values are a file path or `-` to dump
+ // the manifest to stdout.
+ //
+ // If specified during the install operation, then write the
+ // information about all the filesystem entries being installed into
+ // the manifest. If specified during uninstall, then remove the
+ // filesystem entries according to the manifest as opposed to the
+ // current build state. In particular, this functionality can be used
+ // to avoid surprising (and potentially lengthy) updates during
+ // uninstall that may happen because of changes to system-installed
+ // dependencies (for example, the compiler or standard library).
+ //
+ // @@ TODO: manifest uninstall is still TODO.
+ //
+ // Note: there is a single manifest per operation and thus this
+ // variable can only be specified as a global override. (While it
+ // could be handy to save this variable in config.build in some
+ // situations, supporting this will complicate the global override
+ // case). Note that as a result the manifest file path may not be
+ // specified in terms of the config.install.* values.
+ //
+ // Note also that the manifest is produced even in the dry-run mode.
+ // However, in this case no directory creation is tracked.
+ //
+ // The format of the installation manifest is "JSON lines", that is,
+ // each line is a JSON text (this makes it possible to reverse the
+ // order of lines without loading the entire file into memory). For
+ // example (indented lines indicate line continuations):
+ //
+ // {"type":"directory","path":"/tmp/install","mode":"755"}
+ // {"type":"target","name":"/tmp/libhello/libs{hello}",
+ // "entries":[
+ // {"type":"file","path":"/tmp/install/lib/libhello-1.0.so","mode":"755"},
+ // {"type":"symlink","path":"/tmp/install/lib/libhello.so","target":"libhello-1.0.so"}]}
+ //
+ // Each line is a serialization of one of the following non-abstract
+ // C++ structs:
+ //
+ // struct entry // abstract
+ // {
+ // enum {directory, file, symlink, target} type;
+ // };
+ //
+ // struct filesystem_entry: entry // abstract
+ // {
+ // path path;
+ // };
+ //
+ // struct directory_entry: filesystem_entry
+ // {
+ // string mode;
+ // };
+ //
+ // struct file_entry: filesystem_entry
+ // {
+ // string mode;
+ // };
+ //
+ // struct symlink_entry: filesystem_entry
+ // {
+ // path target;
+ // };
+ //
+ // struct target_entry: entry
+ // {
+ // string name;
+ // vector<filesystem_entry*> entries;
+ // };
+ //
+ // New entry types may be added later. Additional entry members may be
+ // added later to existing entries after the existing members.
+ //
+ // If installation is relocatable (see config.install.relocatable) and
+ // the installation manifest file path is inside config.install.root
+ // (including chroot), then absolute filesystem_entry::path's are
+ // saved as relative to the manifest file's directory (note that
+ // symlink_entry::target cannot be absolute in relocatable
+ // installation).
+ //
+ {
+ auto& v (vp.insert<path> ("config.install.manifest"));
+
+ // If specified, verify it is a global override.
+ //
+ if (lookup l = rs[v])
+ {
+ if (!l.belongs (rs.global_scope ()))
+ fail << "config.install.manifest must be a global override" <<
+ info << "specify !config.install.manifest=...";
+ }
+
+ config::unsave_variable (rs, v);
+ }
+
+ // Support for relocatable install.
+ //
+ // Note that it is false by default since supporting relocatable
+ // installation may require extra effort and not all projects may
+ // support it. A project that is known not to support it should assert
+ // this fact in its root.build, for example:
+ //
+ // assert (!$install.relocatable) 'relocatable installation not supported'
+ //
+ {
+ auto& var (vp.insert<bool> ( "install.relocatable"));
+ auto& cvar (vp.insert<bool> ("config.install.relocatable"));
+
+ value& v (rs.assign (var));
+
+ // Note: unlike other variables, for ease of assertion set it to
+ // false if no config.install.* is specified.
+ //
+ v = s && cast_false<bool> (lookup_config (rs, cvar, false));
+ }
+
// Support for private install (aka poor man's Flatpack).
//
const dir_path* p;
@@ -496,35 +618,109 @@ namespace build2
}
}
- // Global config.install.* values.
+ // config.install.filter
//
- set_dir (s, p, rs, "", abs_dir_path (), false, "644", "755", cmd);
-
- set_dir (s, p, rs, "root", abs_dir_path ());
-
- set_dir (s, p, rs, "data_root", dir_data_root);
- set_dir (s, p, rs, "exec_root", dir_exec_root, false, "755");
+ // Installation filtering. The value of this variable is a list of
+ // key-value pairs that specify the filesystem entries to include or
+ // exclude from the installation. For example, the following filters
+ // will omit installing headers and static libraries (notice the
+ // quoting of the wildcard).
+ //
+ // config.install.filter='include/@false "*.a"@false'
+ //
+ // The key in each pair is a file or directory path or a path wildcard
+ // pattern. If a key is relative and contains a directory component or
+ // is a directory, then it is treated relative to the corresponding
+ // config.install.* location. Otherwise (simple path, normally a
+ // pattern), it is matched against the leaf of any path. Note that if
+ // an absolute path is specified, it should be without the
+ // config.install.chroot prefix.
+ //
+ // The value in each pair is either true (include) or false (exclude).
+ // The filters are evaluated in the order specified and the first
+ // match that is found determines the outcome. If no match is found,
+ // the default is to include. For a directory, while false means
+ // exclude all the sub-paths inside this directory, true does not mean
+ // that all the sub-paths will be included wholesale. Rather, the
+ // matched component of the sub-path is treated as included with the
+ // rest of the components matched against the following
+ // sub-filters. For example:
+ //
+ // config.install.filter='
+ // include/x86_64-linux-gnu/@true
+ // include/x86_64-linux-gnu/details/@false
+ // include/@false'
+ //
+ // The true or false value may be followed by a comma and the `symlink`
+ // modifier to only apply to symlink filesystem entries. For example:
+ //
+ // config.install.filter='"*.so"@false,symlink'
+ //
+ // Note that this mechanism only affects what gets physically copied
+ // to the installation directory without affecting what gets built for
+ // install or the view of what gets installed at the buildfile level.
+ // For example, given the `include/@false *.a@false` filters, static
+ // libraries will still be built (unless arranged not to with
+ // config.bin.lib) and the pkg-config files will still end up with -I
+ // options pointing to the header installation directory. Note also
+ // that this mechanism applies to both install and uninstall
+ // operations.
+ //
+ // If you are familiar with the Debian or Fedora packaging, this
+ // mechanism is somewhat similar to (and can be used for a similar
+ // purpose as) the Debian's .install files and Fedora's %files spec
+ // file sections that are used to split the installation into multiple
+ // binary packages.
+ //
+ {
+ auto& var (vp.insert<filters> ( "install.filter"));
+ auto& cvar (vp.insert<filters> ("config.install.filter"));
- set_dir (s, p, rs, "sbin", dir_sbin);
- set_dir (s, p, rs, "bin", dir_bin);
- set_dir (s, p, rs, "lib", dir_lib);
- set_dir (s, p, rs, "libexec", dir_libexec);
- set_dir (s, p, rs, "pkgconfig", dir_pkgconfig, false, "644");
+ value& v (rs.assign (var));
- set_dir (s, p, rs, "etc", dir_etc);
- set_dir (s, p, rs, "include", dir_include);
- set_dir (s, p, rs, "share", dir_share);
- set_dir (s, p, rs, "data", dir_data);
+ if (s)
+ {
+ if (lookup l = lookup_config (rs, cvar, nullptr))
+ v = cast<filters> (l);
+ }
+ }
- set_dir (s, p, rs, "doc", dir_doc);
- set_dir (s, p, rs, "legal", dir_legal);
- set_dir (s, p, rs, "man", dir_man);
- set_dir (s, p, rs, "man1", dir_man1);
+ // Global config.install.* values.
+ //
+ set_dir (s, p, rs, "", abs_dir_path (), false, "644", "755", cmd);
+
+ set_dir (s, p, rs, "root", abs_dir_path ());
+
+ set_dir (s, p, rs, "data_root", dir_data_root);
+ set_dir (s, p, rs, "exec_root", dir_exec_root, false, "755");
+
+ set_dir (s, p, rs, "sbin", dir_sbin);
+ set_dir (s, p, rs, "bin", dir_bin);
+ set_dir (s, p, rs, "lib", dir_lib);
+ set_dir (s, p, rs, "libexec", dir_libexec);
+ set_dir (s, p, rs, "pkgconfig", dir_pkgconfig, false, "644");
+
+ set_dir (s, p, rs, "etc", dir_etc);
+ set_dir (s, p, rs, "include", dir_include);
+ set_dir (s, p, rs, "include_arch", dir_include_arch);
+ set_dir (s, p, rs, "share", dir_share);
+ set_dir (s, p, rs, "data", dir_data);
+ set_dir (s, p, rs, "buildfile", dir_buildfile);
+
+ set_dir (s, p, rs, "doc", dir_doc);
+ set_dir (s, p, rs, "legal", dir_legal);
+ set_dir (s, p, rs, "man", dir_man);
+ set_dir (s, p, rs, "man1", dir_man1);
}
// Configure "installability" for built-in target types.
//
+ // Note that for exe{} we also set explicit 755 mode in case it gets
+ // installed somewhere else where the default is not 755 (for example to
+ // libexec/, which on Debian has the 644 mode).
+ //
install_path<exe> (bs, dir_path ("bin"));
+ install_mode<exe> (bs, "755");
install_path<doc> (bs, dir_path ("doc"));
install_path<legal> (bs, dir_path ("legal"));
install_path<man> (bs, dir_path ("man"));
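The config.install.filter documentation above specifies an ordered list of
key-value filters where the first match determines the outcome and the
default, when nothing matches, is to include. A simplified sketch of that
evaluation order; unlike build2, which matches wildcard patterns
component-wise against the resolved installation location, this toy version
only does literal prefix matching, so the most specific filters are listed
first:

#include <cassert>
#include <string>
#include <utility>
#include <vector>

// Return true if the path should be installed according to the ordered
// (prefix, include) filters; the first matching filter wins.
static bool
filter (const std::vector<std::pair<std::string, bool>>& fs,
        const std::string& p)
{
  for (const auto& f: fs)
    if (p.compare (0, f.first.size (), f.first) == 0)
      return f.second;

  return true; // No match: include by default.
}

int main ()
{
  const std::vector<std::pair<std::string, bool>> fs {
    {"include/x86_64-linux-gnu/details/", false},
    {"include/x86_64-linux-gnu/", true},
    {"include/", false}};

  assert ( filter (fs, "include/x86_64-linux-gnu/config.h"));
  assert (!filter (fs, "include/x86_64-linux-gnu/details/impl.h"));
  assert (!filter (fs, "include/hello.h"));
  assert ( filter (fs, "lib/libhello.so")); // Nothing matched: include.
}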
diff --git a/libbuild2/install/operation.cxx b/libbuild2/install/operation.cxx
index 52e8c94..ce5d24a 100644
--- a/libbuild2/install/operation.cxx
+++ b/libbuild2/install/operation.cxx
@@ -3,8 +3,15 @@
#include <libbuild2/install/operation.hxx>
+#include <sstream>
+
+#include <libbuild2/scope.hxx>
+#include <libbuild2/target.hxx>
+#include <libbuild2/context.hxx>
#include <libbuild2/variable.hxx>
+#include <libbuild2/install/utility.hxx>
+
using namespace std;
using namespace butl;
@@ -12,25 +19,356 @@ namespace build2
{
namespace install
{
+#ifndef BUILD2_BOOTSTRAP
+ context_data::
+ context_data (const path* mf)
+ : manifest_name (mf),
+ manifest_os (mf != nullptr
+ ? open_file_or_stdout (manifest_name, manifest_ofs)
+ : manifest_ofs),
+ manifest_autorm (manifest_ofs.is_open () ? *mf : path ()),
+ manifest_json (manifest_os, 0 /* indentation */)
+ {
+ if (manifest_ofs.is_open ())
+ {
+ manifest_file = *mf;
+ manifest_file.complete ();
+ manifest_file.normalize ();
+ }
+ }
+
+ static path
+ relocatable_path (context_data& d, const target& t, path p)
+ {
+ // This is both inefficient (re-detecting relocatable manifest for every
+ // path) and a bit dirty (if multiple projects are being installed with
+ // different install.{relocatable,root} values, we may end up producing
+ // some paths relative and some absolute). But doing either of these
+ // properly is probably not worth the extra complexity.
+ //
+ if (!d.manifest_file.empty ()) // Not stdout.
+ {
+ const scope& rs (t.root_scope ());
+
+ if (cast_false<bool> (rs["install.relocatable"]))
+ {
+ // Note: install.root is abs_dir_path so absolute and normalized.
+ //
+ const dir_path* root (cast_null<dir_path> (rs["install.root"]));
+ if (root == nullptr)
+ fail << "unknown installation root directory in " << rs <<
+ info << "did you forget to specify config.install.root?";
+
+ // The manifest path would include chroot so if used, we need to add
+ // it to root and the file path (we could also strip it, but then
+ // making it absolute gets tricky on Windows).
+ //
+ dir_path md (d.manifest_file.directory ());
+
+ if (md.sub (chroot_path (rs, *root))) // Inside installation root.
+ {
+ p = chroot_path (rs, p);
+ try
+ {
+ p = p.relative (md);
+ }
+ catch (const invalid_path&)
+ {
+ fail << "unable to make filesystem entry path " << p
+ << " relative to " << md <<
+ info << "required for relocatable installation manifest";
+ }
+ }
+ }
+ }
+
+ return p;
+ }
+
+ // Serialize current target and, if tgt is not NULL, start the new target.
+ //
+ // Note that we always serialize directories as top-level entries. And
+ // theoretically we can end up "splitting" a target with a directory
+ // creation. For example, if some files that belong to the target are
+ // installed into subdirectories that have not yet been created. So we
+ // have to cache the information for the current target in memory and only
+ // flush it once we see the next target (or the end).
+ //
+ // You may be wondering why not just serialize directories as target
+ // entries. While we could do that, it's not quite correct conceptually,
+ // since this would be the first of potentially many targets that caused
+ // the directory's creation. To put it another way, while files and
+ // symlinks belong to targets, directories do not.
+ //
+ static void
+ manifest_flush_target (context_data& d, const target* tgt)
+ {
+ if (d.manifest_target != nullptr)
+ {
+ assert (!d.manifest_target_entries.empty ());
+
+ // Target name format is the same as in the structured result output.
+ //
+ ostringstream os;
+ stream_verb (os, stream_verbosity (1, 0));
+ os << *d.manifest_target;
+
+ try
+ {
+ auto& s (d.manifest_json);
+
+ s.begin_object ();
+ s.member ("type", "target");
+ s.member ("name", os.str ());
+ s.member_name ("entries");
+ s.begin_array ();
+
+ for (const auto& e: d.manifest_target_entries)
+ {
+ path p (relocatable_path (d, *d.manifest_target, move (e.path)));
+
+ s.begin_object ();
+
+ if (e.target.empty ())
+ {
+ s.member ("type", "file");
+ s.member ("path", p.string ());
+ s.member ("mode", e.mode);
+ }
+ else
+ {
+ s.member ("type", "symlink");
+ s.member ("path", p.string ());
+ s.member ("target", e.target.string ());
+ }
+
+ s.end_object ();
+ }
+
+ s.end_array (); // entries member
+ s.end_object (); // target object
+ }
+ catch (const json::invalid_json_output& e)
+ {
+ fail << "invalid " << d.manifest_name << " json output: " << e;
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to write to " << d.manifest_name << ": " << e;
+ }
+
+ d.manifest_target_entries.clear ();
+ }
+
+ d.manifest_target = tgt;
+ }
+
+ void context_data::
+ manifest_install_d (context& ctx,
+ const target& tgt,
+ const dir_path& dir,
+ const string& mode)
+ {
+ auto& d (*static_cast<context_data*> (ctx.current_inner_odata.get ()));
+
+ if (d.manifest_name.path != nullptr)
+ {
+ try
+ {
+ auto& s (d.manifest_json);
+
+ // If we moved to the next target, flush the current one.
+ //
+ if (d.manifest_target != &tgt)
+ manifest_flush_target (d, nullptr);
+
+ s.begin_object ();
+ s.member ("type", "directory");
+ s.member ("path", relocatable_path (d, tgt, dir).string ());
+ s.member ("mode", mode);
+ s.end_object ();
+ }
+ catch (const json::invalid_json_output& e)
+ {
+ fail << "invalid " << d.manifest_name << " json output: " << e;
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to write to " << d.manifest_name << ": " << e;
+ }
+ }
+ }
+
+ void context_data::
+ manifest_install_f (context& ctx,
+ const target& tgt,
+ const dir_path& dir,
+ const path& name,
+ const string& mode)
+ {
+ auto& d (*static_cast<context_data*> (ctx.current_inner_odata.get ()));
+
+ if (d.manifest_name.path != nullptr)
+ {
+ if (d.manifest_target != &tgt)
+ manifest_flush_target (d, &tgt);
+
+ d.manifest_target_entries.push_back (
+ manifest_target_entry {dir / name, mode, path ()});
+ }
+ }
+
+ void context_data::
+ manifest_install_l (context& ctx,
+ const target& tgt,
+ const path& link_target,
+ const dir_path& dir,
+ const path& link)
+ {
+ auto& d (*static_cast<context_data*> (ctx.current_inner_odata.get ()));
+
+ if (d.manifest_name.path != nullptr)
+ {
+ if (d.manifest_target != &tgt)
+ manifest_flush_target (d, &tgt);
+
+ d.manifest_target_entries.push_back (
+ manifest_target_entry {dir / link, "", link_target});
+ }
+ }
+
+ static void
+ manifest_close (context& ctx)
+ {
+ auto& d (*static_cast<context_data*> (ctx.current_inner_odata.get ()));
+
+ if (d.manifest_name.path != nullptr)
+ {
+ try
+ {
+ manifest_flush_target (d, nullptr);
+
+ d.manifest_os << '\n'; // Final newline.
+
+ if (d.manifest_ofs.is_open ())
+ {
+ d.manifest_ofs.close ();
+ d.manifest_autorm.cancel ();
+ }
+ }
+ catch (const json::invalid_json_output& e)
+ {
+ fail << "invalid " << d.manifest_name << " json output: " << e;
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to write to " << d.manifest_name << ": " << e;
+ }
+ }
+ }
+#else
+ context_data::
+ context_data (const path*)
+ {
+ }
+
+ void context_data::
+ manifest_install_d (context&,
+ const target&,
+ const dir_path&,
+ const string&)
+ {
+ }
+
+ void context_data::
+ manifest_install_f (context&,
+ const target&,
+ const dir_path&,
+ const path&,
+ const string&)
+ {
+ }
+
+ void context_data::
+ manifest_install_l (context&,
+ const target&,
+ const path&,
+ const dir_path&,
+ const path&)
+ {
+ }
+
+ static void
+ manifest_close (context&)
+ {
+ }
+#endif
+
static operation_id
- install_pre (context&,
- const values& params,
+ pre_install (context&,
+ const values&,
meta_operation_id mo,
- const location& l)
+ const location&)
{
- if (!params.empty ())
- fail (l) << "unexpected parameters for operation install";
+ // Run update as a pre-operation, unless we are disfiguring.
+ //
+ return mo != disfigure_id ? update_id : 0;
+ }
+
+ static operation_id
+ pre_uninstall (context&,
+ const values&,
+ meta_operation_id mo,
+ const location&)
+ {
// Run update as a pre-operation, unless we are disfiguring.
//
return mo != disfigure_id ? update_id : 0;
}
+ static void
+ install_pre (context& ctx,
+ const values& params,
+ bool inner,
+ const location& l)
+ {
+ if (!params.empty ())
+ fail (l) << "unexpected parameters for operation install";
+
+ if (inner)
+ {
+ // See if we need to write the installation manifest.
+ //
+ // Note: go straight for the public variable pool.
+ //
+ const path* mf (
+ cast_null<path> (
+ ctx.global_scope[*ctx.var_pool.find ("config.install.manifest")]));
+
+ // Note that we cannot calculate whether the manifest should use
+ // relocatable (relative) paths once here since we don't know the
+ // value of config.install.root.
+
+ ctx.current_inner_odata = context::current_data_ptr (
+ new context_data (mf),
+ [] (void* p) {delete static_cast<context_data*> (p);});
+ }
+ }
+
+ static void
+ install_post (context& ctx, const values&, bool inner)
+ {
+ if (inner)
+ manifest_close (ctx);
+ }
+
// Note that we run both install and uninstall serially. The reason for
// this is all the fuzzy things we are trying to do like removing empty
// outer directories if they are empty. If we do this in parallel, then
// those things get racy. Also, since all we do here is creating/removing
// files, there is not going to be much speedup from doing it in parallel.
+ // There is also now the installation manifest, which relies on us
+ // installing all the filesystem entries of a target serially.
const operation_info op_install {
install_id,
@@ -42,8 +380,10 @@ namespace build2
"has nothing to install", // We cannot "be installed".
execution_mode::first,
0 /* concurrency */, // Run serially.
- &install_pre,
+ &pre_install,
nullptr,
+ &install_pre,
+ &install_post,
nullptr,
nullptr
};
@@ -67,7 +407,9 @@ namespace build2
"is not installed",
execution_mode::last,
0 /* concurrency */, // Run serially
- &install_pre,
+ &pre_uninstall,
+ nullptr,
+ nullptr,
nullptr,
nullptr,
nullptr
@@ -87,6 +429,8 @@ namespace build2
op_update.concurrency,
op_update.pre_operation,
op_update.post_operation,
+ op_update.operation_pre,
+ op_update.operation_post,
op_update.adhoc_match,
op_update.adhoc_apply
};
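The installation manifest written above uses the "JSON lines" layout
documented in init.cxx: each line is a complete JSON text, directories are
emitted as top-level entries, and files/symlinks are grouped under the target
that caused their installation. A rough illustration of that layout using
plain stream output (the real code uses libbutl's JSON serializer and proper
string escaping; the paths and target name below are made up):

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

struct fs_entry {std::string type, path, mode, link_target;};

// Emit one "target" line with its file/symlink entries, in the same shape as
// manifest_flush_target() above produces.
static void
write_target (std::ostream& os,
              const std::string& name,
              const std::vector<fs_entry>& es)
{
  os << "{\"type\":\"target\",\"name\":\"" << name << "\",\"entries\":[";

  for (std::size_t i (0); i != es.size (); ++i)
  {
    const fs_entry& e (es[i]);

    os << (i != 0 ? "," : "")
       << "{\"type\":\"" << e.type << "\",\"path\":\"" << e.path << "\"";

    if (!e.mode.empty ())        os << ",\"mode\":\"" << e.mode << "\"";
    if (!e.link_target.empty ()) os << ",\"target\":\"" << e.link_target << "\"";

    os << "}";
  }

  os << "]}\n";
}

int main ()
{
  // One directory entry followed by one target entry, one JSON text per line.
  std::cout << "{\"type\":\"directory\",\"path\":\"/tmp/install/lib\","
               "\"mode\":\"755\"}\n";

  write_target (std::cout,
                "/tmp/libhello/libs{hello}",
                {{"file", "/tmp/install/lib/libhello-1.0.so", "755", ""},
                 {"symlink", "/tmp/install/lib/libhello.so", "", "libhello-1.0.so"}});
}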
diff --git a/libbuild2/install/operation.hxx b/libbuild2/install/operation.hxx
index c1f5416..bd818b4 100644
--- a/libbuild2/install/operation.hxx
+++ b/libbuild2/install/operation.hxx
@@ -4,10 +4,15 @@
#ifndef LIBBUILD2_INSTALL_OPERATION_HXX
#define LIBBUILD2_INSTALL_OPERATION_HXX
+#ifndef BUILD2_BOOTSTRAP
+# include <libbutl/json/serializer.hxx>
+#endif
+
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
#include <libbuild2/operation.hxx>
+#include <libbuild2/filesystem.hxx> // auto_rmfile
namespace build2
{
@@ -16,6 +21,65 @@ namespace build2
extern const operation_info op_install;
extern const operation_info op_uninstall;
extern const operation_info op_update_for_install;
+
+ // Set as context::current_inner_odata during the install/uninstall inner
+ // operations.
+ //
+ struct context_data
+ {
+ // Manifest.
+ //
+#ifndef BUILD2_BOOTSTRAP
+ path manifest_file; // Absolute and normalized, empty if `-`.
+ path_name manifest_name; // Original path/name.
+ ofdstream manifest_ofs;
+ ostream& manifest_os;
+ auto_rmfile manifest_autorm;
+ butl::json::stream_serializer manifest_json;
+ const target* manifest_target = nullptr; // Target being installed.
+ struct manifest_target_entry
+ {
+ build2::path path;
+ string mode;
+ build2::path target;
+ };
+ vector<manifest_target_entry> manifest_target_entries;
+#endif
+
+ // The following manifest_install_[dfl]() functions correspond to (and
+ // are called from) file_rule::install_[dfl]().
+
+ // install -d -m <mode> <dir>
+ //
+ static void
+ manifest_install_d (context&,
+ const target&,
+ const dir_path& dir,
+ const string& mode);
+
+ // install -m <mode> <file> <dir>/<name>
+ //
+ static void
+ manifest_install_f (context&,
+ const target& file,
+ const dir_path& dir,
+ const path& name,
+ const string& mode);
+
+ // install -l <link_target> <dir>/<link>
+ //
+ static void
+ manifest_install_l (context&,
+ const target&,
+ const path& link_target,
+ const dir_path& dir,
+ const path& link);
+
+ // Constructor.
+ //
+ explicit
+ context_data (const path* manifest);
+ };
}
}
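
The manifest_target_entry members (path, mode, target) together with the
manifest_install_[dfl]() signatures indicate what is recorded per installed
target. A purely hypothetical illustration (paths and modes are made up; the
exact JSON shape is produced by the serialization code in operation.cxx):

  directory entry:  path=/usr/local/lib                  mode=755
  file entry:       path=/usr/local/lib/libhello-1.2.so  mode=644
  symlink entry:    path=/usr/local/lib/libhello.so      target=libhello-1.2.so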
diff --git a/libbuild2/install/rule.cxx b/libbuild2/install/rule.cxx
index 8818ea3..873b2e9 100644
--- a/libbuild2/install/rule.cxx
+++ b/libbuild2/install/rule.cxx
@@ -13,6 +13,8 @@
#include <libbuild2/filesystem.hxx>
#include <libbuild2/diagnostics.hxx>
+#include <libbuild2/install/operation.hxx>
+
using namespace std;
using namespace butl;
@@ -69,27 +71,45 @@ namespace build2
return true;
}
- const target* alias_rule::
+ pair<const target*, uint64_t> alias_rule::
filter (const scope* is,
- action a, const target& t, prerequisite_iterator& i) const
+ action a, const target& t, prerequisite_iterator& i,
+ match_extra& me) const
{
assert (i->member == nullptr);
- return filter (is, a, t, i->prerequisite);
+ return filter (is, a, t, i->prerequisite, me);
}
- const target* alias_rule::
+ pair<const target*, uint64_t> alias_rule::
filter (const scope* is,
- action, const target& t, const prerequisite& p) const
+ action, const target& t, const prerequisite& p,
+ match_extra&) const
{
const target& pt (search (t, p));
- return is == nullptr || pt.in (*is) ? &pt : nullptr;
+ const uint64_t options (match_extra::all_options); // No definition.
+ return make_pair (is == nullptr || pt.in (*is) ? &pt : nullptr, options);
}
recipe alias_rule::
- apply (action a, target& t) const
+ apply (action a, target& t, match_extra& me) const
+ {
+ return apply_impl (a, t, me);
+ }
+
+ recipe alias_rule::
+ apply (action, target&) const
+ {
+ assert (false); // Never called.
+ return nullptr;
+ }
+
+ recipe alias_rule::
+ apply_impl (action a, target& t, match_extra& me, bool reapply) const
{
tracer trace ("install::alias_rule::apply");
+ assert (!reapply || a.operation () != update_id);
+
// Pass-through to our installable prerequisites.
//
// @@ Shouldn't we do match in parallel (here and below)?
@@ -100,6 +120,8 @@ namespace build2
auto pms (group_prerequisite_members (a, t, members_mode::never));
for (auto i (pms.begin ()), e (pms.end ()); i != e; ++i)
{
+ // NOTE: see essentially the same logic in reapply_impl() below.
+ //
const prerequisite& p (i->prerequisite);
// Ignore excluded.
@@ -123,13 +145,17 @@ namespace build2
if (!is)
is = a.operation () != update_id ? install_scope (t) : nullptr;
- const target* pt (filter (*is, a, t, i));
+ pair<const target*, uint64_t> fr (filter (*is, a, t, i, me));
+
+ const target* pt (fr.first);
+ uint64_t options (fr.second);
+
+ lookup l;
+
if (pt == nullptr)
{
l5 ([&]{trace << "ignoring " << p << " (filtered out)";});
- continue;
}
-
// Check if this prerequisite is explicitly "not installable", that
// is, there is the 'install' variable and its value is false.
//
@@ -141,64 +167,108 @@ namespace build2
//
// Note: not the same as lookup_install() above.
//
- auto l ((*pt)[var_install (*p.scope.root_scope ())]);
- if (l && cast<path> (l).string () == "false")
+ else if ((l = (*pt)[var_install (*p.scope.root_scope ())]) &&
+ cast<path> (l).string () == "false")
{
l5 ([&]{trace << "ignoring " << *pt << " (not installable)";});
- continue;
+ pt = nullptr;
}
-
// If this is not a file-based target (e.g., a target group such as
// libu{}) then ignore it if there is no rule to install.
//
- if (pt->is_a<file> ())
- match_sync (a, *pt);
- else if (!try_match_sync (a, *pt).first)
+ else if (pt->is_a<file> ())
+ {
+ match_sync (a, *pt, options);
+ }
+ else if (!try_match_sync (a, *pt, options).first)
{
l5 ([&]{trace << "ignoring " << *pt << " (no rule)";});
pt = nullptr;
}
- if (pt != nullptr)
- pts.push_back (prerequisite_target (pt, pi));
+ if (pt != nullptr || reapply)
+ {
+ // Use auxiliary data for a NULL entry to distinguish between
+ // filtered out (1) and ignored for other reasons (0).
+ //
+ pts.push_back (
+ prerequisite_target (pt, pi, fr.first == nullptr ? 1 : 0));
+ }
}
return default_recipe;
}
- // fsdir_rule
- //
- const fsdir_rule fsdir_rule::instance;
-
- bool fsdir_rule::
- match (action, target&) const
+ void alias_rule::
+ reapply_impl (action a, target& t, match_extra& me) const
{
- // We always match.
- //
- // Note that we are called both as the outer part during the update-for-
- // un/install pre-operation and as the inner part during the un/install
- // operation itself.
- //
- return true;
- }
+ tracer trace ("install::alias_rule::reapply");
- recipe fsdir_rule::
- apply (action a, target& t) const
- {
- // If this is outer part of the update-for-un/install, delegate to the
- // default fsdir rule. Otherwise, this is a noop (we don't install
- // fsdir{}).
- //
- // For now we also assume we don't need to do anything for prerequisites
- // (the only sensible prerequisite of fsdir{} is another fsdir{}).
+ assert (a.operation () != update_id);
+
+ optional<const scope*> is;
+
+ // Iterate over prerequisites and prerequisite targets in parallel.
//
- if (a.operation () == update_id)
+ auto& pts (t.prerequisite_targets[a]);
+ size_t j (0), n (pts.size ()), en (0);
+
+ auto pms (group_prerequisite_members (a, t, members_mode::never));
+ for (auto i (pms.begin ()), e (pms.end ());
+ i != e && j != n;
+ ++i, ++j, ++en)
{
- match_inner (a, t);
- return &execute_inner;
+ // The same logic as in apply() above except that we skip
+ // prerequisites that were not filtered out.
+ //
+ const prerequisite& p (i->prerequisite);
+
+ include_type pi (include (a, t, p));
+ if (!pi)
+ continue;
+
+ if (p.proj)
+ continue;
+
+ prerequisite_target& pto (pts[j]);
+
+ if (pto.target != nullptr || pto.data == 0)
+ continue;
+
+ if (!is)
+ is = a.operation () != update_id ? install_scope (t) : nullptr;
+
+ pair<const target*, uint64_t> fr (filter (*is, a, t, i, me));
+
+ const target* pt (fr.first);
+ uint64_t options (fr.second);
+
+ lookup l;
+
+ if (pt == nullptr)
+ {
+ l5 ([&]{trace << "ignoring " << p << " (filtered out)";});
+ }
+ else if ((l = (*pt)[var_install (*p.scope.root_scope ())]) &&
+ cast<path> (l).string () == "false")
+ {
+ l5 ([&]{trace << "ignoring " << *pt << " (not installable)";});
+ pt = nullptr;
+ }
+ else if (pt->is_a<file> ())
+ {
+ match_sync (a, *pt, options);
+ }
+ else if (!try_match_sync (a, *pt, options).first)
+ {
+ l5 ([&]{trace << "ignoring " << *pt << " (no rule)";});
+ pt = nullptr;
+ }
+
+ pto = prerequisite_target (pt, pi, fr.first == nullptr ? 1 : 0);
}
- else
- return noop_recipe;
+
+ assert (en == n); // Did not call apply() with true for reapply?
}
// group_rule
@@ -212,16 +282,20 @@ namespace build2
alias_rule::match (a, t);
}
- const target* group_rule::
- filter (action, const target&, const target& m) const
+ bool group_rule::
+ filter (action, const target&, const target&) const
{
- return &m;
+ return true;
}
- const target* group_rule::
+ pair<const target*, uint64_t> group_rule::
filter (const scope* is,
- action, const target& t, const prerequisite& p) const
+ action, const target& t, const prerequisite& p,
+ match_extra&) const
{
+ const uint64_t options (match_extra::all_options); // No definition.
+ pair<const target*, uint64_t> r (nullptr, options);
+
// The same logic as in file_rule::filter() below.
//
if (p.is_a<exe> ())
@@ -230,15 +304,18 @@ namespace build2
if (p.vars.empty () ||
cast_empty<path> (p.vars[var_install (rs)]).string () != "true")
- return nullptr;
+ return r;
}
const target& pt (search (t, p));
- return is == nullptr || pt.in (*is) ? &pt : nullptr;
+ if (is == nullptr || pt.in (*is))
+ r.first = &pt;
+
+ return r;
}
recipe group_rule::
- apply (action a, target& t) const
+ apply (action a, target& t, match_extra& me) const
{
tracer trace ("install::group_rule::apply");
@@ -263,17 +340,16 @@ namespace build2
auto& pts (t.prerequisite_targets[a]);
for (size_t i (0); i != gv.count; ++i)
{
- const target* m (gv.members[i]);
+ const target* mt (gv.members[i]);
- if (m == nullptr)
+ if (mt == nullptr)
continue;
// Let a customized rule have its say.
//
- const target* mt (filter (a, t, *m));
- if (mt == nullptr)
+ if (!filter (a, t, *mt))
{
- l5 ([&]{trace << "ignoring " << *m << " (filtered out)";});
+ l5 ([&]{trace << "ignoring " << *mt << " (filtered out)";});
continue;
}
@@ -296,7 +372,7 @@ namespace build2
// Delegate to the base rule.
//
- return alias_rule::apply (a, t);
+ return alias_rule::apply (a, t, me);
}
@@ -313,18 +389,29 @@ namespace build2
return true;
}
- const target* file_rule::
+ bool file_rule::
+ filter (action, const target&, const target&) const
+ {
+ return true;
+ }
+
+ pair<const target*, uint64_t> file_rule::
filter (const scope* is,
- action a, const target& t, prerequisite_iterator& i) const
+ action a, const target& t, prerequisite_iterator& i,
+ match_extra& me) const
{
assert (i->member == nullptr);
- return filter (is, a, t, i->prerequisite);
+ return filter (is, a, t, i->prerequisite, me);
}
- const target* file_rule::
+ pair<const target*, uint64_t> file_rule::
filter (const scope* is,
- action, const target& t, const prerequisite& p) const
+ action, const target& t, const prerequisite& p,
+ match_extra&) const
{
+ const uint64_t options (match_extra::all_options); // No definition.
+ pair<const target*, uint64_t> r (nullptr, options);
+
// See also group_rule::filter() with identical semantics.
//
if (p.is_a<exe> ())
@@ -338,25 +425,37 @@ namespace build2
//
if (p.vars.empty () ||
cast_empty<path> (p.vars[var_install (rs)]).string () != "true")
- return nullptr;
+ return r;
}
const target& pt (search (t, p));
- return is == nullptr || pt.in (*is) ? &pt : nullptr;
+ if (is == nullptr || pt.in (*is))
+ r.first = &pt;
+
+ return r;
}
recipe file_rule::
- apply (action a, target& t) const
+ apply (action a, target& t, match_extra& me) const
{
- recipe r (apply_impl (a, t));
+ recipe r (apply_impl (a, t, me));
return r != nullptr ? move (r) : noop_recipe;
}
recipe file_rule::
- apply_impl (action a, target& t) const
+ apply (action, target&) const
+ {
+ assert (false); // Never called.
+ return nullptr;
+ }
+
+ recipe file_rule::
+ apply_impl (action a, target& t, match_extra& me, bool reapply) const
{
tracer trace ("install::file_rule::apply");
+ assert (!reapply || a.operation () != update_id);
+
// Note that we are called both as the outer part during the update-for-
// un/install pre-operation and as the inner part during the un/install
// operation itself.
@@ -376,11 +475,36 @@ namespace build2
// (actual update). We used to do this after matching the prerequisites
// but the inner rule may provide some rule-specific information (like
// the target extension for exe{}) that may be required during the
- // prerequisite search (like the base name for in{}).
+ // prerequisite search (like the base name for in{}; this no longer
+    // reproduces, likely due to the changes to exe{} extension derivation,
+ // but a contrived arrangement can still be made to trigger this).
//
+    // But then we discovered that doing this before the prerequisites
+    // messes up the for-install signaling. Specifically, matching the
+ // prerequisites may signal that they are being updated for install,
+ // for example, for a library via a metadata library used in a moc
+ // recipe. While matching the inner rule may trigger updating during
+ // match of such prerequisites, for example, a source file generated by
+ // that moc recipe that depends on this metadata library. If we match
+ // prerequisites before, then the library that is pulled by the metadata
+ // library will be updated before we had a chance to signal that it
+ // should be updated for install.
+ //
+ // To try to accommodate both cases (as best as we can) we now split the
+ // inner rule match into two steps: we do the match before and apply
+ // after. This allows rules that deal with tricky prerequisites like
+ // in{} to assign the target path in match() instead of apply() (see
+ // in::rule, for example).
+ //
+#if 0
optional<bool> unchanged;
if (a.operation () == update_id)
unchanged = match_inner (a, t, unmatch::unchanged).first;
+#else
+ action ia (a.inner_action ());
+ if (a.operation () == update_id)
+ match_only_sync (ia, t);
+#endif
optional<const scope*> is; // Installation scope (resolve lazily).
@@ -388,6 +512,8 @@ namespace build2
auto pms (group_prerequisite_members (a, t, members_mode::never));
for (auto i (pms.begin ()), e (pms.end ()); i != e; ++i)
{
+ // NOTE: see essentially the same logic in reapply_impl() below.
+ //
const prerequisite& p (i->prerequisite);
// Ignore excluded.
@@ -411,27 +537,30 @@ namespace build2
if (!is)
is = a.operation () != update_id ? install_scope (t) : nullptr;
- const target* pt (filter (*is, a, t, i));
+ pair<const target*, uint64_t> fr (filter (*is, a, t, i, me));
+
+ const target* pt (fr.first);
+ uint64_t options (fr.second);
+
+ lookup l;
if (pt == nullptr)
{
l5 ([&]{trace << "ignoring " << p << " (filtered out)";});
- continue;
}
-
+ //
// See if we were explicitly instructed not to touch this target (the
// same semantics as in alias_rule).
//
// Note: not the same as lookup_install() above.
//
- auto l ((*pt)[var_install (*p.scope.root_scope ())]);
- if (l && cast<path> (l).string () == "false")
+ else if ((l = (*pt)[var_install (*p.scope.root_scope ())]) &&
+ cast<path> (l).string () == "false")
{
l5 ([&]{trace << "ignoring " << *pt << " (not installable)";});
- continue;
+ pt = nullptr;
}
-
- if (pt->is_a<file> ())
+ else if (pt->is_a<file> ())
{
// If the matched rule returned noop_recipe, then the target state
// is set to unchanged as an optimization. Use this knowledge to
@@ -439,19 +568,36 @@ namespace build2
// when updating static installable content (headers, documentation,
// etc).
//
- if (match_sync (a, *pt, unmatch::unchanged).first)
+ // Regarding options, the expectation here is that they are not used
+ // for the update operation. And for install/uninstall, if they are
+        // used, then they don't affect whether the target is unchanged. All
+        // of this feels reasonable.
+ //
+ if (match_sync (a, *pt, unmatch::unchanged, options).first)
pt = nullptr;
}
- else if (!try_match_sync (a, *pt).first)
+ else if (!try_match_sync (a, *pt, options).first)
{
l5 ([&]{trace << "ignoring " << *pt << " (no rule)";});
pt = nullptr;
}
- if (pt != nullptr)
- pts.push_back (prerequisite_target (pt, pi));
+ if (pt != nullptr || reapply)
+ {
+ // Use auxiliary data for a NULL entry to distinguish between
+ // filtered out (1) and ignored for other reasons (0).
+ //
+ pts.push_back (
+ prerequisite_target (pt, pi, fr.first == nullptr ? 1 : 0));
+ }
}
+#if 1
+ optional<bool> unchanged;
+ if (a.operation () == update_id)
+ unchanged = match_sync (ia, t, unmatch::unchanged).first;
+#endif
+
if (a.operation () == update_id)
{
return *unchanged
@@ -469,6 +615,79 @@ namespace build2
}
}
+ void file_rule::
+ reapply_impl (action a, target& t, match_extra& me) const
+ {
+ tracer trace ("install::file_rule::reapply");
+
+ assert (a.operation () != update_id);
+
+ optional<const scope*> is;
+
+ // Iterate over prerequisites and prerequisite targets in parallel.
+ //
+ auto& pts (t.prerequisite_targets[a]);
+ size_t j (0), n (pts.size ()), en (0);
+
+ auto pms (group_prerequisite_members (a, t, members_mode::never));
+ for (auto i (pms.begin ()), e (pms.end ());
+ i != e && j != n;
+ ++i, ++j, ++en)
+ {
+ // The same logic as in apply() above except that we skip
+ // prerequisites that were not filtered out.
+ //
+ const prerequisite& p (i->prerequisite);
+
+ include_type pi (include (a, t, p));
+ if (!pi)
+ continue;
+
+ if (p.proj)
+ continue;
+
+ prerequisite_target& pto (pts[j]);
+
+ if (pto.target != nullptr || pto.data == 0)
+ continue;
+
+ if (!is)
+ is = a.operation () != update_id ? install_scope (t) : nullptr;
+
+ pair<const target*, uint64_t> fr (filter (*is, a, t, i, me));
+
+ const target* pt (fr.first);
+ uint64_t options (fr.second);
+
+ lookup l;
+
+ if (pt == nullptr)
+ {
+ l5 ([&]{trace << "ignoring " << p << " (filtered out)";});
+ }
+ else if ((l = (*pt)[var_install (*p.scope.root_scope ())]) &&
+ cast<path> (l).string () == "false")
+ {
+ l5 ([&]{trace << "ignoring " << *pt << " (not installable)";});
+ pt = nullptr;
+ }
+ else if (pt->is_a<file> ())
+ {
+ if (match_sync (a, *pt, unmatch::unchanged, options).first)
+ pt = nullptr;
+ }
+ else if (!try_match_sync (a, *pt, options).first)
+ {
+ l5 ([&]{trace << "ignoring " << *pt << " (no rule)";});
+ pt = nullptr;
+ }
+
+ pto = prerequisite_target (pt, pi, fr.first == nullptr ? 1 : 0);
+ }
+
+ assert (en == n); // Did not call apply() with true for reapply?
+ }
+
target_state file_rule::
perform_update (action a, const target& t)
{
@@ -553,7 +772,8 @@ namespace build2
const dir_path& d (t.out_dir ().leaf (p->out_path ()));
// Add it as another leading directory rather than modifying
- // the last one directly; somehow, it feels right.
+ // the last one directly; somehow, it feels right. Note: the
+ // result is normalized.
//
if (!d.empty ())
rs.emplace_back (rs.back ().dir / d, rs.back ());
@@ -564,8 +784,9 @@ namespace build2
return rs.back ();
}
- // Resolve installation directory name to absolute directory path. Return
- // all the super-directories leading up to the destination (last).
+ // Resolve installation directory name to absolute and normalized
+ // directory path. Return all the super-directories leading up to the
+ // destination (last).
//
// If target is not NULL, then also handle the subdirs logic.
//
@@ -664,24 +885,52 @@ namespace build2
return rs;
}
- static inline install_dirs
- resolve (const target& t, dir_path d, bool fail_unknown = true)
+ static dir_path
+ resolve_dir (const scope& s, const target* t,
+ dir_path d, dir_path rb,
+ bool fail_unknown)
{
- return resolve (t.base_scope (), &t, move (d), fail_unknown);
+ install_dirs rs (resolve (s, t, move (d), fail_unknown));
+
+ if (rs.empty ())
+ return dir_path ();
+
+ dir_path r (move (rs.back ().dir));
+
+ if (!rb.empty ())
+ {
+ dir_path b (resolve (s, t, move (rb), false).back ().dir);
+
+ try
+ {
+ r = r.relative (b);
+ }
+ catch (const invalid_path&)
+ {
+ fail << "unable to make installation directory " << r
+ << " relative to " << b;
+ }
+ }
+
+ return r;
}
dir_path
- resolve_dir (const target& t, dir_path d, bool fail_unknown)
+ resolve_dir (const target& t, dir_path d, dir_path rb, bool fail_unknown)
{
- install_dirs r (resolve (t, move (d), fail_unknown));
- return r.empty () ? dir_path () : move (r.back ().dir);
+ return resolve_dir (t.base_scope (), &t, move (d), move (rb), fail_unknown);
}
dir_path
- resolve_dir (const scope& s, dir_path d, bool fail_unknown)
+ resolve_dir (const scope& s, dir_path d, dir_path rb, bool fail_unknown)
{
- install_dirs r (resolve (s, nullptr, move (d), fail_unknown));
- return r.empty () ? dir_path () : move (r.back ().dir);
+ return resolve_dir (s, nullptr, move (d), move (rb), fail_unknown);
+ }
+
+ static inline install_dirs
+ resolve (const target& t, dir_path d, bool fail_unknown = true)
+ {
+ return resolve (t.base_scope (), &t, move (d), fail_unknown);
}
path
@@ -751,30 +1000,15 @@ namespace build2
return s;
}
- // Given an abolute path return its chroot'ed version, if any, accoring to
- // install.chroot.
- //
- template <typename P>
- static inline P
- chroot_path (const scope& rs, const P& p)
- {
- if (const dir_path* d = cast_null<dir_path> (rs["install.chroot"]))
- {
- dir_path r (p.root_directory ());
- assert (!r.empty ()); // Must be absolute.
-
- return *d / p.leaf (r);
- }
-
- return p;
- }
-
void file_rule::
install_d (const scope& rs,
const install_dir& base,
const dir_path& d,
+ const file& t,
uint16_t verbosity)
{
+ assert (d.absolute ());
+
context& ctx (rs.ctx);
// Here is the problem: if this is a dry-run, then we will keep showing
@@ -787,7 +1021,10 @@ namespace build2
// with uninstall since the directories won't be empty (because we don't
// actually uninstall any files).
//
- if (ctx.dry_run)
+ // Note that this also means we won't have the directory entries in the
+ // manifest created with dry-run. Probably not a big deal.
+ //
+ if (ctx.dry_run || !filter_entry (rs, d, path (), entry_type::directory))
return;
dir_path chd (chroot_path (rs, d));
@@ -814,7 +1051,7 @@ namespace build2
dir_path pd (d.directory ());
if (pd != base.dir)
- install_d (rs, base, pd, verbosity);
+ install_d (rs, base, pd, t, verbosity);
}
cstrings args;
@@ -851,6 +1088,8 @@ namespace build2
run (ctx,
pp, args,
verb >= verbosity ? 1 : verb_never /* finish_verbosity */);
+
+ context_data::manifest_install_d (ctx, t, d, *base.dir_mode);
}
void file_rule::
@@ -861,8 +1100,15 @@ namespace build2
const path& f,
uint16_t verbosity)
{
+ assert (name.empty () || name.simple ());
+
context& ctx (rs.ctx);
+ const path& leaf (name.empty () ? f.leaf () : name);
+
+ if (!filter_entry (rs, base.dir, leaf, entry_type::regular))
+ return;
+
path relf (relative (f));
dir_path chd (chroot_path (rs, base.dir));
@@ -913,17 +1159,32 @@ namespace build2
run (ctx,
pp, args,
verb >= verbosity ? 1 : verb_never /* finish_verbosity */);
+
+ context_data::manifest_install_f (ctx, t, base.dir, leaf, *base.mode);
}
void file_rule::
install_l (const scope& rs,
const install_dir& base,
- const path& target,
const path& link,
+ const file& target,
+ const path& link_target,
uint16_t verbosity)
{
+ assert (link.simple () && !link.empty ());
+
context& ctx (rs.ctx);
+ if (!filter_entry (rs, base.dir, link, entry_type::symlink))
+ return;
+
+ if (link_target.absolute () &&
+ cast_false<bool> (rs["install.relocatable"]))
+ {
+ fail << "absolute symlink target " << link_target.string ()
+ << " in relocatable installation";
+ }
+
dir_path chd (chroot_path (rs, base.dir));
path rell (relative (chd));
@@ -940,7 +1201,7 @@ namespace build2
base.sudo != nullptr ? base.sudo->c_str () : nullptr,
"ln",
"-sf",
- target.string ().c_str (),
+ link_target.string ().c_str (),
rell.string ().c_str (),
nullptr};
@@ -958,7 +1219,7 @@ namespace build2
// a link. FreeBSD install(1) has the -l flag with the appropriate
// semantics. For consistency, we also pass -d above.
//
- print_diag ("install -l", target, chd / link);
+ print_diag ("install -l", link_target, chd / link);
}
}
@@ -977,15 +1238,15 @@ namespace build2
if (verb >= verbosity)
{
if (verb >= 2)
- text << "ln -sf " << target.string () << ' ' << rell.string ();
+ text << "ln -sf " << link_target.string () << ' ' << rell.string ();
else if (verb)
- print_diag ("install -l", target, chd / link);
+ print_diag ("install -l", link_target, chd / link);
}
if (!ctx.dry_run)
try
{
- mkanylink (target, rell, true /* copy */);
+ mkanylink (link_target, rell, true /* copy */);
}
catch (const pair<entry_type, system_error>& e)
{
@@ -997,6 +1258,12 @@ namespace build2
fail << "unable to make " << w << ' ' << rell << ": " << e.second;
}
#endif
+
+ context_data::manifest_install_l (ctx,
+ target,
+ link_target,
+ base.dir,
+ link);
}
target_state file_rule::
@@ -1045,7 +1312,7 @@ namespace build2
// sudo, etc).
//
for (auto i (ids.begin ()), j (i); i != ids.end (); j = i++)
- install_d (rs, *j, i->dir, verbosity); // install -d
+ install_d (rs, *j, i->dir, t, verbosity); // install -d
install_dir& id (ids.back ());
@@ -1079,6 +1346,8 @@ namespace build2
//
target_state r (straight_execute_prerequisites (a, t));
+ bool fr (filter (a, t, t));
+
// Then installable ad hoc group members, if any.
//
for (const target* m (t.adhoc_member);
@@ -1089,10 +1358,13 @@ namespace build2
{
if (!mf->path ().empty () && mf->mtime () != timestamp_nonexistent)
{
- if (const path* p = lookup_install<path> (*mf, "install"))
+ if (filter (a, t, *mf))
{
- install_target (*mf, *p, tp.empty () ? 1 : 2);
- r |= target_state::changed;
+ if (const path* p = lookup_install<path> (*mf, "install"))
+ {
+ install_target (*mf, *p, !fr || tp.empty () ? 1 : 2);
+ r |= target_state::changed;
+ }
}
}
}
@@ -1101,7 +1373,7 @@ namespace build2
// Finally install the target itself (since we got here we know the
// install variable is there).
//
- if (!tp.empty ())
+ if (fr && !tp.empty ())
{
install_target (t, cast<path> (t[var_install (rs)]), 1);
r |= target_state::changed;
@@ -1116,9 +1388,13 @@ namespace build2
const dir_path& d,
uint16_t verbosity)
{
+ assert (d.absolute ());
+
+ context& ctx (rs.ctx);
+
// See install_d() for the rationale.
//
- if (rs.ctx.dry_run)
+ if (ctx.dry_run || !filter_entry (rs, d, path (), entry_type::directory))
return false;
dir_path chd (chroot_path (rs, d));
@@ -1199,10 +1475,10 @@ namespace build2
}
process pr (run_start (pp, args,
- 0 /* stdin */,
- 1 /* stdout */,
- diag_buffer::pipe (rs.ctx) /* stderr */));
- diag_buffer dbuf (rs.ctx, args[0], pr);
+ 0 /* stdin */,
+ 1 /* stdout */,
+ diag_buffer::pipe (ctx) /* stderr */));
+ diag_buffer dbuf (ctx, args[0], pr);
dbuf.read ();
r = run_finish_code (
dbuf,
@@ -1296,10 +1572,15 @@ namespace build2
const path& name,
uint16_t verbosity)
{
- assert (t != nullptr || !name.empty ());
+ assert (name.empty () ? t != nullptr : name.simple ());
+
+ const path& leaf (name.empty () ? t->path ().leaf () : name);
+
+ if (!filter_entry (rs, base.dir, leaf, entry_type::regular))
+ return false;
dir_path chd (chroot_path (rs, base.dir));
- path f (chd / (name.empty () ? t->path ().leaf () : name));
+ path f (chd / leaf);
try
{
@@ -1319,12 +1600,12 @@ namespace build2
if (t != nullptr)
{
if (name.empty ())
- print_diag ("uninstall ", *t, chd, "<-");
+ print_diag ("uninstall", *t, chd, "<-");
else
- print_diag ("uninstall ", *t, f, "<-");
+ print_diag ("uninstall", *t, f, "<-");
}
else
- print_diag ("uninstall ", f);
+ print_diag ("uninstall", f);
}
uninstall_f_impl (rs, base, f, verbosity);
@@ -1334,10 +1615,15 @@ namespace build2
bool file_rule::
uninstall_l (const scope& rs,
const install_dir& base,
- const path& /*target*/,
const path& link,
+ const path& /*link_target*/,
uint16_t verbosity)
{
+ assert (link.simple () && !link.empty ());
+
+ if (!filter_entry (rs, base.dir, link, entry_type::symlink))
+ return false;
+
dir_path chd (chroot_path (rs, base.dir));
path f (chd / link);
@@ -1439,7 +1725,9 @@ namespace build2
//
target_state r (target_state::unchanged);
- if (!tp.empty ())
+ bool fr (filter (a, t, t));
+
+ if (fr && !tp.empty ())
r |= uninstall_target (t, cast<path> (t[var_install (rs)]), 1);
// Then installable ad hoc group members, if any. To be anally precise,
@@ -1454,12 +1742,15 @@ namespace build2
{
if (!mf->path ().empty () && mf->mtime () != timestamp_nonexistent)
{
- if (const path* p = lookup_install<path> (*m, "install"))
+ if (filter (a, t, *mf))
{
- r |= uninstall_target (
- *mf,
- *p,
- tp.empty () || r != target_state::changed ? 1 : 2);
+ if (const path* p = lookup_install<path> (*m, "install"))
+ {
+ r |= uninstall_target (
+ *mf,
+ *p,
+ !fr || tp.empty () || r != target_state::changed ? 1 : 2);
+ }
}
}
}
@@ -1471,5 +1762,40 @@ namespace build2
return r;
}
+
+ // fsdir_rule
+ //
+ const fsdir_rule fsdir_rule::instance;
+
+ bool fsdir_rule::
+ match (action, target&) const
+ {
+ // We always match.
+ //
+ // Note that we are called both as the outer part during the update-for-
+ // un/install pre-operation and as the inner part during the un/install
+ // operation itself.
+ //
+ return true;
+ }
+
+ recipe fsdir_rule::
+ apply (action a, target& t) const
+ {
+ // If this is outer part of the update-for-un/install, delegate to the
+ // default fsdir rule. Otherwise, this is a noop (we don't install
+ // fsdir{}).
+ //
+ // For now we also assume we don't need to do anything for prerequisites
+ // (the only sensible prerequisite of fsdir{} is another fsdir{}).
+ //
+ if (a.operation () == update_id)
+ {
+ match_inner (a, t);
+ return inner_recipe;
+ }
+ else
+ return noop_recipe;
+ }
}
}
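
For reference, the prerequisite_target auxiliary data convention used by
apply_impl() and reapply_impl() above:

  pto.target != nullptr              matched; nothing to re-try
  pto.target == nullptr, data == 1   filtered out by filter(); reapply_impl()
                                     re-runs the filter and may match it
  pto.target == nullptr, data == 0   ignored for another reason (not
                                     installable or no rule); skipped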
diff --git a/libbuild2/install/rule.hxx b/libbuild2/install/rule.hxx
index 98d2d0d..b023af5 100644
--- a/libbuild2/install/rule.hxx
+++ b/libbuild2/install/rule.hxx
@@ -25,42 +25,60 @@ namespace build2
match (action, target&) const override;
// Return NULL if this prerequisite should be ignored and pointer to its
- // target otherwise.
+ // target otherwise. In the latter case, return the match options that
+ // should be used for this prerequisite (use match_extra::all_options
+ // and not 0 if no match options are needed).
//
    // The default implementation ignores prerequisites that are outside of
// the installation scope (see install_scope() for details).
//
+ // The default implementation always returns match_extra::all_options.
+ // The match_extra argument is not used by the default implementation.
+ //
// The prerequisite is passed as an iterator allowing the filter to
// "see" inside groups.
//
using prerequisite_iterator =
prerequisite_members_range<group_prerequisites>::iterator;
- virtual const target*
+ virtual pair<const target*, uint64_t>
filter (const scope*,
- action, const target&, prerequisite_iterator&) const;
+ action, const target&, prerequisite_iterator&,
+ match_extra&) const;
- virtual const target*
- filter (const scope*, action, const target&, const prerequisite&) const;
+ virtual pair<const target*, uint64_t>
+ filter (const scope*,
+ action, const target&, const prerequisite&,
+ match_extra&) const;
+ // Note: rule::apply() override (with match_extra).
+ //
virtual recipe
- apply (action, target&) const override;
+ apply (action, target&, match_extra&) const override;
+
+ // Implementation of apply().
+ //
+ // If the implementation may call reapply_impl(), then the reapply
+ // argument to apply_impl() must be true. Note that in this case, the
+ // *_impl() functions use the prerequisite_target::data member for own
+ // housekeeping.
+ //
+ recipe
+ apply_impl (action, target&, match_extra&, bool reapply = false) const;
+
+ // Implementation of reapply() that re-tries prerequisites that have
+ // been filtered out during the reapply() call. Note that currently not
+ // supported for update, only for install/uninstall.
+ //
+ void
+ reapply_impl (action, target&, match_extra&) const;
alias_rule () {}
static const alias_rule instance;
- };
-
- class fsdir_rule: public simple_rule
- {
- public:
- virtual bool
- match (action, target&) const override;
+ private:
virtual recipe
- apply (action, target&) const override;
-
- fsdir_rule () {}
- static const fsdir_rule instance;
+ apply (action, target&) const override; // Dummy simple_rule override.
};
// In addition to the alias rule's semantics, this rule sees through to
@@ -80,28 +98,26 @@ namespace build2
virtual bool
match (action, target&) const override;
- // Return NULL if this group member should be ignored and pointer to its
- // target otherwise.
+ // Return false if this group member should be ignored and true
+ // otherwise. Note that this filter is called during apply().
//
// The default implementation accepts all members.
//
- virtual const target*
+ virtual bool
filter (action, const target&, const target& group_member) const;
// Return NULL if this prerequisite should be ignored and pointer to its
- // target otherwise.
+ // target otherwise. The same semantics as in file_rule below.
//
- // The same semantics as in file_rule below.
- //
- using alias_rule::filter; // "Unhide" to make Clang happy.
-
- virtual const target*
+ virtual pair<const target*, uint64_t>
filter (const scope*,
- action, const target&,
- const prerequisite&) const override;
+ action, const target&, const prerequisite&,
+ match_extra&) const override;
+
+ using alias_rule::filter; // "Unhide" to make Clang happy.
virtual recipe
- apply (action, target&) const override;
+ apply (action, target&, match_extra&) const override;
group_rule (bool sto): see_through_only (sto) {}
static const group_rule instance;
@@ -117,8 +133,21 @@ namespace build2
virtual bool
match (action, target&) const override;
+ // Return false if this ad hoc group member should be ignored and true
+ // otherwise. Note that this filter is called during execute and only
+ // for install/uninstall (and not update). For generality, it is also
+ // (first) called on the target itself (can be detected by comparing
+ // the second and third arguments).
+ //
+ // The default implementation accepts all members.
+ //
+ virtual bool
+ filter (action, const target&, const target& adhoc_group_member) const;
+
// Return NULL if this prerequisite should be ignored and pointer to its
- // target otherwise.
+ // target otherwise. In the latter case, return the match options that
+ // should be used for this prerequisite (use match_extra::all_options
+ // and not 0 if no match options are needed).
//
    // The default implementation ignores prerequisites that are outside of
// the installation scope (see install_scope() for details). It also
@@ -130,27 +159,47 @@ namespace build2
//
// exe{foo}: exe{bar}: install = true # foo runs bar
//
+ // The default implementation always returns match_extra::all_options.
+ // The match_extra argument is not used by the default implementation.
+ //
// The prerequisite is passed as an iterator allowing the filter to
// "see" inside groups.
//
using prerequisite_iterator =
prerequisite_members_range<group_prerequisites>::iterator;
- virtual const target*
+ virtual pair<const target*, uint64_t>
filter (const scope*,
- action, const target&, prerequisite_iterator&) const;
+ action, const target&, prerequisite_iterator&,
+ match_extra&) const;
- virtual const target*
- filter (const scope*, action, const target&, const prerequisite&) const;
+ virtual pair<const target*, uint64_t>
+ filter (const scope*,
+ action, const target&, const prerequisite&,
+ match_extra&) const;
+ // Note: rule::apply() override (with match_extra).
+ //
virtual recipe
- apply (action, target&) const override;
+ apply (action, target&, match_extra&) const override;
- // Implementation of apply() that returns empty_recipe if the target is
- // not installable.
+ // Implementation of apply() that returns empty_recipe (i.e., NULL) if
+ // the target is not installable.
+ //
+ // If the implementation may call reapply_impl(), then the reapply
+ // argument to apply_impl() must be true. Note that in this case, the
+ // *_impl() functions use the prerequisite_target::data member for own
+ // housekeeping.
//
recipe
- apply_impl (action, target&) const;
+ apply_impl (action, target&, match_extra&, bool reapply = false) const;
+
+ // Implementation of reapply() that re-tries prerequisites that have
+ // been filtered out during the reapply() call. Note that currently not
+ // supported for update, only for install/uninstall.
+ //
+ void
+ reapply_impl (action, target&, match_extra&) const;
static target_state
perform_update (action, const target&);
@@ -188,10 +237,16 @@ namespace build2
//
// install -d <dir>
//
+ // Note: <dir> is expected to be absolute.
+ //
+ // Note that the target argument only specifies which target caused
+ // this directory to be created.
+ //
static void
install_d (const scope& rs,
const install_dir& base,
const dir_path& dir,
+ const file& target,
uint16_t verbosity = 1);
// Install a file:
@@ -199,6 +254,8 @@ namespace build2
// install <file> <base>/ # if <name> is empty
// install <file> <base>/<name> # if <name> is not empty
//
+ // Note that <name> should be a simple path.
+ //
static void
install_f (const scope& rs,
const install_dir& base,
@@ -209,13 +266,25 @@ namespace build2
// Install (make) a symlink:
//
- // ln -s <target> <base>/<link>
+ // install -l <link_target> <base>/<link>
+ //
+ // Which is essentially:
+ //
+ // ln -s <link_target> <base>/<link>
+ //
+ // Note that <link> should be a simple path. Note that <link_target>
+ // must not be absolute if relocatable installation is requested
+ // (config.install.relocatable).
+ //
+ // Note that the target argument only specifies which target this
+ // symlink "belongs" to.
//
static void
install_l (const scope& rs,
const install_dir& base,
- const path& target,
const path& link,
+ const file& target,
+ const path& link_target,
uint16_t verbosity = 1);
// Uninstall (remove) a file or symlink:
@@ -241,8 +310,8 @@ namespace build2
static bool
uninstall_l (const scope& rs,
const install_dir& base,
- const path& target,
const path& link,
+ const path& link_target,
uint16_t verbosity = 1);
@@ -250,9 +319,9 @@ namespace build2
//
// uninstall -d <dir>
//
- // We try to remove all the directories between base and dir but not base
- // itself unless base == dir. Return false if nothing has been removed
- // (i.e., the directories do not exist or are not empty).
+ // We try to remove all the directories between base and dir but not
+ // base itself unless base == dir. Return false if nothing has been
+ // removed (i.e., the directories do not exist or are not empty).
//
static bool
uninstall_d (const scope& rs,
@@ -268,6 +337,23 @@ namespace build2
static const file_rule instance;
file_rule () {}
+
+ private:
+ virtual recipe
+ apply (action, target&) const override; // Dummy simple_rule override.
+ };
+
+ class fsdir_rule: public simple_rule
+ {
+ public:
+ virtual bool
+ match (action, target&) const override;
+
+ virtual recipe
+ apply (action, target&) const override;
+
+ fsdir_rule () {}
+ static const fsdir_rule instance;
};
}
}
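
A minimal sketch (not part of this diff) of how a rule derived from file_rule
could opt into the reapply protocol declared above. The class name is made up
and the reapply() signature is an assumption based on the comments; the real
derived rules live in other modules.

  class my_install_rule: public file_rule
  {
  public:
    virtual recipe
    apply (action a, target& t, match_extra& me) const override
    {
      // Pass true for reapply so that filtered-out prerequisites are
      // recorded (with auxiliary data 1) and can be re-tried later.
      //
      recipe r (apply_impl (a, t, me, true /* reapply */));
      return r != nullptr ? move (r) : noop_recipe;
    }

    virtual void
    reapply (action a, target& t, match_extra& me) const override
    {
      // Only meaningful for install/uninstall (not update).
      //
      reapply_impl (a, t, me);
    }
  };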
diff --git a/libbuild2/install/utility.cxx b/libbuild2/install/utility.cxx
index c2a581e..43d97fb 100644
--- a/libbuild2/install/utility.cxx
+++ b/libbuild2/install/utility.cxx
@@ -3,6 +3,9 @@
#include <libbuild2/install/utility.hxx>
+#include <libbuild2/variable.hxx>
+#include <libbuild2/diagnostics.hxx>
+
namespace build2
{
namespace install
@@ -32,5 +35,261 @@ namespace build2
return nullptr;
}
+
+ bool
+ filter_entry (const scope& rs,
+ const dir_path& base,
+ const path& leaf,
+ entry_type type)
+ {
+ assert (type != entry_type::unknown &&
+ (type == entry_type::directory) == leaf.empty ());
+
+ const filters* fs (cast_null<filters> (rs["install.filter"]));
+
+ if (fs == nullptr || fs->empty ())
+ return true;
+
+ tracer trace ("install::filter");
+
+ // Parse, resolve, and apply each filter in order.
+ //
+ // If redoing all this work for every entry proves too slow, we can
+ // consider some form of caching (e.g., on the per-project basis).
+ //
+ auto i (fs->begin ());
+
+ bool negate (false);
+ if (i->first == "!")
+ {
+ negate = true;
+ ++i;
+ }
+
+ size_t limit (0); // See below.
+
+ for (auto e (fs->end ()); i != e; ++i)
+ {
+ const pair<string, optional<string>>& kv (*i);
+
+ path k;
+ try
+ {
+ k = path (kv.first);
+
+ if (k.absolute ())
+ k.normalize ();
+ }
+ catch (const invalid_path&)
+ {
+ fail << "invalid path '" << kv.first << "' in config.install.filter "
+ << "value";
+ }
+
+ bool v;
+ {
+ const string& s (kv.second ? *kv.second : string ());
+
+ size_t p (s.find (','));
+
+ if (s.compare (0, p, "true") == 0)
+ v = true;
+ else if (s.compare (0, p, "false") == 0)
+ v = false;
+ else
+ fail << "expected true or false instead of '" << string (s, 0, p)
+ << "' in config.install.filter value" << endf;
+
+ if (p != string::npos)
+ {
+ if (s.compare (p + 1, string::npos, "symlink") == 0)
+ {
+ if (type != entry_type::symlink)
+ continue;
+ }
+ else
+ fail << "unknown modifier '" << string (s, p + 1) << "' in "
+ << "config.install.filter value";
+ }
+ }
+
+ // @@ TODO (see below for all the corner cases). Note that in a sense
+      //    we already support matching a file in any subdirectory via
+      //    simple patterns, so perhaps this is not worth the trouble. Or we
+ // could support some limited form (e.g., `**` should be in the
+ // last component). But it may still be tricky to determine if
+ // it is a sub-filter.
+ //
+ if (path_pattern_recursive (k))
+ fail << "recursive wildcard pattern '" << kv.first << "' in "
+ << "config.install.filter value";
+
+ if (k.simple () && !k.to_directory ())
+ {
+ // Simple name/pattern matched against the leaf.
+ //
+ // @@ What if it is `**`?
+ //
+ if (path_pattern (k))
+ {
+ if (!path_match (leaf, k))
+ continue;
+ }
+ else
+ {
+ if (k != leaf)
+ continue;
+ }
+ }
+ else
+ {
+ // Split into directory and leaf.
+ //
+ // @@ What if leaf is `**`?
+ //
+ dir_path d;
+ if (k.to_directory ())
+ {
+ d = path_cast<dir_path> (move (k));
+ k = path (); // No leaf.
+ }
+ else
+ {
+ d = k.directory ();
+ k.make_leaf ();
+ }
+
+ // Resolve relative directory.
+ //
+ // Note that this resolution is potentially project-specific (that
+        // is, different projects may have different install.* locations).
+ //
+ // Note that if the first component is/contains a wildcard (e.g.,
+        // `*/`), then the resolution will fail, which feels correct (what
+ // does */ mean?).
+ //
+ if (d.relative ())
+ {
+ // @@ Strictly speaking, this should be base, not root scope.
+ //
+ d = resolve_dir (rs, move (d));
+ }
+
+ // Return the number of path components in the path.
+ //
+ auto path_comp = [] (const path& p)
+ {
+ size_t n (0);
+ for (auto i (p.begin ()); i != p.end (); ++i)
+ ++n;
+ return n;
+ };
+
+ // We need the sub() semantics but which uses pattern match instead
+ // of equality for the prefix. Looks like chopping off the path and
+ // calling path_match() on that is the best we can do.
+ //
+ // @@ Assumes no `**` components.
+ //
+ auto path_sub = [&path_comp] (const dir_path& ent,
+ const dir_path& pat,
+ size_t n = 0)
+ {
+ if (n == 0)
+ n = path_comp (pat);
+
+ dir_path p;
+ for (auto i (ent.begin ()); n != 0 && i != ent.end (); --n, ++i)
+ p.combine (*i, i.separator ());
+
+ return path_match (p, pat);
+ };
+
+ // The following checks should continue on no match and fall through
+ // to return.
+ //
+ if (k.empty ()) // Directory.
+ {
+ // Directories have special semantics.
+ //
+ // Consider this sequence of filters:
+ //
+ // include/x86_64-linux-gnu/@true
+ // include/x86_64-linux-gnu/details/@false
+ // include/@false
+ //
+ // It seems the semantics we want is that only subcomponent
+ // filters should apply. Maybe remember the latest matched
+ // directory as a current limit? But perhaps we don't need to
+ // remember the directory itself but the number of path
+ // components?
+ //
+ // I guess for patterns we will use the actual matched directory,
+ // not the pattern, to calculate the limit? @@ Because we
+ // currently don't support `**`, we for now can count components
+ // in the pattern.
+
+ // Check if this is a sub-filter.
+ //
+ size_t n (path_comp (d));
+ if (n <= limit)
+ continue;
+
+ if (path_pattern (d))
+ {
+ if (!path_sub (base, d, n))
+ continue;
+ }
+ else
+ {
+ if (!base.sub (d))
+ continue;
+ }
+
+ if (v)
+ {
+ limit = n;
+ continue; // Continue looking for sub-filters.
+ }
+ }
+ else
+ {
+ if (path_pattern (d))
+ {
+ if (!path_sub (base, d))
+ continue;
+ }
+ else
+ {
+ if (!base.sub (d))
+ continue;
+ }
+
+ if (path_pattern (k))
+ {
+ // @@ Does not handle `**`.
+ //
+ if (!path_match (leaf, k))
+ continue;
+ }
+ else
+ {
+ if (k != leaf)
+ continue;
+ }
+ }
+ }
+
+ if (negate)
+ v = !v;
+
+ l4 ([&]{trace << (base / leaf)
+ << (v ? " included by " : " excluded by ")
+ << kv.first << '@' << *kv.second;});
+ return v;
+ }
+
+ return !negate;
+ }
}
}
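
A worked example of the filtering logic above, with hypothetical paths and
assuming include/ resolves to /usr/local/include:

  config.install.filter = include/foo/details/@false

  filter_entry (rs, /usr/local/include/foo,         foo.hxx,  regular)
    -> no filter matches, the default applies         -> true  (install)

  filter_entry (rs, /usr/local/include/foo/details, impl.hxx, regular)
    -> directory filter include/foo/details/ applies  -> false (skip)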
diff --git a/libbuild2/install/utility.hxx b/libbuild2/install/utility.hxx
index 3e2dcad..fc40ebe 100644
--- a/libbuild2/install/utility.hxx
+++ b/libbuild2/install/utility.hxx
@@ -9,6 +9,7 @@
#include <libbuild2/scope.hxx>
#include <libbuild2/target.hxx>
+#include <libbuild2/filesystem.hxx> // entry_type
#include <libbuild2/export.hxx>
@@ -69,22 +70,56 @@ namespace build2
install_scope (const target&);
// Resolve relative installation directory path (e.g., include/libfoo) to
- // its absolute directory path (e.g., /usr/include/libfoo). If the
- // resolution encountered an unknown directory, issue diagnostics and fail
- // unless fail_unknown is false, in which case return empty directory.
+ // its absolute and normalized directory path (e.g., /usr/include/libfoo).
+ // If the resolution encountered an unknown directory, issue diagnostics
+ // and fail unless fail_unknown is false, in which case return empty
+ // directory.
+ //
+ // For rel_base semantics, see the $install.resolve() documentation. Note
+ // that fail_unknown does not apply to the rel_base resolution.
//
// Note: implemented in rule.cxx.
//
LIBBUILD2_SYMEXPORT dir_path
- resolve_dir (const target&, dir_path, bool fail_unknown = true);
+ resolve_dir (const target&,
+ dir_path,
+ dir_path rel_base = {},
+ bool fail_unknown = true);
LIBBUILD2_SYMEXPORT dir_path
- resolve_dir (const scope&, dir_path, bool fail_unknown = true);
+ resolve_dir (const scope&,
+ dir_path,
+ dir_path rel_base = {},
+ bool fail_unknown = true);
// Resolve file installation path returning empty path if not installable.
//
LIBBUILD2_SYMEXPORT path
resolve_file (const file&); // rule.cxx
+
+  // Given an absolute path return its chroot'ed version, if any, according to
+ // install.chroot.
+ //
+ template <typename P>
+ inline P
+ chroot_path (const scope& rs, const P& p)
+ {
+ assert (p.absolute ());
+ const dir_path* d (cast_null<dir_path> (rs["install.chroot"]));
+ return d != nullptr ? *d / p.leaf (p.root_directory ()) : p;
+ }
+
+ // Installation filtering (config.install.filter).
+ //
+ // If entry type is a directory, then leaf must be empty.
+ //
+ using filters = vector<pair<string, optional<string>>>;
+
+ LIBBUILD2_SYMEXPORT bool
+ filter_entry (const scope& rs,
+ const dir_path& base,
+ const path& leaf,
+ entry_type);
}
}
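
For illustration, with hypothetical values (the scope lookup is elided; only
the path arithmetic performed by chroot_path() above is shown):

  dir_path chroot ("/tmp/stage");         // install.chroot
  path p ("/usr/local/lib/libhello.so");  // already-resolved absolute path

  // p.root_directory() is '/', so p.leaf() relative to it is
  // usr/local/lib/libhello.so and the chroot'ed result is
  // /tmp/stage/usr/local/lib/libhello.so.
  //
  path r (chroot / p.leaf (p.root_directory ()));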
diff --git a/libbuild2/json.cxx b/libbuild2/json.cxx
new file mode 100644
index 0000000..4ed1631
--- /dev/null
+++ b/libbuild2/json.cxx
@@ -0,0 +1,904 @@
+// file : libbuild2/json.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <libbuild2/json.hxx>
+
+#include <limits>
+
+#ifndef BUILD2_BOOTSTRAP
+# include <libbutl/json/parser.hxx>
+# include <libbutl/json/serializer.hxx>
+#endif
+
+namespace build2
+{
+ // json_event
+ //
+#ifndef BUILD2_BOOTSTRAP
+ const char*
+ to_string (json_event e)
+ {
+ switch (e)
+ {
+ case json_event::begin_object: return "beginning of object";
+ case json_event::end_object: return "end of object";
+ case json_event::begin_array: return "beginning of array";
+ case json_event::end_array: return "end of array";
+ case json_event::name: return "member name";
+ case json_event::string: return "string value";
+ case json_event::number: return "numeric value";
+ case json_event::boolean: return "boolean value";
+ case json_event::null: return "null value";
+ }
+
+ return "";
+ }
+#endif
+
+ // json_type
+ //
+ const char*
+ to_string (json_type t, bool dn) noexcept
+ {
+ using type = json_type;
+
+ switch (t)
+ {
+ case type::null: return "null";
+ case type::boolean: return "boolean";
+ case type::signed_number: return dn ? "signed number" : "number";
+ case type::unsigned_number: return dn ? "unsigned number" : "number";
+ case type::hexadecimal_number: return dn ? "hexadecimal number" : "number";
+ case type::string: return "string";
+ case type::array: return "array";
+ case type::object: return "object";
+ }
+ return "";
+ }
+
+ // json_value
+ //
+ const json_value null_json_value (json_type::null);
+
+ [[noreturn]] void
+ json_as_throw (json_type t, json_type e)
+ {
+ string m;
+ m = "expected ";
+ m += to_string (e, true);
+ m += " instead of ";
+ m += to_string (t, true);
+ throw invalid_argument (move (m));
+ }
+
+ [[noreturn]] static void
+ at_throw (json_type t, json_type e, bool index)
+ {
+ string m;
+
+ if (t != e && t != json_type::null)
+ {
+ m = "expected ";
+ m += to_string (e, true);
+ m += " instead of ";
+ m += to_string (t, true);
+ throw invalid_argument (move (m));
+ }
+ else
+ {
+ m = index ? "index" : "name";
+ m += " out of range in ";
+ m += to_string (e, true);
+ throw std::out_of_range (move (m));
+ }
+ }
+
+ const json_value& json_value::
+ at (size_t index) const
+ {
+ if (type == json_type::array)
+ {
+ if (index < array.size ())
+ return array[index];
+ }
+
+ at_throw (type, json_type::array, true);
+ }
+
+ json_value& json_value::
+ at (size_t index)
+ {
+ if (type == json_type::array)
+ {
+ if (index < array.size ())
+ return array[index];
+ }
+
+ at_throw (type, json_type::array, true);
+ }
+
+#if 0
+ const json_value& json_value::
+ operator[] (size_t index) const
+ {
+ if (type == json_type::null)
+ return null_json_value;
+
+ if (type == json_type::array)
+ return index < array.size () ? array[index] : null_json_value;
+
+ at_throw (type, json_type::array, true);
+ }
+
+ json_value& json_value::
+ operator[] (size_t index)
+ {
+ if (type == json_type::null)
+ {
+ new (&array) array_type ();
+ type = json_type::array;
+ }
+
+ if (type == json_type::array)
+ {
+ size_t n (array.size ());
+
+ if (index < n)
+ return array[index];
+
+ // If there are missing elements in between, fill them with nulls.
+ //
+ if (index != n)
+ array.resize (index, json_value ());
+
+ array.push_back (json_value ());
+ return array.back ();
+ }
+
+ at_throw (type, json_type::array, true);
+ }
+#endif
+
+ const json_value& json_value::
+ at (const char* name) const
+ {
+ if (type == json_type::object)
+ {
+ auto i (find_if (object.begin (), object.end (),
+ [name] (const json_member& m)
+ {
+ return m.name == name;
+ }));
+
+ if (i != object.end ())
+ return i->value;
+ }
+
+ at_throw (type, json_type::object, false);
+ }
+
+ json_value& json_value::
+ at (const char* name)
+ {
+ if (type == json_type::object)
+ {
+ auto i (find_if (object.begin (), object.end (),
+ [name] (const json_member& m)
+ {
+ return m.name == name;
+ }));
+
+ if (i != object.end ())
+ return i->value;
+ }
+
+ at_throw (type, json_type::object, false);
+ }
+
+ const json_value* json_value::
+ find (const char* name) const
+ {
+ if (type == json_type::object)
+ {
+ auto i (find_if (object.begin (), object.end (),
+ [name] (const json_member& m)
+ {
+ return m.name == name;
+ }));
+ return i != object.end () ? &i->value : nullptr;
+ }
+
+ at_throw (type, json_type::object, false);
+ }
+
+ json_value* json_value::
+ find (const char* name)
+ {
+ if (type == json_type::object)
+ {
+ auto i (find_if (object.begin (), object.end (),
+ [name] (const json_member& m)
+ {
+ return m.name == name;
+ }));
+
+ return i != object.end () ? &i->value : nullptr;
+ }
+
+ at_throw (type, json_type::object, false);
+ }
+
+#if 0
+ const json_value& json_value::
+ operator[] (const char* name) const
+ {
+ if (type == json_type::null)
+ return null_json_value;
+
+ if (type == json_type::object)
+ {
+ auto i (find_if (object.begin (), object.end (),
+ [name] (const json_member& m)
+ {
+ return m.name == name;
+ }));
+
+
+ return i != object.end () ? i->value : null_json_value;
+ }
+
+ at_throw (type, json_type::object, false);
+ }
+
+ json_value& json_value::
+ operator[] (const char* name)
+ {
+ if (type == json_type::null)
+ {
+ new (&object) object_type ();
+ type = json_type::object;
+ }
+
+ if (type == json_type::object)
+ {
+ auto i (find_if (object.begin (), object.end (),
+ [name] (const json_member& m)
+ {
+ return m.name == name;
+ }));
+
+ if (i != object.end ())
+ return i->value;
+
+ object.push_back (json_member {name, json_value ()});
+ return object.back ().value;
+ }
+
+ at_throw (type, json_type::object, false);
+ }
+#endif
+
+ int json_value::
+ compare (const json_value& v) const noexcept
+ {
+ int r (0);
+ {
+ // Note: we need to treat unsigned and hexadecimal the same.
+ //
+ json_type t (type == json_type::hexadecimal_number
+ ? json_type::unsigned_number
+ : type);
+
+ json_type vt (v.type == json_type::hexadecimal_number
+ ? json_type::unsigned_number
+ : v.type);
+
+ if (t != vt)
+ {
+ // Handle the special signed/unsigned number case here.
+ //
+ if (t == json_type::signed_number &&
+ vt == json_type::unsigned_number)
+ {
+ if (signed_number < 0)
+ r = -1;
+ else
+ {
+ uint64_t u (static_cast<uint64_t> (signed_number));
+ r = u < v.unsigned_number ? -1 : (u > v.unsigned_number ? 1 : 0);
+ }
+ }
+ else if (t == json_type::unsigned_number &&
+ vt == json_type::signed_number)
+ {
+ if (v.signed_number < 0)
+ r = 1;
+ else
+ {
+ uint64_t u (static_cast<uint64_t> (v.signed_number));
+ r = unsigned_number < u ? -1 : (unsigned_number > u ? 1 : 0);
+ }
+ }
+ else
+ r = (static_cast<uint8_t> (t) < static_cast<uint8_t> (vt) ? -1 : 1);
+ }
+ }
+
+ if (r == 0)
+ {
+ switch (type)
+ {
+ case json_type::null:
+ {
+ r = 0;
+ break;
+ }
+ case json_type::boolean:
+ {
+ r = boolean == v.boolean ? 0 : boolean ? 1 : -1;
+ break;
+ }
+ case json_type::signed_number:
+ {
+ r = (signed_number < v.signed_number
+ ? -1
+ : (signed_number > v.signed_number ? 1 : 0));
+ break;
+ }
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ {
+ r = (unsigned_number < v.unsigned_number
+ ? -1
+ : (unsigned_number > v.unsigned_number ? 1 : 0));
+ break;
+ }
+ case json_type::string:
+ {
+ r = string.compare (v.string);
+ break;
+ }
+ case json_type::array:
+ {
+ auto i (array.begin ()), ie (array.end ());
+ auto j (v.array.begin ()), je (v.array.end ());
+
+ for (; i != ie && j != je; ++i, ++j)
+ {
+ if ((r = i->compare (*j)) != 0)
+ break;
+ }
+
+ if (r == 0)
+ r = i == ie ? (j == je ? 0 : -1) : 1; // More elements than other?
+
+ break;
+ }
+ case json_type::object:
+ {
+ // We don't expect there to be a large number of members so it makes
+ // sense to iterate in the lexicographical order without making any
+ // copies.
+ //
+ auto next = [] (object_type::const_iterator p, // == e for first
+ object_type::const_iterator b,
+ object_type::const_iterator e)
+ {
+ // We need to find an element with the "smallest" name that is
+ // greater than the previous entry.
+ //
+ auto n (e);
+
+ for (auto i (b); i != e; ++i)
+ {
+ if (p == e || i->name > p->name)
+ {
+ int r;
+ if (n == e || (r = n->name.compare (i->name)) > 0)
+ n = i;
+ else
+ assert (r != 0); // No duplicates.
+ }
+ }
+
+ return n;
+ };
+
+ auto ib (object.begin ()), ie (object.end ()), i (ie);
+ auto jb (v.object.begin ()), je (v.object.end ()), j (je);
+
+ for (;;)
+ {
+ // Note: we must call next() on both.
+ //
+ i = next (i, ib, ie);
+ j = next (j, jb, je);
+
+ if (i == ie || j == je)
+ break;
+
+ // Determine if both have this name and if not, which name comes
+ // first.
+ //
+ int n (i->name.compare (j->name));
+
+ r = (n < 0 // If i's first, then i is greater.
+ ? -1
+ : (n > 0 // If j's first, then j is greater.
+ ? 1
+ : i->value.compare (j->value))); // Both have this name.
+
+ if (r != 0)
+ break;
+ }
+
+ if (r == 0)
+ r = i == ie ? (j == je ? 0 : -1) : 1; // More members than other?
+
+ break;
+ }
+ }
+ }
+
+ return r;
+ }
+
+ static void
+ append_numbers (json_value& l, const json_value& r) noexcept
+ {
+ auto append = [&l] (uint64_t u, int64_t s, bool hex = false)
+ {
+ if (s < 0)
+ {
+      // The absolute value of the minimum signed integer is not
+      // representable in two's complement. So handle this
+ // specially for completeness.
+ //
+ uint64_t a (
+ s != std::numeric_limits<int64_t>::min ()
+ ? static_cast<uint64_t> (-s)
+ : static_cast<uint64_t> (std::numeric_limits<int64_t>::max ()) + 1);
+
+ if (u >= a)
+ {
+ l.unsigned_number = u - a;
+ l.type = (hex
+ ? json_type::hexadecimal_number
+ : json_type::unsigned_number);
+ }
+ else
+ {
+ l.signed_number = -static_cast<int64_t> (a - u);
+ l.type = json_type::signed_number;
+ }
+ }
+ else
+ {
+ l.unsigned_number = u + static_cast<uint64_t> (s);
+ l.type = (hex
+ ? json_type::hexadecimal_number
+ : json_type::unsigned_number);
+ }
+ };
+
+ // We try to keep LHS hex if possible.
+ //
+ if (l.type == json_type::signed_number)
+ {
+ if (r.type == json_type::signed_number)
+ {
+ // Deal with non-negative signed numbers for completeness.
+ //
+ if (l.signed_number >= 0)
+ append (static_cast <uint64_t> (l.signed_number), r.signed_number);
+ else if (r.signed_number >= 0)
+ append (static_cast <uint64_t> (r.signed_number), l.signed_number);
+ else
+ l.signed_number += r.signed_number;
+ }
+ else
+ append (r.unsigned_number, l.signed_number);
+ }
+ else
+ {
+ if (r.type == json_type::signed_number)
+ append (l.unsigned_number,
+ r.signed_number,
+ l.type == json_type::hexadecimal_number);
+ else
+ l.unsigned_number += r.unsigned_number;
+ }
+ }
+
+ void json_value::
+ append (json_value&& v, bool override)
+ {
+ if (type == json_type::null)
+ {
+ *this = move (v);
+ return;
+ }
+ else if (type == json_type::array)
+ {
+ if (v.type == json_type::array)
+ {
+ if (array.empty ())
+ array = move (v.array);
+ else
+ array.insert (array.end (),
+ make_move_iterator (v.array.begin ()),
+ make_move_iterator (v.array.end ()));
+ }
+ else
+ array.push_back (move (v));
+
+ return;
+ }
+ else
+ {
+ switch (v.type)
+ {
+ case json_type::null: return;
+ case json_type::boolean:
+ {
+ if (type != json_type::boolean)
+ break;
+
+ boolean = boolean || v.boolean;
+ return;
+ }
+ case json_type::signed_number:
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ {
+ if (type != json_type::signed_number &&
+ type != json_type::unsigned_number &&
+ type != json_type::hexadecimal_number)
+ break;
+
+ append_numbers (*this, v);
+ return;
+ }
+ case json_type::string:
+ {
+ if (type != json_type::string)
+ break;
+
+ string += v.string;
+ return;
+ }
+ case json_type::array: break;
+ case json_type::object:
+ {
+ if (type != json_type::object)
+ break;
+
+ if (object.empty ())
+ object = move (v.object);
+ else
+ {
+ for (json_member& m: v.object)
+ {
+ auto i (find_if (object.begin (), object.end (),
+ [&m] (const json_member& o)
+ {
+ return m.name == o.name;
+ }));
+ if (i == object.end ())
+ object.push_back (move (m));
+ else if (override)
+ i->value = move (m.value);
+ }
+ }
+
+ return;
+ }
+ }
+ }
+
+ throw invalid_argument (
+ string_type ("unable to append ") + to_string (v.type) + " to " +
+ to_string (type));
+ }
+
+ void json_value::
+ prepend (json_value&& v, bool override)
+ {
+ if (type == json_type::null)
+ {
+ *this = move (v);
+ return;
+ }
+ else if (type == json_type::array)
+ {
+ if (v.type == json_type::array)
+ {
+ if (array.empty ())
+ array = move (v.array);
+ else
+ array.insert (array.begin (),
+ make_move_iterator (v.array.begin ()),
+ make_move_iterator (v.array.end ()));
+ }
+ else
+ array.insert (array.begin (), move (v));
+
+ return;
+ }
+ else
+ {
+ switch (v.type)
+ {
+ case json_type::null: return;
+ case json_type::boolean:
+ {
+ if (type != json_type::boolean)
+ break;
+
+ boolean = boolean || v.boolean;
+ return;
+ }
+ case json_type::signed_number:
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ {
+ if (type != json_type::signed_number &&
+ type != json_type::unsigned_number &&
+ type != json_type::hexadecimal_number)
+ break;
+
+ append_numbers (*this, v);
+ return;
+ }
+ case json_type::string:
+ {
+ if (type != json_type::string)
+ break;
+
+ string.insert (0, v.string);
+ return;
+ }
+ case json_type::array: break;
+ case json_type::object:
+ {
+ if (type != json_type::object)
+ break;
+
+ if (object.empty ())
+ object = move (v.object);
+ else
+ {
+ for (json_member& m: v.object)
+ {
+ auto i (find_if (object.begin (), object.end (),
+ [&m] (const json_member& o)
+ {
+ return m.name == o.name;
+ }));
+ if (i == object.end ())
+ object.insert (object.begin (), move (m));
+ else if (override)
+ i->value = move (m.value);
+ }
+ }
+
+ return;
+ }
+ }
+ }
+
+ throw invalid_argument (
+ string_type ("unable to prepend ") + to_string (v.type) + " to " +
+ to_string (type));
+ }
+
+#ifndef BUILD2_BOOTSTRAP
+ json_value::
+ json_value (json_parser& p, optional<json_type> et)
+ {
+ using namespace butl::json;
+
+ // A JSON input text cannot be empty.
+ //
+ // Once we have JSON5 support we will be able to distinguish hexadecimal
+ // numbers.
+ //
+ json_type t (json_type::null);
+ switch (*p.next ())
+ {
+ case event::begin_object: t = json_type::object; break;
+ case event::begin_array: t = json_type::array; break;
+ case event::string: t = json_type::string; break;
+ case event::number: t = (p.value ()[0] == '-'
+ ? json_type::signed_number
+ : json_type::unsigned_number); break;
+ case event::boolean: t = json_type::boolean; break;
+ case event::null: t = json_type::null; break;
+ case event::name:
+ case event::end_array:
+ case event::end_object:
+ {
+ assert (false);
+ type = json_type::null;
+ return;
+ }
+ }
+
+ if (et && *et != t)
+ {
+ throw invalid_json_input (
+ p.input_name != nullptr ? p.input_name : "",
+ p.line (),
+ p.column (),
+ p.position (),
+ string_type ("expected ") + to_string (*et, true) + " instead of " +
+ to_string (t, true));
+ }
+
+ switch (t)
+ {
+ case json_type::object:
+ {
+ object_type o; // For exception safety.
+ while (*p.next () != event::end_object)
+ {
+ string_type n (p.name ());
+
+ // Check for duplicates. For now we fail but in the future we may
+ // provide a mode (via a flag) to override instead.
+ //
+ if (find_if (o.begin (), o.end (),
+ [&n] (const json_member& m)
+ {
+ return m.name == n;
+ }) != o.end ())
+ {
+ throw invalid_json_input (
+ p.input_name != nullptr ? p.input_name : "",
+ p.line (),
+ p.column (),
+ p.position (),
+ "duplicate object member '" + n + '\'');
+ }
+
+ o.push_back (json_member {move (n), json_value (p)});
+ }
+
+ new (&object) object_type (move (o));
+ type = t;
+ break;
+ }
+ case json_type::array:
+ {
+ array_type c; // For exception safety.
+ while (*p.peek () != event::end_array)
+ c.push_back (json_value (p));
+ p.next (); // Consume end_array.
+
+ new (&array) array_type (move (c));
+ type = t;
+ break;
+ }
+ case json_type::string:
+ {
+ string_type& s (p.value ());
+
+ // Don't move if small string optimized.
+ //
+ if (s.size () > 15)
+ new (&string) string_type (move (s));
+ else
+ new (&string) string_type (s);
+
+ type = t;
+ break;
+ }
+ case json_type::signed_number:
+ {
+ signed_number = p.value<int64_t> ();
+ type = t;
+ break;
+ }
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ {
+ unsigned_number = p.value<uint64_t> ();
+ type = t;
+ break;
+ }
+ case json_type::boolean:
+ {
+ boolean = p.value<bool> ();
+ type = t;
+ break;
+ }
+ case json_type::null:
+ {
+ type = t;
+ break;
+ }
+ }
+ }
+
+ void json_value::
+ serialize (json_buffer_serializer& s, optional<json_type> et) const
+ {
+ using namespace butl::json;
+
+ if (et && *et != type)
+ {
+ throw invalid_json_output (
+ nullopt,
+ invalid_json_output::error_code::invalid_value,
+ string_type ("expected ") + to_string (*et, true) + " instead of " +
+ to_string (type, true));
+ }
+
+ switch (type)
+ {
+ case json_type::null:
+ {
+ s.value (nullptr);
+ break;
+ }
+ case json_type::boolean:
+ {
+ s.value (boolean);
+ break;
+ }
+ case json_type::signed_number:
+ {
+ s.value (signed_number);
+ break;
+ }
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ {
+ // When we have JSON5 support, we will be able to serialize
+ // hexadecimal properly.
+ //
+ s.value (unsigned_number);
+ break;
+ }
+ case json_type::string:
+ {
+ s.value (string);
+ break;
+ }
+ case json_type::array:
+ {
+ s.begin_array ();
+ for (const json_value& e: array)
+ e.serialize (s);
+ s.end_array ();
+ break;
+ }
+ case json_type::object:
+ {
+ s.begin_object ();
+ for (const json_member& m: object)
+ {
+ s.member_name (m.name);
+ m.value.serialize (s);
+ }
+ s.end_object ();
+ break;
+ }
+ }
+ }
+
+#else
+ json_value::
+ json_value (json_parser&, optional<json_type>)
+ {
+ assert (false);
+ type = json_type::null;
+ }
+
+ void json_value::
+ serialize (json_buffer_serializer&, optional<json_type>) const
+ {
+ assert (false);
+ }
+#endif
+}
diff --git a/libbuild2/json.hxx b/libbuild2/json.hxx
new file mode 100644
index 0000000..96596e3
--- /dev/null
+++ b/libbuild2/json.hxx
@@ -0,0 +1,369 @@
+// file : libbuild2/json.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef LIBBUILD2_JSON_HXX
+#define LIBBUILD2_JSON_HXX
+
+#include <libbuild2/types.hxx>
+#include <libbuild2/utility.hxx>
+
+#include <libbuild2/export.hxx>
+
+namespace butl
+{
+ namespace json
+ {
+ enum class event: uint8_t;
+ class parser;
+ class buffer_serializer;
+ class stream_serializer;
+ class invalid_json_input;
+ class invalid_json_output;
+ }
+}
+
+namespace build2
+{
+ using json_event = butl::json::event;
+ using json_parser = butl::json::parser;
+ using json_buffer_serializer = butl::json::buffer_serializer;
+ using json_stream_serializer = butl::json::stream_serializer;
+ using butl::json::invalid_json_input;
+ using butl::json::invalid_json_output;
+
+#ifndef BUILD2_BOOTSTRAP
+ LIBBUILD2_SYMEXPORT const char*
+ to_string (json_event);
+#endif
+
+ // @@ TODO:
+ //
+ // - provide swap().
+ // - provide operator=(uint64_t), etc.
+ // - provide std::hash specialization
+ // - tighten the at()/[] interface in json_array and json_object
+ // - tighten noexcept where possible
+ // - operator bool() - in a sense null is like nullopt.
+ //
+
+ // This JSON representation has one extension compared to the standard JSON
+ // model: it distinguishes between signed, unsigned, and hexadecimal
+ // numbers.
+ //
+ // Note also that we don't assume that object members are in a sorted order
+ // (but do assume there are no duplicates). However, we could add an
+ // argument to signal that this is the case to speed up some functions, for
+ // example, compare().
+ //
+ enum class json_type: uint8_t
+ {
+ null, // Note: keep first for comparison.
+ boolean,
+ signed_number,
+ unsigned_number,
+ hexadecimal_number,
+ string,
+ array,
+ object,
+ };
+
+ // Return the JSON type as string. If distinguish_numbers is true, then
+ // distinguish between the signed, unsigned, and hexadecimal types.
+ //
+ LIBBUILD2_SYMEXPORT const char*
+ to_string (json_type, bool distinguish_numbers = false) noexcept;
+
+ inline ostream&
+ operator<< (ostream& os, json_type t) {return os << to_string (t);}
+
+ struct json_member;
+
+ class LIBBUILD2_SYMEXPORT json_value
+ {
+ public:
+ using string_type = build2::string;
+ using array_type = vector<json_value>;
+ using object_type = vector<json_member>;
+
+ json_type type;
+
+ // Unchecked value access.
+ //
+ union
+ {
+ bool boolean;
+ int64_t signed_number;
+ uint64_t unsigned_number; // Also used for hexadecimal_number.
+ string_type string;
+ array_type array;
+ object_type object;
+ };
+
+ // Checked value access.
+ //
+ // If the type matches, return the corresponding member of the union.
+ // Otherwise throw std::invalid_argument.
+ //
+ bool as_bool () const;
+ bool& as_bool ();
+
+ int64_t as_int64 () const;
+ int64_t& as_int64 ();
+
+ uint64_t as_uint64 () const;
+ uint64_t& as_uint64 ();
+
+ const string_type& as_string () const;
+ string_type& as_string ();
+
+ const array_type& as_array () const;
+ array_type& as_array ();
+
+ const object_type& as_object () const;
+ object_type& as_object ();
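+
+ // For example (a hypothetical sketch of the checked access semantics):
+ //
+ //   json_value v (uint64_t (42));
+ //   uint64_t u (v.as_uint64 ()); // Ok: matching type.
+ //   v.as_string ();              // Throws std::invalid_argument.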
+
+
+ // Construction.
+ //
+ explicit
+ json_value (json_type = json_type::null) noexcept;
+
+ explicit
+ json_value (std::nullptr_t) noexcept;
+
+ explicit
+ json_value (bool) noexcept;
+
+ explicit
+ json_value (int64_t) noexcept;
+
+ explicit
+ json_value (uint64_t, bool hexadecimal = false) noexcept;
+
+ explicit
+ json_value (string_type);
+
+ // If the expected type is specified, then fail if it does not match the
+ // parsed type. Throws invalid_json_input.
+ //
+ explicit
+ json_value (json_parser&, optional<json_type> expected = {});
+
+ // If the expected type is specified, then fail if it does not match the
+ // value's. Throws invalid_json_output.
+ //
+ void
+ serialize (json_buffer_serializer&,
+ optional<json_type> expected = {}) const;
+
+ // Note that values of different types are never equal, except for
+ // signed/unsigned/hexadecimal numbers. Null is equal to null and is less
+ // than any other value. Arrays are compared lexicographically. Object
+ // members are considered in the lexicographically-compared name-ascending
+ // order (see RFC8785). An absent member is less than a present member
+ // (even if it's null).
+ //
+ int
+ compare (const json_value&) const noexcept;
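+
+ // For example (a hypothetical sketch of the semantics described above):
+ //
+ //   json_value a (uint64_t (5)); // unsigned_number
+ //   json_value b (int64_t (5));  // signed_number
+ //   json_value n;                // null
+ //
+ //   a.compare (b) == 0; // Numbers are compared by value across types.
+ //   n.compare (a) <  0; // Null is less than any other value.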
+
+ // Append/prepend one JSON value to another. Throw invalid_argument if the
+ // values are incompatible. Note that for numbers this can also lead to
+ // the change of the value type.
+ //
+ // Append/prepend an array to an array splices in the array elements
+ // rather than adding an element of the array type.
+ //
+ // By default, append to an object overrides existing members while
+ // prepend does not. In a sense, whatever appears last is kept, which is
+ // consistent with what we expect to happen when specifying the same name
+ // repeatedly (provided it's not considered invalid) in a text
+ // representation (e.g., {"a":1,"a":2}). Position-wise, both append and
+ // prepend retain the positions of existing members with append inserting
+ // new ones at the end while prepend -- at the beginning.
+ //
+ void
+ append (json_value&&, bool override = true);
+
+ void
+ prepend (json_value&&, bool override = false);
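+
+ // For example (a hypothetical sketch; the results follow the rules
+ // described above):
+ //
+ //   {"a":1}.append  ({"a":2,"b":3})  ->  {"a":2,"b":3}
+ //   {"a":1}.prepend ({"a":2,"b":3})  ->  {"b":3,"a":1}
+ //   [1,2]  .append  ([3,4])          ->  [1,2,3,4]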
+
+ // Array element access.
+ //
+ // If the index is out of array bounds, the at() functions throw
+ // std::out_of_range, the const operator[] returns null_json_value, and
+ // the non-const operator[] inserts a new null value at the specified
+ // position (filling any missing elements in between with nulls) and
+ // returns that. All three functions throw std::invalid_argument if the
+ // value is not an array or null (null is treated as a missing array
+ // rather than a wrong value type, with the at() functions throwing
+ // out_of_range in this case).
+ //
+ // Note that non-const operator[] will not only insert a new element but
+ // will also turn the value it is called upon into an array if it is null.
+ // This semantics allows you to string together several subscripts to
+ // build up a chain of values.
+ //
+ // Note also that while the operator[] interface is convenient for
+ // accessing and modifying (or building up) values deep in the tree, it
+ // can lead to inefficiencies or even undesirable semantics during
+ // otherwise read-only access of a non-const object due to the potential
+ // insertion of null values for missing array elements. As a result, it's
+ // recommended to always use a const reference for read-only access (or
+ // use the at() interface if this is deemed too easy to forget).
+ //
+ const json_value&
+ at (size_t) const;
+
+ json_value&
+ at (size_t);
+
+#if 0
+ const json_value&
+ operator[] (size_t) const;
+
+ json_value&
+ operator[] (size_t);
+#endif
+
+
+ // Object member access.
+ //
+ // If a member with the specified name is not found in the object, the
+ // at() functions throw std::out_of_range, the find() function returns
+ // NULL, the const operator[] returns null_json_value, and the non-const
+ // operator[] adds a new member with the specified name and null value and
+ // returns that value. All three functions throw std::invalid_argument if
+ // the value is not an object or null (null is treated as a missing object
+ // rather than a wrong value type, with the at() functions throwing
+ // out_of_range in this case).
+ //
+ // Note that non-const operator[] will not only insert a new member but
+ // will also turn the value it is called upon into an object if it is null.
+ // This semantics allows you to string together several subscripts to
+ // build up a chain of values.
+ //
+ // Note also that while the operator[] interface is convenient for
+ // accessing and modifying (or building up) values deep in the tree, it
+ // can lead to inefficiencies or even undesirable semantics during
+ // otherwise read-only access of a non-const object due to the potential
+ // insertion of null values for missing object members. As a result, it's
+ // recommended to always use a const reference for read-only access (or
+ // use the at() interface if this is deemed too easy to forget).
+ //
+ const json_value&
+ at (const char*) const;
+
+ json_value&
+ at (const char*);
+
+ const json_value*
+ find (const char*) const;
+
+ json_value*
+ find (const char*);
+
+#if 0
+ const json_value&
+ operator[] (const char*) const;
+
+ json_value&
+ operator[] (const char*);
+#endif
+
+ const json_value&
+ at (const string_type&) const;
+
+ json_value&
+ at (const string_type&);
+
+ const json_value*
+ find (const string_type&) const;
+
+ json_value*
+ find (const string_type&);
+
+#if 0
+ const json_value&
+ operator[] (const string_type&) const;
+
+ json_value&
+ operator[] (const string_type&);
+#endif
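+
+ // For example (a hypothetical sketch, assuming v holds an object):
+ //
+ //   if (const json_value* x = v.find ("name"))
+ //     ... // Member is present (but may still be null).
+ //
+ //   const string_type& s (v.at ("name").as_string ()); // Throws if absent.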
+
+ // Note that the moved-from value becomes JSON null value.
+ //
+ json_value (json_value&&) noexcept;
+ json_value (const json_value&);
+
+ json_value& operator= (json_value&&) noexcept;
+ json_value& operator= (const json_value&);
+
+ ~json_value () noexcept;
+ };
+
+ LIBBUILD2_SYMEXPORT extern const json_value null_json_value;
+
+ inline bool
+ operator== (const json_value& x, const json_value& y) {return x.compare (y) == 0;}
+
+ inline bool
+ operator!= (const json_value& x, const json_value& y) {return !(x == y);}
+
+ inline bool
+ operator< (const json_value& x, const json_value& y) {return x.compare (y) < 0;}
+
+ inline bool
+ operator<= (const json_value& x, const json_value& y) {return x.compare (y) <= 0;}
+
+ inline bool
+ operator> (const json_value& x, const json_value& y) {return !(x <= y);}
+
+ inline bool
+ operator>= (const json_value& x, const json_value& y) {return !(x < y);}
+
+ // A JSON object member.
+ //
+ struct json_member
+ {
+ // @@ TODO: add some convenience constructors?
+
+ string name;
+ json_value value;
+ };
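+
+ // For example, an object value could be built up like this (a hypothetical
+ // sketch, not part of the interface above):
+ //
+ //   json_value v (json_type::object);
+ //   v.as_object ().push_back (json_member {"enabled", json_value (true)});
+ //   v.as_object ().push_back (json_member {"count", json_value (uint64_t (3))});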
+
+ // A JSON value that can only be an array.
+ //
+ class /*LIBBUILD2_SYMEXPORT*/ json_array: public json_value
+ {
+ public:
+ // Create empty array.
+ //
+ json_array () noexcept;
+
+ explicit
+ json_array (json_parser&);
+
+ void
+ serialize (json_buffer_serializer& s) const;
+ };
+
+ // A JSON value that can only be an object.
+ //
+ class /*LIBBUILD2_SYMEXPORT*/ json_object: public json_value
+ {
+ public:
+ // Create empty object.
+ //
+ json_object () noexcept;
+
+ explicit
+ json_object (json_parser&);
+
+ void
+ serialize (json_buffer_serializer& s) const;
+ };
+}
+
+#include <libbuild2/json.ixx>
+
+#endif // LIBBUILD2_JSON_HXX
diff --git a/libbuild2/json.ixx b/libbuild2/json.ixx
new file mode 100644
index 0000000..76cd00a
--- /dev/null
+++ b/libbuild2/json.ixx
@@ -0,0 +1,349 @@
+// file : libbuild2/json.ixx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+namespace build2
+{
+ [[noreturn]] LIBBUILD2_SYMEXPORT void
+ json_as_throw (json_type actual, json_type expected);
+
+ inline bool json_value::
+ as_bool () const
+ {
+ if (type == json_type::boolean)
+ return boolean;
+
+ json_as_throw (type, json_type::boolean);
+ }
+
+ inline bool& json_value::
+ as_bool ()
+ {
+ if (type == json_type::boolean)
+ return boolean;
+
+ json_as_throw (type, json_type::boolean);
+ }
+
+ inline int64_t json_value::
+ as_int64 () const
+ {
+ if (type == json_type::signed_number)
+ return signed_number;
+
+ json_as_throw (type, json_type::signed_number);
+ }
+
+ inline int64_t& json_value::
+ as_int64 ()
+ {
+ if (type == json_type::signed_number)
+ return signed_number;
+
+ json_as_throw (type, json_type::signed_number);
+ }
+
+ inline uint64_t json_value::
+ as_uint64 () const
+ {
+ if (type == json_type::unsigned_number ||
+ type == json_type::hexadecimal_number)
+ return unsigned_number;
+
+ json_as_throw (type, json_type::unsigned_number);
+ }
+
+ inline uint64_t& json_value::
+ as_uint64 ()
+ {
+ if (type == json_type::unsigned_number ||
+ type == json_type::hexadecimal_number)
+ return unsigned_number;
+
+ json_as_throw (type, json_type::unsigned_number);
+ }
+
+ inline const string& json_value::
+ as_string () const
+ {
+ if (type == json_type::string)
+ return string;
+
+ json_as_throw (type, json_type::string);
+ }
+
+ inline string& json_value::
+ as_string ()
+ {
+ if (type == json_type::string)
+ return string;
+
+ json_as_throw (type, json_type::string);
+ }
+
+ inline const json_value::array_type& json_value::
+ as_array () const
+ {
+ if (type == json_type::array)
+ return array;
+
+ json_as_throw (type, json_type::array);
+ }
+
+ inline json_value::array_type& json_value::
+ as_array ()
+ {
+ if (type == json_type::array)
+ return array;
+
+ json_as_throw (type, json_type::array);
+ }
+
+ inline const json_value::object_type& json_value::
+ as_object () const
+ {
+ if (type == json_type::object)
+ return object;
+
+ json_as_throw (type, json_type::object);
+ }
+
+ inline json_value::object_type& json_value::
+ as_object ()
+ {
+ if (type == json_type::object)
+ return object;
+
+ json_as_throw (type, json_type::object);
+ }
+
+ inline json_value::
+ ~json_value () noexcept
+ {
+ switch (type)
+ {
+ case json_type::null:
+ case json_type::boolean:
+ case json_type::signed_number:
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number: break;
+ case json_type::string: string.~string_type (); break;
+ case json_type::array: array.~array_type (); break;
+ case json_type::object: object.~object_type (); break;
+ }
+ }
+
+ inline json_value::
+ json_value (json_type t) noexcept
+ : type (t)
+ {
+ switch (type)
+ {
+ case json_type::null: break;
+ case json_type::boolean: boolean = false; break;
+ case json_type::signed_number: signed_number = 0; break;
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number: unsigned_number = 0; break;
+ case json_type::string: new (&string) string_type (); break;
+ case json_type::array: new (&array) array_type (); break;
+ case json_type::object: new (&object) object_type (); break;
+ }
+ }
+
+ inline json_value::
+ json_value (std::nullptr_t) noexcept
+ : type (json_type::null)
+ {
+ }
+
+ inline json_value::
+ json_value (bool v) noexcept
+ : type (json_type::boolean), boolean (v)
+ {
+ }
+
+ inline json_value::
+ json_value (int64_t v) noexcept
+ : type (json_type::signed_number), signed_number (v)
+ {
+ }
+
+ inline json_value::
+ json_value (uint64_t v, bool hex) noexcept
+ : type (hex
+ ? json_type::hexadecimal_number
+ : json_type::unsigned_number),
+ unsigned_number (v)
+ {
+ }
+
+ inline json_value::
+ json_value (string_type v)
+ : type (json_type::string), string (move (v))
+ {
+ }
+
+ inline const json_value& json_value::
+ at (const string_type& n) const
+ {
+ return at (n.c_str ());
+ }
+
+ inline json_value& json_value::
+ at (const string_type& n)
+ {
+ return at (n.c_str ());
+ }
+
+ inline const json_value* json_value::
+ find (const string_type& n) const
+ {
+ return find (n.c_str ());
+ }
+
+ inline json_value* json_value::
+ find (const string_type& n)
+ {
+ return find (n.c_str ());
+ }
+
+#if 0
+ inline const json_value& json_value::
+ operator[] (const string_type& n) const
+ {
+ return operator[] (n.c_str ());
+ }
+
+ inline json_value& json_value::
+ operator[] (const string_type& n)
+ {
+ return operator[] (n.c_str ());
+ }
+#endif
+
+ inline json_value::
+ json_value (json_value&& v) noexcept
+ : type (v.type)
+ {
+ switch (type)
+ {
+ case json_type::null:
+ break;
+ case json_type::boolean:
+ boolean = v.boolean;
+ break;
+ case json_type::signed_number:
+ signed_number = v.signed_number;
+ break;
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ unsigned_number = v.unsigned_number;
+ break;
+ case json_type::string:
+ new (&string) string_type (move (v.string));
+ v.string.~string_type ();
+ break;
+ case json_type::array:
+ new (&array) array_type (move (v.array));
+ v.array.~array_type ();
+ break;
+ case json_type::object:
+ new (&object) object_type (move (v.object));
+ v.object.~object_type ();
+ break;
+ }
+
+ v.type = json_type::null;
+ }
+
+ inline json_value::
+ json_value (const json_value& v)
+ : type (v.type)
+ {
+ switch (type)
+ {
+ case json_type::null:
+ break;
+ case json_type::boolean:
+ boolean = v.boolean;
+ break;
+ case json_type::signed_number:
+ signed_number = v.signed_number;
+ break;
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ unsigned_number = v.unsigned_number;
+ break;
+ case json_type::string:
+ new (&string) string_type (v.string);
+ break;
+ case json_type::array:
+ new (&array) array_type (v.array);
+ break;
+ case json_type::object:
+ new (&object) object_type (v.object);
+ break;
+ }
+ }
+
+ inline json_value& json_value::
+ operator= (json_value&& v) noexcept
+ {
+ if (this != &v)
+ {
+ this->~json_value ();
+ new (this) json_value (move (v));
+ }
+ return *this;
+ }
+
+ inline json_value& json_value::
+ operator= (const json_value& v)
+ {
+ if (this != &v)
+ {
+ this->~json_value ();
+ new (this) json_value (v);
+ }
+ return *this;
+ }
+
+ // json_array
+ //
+ inline json_array::
+ json_array () noexcept
+ : json_value (json_type::array)
+ {
+ }
+
+ inline json_array::
+ json_array (json_parser& p)
+ : json_value (p, json_type::array)
+ {
+ }
+
+ inline void json_array::
+ serialize (json_buffer_serializer& s) const
+ {
+ json_value::serialize (s, json_type::array);
+ }
+
+ // json_object
+ //
+ inline json_object::
+ json_object () noexcept
+ : json_value (json_type::object)
+ {
+ }
+
+ inline json_object::
+ json_object (json_parser& p)
+ : json_value (p, json_type::object)
+ {
+ }
+
+ inline void json_object::
+ serialize (json_buffer_serializer& s) const
+ {
+ json_value::serialize (s, json_type::object);
+ }
+}
diff --git a/libbuild2/lexer.cxx b/libbuild2/lexer.cxx
index 9176422..04c15be 100644
--- a/libbuild2/lexer.cxx
+++ b/libbuild2/lexer.cxx
@@ -713,9 +713,9 @@ namespace build2
}
token lexer::
- word (state st, bool sep)
+ word (const state& rst, bool sep)
{
- lexer_mode m (st.mode);
+ lexer_mode m (rst.mode);
xchar c (peek ());
assert (!eos (c));
@@ -746,22 +746,66 @@ namespace build2
lexeme += c;
};
- for (; !eos (c); c = peek ())
+ const state* st (&rst);
+ for (bool first (true); !eos (c); first = false, c = peek ())
{
// First handle escape sequences.
//
if (c == '\\')
{
- // In the variable mode we treat the beginning of the escape sequence
- // as a separator (think \"$foo\").
+ // In the variable mode we treat an immediate `\` as the start of an
+ // escape sequence and any subsequent one as a separator (think \"$foo\").
//
if (m == lexer_mode::variable)
- break;
+ {
+ if (!first)
+ break;
+
+ get ();
+ c = get ();
+
+ if (eos (c))
+ fail (c) << "unterminated escape sequence";
+
+ // For now we only support all the simple C/C++ escape sequences
+ // plus \0 (which in C/C++ is an octal escape sequence).
+ //
+ // In the future we may decide to support more elaborate sequences
+ // such as \xNN, \uNNNN, etc.
+ //
+ // Note: we return it in the literal form instead of translating for
+ // easier printing.
+ //
+ switch (c)
+ {
+ case '\'':
+ case '"':
+ case '?':
+ case '\\':
+ case '0':
+ case 'a':
+ case 'b':
+ case 'f':
+ case 'n':
+ case 'r':
+ case 't':
+ case 'v': lexeme = c; break;
+ default:
+ fail (c) << "unknown escape sequence \\" << c;
+ }
+
+ state_.pop ();
+ return token (type::escape,
+ move (lexeme),
+ sep,
+ qtype, qcomp, qfirst,
+ ln, cn);
+ }
get ();
xchar p (peek ());
- const char* esc (st.escapes);
+ const char* esc (st->escapes);
if (esc == nullptr ||
(*esc != '\0' && !eos (p) && strchr (esc, p) != nullptr))
@@ -777,7 +821,7 @@ namespace build2
continue;
}
else
- unget (c); // Treat as a normal character.
+ unget (c); // Fall through to treat as a normal character.
}
bool done (false);
@@ -806,8 +850,8 @@ namespace build2
get ();
state_.pop ();
- st = state_.top ();
- m = st.mode;
+ st = &state_.top ();
+ m = st->mode;
continue;
}
}
@@ -816,19 +860,17 @@ namespace build2
//
else if (m == lexer_mode::variable)
{
- bool first (lexeme.empty ());
-
// Handle special variable names, if any.
//
- if (first &&
- st.data != 0 &&
- strchr (reinterpret_cast<const char*> (st.data), c) != nullptr)
+ if (first &&
+ st->data != 0 &&
+ strchr (reinterpret_cast<const char*> (st->data), c) != nullptr)
{
get ();
lexeme += c;
done = true;
}
- else if (c != '_' && !(first ? alpha (c) : alnum (c)))
+ else if (c != '_' && !(lexeme.empty () ? alpha (c) : alnum (c)))
{
if (c != '.')
done = true;
@@ -848,17 +890,17 @@ namespace build2
{
// First check if it's a pair separator.
//
- if (c == st.sep_pair)
+ if (c == st->sep_pair)
done = true;
else
{
// Then see if this character or character sequence is a separator.
//
- for (const char* p (strchr (st.sep_first, c));
+ for (const char* p (strchr (st->sep_first, c));
p != nullptr;
p = done ? nullptr : strchr (p + 1, c))
{
- char s (st.sep_second[p - st.sep_first]);
+ char s (st->sep_second[p - st->sep_first]);
// See if it has a second.
//
@@ -876,13 +918,19 @@ namespace build2
// Handle single and double quotes if enabled for this mode and unless
// they were considered separators.
//
- if (st.quotes && !done)
+ if (st->quotes && !done)
{
auto quoted_mode = [this] (lexer_mode m)
{
+ // In the double-quoted mode we only do effective escaping of the
+ // special `$("\` characters, line continuations, plus `)` for
+ // symmetry. Nothing can be escaped in single-quoted.
+ //
+ const char* esc (m == lexer_mode::double_quoted ? "$()\"\\\n" : "");
+
state_.push (state {
m, 0, nullopt, false, false, '\0', false, true, true,
- state_.top ().escapes, nullptr, nullptr});
+ esc, nullptr, nullptr});
};
switch (c)
@@ -933,8 +981,8 @@ namespace build2
quoted_mode (lexer_mode::double_quoted);
- st = state_.top ();
- m = st.mode;
+ st = &state_.top ();
+ m = st->mode;
switch (qtype)
{
@@ -1090,6 +1138,8 @@ namespace build2
}
case '\\':
{
+ // See if this is line continuation.
+ //
get ();
if (peek () == '\n')
diff --git a/libbuild2/lexer.hxx b/libbuild2/lexer.hxx
index 4371206..e913829 100644
--- a/libbuild2/lexer.hxx
+++ b/libbuild2/lexer.hxx
@@ -26,14 +26,15 @@ namespace build2
// mode we don't treat certain characters (e.g., `+`, `=`) as special so
// that we can use them in the variable values, e.g., `foo = g++`. In
// contrast, in the variable mode, we restrict certain character (e.g., `/`)
- // from appearing in the name. The values mode is like value but recogizes
- // `,` as special (used in contexts where we need to list multiple
- // values). The attributes/attribute_value modes are like values where each
- // value is potentially a variable assignment; they don't treat `{` and `}`
- // as special (so we cannot have name groups in attributes) as well as
- // recognizes `=` and `]`. The subscript mode is like value but doesn't
- // treat `{` and `}` as special and recognizes `]`. The eval mode is used in
- // the evaluation context.
+ // from appearing in the name. Additionally, in the variable mode we
+ // recognize a leading `\` as the beginning of an escape sequence ($\n). The
+ // values mode is like value but recognizes `,` as special (used in contexts
+ // where we need to list multiple values). The attributes/attribute_value
+ // modes are like values where each value is potentially a variable
+ // assignment; they don't treat `{` and `}` as special (so we cannot have
+ // name groups in attributes) and additionally recognize `=` and `]`. The
+ // subscript mode is like value but doesn't treat `{` and `}` as special and
+ // recognizes `]`. The eval mode is used in the evaluation context.
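+ //
+ // For example, in the variable mode `"abc$\ndef"` lexes as the literal
+ // `abc`, a newline escape, and the literal `def` (a hedged illustration:
+ // the lexer returns the escape in literal form and it is presumably
+ // translated during parsing).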
//
// A number of modes are "derived" from the value/values mode by recognizing
// a few extra characters:
@@ -262,7 +263,7 @@ namespace build2
// been "expired" from the top).
//
virtual token
- word (state current, bool separated);
+ word (const state& current, bool separated);
// Return true in first if we have seen any spaces. Skipped empty lines
// don't count. In other words, we are only interested in spaces that are
diff --git a/libbuild2/module.cxx b/libbuild2/module.cxx
index 62145ca..1aaa38d 100644
--- a/libbuild2/module.cxx
+++ b/libbuild2/module.cxx
@@ -30,26 +30,26 @@ using namespace butl;
namespace build2
{
- mutex loaded_modules_lock::mutex_;
+ mutex module_libraries_lock::mutex_;
- loaded_module_map loaded_modules;
+ module_libraries_map module_libraries;
void
load_builtin_module (module_load_function* lf)
{
for (const module_functions* i (lf ()); i->name != nullptr; ++i)
- loaded_modules[i->name] = i;
+ module_libraries.emplace (i->name, module_library {*i, dir_path ()});
}
// Sorted array of bundled modules (excluding core modules bundled with
// libbuild2; see below).
//
-#if !defined(BUILD2_BOOTSTRAP) && !defined(LIBBUILD2_STATIC_BUILD)
static const char* bundled_modules[] = {
"bash",
"bin",
"c",
"cc",
+ "cli",
"cxx",
"in",
"version"
@@ -63,7 +63,6 @@ namespace build2
bundled_modules + sizeof (bundled_modules) / sizeof (*bundled_modules),
mod);
}
-#endif
// Note: also used by ad hoc recipes thus not static.
//
@@ -81,10 +80,10 @@ namespace build2
// adding a reasonable margin for future growth.
//
ctx.module_context_storage->reset (
- new context (ctx.sched,
- ctx.mutexes,
- ctx.fcache,
- false, /* match_only */
+ new context (*ctx.sched,
+ *ctx.mutexes,
+ *ctx.fcache,
+ nullopt, /* match_only */
false, /* no_external_modules */
false, /* dry_run */
ctx.no_diag_buffer,
@@ -128,6 +127,9 @@ namespace build2
{
// New update operation.
//
+ assert (op_update.operation_pre == nullptr &&
+ op_update.operation_post == nullptr);
+
ctx.module_context->current_operation (op_update);
// Un-tune the scheduler.
@@ -141,8 +143,8 @@ namespace build2
// keep it in case things change. Actually, we may need it, if the
// scheduler was started up in a tuned state, like in bpkg).
//
- auto sched_tune (ctx.sched.serial ()
- ? scheduler::tune_guard (ctx.sched, 0)
+ auto sched_tune (ctx.sched->serial ()
+ ? scheduler::tune_guard (*ctx.sched, 0)
: scheduler::tune_guard ());
// Remap verbosity level 0 to 1 unless we were requested to be silent.
@@ -240,11 +242,20 @@ namespace build2
}
#endif
- static module_load_function*
+ // Return the module functions as well as the module project directory (or
+ // an empty directory if not imported from a project). Return {nullptr,
+ // nullopt} if not found.
+ //
+ // The dry-run mode only calls import_search() and always returns NULL for
+ // module functions (see below for background).
+ //
+ static pair<module_load_function*, optional<dir_path>>
import_module (
#if defined(BUILD2_BOOTSTRAP) || defined(LIBBUILD2_STATIC_BUILD)
+ bool,
scope&,
#else
+ bool dry_run,
scope& bs,
#endif
const string& mod,
@@ -258,15 +269,21 @@ namespace build2
{
tracer trace ("import_module");
+ pair<module_load_function*, optional<dir_path>> r (nullptr, nullopt);
+
// Take care of core modules that are bundled with libbuild2 in case they
// are not pre-loaded by the driver.
//
- if (mod == "config") return &config::build2_config_load;
- else if (mod == "dist") return &dist::build2_dist_load;
- else if (mod == "install") return &install::build2_install_load;
- else if (mod == "test") return &test::build2_test_load;
+ if (mod == "config") r.first = &config::build2_config_load;
+ else if (mod == "dist") r.first = &dist::build2_dist_load;
+ else if (mod == "install") r.first = &install::build2_install_load;
+ else if (mod == "test") r.first = &test::build2_test_load;
- module_load_function* r (nullptr);
+ if (r.first != nullptr)
+ {
+ r.second = dir_path ();
+ return r;
+ }
// No dynamic loading of build system modules during bootstrap or if
// statically-linked..
@@ -335,7 +352,7 @@ namespace build2
// and undefined if the module was not mentioned.
//
if (boot && !bundled && ctx.no_external_modules)
- return nullptr;
+ return r; // NULL
// See if we can import a target for this module.
//
@@ -390,7 +407,7 @@ namespace build2
if (ir.first.empty ())
{
assert (opt);
- return nullptr;
+ return r; // NULL
}
if (ir.second)
@@ -398,6 +415,8 @@ namespace build2
// What if a module is specified with config.import.<mod>.<lib>.libs?
// Note that this could still be a project-qualified target.
//
+ // Note: we now return an empty directory to mean something else.
+ //
if (ir.second->empty ())
fail (loc) << "direct module target importation not yet supported";
@@ -405,6 +424,17 @@ namespace build2
// the target (which will also give us the shared library path).
//
l5 ([&]{trace << "found " << ir.first << " in " << *ir.second;});
+ }
+
+ if (dry_run)
+ {
+ r.second = ir.second ? move (*ir.second) : dir_path ();
+ return r;
+ }
+
+ if (ir.second)
+ {
+ r.second = *ir.second;
// Create the build context if necessary.
//
@@ -417,7 +447,7 @@ namespace build2
create_module_context (ctx, loc);
}
- // Inherit loaded_modules lock from the outer context.
+ // Inherit module_libraries lock from the outer context.
//
ctx.module_context->modules_lock = ctx.modules_lock;
@@ -426,7 +456,7 @@ namespace build2
//
auto_thread_env penv (nullptr);
context& ctx (*bs.ctx.module_context);
- scheduler::phase_guard pg (ctx.sched);
+ scheduler::phase_guard pg (*ctx.sched);
// Load the imported project in the module context.
//
@@ -477,6 +507,8 @@ namespace build2
}
else
{
+ r.second = dir_path ();
+
// No module project found. Form the shared library name (incorporating
// build system core version) and try using system-default search
// (installed, rpath, etc).
@@ -519,7 +551,7 @@ namespace build2
fail (loc) << "unable to lookup " << sym << " in build system module "
<< mod << " (" << lib << "): " << err;
- r = function_cast<module_load_function*> (hs.second);
+ r.first = function_cast<module_load_function*> (hs.second);
}
else if (!opt)
{
@@ -531,7 +563,10 @@ namespace build2
<< "line variable to specify its project out_root";
}
else
+ {
+ r.second = nullopt;
l5 ([&]{trace << "unable to load " << lib << ": " << err;});
+ }
#endif // BUILD2_BOOTSTRAP || LIBBUILD2_STATIC_BUILD
@@ -547,89 +582,200 @@ namespace build2
{
tracer trace ("find_module");
- // Note that we hold the lock for the entire time it takes to build a
- // module.
+ // If this is a submodule, get the main module name.
//
- loaded_modules_lock lock (bs.ctx);
+ string mmod (smod, 0, smod.find ('.'));
- // Optional modules and submodules sure make this logic convoluted. So we
- // divide it into two parts: (1) find or insert an entry (for submodule
- // or, failed that, for the main module, the latter potentially NULL) and
- // (2) analyze the entry and issue diagnostics.
+ // We have a somewhat strange two-level caching in imported_modules
+ // and module_libraries in order to achieve the following:
+ //
+ // 1. Correctly handle cases where a module can be imported from one
+ // project but not the other.
+ //
+ // 2. Make sure that for each project that imports the module we actually
+ // call import_search() in order to mark any config.import.* as used.
+ //
+ // 3. Make sure that all the projects import the same module.
//
- auto i (loaded_modules.find (smod)), e (loaded_modules.end ());
+ scope& rs (*bs.root_scope ());
- if (i == e)
+ const string* mod;
+ const module_functions* fun;
+
+ // First check the project's imported_modules in case this (main) module
+ // is known to be not found.
+ //
+ auto j (rs.root_extra->imported_modules.find (mmod));
+ auto je (rs.root_extra->imported_modules.end ());
+
+ if (j != je && !j->found)
+ {
+ mod = &mmod;
+ fun = nullptr;
+ }
+ else
{
- // If this is a submodule, get the main module name.
+ // Note that we hold the lock for the entire time it takes to build a
+ // module.
//
- string mmod (smod, 0, smod.find ('.'));
+ module_libraries_lock lock (bs.ctx);
- if (mmod != smod)
- i = loaded_modules.find (mmod);
+ // Optional modules and submodules sure make this logic convoluted. So
+ // we divide it into two parts: (1) find or insert an entry (for
+ // submodule or, failed that, for the main module) and (2) analyze the
+ // entry and issue diagnostics.
+ //
+ auto i (module_libraries.find (smod));
+ auto ie (module_libraries.end ());
- if (i == e)
+ bool imported (false);
+ if (i == ie)
{
- module_load_function* f (import_module (bs, mmod, loc, boot, opt));
+ if (mmod != smod)
+ i = module_libraries.find (mmod);
- if (f != nullptr)
+ if (i == ie)
{
- // Enter all the entries noticing which one is our submodule. If
- // none are, then we notice the main module.
- //
- for (const module_functions* j (f ()); j->name != nullptr; ++j)
+ pair<module_load_function*, optional<dir_path>> ir (
+ import_module (false /* dry_run */, bs, mmod, loc, boot, opt));
+
+ if (module_load_function* f = ir.first)
{
- const string& n (j->name);
+ // Enter all the entries noticing which one is our submodule. If
+ // none are, then we notice the main module.
+ //
+ for (const module_functions* j (f ()); j->name != nullptr; ++j)
+ {
+ const string& n (j->name);
+
+ l5 ([&]{trace << "registering " << n;});
- l5 ([&]{trace << "registering " << n;});
+ bool main (n == mmod);
- auto p (loaded_modules.emplace (n, j));
+ auto p (module_libraries.emplace (
+ n,
+ module_library {
+ *j,
+ main ? move (*ir.second) : dir_path ()}));
- if (!p.second)
- fail (loc) << "build system submodule name " << n << " of main "
- << "module " << mmod << " is already in use";
+ if (!p.second)
+ fail (loc) << "build system submodule name " << n << " of main "
+ << "module " << mmod << " is already in use";
- if (n == smod || (i == e && n == mmod))
- i = p.first;
+ // Note: this assumes the main module is last.
+ //
+ if (n == smod || (main && i == ie))
+ i = p.first;
+ }
+
+ // We should at least have the main module.
+ //
+ if (i == ie)
+ fail (loc) << "invalid function list in build system module "
+ << mmod;
}
- // We should at least have the main module.
- //
- if (i == e)
- fail (loc) << "invalid function list in build system module "
- << mmod;
+ imported = true;
}
- else
- i = loaded_modules.emplace (move (mmod), nullptr).first;
+ }
+
+ // Now the iterator points to a submodule or to the main module, or to
+ // end if neither is found.
+ //
+ assert (j == je || i != ie); // Cache state consistency sanity check.
+
+ if (i != ie)
+ {
+ // Note: these should remain stable after we release the lock.
+ //
+ mod = &i->first;
+ fun = &i->second.functions.get ();
+
+ // If this project hasn't imported this main module and we found the
+ // entry in the cache, then we have to perform the import_search()
+ // part of import_module() in order to cover items (2) and (3) above.
+ //
+ // There is one nuance: omit this for bundled modules since it's
+ // possible to first import them ad hoc and then, if we call
+ // import_search() again, to find them differently (e.g., as a
+ // subproject).
+ //
+ if (j == je && !imported && !bundled_module (mmod))
+ {
+ pair<module_load_function*, optional<dir_path>> ir (
+ import_module (true /* dry_run */, bs, mmod, loc, boot, opt));
+
+ if (ir.second)
+ {
+ if (i->first != mmod)
+ {
+ i = module_libraries.find (mmod);
+ assert (i != ie); // Has to be there.
+ }
+
+ const dir_path& cd (*ir.second);
+ const dir_path& pd (i->second.import_path);
+
+ if (cd != pd)
+ {
+ fail (loc) << "inconsistent build system module " << mmod
+ << " importation" <<
+ info << rs << " imports it as "
+ << (cd.empty () ? "ad hoc" : cd.representation ().c_str ()) <<
+ info << "previously imported as "
+ << (pd.empty () ? "ad hoc" : pd.representation ().c_str ());
+ }
+ }
+ else
+ {
+ // This module is not found from this project.
+ //
+ mod = &mmod;
+ fun = nullptr;
+ }
+ }
+ }
+ else
+ {
+ mod = &mmod;
+ fun = nullptr;
}
}
+ // Cache the result in imported_modules if necessary.
+ //
+ if (j == je)
+ rs.root_extra->imported_modules.push_back (
+ module_import {mmod, fun != nullptr});
+
// Reduce skipped external module to optional.
//
- if (boot && i->second == nullptr)
+ if (boot && fun == nullptr)
opt = true;
- // Now the iterator points to a submodule or to the main module, the
- // latter potentially NULL.
+ // Handle optional.
//
- if (!opt)
+ if (fun == nullptr)
{
- if (i->second == nullptr)
- {
- fail (loc) << "unable to load build system module " << i->first;
- }
- else if (i->first != smod)
- {
- fail (loc) << "build system module " << i->first << " has no "
+ if (!opt)
+ fail (loc) << "unable to load build system module " << *mod;
+ }
+ else if (*mod != smod)
+ {
+ if (!opt)
+ fail (loc) << "build system module " << *mod << " has no "
<< "submodule " << smod;
+ else
+ {
+ // Note that if the main module exists but has no such submodule, we
+ // return NULL rather than fail (think of an older version of a module
+ // that doesn't implement some extra functionality).
+ //
+ fun = nullptr;
}
}
- // Note that if the main module exists but has no such submodule, we
- // return NULL rather than fail (think of an older version of a module
- // that doesn't implement some extra functionality).
- //
- return i->second;
+ return fun;
}
void
@@ -637,7 +783,7 @@ namespace build2
{
// First see if this modules has already been booted for this project.
//
- module_map& lm (rs.root_extra->modules);
+ module_state_map& lm (rs.root_extra->loaded_modules);
auto i (lm.find (mod));
if (i != lm.end ())
@@ -713,7 +859,7 @@ namespace build2
{
// First see if this modules has already been inited for this project.
//
- module_map& lm (rs.root_extra->modules);
+ module_state_map& lm (rs.root_extra->loaded_modules);
auto i (lm.find (mod));
bool f (i == lm.end ());
@@ -833,7 +979,7 @@ namespace build2
if (cast_false<bool> (bs[name + ".loaded"]))
{
if (cast_false<bool> (bs[name + ".configured"]))
- return rs.root_extra->modules.find (name)->module;
+ return rs.root_extra->loaded_modules.find (name)->module;
}
else
{
@@ -855,7 +1001,7 @@ namespace build2
// attempt to load it was optional?
return cast_false<bool> (bs[name + ".loaded"])
- ? rs.root_extra->modules.find (name)->module
+ ? rs.root_extra->loaded_modules.find (name)->module
: init_module (rs, bs, name, loc, false /* optional */, hints)->module;
}
}
diff --git a/libbuild2/module.hxx b/libbuild2/module.hxx
index 8223bae..6cdd040 100644
--- a/libbuild2/module.hxx
+++ b/libbuild2/module.hxx
@@ -21,6 +21,12 @@ namespace build2
// implementation's perspectives, the module library is "loaded" and the
// module is optionally "bootstrapped" (or "booted" for short) and then
// "initialized" (or "inited").
+ //
+ // Note also that a module name (or component thereof, for submodules) is
+ // not a project name (in particular, it can be less than 3 characters long)
+ // and we usually use `-` instead of `_` as a word separator within
+ // components, for example `c.as-cpp` (since the top-level component ends up
+ // in the library name; but this is not a hard rule).
// Base class for module instance.
//
@@ -145,9 +151,9 @@ namespace build2
//
// The <name> part in the function name is the main module name without
// submodule components (for example, `c` in `c.config`) and the load
- // function is expected to return boot/init functions for all its submodules
- // (if any) as well as for the module itself as an array of module_functions
- // terminated with an all-NULL entry.
+ // function is expected to return boot/init functions as an array of
+ // module_functions: entries for all its submodules (if any) first, followed
+ // by the module itself, and terminated with an all-NULL entry.
//
// Note that the load function is guaranteed to be called during serial
// execution (either from main() or during the load phase).
@@ -155,7 +161,31 @@ namespace build2
extern "C"
using module_load_function = const module_functions* ();
- // Module state.
+ // Imported module state.
+ //
+ // The module name is the main module (corresponding to the library). If
+ // found is false then this module could not be imported from this project.
+ //
+ struct module_import
+ {
+ const string name;
+ bool found;
+ };
+
+ struct module_import_map: vector<module_import>
+ {
+ iterator
+ find (const string& name)
+ {
+ return find_if (
+ begin (), end (),
+ [&name] (const module_import& i) {return i.name == name;});
+ }
+ };
+
+ // Loaded module state.
+ //
+ // Note that unlike import_state, the module name here could be a submodule.
//
struct module_state
{
@@ -167,7 +197,7 @@ namespace build2
optional<module_boot_init> boot_init;
};
- struct module_map: vector<module_state>
+ struct module_state_map: vector<module_state>
{
iterator
find (const string& name)
@@ -268,23 +298,28 @@ namespace build2
return static_cast<T&> (*load_module (root, base, name, l, config_hints));
}
- // Loaded modules (as in libraries).
+ // Loaded module libraries.
//
- // A NULL entry for the main module indicates that a module library was not
- // found.
+ // Note that this map contains entries for all the submodules.
//
- using loaded_module_map = map<string, const module_functions*>;
+ struct module_library
+ {
+ reference_wrapper<const module_functions> functions;
+ dir_path import_path; // Only for main module.
+ };
+
+ using module_libraries_map = map<string, module_library>;
- // The loaded_modules map is locked per top-level (as opposed to nested)
+ // The module_libraries map is locked per top-level (as opposed to nested)
// context (see context.hxx for details).
//
// Note: should only be constructed during contexts-wide serial execution.
//
- class LIBBUILD2_SYMEXPORT loaded_modules_lock
+ class LIBBUILD2_SYMEXPORT module_libraries_lock
{
public:
explicit
- loaded_modules_lock (context& c)
+ module_libraries_lock (context& c)
: ctx_ (c), lock_ (mutex_, defer_lock)
{
if (ctx_.modules_lock == nullptr)
@@ -294,7 +329,7 @@ namespace build2
}
}
- ~loaded_modules_lock ()
+ ~module_libraries_lock ()
{
if (ctx_.modules_lock == this)
ctx_.modules_lock = nullptr;
@@ -306,7 +341,7 @@ namespace build2
mlock lock_;
};
- LIBBUILD2_SYMEXPORT extern loaded_module_map loaded_modules;
+ LIBBUILD2_SYMEXPORT extern module_libraries_map module_libraries;
// Load a builtin module (i.e., a module linked as a static/shared library
// or that is part of the build system driver).
diff --git a/libbuild2/operation.cxx b/libbuild2/operation.cxx
index 4146b57..6f88e38 100644
--- a/libbuild2/operation.cxx
+++ b/libbuild2/operation.cxx
@@ -18,6 +18,10 @@
#include <libbuild2/algorithm.hxx>
#include <libbuild2/diagnostics.hxx>
+#if 0
+#include <libbuild2/adhoc-rule-buildscript.hxx> // @@ For a hack below.
+#endif
+
using namespace std;
using namespace butl;
@@ -81,7 +85,8 @@ namespace build2
{
// Load project's root.build.
//
- load_root (root);
+ if (!root.root_extra->loaded)
+ load_root (root);
// Create the base scope. Note that its existence doesn't mean it was
// already setup as a base scope; it can be the same as root.
@@ -159,8 +164,9 @@ namespace build2
//
map.reserve (ctx.targets.size () / 2);
- bool e (false);
+ size_t count_matched (ctx.count_matched ());
+ bool e (false);
for (size_t pass (1); pass != 3; ++pass)
{
for (const auto& pt: ctx.targets)
@@ -175,8 +181,7 @@ namespace build2
//
const target::opstate& s (t->state[a]);
- if (s.task_count.load (memory_order_relaxed) - ctx.count_base () <
- target::offset_matched)
+ if (s.task_count.load (memory_order_relaxed) < count_matched)
continue;
// Skip if for some reason the path is not assigned.
@@ -278,11 +283,11 @@ namespace build2
// through a few hoops to make sure we don't overindulge.
//
md.incr = stderr_term // Scale depending on output type.
- ? (ctx.sched.serial () ? 1 : 5)
+ ? (ctx.sched->serial () ? 1 : 5)
: 100;
md.what = " targets to " + diag_do (ctx, a);
- mg = ctx.sched.monitor (
+ mg = ctx.sched->monitor (
ctx.target_count,
md.incr,
[&md] (size_t c) -> size_t
@@ -325,7 +330,10 @@ namespace build2
const target& t (ts[i].as<target> ());
l5 ([&]{trace << diag_doing (a, t);});
- target_state s (match_async (a, t, 0, task_count, false));
+ target_state s (match_async (a, t,
+ 0, task_count,
+ match_extra::all_options,
+ false /* fail */));
// Bail out if the target has failed and we weren't instructed to
// keep going.
@@ -352,11 +360,14 @@ namespace build2
bool posthoc_fail (false);
if (!ctx.current_posthoc_targets.empty () && (!fail || ctx.keep_going))
{
+ using posthoc_target = context::posthoc_target;
+ using posthoc_prerequisite_target = posthoc_target::prerequisite_target;
+
// Note that on each iteration we may end up with new entries at the
// back. Since we start and end each iteration in serial execution, we
// don't need to mess with the mutex.
//
- for (const context::posthoc_target& p: ctx.current_posthoc_targets)
+ for (const posthoc_target& p: ctx.current_posthoc_targets)
{
action a (p.action); // May not be the same as argument action.
const target& t (p.target);
@@ -375,16 +386,21 @@ namespace build2
//
// @@ PERF: match in parallel (need match_direct_async(), etc).
//
- for (const target* pt: p.prerequisite_targets)
+ for (const posthoc_prerequisite_target& pt: p.prerequisite_targets)
{
- target_state s (match_direct_sync (a, *pt, false /* fail */));
-
- if (s == target_state::failed)
+ if (pt.target != nullptr)
{
- posthoc_fail = true;
+ target_state s (match_direct_sync (a, *pt.target,
+ pt.match_options,
+ false /* fail */));
- if (!ctx.keep_going)
- break;
+ if (s == target_state::failed)
+ {
+ posthoc_fail = true;
+
+ if (!ctx.keep_going)
+ break;
+ }
}
}
@@ -414,7 +430,7 @@ namespace build2
target_state s;
if (j < i)
{
- s = match_complete (a, t, false);
+ s = match_complete (a, t, match_extra::all_options, false /* fail */);
if (posthoc_fail)
s = /*t.state[a].state =*/ target_state::failed;
@@ -485,7 +501,10 @@ namespace build2
bool posthoc_fail (false);
auto execute_posthoc = [&ctx, &posthoc_fail] ()
{
- for (const context::posthoc_target& p: ctx.current_posthoc_targets)
+ using posthoc_target = context::posthoc_target;
+ using posthoc_prerequisite_target = posthoc_target::prerequisite_target;
+
+ for (const posthoc_target& p: ctx.current_posthoc_targets)
{
action a (p.action); // May not be the same as argument action.
const target& t (p.target);
@@ -499,16 +518,20 @@ namespace build2
});
#if 0
- for (const target* pt: p.prerequisite_targets)
+ for (const posthoc_prerequisite_target& pt: p.prerequisite_targets)
{
- target_state s (execute_direct_sync (a, *pt, false /* fail */));
-
- if (s == target_state::failed)
+ if (pt.target != nullptr)
{
- posthoc_fail = true;
+ target_state s (
+ execute_direct_sync (a, *pt.target, false /* fail */));
- if (!ctx.keep_going)
- break;
+ if (s == target_state::failed)
+ {
+ posthoc_fail = true;
+
+ if (!ctx.keep_going)
+ break;
+ }
}
}
#else
@@ -518,16 +541,20 @@ namespace build2
atomic_count tc (0);
wait_guard wg (ctx, tc);
- for (const target* pt: p.prerequisite_targets)
+ for (const posthoc_prerequisite_target& pt: p.prerequisite_targets)
{
- target_state s (execute_direct_async (a, *pt, 0, tc, false /*fail*/));
-
- if (s == target_state::failed)
+ if (pt.target != nullptr)
{
- posthoc_fail = true;
+ target_state s (
+ execute_direct_async (a, *pt.target, 0, tc, false /*fail*/));
- if (!ctx.keep_going)
- break;
+ if (s == target_state::failed)
+ {
+ posthoc_fail = true;
+
+ if (!ctx.keep_going)
+ break;
+ }
}
}
@@ -535,18 +562,21 @@ namespace build2
// Process the result.
//
- for (const target* pt: p.prerequisite_targets)
+ for (const posthoc_prerequisite_target& pt: p.prerequisite_targets)
{
- // Similar to below, no need to wait.
- //
- target_state s (pt->executed_state (a, false /* fail */));
-
- if (s == target_state::failed)
+ if (pt.target != nullptr)
{
- // Note: no need to keep going.
+ // Similar to below, no need to wait.
//
- posthoc_fail = true;
- break;
+ target_state s (pt.target->executed_state (a, false /* fail */));
+
+ if (s == target_state::failed)
+ {
+ // Note: no need to keep going.
+ //
+ posthoc_fail = true;
+ break;
+ }
}
}
#endif
@@ -571,7 +601,7 @@ namespace build2
switch (ctx.current_inner_oif->concurrency)
{
- case 0: sched_tune = tune_guard (ctx.sched, 1); break; // Run serially.
+ case 0: sched_tune = tune_guard (*ctx.sched, 1); break; // Run serially.
case 1: break; // Run as is.
default: assert (false); // Not supported.
}
@@ -594,7 +624,7 @@ namespace build2
{
what = "% of targets " + diag_did (ctx, a);
- mg = ctx.sched.monitor (
+ mg = ctx.sched->monitor (
ctx.target_count,
init - incr,
[init, incr, &what, &ctx] (size_t c) -> size_t
@@ -761,26 +791,191 @@ namespace build2
if (fail)
throw failed ();
- // We should have executed every target that we matched, provided we
+#ifndef NDEBUG
+ size_t base (ctx.count_base ());
+
+ // For now we disable these checks if we've performed any group member
+ // resolutions that required a match (with apply()) but not execute.
+ //
+ if (ctx.target_count.load (memory_order_relaxed) != 0 &&
+ ctx.resolve_count.load (memory_order_relaxed) != 0)
+ {
+ // These counts are only tracked for the inner operation.
+ //
+ action ia (a.outer () ? a.inner_action () : a);
+
+ // While it may seem that just decrementing the counters for every
+ // target with the resolve_counted flag set should be enough, this will
+ // miss any prerequisites that this target has matched but did not
+ // execute, which may affect both task_count and dependency_count. Note
+ // that this applies recursively: we effectively need to pretend to
+ // execute this target and all its prerequisites, recursively, without
+ // actually executing any of their recipes.
+ //
+ // That last bit means we must be able to interpret the populated
+ // prerequisite_targets generically, which is a requirement we place on
+ // rules that resolve groups in apply (see target::group_members() for
+ // details). It so happens that our own adhoc_buildscript_rule doesn't
+ // follow this rule (see execute_update_prerequisites()) so we detect
+ // and handle this with a hack.
+ //
+ // @@ Hm, but there is no guarantee that this holds recursively since
+ // prerequisites may not be see-through groups. For this to work we
+ // would have to impose this restriction globally. Which we could
+ // probably do, just need to audit things carefully (especially
+ // cc::link_rule). But we already sort of rely on that for dump! Maybe we
+ // should just require it everywhere and fix adhoc_buildscript_rule.
+ //
+ // @@ There are special recipes that don't populate prerequisite_targets
+ // like group_recipe! Are we banning any such user-defined recipes?
+ // Need to actually look if we have anything else like this. There
+ // is also inner_recipe, though doesn't apply here (only for outer).
+ //
+ // @@ TMP: do and enable after the 0.16.0 release.
+ //
+ // Note: recursive lambda.
+ //
+#if 0
+ auto pretend_execute = [base, ia] (target& t,
+ const auto& pretend_execute) -> void
+ {
+ context& ctx (t.ctx);
+
+ // Note: tries to emulate the execute_impl() function's semantics.
+ //
+ auto execute_impl = [base, ia, &ctx, &pretend_execute] (target& t)
+ {
+ target::opstate& s (t.state[ia]);
+
+ size_t gd (ctx.dependency_count.fetch_sub (1, memory_order_relaxed));
+ size_t td (s.dependents.fetch_sub (1, memory_order_release));
+ assert (td != 0 && gd != 0);
+
+ // Execute unless already executed.
+ //
+ if (s.task_count.load (memory_order_relaxed) !=
+ base + target::offset_executed)
+ pretend_execute (t, pretend_execute);
+ };
+
+ target::opstate& s (t.state[ia]);
+
+ if (s.state != target_state::unchanged) // Noop recipe.
+ {
+ if (s.recipe_group_action)
+ {
+ execute_impl (const_cast<target&> (*t.group));
+ }
+ else
+ {
+ // @@ Special hack for adhoc_buildscript_rule (remember to drop
+ // include above if getting rid of).
+ //
+ bool adhoc (
+ ia == perform_update_id &&
+ s.rule != nullptr &&
+ dynamic_cast<const adhoc_buildscript_rule*> (
+ &s.rule->second.get ()) != nullptr);
+
+ for (const prerequisite_target& p: t.prerequisite_targets[ia])
+ {
+ const target* pt;
+
+ if (adhoc)
+ pt = (p.target != nullptr ? p.target :
+ p.adhoc () ? reinterpret_cast<target*> (p.data) :
+ nullptr);
+ else
+ pt = p.target;
+
+ if (pt != nullptr)
+ execute_impl (const_cast<target&> (*pt));
+ }
+
+ ctx.target_count.fetch_sub (1, memory_order_relaxed);
+ if (s.resolve_counted)
+ {
+ s.resolve_counted = false;
+ ctx.resolve_count.fetch_sub (1, memory_order_relaxed);
+ }
+ }
+
+ s.state = target_state::changed;
+ }
+
+ s.task_count.store (base + target::offset_executed,
+ memory_order_relaxed);
+ };
+#endif
+
+ for (const auto& pt: ctx.targets)
+ {
+ target& t (*pt);
+ target::opstate& s (t.state[ia]);
+
+ // We are only interested in the targets that have been matched for
+ // this operation and are in the applied state.
+ //
+ if (s.task_count.load (memory_order_relaxed) !=
+ base + target::offset_applied)
+ continue;
+
+ if (s.resolve_counted)
+ {
+#if 0
+ pretend_execute (t, pretend_execute);
+
+ if (ctx.resolve_count.load (memory_order_relaxed) == 0)
+ break;
+#else
+ return; // Skip all the below checks.
+#endif
+ }
+ }
+ }
+
+ // We should have executed every target that we have matched, provided we
// haven't failed (in which case we could have bailed out early).
//
assert (ctx.target_count.load (memory_order_relaxed) == 0);
+ assert (ctx.resolve_count.load (memory_order_relaxed) == 0); // Sanity check.
-#ifndef NDEBUG
if (ctx.dependency_count.load (memory_order_relaxed) != 0)
{
+ auto dependents = [base] (action a, const target& t)
+ {
+ const target::opstate& s (t.state[a]);
+
+ // Only consider targets that have been matched for this operation
+ // (since matching is what causes the dependents count reset).
+ //
+ size_t c (s.task_count.load (memory_order_relaxed));
+
+ return (c >= base + target::offset_applied
+ ? s.dependents.load (memory_order_relaxed)
+ : 0);
+ };
+
diag_record dr;
dr << info << "detected unexecuted matched targets:";
for (const auto& pt: ctx.targets)
{
const target& t (*pt);
- if (size_t n = t[a].dependents.load (memory_order_relaxed))
+
+ if (size_t n = dependents (a, t))
dr << text << t << ' ' << n;
+
+ if (a.outer ())
+ {
+ if (size_t n = dependents (a.inner_action (), t))
+ dr << text << t << ' ' << n;
+ }
}
}
-#endif
+
assert (ctx.dependency_count.load (memory_order_relaxed) == 0);
+#endif
}
const meta_operation_info mo_perform {
@@ -807,31 +1002,66 @@ namespace build2
// Note: similar approach to forward() in configure.
//
- static bool
- info_json (const values& params,
- const char* mo = nullptr,
- const location& l = location ())
+ struct info_params
{
+ bool json = false;
+ bool subprojects = true;
+ };
+
+ // Note: should not fail if mo is NULL (see info_subprojects() below).
+ //
+ static info_params
+ info_parse_params (const values& params,
+ const char* mo = nullptr,
+ const location& l = location ())
+ {
+ info_params r;
+
if (params.size () == 1)
{
- const names& ns (cast<names> (params[0]));
+ for (const name& n: cast<names> (params[0]))
+ {
+ if (n.simple ())
+ {
+ if (n.value == "json")
+ {
+ r.json = true;
+ continue;
+ }
+
+ if (n.value == "no_subprojects")
+ {
+ r.subprojects = false;
+ continue;
+ }
- if (ns.size () == 1 && ns[0].simple () && ns[0].value == "json")
- return true;
- else if (!ns.empty ())
- fail (l) << "unexpected parameter '" << ns << "' for "
- << "meta-operation " << mo;
+ // Fall through.
+ }
+
+ if (mo != nullptr)
+ fail (l) << "unexpected parameter '" << n << "' for "
+ << "meta-operation " << mo;
+ }
}
else if (!params.empty ())
+ {
+ if (mo != nullptr)
fail (l) << "unexpected parameters for meta-operation " << mo;
+ }
- return false;
+ return r;
+ }
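// For illustration (an editorial sketch, not part of the patch): assuming
// the usual buildspec syntax for meta-operation parameters, the two names
// recognized above would be combined along these lines (the exact
// invocation and quoting are an assumption):
//
//   $ b 'info(json no_subprojects)' libfoo/
//
// where json selects the JSON output format and no_subprojects suppresses
// the subprojects value in the output.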
+
+ bool
+ info_subprojects (const values& params)
+ {
+ return info_parse_params (params).subprojects;
}
static void
info_pre (context&, const values& params, const location& l)
{
- info_json (params, "info", l); // Validate.
+ info_parse_params (params, "info", l); // Validate.
}
static operation_id
@@ -884,7 +1114,7 @@ namespace build2
}
static void
- info_execute_lines (action_targets& ts)
+ info_execute_lines (action_targets& ts, bool subp)
{
for (size_t i (0); i != ts.size (); ++i)
{
@@ -917,7 +1147,7 @@ namespace build2
//
auto print_mods = [&rs] ()
{
- for (const module_state& ms: rs.root_extra->modules)
+ for (const module_state& ms: rs.root_extra->loaded_modules)
cout << ' ' << ms.name;
};
@@ -958,8 +1188,13 @@ namespace build2
<< "url:" ; print_empty (cast_empty<string> (rs[ctx.var_project_url])); cout << endl
<< "src_root:" ; print_dir (cast<dir_path> (rs[ctx.var_src_root])); cout << endl
<< "out_root:" ; print_dir (cast<dir_path> (rs[ctx.var_out_root])); cout << endl
- << "amalgamation:" ; print_pdir (*rs.root_extra->amalgamation); cout << endl
- << "subprojects:" ; print_null (*rs.root_extra->subprojects); cout << endl
+ << "amalgamation:" ; print_pdir (*rs.root_extra->amalgamation); cout << endl;
+ if (subp)
+ {
+ cout
+ << "subprojects:" ; print_null (*rs.root_extra->subprojects); cout << endl;
+ }
+ cout
<< "operations:" ; print_ops (rs.root_extra->operations, ctx.operation_table); cout << endl
<< "meta-operations:"; print_ops (rs.root_extra->meta_operations, ctx.meta_operation_table); cout << endl
<< "modules:" ; print_mods (); cout << endl;
@@ -968,7 +1203,7 @@ namespace build2
#ifndef BUILD2_BOOTSTRAP
static void
- info_execute_json (action_targets& ts)
+ info_execute_json (action_targets& ts, bool subp)
{
json::stream_serializer s (cout);
s.begin_array ();
@@ -1039,6 +1274,7 @@ namespace build2
// Print subprojects.
//
+ if (subp)
{
const subprojects* sps (*rs.root_extra->subprojects);
@@ -1082,12 +1318,12 @@ namespace build2
// Print modules.
//
- if (!rs.root_extra->modules.empty ())
+ if (!rs.root_extra->loaded_modules.empty ())
{
s.member_name ("modules", false /* check */);
s.begin_array ();
- for (const module_state& ms: rs.root_extra->modules)
+ for (const module_state& ms: rs.root_extra->loaded_modules)
s.value (ms.name, false /* check */);
s.end_array ();
@@ -1101,7 +1337,7 @@ namespace build2
}
#else
static void
- info_execute_json (action_targets&)
+ info_execute_json (action_targets&, bool)
{
}
#endif //BUILD2_BOOTSTRAP
@@ -1113,14 +1349,16 @@ namespace build2
uint16_t,
bool)
{
+ info_params ip (info_parse_params (params));
+
// Note that both outputs will not be "ideal" if the user does something
// like `b info(foo/) info(bar/)` instead of `b info(foo/ bar/)`. Oh,
// well.
//
- if (info_json (params))
- info_execute_json (ts);
+ if (ip.json)
+ info_execute_json (ts, ip.subprojects);
else
- info_execute_lines (ts);
+ info_execute_lines (ts, ip.subprojects);
}
const meta_operation_info mo_info {
@@ -1157,6 +1395,8 @@ namespace build2
nullptr,
nullptr,
nullptr,
+ nullptr,
+ nullptr,
nullptr
};
@@ -1183,6 +1423,8 @@ namespace build2
nullptr,
nullptr,
nullptr,
+ nullptr,
+ nullptr,
nullptr
};
@@ -1199,6 +1441,8 @@ namespace build2
nullptr,
nullptr,
nullptr,
+ nullptr,
+ nullptr,
nullptr
};
}
diff --git a/libbuild2/operation.hxx b/libbuild2/operation.hxx
index 2ff82ad..e8ff38a 100644
--- a/libbuild2/operation.hxx
+++ b/libbuild2/operation.hxx
@@ -121,6 +121,8 @@ namespace build2
// End of operation and meta-operation batches.
//
+ // Note: not called in case any of the earlier callbacks failed.
+ //
void (*operation_post) (context&, const values&, operation_id);
void (*meta_operation_post) (context&, const values&);
@@ -182,6 +184,11 @@ namespace build2
LIBBUILD2_SYMEXPORT extern const meta_operation_info mo_perform;
LIBBUILD2_SYMEXPORT extern const meta_operation_info mo_info;
+ // Return true if params does not contain no_subprojects.
+ //
+ LIBBUILD2_SYMEXPORT bool
+ info_subprojects (const values& params);
+
// Operation info.
//
// NOTE: keep POD-like to ensure can be constant-initialized in order to
@@ -218,14 +225,22 @@ namespace build2
//
const size_t concurrency;
- // The first argument in all the callbacks is the operation parameters.
+ // The values argument in the callbacks is the operation parameters. If
+ // the operation expects parameters, then it should have a non-NULL
+ // operation_pre() callback. Failing that, any parameters will be diagnosed
+ // as unexpected.
//
- // If the operation expects parameters, then it should have a non-NULL
- // pre(). Failed that, any parameters will be diagnosed as unexpected.
+ // Note also that if the specified operation has an outer operation (for
+ // example, update-for-install), then the parameters belong to the outer
+ // one (for example, install); this is done in order to be consistent with
+ // the case when update is performed as a pre-operation of install.
- // If the returned operation_id's are not 0, then they are injected
- // as pre/post operations for this operation. Can be NULL if unused.
- // The returned operation_id shall not be default_id.
+ // Pre/post operations for this operation. Note that these callbacks are
+ // called before this operation becomes current.
+ //
+ // If the operation_ids returned by pre/post_operation() are not 0, then
+ // they are injected as pre/post operations for this operation. Can be NULL
+ // if unused. The returned operation_id shall not be default_id.
//
operation_id (*pre_operation) (
context&, const values&, meta_operation_id, const location&);
@@ -233,6 +248,16 @@ namespace build2
operation_id (*post_operation) (
context&, const values&, meta_operation_id);
+ // Called immediately after/before this operation becomes/ceases to be
+ // current operation for the specified context. Can be used to
+ // initialize/finalize operation-specific data (context::current_*_odata).
+ // Can be NULL if unused.
+ //
+ void (*operation_pre) (
+ context&, const values&, bool inner, const location&);
+ void (*operation_post) (
+ context&, const values&, bool inner);
+
// Operation-specific ad hoc rule callbacks. Essentially, if not NULL,
// then every ad hoc rule match and apply call for this operation is
// proxied through these functions.
diff --git a/libbuild2/parser.cxx b/libbuild2/parser.cxx
index 29fe23f..5321cd5 100644
--- a/libbuild2/parser.cxx
+++ b/libbuild2/parser.cxx
@@ -44,7 +44,10 @@ namespace build2
{
o << '=';
names storage;
- to_stream (o, reverse (a.value, storage), quote_mode::normal, '@');
+ to_stream (o,
+ reverse (a.value, storage, true /* reduce */),
+ quote_mode::normal,
+ '@');
}
return o;
@@ -59,27 +62,7 @@ namespace build2
enter_scope (parser& p, dir_path&& d)
: p_ (&p), r_ (p.root_), s_ (p.scope_), b_ (p.pbase_)
{
- // Try hard not to call normalize(). Most of the time we will go just
- // one level deeper.
- //
- bool n (true);
-
- if (d.relative ())
- {
- // Relative scopes are opened relative to out, not src.
- //
- if (d.simple () && !d.current () && !d.parent ())
- {
- d = dir_path (p.scope_->out_path ()) /= d.string ();
- n = false;
- }
- else
- d = p.scope_->out_path () / d;
- }
-
- if (n)
- d.normalize ();
-
+ complete_normalize (*p.scope_, d);
e_ = p.switch_scope (d);
}
@@ -105,8 +88,8 @@ namespace build2
// Note: move-assignable to empty only.
//
- enter_scope (enter_scope&& x) {*this = move (x);}
- enter_scope& operator= (enter_scope&& x)
+ enter_scope (enter_scope&& x) noexcept {*this = move (x);}
+ enter_scope& operator= (enter_scope&& x) noexcept
{
if (this != &x)
{
@@ -123,6 +106,31 @@ namespace build2
enter_scope (const enter_scope&) = delete;
enter_scope& operator= (const enter_scope&) = delete;
+ static void
+ complete_normalize (scope& s, dir_path& d)
+ {
+ // Try hard not to call normalize(). Most of the time we will go just
+ // one level deeper.
+ //
+ bool n (true);
+
+ if (d.relative ())
+ {
+ // Relative scopes are opened relative to out, not src.
+ //
+ if (d.simple () && !d.current () && !d.parent ())
+ {
+ d = dir_path (s.out_path ()) /= d.string ();
+ n = false;
+ }
+ else
+ d = s.out_path () / d;
+ }
+
+ if (n)
+ d.normalize ();
+ }
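// For illustration (an editorial sketch, not part of the patch, with
// made-up paths): if the current scope's out path is /prj/out/, then a
// simple relative scope like sub/ takes the fast path above and becomes
// /prj/out/sub/ without calling normalize(), while something like a/../b/
// takes the general branch and is normalized to /prj/out/b/.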
+
private:
parser* p_;
scope* r_;
@@ -164,6 +172,10 @@ namespace build2
tracer& tr)
{
auto r (p.scope_->find_target_type (n, o, loc));
+
+ if (r.first.factory == nullptr)
+ p.fail (loc) << "abstract target type " << r.first.name << "{}";
+
return p.ctx->targets.insert (
r.first, // target type
move (n.dir),
@@ -184,6 +196,10 @@ namespace build2
tracer& tr)
{
auto r (p.scope_->find_target_type (n, o, loc));
+
+ if (r.first.factory == nullptr)
+ p.fail (loc) << "abstract target type " << r.first.name << "{}";
+
return p.ctx->targets.find (r.first, // target type
n.dir,
o.dir,
@@ -200,8 +216,8 @@ namespace build2
// Note: move-assignable to empty only.
//
- enter_target (enter_target&& x) {*this = move (x);}
- enter_target& operator= (enter_target&& x) {
+ enter_target (enter_target&& x) noexcept {*this = move (x);}
+ enter_target& operator= (enter_target&& x) noexcept {
p_ = x.p_; t_ = x.t_; x.p_ = nullptr; return *this;}
enter_target (const enter_target&) = delete;
@@ -232,8 +248,8 @@ namespace build2
// Note: move-assignable to empty only.
//
- enter_prerequisite (enter_prerequisite&& x) {*this = move (x);}
- enter_prerequisite& operator= (enter_prerequisite&& x) {
+ enter_prerequisite (enter_prerequisite&& x) noexcept {*this = move (x);}
+ enter_prerequisite& operator= (enter_prerequisite&& x) noexcept {
p_ = x.p_; r_ = x.r_; x.p_ = nullptr; return *this;}
enter_prerequisite (const enter_prerequisite&) = delete;
@@ -262,10 +278,11 @@ namespace build2
scope* root,
scope& base,
target* tgt,
- prerequisite* prq)
+ prerequisite* prq,
+ bool enter)
{
lexer l (is, in);
- parse_buildfile (l, root, base, tgt, prq);
+ parse_buildfile (l, root, base, tgt, prq, enter);
}
void parser::
@@ -273,7 +290,8 @@ namespace build2
scope* root,
scope& base,
target* tgt,
- prerequisite* prq)
+ prerequisite* prq,
+ bool enter)
{
path_ = &l.name ();
lexer_ = &l;
@@ -292,9 +310,9 @@ namespace build2
? auto_project_env (*root_)
: auto_project_env ());
- if (path_->path != nullptr)
- enter_buildfile (*path_->path); // Note: needs scope_.
-
+ const buildfile* bf (enter && path_->path != nullptr
+ ? &enter_buildfile<buildfile> (*path_->path)
+ : nullptr);
token t;
type tt;
next (t, tt);
@@ -306,13 +324,34 @@ namespace build2
else
{
parse_clause (t, tt);
- process_default_target (t);
+
+ if (stage_ != stage::boot && stage_ != stage::root)
+ process_default_target (t, bf);
}
if (tt != type::eos)
fail (t) << "unexpected " << t;
}
+ names parser::
+ parse_export_stub (istream& is, const path_name& name,
+ const scope& rs, scope& gs, scope& ts)
+ {
+ // Enter the export stub manually with correct out.
+ //
+ if (name.path != nullptr)
+ {
+ dir_path out (!rs.out_eq_src ()
+ ? out_src (name.path->directory (), rs)
+ : dir_path ());
+
+ enter_buildfile<buildfile> (*name.path, move (out));
+ }
+
+ parse_buildfile (is, name, &gs, ts, nullptr, nullptr, false /* enter */);
+ return move (export_value);
+ }
+
token parser::
parse_variable (lexer& l, scope& s, const variable& var, type kind)
{
@@ -578,6 +617,12 @@ namespace build2
{
f = &parser::parse_config_environment;
}
+ else if (n == "recipe")
+ {
+ // Valid only after recipe header (%).
+ //
+ fail (t) << n << " directive without % recipe header";
+ }
if (f != nullptr)
{
@@ -594,9 +639,39 @@ namespace build2
location nloc (get_location (t));
names ns;
- if (tt != type::labrace)
+ // We have to parse names in chunks to detect invalid cases of the
+ // group{foo}<...> syntax.
+ //
+ // Consider (1):
+ //
+ // x =
+ // group{foo} $x<...>:
+ //
+ // And (2):
+ //
+ // x = group{foo} group{bar}
+ // $x<...>:
+ //
+ // As well as (3):
+ //
+ // <...><...>:
+ //
+ struct chunk
{
- ns = parse_names (t, tt, pattern_mode::preserve);
+ size_t pos; // Index in ns of the beginning of the last chunk.
+ location loc; // Position of the beginning of the last chunk.
+ };
+ optional<chunk> ns_last;
+
+ bool labrace_first (tt == type::labrace);
+ if (!labrace_first)
+ {
+ do
+ {
+ ns_last = chunk {ns.size (), get_location (t)};
+ parse_names (t, tt, ns, pattern_mode::preserve, true /* chunk */);
+ }
+ while (start_names (tt));
// Allow things like function calls that don't result in anything.
//
@@ -612,44 +687,87 @@ namespace build2
}
}
- // Handle ad hoc target group specification (<...>).
+ // Handle target group specification (<...>).
//
// We keep an "optional" (empty) vector of names parallel to ns that
- // contains the ad hoc group members.
+ // contains the group members. Note that when we "catch" gns up to ns,
+ // we populate it with ad hoc (as opposed to explicit) groups with no
+ // members.
//
- adhoc_names ans;
+ group_names gns;
if (tt == type::labrace)
{
- while (tt == type::labrace)
+ for (; tt == type::labrace; labrace_first = false)
{
- // Parse target names inside < >.
+ // Detect explicit group (group{foo}<...>).
+ //
+ // Note that a `<` first thing on the line is not separated, thus the
+ // labrace_first complication.
+ //
+ bool expl (!t.separated && !labrace_first);
+ if (expl)
+ {
+ // Note: (N) refers to the example in the above comment.
+ //
+ if (!ns_last /* (3) */ || ns_last->pos == ns.size () /* (1) */)
+ {
+ fail (t) << "group name or whitespace expected before '<'";
+ }
+ else
+ {
+ size_t n (ns.size () - ns_last->pos);
+
+ // Note: could be a pair.
+ //
+ if ((n > 2 || (n == 2 && !ns[ns_last->pos].pair)) /* (2) */)
+ {
+ fail (t) << "single group name or whitespace expected before "
+ << "'<' instead of '"
+ << names_view (ns.data () + ns_last->pos, n) << "'";
+ }
+ }
+ }
+
+ // Parse target names inside <>.
//
// We "reserve" the right to have attributes inside <> though what
// exactly that would mean is unclear. One potentially useful
- // semantics would be the ability to specify attributes for ad hoc
- // members though the fact that the primary target is listed first
- // would make it rather unintuitive. Maybe attributes that change
- // the group semantics itself?
+ // semantics would be the ability to specify attributes for group
+ // members though the fact that the primary target for ad hoc groups
+ // is listed first would make it rather unintuitive. Maybe
+ // attributes that change the group semantics itself?
//
next_with_attributes (t, tt);
auto at (attributes_push (t, tt));
if (at.first)
- fail (at.second) << "attributes before ad hoc target";
+ fail (at.second) << "attributes before group member";
else
attributes_pop ();
- // Allow empty case (<>).
+ // For explicit groups, the group target is already in ns and all
+ // the members should go straight to gns.
//
- if (tt != type::rabrace)
+ // For ad hoc groups, the first name (or a pair) is the primary
+ // target which we need to keep in ns. The rest, if any, are ad
+ // hoc members that we should move to gns.
+ //
+ if (expl)
+ {
+ gns.resize (ns.size ()); // Catch up with the names vector.
+ group_names_loc& g (gns.back ());
+ g.expl = true;
+ g.group_loc = move (ns_last->loc);
+ g.member_loc = get_location (t); // Start of members.
+
+ if (tt != type::rabrace) // Handle empty case (<>)
+ parse_names (t, tt, g.ns, pattern_mode::preserve);
+ }
+ else if (tt != type::rabrace) // Allow and ignore empty case (<>).
{
- location aloc (get_location (t));
+ location mloc (get_location (t)); // Start of members.
- // The first name (or a pair) is the primary target which we need
- // to keep in ns. The rest, if any, are ad hoc members that we
- // should move to ans.
- //
size_t m (ns.size ());
parse_names (t, tt, ns, pattern_mode::preserve);
size_t n (ns.size ());
@@ -666,11 +784,10 @@ namespace build2
{
n -= m; // Number of names in ns we should end up with.
- ans.resize (n); // Catch up with the names vector.
- adhoc_names_loc& a (ans.back ());
-
- a.loc = move (aloc);
- a.ns.insert (a.ns.end (),
+ gns.resize (n); // Catch up with the names vector.
+ group_names_loc& g (gns.back ());
+ g.group_loc = g.member_loc = move (mloc);
+ g.ns.insert (g.ns.end (),
make_move_iterator (ns.begin () + n),
make_move_iterator (ns.end ()));
ns.resize (n);
@@ -684,12 +801,16 @@ namespace build2
// Parse the next chunk of target names after >, if any.
//
next (t, tt);
- if (start_names (tt))
- parse_names (t, tt, ns, pattern_mode::preserve);
+ ns_last = nullopt; // To detect <...><...>.
+ while (start_names (tt))
+ {
+ ns_last = chunk {ns.size (), get_location (t)};
+ parse_names (t, tt, ns, pattern_mode::preserve, true /* chunk */);
+ }
}
- if (!ans.empty ())
- ans.resize (ns.size ()); // Catch up with the final chunk.
+ if (!gns.empty ())
+ gns.resize (ns.size ()); // Catch up with the final chunk.
if (tt != type::colon)
fail (t) << "expected ':' instead of " << t;
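// For orientation (an editorial sketch, not part of the patch; member names
// and target types are made up): the two declaration forms distinguished
// above would look roughly as follows in a buildfile:
//
//   <exe{foo} file{foo.map}>: cxx{foo}    (ad hoc group: primary + members)
//   group{docs}<doc{intro} doc{usage}>:   (explicit group, static members)
//
// In the explicit form the group target is named immediately before '<'
// with no intervening whitespace, which is what the expl/labrace_first
// logic detects.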
@@ -717,11 +838,11 @@ namespace build2
// evaluated. The function signature is:
//
// void (token& t, type& tt,
- // bool adhoc_member,
+ // optional<bool> member, // true -- explicit, false -- ad hoc
// optional<pattern_type>, const target_type* pat_tt, string pat,
// const location& pat_loc)
//
- // Note that the target and its ad hoc members are inserted implied
+ // Note that the target and its group members are inserted implied
// but this flag can be cleared and default_target logic applied if
// appropriate.
//
@@ -811,27 +932,31 @@ namespace build2
// Resolve target type. If none is specified, then it's file{}.
//
+ // Note: abstract target type is ok here.
+ //
const target_type* ttype (n.untyped ()
? &file::static_type
: scope_->find_target_type (n.type));
if (ttype == nullptr)
- fail (nloc) << "unknown target type " << n.type;
+ fail (nloc) << "unknown target type " << n.type <<
+ info << "perhaps the module that defines this target type is "
+ << "not loaded by project " << *scope_->root_scope ();
- f (t, tt, false, n.pattern, ttype, move (n.value), nloc);
+ f (t, tt, nullopt, n.pattern, ttype, move (n.value), nloc);
};
auto for_each = [this, &trace, &for_one_pat,
- &t, &tt, &as, &ns, &nloc, &ans] (auto&& f)
+ &t, &tt, &as, &ns, &nloc, &gns] (auto&& f)
{
- // We need replay if we have multiple targets or ad hoc members.
+ // We need replay if we have multiple targets or group members.
//
// Note: watch out for an out-qualified single target (two names).
//
replay_guard rg (*this,
ns.size () > 2 ||
(ns.size () == 2 && !ns[0].pair) ||
- !ans.empty ());
+ !gns.empty ());
for (size_t i (0), e (ns.size ()); i != e; )
{
@@ -851,8 +976,9 @@ namespace build2
if (n.pair)
fail (nloc) << "out-qualified target type/pattern";
- if (!ans.empty () && !ans[i].ns.empty ())
- fail (ans[i].loc) << "ad hoc member in target type/pattern";
+ if (!gns.empty () && !gns[i].ns.empty ())
+ fail (gns[i].member_loc)
+ << "group member in target type/pattern";
if (*n.pattern == pattern_type::regex_substitution)
fail (nloc) << "regex substitution " << n << " without "
@@ -862,7 +988,8 @@ namespace build2
}
else
{
- vector<reference_wrapper<target>> ams;
+ bool expl;
+ vector<reference_wrapper<target>> gms;
{
name o (n.pair ? move (ns[++i]) : name ());
enter_target tg (*this,
@@ -875,24 +1002,32 @@ namespace build2
if (!as.empty ())
apply_target_attributes (*target_, as);
- // Enter ad hoc members.
+ // Enter group members.
//
- if (!ans.empty ())
+ if (!gns.empty ())
{
// Note: index after the pair increment.
//
- ams = enter_adhoc_members (move (ans[i]), true /* implied */);
+ group_names_loc& g (gns[i]);
+ expl = g.expl;
+
+ if (expl && !target_->is_a<group> ())
+ fail (g.group_loc) << *target_ << " is not group target";
+
+ gms = expl
+ ? enter_explicit_members (move (g), true /* implied */)
+ : enter_adhoc_members (move (g), true /* implied */);
}
- f (t, tt, false, nullopt, nullptr, string (), location ());
+ f (t, tt, nullopt, nullopt, nullptr, string (), location ());
}
- for (target& am: ams)
+ for (target& gm: gms)
{
rg.play (); // Replay.
- enter_target tg (*this, am);
- f (t, tt, true, nullopt, nullptr, string (), location ());
+ enter_target tg (*this, gm);
+ f (t, tt, expl, nullopt, nullptr, string (), location ());
}
}
@@ -951,8 +1086,11 @@ namespace build2
//
if (tt == type::assign || tt == type::prepend || tt == type::append)
{
- if (!ans.empty ())
- fail (ans[0].loc) << "ad hoc member in target type/pattern";
+ // Note: ns contains single target name.
+ //
+ if (!gns.empty ())
+ fail (gns[0].member_loc)
+ << "group member in target type/pattern";
// Note: see the same code below if changing anything here.
//
@@ -971,7 +1109,7 @@ namespace build2
for_one_pat (
[this, &var, akind, &aloc] (
token& t, type& tt,
- bool,
+ optional<bool>,
optional<pattern_type> pt, const target_type* ptt,
string pat, const location& ploc)
{
@@ -1017,7 +1155,7 @@ namespace build2
for_one_pat (
[this] (
token& t, type& tt,
- bool,
+ optional<bool>,
optional<pattern_type> pt, const target_type* ptt,
string pat, const location& ploc)
{
@@ -1037,8 +1175,11 @@ namespace build2
if (pns.empty () &&
tt != type::percent && tt != type::multi_lcbrace)
{
- if (!ans.empty ())
- fail (ans[0].loc) << "ad hoc member in target type/pattern";
+ // Note: ns contains single target name.
+ //
+ if (!gns.empty ())
+ fail (gns[0].member_loc)
+ << "group member in target type/pattern";
if (!as.empty ())
fail (as.loc) << "attributes before target type/pattern";
@@ -1142,22 +1283,33 @@ namespace build2
check_pattern (n, nloc);
- // Verify all the ad hoc members are patterns or substitutions and
- // of the correct type.
+ // If we have group members, verify all the members are patterns or
+ // substitutions (ad hoc) or substitutions (explicit) and of the
+ // correct pattern type. A rule for an explicit group that wishes to
+ // match based on some of its members feels far-fetched.
//
- names ns (ans.empty () ? names () : move (ans[0].ns));
- const location& aloc (ans.empty () ? location () : ans[0].loc);
+ // For explicit groups the use-case is to inject static members
+ // which could otherwise be tedious to specify for each group.
+ //
+ const location& mloc (gns.empty () ? location () : gns[0].member_loc);
+ names ns (gns.empty () ? names () : move (gns[0].ns));
+ bool expl (gns.empty () ? false : gns[0].expl);
for (name& n: ns)
{
if (!n.pattern || !(*n.pattern == pt || (st && *n.pattern == *st)))
{
- fail (aloc) << "expected " << pn << " pattern or substitution "
+ fail (mloc) << "expected " << pn << " pattern or substitution "
<< "instead of " << n;
}
if (*n.pattern != pattern_type::regex_substitution)
- check_pattern (n, aloc);
+ {
+ if (expl)
+ fail (mloc) << "explicit group member pattern " << n;
+
+ check_pattern (n, mloc);
+ }
}
// The same for prerequisites except here we can have non-patterns.
@@ -1217,7 +1369,15 @@ namespace build2
: scope_->find_target_type (n.type);
if (ttype == nullptr)
- fail (nloc) << "unknown target type " << n.type;
+ fail (nloc) << "unknown target type " << n.type <<
+ info << "perhaps the module that defines this target type is "
+ << "not loaded by project " << *scope_->root_scope ();
+
+ if (!gns.empty ())
+ {
+ if (ttype->is_a<group> () != expl)
+ fail (nloc) << "group type and target type mismatch";
+ }
unique_ptr<adhoc_rule_pattern> rp;
switch (pt)
@@ -1230,7 +1390,7 @@ namespace build2
rp.reset (new adhoc_rule_regex_pattern (
*scope_, rn, *ttype,
move (n), nloc,
- move (ns), aloc,
+ move (ns), mloc,
move (pns), ploc));
break;
case pattern_type::regex_substitution:
@@ -1309,7 +1469,12 @@ namespace build2
// And the same for the configure meta-operation to, for
// example, make sure a hinted ad hoc rule matches. @@ Hm,
// maybe we fixed this with action-specific hints? But the
- // injection part above may still apply.
+ // injection part above may still apply. BTW, this is also
+ // required for see-through groups in order to resolve their
+ // members.
+ //
+ // Note also that the equivalent semantics for ad hoc recipes
+ // is provided by match_adhoc_recipe().
//
if (a.meta_operation () == perform_id)
{
@@ -1331,7 +1496,8 @@ namespace build2
// see-through target group, then we may also need to
// register update for other meta-operations (see, for
// example, wildcard update registration in the cli
- // module).
+ // module). BTW, we can now detect such a target via
+ // its target type flags.
}
}
}
@@ -1375,7 +1541,7 @@ namespace build2
st = token (t), // Save start token (will be gone on replay).
recipes = small_vector<shared_ptr<adhoc_rule>, 1> ()]
(token& t, type& tt,
- bool am,
+ optional<bool> gm, // true -- explicit, false -- ad hoc
optional<pattern_type> pt, const target_type* ptt, string pat,
const location& ploc) mutable
{
@@ -1389,7 +1555,14 @@ namespace build2
//
next (t, tt); // Newline.
next (t, tt); // First token inside the variable block.
- parse_variable_block (t, tt, pt, ptt, move (pat), ploc);
+
+ // For explicit groups we only assign variables on the group
+ // omitting the members.
+ //
+ if (!gm || !*gm)
+ parse_variable_block (t, tt, pt, ptt, move (pat), ploc);
+ else
+ skip_block (t, tt);
if (tt != type::rcbrace)
fail (t) << "expected '}' instead of " << t;
@@ -1405,10 +1578,10 @@ namespace build2
else
rt = st;
- // If this is an ad hoc group member then we know we are
- // replaying and can skip the recipe.
+ // If this is a group member then we know we are replaying and
+ // can skip the recipe.
//
- if (am)
+ if (gm)
{
replay_skip ();
next (t, tt);
@@ -1435,7 +1608,7 @@ namespace build2
// Note also that we treat this as an explicit dependency
// declaration (i.e., not implied).
//
- enter_targets (move (ns), nloc, move (ans), 0, as);
+ enter_targets (move (ns), nloc, move (gns), 0, as);
}
continue;
@@ -1450,7 +1623,8 @@ namespace build2
if (!start_names (tt))
fail (t) << "unexpected " << t;
- // @@ PAT: currently we pattern-expand target-specific var names.
+ // @@ PAT: currently we pattern-expand target-specific var names (see
+ // also parse_import()).
//
const location ploc (get_location (t));
names pns (parse_names (t, tt, pattern_mode::expand));
@@ -1485,7 +1659,7 @@ namespace build2
for_each (
[this, &var, akind, &aloc] (
token& t, type& tt,
- bool,
+ optional<bool> gm,
optional<pattern_type> pt, const target_type* ptt, string pat,
const location& ploc)
{
@@ -1494,7 +1668,18 @@ namespace build2
*pt, *ptt, move (pat), ploc,
var, akind, aloc);
else
- parse_variable (t, tt, var, akind);
+ {
+ // Skip explicit group members (see the block case above for
+ // background).
+ //
+ if (!gm || !*gm)
+ parse_variable (t, tt, var, akind);
+ else
+ {
+ next (t, tt);
+ skip_line (t, tt);
+ }
+ }
});
next_after_newline (t, tt);
@@ -1512,7 +1697,7 @@ namespace build2
parse_dependency (t, tt,
move (ns), nloc,
- move (ans),
+ move (gns),
move (pns), ploc,
as);
}
@@ -1738,7 +1923,7 @@ namespace build2
// Parse a recipe chain.
//
// % [<attrs>] [<buildspec>]
- // [if|if!|switch ...]
+ // [if|if!|switch|recipe ...]
// {{ [<lang> ...]
// ...
// }}
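// For illustration (an editorial sketch, not part of the patch; the file
// name is made up): with the recipe directive added to this grammar, a
// recipe body can be sourced from a file instead of being spelled inline
// between {{ and }}, roughly:
//
//   exe{hello}: cxx{hello}
//   %
//   recipe buildscript hello.recipe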
@@ -1757,10 +1942,27 @@ namespace build2
//
if (target_ != nullptr)
{
+ // @@ What if some members are added later?
+ //
+ // @@ Also, what happens if redeclared as a real dependency, do we
+ // upgrade the members?
+ //
if (target_->decl != target_decl::real)
{
- for (target* m (target_); m != nullptr; m = m->adhoc_member)
- m->decl = target_decl::real;
+ target_->decl = target_decl::real;
+
+ if (group* g = target_->is_a<group> ())
+ {
+ for (const target& m: g->static_members)
+ const_cast<target&> (m).decl = target_decl::real; // During load.
+ }
+ else
+ {
+ for (target* m (target_->adhoc_member);
+ m != nullptr;
+ m = m->adhoc_member)
+ m->decl = target_decl::real;
+ }
if (default_target_ == nullptr)
default_target_ = target_;
@@ -1798,7 +2000,131 @@ namespace build2
attributes& as;
buildspec& bs;
const location& bsloc;
- } d {ttype, name, recipes, first, clean, i, as, bs, bsloc};
+ function<void (string&&)> parse_trailer;
+ } d {ttype, name, recipes, first, clean, i, as, bs, bsloc, {}};
+
+ d.parse_trailer = [this, &d] (string&& text)
+ {
+ if (d.first)
+ {
+ adhoc_rule& ar (*d.recipes.back ());
+
+ // Translate each buildspec entry into action and add it to the
+ // recipe entry.
+ //
+ const location& l (d.bsloc);
+
+ for (metaopspec& m: d.bs)
+ {
+ meta_operation_id mi (ctx->meta_operation_table.find (m.name));
+
+ if (mi == 0)
+ fail (l) << "unknown meta-operation " << m.name;
+
+ const meta_operation_info* mf (
+ root_->root_extra->meta_operations[mi]);
+
+ if (mf == nullptr)
+ fail (l) << "project " << *root_ << " does not support meta-"
+ << "operation " << ctx->meta_operation_table[mi].name;
+
+ for (opspec& o: m)
+ {
+ operation_id oi;
+ if (o.name.empty ())
+ {
+ if (mf->operation_pre == nullptr)
+ oi = update_id;
+ else
+ // Calling operation_pre() to translate doesn't feel
+ // appropriate here.
+ //
+ fail (l) << "default operation in recipe action" << endf;
+ }
+ else
+ oi = ctx->operation_table.find (o.name);
+
+ if (oi == 0)
+ fail (l) << "unknown operation " << o.name;
+
+ const operation_info* of (root_->root_extra->operations[oi]);
+
+ if (of == nullptr)
+ fail (l) << "project " << *root_ << " does not support "
+ << "operation " << ctx->operation_table[oi];
+
+ // Note: for now always inner (see match_rule_impl() for
+ // details).
+ //
+ action a (mi, oi);
+
+ // Check for duplicates (local).
+ //
+ if (find_if (
+ d.recipes.begin (), d.recipes.end (),
+ [a] (const shared_ptr<adhoc_rule>& r)
+ {
+ auto& as (r->actions);
+ return find (as.begin (), as.end (), a) != as.end ();
+ }) != d.recipes.end ())
+ {
+ fail (l) << "duplicate " << mf->name << '(' << of->name
+ << ") recipe";
+ }
+
+ ar.actions.push_back (a);
+ }
+ }
+
+ // Set the recipe text.
+ //
+ if (ar.recipe_text (
+ *scope_,
+ d.ttype != nullptr ? *d.ttype : target_->type (),
+ move (text),
+ d.as))
+ d.clean = true;
+
+ // Verify we have no unhandled attributes.
+ //
+ for (attribute& a: d.as)
+ fail (d.as.loc) << "unknown recipe attribute " << a << endf;
+ }
+
+ // Copy the recipe over to the target verifying there are no
+ // duplicates (global).
+ //
+ if (target_ != nullptr)
+ {
+ const shared_ptr<adhoc_rule>& r (d.recipes[d.i]);
+
+ for (const shared_ptr<adhoc_rule>& er: target_->adhoc_recipes)
+ {
+ auto& as (er->actions);
+
+ for (action a: r->actions)
+ {
+ if (find (as.begin (), as.end (), a) != as.end ())
+ {
+ const meta_operation_info* mf (
+ root_->root_extra->meta_operations[a.meta_operation ()]);
+
+ const operation_info* of (
+ root_->root_extra->operations[a.operation ()]);
+
+ fail (d.bsloc)
+ << "duplicate " << mf->name << '(' << of->name
+ << ") recipe for target " << *target_;
+ }
+ }
+ }
+
+ target_->adhoc_recipes.push_back (r);
+
+ // Note that "registration" of configure_* and dist_* actions
+ // (similar to ad hoc rules) is provided by match_adhoc_recipe().
+ }
+ };
// Note that this function must be called at most once per iteration.
//
@@ -1841,7 +2167,7 @@ namespace build2
// to rule_name.
shared_ptr<adhoc_rule> ar;
- if (!lang)
+ if (!lang || icasecmp (*lang, "buildscript") == 0)
{
// Buildscript
//
@@ -1937,129 +2263,200 @@ namespace build2
}
if (!skip)
- {
- if (d.first)
- {
- adhoc_rule& ar (*d.recipes.back ());
-
- // Translate each buildspec entry into action and add it to the
- // recipe entry.
- //
- const location& l (d.bsloc);
-
- for (metaopspec& m: d.bs)
- {
- meta_operation_id mi (ctx->meta_operation_table.find (m.name));
+ d.parse_trailer (move (t.value));
- if (mi == 0)
- fail (l) << "unknown meta-operation " << m.name;
+ next (t, tt);
+ assert (tt == type::multi_rcbrace);
- const meta_operation_info* mf (
- root_->root_extra->meta_operations[mi]);
+ next (t, tt); // Newline.
+ next_after_newline (t, tt, token (t)); // Should be on its own line.
+ };
- if (mf == nullptr)
- fail (l) << "project " << *root_ << " does not support meta-"
- << "operation " << ctx->meta_operation_table[mi].name;
+ auto parse_recipe_directive = [this, &d] (token& t, type& tt,
+ const string&)
+ {
+ // Parse recipe directive:
+ //
+ // recipe <lang> <file>
+ //
+ // Note that here <lang> is not optional.
+ //
+ // @@ We could guess <lang> from the extension.
- for (opspec& o: m)
- {
- operation_id oi;
- if (o.name.empty ())
- {
- if (mf->operation_pre == nullptr)
- oi = update_id;
- else
- // Calling operation_pre() to translate doesn't feel
- // appropriate here.
- //
- fail (l) << "default operation in recipe action" << endf;
- }
- else
- oi = ctx->operation_table.find (o.name);
+ // Use value mode to minimize the number of special characters.
+ //
+ mode (lexer_mode::value, '@');
- if (oi == 0)
- fail (l) << "unknown operation " << o.name;
+ // Parse <lang>.
+ //
+ if (next (t, tt) != type::word)
+ fail (t) << "expected recipe language instead of " << t;
- const operation_info* of (root_->root_extra->operations[oi]);
+ location lloc (get_location (t));
+ string lang (t.value);
+ next (t, tt);
- if (of == nullptr)
- fail (l) << "project " << *root_ << " does not support "
- << "operation " << ctx->operation_table[oi];
+ // Parse <file> as names to get variable expansion, etc.
+ //
+ location nloc (get_location (t));
+ names ns (parse_names (t, tt, pattern_mode::ignore, "file name"));
- // Note: for now always inner (see match_rule() for details).
- //
- action a (mi, oi);
+ path file;
+ try
+ {
+ file = convert<path> (move (ns));
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (nloc) << "invalid recipe file path: " << e;
+ }
- // Check for duplicates (local).
- //
- if (find_if (
- d.recipes.begin (), d.recipes.end (),
- [a] (const shared_ptr<adhoc_rule>& r)
- {
- auto& as (r->actions);
- return find (as.begin (), as.end (), a) != as.end ();
- }) != d.recipes.end ())
- {
- fail (l) << "duplicate " << mf->name << '(' << of->name
- << ") recipe";
- }
+ string text;
+ if (d.first)
+ {
+ // Source relative to the buildfile rather than src scope. In
+ // particular, this makes sourcing from exported buildfiles work.
+ //
+ if (file.relative () && path_->path != nullptr)
+ {
+ // Note: all sourced/included/imported paths are absolute and
+ // normalized.
+ //
+ file = path_->path->directory () / file;
+ }
- ar.actions.push_back (a);
- }
- }
+ file.normalize ();
- // Set the recipe text.
- //
- if (ar.recipe_text (
- *scope_,
- d.ttype != nullptr ? *d.ttype : target_->type (),
- move (t.value),
- d.as))
- d.clean = true;
-
- // Verify we have no unhandled attributes.
- //
- for (attribute& a: d.as)
- fail (d.as.loc) << "unknown recipe attribute " << a << endf;
+ try
+ {
+ ifdstream ifs (file);
+ text = ifs.read_text ();
+ }
+ catch (const io_error& e)
+ {
+ fail (nloc) << "unable to read recipe file " << file << ": " << e;
}
- // Copy the recipe over to the target verifying there are no
- // duplicates (global).
- //
- if (target_ != nullptr)
+ shared_ptr<adhoc_rule> ar;
{
- const shared_ptr<adhoc_rule>& r (d.recipes[d.i]);
+ // This is expected to be the location of the opening multi-curly
+ // with the recipe body starting from the following line. So we
+ // need to fudge the line number a bit.
+ //
+ location loc (file, 0, 1);
- for (const shared_ptr<adhoc_rule>& er: target_->adhoc_recipes)
+ if (icasecmp (lang, "buildscript") == 0)
{
- auto& as (er->actions);
+ // Buildscript
+ //
+ ar.reset (
+ new adhoc_buildscript_rule (
+ d.name.empty () ? "<ad hoc buildscript recipe>" : d.name,
+ loc,
+ 2)); // Use `{{` and `}}` for dump.
- for (action a: r->actions)
+ // Enter as buildfile-like so that it gets automatically
+ // distributed. Note: must be consistent with build/export/
+ // handling in process_default_target().
+ //
+ enter_buildfile<buildscript> (file);
+ }
+ else if (icasecmp (lang, "c++") == 0)
+ {
+ // C++
+ //
+ // We expect to find a C++ comment line with version and
+ // optional fragment separator before the first non-comment,
+ // non-blank line:
+ //
+ // // c++ <ver> [<sep>]
+ //
+ string s;
+ location sloc (file, 1, 1);
{
- if (find (as.begin (), as.end (), a) != as.end ())
+ // Note: observe blank lines for accurate line count.
+ //
+ size_t b (0), e (0);
+ for (size_t m (0), n (text.size ());
+ next_word (text, n, b, e, m, '\n', '\r'), b != n;
+ sloc.line++)
{
- const meta_operation_info* mf (
- root_->root_extra->meta_operations[a.meta_operation ()]);
+ s.assign (text, b, e - b);
- const operation_info* of (
- root_->root_extra->operations[a.operation ()]);
+ if (!trim (s).empty ())
+ {
+ if (icasecmp (s, "// c++ ", 7) == 0)
+ break;
- fail (d.bsloc)
- << "duplicate " << mf->name << '(' << of->name
- << ") recipe for target " << *target_;
+ if (s[0] != '/' || s[1] != '/')
+ {
+ b = e;
+ break;
+ }
+ }
}
+
+ if (b == e)
+ fail (sloc) << "no '// c++ <version> [<separator>]' line";
}
- }
- target_->adhoc_recipes.push_back (r);
+ uint64_t ver;
+ optional<string> sep;
+ {
+ size_t b (7), e (7);
+ if (next_word (s, b, e, ' ', '\t') == 0)
+ fail (sloc) << "missing c++ recipe version" << endf;
+
+ try
+ {
+ ver = convert<uint64_t> (build2::name (string (s, b, e - b)));
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (sloc) << "invalid c++ recipe version: " << e << endf;
+ }
+
+ if (next_word (s, b, e, ' ', '\t') != 0)
+ {
+ sep = string (s, b, e - b);
+
+ if (next_word (s, b, e, ' ', '\t') != 0)
+ fail (sloc) << "junk after fragment separator";
+ }
+ }
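// For illustration (an editorial sketch, not part of the patch): per the
// parsing above, a C++ recipe file is expected to start with a version
// line such as:
//
//   // c++ 1
//
// optionally followed on the same line by a fragment separator token of
// the author's choosing.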
+
+ ar.reset (
+ new adhoc_cxx_rule (
+ d.name.empty () ? "<ad hoc c++ recipe>" : d.name,
+ loc,
+ 2, // Use `{{` and `}}` for dump.
+ ver,
+ move (sep)));
+
+ // Enter as buildfile-like so that it gets automatically
+ // distributed. Note: must be consistent with build/export/
+ // handling in process_default_target().
+ //
+ // While ideally we would want to use the cxx{} target type,
+ // it's defined in a separate build system module (which may not
+ // even be loaded by this project, so even runtime lookup won't
+ // work). So we use file{} instead.
+ //
+ enter_buildfile<build2::file> (file);
+ }
+ else
+ fail (lloc) << "unknown recipe language '" << lang << "'";
}
+
+ assert (d.recipes[d.i] == nullptr);
+ d.recipes[d.i] = move (ar);
}
+ else
+ assert (d.recipes[d.i] != nullptr);
- next (t, tt);
- assert (tt == type::multi_rcbrace);
+ d.parse_trailer (move (text));
- next (t, tt); // Newline.
- next_after_newline (t, tt, token (t)); // Should be on its own line.
+ next_after_newline (t, tt);
};
bsloc = get_location (t); // Fallback location.
@@ -2119,7 +2516,7 @@ namespace build2
expire_mode ();
next_after_newline (t, tt, "recipe action");
- // See if this is if-else or switch.
+ // See if this is if-else/switch or `recipe`.
//
// We want the keyword test similar to parse_clause() but we cannot do
// it if replaying. So we skip it with understanding that if it's not
@@ -2137,12 +2534,19 @@ namespace build2
if (n == "if" || n == "if!")
{
- parse_if_else (t, tt, true /* multi */, parse_block);
+ parse_if_else (t, tt, true /* multi */,
+ parse_block, parse_recipe_directive);
continue;
}
else if (n == "switch")
{
- parse_switch (t, tt, true /* multi */, parse_block);
+ parse_switch (t, tt, true /* multi */,
+ parse_block, parse_recipe_directive);
+ continue;
+ }
+ else if (n == "recipe")
+ {
+ parse_recipe_directive (t, tt, "" /* kind */);
continue;
}
@@ -2150,7 +2554,7 @@ namespace build2
}
if (tt != type::multi_lcbrace)
- fail (t) << "expected recipe block instead of " << t;
+ fail (t) << "expected recipe block or 'recipe' instead of " << t;
// Fall through.
}
@@ -2196,15 +2600,96 @@ namespace build2
}
vector<reference_wrapper<target>> parser::
- enter_adhoc_members (adhoc_names_loc&& ans, bool implied)
+ enter_explicit_members (group_names_loc&& gns, bool implied)
{
- tracer trace ("parser::enter_adhoc_members", &path_);
+ tracer trace ("parser::enter_explicit_members", &path_);
+
+ names& ns (gns.ns);
+ const location& loc (gns.member_loc);
vector<reference_wrapper<target>> r;
- r.reserve (ans.ns.size ());
+ r.reserve (ns.size ());
- names& ns (ans.ns);
- const location& loc (ans.loc);
+ group& g (target_->as<group> ());
+ auto& ms (g.static_members);
+
+ for (size_t i (0); i != ns.size (); ++i)
+ {
+ name&& n (move (ns[i]));
+ name&& o (n.pair ? move (ns[++i]) : name ());
+
+ if (n.qualified ())
+ fail (loc) << "project name in target " << n;
+
+ // We derive the path unless the target name ends with the '...' escape
+ // which here we treat as the "let the rule derive the path" indicator
+ // (see target::split_name() for details). This will only be useful for
+ // referring to group members that are managed by the group's matching
+ // rule. Note also that omitting '...' for such a member could be used
+ // to override the file name, provided the rule checks if the path has
+ // already been derived before doing it itself.
+ //
+ // @@ What can the ad hoc recipe/rule do differently here? Maybe get
+ // path from dynamic targets? Maybe we will have custom path
+ // derivation support in buildscript in the future?
+ //
+ bool escaped;
+ {
+ const string& v (n.value);
+ size_t p (v.size ());
+
+ escaped = (p > 3 &&
+ v[--p] == '.' && v[--p] == '.' && v[--p] == '.' &&
+ v[--p] != '.');
+ }
+
+ target& m (enter_target::insert_target (*this,
+ move (n), move (o),
+ implied,
+ loc, trace));
+
+ if (g == m)
+ fail (loc) << "explicit group member " << m << " is group itself";
+
+ // Add as static member skipping duplicates.
+ //
+ if (find (ms.begin (), ms.end (), m) == ms.end ())
+ {
+ if (m.group == nullptr)
+ m.group = &g;
+ else if (m.group != &g)
+ fail (loc) << g << " group member " << m << " already belongs to "
+ << "group " << *m.group;
+
+ ms.push_back (m);
+ }
+
+ if (!escaped)
+ {
+ if (file* ft = m.is_a<file> ())
+ ft->derive_path ();
+ }
+
+ r.push_back (m);
+ }
+
+ return r;
+ }
+
+ vector<reference_wrapper<target>> parser::
+ enter_adhoc_members (group_names_loc&& gns, bool implied)
+ {
+ tracer trace ("parser::enter_adhoc_members", &path_);
+
+ names& ns (gns.ns);
+ const location& loc (gns.member_loc);
+
+ if (target_->is_a<group> ())
+ fail (loc) << "ad hoc group primary member " << *target_
+ << " is explicit group";
+
+ vector<reference_wrapper<target>> r;
+ r.reserve (ns.size ());
for (size_t i (0); i != ns.size (); ++i)
{
@@ -2232,14 +2717,16 @@ namespace build2
v[--p] != '.');
}
- target& at (
- enter_target::insert_target (*this,
- move (n), move (o),
- implied,
- loc, trace));
+ target& m (enter_target::insert_target (*this,
+ move (n), move (o),
+ implied,
+ loc, trace));
- if (target_ == &at)
- fail (loc) << "ad hoc group member " << at << " is primary target";
+ if (target_ == &m)
+ fail (loc) << "ad hoc group member " << m << " is primary target";
+
+ if (m.is_a<group> ())
+ fail (loc) << "ad hoc group member " << m << " is explicit group";
// Add as an ad hoc member at the end of the chain skipping duplicates.
//
@@ -2247,7 +2734,7 @@ namespace build2
const_ptr<target>* mp (&target_->adhoc_member);
for (; *mp != nullptr; mp = &(*mp)->adhoc_member)
{
- if (*mp == &at)
+ if (*mp == &m)
{
mp = nullptr;
break;
@@ -2256,18 +2743,22 @@ namespace build2
if (mp != nullptr)
{
- *mp = &at;
- at.group = target_;
+ if (m.group == nullptr)
+ m.group = target_;
+ else if (m.group != target_)
+ fail (loc) << *target_ << " ad hoc group member " << m
+ << " already belongs to group " << *m.group;
+ *mp = &m;
}
}
if (!escaped)
{
- if (file* ft = at.is_a<file> ())
+ if (file* ft = m.is_a<file> ())
ft->derive_path ();
}
- r.push_back (at);
+ r.push_back (m);
}
return r;
@@ -2276,12 +2767,12 @@ namespace build2
small_vector<pair<reference_wrapper<target>,
vector<reference_wrapper<target>>>, 1> parser::
enter_targets (names&& tns, const location& tloc, // Target names.
- adhoc_names&& ans, // Ad hoc target names.
+ group_names&& gns, // Group member names.
size_t prereq_size,
const attributes& tas) // Target attributes.
{
- // Enter all the targets (normally we will have just one) and their ad hoc
- // groups.
+ // Enter all the targets (normally we will have just one) and their group
+ // members.
//
tracer trace ("parser::enter_targets", &path_);
@@ -2311,14 +2802,21 @@ namespace build2
if (!tas.empty ())
apply_target_attributes (*target_, tas);
- // Enter ad hoc members.
+ // Enter group members.
//
- vector<reference_wrapper<target>> ams;
- if (!ans.empty ())
+ vector<reference_wrapper<target>> gms;
+ if (!gns.empty ())
{
// Note: index after the pair increment.
//
- ams = enter_adhoc_members (move (ans[i]), false /* implied */);
+ group_names_loc& g (gns[i]);
+
+ if (g.expl && !target_->is_a<group> ())
+ fail (g.group_loc) << *target_ << " is not group target";
+
+ gms = g.expl
+ ? enter_explicit_members (move (g), false /* implied */)
+ : enter_adhoc_members (move (g), false /* implied */);
}
if (default_target_ == nullptr)
@@ -2326,7 +2824,7 @@ namespace build2
target_->prerequisites_state_.store (2, memory_order_relaxed);
target_->prerequisites_.reserve (prereq_size);
- tgs.emplace_back (*target_, move (ams));
+ tgs.emplace_back (*target_, move (gms));
}
return tgs;
@@ -2414,7 +2912,7 @@ namespace build2
void parser::
parse_dependency (token& t, token_type& tt,
names&& tns, const location& tloc, // Target names.
- adhoc_names&& ans, // Ad hoc target names.
+ group_names&& gns, // Group member names.
names&& pns, const location& ploc, // Prereq names.
const attributes& tas) // Target attributes.
{
@@ -2468,7 +2966,7 @@ namespace build2
//
small_vector<pair<reference_wrapper<target>,
vector<reference_wrapper<target>>>, 1>
- tgs (enter_targets (move (tns), tloc, move (ans), pns.size (), tas));
+ tgs (enter_targets (move (tns), tloc, move (gns), pns.size (), tas));
// Now enter each prerequisite into each target.
//
@@ -2486,7 +2984,27 @@ namespace build2
optional<string>& e (rp.second);
if (t == nullptr)
- fail (ploc) << "unknown target type " << n.type;
+ {
+ if (n.proj)
+ {
+ // If the target type is unknown then no phase 2 import (like
+ // rule-specific search) can possibly succeed so we can fail now and
+ // with a more accurate reason. See import2(names) for background.
+ //
+ diag_record dr;
+ dr << fail (ploc) << "unable to import target " << n;
+ import_suggest (dr, *n.proj, nullptr, string (), false);
+ }
+ else
+ {
+ fail (ploc) << "unknown target type " << n.type <<
+ info << "perhaps the module that defines this target type is "
+ << "not loaded by project " << *scope_->root_scope ();
+ }
+ }
+
+ if (t->factory == nullptr)
+ fail (ploc) << "abstract target type " << t->name << "{}";
// Current dir collapses to an empty one.
//
@@ -2564,32 +3082,41 @@ namespace build2
//
// We handle multiple targets and/or prerequisites by replaying the tokens
// (see the target-specific case comments for details). The function
- // signature is:
+ // signature for for_each_t (see for_each above for the gm argument semantics):
+ //
+ // void (token& t, type& tt, optional<bool> gm)
+ //
+ // And for for_each_p:
//
// void (token& t, type& tt)
//
auto for_each_t = [this, &t, &tt, &tgs] (auto&& f)
{
- // We need replay if we have multiple targets or ad hoc members.
+ // We need replay if we have multiple targets or group members.
//
replay_guard rg (*this, tgs.size () > 1 || !tgs[0].second.empty ());
for (auto ti (tgs.begin ()), te (tgs.end ()); ti != te; )
{
target& tg (ti->first);
- const vector<reference_wrapper<target>>& ams (ti->second);
+ const vector<reference_wrapper<target>>& gms (ti->second);
{
enter_target g (*this, tg);
- f (t, tt, false);
+ f (t, tt, nullopt);
}
- for (target& am: ams)
+ if (!gms.empty ())
{
- rg.play (); // Replay.
+ bool expl (tg.is_a<group> ());
+
+ for (target& gm: gms)
+ {
+ rg.play (); // Replay.
- enter_target g (*this, am);
- f (t, tt, true);
+ enter_target g (*this, gm);
+ f (t, tt, expl);
+ }
}
if (++ti != te)
@@ -2647,7 +3174,7 @@ namespace build2
this,
st = token (t), // Save start token (will be gone on replay).
recipes = small_vector<shared_ptr<adhoc_rule>, 1> ()]
- (token& t, type& tt, bool am) mutable
+ (token& t, type& tt, optional<bool> gm) mutable
{
token rt; // Recipe start token.
@@ -2657,7 +3184,14 @@ namespace build2
{
next (t, tt); // Newline.
next (t, tt); // First token inside the variable block.
- parse_variable_block (t, tt);
+
+ // Skip explicit group members (see the block case above for
+ // background).
+ //
+ if (!gm || !*gm)
+ parse_variable_block (t, tt);
+ else
+ skip_block (t, tt);
if (tt != type::rcbrace)
fail (t) << "expected '}' instead of " << t;
@@ -2673,10 +3207,10 @@ namespace build2
else
rt = st;
- // If this is an ad hoc group member then we know we are
- // replaying and can skip the recipe.
+ // If this is a group member then we know we are replaying and can
+ // skip the recipe.
//
- if (am)
+ if (gm)
{
replay_skip ();
next (t, tt);
@@ -2831,16 +3365,16 @@ namespace build2
// we just say that the dependency chain is equivalent to specifying
// each dependency separately.
//
- // Also note that supporting ad hoc target group specification in
- // chains will be complicated. For example, what if prerequisites that
- // have ad hoc targets don't end up being chained? Do we just silently
- // drop them? Also, these are prerequsites first that happened to be
- // reused as target names so perhaps it is the right thing not to
- // support, conceptually.
+ // Also note that supporting target group specification in chains will
+ // be complicated. For example, what if prerequisites that have group
+ // members don't end up being chained? Do we just silently drop them?
+ // Also, these are prerequisites first that happened to be reused as
+ // target names so perhaps it is the right thing not to support,
+ // conceptually.
//
parse_dependency (t, tt,
move (pns), ploc,
- {} /* ad hoc target name */,
+ {} /* group names */,
move (ns), loc,
attributes () /* target attributes */);
}
@@ -2848,14 +3382,18 @@ namespace build2
}
void parser::
- source (istream& is, const path_name& in, const location& loc, bool deft)
+ source_buildfile (istream& is,
+ const path_name& in,
+ const location& loc,
+ optional<bool> deft)
{
- tracer trace ("parser::source", &path_);
+ tracer trace ("parser::source_buildfile", &path_);
l5 ([&]{trace (loc) << "entering " << in;});
- if (in.path != nullptr)
- enter_buildfile (*in.path);
+ const buildfile* bf (in.path != nullptr
+ ? &enter_buildfile<buildfile> (*in.path)
+ : nullptr);
const path_name* op (path_);
path_ = &in;
@@ -2865,11 +3403,11 @@ namespace build2
lexer_ = &l;
target* odt;
- if (deft)
- {
+ if (!deft || *deft)
odt = default_target_;
+
+ if (deft && *deft)
default_target_ = nullptr;
- }
token t;
type tt;
@@ -2879,12 +3417,15 @@ namespace build2
if (tt != type::eos)
fail (t) << "unexpected " << t;
- if (deft)
+ if (deft && *deft)
{
- process_default_target (t);
- default_target_ = odt;
+ if (stage_ != stage::boot && stage_ != stage::root)
+ process_default_target (t, bf);
}
+ if (!deft || *deft)
+ default_target_ = odt;
+
lexer_ = ol;
path_ = op;
@@ -2894,11 +3435,35 @@ namespace build2
void parser::
parse_source (token& t, type& tt)
{
+ // source [<attrs>] <path>+
+ //
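+ // For example (an illustrative sketch; the path is hypothetical while the
+ // no_default_target attribute is as handled below):
+ //
+ //   source [no_default_target] $src_base/common.build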
+
// The rest should be a list of buildfiles. Parse them as names in the
- // value mode to get variable expansion and directory prefixes.
+ // value mode to get variable expansion and directory prefixes. Also
+ // handle optional attributes.
//
mode (lexer_mode::value, '@');
- next (t, tt);
+ next_with_attributes (t, tt);
+ attributes_push (t, tt);
+
+ bool nodt (false); // Source buildfile without default target semantics.
+ {
+ attributes as (attributes_pop ());
+ const location& l (as.loc);
+
+ for (const attribute& a: as)
+ {
+ const string& n (a.name);
+
+ if (n == "no_default_target")
+ {
+ nodt = true;
+ }
+ else
+ fail (l) << "unknown source directive attribute " << a;
+ }
+ }
+
const location l (get_location (t));
names ns (tt != type::newline && tt != type::eos
? parse_names (t, tt, pattern_mode::expand, "path", nullptr)
@@ -2925,10 +3490,10 @@ namespace build2
try
{
ifdstream ifs (p);
- source (ifs,
- path_name (p),
- get_location (t),
- false /* default_target */);
+ source_buildfile (ifs,
+ path_name (p),
+ get_location (t),
+ nodt ? optional<bool> {} : false);
}
catch (const io_error& e)
{
@@ -2942,6 +3507,9 @@ namespace build2
void parser::
parse_include (token& t, type& tt)
{
+ // include <path>+
+ //
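+ // For example (hypothetical paths):
+ //
+ //   include hello/           # Includes hello/buildfile.
+ //   include ../common.build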
+
tracer trace ("parser::parse_include", &path_);
if (stage_ == stage::boot)
@@ -3058,6 +3626,8 @@ namespace build2
continue;
}
+ // Note: see a variant of this in parse_import().
+ //
// Clear/restore if/switch location.
//
// We do it here but not in parse_source since the included buildfile is
@@ -3073,10 +3643,10 @@ namespace build2
try
{
ifdstream ifs (p);
- source (ifs,
- path_name (p),
- get_location (t),
- true /* default_target */);
+ source_buildfile (ifs,
+ path_name (p),
+ get_location (t),
+ true /* default_target */);
}
catch (const io_error& e)
{
@@ -3159,10 +3729,10 @@ namespace build2
dr << info (l) << "while parsing " << args[0] << " output";
});
- source (is,
- path_name ("<stdout>"),
- l,
- false /* default_target */);
+ source_buildfile (is,
+ path_name ("<stdout>"),
+ l,
+ false /* default_target */);
}
is.close (); // Detect errors.
@@ -3222,14 +3792,16 @@ namespace build2
// which case it will be duplicating them in its root.build file). So
// for now we allow this trusting the user knows what they are doing.
//
- string proj;
- {
- const project_name& n (named_project (*root_));
-
- if (!n.empty ())
- proj = n.variable ();
- }
-
+ // There is another special case: a buildfile imported from another
+ // project. In this case we also allow <project> to be the imported
+ // project name in addition to the importing one. The thinking here is that an
+ // imported buildfile is in a sense like a module (may provide rules which
+ // may require configuration, etc) and should be able to use its own
+ // project name (which is often the corresponding tool name) in the
+ // configuration variables, just like modules. In this case we use the
+ // imported project name as the reporting module name (which can be
+ // overridden with the config.report.module attribute).
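+ // For example (an illustrative sketch; the project and variable names are
+ // hypothetical):
+ //
+ //   config [config.report.module=thrift] config.thrift.checksum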
+ //
const location loc (get_location (t));
// We are now in the normal lexing mode and we let the lexer handle `?=`.
@@ -3247,6 +3819,11 @@ namespace build2
optional<string> report;
string report_var;
+ // Reporting module name. Empty means the config module reporting
+ // project's own configuration.
+ //
+ project_name report_module;
+
for (auto i (as.begin ()); i != as.end (); )
{
if (i->name == "null")
@@ -3277,6 +3854,23 @@ namespace build2
try
{
report_var = convert<string> (move (i->value));
+
+ if (!report)
+ report = string ("true");
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (as.loc) << "invalid " << i->name << " attribute value: " << e;
+ }
+ }
+ else if (i->name == "config.report.module")
+ {
+ try
+ {
+ report_module = convert<project_name> (move (i->value));
+
+ if (!report)
+ report = string ("true");
}
catch (const invalid_argument& e)
{
@@ -3300,9 +3894,13 @@ namespace build2
// As a way to print custom (discovered, computed, etc) configuration
// information we allow specifying a non config.* variable provided it is
- // explicitly marked with the config.report attribute.
+ // explicitly marked with the config.report attribute (or another
+ // attribute that implies it).
//
bool new_val (false);
+ string org_var; // Original variable if config.report.variable specified.
+
+ const variable* var (nullptr); // config.* variable.
lookup l;
if (report && *report != "false" && !config)
@@ -3321,7 +3919,14 @@ namespace build2
// philosophical question. In either case it doesn't seem useful for it
// to unconditionally force reporting at level 2.
//
- report_var = move (name);
+ if (!report_var.empty ())
+ {
+ // For example, config [config.report.variable=multi] multi_database
+ //
+ org_var = move (name);
+ }
+ else
+ report_var = move (name);
next (t, tt); // We shouldn't have the default value part.
}
@@ -3334,41 +3939,133 @@ namespace build2
// config prefix and the project substring.
//
{
- diag_record dr;
+ string proj;
+ {
+ const project_name& n (named_project (*root_));
- if (!config)
- dr << fail (t) << "configuration variable '" << name
- << "' does not start with 'config.'";
+ if (!n.empty ())
+ proj = n.variable ();
+ }
- if (!proj.empty ())
+ diag_record dr;
+ do // Breakout loop.
{
- size_t p (name.find ('.' + proj));
+ if (!config)
+ {
+ dr << fail (t) << "configuration variable '" << name
+ << "' does not start with 'config.'";
+ break;
+ }
- if (p == string::npos ||
- ((p += proj.size () + 1) != name.size () && // config.<proj>
- name[p] != '.')) // config.<proj>.
+ auto match = [&name] (const string& proj)
{
+ size_t p (name.find ('.' + proj));
+ return (p != string::npos &&
+ ((p += proj.size () + 1) == name.size () || // config.<proj>
+ name[p] == '.')); // config.<proj>.
+ };
+
+ if (!proj.empty () && match (proj))
+ break;
+
+ // See if this buildfile belongs to a different project. If so, use
+ // the project name as the reporting module name.
+ //
+ if (path_->path != nullptr)
+ {
+ // Note: all sourced/included/imported paths are absolute and
+ // normalized.
+ //
+ const path& f (*path_->path);
+ dir_path d (f.directory ());
+
+ auto p (ctx->scopes.find (d)); // Note: never empty.
+ if (*p.first != &ctx->global_scope)
+ {
+ // The buildfile will most likely be in src which means we may
+ // end up with multiple scopes (see scope_map for background).
+ // First check if one of them is us. If not, then we can extract
+ // the project name from any one of them.
+ //
+ const scope& bs (**p.first); // Save.
+
+ for (; p.first != p.second; ++p.first)
+ {
+ if (root_ == (*p.first)->root_scope ())
+ break;
+ }
+
+ if (p.first == p.second)
+ {
+ // Note: we expect the project itself to be named.
+ //
+ const project_name& n (project (*bs.root_scope ()));
+
+ if (!n.empty ())
+ {
+ // If the buildfile comes from a different project, then
+ // it's more likely to use the imported project's config
+ // variables. So replace proj with that for diagnostics
+ // below.
+ //
+ proj = n.variable ();
+
+ if (*report != "false" && verb >= 2)
+ report_module = n;
+ }
+ }
+ }
+ else
+ {
+ // If the buildfile is not in any project, then it could be
+ // installed.
+ //
+ // Per import2_buildfile(), exported buildfiles are installed
+ // into $install.buildfile/<proj>/....
+ //
+ const dir_path& id (build_install_buildfile);
+
+ if (!id.empty () && d.sub (id))
+ {
+ dir_path l (d.leaf (id));
+ if (!l.empty ())
+ {
+ project_name n (*l.begin ());
+ proj = n.variable ();
+
+ if (*report != "false" && verb >= 2)
+ report_module = move (n);
+ }
+ }
+ }
+ }
+
+ if (!proj.empty () && match (proj))
+ break;
+
+ // Note: only if proj not empty (see above).
+ //
+ if (!proj.empty ())
dr << fail (t) << "configuration variable '" << name
<< "' does not include project name";
- }
}
+ while (false);
if (!dr.empty ())
dr << info << "expected variable name in the 'config[.**]."
<< (proj.empty () ? "<project>" : proj.c_str ()) << ".**' form";
}
- const variable& var (
- parse_variable_name (move (name), get_location (t)));
- apply_variable_attributes (var);
+ var = &parse_variable_name (move (name), get_location (t));
+ apply_variable_attributes (*var);
// Note that even though we are relying on the config.** variable
// pattern to set global visibility, let's make sure as a sanity check.
//
- if (var.visibility != variable_visibility::global)
+ if (var->visibility != variable_visibility::global)
{
- fail (t) << "configuration variable " << var << " has "
- << var.visibility << " visibility";
+ fail (t) << "configuration variable " << *var << " has "
+ << var->visibility << " visibility";
}
// See if we have the default value part.
@@ -3390,15 +4087,15 @@ namespace build2
//
bool dev;
{
- size_t p (var.name.rfind ('.'));
- dev = p != 6 && var.name.compare (p + 1, string::npos, "develop") == 0;
+ size_t p (var->name.rfind ('.'));
+ dev = p != 6 && var->name.compare (p + 1, string::npos, "develop") == 0;
}
uint64_t sflags (0);
if (dev)
{
- if (var.type != &value_traits<bool>::value_type)
- fail (loc) << var << " variable must be of type bool";
+ if (var->type != &value_traits<bool>::value_type)
+ fail (loc) << *var << " variable must be of type bool";
// This is quite messy: below we don't always parse the value (plus it
// may be computed) so here we just peek at the next token. But we
@@ -3407,10 +4104,10 @@ namespace build2
if (!def_val ||
peek (lexer_mode::value, '@') != type::word ||
peeked ().value != "false")
- fail (loc) << var << " variable default value must be literal false";
+ fail (loc) << *var << " variable default value must be literal false";
if (nullable)
- fail (loc) << var << " variable must not be nullable";
+ fail (loc) << *var << " variable must not be nullable";
sflags |= config::save_false_omitted;
}
@@ -3419,7 +4116,7 @@ namespace build2
// in order to mark it as saved. We also have to do this to get the new
// value status.
//
- l = config::lookup_config (new_val, *root_, var, sflags);
+ l = config::lookup_config (new_val, *root_, *var, sflags);
// Handle the default value.
//
@@ -3455,12 +4152,12 @@ namespace build2
else
{
value lhs, rhs (parse_variable_value (t, tt, !dev /* mode */));
- apply_value_attributes (&var, lhs, move (rhs), type::assign);
+ apply_value_attributes (var, lhs, move (rhs), type::assign);
if (!nullable)
nullable = lhs.null;
- l = config::lookup_config (new_val, *root_, var, move (lhs), sflags);
+ l = config::lookup_config (new_val, *root_, *var, move (lhs), sflags);
}
}
@@ -3471,22 +4168,44 @@ namespace build2
// then the user is expected to handle the undefined case).
//
if (!nullable && l.defined () && l->null)
- fail (loc) << "null value in non-nullable variable " << var;
+ fail (loc) << "null value in non-nullable variable " << *var;
}
// We will be printing the report at either level 2 (-v) or 3 (-V)
- // depending on the final value of config_report_new.
+ // depending on the final value of config_report::new_value.
//
- // Note that for the config_report_new calculation we only incorporate
- // variables that we are actually reporting.
+ // Note that for the config_report::new_value calculation we only
+ // incorporate variables that we are actually reporting.
//
if (*report != "false" && verb >= 2)
{
+ // Find existing or insert new config_report entry for this module.
+ //
+ auto i (find_if (config_reports.begin (),
+ config_reports.end (),
+ [&report_module] (const config_report& r)
+ {
+ return r.module == report_module;
+ }));
+
+ if (i == config_reports.end ())
+ {
+ config_reports.push_back (
+ config_report {move (report_module), {}, false});
+ i = config_reports.end () - 1;
+ }
+
+ auto& report_values (i->values);
+ bool& report_new_value (i->new_value);
+
// We don't want to lookup the report variable value here since it's
// most likely not set yet.
//
if (!report_var.empty ())
{
+ if (org_var.empty () && var != nullptr)
+ org_var = var->name;
+
// In a somewhat hackish way we pass the variable in an undefined
// lookup.
//
@@ -3500,25 +4219,35 @@ namespace build2
if (l.var != nullptr)
{
- auto r (make_pair (l, move (*report)));
-
// If we have a duplicate, update it (it could be useful to have
// multiple config directives to "probe" the value before calculating
// the default; see lookup_config() for details).
//
- auto i (find_if (config_report.begin (),
- config_report.end (),
- [&l] (const pair<lookup, string>& p)
+ // Since the original variable is what the user will see in the
+ // report, we prefer that as a key.
+ //
+ auto i (find_if (report_values.begin (),
+ report_values.end (),
+ [&org_var, &l] (const config_report::value& v)
{
- return p.first.var == l.var;
+ return (v.org.empty () && org_var.empty ()
+ ? v.val.var == l.var
+ : (v.org.empty ()
+ ? v.val.var->name == org_var
+ : v.org == l.var->name));
}));
- if (i == config_report.end ())
- config_report.push_back (move (r));
+ if (i == report_values.end ())
+ report_values.push_back (
+ config_report::value {l, move (*report), move (org_var)});
else
- *i = move (r);
+ {
+ i->val = l;
+ i->fmt = move (*report);
+ if (i->org.empty ()) i->org = move (org_var);
+ }
- config_report_new = config_report_new || new_val;
+ report_new_value = report_new_value || new_val;
}
}
@@ -3575,10 +4304,14 @@ namespace build2
if (stage_ == stage::boot)
fail (t) << "import during bootstrap";
- // General import format:
+ // General import form:
//
// import[?!] [<attrs>] <var> = [<attrs>] (<target>|<project>%<target>])+
//
+ // Special form for importing buildfiles:
+ //
+ // import[?!] [<attrs>] (<target>|<project>%<target>])+
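+ //
+ // For example (an illustrative sketch; the project name and buildfile
+ // target are hypothetical, the once attribute is as handled below):
+ //
+ //   import [once] libhello%buildfile{rules}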
+ //
bool opt (t.value.back () == '?');
optional<string> ph2 (opt || t.value.back () == '!'
? optional<string> (string ())
@@ -3588,13 +4321,16 @@ namespace build2
//
next_with_attributes (t, tt);
- // Get variable attributes, if any, and deal with the special metadata
- // attribute. Since currently it can only appear in the import directive,
- // we handle it in an ad hoc manner.
+ // Get variable (or value, in the second form) attributes, if any, and
+ // deal with the special metadata and rule_hint attributes. Since
+ // currently they can only appear in the import directive, we handle them
+ // in an ad hoc manner.
//
attributes_push (t, tt);
- bool meta (false);
+ bool meta (false); // Import with metadata.
+ bool once (false); // Import buildfile once.
+ bool nodt (false); // Import buildfile without default target semantics.
{
attributes& as (attributes_top ());
const location& l (as.loc);
@@ -3612,6 +4348,14 @@ namespace build2
meta = true;
}
+ else if (n == "no_default_target")
+ {
+ nodt = true;
+ }
+ else if (n == "once")
+ {
+ once = true;
+ }
else if (n == "rule_hint")
{
if (!ph2)
@@ -3642,80 +4386,224 @@ namespace build2
}
}
- if (tt != type::word)
- fail (t) << "expected variable name instead of " << t;
-
- const variable& var (
- parse_variable_name (move (t.value), get_location (t)));
- apply_variable_attributes (var);
+ // Note that before supporting the second form (without <var>) we used to
+ // parse the value after assignment in the value mode. However, we don't
+ // really need to since what we should have is a bunch of target names.
+ // In other words, whatever the value mode does not treat as special
+ // compared to the normal mode (like `:`) would be illegal here.
+ //
+ // Note that we expand patterns for the ad hoc import case:
+ //
+ // import sub = */
+ //
+ // @@ PAT: the only issue here is that we currently pattern-expand var
+ // name (same issue as with target-specific var names).
+ //
+ if (!start_names (tt))
+ fail (t) << "expected variable name or buildfile target instead of " << t;
- if (var.visibility > variable_visibility::scope)
- {
- fail (t) << "variable " << var << " has " << var.visibility
- << " visibility but is assigned in import";
- }
+ location loc (get_location (t));
+ names ns (parse_names (t, tt, pattern_mode::expand));
- // Next should come the assignment operator. Note that we don't support
+ // Next could come the assignment operator. Note that we don't support
// default assignment (?=) yet (could make sense when attempting to import
// alternatives or some such).
//
- next (t, tt);
+ type atype;
+ const variable* var (nullptr);
+ if (tt == type::assign || tt == type::append || tt == type::prepend)
+ {
+ var = &parse_variable_name (move (ns), loc);
+ apply_variable_attributes (*var);
- if (tt != type::assign && tt != type::append && tt != type::prepend)
- fail (t) << "expected variable assignment instead of " << t;
+ if (var->visibility > variable_visibility::scope)
+ {
+ fail (loc) << "variable " << *var << " has " << var->visibility
+ << " visibility but is assigned in import";
+ }
- type atype (tt);
- value& val (atype == type::assign
- ? scope_->assign (var)
- : scope_->append (var));
+ atype = tt;
+ next_with_attributes (t, tt);
+ attributes_push (t, tt, true /* standalone */);
- // The rest should be a list of targets. Parse them similar to a value on
- // the RHS of an assignment (attributes, etc).
- //
- // Note that we expant patterns for the ad hoc import case:
- //
- // import sub = */
+ if (!start_names (tt))
+ fail (t) << "expected target to import instead of " << t;
+
+ loc = get_location (t);
+ ns = parse_names (t, tt, pattern_mode::expand);
+ }
+ else if (tt == type::default_assign)
+ fail (t) << "default assignment not yet supported";
+
+ // If there are any value attributes, roundtrip the names through the
+ // value applying the attributes.
//
- mode (lexer_mode::value, '@');
- next_with_attributes (t, tt);
+ if (!attributes_top ().empty ())
+ {
+ value lhs, rhs (move (ns));
+ apply_value_attributes (nullptr, lhs, move (rhs), type::assign);
- if (tt == type::newline || tt == type::eos)
- fail (t) << "expected target to import instead of " << t;
+ if (!lhs)
+ fail (loc) << "expected target to import instead of null value";
- const location loc (get_location (t));
+ untypify (lhs, true /* reduce */);
+ ns = move (lhs.as<names> ());
+ }
+ else
+ attributes_pop ();
- if (value v = parse_value_with_attributes (t, tt, pattern_mode::expand))
+ value* val (var != nullptr ?
+ &(atype == type::assign
+ ? scope_->assign (*var)
+ : scope_->append (*var))
+ : nullptr);
+
+ for (name& n: ns)
{
- names storage;
- for (name& n: reverse (v, storage))
+ // @@ Could this be an out-qualified ad hoc import? Yes, see comment
+ // about buildfile import in import_load().
+ //
+ if (n.pair)
+ fail (loc) << "unexpected pair in import";
+
+ // See if we are importing a buildfile target. Such an import is always
+ // immediate.
+ //
+ bool bf (n.type == "buildfile");
+ if (bf)
{
- // @@ Could this be an out-qualified ad hoc import?
- //
- if (n.pair)
- fail (loc) << "unexpected pair in import";
+ if (meta)
+ fail (loc) << "metadata requested for buildfile target " << n;
- // import() will check the name, if required.
- //
- names r (import (*scope_, move (n), ph2, opt, meta, loc).first);
+ if (var != nullptr)
+ {
+ if (once)
+ fail (loc) << "once importation requested with variable assignment";
+
+ if (nodt)
+ fail (loc) << "no_default_target importation requested with "
+ << "variable assignment";
+ }
+ if (ph2 && !ph2->empty ())
+ fail (loc) << "rule hint specified for buildfile target " << n;
+ }
+ else
+ {
+ if (once)
+ fail (loc) << "once importation requested for target " << n;
+
+ if (nodt)
+ fail (loc) << "no_default_target importation requested for target "
+ << n;
+
+ if (var == nullptr)
+ fail (loc) << "variable assignment required to import target " << n;
+ }
+
+ // import() will check the name, if required.
+ //
+ import_result<scope> ir (
+ import (*scope_,
+ move (n),
+ ph2 ? ph2 : bf ? optional<string> (string ()) : nullopt,
+ opt,
+ meta,
+ loc));
+
+ names& r (ir.name);
+
+ if (val != nullptr)
+ {
if (r.empty ()) // Optional not found.
{
if (atype == type::assign)
- val = nullptr;
+ *val = nullptr;
}
else
{
- if (atype == type::assign)
- val.assign (move (r), &var);
- else if (atype == type::prepend)
- val.prepend (move (r), &var);
- else
- val.append (move (r), &var);
+ // Import (more precisely, alias) the target type into this project
+ // if not known.
+ //
+ // Note that if the result is ignored (val is NULL), then it's fair
+ // to assume this is not necessary.
+ //
+ if (const scope* iroot = ir.target)
+ {
+ const name& n (r.front ());
+ if (n.typed ())
+ import_target_type (*root_, *iroot, n.type, loc);
+ }
+
+ if (atype == type::assign) val->assign (move (r), var);
+ else if (atype == type::prepend) val->prepend (move (r), var);
+ else val->append (move (r), var);
}
if (atype == type::assign)
atype = type::append; // Append subsequent values.
}
+ else
+ {
+ assert (bf);
+
+ if (r.empty ()) // Optional not found.
+ {
+ assert (opt);
+ continue;
+ }
+
+ // Note: see also import_buildfile().
+ //
+ assert (r.size () == 1); // See import_load() for details.
+ name& n (r.front ());
+ path p (n.dir / n.value); // Should already include extension.
+
+ // Note: similar to parse_include().
+ //
+ // Nuance: we insert this buildfile even with once=false in case it
+ // gets imported with once=true from another place.
+ //
+ if (!root_->root_extra->insert_buildfile (p) && once)
+ {
+ l5 ([&]{trace (loc) << "skipping already imported " << p;});
+ continue;
+ }
+
+ // Clear/restore if/switch location.
+ //
+ auto g = make_guard ([this, old = condition_] () mutable
+ {
+ condition_ = old;
+ });
+ condition_ = nullopt;
+
+ try
+ {
+ ifdstream ifs (p);
+
+ auto df = make_diag_frame (
+ [this, &p, &loc] (const diag_record& dr)
+ {
+ dr << info (loc) << p << " imported from here";
+ });
+
+ // @@ Do we want to enter this buildfile? What's the harm (one
+ // benefit is that it will be in dump). But, we currently don't
+ // out-qualify them, though it feels like there is nothing fatal
+ // in that, just inaccurate.
+ //
+ source_buildfile (ifs,
+ path_name (p),
+ loc,
+ nodt ? optional<bool> {} : false);
+ }
+ catch (const io_error& e)
+ {
+ fail (loc) << "unable to read imported buildfile " << p << ": " << e;
+ }
+ }
}
next_after_newline (t, tt);
@@ -3757,7 +4645,12 @@ namespace build2
fail (l) << "null value in export";
if (val.type != nullptr)
- untypify (val);
+ {
+ // While it feels far-fetched, let's preserve empty typed values in the
+ // result.
+ //
+ untypify (val, false /* reduce */);
+ }
export_value = move (val).as<names> ();
@@ -3844,41 +4737,160 @@ namespace build2
void parser::
parse_define (token& t, type& tt)
{
- // define <derived>: <base>
+ // define [<attrs>] <derived>: <base>
+ // define <alias> = <scope>/<type>
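+ //
+ // For example (an illustrative sketch; the names are hypothetical and
+ // $cli_root is assumed to be an absolute project out directory):
+ //
+ //   define [see_through] thrift_cxx: group
+ //   define cli = $cli_root/cli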
//
// See tests/define.
//
- if (next (t, tt) != type::word)
- fail (t) << "expected name instead of " << t << " in target type "
- << "definition";
+ next_with_attributes (t, tt);
- string dn (move (t.value));
- const location dnl (get_location (t));
+ attributes_push (t, tt);
+ attributes as (attributes_pop ());
- if (next (t, tt) != type::colon)
- fail (t) << "expected ':' instead of " << t << " in target type "
+ if (tt != type::word)
+ fail (t) << "expected name instead of " << t << " in target type "
<< "definition";
+ string n (move (t.value));
+ const location nl (get_location (t));
+
next (t, tt);
- if (tt == type::word)
+ if (tt == type::colon)
{
+ // Handle attributes.
+ //
+ target_type::flag fs (target_type::flag::none);
+ {
+ const location& l (as.loc);
+
+ for (attribute& a: as)
+ {
+ const string& n (a.name);
+ value& v (a.value);
+
+ if (n == "see_through") fs |= target_type::flag::see_through;
+ else if (n == "member_hint") fs |= target_type::flag::member_hint;
+ else
+ fail (l) << "unknown target type definition attribute " << n;
+
+ if (!v.null)
+ fail (l) << "unexpected value in attribute " << n;
+ }
+ }
+
+ if (next (t, tt) != type::word)
+ fail (t) << "expected name instead of " << t << " in target type "
+ << "definition";
+
// Target.
//
const string& bn (t.value);
const target_type* bt (scope_->find_target_type (bn));
if (bt == nullptr)
- fail (t) << "unknown target type " << bn;
+ fail (t) << "unknown target type " << bn <<
+ info << "perhaps the module that defines this target type is "
+ << "not loaded by project " << *scope_->root_scope ();
- if (!root_->derive_target_type (move (dn), *bt).second)
- fail (dnl) << "target type " << dn << " already defined in this "
- << "project";
+ // The derive_target_type() call below does not produce a non-abstract
+ // type if passed an abstract base. So we ban this for now (it's unclear
+ // why someone would want to do this).
+ //
+ if (bt->factory == nullptr)
+ fail (t) << "abstract base target type " << bt->name << "{}";
+
+ // Note that the group{foo}<...> syntax is only recognized for group-
+ // based targets and ad hoc buildscript recipes/rules only match group.
+ // (We may want to relax this for member_hint in the future since its
+ // (We may want to relax this for member_hint in the future since it's
+ // we will do in ad hoc recipes/rules in this case is fuzzy).
+ //
+ if ((fs & target_type::flag::group) == target_type::flag::group &&
+ !bt->is_a<group> ())
+ fail (t) << "base target type " << bn << " must be group for "
+ << "group-related attribute";
+
+ if (!root_->derive_target_type (move (n), *bt, fs).second)
+ fail (nl) << "target type " << n << " already defined in project "
+ << *root_;
next (t, tt); // Get newline.
}
+ else if (tt == type::assign)
+ {
+ if (!as.empty ())
+ fail (as.loc) << "unexpected target type alias attribute";
+
+ // The rest should be a path-like target type. Parse it as names in
+ // the value mode to get variable expansion, etc.
+ //
+ mode (lexer_mode::value, '@');
+ next (t, tt);
+ const location tl (get_location (t));
+ names ns (
+ parse_names (t, tt, pattern_mode::ignore, "target type", nullptr));
+
+ name* tn (nullptr);
+ if (ns.size () == 1)
+ {
+ tn = &ns.front ();
+
+ if (tn->file ())
+ {
+ try
+ {
+ tn->canonicalize ();
+
+ if (tn->dir.absolute ())
+ tn->dir.normalize ();
+ else
+ tn = nullptr;
+ }
+ catch (const invalid_path&) {tn = nullptr;}
+ catch (const invalid_argument&) {tn = nullptr;}
+ }
+ else
+ tn = nullptr;
+ }
+
+ if (tn == nullptr)
+ fail (tl) << "expected scope-qualified target type instead of " << ns;
+
+ // If we got here, then tn->dir is the scope and tn->value is the target
+ // type.
+ //
+ // NOTE: see similar code in import_target_type().
+ //
+ const target_type* tt (nullptr);
+ if (const scope* rs = ctx->scopes.find_out (tn->dir).root_scope ())
+ {
+ tt = rs->find_target_type (tn->value);
+
+ if (tt == nullptr)
+ fail (tl) << "unknown target type " << tn->value << " in scope "
+ << *rs;
+ }
+ else
+ fail (tl) << "unknown project scope " << tn->dir << " in scope"
+ << "-qualified target type" <<
+ info << "did you forget to import the corresponding project?";
+
+ if (n != tn->value)
+ fail (nl) << "alias target type name " << n << " does not match "
+ << tn->value;
+
+ // Note that this is potentially a shallow reference to a user-derived
+ // target type. Seeing that we only ever destroy the entire graph, this
+ // should be ok.
+ //
+ auto p (root_->root_extra->target_types.insert (*tt));
+
+ if (!p.second && &p.first.get () != tt)
+ fail (nl) << "target type " << n << " already defined in this project";
+ }
else
- fail (t) << "expected name instead of " << t << " in target type "
+ fail (t) << "expected ':' or '=' instead of " << t << " in target type "
<< "definition";
next_after_newline (t, tt);
@@ -3898,14 +4910,17 @@ namespace build2
[this] (token& t, type& tt, bool s, const string& k)
{
return parse_clause_block (t, tt, s, k);
- });
+ },
+ {});
}
void parser::
parse_if_else (token& t, type& tt,
bool multi,
const function<void (
- token&, type&, bool, const string&)>& parse_block)
+ token&, type&, bool, const string&)>& parse_block,
+ const function<void (
+ token&, token_type&, const string&)>& parse_recipe_directive)
{
// Handle the whole if-else chain. See tests/if-else.
//
@@ -3930,7 +4945,7 @@ namespace build2
// is not an option. So let's skip it.
//
if (taken)
- skip_line (t, tt);
+ skip_line (t, tt); // Skip expression.
else
{
if (tt == type::newline || tt == type::eos)
@@ -3990,31 +5005,65 @@ namespace build2
parse_block (t, tt, !take, k);
taken = taken || take;
}
- else if (!multi) // No lines in multi-curly if-else.
+ else
{
- if (take)
+ // The only valid line in multi-curly if-else is `recipe`.
+ //
+ if (multi)
{
- if (!parse_clause (t, tt, true))
- fail (t) << "expected " << k << "-line instead of " << t;
+ // Note that we cannot do the keyword test if we are replaying. So
+ // we skip it with the understanding that if it's not a keyword,
+ // then we wouldn't have gotten here on the replay.
+ //
+ if (tt == type::word &&
+ (replay_ == replay::play || keyword (t)) &&
+ t.value == "recipe")
+ {
+ if (take)
+ {
+ parse_recipe_directive (t, tt, k);
+ taken = true;
+ }
+ else
+ {
+ skip_line (t, tt);
- taken = true;
+ if (tt == type::newline)
+ next (t, tt);
+ }
+ }
+ else
+ fail (t) << "expected " << k << "-block or 'recipe' instead of "
+ << t;
}
else
{
- skip_line (t, tt);
+ if (tt == type::multi_lcbrace)
+ fail (t) << "expected " << k << "-line instead of " << t <<
+ info << "did you forget to specify % recipe header?";
- if (tt == type::newline)
- next (t, tt);
+ if (take)
+ {
+ if (!parse_clause (t, tt, true))
+ fail (t) << "expected " << k << "-line instead of " << t;
+
+ taken = true;
+ }
+ else
+ {
+ skip_line (t, tt);
+
+ if (tt == type::newline)
+ next (t, tt);
+ }
}
}
- else
- fail (t) << "expected " << k << "-block instead of " << t;
// See if we have another el* keyword.
//
// Note that we cannot do the keyword test if we are replaying. So we
// skip it with the understanding that if it's not a keyword, then we
- // wouldn't have gotten here on the reply (see parse_recipe() for
+ // wouldn't have gotten here on the replay (see parse_recipe() for
// details).
//
if (k != "else" &&
@@ -4045,14 +5094,17 @@ namespace build2
[this] (token& t, type& tt, bool s, const string& k)
{
return parse_clause_block (t, tt, s, k);
- });
+ },
+ {});
}
void parser::
parse_switch (token& t, type& tt,
bool multi,
const function<void (
- token&, type&, bool, const string&)>& parse_block)
+ token&, type&, bool, const string&)>& parse_block,
+ const function<void (
+ token&, token_type&, const string&)>& parse_recipe_directive)
{
// switch <value> [: <func> [<arg>]] [, <value>...]
// {
@@ -4147,7 +5199,7 @@ namespace build2
{
// Note that we cannot do the keyword test if we are replaying. So we
// skip it with the understanding that if it's not a keyword, then we
- // wouldn't have gotten here on the reply (see parse_recipe() for
+ // wouldn't have gotten here on the replay (see parse_recipe() for
// details). Note that this appears to mean that replay cannot be used
// if we allow lines, only blocks. Consider:
//
@@ -4353,25 +5405,49 @@ namespace build2
parse_block (t, tt, !take, k);
taken = taken || take;
}
- else if (!multi) // No lines in multi-curly if-else.
+ else
{
- if (take)
+ if (multi)
{
- if (!parse_clause (t, tt, true))
- fail (t) << "expected " << k << "-line instead of " << t;
+ if (tt == type::word &&
+ (replay_ == replay::play || keyword (t)) &&
+ t.value == "recipe")
+ {
+ if (take)
+ {
+ parse_recipe_directive (t, tt, k);
+ taken = true;
+ }
+ else
+ {
+ skip_line (t, tt);
- taken = true;
+ if (tt == type::newline)
+ next (t, tt);
+ }
+ }
+ else
+ fail (t) << "expected " << k << "-block or 'recipe' instead of "
+ << t;
}
else
{
- skip_line (t, tt);
+ if (take)
+ {
+ if (!parse_clause (t, tt, true))
+ fail (t) << "expected " << k << "-line instead of " << t;
- if (tt == type::newline)
- next (t, tt);
+ taken = true;
+ }
+ else
+ {
+ skip_line (t, tt);
+
+ if (tt == type::newline)
+ next (t, tt);
+ }
}
}
- else
- fail (t) << "expected " << k << "-block instead of " << t;
}
if (tt != type::rcbrace)
@@ -4433,15 +5509,24 @@ namespace build2
value val (parse_value_with_attributes (t, tt, pattern_mode::expand));
- // If this value is a vector, then save its element type so that we
+ // If the value type provides custom iterate function, then use that (see
+ // value_type::iterate for details).
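+ //
+ // This allows, for example, iterating over a value of a type that is not
+ // backed by names, along these lines (an illustrative sketch; $j is a
+ // hypothetical variable of a type, such as json, that provides iterate):
+ //
+ //   for e: $j
+ //     print $e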
+ //
+ auto iterate (val.type != nullptr ? val.type->iterate : nullptr);
+
+ // If this value is a container, then save its element type so that we
// can typify each element below.
//
const value_type* etype (nullptr);
- if (val && val.type != nullptr)
+ if (!iterate && val && val.type != nullptr)
{
etype = val.type->element_type;
- untypify (val);
+
+ // Note that here we don't want to be reducing empty simple values to
+ // empty lists.
+ //
+ untypify (val, false /* reduce */);
}
if (tt != type::newline)
@@ -4494,33 +5579,45 @@ namespace build2
if (!val)
return;
- names& ns (val.as<names> ());
-
- if (ns.empty ())
- return;
+ names* ns (nullptr);
+ if (!iterate)
+ {
+ ns = &val.as<names> ();
+ if (ns->empty ())
+ return;
+ }
istringstream is (move (body));
- for (auto i (ns.begin ()), e (ns.end ());; )
+ struct data
{
- // Set the variable value.
- //
- bool pair (i->pair);
- names n;
- n.push_back (move (*i));
- if (pair) n.push_back (move (*++i));
- value v (move (n));
+ const variable& var;
+ const attributes& val_attrs;
+ uint64_t line;
+ bool block;
+ value& lhs;
+ istringstream& is;
- if (etype != nullptr)
- typify (v, *etype, &var);
+ } d {var, val_attrs, line, block, lhs, is};
+
+ function<void (value&&, bool first)> iteration =
+ [this, &d] (value&& v, bool first)
+ {
+ // Rewind the stream.
+ //
+ if (!first)
+ {
+ d.is.clear ();
+ d.is.seekg (0);
+ }
// Inject element attributes.
//
- attributes_.push_back (val_attrs);
+ attributes_.push_back (d.val_attrs);
- apply_value_attributes (&var, lhs, move (v), type::assign);
+ apply_value_attributes (&d.var, d.lhs, move (v), type::assign);
- lexer l (is, *path_, line);
+ lexer l (d.is, *path_, d.line);
lexer* ol (lexer_);
lexer_ = &l;
@@ -4528,7 +5625,7 @@ namespace build2
type tt;
next (t, tt);
- if (block)
+ if (d.block)
{
next (t, tt); // {
next (t, tt); // <newline>
@@ -4536,20 +5633,33 @@ namespace build2
parse_clause (t, tt);
- if (tt != (block ? type::rcbrace : type::eos))
- fail (t) << "expected name " << (block ? "or '}' " : "")
+ if (tt != (d.block ? type::rcbrace : type::eos))
+ fail (t) << "expected name " << (d.block ? "or '}' " : "")
<< "instead of " << t;
lexer_ = ol;
+ };
- if (++i == e)
- break;
+ if (!iterate)
+ {
+ for (auto b (ns->begin ()), i (b), e (ns->end ()); i != e; ++i)
+ {
+ // Set the variable value.
+ //
+ bool pair (i->pair);
+ names n;
+ n.push_back (move (*i));
+ if (pair) n.push_back (move (*++i));
+ value v (move (n));
- // Rewind the stream.
- //
- is.clear ();
- is.seekg (0);
+ if (etype != nullptr)
+ typify (v, *etype, &var);
+
+ iteration (move (v), i == b);
+ }
}
+ else
+ iterate (val, iteration);
}
void parser::
@@ -4622,7 +5732,7 @@ namespace build2
if (value v = parse_value_with_attributes (t, tt, pattern_mode::expand))
{
names storage;
- cout << reverse (v, storage) << endl;
+ cout << reverse (v, storage, true /* reduce */) << endl;
}
else
cout << "[null]" << endl;
@@ -4655,7 +5765,7 @@ namespace build2
if (value v = parse_value_with_attributes (t, tt, pattern_mode::expand))
{
names storage;
- dr << reverse (v, storage);
+ dr << reverse (v, storage, true /* reduce */);
}
if (tt != type::eos)
@@ -4685,8 +5795,10 @@ namespace build2
if (ns.empty ())
{
+ // Indent two spaces.
+ //
if (scope_ != nullptr)
- dump (*scope_, " "); // Indent two spaces.
+ dump (scope_, nullopt /* action */, dump_format::buildfile, " ");
else
os << " <no current scope>" << endl;
}
@@ -4704,8 +5816,10 @@ namespace build2
const target* t (enter_target::find_target (*this, n, o, l, trace));
+ // Indent two spaces.
+ //
if (t != nullptr)
- dump (*t, " "); // Indent two spaces.
+ dump (t, nullopt /* action */, dump_format::buildfile, " ");
else
{
os << " <no target " << n;
@@ -4766,9 +5880,13 @@ namespace build2
{
// Parse and enter a variable name for assignment (as opposed to lookup).
- // The list should contain a single, simple name.
+ // The list should contain a single, simple name. Go the extra mile to
+ // issue less confusing diagnostics.
//
- if (ns.size () != 1 || ns[0].pattern || !ns[0].simple () || ns[0].empty ())
+ size_t n (ns.size ());
+ if (n == 0 || (n == 1 && ns[0].empty ()))
+ fail (l) << "empty variable name";
+ else if (n != 1 || ns[0].pattern || !ns[0].simple ())
fail (l) << "expected variable name instead of " << ns;
return parse_variable_name (move (ns[0].value), l);
@@ -4838,7 +5956,12 @@ namespace build2
// We store prepend/append values untyped (similar to overrides).
//
if (rhs.type != nullptr && kind != type::assign)
- untypify (rhs);
+ {
+ // Our heuristic for prepend/append of a typed value is to preserve
+ // empty (see apply_value_attributes() for details) so do not reduce.
+ //
+ untypify (rhs, false /* reduce */);
+ }
if (p.second)
{
@@ -4928,30 +6051,116 @@ namespace build2
const value_type* parser::
find_value_type (const scope*, const string& n)
{
- auto ptr = [] (const value_type& vt) {return &vt;};
-
- return
- n == "bool" ? ptr (value_traits<bool>::value_type) :
- n == "int64" ? ptr (value_traits<int64_t>::value_type) :
- n == "uint64" ? ptr (value_traits<uint64_t>::value_type) :
- n == "string" ? ptr (value_traits<string>::value_type) :
- n == "path" ? ptr (value_traits<path>::value_type) :
- n == "dir_path" ? ptr (value_traits<dir_path>::value_type) :
- n == "abs_dir_path" ? ptr (value_traits<abs_dir_path>::value_type) :
- n == "name" ? ptr (value_traits<name>::value_type) :
- n == "name_pair" ? ptr (value_traits<name_pair>::value_type) :
- n == "target_triplet" ? ptr (value_traits<target_triplet>::value_type) :
- n == "project_name" ? ptr (value_traits<project_name>::value_type) :
-
- n == "int64s" ? ptr (value_traits<int64s>::value_type) :
- n == "uint64s" ? ptr (value_traits<uint64s>::value_type) :
- n == "strings" ? ptr (value_traits<strings>::value_type) :
- n == "paths" ? ptr (value_traits<paths>::value_type) :
- n == "dir_paths" ? ptr (value_traits<dir_paths>::value_type) :
- n == "names" ? ptr (value_traits<vector<name>>::value_type) :
- n == "cmdline" ? ptr (value_traits<cmdline>::value_type) :
-
- nullptr;
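+ // For reference, some of the additional types recognized here can be used
+ // in buildfiles along these lines (an illustrative sketch; the pair syntax
+ // for map elements is an assumption):
+ //
+ //   s = [string_set] a b a   # Duplicates are collapsed.
+ //   m = [string_map] a@1 b@2
+ //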
+ switch (n[0])
+ {
+ case 'a':
+ {
+ if (n == "abs_dir_path") return &value_traits<abs_dir_path>::value_type;
+ break;
+ }
+ case 'b':
+ {
+ if (n == "bool") return &value_traits<bool>::value_type;
+ break;
+ }
+ case 'c':
+ {
+ if (n == "cmdline") return &value_traits<cmdline>::value_type;
+ break;
+ }
+ case 'd':
+ {
+ if (n.compare (0, 8, "dir_path") == 0)
+ {
+ if (n[8] == '\0') return &value_traits<dir_path>::value_type;
+ if (n[8] == 's' &&
+ n[9] == '\0') return &value_traits<dir_paths>::value_type;
+ }
+ break;
+ }
+ case 'i':
+ {
+ if (n.compare (0, 5, "int64") == 0)
+ {
+ if (n[5] == '\0') return &value_traits<int64_t>::value_type;
+ if (n[5] == 's' &&
+ n[6] == '\0') return &value_traits<int64s>::value_type;
+ }
+ break;
+ }
+ case 'j':
+ {
+ if (n.compare (0, 4, "json") == 0)
+ {
+ if (n[4] == '\0') return &value_traits<json_value>::value_type;
+ if (n == "json_array") return &value_traits<json_array>::value_type;
+ if (n == "json_object")
+ return &value_traits<json_object>::value_type;
+ if (n == "json_set")
+ return &value_traits<set<json_value>>::value_type;
+ if (n == "json_map")
+ return &value_traits<map<json_value, json_value>>::value_type;
+ }
+ break;
+ }
+ case 'n':
+ {
+ if (n.compare (0, 4, "name") == 0)
+ {
+ if (n[4] == '\0') return &value_traits<name>::value_type;
+ if (n[4] == 's' &&
+ n[5] == '\0') return &value_traits<vector<name>>::value_type;
+ if (n == "name_pair") return &value_traits<name_pair>::value_type;
+ }
+ break;
+ }
+ case 'p':
+ {
+ if (n.compare (0, 4, "path") == 0)
+ {
+ if (n[4] == '\0') return &value_traits<path>::value_type;
+ if (n[4] == 's' &&
+ n[5] == '\0') return &value_traits<paths>::value_type;
+ }
+ else if (n == "project_name")
+ return &value_traits<project_name>::value_type;
+ break;
+ }
+ case 's':
+ {
+ if (n.compare (0, 6, "string") == 0)
+ {
+ if (n[6] == '\0') return &value_traits<string>::value_type;
+ if (n[6] == 's' &&
+ n[7] == '\0') return &value_traits<strings>::value_type;
+ if (n == "string_set") return &value_traits<set<string>>::value_type;
+ if (n == "string_map")
+ return &value_traits<map<string,string>>::value_type;
+ }
+ break;
+ }
+ case 't':
+ {
+ if (n == "target_triplet")
+ return &value_traits<target_triplet>::value_type;
+ break;
+ }
+ case 'u':
+ {
+ if (n.compare (0, 6, "uint64") == 0)
+ {
+ if (n[6] == '\0') return &value_traits<uint64_t>::value_type;
+ if (n[6] == 's' &&
+ n[7] == '\0') return &value_traits<uint64s>::value_type;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ return nullptr;
}
void parser::
@@ -5076,7 +6285,7 @@ namespace build2
type kind)
{
attributes as (attributes_pop ());
- const location& l (as.loc);
+ const location& l (as.loc); // This points to value if no attributes.
// Essentially this is an attribute-augmented assign/append/prepend.
//
@@ -5090,6 +6299,8 @@ namespace build2
if (n == "null")
{
+ // @@ Looks like here we assume representationally empty?
+ //
if (rhs && !rhs.empty ()) // Note: null means we had an expansion.
fail (l) << "value with null attribute";
@@ -5147,6 +6358,13 @@ namespace build2
bool rhs_type (false);
if (rhs.type != nullptr)
{
+ // Our heuristic is to not reduce typed RHS empty simple values for
+ // prepend/append and additionally for assign provided LHS is a
+ // container.
+ //
+ bool reduce (kind == type::assign &&
+ (type == nullptr || !type->container));
+
// Only consider RHS type if there is no explicit or variable type.
//
if (type == nullptr)
@@ -5157,7 +6375,7 @@ namespace build2
// Reduce this to the untyped value case for simplicity.
//
- untypify (rhs);
+ untypify (rhs, reduce);
}
if (kind == type::assign)
@@ -5186,6 +6404,17 @@ namespace build2
}
else
{
+ auto df = make_diag_frame (
+ [this, var, &l](const diag_record& dr)
+ {
+ if (!l.empty ())
+ {
+ dr << info (l);
+ if (var != nullptr) dr << "variable " << var->name << ' ';
+ dr << "value is assigned here";
+ }
+ });
+
if (kind == type::assign)
{
if (rhs)
@@ -5975,9 +7204,11 @@ namespace build2
// May throw invalid_path.
//
auto include_pattern =
- [&r, &append, &include_match, sp, &l, this] (string&& p,
- optional<string>&& e,
- bool a)
+ [this,
+ &append, &include_match,
+ &r, sp, &l, &dir] (string&& p,
+ optional<string>&& e,
+ bool a)
{
// If we don't already have any matches and our pattern doesn't contain
// multiple recursive wildcards, then the result will be unique and we
@@ -6035,9 +7266,51 @@ namespace build2
return true;
};
+ const function<bool (const dir_entry&)> dangling (
+ [&dir] (const dir_entry& de)
+ {
+ bool sl (de.ltype () == entry_type::symlink);
+
+ const path& n (de.path ());
+
+ // One case where this turned out to be not worth it practically
+ // (too much noise) is backlinks to executables (and the
+ // associated DLL assemblies for Windows). So we now have this
+ // heuristic that if this looks like an executable (or DLL for
+ // Windows), then we omit the warning. On POSIX, where executables
+ // don't have extensions, we will consider it an executable only if
+ // we are not looking for directories (which also normally don't
+ // have extensions).
+ //
+ // @@ PEDANTIC: re-enable if --pedantic.
+ //
+ if (sl)
+ {
+ string e (n.extension ());
+
+ if ((e.empty () && !dir) ||
+ path_traits::compare (e, "exe") == 0 ||
+ path_traits::compare (e, "dll") == 0 ||
+ path_traits::compare (e, "pdb") == 0 || // .{exe,dll}.pdb
+ (path_traits::compare (e, "dlls") == 0 && // .exe.dlls assembly
+ path_traits::compare (n.base ().extension (), "exe") == 0))
+ return true;
+ }
+
+ warn << "skipping "
+ << (sl ? "dangling symlink" : "inaccessible entry")
+ << ' ' << de.base () / n;
+
+ return true;
+ });
+
try
{
- path_search (path (move (p)), process, *sp);
+ path_search (path (move (p)),
+ process,
+ *sp,
+ path_match_flags::follow_symlinks,
+ dangling);
}
catch (const system_error& e)
{
@@ -6483,9 +7756,35 @@ namespace build2
bool concat_quoted_first (false);
name concat_data;
+ auto concat_diag_multiple = [this] (const location& loc,
+ const char* what_expansion)
+ {
+ diag_record dr (fail (loc));
+
+ dr << "concatenating " << what_expansion << " contains multiple values";
+
+ // See if this looks like a subscript without an evaluation context and
+ // help the user out.
+ //
+ if (mode () != lexer_mode::eval)
+ {
+ const token& t (peeked ()); // Should be peeked at.
+
+ if (t.type == type::word &&
+ t.qtype == quote_type::unquoted &&
+ t.value[0] == '[')
+ {
+ dr << info << "wrap it in (...) evaluation context if this "
+ << "is value subscript";
+ }
+ }
+ };
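+
+ // For example (an illustrative sketch; variable names are hypothetical),
+ // given `d = foo/ bar/`, the concatenation $d[0]/file ends up here (and
+ // gets the subscript hint) while $(d[0])/file subscripts as intended.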
+
auto concat_typed = [this, what, &vnull, &vtype,
- &concat, &concat_data] (value&& rhs,
- const location& loc)
+ &concat, &concat_data,
+ &concat_diag_multiple] (value&& rhs,
+ const location& loc,
+ const char* what_expansion)
{
// If we have no LHS yet, then simply copy value/type.
//
@@ -6502,6 +7801,10 @@ namespace build2
// RHS.
//
+ // Note that if RHS contains multiple values then we expect the result
+ // to be a single value somehow or, more likely, there to be no
+ // suitable $builtin.concat() overload.
+ //
a.push_back (move (rhs));
const char* l ((a[0].type != nullptr ? a[0].type->name : "<untyped>"));
@@ -6543,18 +7846,22 @@ namespace build2
if (!vnull)
{
if (vtype != nullptr)
- untypify (rhs);
+ untypify (rhs, true /* reduce */);
names& d (rhs.as<names> ());
- // If the value is empty, then untypify() will (typically; no pun
- // intended) represent it as an empty sequence of names rather than
- // a sequence of one empty name. This is usually what we need (see
- // simple_reverse() for details) but not in this case.
+ // If the value is empty, then we asked untypify() to reduce it to
+ // an empty sequence of names rather than a sequence of one empty
+ // name.
//
- if (!d.empty ())
+ if (size_t n = d.size ())
{
- assert (d.size () == 1); // Must be a single value.
+ if (n != 1)
+ {
+ assert (what_expansion != nullptr);
+ concat_diag_multiple (loc, what_expansion);
+ }
+
concat_data = move (d[0]);
}
}
@@ -6849,7 +8156,7 @@ namespace build2
//
names ns;
ns.push_back (name (move (val)));
- concat_typed (value (move (ns)), get_location (t));
+ concat_typed (value (move (ns)), get_location (t), nullptr);
}
else
{
@@ -6951,6 +8258,8 @@ namespace build2
if (ttp == nullptr)
ppat = pinc = false;
+ else if (ttp->factory == nullptr)
+ fail (loc) << "abstract target type " << ttp->name << "{}";
}
}
@@ -7112,6 +8421,9 @@ namespace build2
? scope_->find_target_type (*tp)
: nullptr);
+ if (ttp != nullptr && ttp->factory == nullptr)
+ fail (loc) << "abstract target type " << ttp->name << "{}";
+
if (tp == nullptr || ttp != nullptr)
{
if (pmode == pattern_mode::detect)
@@ -7316,11 +8628,15 @@ namespace build2
// token is a paren or a word, we turn it on and switch to the eval
// mode if what we get next is a paren.
//
- // Also sniff out the special variables string from mode data for
- // the ad hoc $() handling below.
- //
mode (lexer_mode::variable);
+ // Sniff out the special variables string from mode data and use
+ // that to recognize special variables in the ad hoc $() handling
+ // below.
+ //
+ // Note: must be done before calling next() which may expire the
+ // mode.
+ //
auto special = [s = reinterpret_cast<const char*> (mode_data ())]
(const token& t) -> char
{
@@ -7359,163 +8675,202 @@ namespace build2
next (t, tt);
loc = get_location (t);
- names qual;
- string name;
-
- if (t.separated)
- ; // Leave the name empty to fail below.
- else if (tt == type::word)
+ if (tt == type::escape)
{
- name = move (t.value);
+ // For now we only support all the simple C/C++ escape sequences
+ // plus \0 (which in C/C++ is an octal escape sequence). See the
+ // lexer part for details.
+ //
+ // Note: cannot be subscripted.
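+ //
+ // For example (an illustrative sketch):
+ //
+ //   print "one$\ntwo"    # Expands to a newline between the two words.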
+ //
+ if (!pre_parse_)
+ {
+ string s;
+ switch (char c = t.value[0])
+ {
+ case '\'':
+ case '"':
+ case '?':
+ case '\\': s = c; break;
+ case '0': s = '\0'; break;
+ case 'a': s = '\a'; break;
+ case 'b': s = '\b'; break;
+ case 'f': s = '\f'; break;
+ case 'n': s = '\n'; break;
+ case 'r': s = '\r'; break;
+ case 't': s = '\t'; break;
+ case 'v': s = '\v'; break;
+ default:
+ assert (false);
+ }
+
+ result_data = name (move (s));
+ what = "escape sequence expansion";
+ }
+
+ tt = peek ();
}
- else if (tt == type::lparen)
+ else
{
- expire_mode ();
- mode (lexer_mode::eval, '@');
- next_with_attributes (t, tt);
+ names qual;
+ string name;
- // Handle the $(x) case ad hoc. We do it this way in order to get
- // the variable name even during pre-parse. It should also be
- // faster.
- //
- char c;
- if ((tt == type::word
- ? path_traits::rfind_separator (t.value) == string::npos
- : (c = special (t))) &&
- peek () == type::rparen)
+ if (t.separated)
+ ; // Leave the name empty to fail below.
+ else if (tt == type::word)
{
- name = (tt == type::word ? move (t.value) : string (1, c));
- next (t, tt); // Get `)`.
+ name = move (t.value);
}
- else
+ else if (tt == type::lparen)
{
- using name_type = build2::name;
-
- values vs (parse_eval (t, tt, pmode));
+ expire_mode ();
+ mode (lexer_mode::eval, '@');
+ next_with_attributes (t, tt);
- if (!pre_parse_)
+ // Handle the $(x) case ad hoc. We do it this way in order to
+ // get the variable name even during pre-parse. It should also
+ // be faster.
+ //
+ char c ('\0');
+ if ((tt == type::word
+ ? path_traits::rfind_separator (t.value) == string::npos
+ : (c = special (t))) &&
+ peek () == type::rparen)
{
- if (vs.size () != 1)
- fail (loc) << "expected single variable/function name";
+ name = (tt == type::word ? move (t.value) : string (1, c));
+ next (t, tt); // Get `)`.
+ }
+ else
+ {
+ using name_type = build2::name;
- value& v (vs[0]);
+ values vs (parse_eval (t, tt, pmode));
- if (!v)
- fail (loc) << "null variable/function name";
+ if (!pre_parse_)
+ {
+ if (vs.size () != 1)
+ fail (loc) << "expected single variable/function name";
- names storage;
- vector_view<name_type> ns (reverse (v, storage)); // Movable.
- size_t n (ns.size ());
+ value& v (vs[0]);
- // We cannot handle scope-qualification in the eval context as
- // we do for target-qualification (see eval-qual) since then
- // we would be treating all paths as qualified variables. So
- // we have to do it here.
- //
- if (n >= 2 && ns[0].pair == ':') // $(foo: x)
- {
- // Note: name is first (see eval for details).
+ if (!v)
+ fail (loc) << "null variable/function name";
+
+ names storage;
+ vector_view<name_type> ns (
+ reverse (v, storage, true /* reduce */)); // Movable.
+ size_t n (ns.size ());
+
+ // We cannot handle scope-qualification in the eval context
+ // as we do for target-qualification (see eval-qual) since
+ // then we would be treating all paths as qualified
+ // variables. So we have to do it here.
//
- qual.push_back (move (ns[1]));
+ if (n >= 2 && ns[0].pair == ':') // $(foo: x)
+ {
+ // Note: name is first (see eval for details).
+ //
+ qual.push_back (move (ns[1]));
- if (qual.back ().empty ())
- fail (loc) << "empty variable/function qualification";
+ if (qual.back ().empty ())
+ fail (loc) << "empty variable/function qualification";
- if (n > 2)
- qual.push_back (move (ns[2]));
+ if (n > 2)
+ qual.push_back (move (ns[2]));
- // Move name to the last position (see below).
- //
- swap (ns[0], ns[n - 1]);
- }
- else if (n == 2 && ns[0].directory ()) // $(foo/ x)
- {
- qual.push_back (move (ns[0]));
- qual.back ().pair = '/';
- }
- else if (n > 1)
- fail (loc) << "expected variable/function name instead of '"
- << ns << "'";
+ // Move name to the last position (see below).
+ //
+ swap (ns[0], ns[n - 1]);
+ }
+ else if (n == 2 && ns[0].directory ()) // $(foo/ x)
+ {
+ qual.push_back (move (ns[0]));
+ qual.back ().pair = '/';
+ }
+ else if (n > 1)
+ fail (loc) << "expected variable/function name instead of '"
+ << ns << "'";
- // Note: checked for empty below.
- //
- if (!ns[n - 1].simple ())
- fail (loc) << "expected variable/function name instead of '"
- << ns[n - 1] << "'";
+ // Note: checked for empty below.
+ //
+ if (!ns[n - 1].simple ())
+ fail (loc) << "expected variable/function name instead of '"
+ << ns[n - 1] << "'";
- size_t p;
- if (n == 1 && // $(foo/x)
- (p = path_traits::rfind_separator (ns[0].value)) !=
+ size_t p;
+ if (n == 1 && // $(foo/x)
+ (p = path_traits::rfind_separator (ns[0].value)) !=
string::npos)
- {
- // Note that p cannot point to the last character since then
- // it would have been a directory, not a simple name.
- //
- string& s (ns[0].value);
+ {
+ // Note that p cannot point to the last character since
+ // then it would have been a directory, not a simple name.
+ //
+ string& s (ns[0].value);
- name = string (s, p + 1);
- s.resize (p + 1);
- qual.push_back (name_type (dir_path (move (s))));
- qual.back ().pair = '/';
+ name = string (s, p + 1);
+ s.resize (p + 1);
+ qual.push_back (name_type (dir_path (move (s))));
+ qual.back ().pair = '/';
+ }
+ else
+ name = move (ns[n - 1].value);
}
- else
- name = move (ns[n - 1].value);
}
}
- }
- else
- fail (t) << "expected variable/function name instead of " << t;
+ else
+ fail (t) << "expected variable/function name instead of " << t;
- if (!pre_parse_ && name.empty ())
- fail (loc) << "empty variable/function name";
+ if (!pre_parse_ && name.empty ())
+ fail (loc) << "empty variable/function name";
- // Figure out whether this is a variable expansion with potential
- // subscript or a function call.
- //
- if (sub) enable_subscript ();
- tt = peek ();
-
- // Note that we require function call opening paren to be
- // unseparated; consider: $x ($x == 'foo' ? 'FOO' : 'BAR').
- //
- if (tt == type::lparen && !peeked ().separated)
- {
- // Function call.
+ // Figure out whether this is a variable expansion with potential
+ // subscript or a function call.
//
- next (t, tt); // Get '('.
- mode (lexer_mode::eval, '@');
- next_with_attributes (t, tt);
-
- // @@ Should we use (target/scope) qualification (of name) as the
- // context in which to call the function? Hm, interesting...
- //
- values args (parse_eval (t, tt, pmode));
-
if (sub) enable_subscript ();
tt = peek ();
- // Note that we "move" args to call().
+ // Note that we require function call opening paren to be
+ // unseparated; consider: $x ($x == 'foo' ? 'FOO' : 'BAR').
//
- if (!pre_parse_)
+ if (tt == type::lparen && !peeked ().separated)
{
- result_data = ctx->functions.call (scope_, name, args, loc);
- what = "function call";
+ // Function call.
+ //
+ next (t, tt); // Get '('.
+ mode (lexer_mode::eval, '@');
+ next_with_attributes (t, tt);
+
+ // @@ Should we use (target/scope) qualification (of name) as
+ // the context in which to call the function? Hm, interesting...
+ //
+ values args (parse_eval (t, tt, pmode));
+
+ if (sub) enable_subscript ();
+ tt = peek ();
+
+ // Note that we "move" args to call().
+ //
+ if (!pre_parse_)
+ {
+ result_data = ctx->functions.call (scope_, name, args, loc);
+ what = "function call";
+ }
+ else
+ lookup_function (move (name), loc);
}
else
- lookup_function (move (name), loc);
- }
- else
- {
- // Variable expansion.
- //
- lookup l (lookup_variable (move (qual), move (name), loc));
-
- if (!pre_parse_)
{
- if (l.defined ())
- result = l.value; // Otherwise leave as NULL result_data.
+ // Variable expansion.
+ //
+ lookup l (lookup_variable (move (qual), move (name), loc));
+
+ if (!pre_parse_)
+ {
+ if (l.defined ())
+ result = l.value; // Otherwise leave as NULL result_data.
- what = "variable expansion";
+ what = "variable expansion";
+ }
}
}
}
@@ -7547,117 +8902,132 @@ namespace build2
// Handle value subscript.
//
- if (tt == type::lsbrace && mode () == lexer_mode::eval)
+ if (mode () == lexer_mode::eval) // Note: not if(sub)!
{
- location bl (get_location (t));
- next (t, tt); // `[`
- mode (lexer_mode::subscript, '\0' /* pair */);
- next (t, tt);
-
- location l (get_location (t));
- value v (
- tt != type::rsbrace
- ? parse_value (t, tt, pattern_mode::ignore, "value subscript")
- : value (names ()));
-
- if (tt != type::rsbrace)
+ while (tt == type::lsbrace)
{
- // Note: wildcard pattern should have `]` as well so no escaping
- // suggestion.
- //
- fail (t) << "expected ']' instead of " << t;
- }
+ location bl (get_location (t));
+ next (t, tt); // `[`
+ mode (lexer_mode::subscript, '\0' /* pair */);
+ next (t, tt);
- if (!pre_parse_)
- {
- uint64_t j;
- try
- {
- j = convert<uint64_t> (move (v));
- }
- catch (const invalid_argument& e)
+ location l (get_location (t));
+ value v (
+ tt != type::rsbrace
+ ? parse_value (t, tt, pattern_mode::ignore, "value subscript")
+ : value (names ()));
+
+ if (tt != type::rsbrace)
{
- fail (l) << "invalid value subscript: " << e <<
- info (bl) << "use the '\\[' escape sequence if this is a "
- << "wildcard pattern" << endf;
+ // Note: wildcard pattern should have `]` as well so no escaping
+ // suggestion.
+ //
+ fail (t) << "expected ']' instead of " << t;
}
- // Similar to expanding an undefined variable, we return NULL if
- // the index is out of bounds.
- //
- // Note that result may or may not point to result_data.
- //
- if (result->null)
- result_data = value ();
- else if (result->type == nullptr)
+ if (!pre_parse_)
{
- const names& ns (result->as<names> ());
-
- // Pair-aware subscript.
+ // For type-specific subscript implementations we pass the
+ // subscript value as is.
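+              //
+              // For example (illustration only; the available forms depend on
+              // the value type's subscript function), this is what allows a
+              // typed value to be subscripted by something other than an
+              // integer index, say ($m[key]) for a map-like value, while
+              // untyped values fall through to the index-based handling
+              // below.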
//
- names r;
- for (auto i (ns.begin ()); i != ns.end (); ++i, --j)
+ if (auto f = (result->type != nullptr
+ ? result->type->subscript
+ : nullptr))
{
- if (j == 0)
+ result_data = f (*result, &result_data, move (v), l, bl);
+ }
+ else
+ {
+ uint64_t j;
+ try
{
- r.push_back (*i);
- if (i->pair)
- r.push_back (*++i);
- break;
+ j = convert<uint64_t> (move (v));
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (l) << "invalid value subscript: " << e <<
+ info (bl) << "use the '\\[' escape sequence if this is a "
+ << "wildcard pattern" << endf;
}
- if (i->pair)
- ++i;
- }
+ // Similar to expanding an undefined variable, we return NULL
+ // if the index is out of bounds.
+ //
+ // Note that result may or may not point to result_data.
+ //
+ if (result->null)
+ result_data = value ();
+ else if (result->type == nullptr)
+ {
+ const names& ns (result->as<names> ());
- result_data = r.empty () ? value () : value (move (r));
- }
- else
- {
- // Similar logic to parse_for().
- //
- // @@ Maybe we should invent type-aware subscript? Could also
- // be used for non-index subscripts (map keys etc).
- //
- const value_type* etype (result->type->element_type);
+ // Pair-aware subscript.
+ //
+ names r;
+ for (auto i (ns.begin ()); i != ns.end (); ++i, --j)
+ {
+ if (j == 0)
+ {
+ r.push_back (*i);
+ if (i->pair)
+ r.push_back (*++i);
+ break;
+ }
- value val (result == &result_data
- ? value (move (result_data))
- : value (*result));
+ if (i->pair)
+ ++i;
+ }
- untypify (val);
+ result_data = r.empty () ? value () : value (move (r));
+ }
+ else
+ {
+ // Similar logic to parse_for().
+ //
+ const value_type* etype (result->type->element_type);
- names& ns (val.as<names> ());
+ value val (result == &result_data
+ ? value (move (result_data))
+ : value (*result));
- // Pair-aware subscript.
- //
- names r;
- for (auto i (ns.begin ()); i != ns.end (); ++i, --j)
- {
- bool p (i->pair);
+ untypify (val, false /* reduce */);
- if (j == 0)
- {
- r.push_back (move (*i));
- if (p)
- r.push_back (move (*++i));
- break;
- }
+ names& ns (val.as<names> ());
- if (p)
- ++i;
- }
+ // Pair-aware subscript.
+ //
+ names r;
+ for (auto i (ns.begin ()); i != ns.end (); ++i, --j)
+ {
+ bool p (i->pair);
+
+ if (j == 0)
+ {
+ r.push_back (move (*i));
+ if (p)
+ r.push_back (move (*++i));
+ break;
+ }
+
+ if (p)
+ ++i;
+ }
- result_data = r.empty () ? value () : value (move (r));
+ result_data = r.empty () ? value () : value (move (r));
- if (etype != nullptr)
- typify (result_data, *etype, nullptr /* var */);
+ if (etype != nullptr)
+ typify (result_data, *etype, nullptr /* var */);
+ }
+ }
+
+ result = &result_data;
}
- result = &result_data;
+ // See if we have chained subscript.
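+            //
+            // For example (illustration only), this makes a form such as
+            // ($x[1][0]) work, where the result of one subscript is
+            // immediately subscripted again.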
+ //
+ enable_subscript ();
+ tt = peek ();
}
-
- tt = peek ();
}
if (pre_parse_)
@@ -7701,7 +9071,8 @@ namespace build2
// then it should not be overloaded for a type). In a quoted
// context we use $string() which returns a "canonical
// representation" (e.g., a directory path without a trailing
- // slash).
+ // slash). Note: looks like we use typed $concat() now in the
+ // unquoted context.
//
if (result->type != nullptr && quoted)
{
@@ -7735,7 +9106,11 @@ namespace build2
fail (loc) << "no string conversion for " << t;
result_data = move (p.first);
- untypify (result_data); // Convert to untyped simple name.
+
+ // Convert to untyped simple name reducing empty string to empty
+ // names as an optimization.
+ //
+ untypify (result_data, true /* reduce */);
}
if ((concat && vtype != nullptr) || // LHS typed.
@@ -7744,13 +9119,13 @@ namespace build2
if (result != &result_data) // Same reason as above.
result = &(result_data = *result);
- concat_typed (move (result_data), loc);
+ concat_typed (move (result_data), loc, what);
}
//
// Untyped concatenation. Note that if RHS is NULL/empty, we still
// set the concat flag.
//
- else if (!result->null && !result->empty ())
+ else if (!result->null)
{
// This can only be an untyped value.
//
@@ -7758,53 +9133,36 @@ namespace build2
//
const names& lv (cast<names> (*result));
- // This should be a simple value or a simple directory.
- //
- if (lv.size () > 1)
+ if (size_t s = lv.size ())
{
- diag_record dr (fail (loc));
-
- dr << "concatenating " << what << " contains multiple values";
-
- // See if this looks like a subscript without an evaluation
- // context and help the user out.
+ // This should be a simple value or a simple directory.
//
- if (mode () != lexer_mode::eval)
- {
- const token& t (peeked ()); // Should be peeked at.
+ if (s > 1)
+ concat_diag_multiple (loc, what);
- if (t.type == type::word &&
- t.qtype == quote_type::unquoted &&
- t.value[0] == '[')
- {
- dr << info << "wrap it in (...) evaluation context if this "
- << "is value subscript";
- }
- }
- }
+ const name& n (lv[0]);
- const name& n (lv[0]);
+ if (n.qualified ())
+ fail (loc) << "concatenating " << what << " contains project "
+ << "name";
- if (n.qualified ())
- fail (loc) << "concatenating " << what << " contains project "
- << "name";
-
- if (n.typed ())
- fail (loc) << "concatenating " << what << " contains type";
+ if (n.typed ())
+ fail (loc) << "concatenating " << what << " contains target type";
- if (!n.dir.empty ())
- {
- if (!n.value.empty ())
- fail (loc) << "concatenating " << what << " contains "
- << "directory";
+ if (!n.dir.empty ())
+ {
+ if (!n.value.empty ())
+ fail (loc) << "concatenating " << what << " contains "
+ << "directory";
- // Note that here we cannot assume what's in dir is really a
- // path (think s/foo/bar/) so we have to reverse it exactly.
- //
- concat_data.value += n.dir.representation ();
+ // Note that here we cannot assume what's in dir is really a
+ // path (think s/foo/bar/) so we have to reverse it exactly.
+ //
+ concat_data.value += n.dir.representation ();
+ }
+ else
+ concat_data.value += n.value;
}
- else
- concat_data.value += n.value;
}
// The same little hack as in the word case ($empty+foo).
@@ -7830,16 +9188,27 @@ namespace build2
// Nothing else to do here if the result is NULL or empty.
//
- if (result->null || result->empty ())
- continue;
-
- // @@ Could move if nv is result_data; see untypify().
+ // Note that we cannot use value::empty() here since we are
+ // interested in representationally empty.
//
- names nv_storage;
- names_view nv (reverse (*result, nv_storage));
+ if (!result->null)
+ {
+ // @@ Could move if nv is result_data; see untypify().
+ //
+        // Nuance: we should only be reducing an empty simple value to an
+        // empty list if we are not the second half of a pair.
+ //
+ bool pair (!ns.empty () && ns.back ().pair);
- count = splice_names (
- loc, nv, move (nv_storage), ns, what, pairn, pp, dp, tp);
+ names nv_storage;
+ names_view nv (reverse (*result, nv_storage, !pair /* reduce */));
+
+ if (!nv.empty ())
+ {
+ count = splice_names (
+ loc, nv, move (nv_storage), ns, what, pairn, pp, dp, tp);
+ }
+ }
}
continue;
@@ -8074,8 +9443,10 @@ namespace build2
buildspec parser::
parse_buildspec (istream& is, const path_name& in)
{
- // We do "effective escaping" and only for ['"\$(] (basically what's
- // necessary inside a double-quoted literal plus the single quote).
+ // We do "effective escaping" of the special `'"\$(` characters (basically
+ // what's escapable inside a double-quoted literal plus the single quote;
+ // note, however, that we exclude line continuations and `)` since they
+ // would make directory paths on Windows unusable).
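+  //
+  // For example (hedged illustration), if `)` were escapable, then in a
+  // Windows buildspec like the following the trailing `\)` would be read as
+  // an escaped `)` rather than a directory separator followed by `)`:
+  //
+  //   b "update(C:\projects\hello\)"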
//
path_ = &in;
lexer l (is, *path_, 1 /* line */, "\'\"\\$(");
@@ -8318,6 +9689,9 @@ namespace build2
lookup parser::
lookup_variable (names&& qual, string&& name, const location& loc)
{
+ // Note that this function can be called during execute (for example, from
+ // scripts). In particular, this means we cannot use enter_{scope,target}.
+
if (pre_parse_)
return lookup ();
@@ -8329,9 +9703,6 @@ namespace build2
// If we are qualified, it can be a scope or a target.
//
- enter_scope sg;
- enter_target tg;
-
if (qual.empty ())
{
s = scope_;
@@ -8340,13 +9711,29 @@ namespace build2
}
else
{
+ // What should we do if we cannot find the qualification (scope or
+ // target)? We can "fall through" to an outer scope (there is always the
+ // global scope backstop), we can return NULL straight away, or we can
+ // fail. It feels like in most cases unknown scope or target is a
+ // mistake and doing anything other than failing is just making things
+ // harder to debug.
+ //
switch (qual.front ().pair)
{
case '/':
{
assert (qual.front ().directory ());
- sg = enter_scope (*this, move (qual.front ().dir));
- s = scope_;
+
+ dir_path& d (qual.front ().dir);
+ enter_scope::complete_normalize (*scope_, d);
+
+ s = &ctx->scopes.find_out (d);
+
+ if (s->out_path () != d)
+ fail (loc) << "unknown scope " << d << " in scope-qualified "
+ << "variable " << name << " expansion" <<
+ info << "did you forget to include the corresponding buildfile?";
+
break;
}
default:
@@ -8356,8 +9743,24 @@ namespace build2
if (n.pair)
o = move (qual.back ());
- tg = enter_target (*this, move (n), move (o), true, loc, trace);
- t = target_;
+ t = enter_target::find_target (*this, n, o, loc, trace);
+
+ if (t == nullptr || !operator>= (t->decl, target_decl::implied)) // VC14
+ {
+ diag_record dr (fail (loc));
+
+ dr << "unknown target " << n;
+
+ if (n.pair && !o.dir.empty ())
+ dr << '@' << o.dir;
+
+ dr << " in target-qualified variable " << name << " expansion";
+ }
+
+ // Use the target's var_pool for good measure.
+ //
+ s = &t->base_scope ();
+
break;
}
}
@@ -8365,10 +9768,13 @@ namespace build2
// Lookup.
//
- if (const variable* pvar = scope_->var_pool ().find (name))
+ if (const variable* pvar =
+ (s != nullptr ? s : scope_)->var_pool ().find (name))
{
auto& var (*pvar);
+ // Note: the order of the following blocks is important.
+
if (p != nullptr)
{
// The lookup depth is a bit of a hack but should be harmless since
@@ -8455,23 +9861,27 @@ namespace build2
return r;
}
+ // file.cxx
+ //
+ extern const dir_path std_export_dir;
+ extern const dir_path alt_export_dir;
+
void parser::
- process_default_target (token& t)
+ process_default_target (token& t, const buildfile* bf)
{
tracer trace ("parser::process_default_target", &path_);
// The logic is as follows: if we have an explicit current directory
- // target, then that's the default target. Otherwise, we take the
- // first target and use it as a prerequisite to create an implicit
- // current directory target, effectively making it the default
- // target via an alias. If there are no targets in this buildfile,
- // then we don't do anything.
+ // target, then that's the default target. Otherwise, we take the first
+ // target and use it as a prerequisite to create an implicit current
+ // directory target, effectively making it the default target via an
+ // alias. If this is a project root buildfile, then also add exported
+ // buildfiles. And if there are no targets in this buildfile, then we
+ // don't do anything (reasonably assuming it's not root).
//
if (default_target_ == nullptr) // No targets in this buildfile.
return;
- target& dt (*default_target_);
-
target* ct (
const_cast<target*> ( // Ok (serial execution).
ctx->targets.find (dir::static_type, // Explicit current dir target.
@@ -8481,36 +9891,183 @@ namespace build2
nullopt,
trace)));
- if (ct == nullptr)
+ if (ct != nullptr && ct->decl == target_decl::real)
+ ; // Existing and not implied.
+ else
{
- l5 ([&]{trace (t) << "creating current directory alias for " << dt;});
+ target& dt (*default_target_);
- // While this target is not explicitly mentioned in the buildfile, we
- // say that we behave as if it were. Thus not implied.
- //
- ct = &ctx->targets.insert (dir::static_type,
- scope_->out_path (),
- dir_path (),
- string (),
- nullopt,
- target_decl::real,
- trace).first;
- // Fall through.
+ if (ct == nullptr)
+ {
+ l5 ([&]{trace (t) << "creating current directory alias for " << dt;});
+
+ // While this target is not explicitly mentioned in the buildfile, we
+ // say that we behave as if it were. Thus not implied.
+ //
+ ct = &ctx->targets.insert (dir::static_type,
+ scope_->out_path (),
+ dir_path (),
+ string (),
+ nullopt,
+ target_decl::real,
+ trace).first;
+ }
+ else
+ ct->decl = target_decl::real;
+
+ ct->prerequisites_state_.store (2, memory_order_relaxed);
+ ct->prerequisites_.push_back (prerequisite (dt));
}
- else if (ct->decl != target_decl::real)
+
+ // See if this is a root buildfile and not in a simple project.
+ //
+ if (bf != nullptr &&
+ root_ != nullptr &&
+ root_->root_extra != nullptr &&
+ root_->root_extra->loaded &&
+ *root_->root_extra->project != nullptr &&
+ bf->dir == root_->src_path () &&
+ bf->name == root_->root_extra->buildfile_file.string ())
{
- ct->decl = target_decl::real;
- // Fall through.
- }
- else
- return; // Existing and not implied.
+ // See if we have any exported buildfiles.
+ //
+ const dir_path& export_dir (
+ root_->root_extra->altn ? alt_export_dir : std_export_dir);
+
+ dir_path d (root_->src_path () / export_dir);
+ if (exists (d))
+ {
+ // Make sure prerequisites are set.
+ //
+ ct->prerequisites_state_.store (2, memory_order_relaxed);
+
+ const string& build_ext (root_->root_extra->build_ext);
+
+ // Return true if entered any exported buildfiles.
+ //
+ // Note: recursive lambda.
+ //
+ auto iterate = [this, &trace,
+ ct, &build_ext] (const dir_path& d,
+ const auto& iterate) -> bool
+ {
+ bool r (false);
+
+ try
+ {
+ for (const dir_entry& e:
+ dir_iterator (d, dir_iterator::detect_dangling))
+ {
+ switch (e.type ())
+ {
+ case entry_type::directory:
+ {
+ r = iterate (d / path_cast<dir_path> (e.path ()), iterate) || r;
+ break;
+ }
+ case entry_type::regular:
+ {
+ const path& n (e.path ());
+
+ // Besides the buildfile also export buildscript and C++ files
+ // that are used to provide recipe implementations (see
+ // parse_recipe() for details).
+ //
+ string e (n.extension ());
+ if (const target_type* tt = (
+ e == build_ext ? &buildfile::static_type :
+ e == "buildscript" ? &buildscript::static_type :
+ e == "cxx" ||
+ e == "cpp" ||
+ e == "cc" ? &file::static_type : nullptr))
+ {
+ // Enter as if found by search_existing_file(). Note that
+ // entering it as real would cause file_rule not to match
+ // for clean.
+ //
+ // Note that these targets may already be entered (for
+ // example, if already imported).
+ //
+ const target& bf (
+ ctx->targets.insert (*tt,
+ d,
+ (root_->out_eq_src ()
+ ? dir_path ()
+ : out_src (d, *root_)),
+ n.base ().string (),
+ move (e),
+ target_decl::prereq_file,
+ trace).first);
+
+ ct->prerequisites_.push_back (prerequisite (bf));
+ r = true;
+ }
- ct->prerequisites_state_.store (2, memory_order_relaxed);
- ct->prerequisites_.emplace_back (prerequisite (dt));
+ break;
+ }
+ case entry_type::unknown:
+ {
+ bool sl (e.ltype () == entry_type::symlink);
+
+ fail << (sl ? "dangling symlink" : "inaccessible entry")
+ << ' ' << d / e.path ();
+
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to iterate over " << d << ": " << e;
+ }
+
+ return r;
+ };
+
+ if (iterate (d, iterate))
+ {
+ // Arrange for the exported buildfiles to be installed, recreating
+ // subdirectories inside export/. Essentially, we are arranging for
+ // this:
+ //
+ // build/export/file{*}:
+ // {
+ // install = buildfile/
+ // install.subdirs = true
+ // }
+ //
+ if (cast_false<bool> (root_->vars["install.loaded"]))
+ {
+ enter_scope es (*this, dir_path (export_dir));
+ auto& vars (scope_->target_vars[file::static_type]["*"]);
+
+ // @@ TODO: get cached variables from the module once we have one.
+ //
+ {
+ auto r (vars.insert (*root_->var_pool ().find ("install")));
+
+ if (r.second) // Already set by the user?
+ r.first = path_cast<path> (dir_path ("buildfile"));
+ }
+
+ {
+ auto r (vars.insert (
+ *root_->var_pool (true).find ("install.subdirs")));
+ if (r.second)
+ r.first = true;
+ }
+ }
+ }
+ }
+ }
}
- void parser::
- enter_buildfile (const path& p)
+ template <typename T>
+ const T& parser::
+ enter_buildfile (const path& p, optional<dir_path> out)
{
tracer trace ("parser::enter_buildfile", &path_);
@@ -8518,17 +10075,20 @@ namespace build2
// Figure out if we need out.
//
- dir_path out;
- if (scope_->src_path_ != nullptr &&
- scope_->src_path () != scope_->out_path () &&
- d.sub (scope_->src_path ()))
+ dir_path o;
+ if (out)
+ o = move (*out);
+ else if (root_ != nullptr &&
+ root_->src_path_ != nullptr &&
+ !root_->out_eq_src () &&
+ d.sub (*root_->src_path_))
{
- out = out_src (d, *root_);
+ o = out_src (d, *root_);
}
- ctx->targets.insert<buildfile> (
+ return ctx->targets.insert<T> (
move (d),
- move (out),
+ move (o),
p.leaf ().base ().string (),
p.extension (), // Always specified.
trace);
diff --git a/libbuild2/parser.hxx b/libbuild2/parser.hxx
index b3a5395..3e1d0a0 100644
--- a/libbuild2/parser.hxx
+++ b/libbuild2/parser.hxx
@@ -69,14 +69,20 @@ namespace build2
scope* root,
scope& base,
target* = nullptr,
- prerequisite* = nullptr);
+ prerequisite* = nullptr,
+ bool enter_buildfile = true);
void
parse_buildfile (lexer&,
scope* root,
scope& base,
target* = nullptr,
- prerequisite* = nullptr);
+ prerequisite* = nullptr,
+ bool enter_buildfile = true);
+
+ names
+ parse_export_stub (istream& is, const path_name& name,
+ const scope& rs, scope& gs, scope& ts);
buildspec
parse_buildspec (istream&, const path_name&);
@@ -87,14 +93,6 @@ namespace build2
pair<value, token>
parse_variable_value (lexer&, scope&, const dir_path*, const variable&);
- names
- parse_export_stub (istream& is, const path_name& name,
- scope& rs, scope& bs)
- {
- parse_buildfile (is, name, &rs, bs);
- return move (export_value);
- }
-
// Parse an evaluation context (`(...)`).
//
value
@@ -136,8 +134,20 @@ namespace build2
// config directive result.
//
- vector<pair<lookup, string>> config_report; // Config value and format.
- bool config_report_new = false; // One of values is new.
+ struct config_report
+ {
+ struct value
+ {
+ lookup val; // Value.
+ string fmt; // Format.
+ string org; // Original variable if config.report.variable.
+ };
+
+ project_name module; // Reporting module name.
+ vector<value> values;
+ bool new_value; // One of values is new.
+ };
+ small_vector<config_report, 1> config_reports;
// Misc utilities.
//
@@ -180,24 +190,29 @@ namespace build2
const target_type* = nullptr,
const string& = {});
- // Ad hoc target names inside < ... >.
+ // Group target names inside < ... >.
//
- struct adhoc_names_loc
+ struct group_names_loc
{
+      bool expl = false; // True -- explicit group, false -- ad hoc.
+ location group_loc; // Group/primary target location.
+ location member_loc; // Members location.
names ns;
- location loc;
};
- using adhoc_names = small_vector<adhoc_names_loc, 1>;
+ using group_names = small_vector<group_names_loc, 1>;
+
+ vector<reference_wrapper<target>>
+ enter_explicit_members (group_names_loc&&, bool);
vector<reference_wrapper<target>>
- enter_adhoc_members (adhoc_names_loc&&, bool);
+ enter_adhoc_members (group_names_loc&&, bool);
small_vector<pair<reference_wrapper<target>, // Target.
vector<reference_wrapper<target>>>, // Ad hoc members.
1>
enter_targets (names&&, const location&,
- adhoc_names&&,
+ group_names&&,
size_t,
const attributes&);
@@ -207,7 +222,7 @@ namespace build2
void
parse_dependency (token&, token_type&,
names&&, const location&,
- adhoc_names&&,
+ group_names&&,
names&&, const location&,
const attributes&);
@@ -257,7 +272,9 @@ namespace build2
parse_if_else (token&, token_type&,
bool,
const function<void (
- token&, token_type&, bool, const string&)>&);
+ token&, token_type&, bool, const string&)>&,
+ const function<void (
+ token&, token_type&, const string&)>&);
void
parse_switch (token&, token_type&);
@@ -266,7 +283,9 @@ namespace build2
parse_switch (token&, token_type&,
bool,
const function<void (
- token&, token_type&, bool, const string&)>&);
+ token&, token_type&, bool, const string&)>&,
+ const function<void (
+ token&, token_type&, const string&)>&);
void
parse_for (token&, token_type&);
@@ -376,15 +395,21 @@ namespace build2
attributes&
attributes_top () {return attributes_.back ();}
- // Source a stream optionnaly performing the default target processing.
- // If the specified path name has a real path, then also enter it as a
- // buildfile.
+ // Source a buildfile as a stream optionally performing the default target
+ // processing. If the specified path name has a real path, then also enter
+ // it as a buildfile.
+ //
+ // If default_target is nullopt, then disable the default target semantics
+  // as when loading bootstrap.build or root.build. If it is false, then
+ // continue with the existing default_target value. If it is true, then
+  // start with a new default_target and call process_default_target() at
+ // the end.
//
void
- source (istream&,
- const path_name&,
- const location&,
- bool default_target);
+ source_buildfile (istream&,
+ const path_name&,
+ const location&,
+ optional<bool> default_target);
// The what argument is used in diagnostics (e.g., "expected <what>
// instead of ...".
@@ -416,14 +441,7 @@ namespace build2
const string* separators = &name_separators)
{
names ns;
- parse_names (t, tt,
- ns,
- pmode,
- chunk,
- what,
- separators,
- 0,
- nullopt, nullptr, nullptr);
+ parse_names (t, tt, ns, pmode, chunk, what, separators);
return ns;
}
@@ -445,14 +463,7 @@ namespace build2
bool chunk = false)
{
names ns;
- auto r (parse_names (t, tt,
- ns,
- pmode,
- chunk,
- what,
- separators,
- 0,
- nullopt, nullptr, nullptr));
+ auto r (parse_names (t, tt, ns, pmode, chunk, what, separators));
value v (r.type); // Potentially typed NULL value.
@@ -615,12 +626,15 @@ namespace build2
switch_scope (const dir_path& out_base);
void
- process_default_target (token&);
+ process_default_target (token&, const buildfile*);
- // Enter buildfile as a target.
+ private:
+ // Enter buildfile or buildfile-file like file (e.g., a recipe file) as a
+ // target.
//
- void
- enter_buildfile (const path&);
+ template <typename T>
+ const T&
+ enter_buildfile (const path&, optional<dir_path> out = nullopt);
// Lexer.
//
diff --git a/libbuild2/prerequisite.cxx b/libbuild2/prerequisite.cxx
index 7e14c76..bb77c9e 100644
--- a/libbuild2/prerequisite.cxx
+++ b/libbuild2/prerequisite.cxx
@@ -54,13 +54,13 @@ namespace build2
}
prerequisite::
- prerequisite (const target_type& t)
+ prerequisite (const target_type& t, bool locked)
: proj (nullopt),
type (t.type ()),
dir (t.dir),
out (t.out), // @@ If it's empty, then we treat as undetermined?
name (t.name),
- ext (to_ext (t.ext ())),
+ ext (to_ext (locked ? t.ext_locked () : t.ext ())),
scope (t.base_scope ()),
target (&t),
vars (*this, false /* shared */)
diff --git a/libbuild2/prerequisite.hxx b/libbuild2/prerequisite.hxx
index 33efed0..9b9cccf 100644
--- a/libbuild2/prerequisite.hxx
+++ b/libbuild2/prerequisite.hxx
@@ -63,6 +63,8 @@ namespace build2
// Note that the lookup is often ad hoc (see bin.whole as an example).
// But see also parser::lookup_variable() if adding something here.
//
+ // @@ PERF: redo as vector so can make move constructor noexcept.
+ //
public:
variable_map vars;
@@ -95,10 +97,26 @@ namespace build2
scope (s),
vars (*this, false /* shared */) {}
- // Make a prerequisite from a target.
+ prerequisite (const target_type_type& t,
+ dir_path d,
+ dir_path o,
+ string n,
+ optional<string> e,
+ const scope_type& s)
+ : type (t),
+ dir (move (d)),
+ out (move (o)),
+ name (move (n)),
+ ext (move (e)),
+ scope (s),
+ vars (*this, false /* shared */) {}
+
+ // Make a prerequisite from a target. If the second argument is true,
+ // assume the targets mutex is locked (see ext_locked()/key_locked()
+ // for background).
//
explicit
- prerequisite (const target_type&);
+ prerequisite (const target_type&, bool locked = false);
// Note that the returned key "tracks" the prerequisite; that is, any
// updates to the prerequisite's members will be reflected in the key.
@@ -138,7 +156,10 @@ namespace build2
is_a (const target_type_type& tt) const {return type.is_a (tt);}
public:
- prerequisite (prerequisite&& x)
+ // Note that we have the noexcept specification even though vars
+ // (std::map) could potentially throw.
+ //
+ prerequisite (prerequisite&& x) noexcept
: proj (move (x.proj)),
type (x.type),
dir (move (x.dir)),
@@ -147,7 +168,8 @@ namespace build2
ext (move (x.ext)),
scope (x.scope),
target (x.target.load (memory_order_relaxed)),
- vars (move (x.vars), *this, false /* shared */) {}
+ vars (move (x.vars), *this, false /* shared */)
+ {}
prerequisite (const prerequisite& x, memory_order o = memory_order_consume)
: proj (x.proj),
diff --git a/libbuild2/recipe.cxx b/libbuild2/recipe.cxx
index eeafe87..87d37e7 100644
--- a/libbuild2/recipe.cxx
+++ b/libbuild2/recipe.cxx
@@ -11,4 +11,5 @@ namespace build2
recipe_function* const noop_recipe = &noop_action;
recipe_function* const default_recipe = &default_action;
recipe_function* const group_recipe = &group_action;
+ recipe_function* const inner_recipe = &execute_inner;
}
diff --git a/libbuild2/recipe.hxx b/libbuild2/recipe.hxx
index 5a6e38d..97261f5 100644
--- a/libbuild2/recipe.hxx
+++ b/libbuild2/recipe.hxx
@@ -49,6 +49,7 @@ namespace build2
LIBBUILD2_SYMEXPORT extern recipe_function* const noop_recipe;
LIBBUILD2_SYMEXPORT extern recipe_function* const default_recipe;
LIBBUILD2_SYMEXPORT extern recipe_function* const group_recipe;
+ LIBBUILD2_SYMEXPORT extern recipe_function* const inner_recipe;
}
#endif // LIBBUILD2_RECIPE_HXX
diff --git a/libbuild2/rule.cxx b/libbuild2/rule.cxx
index c5366a6..dc1c96c 100644
--- a/libbuild2/rule.cxx
+++ b/libbuild2/rule.cxx
@@ -15,13 +15,27 @@ using namespace butl;
namespace build2
{
- // rule (vtable)
+ // rule
//
rule::
~rule ()
{
}
+ void rule::
+ apply_posthoc (action, target&, match_extra&) const
+ {
+ }
+
+ void rule::
+ reapply (action, target&, match_extra&) const
+ {
+ // Unless the rule overrode cur_options, this function should never get
+ // called. And if it did, then it should override this function.
+ //
+ assert (false);
+ }
+
const target* rule::
import (const prerequisite_key&,
const optional<string>&,
@@ -37,14 +51,14 @@ namespace build2
sub_match (const string& n, operation_id o,
action a, target& t, match_extra& me) const
{
- // First check for an ad hoc recipe (see match_rule() for details).
+ // First check for an ad hoc recipe (see match_rule_impl() for details).
//
if (!t.adhoc_recipes.empty ())
{
// Use scratch match_extra since if there is no recipe, then we don't
// want to keep any changes and if there is, then we want it discarded.
//
- match_extra s;
+ match_extra s (true /* locked */); // Not called from adhoc_rule::match().
if (match_adhoc_recipe (action (a.meta_operation (), o), t, s) != nullptr)
return false;
}
@@ -73,7 +87,7 @@ namespace build2
{
if (!t.adhoc_recipes.empty ())
{
- match_extra s;
+ match_extra s (true /* locked */); // Not called from adhoc_rule::match().
if (match_adhoc_recipe (action (a.meta_operation (), o), t, s) != nullptr)
return false;
}
@@ -95,6 +109,9 @@ namespace build2
{
tracer trace ("file_rule::match");
+ if (match_type_ && !t.is_a<mtime_target> ())
+ return false;
+
// While strictly speaking we should check for the file's existence
// for every action (because that's the condition for us matching),
// for some actions this is clearly a waste. Say, perform_clean: we
@@ -201,7 +218,7 @@ namespace build2
}
const file_rule file_rule::instance;
- const rule_match file_rule::rule_match ("file", file_rule::instance);
+ const rule_match file_rule::rule_match ("build.file", file_rule::instance);
// alias_rule
//
@@ -217,9 +234,25 @@ namespace build2
// Inject dependency on our directory (note: not parent) so that it is
// automatically created on update and removed on clean.
//
- inject_fsdir (a, t, true, false);
+ inject_fsdir (a, t, true, true, false);
- match_prerequisites (a, t);
+ // Handle the alias match-only level.
+ //
+ match_search ms;
+ if (t.ctx.match_only && *t.ctx.match_only == match_only_level::alias)
+ {
+ ms = [] (action,
+ const target& t,
+ const prerequisite& p,
+ include_type i)
+ {
+ return prerequisite_target (
+ p.is_a<alias> () ? &search (t, p) : nullptr,
+ i);
+ };
+ }
+
+ match_prerequisites (a, t, ms);
return default_recipe;
}
@@ -319,16 +352,19 @@ namespace build2
}
void fsdir_rule::
- perform_update_direct (action a, const target& t)
+ perform_update_direct (action a, const fsdir& t)
{
+ assert (t.ctx.phase == run_phase::match);
+
// First create the parent directory. If present, it is always first.
//
- const target* p (t.prerequisite_targets[a].empty ()
- ? nullptr
- : t.prerequisite_targets[a][0]);
-
- if (p != nullptr && p->is_a<fsdir> ())
- perform_update_direct (a, *p);
+ if (const target* p = (t.prerequisite_targets[a].empty ()
+ ? nullptr
+ : t.prerequisite_targets[a][0]))
+ {
+ if (const fsdir* fp = p->is_a<fsdir> ())
+ perform_update_direct (a, *fp);
+ }
// The same code as in perform_update() above.
//
@@ -347,6 +383,8 @@ namespace build2
// Don't fail if we couldn't remove the directory because it is not empty
// (or is current working directory). In this case rmdir() will issue a
// warning when appropriate.
+
+ // The same code as in perform_clean_direct() below.
//
target_state ts (rmdir (t.dir, t, t.ctx.current_diag_noise ? 1 : 2)
? target_state::changed
@@ -358,6 +396,35 @@ namespace build2
return ts;
}
+ void fsdir_rule::
+ perform_clean_direct (action a, const fsdir& t)
+ {
+ assert (t.ctx.phase == run_phase::match);
+
+ // The same code as in perform_clean() above.
+ //
+    // Except that if there are other dependents of this fsdir{} then this will
+ // likely be a noop (because the directory won't be empty) and it makes
+ // sense to just defer cleaning to such other dependents. See
+    // clean_during_match() for background. This is similar to the logic in
+ // unmatch::safe.
+ //
+ if (t[a].dependents.load (memory_order_relaxed) == 0)
+ {
+ rmdir (t.dir, t, t.ctx.current_diag_noise ? 1 : 2);
+
+ // Then clean the parent directory. If present, it is always first.
+ //
+ if (const target* p = (t.prerequisite_targets[a].empty ()
+ ? nullptr
+ : t.prerequisite_targets[a][0]))
+ {
+ if (const fsdir* fp = p->is_a<fsdir> ())
+ perform_clean_direct (a, *fp);
+ }
+ }
+ }
+
const fsdir_rule fsdir_rule::instance;
// noop_rule
@@ -387,8 +454,9 @@ namespace build2
}
bool adhoc_rule::
- match (action a, target& t, const string& h, match_extra& me) const
+ match (action a, target& xt, const string& h, match_extra& me) const
{
+ const target& t (xt);
return pattern == nullptr || pattern->match (a, t, h, me);
}
diff --git a/libbuild2/rule.hxx b/libbuild2/rule.hxx
index 913c597..eceb6ad 100644
--- a/libbuild2/rule.hxx
+++ b/libbuild2/rule.hxx
@@ -34,6 +34,13 @@ namespace build2
// implementations. It is also a way for us to later pass more information
// without breaking source compatibility.
//
+ // A rule may adjust post hoc prerequisites by overriding apply_posthoc().
+ // See match_extra::posthoc_prerequisite_targets for background and details.
+ //
+ // A rule may support match options and if such a rule is rematched with
+ // different options, then reapply() is called. See
+ // match_extra::{cur,new}_options for background and details.
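+  //
+  // A minimal sketch (hypothetical rule, not part of this change, with the
+  // usual match()/apply() overrides omitted) of a rule using both extension
+  // points:
+  //
+  //   class my_rule: public rule
+  //   {
+  //     virtual void
+  //     apply_posthoc (action, target&, match_extra& me) const override
+  //     {
+  //       // Adjust me.posthoc_prerequisite_targets here.
+  //     }
+  //
+  //     virtual void
+  //     reapply (action, target&, match_extra& me) const override
+  //     {
+  //       // React to the options added in me.new_options here.
+  //     }
+  //   };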
+ //
struct match_extra;
class LIBBUILD2_SYMEXPORT rule
@@ -45,6 +52,12 @@ namespace build2
virtual recipe
apply (action, target&, match_extra&) const = 0;
+ virtual void
+ apply_posthoc (action, target&, match_extra&) const;
+
+ virtual void
+ reapply (action, target&, match_extra&) const;
+
rule () = default;
virtual
@@ -126,10 +139,19 @@ namespace build2
virtual recipe
apply (action, target&, match_extra&) const override;
- file_rule () {}
+ // While this rule expects an mtime_target-based target, sometimes it's
+ // necessary to register it for something less specific (normally target)
+ // in order to achieve the desired rule matching priority (see the dist
+ // and config modules for an example). For such cases this rule can be
+ // instructed to check the type and only match if it's mtime_target-based.
+ //
+ file_rule (bool match_type = false): match_type_ (match_type) {}
- static const file_rule instance;
+ static const file_rule instance; // Note: does not match the target type.
static const build2::rule_match rule_match;
+
+ private:
+ bool match_type_;
};
class LIBBUILD2_SYMEXPORT alias_rule: public simple_rule
@@ -167,7 +189,10 @@ namespace build2
// of fsdir{} without the overhead of switching to the execute phase.
//
static void
- perform_update_direct (action, const target&);
+ perform_update_direct (action, const fsdir&);
+
+ static void
+ perform_clean_direct (action, const fsdir&);
fsdir_rule () {}
static const fsdir_rule instance;
@@ -252,6 +277,16 @@ namespace build2
// The default implementation forwards to the pattern's match() if there
// is a pattern and returns true otherwise.
//
+ // Note also that in case of a member of a group-based target, match() is
+ // called on the group while apply() on the member (see match_rule_impl()
+ // in algorithms.cxx for details). This means that match() may be called
+ // without having the target locked and as a result match() should (unless
+ // known to only match a non-group) treat the target as const and only
+ // rely on immutable information (type, name, etc) since the group could
+  // be matched concurrently. This case can be detected by examining
+ // match_extra::locked (see adhoc_rule_regex_pattern::match() for a
+ // use-case).
+ //
virtual bool
match (action, target&, const string&, match_extra&) const override;
@@ -318,18 +353,24 @@ namespace build2
~adhoc_rule_pattern ();
public:
+ // Note: the adhoc_rule::match() restrictions apply here as well.
+ //
virtual bool
- match (action, target&, const string&, match_extra&) const = 0;
+ match (action, const target&, const string&, match_extra&) const = 0;
+ // Append additional group members. Note that this function should handle
+ // both ad hoc and explicit groups.
+ //
virtual void
- apply_adhoc_members (action, target&,
+ apply_group_members (action, target&,
const scope& base,
match_extra&) const = 0;
// The implementation should append pattern prerequisites to
// t.prerequisite_targets[a] but not match. It should set bit 2 in
// prerequisite_target::include to indicate update=match and bit 3
- // to indicate update=unmatch.
+ // to indicate update=unmatch. It should also avoid adding duplicate
+ // fsdir{} similar to the search_prerequisite*() functions.
//
virtual void
apply_prerequisites (action, target&,
diff --git a/libbuild2/scheduler.cxx b/libbuild2/scheduler.cxx
index 5027f90..e3fbcc1 100644
--- a/libbuild2/scheduler.cxx
+++ b/libbuild2/scheduler.cxx
@@ -93,12 +93,11 @@ namespace build2
}
void scheduler::
- deactivate (bool external)
+ deactivate_impl (bool external, lock&& rl)
{
- if (max_active_ == 1) // Serial execution.
- return;
+ // Note: assume non-serial execution.
- lock l (mutex_);
+ lock l (move (rl)); // Make sure unlocked on exception.
active_--;
waiting_++;
@@ -131,11 +130,10 @@ namespace build2
}
}
- void scheduler::
- activate (bool external, bool collision)
+ scheduler::lock scheduler::
+ activate_impl (bool external, bool collision)
{
- if (max_active_ == 1) // Serial execution.
- return;
+ // Note: assume non-serial execution.
lock l (mutex_);
@@ -160,6 +158,8 @@ namespace build2
if (shutdown_)
throw_generic_error (ECANCELED);
+
+ return l;
}
void scheduler::
@@ -207,7 +207,10 @@ namespace build2
deallocate (size_t n)
{
if (max_active_ == 1) // Serial execution.
+ {
+ assert (n == 0);
return;
+ }
lock l (mutex_);
active_ -= n;
@@ -216,13 +219,15 @@ namespace build2
size_t scheduler::
suspend (size_t start_count, const atomic_count& task_count)
{
+ assert (max_active_ != 1); // Suspend during serial execution?
+
wait_slot& s (
wait_queue_[
hash<const atomic_count*> () (&task_count) % wait_queue_size_]);
// This thread is no longer active.
//
- deactivate (false /* external */);
+ deactivate_impl (false /* external */, lock (mutex_));
// Note that the task count is checked while holding the lock. We also
// have to notify while holding the lock (see resume()). The aim here
@@ -259,7 +264,7 @@ namespace build2
// This thread is no longer waiting.
//
- activate (false /* external */, collision);
+ activate_impl (false /* external */, collision);
return tc;
}
diff --git a/libbuild2/scheduler.hxx b/libbuild2/scheduler.hxx
index dc18859..3cc206e 100644
--- a/libbuild2/scheduler.hxx
+++ b/libbuild2/scheduler.hxx
@@ -7,7 +7,8 @@
#include <list>
#include <tuple>
#include <atomic>
-#include <type_traits> // aligned_storage, etc
+#include <cstddef> // max_align_t
+#include <type_traits> // decay, etc
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
@@ -191,13 +192,15 @@ namespace build2
//
// The external flag indicates whether the wait is for an event external
// to the scheduler, that is, triggered by something other than one of the
- // threads managed by the scheduler.
+ // threads managed by the scheduler. This is used to suspend deadlock
+ // detection (which is progress-based and which cannot be measured for
+ // external events).
//
void
deactivate (bool external);
void
- activate (bool external, bool = false);
+ activate (bool external);
// Sleep for the specified duration, deactivating the thread before going
// to sleep and re-activating it after waking up (which means this
@@ -216,7 +219,7 @@ namespace build2
// Allocate additional active thread count to the current active thread,
// for example, to be "passed" to an external program:
//
- // scheduler::alloc_guard ag (ctx.sched, ctx.sched.max_active () / 2);
+ // scheduler::alloc_guard ag (*ctx.sched, ctx.sched->max_active () / 2);
// args.push_back ("-flto=" + to_string (1 + ag.n));
// run (args);
// ag.deallocate ();
@@ -241,14 +244,38 @@ namespace build2
void
deallocate (size_t);
+  // Similar to allocate() but reserve all the available threads, blocking
+ // until this becomes possible. Call unlock() on the specified lock before
+ // deactivating and lock() after activating (can be used to unlock the
+ // phase). Typical usage:
+ //
+ // scheduler::alloc_guard ag (*ctx.sched,
+ // phase_unlock (ctx, true /* delay */));
+ //
+ // Or, without unlocking the phase:
+ //
+ // scheduler::alloc_guard ag (*ctx.sched, phase_unlock (nullptr));
+ //
+ template <typename L>
+ size_t
+ serialize (L& lock);
+
struct alloc_guard
{
size_t n;
alloc_guard (): n (0), s_ (nullptr) {}
alloc_guard (scheduler& s, size_t m): n (s.allocate (m)), s_ (&s) {}
- alloc_guard (alloc_guard&& x): n (x.n), s_ (x.s_) {x.s_ = nullptr;}
- alloc_guard& operator= (alloc_guard&& x)
+
+ template <typename L,
+ typename std::enable_if<!std::is_integral<L>::value, int>::type = 0>
+ alloc_guard (scheduler& s, L&& l): n (s.serialize (l)), s_ (&s) {}
+
+ alloc_guard (alloc_guard&& x) noexcept
+ : n (x.n), s_ (x.s_) {x.s_ = nullptr;}
+
+ alloc_guard&
+ operator= (alloc_guard&& x) noexcept
{
if (&x != this)
{
@@ -353,12 +380,19 @@ namespace build2
size_t
tune (size_t max_active);
+ bool
+ tuned () const {return max_active_ != orig_max_active_;}
+
struct tune_guard
{
tune_guard (): s_ (nullptr), o_ (0) {}
tune_guard (scheduler& s, size_t ma): s_ (&s), o_ (s_->tune (ma)) {}
- tune_guard (tune_guard&& x): s_ (x.s_), o_ (x.o_) {x.s_ = nullptr;}
- tune_guard& operator= (tune_guard&& x)
+
+ tune_guard (tune_guard&& x) noexcept
+ : s_ (x.s_), o_ (x.o_) {x.s_ = nullptr;}
+
+ tune_guard&
+ operator= (tune_guard&& x) noexcept
{
if (&x != this)
{
@@ -426,8 +460,8 @@ namespace build2
{
explicit
monitor_guard (scheduler* s = nullptr): s_ (s) {}
- monitor_guard (monitor_guard&& x): s_ (x.s_) {x.s_ = nullptr;}
- monitor_guard& operator= (monitor_guard&& x)
+ monitor_guard (monitor_guard&& x) noexcept: s_ (x.s_) {x.s_ = nullptr;}
+ monitor_guard& operator= (monitor_guard&& x) noexcept
{
if (&x != this)
{
@@ -543,8 +577,8 @@ namespace build2
atomic_count* task_count;
size_t start_count;
- func_type func;
args_type args;
+ func_type func;
template <size_t... i>
void
@@ -673,7 +707,11 @@ namespace build2
//
struct task_data
{
- std::aligned_storage<sizeof (void*) * 8>::type data;
+ static const size_t data_size = (sizeof (void*) == 4
+ ? sizeof (void*) * 16
+ : sizeof (void*) * 8);
+
+ alignas (std::max_align_t) unsigned char data[data_size];
void (*thunk) (scheduler&, lock&, void*);
};
@@ -923,6 +961,12 @@ namespace build2
private:
optional<size_t>
wait_impl (size_t, const atomic_count&, work_queue);
+
+ void
+ deactivate_impl (bool, lock&&);
+
+ lock
+ activate_impl (bool, bool);
};
}
diff --git a/libbuild2/scheduler.ixx b/libbuild2/scheduler.ixx
index 96eaee1..f46d035 100644
--- a/libbuild2/scheduler.ixx
+++ b/libbuild2/scheduler.ixx
@@ -44,6 +44,20 @@ namespace build2
return suspend (start_count, task_count);
}
+ inline void scheduler::
+ deactivate (bool external)
+ {
+ if (max_active_ != 1) // Serial execution.
+ deactivate_impl (external, lock (mutex_));
+ }
+
+ inline void scheduler::
+ activate (bool external)
+ {
+ if (max_active_ != 1) // Serial execution.
+ activate_impl (external, false /* collision */);
+ }
+
inline scheduler::queue_mark::
queue_mark (scheduler& s)
: tq_ (s.queue ())
diff --git a/libbuild2/scheduler.txx b/libbuild2/scheduler.txx
index 5c6b339..87c9384 100644
--- a/libbuild2/scheduler.txx
+++ b/libbuild2/scheduler.txx
@@ -64,8 +64,8 @@ namespace build2
new (&td->data) task {
&task_count,
start_count,
- decay_copy (forward<F> (f)),
- typename task::args_type (decay_copy (forward<A> (a))...)};
+ typename task::args_type (decay_copy (forward<A> (a))...),
+ decay_copy (forward<F> (f))};
td->thunk = &task_thunk<F, A...>;
@@ -137,4 +137,42 @@ namespace build2
if (tc.fetch_sub (1, memory_order_release) - 1 <= t.start_count)
s.resume (tc); // Resume waiters, if any.
}
+
+ template <typename L>
+ size_t scheduler::
+ serialize (L& el)
+ {
+ if (max_active_ == 1) // Serial execution.
+ return 0;
+
+ lock l (mutex_);
+
+ if (active_ == 1)
+ active_ = max_active_;
+ else
+ {
+ // Wait until we are the only active thread.
+ //
+ el.unlock ();
+
+ while (active_ != 1)
+ {
+ // While it would have been more efficient to implement this via the
+ // condition variable notifications, that logic is already twisted
+ // enough (and took a considerable time to debug). So for now we keep
+ // it simple and do sleep and re-check. Make the sleep external not to
+ // trip up the deadlock detection.
+ //
+ deactivate_impl (true /* external */, move (l));
+ active_sleep (std::chrono::milliseconds (10));
+ l = activate_impl (true /* external */, false /* collision */);
+ }
+
+ active_ = max_active_;
+ l.unlock (); // Important: unlock before attempting to relock external!
+ el.lock ();
+ }
+
+ return max_active_ - 1;
+ }
}
diff --git a/libbuild2/scope.cxx b/libbuild2/scope.cxx
index 51a1c25..23781a8 100644
--- a/libbuild2/scope.cxx
+++ b/libbuild2/scope.cxx
@@ -685,6 +685,8 @@ namespace build2
pair<const target_type*, optional<string>> scope::
find_target_type (name& n, const location& loc, const target_type* tt) const
{
+ // NOTE: see also functions-name.cxx:filter() if changing anything here.
+
optional<string> ext;
string& v (n.value);
@@ -790,9 +792,11 @@ namespace build2
}
pair<const target_type&, optional<string>> scope::
- find_target_type (name& n, name& o, const location& loc) const
+ find_target_type (name& n, name& o,
+ const location& loc,
+ const target_type* tt) const
{
- auto r (find_target_type (n, loc));
+ auto r (find_target_type (n, loc, tt));
if (r.first == nullptr)
fail (loc) << "unknown target type " << n.type << " in " << n;
@@ -876,14 +880,16 @@ namespace build2
}
target_key scope::
- find_target_key (names& ns, const location& loc) const
+ find_target_key (names& ns,
+ const location& loc,
+ const target_type* tt) const
{
if (size_t n = ns.size ())
{
if (n == (ns[0].pair ? 2 : 1))
{
name dummy;
- return find_target_key (ns[0], n == 1 ? dummy : ns[1], loc);
+ return find_target_key (ns[0], n == 1 ? dummy : ns[1], loc, tt);
}
}
@@ -891,9 +897,11 @@ namespace build2
}
pair<const target_type&, optional<string>> scope::
- find_prerequisite_type (name& n, name& o, const location& loc) const
+ find_prerequisite_type (name& n, name& o,
+ const location& loc,
+ const target_type* tt) const
{
- auto r (find_target_type (n, loc));
+ auto r (find_target_type (n, loc, tt));
if (r.first == nullptr)
fail (loc) << "unknown target type " << n.type << " in " << n;
@@ -917,14 +925,16 @@ namespace build2
}
prerequisite_key scope::
- find_prerequisite_key (names& ns, const location& loc) const
+ find_prerequisite_key (names& ns,
+ const location& loc,
+ const target_type* tt) const
{
if (size_t n = ns.size ())
{
if (n == (ns[0].pair ? 2 : 1))
{
name dummy;
- return find_prerequisite_key (ns[0], n == 1 ? dummy : ns[1], loc);
+ return find_prerequisite_key (ns[0], n == 1 ? dummy : ns[1], loc, tt);
}
}
@@ -952,7 +962,9 @@ namespace build2
}
pair<reference_wrapper<const target_type>, bool> scope::
- derive_target_type (const string& name, const target_type& base)
+ derive_target_type (const string& name,
+ const target_type& base,
+ target_type::flag flags)
{
assert (root_scope () == this);
@@ -970,10 +982,20 @@ namespace build2
//
// Currently, if we define myfile{}: file{}, then myfile{foo} and
// myfile{foo.x} are the same target.
+
+ // Note: copies flags.
//
- unique_ptr<target_type> dt (new target_type (base));
- dt->base = &base;
- dt->factory = &derived_tt_factory;
+ unique_ptr<target_type> dt (
+ new target_type {
+ nullptr, // Will be patched in by insert() below.
+ &base,
+ &derived_tt_factory,
+ base.fixed_extension,
+ base.default_extension,
+ base.pattern,
+ base.print,
+ base.search,
+ base.flags | flags});
#if 0
// @@ We should probably inherit the fixed extension unless overriden with
@@ -1052,8 +1074,17 @@ namespace build2
derive_target_type (const target_type& et)
{
assert (root_scope () == this);
- unique_ptr<target_type> dt (new target_type (et));
- dt->factory = &derived_tt_factory;
+ unique_ptr<target_type> dt (
+ new target_type {
+ nullptr, // Will be patched in by insert() below.
+ et.base,
+ &derived_tt_factory,
+ et.fixed_extension,
+ et.default_extension,
+ et.pattern,
+ et.print,
+ et.search,
+ et.flags});
return root_extra->target_types.insert (et.name, move (dt)).first;
}
@@ -1174,8 +1205,8 @@ namespace build2
}
auto scope_map::
- find (const dir_path& k) const -> pair<scopes::const_iterator,
- scopes::const_iterator>
+ find (const dir_path& k, bool sno) const -> pair<scopes::const_iterator,
+ scopes::const_iterator>
{
assert (k.normalized (false));
auto i (map_.find_sup (k));
@@ -1184,9 +1215,9 @@ namespace build2
auto b (i->second.begin ());
auto e (i->second.end ());
- // Skip NULL first element.
+ // Skip NULL first element if requested.
//
- if (*b == nullptr)
+ if (sno && *b == nullptr)
++b;
assert (b != e);
diff --git a/libbuild2/scope.hxx b/libbuild2/scope.hxx
index 3d31ff1..09d61e9 100644
--- a/libbuild2/scope.hxx
+++ b/libbuild2/scope.hxx
@@ -101,8 +101,8 @@ namespace build2
scope& global_scope () {return const_cast<scope&> (ctx.global_scope);}
const scope& global_scope () const {return ctx.global_scope;}
- // Return true if the specified root scope is a sub-scope of this root
- // scope. Note that both scopes must be root.
+ // Return true if the specified root scope is a sub-scope of (but not the
+ // same as) this root scope. Note that both scopes must be root.
//
bool
sub_root (const scope&) const;
@@ -320,7 +320,7 @@ namespace build2
const target_type&
insert_target_type (const target_type& tt)
{
- return root_extra->target_types.insert (tt);
+ return root_extra->target_types.insert (tt).first;
}
template <typename T>
@@ -358,40 +358,56 @@ namespace build2
// the out directory.
//
pair<const target_type&, optional<string>>
- find_target_type (name&, name&, const location&) const;
+ find_target_type (name&, name&,
+ const location&,
+ const target_type* = nullptr) const;
// As above, but return the result as a target key (with its members
// shallow-pointing to processed parts in the two names).
//
target_key
- find_target_key (name&, name&, const location&) const;
+ find_target_key (name&, name&,
+ const location&,
+ const target_type* = nullptr) const;
// As above, but the names are passed as a vector. Issue appropriate
// diagnostics if the wrong number of names is passed.
//
target_key
- find_target_key (names&, const location&) const;
+ find_target_key (names&,
+ const location&,
+ const target_type* = nullptr) const;
// Similar to the find_target_type() but does not complete relative
// directories.
//
pair<const target_type&, optional<string>>
- find_prerequisite_type (name&, name&, const location&) const;
+ find_prerequisite_type (name&, name&,
+ const location&,
+ const target_type* = nullptr) const;
// As above, but return a prerequisite key.
//
prerequisite_key
- find_prerequisite_key (name&, name&, const location&) const;
+ find_prerequisite_key (name&, name&,
+ const location&,
+ const target_type* = nullptr) const;
prerequisite_key
- find_prerequisite_key (names&, const location&) const;
+ find_prerequisite_key (names&,
+ const location&,
+ const target_type* = nullptr) const;
// Dynamically derive a new target type from an existing one. Return the
// reference to the target type and an indicator of whether it was
// actually created.
//
+ // Note: the flags are OR'ed to the base's flags.
+ //
pair<reference_wrapper<const target_type>, bool>
- derive_target_type (const string& name, const target_type& base);
+ derive_target_type (const string& name,
+ const target_type& base,
+ target_type::flag flags = target_type::flag::none);
template <typename T>
pair<reference_wrapper<const target_type>, bool>
@@ -477,7 +493,7 @@ namespace build2
// is not yet determined (happens at the end of bootstrap_src()). NULL
// means there are no subprojects.
//
- optional<const build2::subprojects*> subprojects;
+ optional<build2::subprojects*> subprojects;
bool altn; // True if using alternative build file/directory naming.
bool loaded; // True if already loaded (load_root()).
@@ -510,9 +526,10 @@ namespace build2
build2::meta_operations meta_operations;
build2::operations operations;
- // Modules loaded by this project.
+ // Modules imported/loaded by this project.
//
- module_map modules;
+ module_import_map imported_modules;
+ module_state_map loaded_modules;
// Buildfiles already loaded for this project.
//
@@ -591,21 +608,21 @@ namespace build2
bool
find_module (const string& name) const
{
- return root_extra->modules.find_module<module> (name) != nullptr;
+ return root_extra->loaded_modules.find_module<module> (name) != nullptr;
}
template <typename T>
T*
find_module (const string& name)
{
- return root_extra->modules.find_module<T> (name);
+ return root_extra->loaded_modules.find_module<T> (name);
}
template <typename T>
const T*
find_module (const string& name) const
{
- return root_extra->modules.find_module<T> (name);
+ return root_extra->loaded_modules.find_module<T> (name);
}
public:
@@ -776,6 +793,8 @@ namespace build2
// The first element, if not NULL, is for the "owning" out path. The rest
// of the elements are for the src path shallow references.
//
+ // Note that the global scope is in the first element.
+ //
struct scopes: small_vector<scope*, 3>
{
scopes () = default;
@@ -815,6 +834,10 @@ namespace build2
// Find all the scopes that encompass this path (out or src).
//
+ // If skip_null_out is false, then the first element always corresponds to
+ // the out scope and is NULL if there is none (see struct scopes above for
+ // details).
+ //
// Note that the returned range will never be empty (there is always the
// global scope).
//
@@ -847,7 +870,7 @@ namespace build2
// "island append" restriction we have on loading additional buildfile.
//
LIBBUILD2_SYMEXPORT pair<scopes::const_iterator, scopes::const_iterator>
- find (const dir_path&) const;
+ find (const dir_path&, bool skip_null_out = true) const;
const_iterator begin () const {return map_.begin ();}
const_iterator end () const {return map_.end ();}
diff --git a/libbuild2/scope.ixx b/libbuild2/scope.ixx
index 5d76a7f..5975c76 100644
--- a/libbuild2/scope.ixx
+++ b/libbuild2/scope.ixx
@@ -146,9 +146,11 @@ namespace build2
}
inline target_key scope::
- find_target_key (name& n, name& o, const location& loc) const
+ find_target_key (name& n, name& o,
+ const location& loc,
+ const target_type* tt) const
{
- auto p (find_target_type (n, o, loc));
+ auto p (find_target_type (n, o, loc, tt));
return target_key {
&p.first,
&n.dir,
@@ -158,9 +160,11 @@ namespace build2
}
inline prerequisite_key scope::
- find_prerequisite_key (name& n, name& o, const location& loc) const
+ find_prerequisite_key (name& n, name& o,
+ const location& loc,
+ const target_type* tt) const
{
- auto p (find_prerequisite_type (n, o, loc));
+ auto p (find_prerequisite_type (n, o, loc, tt));
return prerequisite_key {
n.proj,
{
diff --git a/libbuild2/script/builtin-options.cxx b/libbuild2/script/builtin-options.cxx
index 9b4067b..b71b9d3 100644
--- a/libbuild2/script/builtin-options.cxx
+++ b/libbuild2/script/builtin-options.cxx
@@ -187,6 +187,56 @@ namespace build2
}
};
+ template <typename K, typename V, typename C>
+ struct parser<std::multimap<K, V, C> >
+ {
+ static void
+ parse (std::multimap<K, V, C>& m, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ std::size_t pos (s.position ());
+ std::string ov (s.next ());
+ std::string::size_type p = ov.find ('=');
+
+ K k = K ();
+ V v = V ();
+ std::string kstr (ov, 0, p);
+ std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (o),
+ 0
+ };
+
+ bool dummy;
+ if (!kstr.empty ())
+ {
+ av[1] = const_cast<char*> (kstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<K>::parse (k, dummy, s);
+ }
+
+ if (!vstr.empty ())
+ {
+ av[1] = const_cast<char*> (vstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<V>::parse (v, dummy, s);
+ }
+
+ m.insert (typename std::multimap<K, V, C>::value_type (k, v));
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
template <typename X, typename T, T X::*M>
void
thunk (X& x, scanner& s)
diff --git a/libbuild2/script/parser.cxx b/libbuild2/script/parser.cxx
index 88e2f06..84d2afc 100644
--- a/libbuild2/script/parser.cxx
+++ b/libbuild2/script/parser.cxx
@@ -1028,7 +1028,7 @@ namespace build2
storage.clear ();
to_stream (os,
- reverse (a.value, storage),
+ reverse (a.value, storage, true /* reduce */),
quote_mode::normal,
'@');
}
@@ -1134,9 +1134,10 @@ namespace build2
if (t.value == "env")
{
parsed_env r (parse_env_builtin (t, tt));
- c.cwd = move (r.cwd);
- c.variables = move (r.variables);
- c.timeout = r.timeout;
+ c.cwd = move (r.cwd);
+ c.variables = move (r.variables);
+ c.timeout = r.timeout;
+ c.timeout_success = r.timeout_success;
env = true;
}
else if (t.value == "for")
@@ -1601,6 +1602,10 @@ namespace build2
{
r.timeout = chrono::seconds (*v);
}
+ else if (o == "-s" || o == "--timeout-success")
+ {
+ r.timeout_success = true;
+ }
else if (optional<dir_path> v = dir ("--cwd", "-c"))
{
r.cwd = move (*v);
@@ -1615,6 +1620,9 @@ namespace build2
break;
}
+ if (r.timeout_success && !r.timeout)
+ fail (l) << "env: -s|--timeout-success specified without -t|--timeout";
+
// Parse arguments (variable sets).
//
for (; i != e; ++i)
@@ -2676,7 +2684,11 @@ namespace build2
if (val.type != nullptr)
{
etype = val.type->element_type;
- untypify (val);
+
+ // Note that here we don't want to be reducing empty simple
+ // values to empty lists.
+ //
+ untypify (val, false /* reduce */);
}
size_t fli (li);
@@ -2759,7 +2771,7 @@ namespace build2
}
parser::parsed_doc::
- parsed_doc (parsed_doc&& d)
+ parsed_doc (parsed_doc&& d) noexcept
: re (d.re), end_line (d.end_line), end_column (d.end_column)
{
if (re)
diff --git a/libbuild2/script/parser.hxx b/libbuild2/script/parser.hxx
index 91f50bf..795ce4e 100644
--- a/libbuild2/script/parser.hxx
+++ b/libbuild2/script/parser.hxx
@@ -140,7 +140,7 @@ namespace build2
parsed_doc (string, uint64_t line, uint64_t column);
parsed_doc (regex_lines&&, uint64_t line, uint64_t column);
- parsed_doc (parsed_doc&&); // Note: move constuctible-only type.
+    parsed_doc (parsed_doc&&) noexcept; // Note: move constructible-only type.
~parsed_doc ();
};
@@ -163,14 +163,15 @@ namespace build2
pre_parse_line_start (token&, token_type&, lexer_mode);
// Parse the env pseudo-builtin arguments up to the program name. Return
- // the program execution timeout, CWD, the list of the variables that
- // should be unset ("name") and/or set ("name=value") in the command
- // environment, and the token/type that starts the program name. Note
- // that the variable unsets come first, if present.
+ // the program execution timeout and its success flag, CWD, the list of
+ // the variables that should be unset ("name") and/or set ("name=value")
+ // in the command environment, and the token/type that starts the
+ // program name. Note that the variable unsets come first, if present.
//
struct parsed_env
{
optional<duration> timeout;
+ bool timeout_success = false;
optional<dir_path> cwd;
environment_vars variables;
};
diff --git a/libbuild2/script/regex.hxx b/libbuild2/script/regex.hxx
index f6cf566..3c49b31 100644
--- a/libbuild2/script/regex.hxx
+++ b/libbuild2/script/regex.hxx
@@ -271,8 +271,8 @@ namespace build2
template <typename T>
struct line_char_cmp
: public std::enable_if<std::is_integral<T>::value ||
- (std::is_enum<T>::value &&
- !std::is_same<T, char_flags>::value)> {};
+ (std::is_enum<T>::value &&
+ !std::is_same<T, char_flags>::value)> {};
template <typename T, typename = typename line_char_cmp<T>::type>
bool
@@ -470,10 +470,10 @@ namespace std
is (mask m, char_type c) const
{
return m ==
- (c.type () == line_type::special && c.special () >= 0 &&
- build2::digit (static_cast<char> (c.special ()))
- ? digit
- : 0);
+ (c.type () == line_type::special && c.special () >= 0 &&
+ build2::digit (static_cast<char> (c.special ()))
+ ? digit
+ : 0);
}
const char_type*
diff --git a/libbuild2/script/run.cxx b/libbuild2/script/run.cxx
index f486138..f8f98c1 100644
--- a/libbuild2/script/run.cxx
+++ b/libbuild2/script/run.cxx
@@ -1237,6 +1237,17 @@ namespace build2
bool terminated = false; // True if this command has been terminated.
+ // True if this command has been terminated but we failed to read out
+ // its stdout and/or stderr streams in a reasonable timeframe (2
+ // seconds) after the termination.
+ //
+ // Note that this may happen if there is a still running child process
+ // of the terminated command which has inherited the parent's stdout and
+ // stderr file descriptors.
+ //
+ bool unread_stdout = false;
+ bool unread_stderr = false;
+
// Only for diagnostics.
//
const location& loc;
@@ -1397,32 +1408,121 @@ namespace build2
diag_buffer& b (c->dbuf);
if (b.is.is_open ())
- fds.emplace_back (b.is.fd (), &b);
+ fds.emplace_back (b.is.fd (), c);
}
fds.emplace_back (is.fd ());
fdselect_state& ist (fds.back ());
+ size_t unread (fds.size ());
optional<timestamp> dlt (dl ? dl->value : optional<timestamp> ());
// If there are some left-hand side processes/builtins running, then
- // terminate them and reset the deadline to nullopt. Otherwise, fail
- // straigh away.
+ // terminate them and, if there are unread stdout/stderr file
+ // descriptors, then increase the deadline by another 2 seconds and
+ // return true. In this case the term() should be called again upon
+ // reaching the timeout. Otherwise return false. If there are no
+ // left-hand side processes/builtins running, then fail straight away.
//
// Note that in the former case the further reading will be performed
- // without timeout. This, however, is fine since all the processes and
- // builtins are terminated and we only need to read out the buffered
- // data.
- //
- auto term = [&dlt, pipeline, &trace, &ll, what] ()
+ // with the adjusted timeout. We assume that this timeout is normally
+ // sufficient to read out the buffered data written by the already
+ // terminated processes. If, however, that's not the case (see
+ // pipe_command for the possible reasons), then term() needs to be
+ // called for the second time and the reading should be interrupted
+ // afterwards.
+ //
+ auto term = [&dlt, pipeline, &fds, &ist, &is, &unread,
+ &trace, &ll, what, terminated = false] () mutable -> bool
{
- if (pipeline != nullptr)
+ // Can only be called if the deadline is specified.
+ //
+ assert (dlt);
+
+ if (pipeline == nullptr)
+ fail (ll) << what << " terminated: execution timeout expired";
+
+ if (!terminated)
{
+ // Terminate the pipeline and adjust the deadline.
+ //
+
+ // Note that if we are still reading the stream and it's a builtin
+ // stdout, then we need to close it before terminating the pipeline.
+ // Not doing so can result in blocking this builtin on the write
+ // operation and thus aborting the build2 process (see term_pipe()
+ // for details).
+ //
+ // Should we do the same for all the pipeline builtins' stderr
+ // streams? No, we don't, since the builtin diagnostics is assumed to
+ // always fit the pipe buffer (see libbutl/builtin.cxx for details).
+ // Thus, we will leave them open to fully read out the diagnostics.
+ //
+ if (ist.fd != nullfd && pipeline->bltn != nullptr)
+ {
+ try
+ {
+ is.close ();
+ }
+ catch (const io_error&)
+ {
+ // Not much we can do here.
+ }
+
+ ist.fd = nullfd;
+ --unread;
+ }
+
term_pipe (pipeline, trace);
- dlt = nullopt;
+ terminated = true;
+
+ if (unread != 0)
+ dlt = system_clock::now () + chrono::seconds (2);
+
+ return unread != 0;
}
else
- fail (ll) << what << " terminated: execution timeout expired";
+ {
+ // Set the unread_{stderr,stdout} flags to true for the commands
+ // whose streams are not fully read yet.
+ //
+
+ // Can only be called after the first call of term() which would
+ // throw failed if pipeline is NULL.
+ //
+ assert (pipeline != nullptr);
+
+ for (fdselect_state& s: fds)
+ {
+ if (s.fd != nullfd)
+ {
+ if (s.data != nullptr) // stderr.
+ {
+ pipe_command* c (static_cast<pipe_command*> (s.data));
+
+ c->unread_stderr = true;
+
+ // Let's also close the stderr stream not to confuse
+ // diag_buffer::close() with a not fully read stream (eof is
+ // not reached, etc).
+ //
+ try
+ {
+ c->dbuf.is.close ();
+ }
+ catch (const io_error&)
+ {
+ // Not much we can do here. Anyway the diagnostics will be
+ // issued by complete_pipe().
+ }
+ }
+ else // stdout.
+ pipeline->unread_stdout = true;
+ }
+ }
+
+ return false;
+ }
};
// Note that on Windows if the file descriptor is not a pipe, then
@@ -1434,7 +1534,7 @@ namespace build2
// return true and thus no ifdselect() calls will ever be made.
//
string s;
- for (size_t unread (fds.size ()); unread != 0;)
+ while (unread != 0)
{
// Read any pending data from the input stream.
//
@@ -1448,7 +1548,10 @@ namespace build2
// blocking file descriptor.
//
if (dlt && *dlt <= system_clock::now ())
- term ();
+ {
+ if (!term ())
+ break;
+ }
if (sr.next (s))
{
@@ -1480,8 +1583,10 @@ namespace build2
if (*dlt <= now || ifdselect (fds, *dlt - now) == 0)
{
- term ();
- continue;
+ if (term ())
+ continue;
+ else
+ break;
}
}
else
@@ -1493,7 +1598,7 @@ namespace build2
{
if (s.ready &&
s.data != nullptr &&
- !static_cast<diag_buffer*> (s.data)->read ())
+ !static_cast<pipe_command*> (s.data)->dbuf.read ())
{
s.fd = nullfd;
--unread;
@@ -1964,7 +2069,7 @@ namespace build2
if (c.timeout)
{
- deadline d (system_clock::now () + *c.timeout, false /* success */);
+ deadline d (system_clock::now () + *c.timeout, c.timeout_success);
if (!dl || d < *dl)
dl = d;
}
@@ -2238,10 +2343,14 @@ namespace build2
// Read out all the pipeline's buffered stderr streams watching for the
// deadline, if specified. If the deadline is reached, then terminate
- // the whole pipeline, reset the deadline to nullopt, and continue
- // reading. Note that the further reading will be performed without
- // timeout. This, however, is fine since all the processes and builtins
- // are terminated and we only need to read out the buffered data.
+ // the whole pipeline, move the deadline by another 2 seconds, and
+ // continue reading.
+ //
+ // Note that we assume that this timeout increment is normally
+ // sufficient to read out the buffered data written by the already
+ // terminated processes. If, however, that's not the case (see
+ // pipe_command for the possible reasons), then we just set
+ // unread_stderr flag to true for such commands and bail out.
//
// Also note that this is a reduced version of the above read() function.
//
@@ -2253,13 +2362,15 @@ namespace build2
diag_buffer& b (c->dbuf);
if (b.is.is_open ())
- fds.emplace_back (b.is.fd (), &b);
+ fds.emplace_back (b.is.fd (), c);
}
// Note that the current command deadline is the earliest (see above).
//
optional<timestamp> dlt (pc.dl ? pc.dl->value : optional<timestamp> ());
+ bool terminated (false);
+
for (size_t unread (fds.size ()); unread != 0;)
{
try
@@ -2272,9 +2383,37 @@ namespace build2
if (*dlt <= now || ifdselect (fds, *dlt - now) == 0)
{
- term_pipe (&pc, trace);
- dlt = nullopt;
- continue;
+ if (!terminated)
+ {
+ term_pipe (&pc, trace);
+ terminated = true;
+
+ dlt = system_clock::now () + chrono::seconds (2);
+ continue;
+ }
+ else
+ {
+ for (fdselect_state& s: fds)
+ {
+ if (s.fd != nullfd)
+ {
+ pipe_command* c (static_cast<pipe_command*> (s.data));
+
+ c->unread_stderr = true;
+
+ // Let's also close the stderr stream not to confuse
+ // diag_buffer::close() (see read() for details).
+ //
+ try
+ {
+ c->dbuf.is.close ();
+ }
+ catch (const io_error&) {}
+ }
+ }
+
+ break;
+ }
}
}
else
@@ -2282,7 +2421,8 @@ namespace build2
for (fdselect_state& s: fds)
{
- if (s.ready && !static_cast<diag_buffer*> (s.data)->read ())
+ if (s.ready &&
+ !static_cast<pipe_command*> (s.data)->dbuf.read ())
{
s.fd = nullfd;
--unread;
@@ -2336,10 +2476,11 @@ namespace build2
// Iterate over the pipeline processes and builtins left to right,
// printing their stderr if buffered and issuing the diagnostics if the
// exit code is not available (terminated abnormally or due to a
- // deadline) or is unexpected. Throw failed at the end if the exit code
- // for any of them is not available. Return false if exit code for any
- // of them is unexpected (the return is used, for example, in the if-
- // conditions).
+ // deadline), is unexpected, or stdout and/or stderr was not fully
+ // read. Throw failed at the end if the exit code for any of them is not
+ // available or stdout and/or stderr was not fully read. Return false if
+ // exit code for any of them is unexpected (the return is used, for
+ // example, in the if-conditions).
//
// Note: must be called after wait_pipe() and only once.
//
@@ -2410,6 +2551,30 @@ namespace build2
path pr (cmd_path (cmd));
+ // Print the diagnostics if the command stdout and/or stderr are not
+ // fully read.
+ //
+ auto unread_output_diag = [&dr, c, w, &pr] (bool main_error)
+ {
+ if (main_error)
+ dr << error (c->loc) << w << ' ' << pr << ' ';
+ else
+ dr << error;
+
+ if (c->unread_stdout)
+ {
+ dr << "stdout ";
+
+ if (c->unread_stderr)
+ dr << "and ";
+ }
+
+ if (c->unread_stderr)
+ dr << "stderr ";
+
+ dr << "not closed after exit";
+ };
+
// Fail if the process is terminated due to reaching the deadline.
//
if (!exit)
@@ -2417,6 +2582,9 @@ namespace build2
dr << error (ll) << w << ' ' << pr
<< " terminated: execution timeout expired";
+ if (c->unread_stdout || c->unread_stderr)
+ unread_output_diag (false /* main_error */);
+
if (verb == 1)
{
dr << info << "command line: ";
@@ -2443,10 +2611,18 @@ namespace build2
valid = exit->code () < 256;
#endif
- // In the presense of a valid exit code we print the diagnostics
- // and return false rather than throw.
+ // In the presence of a valid exit code and given stdout and
+ // stderr are fully read out we print the diagnostics and return
+ // false rather than throw.
+ //
+ // Note that there can be a race, so that the process we have
+ // terminated due to reaching the deadline has in fact exited
+ // normally. Thus, the 'unread stderr' situation can also happen
+ // to a successfully terminated process. If that's the case, we
+ // report this problem as the main error in that case and as a
+ // secondary error otherwise.
//
- if (!valid)
+ if (!valid || c->unread_stdout || c->unread_stderr)
fail = true;
exit_comparison cmp (cmd.exit
@@ -2473,11 +2649,11 @@ namespace build2
uint16_t ec (exit->code ()); // Make sure printed as integer.
if (!valid)
+ {
dr << "exit code " << ec << " out of 0-255 range";
+ }
else
{
- assert (!success && diag);
-
if (cmd.exit)
dr << "exit code " << ec
<< (cmp == exit_comparison::eq ? " != " : " == ")
@@ -2487,6 +2663,9 @@ namespace build2
}
}
+ if (c->unread_stdout || c->unread_stderr)
+ unread_output_diag (false /* main_error */);
+
if (verb == 1)
{
dr << info << "command line: ";
@@ -2506,6 +2685,8 @@ namespace build2
//
print_file (dr, *c->esp, ll);
}
+ else if (c->unread_stdout || c->unread_stderr)
+ unread_output_diag (true /* main_error */);
}
// Now print the buffered stderr, if present, and/or flush the
@@ -2803,7 +2984,7 @@ namespace build2
// If/when required we could probably support the precise sleep
// mode (e.g., via an option).
//
- env.context.sched.sleep (t);
+ env.context.sched->sleep (t);
}
};
@@ -3353,8 +3534,7 @@ namespace build2
try
{
size_t n (0);
- for (const dir_entry& de: dir_iterator (p,
- false /* ignore_dangling */))
+ for (const dir_entry& de: dir_iterator (p, dir_iterator::no_follow))
{
if (n++ < 10)
dr << '\n' << (de.ltype () == entry_type::directory
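In plain terms, the deadline handling added to run.cxx above is two-step: the first time the deadline fires, the pipeline is terminated and the deadline is pushed back by two seconds so buffered output can drain; the second time, the remaining streams are flagged as unread and reading stops. A rough standalone sketch of that state machine (the function name, parameters, and the single unread_stderr flag are illustrative, not the actual run.cxx interface):

  #include <chrono>
  #include <iostream>

  // Illustrative only: called when the read loop hits the deadline. Returns
  // true if reading should continue (with the extended deadline) and false
  // if it should give up on the remaining streams.
  //
  static bool
  on_deadline (bool& terminated,
               bool& unread_stderr,
               std::chrono::steady_clock::time_point& deadline)
  {
    using namespace std::chrono;

    if (!terminated)
    {
      // First expiration: terminate the pipeline (term_pipe() in the real
      // code) and give the children two more seconds to flush their output.
      //
      terminated = true;
      deadline = steady_clock::now () + seconds (2);
      return true;
    }

    // Second expiration: mark the stream as not fully read and bail out
    // (complete_pipe() then turns this into diagnostics).
    //
    unread_stderr = true;
    return false;
  }

  int main ()
  {
    bool terminated (false), unread_stderr (false);
    auto dl (std::chrono::steady_clock::now ());

    std::cout << on_deadline (terminated, unread_stderr, dl) << '\n'; // 1
    std::cout << on_deadline (terminated, unread_stderr, dl) << '\n'; // 0
  }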
diff --git a/libbuild2/script/script.cxx b/libbuild2/script/script.cxx
index b8dfc68..b53fc23 100644
--- a/libbuild2/script/script.cxx
+++ b/libbuild2/script/script.cxx
@@ -425,9 +425,14 @@ namespace build2
// Timeout.
//
if (c.timeout)
+ {
o << " -t "
<< chrono::duration_cast<chrono::seconds> (*c.timeout).count ();
+ if (c.timeout_success)
+ o << " -s";
+ }
+
// CWD.
//
if (c.cwd)
@@ -768,7 +773,9 @@ namespace build2
{
using script::cleanup;
- assert (!implicit || c.type == cleanup_type::always);
+ // Implicit never-cleanup doesn't make sense.
+ //
+ assert (!implicit || c.type != cleanup_type::never);
const path& p (c.path);
diff --git a/libbuild2/script/script.hxx b/libbuild2/script/script.hxx
index cccad98..f5bd69a 100644
--- a/libbuild2/script/script.hxx
+++ b/libbuild2/script/script.hxx
@@ -331,9 +331,13 @@ namespace build2
process_path program;
strings arguments;
- optional<dir_path> cwd; // From env builtin.
- environment_vars variables; // From env builtin.
- optional<duration> timeout; // From env builtin.
+
+ // These come from the env builtin.
+ //
+ optional<dir_path> cwd;
+ environment_vars variables;
+ optional<duration> timeout;
+ bool timeout_success = false;
optional<redirect> in;
optional<redirect> out;
diff --git a/libbuild2/search.cxx b/libbuild2/search.cxx
index 3bdb503..4e855e3 100644
--- a/libbuild2/search.cxx
+++ b/libbuild2/search.cxx
@@ -15,7 +15,9 @@ using namespace butl;
namespace build2
{
const target*
- search_existing_target (context& ctx, const prerequisite_key& pk)
+ search_existing_target (context& ctx,
+ const prerequisite_key& pk,
+ bool out_only)
{
tracer trace ("search_existing_target");
@@ -39,9 +41,10 @@ namespace build2
// Prerequisite's out directory can be one of the following:
//
- // empty This means out is undetermined and we simply search for a
- // target that is in the out tree which happens to be indicated
- // by an empty value, so we can just pass this as is.
+ // empty This means out is undetermined and we search for a target
+ // first in the out tree (which happens to be indicated by an
+ // empty value, so we can just pass this as is) and if not
+ // found, then in the src tree (unless suppressed).
//
// absolute This is the "final" value that doesn't require any processing
// and we simply use it as is.
@@ -74,6 +77,27 @@ namespace build2
const target* t (
ctx.targets.find (*tk.type, d, o, *tk.name, tk.ext, trace));
+ // Try in the src tree.
+ //
+ if (t == nullptr &&
+ !out_only &&
+ tk.out->empty () &&
+ tk.dir->relative () &&
+ !pk.scope->out_eq_src ())
+ {
+ o = move (d);
+
+ d = pk.scope->src_path ();
+
+ if (!tk.dir->empty ())
+ {
+ d /= *tk.dir;
+ d.normalize ();
+ }
+
+ t = ctx.targets.find (*tk.type, d, o, *tk.name, tk.ext, trace);
+ }
+
if (t != nullptr)
l5 ([&]{trace << "existing target " << *t
<< " for prerequisite " << pk;});
@@ -241,7 +265,24 @@ namespace build2
//
dir_path d;
if (tk.dir->absolute ())
+ {
d = *tk.dir; // Already normalized.
+
+ // Even if out is empty, it may still be (only) in src.
+ //
+ // Note: issue diagnostics consistent with search() after skipping this
+ // function due to non-empty out.
+ //
+ // @@ PERF: we could first check if it's in pk.scope, which feels like
+ // the common case. Though this doesn't seem to affect
+ // performance in any noticeable way.
+ //
+ auto p (ctx.scopes.find (d, false)); // Note: never empty.
+ if (*p.first == nullptr && ++p.first != p.second)
+ {
+ fail << "no existing source file for prerequisite " << pk << endf;
+ }
+ }
else
{
d = pk.scope->out_path ();
@@ -289,7 +330,17 @@ namespace build2
//
dir_path d;
if (tk.dir->absolute ())
+ {
d = *tk.dir; // Already normalized.
+
+ // As above.
+ //
+ auto p (ctx.scopes.find (d, false));
+ if (*p.first == nullptr && ++p.first != p.second)
+ {
+ fail << "no existing source file for prerequisite " << pk << endf;
+ }
+ }
else
{
d = pk.scope->out_path ();
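The out-to-src fallback added above amounts to composing the prerequisite's relative directory first against the scope's out path and, if nothing is found there, against its src path while remembering the out directory for the second lookup. A rough illustration using std::filesystem (the directory values are made up; build2 uses its own dir_path/scope types):

  #include <filesystem>
  #include <iostream>
  #include <utility>

  int main ()
  {
    namespace fs = std::filesystem;

    // Made-up values for illustration.
    //
    fs::path out_base ("/tmp/hello-out/hello"); // scope's out_path ()
    fs::path src_base ("/tmp/hello/hello");     // scope's src_path ()
    fs::path tk_dir   ("sub");                  // relative prerequisite dir

    fs::path d (out_base / tk_dir); // First lookup: in the out tree.
    fs::path o;                     // Empty out: "undetermined".

    // Not found in out: retry in src, now recording out explicitly so the
    // found target is still associated with its out directory.
    //
    o = std::move (d);
    d = src_base / tk_dir;

    std::cout << "src dir: " << d << ", out dir: " << o << '\n';
  }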
diff --git a/libbuild2/search.hxx b/libbuild2/search.hxx
index aa30648..198c65f 100644
--- a/libbuild2/search.hxx
+++ b/libbuild2/search.hxx
@@ -15,8 +15,13 @@ namespace build2
// Search for an existing target in this prerequisite's scope. Scope can be
// NULL if directories are absolute.
//
+ // If dir is relative and out is not specified, then first search in the out
+ // tree and, if not found, then in the src tree, unless out_only is true.
+ // If dir is absolute, then out is expected to be specified as well, if
+ // necessary.
+ //
LIBBUILD2_SYMEXPORT const target*
- search_existing_target (context&, const prerequisite_key&);
+ search_existing_target (context&, const prerequisite_key&, bool out_only);
// Search for an existing file. If the prerequisite directory is relative,
// then look in the scope's src directory. Otherwise, if the absolute
@@ -32,6 +37,8 @@ namespace build2
// Create a new target in this prerequisite's scope.
//
+ // Fail if the target is in src directory.
+ //
LIBBUILD2_SYMEXPORT const target&
create_new_target (context&, const prerequisite_key&);
diff --git a/libbuild2/target-type.hxx b/libbuild2/target-type.hxx
index eae2caf..93c5744 100644
--- a/libbuild2/target-type.hxx
+++ b/libbuild2/target-type.hxx
@@ -93,7 +93,14 @@ namespace build2
//
bool (*print) (ostream&, const target_key&, bool name_only);
- const target* (*search) (const target&, const prerequisite_key&);
+ // Target type-specific prerequisite to target search.
+ //
+ // If the passed target is NULL, then only search for an existing target
+ // (which can be performed during execute, not only match).
+ //
+ const target* (*search) (context&,
+ const target*,
+ const prerequisite_key&);
// Target type flags.
//
@@ -102,12 +109,15 @@ namespace build2
// group link-up only happens during match, then the hint would be looked
// up before the group is known.
//
+ // Note: consider exposing as an attribute in define if adding a new flag.
+ //
enum class flag: uint64_t
{
none = 0,
group = 0x01, // A (non-adhoc) group.
see_through = group | 0x02, // A group with "see through" semantics.
- member_hint = group | 0x04 // Untyped rule hint applies to members.
+ member_hint = group | 0x04, // Untyped rule hint applies to members.
+ dyn_members = group | 0x08 // A group with dynamic members.
};
flag flags;
@@ -131,6 +141,9 @@ namespace build2
bool
is_a (const char*) const; // Defined in target.cxx
+
+ target_type& operator= (target_type&&) = delete;
+ target_type& operator= (const target_type&) = delete;
};
inline bool
@@ -191,18 +204,18 @@ namespace build2
return type_map_.empty ();
}
- const target_type&
+ pair<reference_wrapper<const target_type>, bool>
insert (const target_type& tt)
{
- type_map_.emplace (tt.name, target_type_ref (tt));
- return tt;
+ auto r (type_map_.emplace (tt.name, target_type_ref (tt)));
+ return {r.second ? tt : r.first->second.get (), r.second};
}
template <typename T>
const target_type&
insert ()
{
- return insert (T::static_type);
+ return insert (T::static_type).first;
}
pair<reference_wrapper<const target_type>, bool>
@@ -248,7 +261,7 @@ namespace build2
target_type_ref (unique_ptr<target_type>&& p)
: p_ (p.release ()), d_ (true) {}
- target_type_ref (target_type_ref&& r)
+ target_type_ref (target_type_ref&& r) noexcept
: p_ (r.p_), d_ (r.d_) {r.p_ = nullptr;}
~target_type_ref () {if (p_ != nullptr && d_) delete p_;}
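Note that the new dyn_members value, like the other composite flags above, incorporates the group bit, so a plain group test keeps working for groups with dynamic members. A small standalone mock of the flag scheme (the has() helper is illustrative; build2 defines its own bitmask operators):

  #include <cstdint>
  #include <iostream>

  enum class flag: std::uint64_t
  {
    none        = 0,
    group       = 0x01,
    see_through = group | 0x02,
    member_hint = group | 0x04,
    dyn_members = group | 0x08
  };

  static bool
  has (flag f, flag m)
  {
    return (static_cast<std::uint64_t> (f) & static_cast<std::uint64_t> (m)) ==
           static_cast<std::uint64_t> (m);
  }

  int main ()
  {
    // Every composite value carries the group bit, so one test covers
    // see_through, member_hint, and dyn_members groups alike.
    //
    std::cout << has (flag::dyn_members, flag::group) << '\n'; // 1
    std::cout << has (flag::none,        flag::group) << '\n'; // 0
  }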
diff --git a/libbuild2/target.cxx b/libbuild2/target.cxx
index 76d45c7..2a134a4 100644
--- a/libbuild2/target.cxx
+++ b/libbuild2/target.cxx
@@ -38,7 +38,9 @@ namespace build2
if (!name->empty ())
{
v = *name;
- target::combine_name (v, ext, false /* @@ TODO: what to do? */);
+ // @@ TMP: see also other calls to combine_name() -- need to fix.
+ //
+ target::combine_name (v, ext, false /* @@ TMP: what to do? */);
}
else
assert (!ext || ext->empty ()); // Unspecified or none.
@@ -141,7 +143,8 @@ namespace build2
pair<lookup, size_t> target::
lookup_original (const variable& var,
bool target_only,
- const scope* bs) const
+ const scope* bs,
+ bool locked) const
{
pair<lookup_type, size_t> r (lookup_type (), 0);
@@ -202,9 +205,14 @@ namespace build2
{
if (!target_only)
{
- target_key tk (key ());
- target_key g1k (g1 != nullptr ? g1->key () : target_key {});
- target_key g2k (g2 != nullptr ? g2->key () : target_key {});
+ auto key = [locked] (const target* t)
+ {
+ return locked ? t->key_locked () : t->key ();
+ };
+
+ target_key tk (key (this));
+ target_key g1k (g1 != nullptr ? key (g1) : target_key {});
+ target_key g2k (g2 != nullptr ? key (g2) : target_key {});
if (bs == nullptr)
bs = &base_scope ();
@@ -225,14 +233,30 @@ namespace build2
}
value& target::
- append (const variable& var)
+ append (const variable& var, const scope* bs)
{
// Note: see also prerequisite::append() if changing anything here.
// Note that here we want the original value without any overrides
// applied.
//
- auto l (lookup_original (var).first);
+ auto l (lookup_original (var, false, bs).first);
+
+ if (l.defined () && l.belongs (*this)) // Existing var in this target.
+ return vars.modify (l); // Ok since this is original.
+
+ value& r (assign (var)); // NULL.
+
+ if (l.defined ())
+ r = *l; // Copy value (and type) from the outer scope.
+
+ return r;
+ }
+
+ value& target::
+ append_locked (const variable& var, const scope* bs)
+ {
+ auto l (lookup_original (var, false, bs, true /* locked */).first);
if (l.defined () && l.belongs (*this)) // Existing var in this target.
return vars.modify (l); // Ok since this is original.
@@ -564,16 +588,33 @@ namespace build2
context& ctx (t.ctx);
include_type r (include_type::normal);
-
- if (const string* v = cast_null<string> (p.vars[ctx.var_include]))
{
- if (*v == "false") r = include_type::excluded;
- else if (*v == "true") r = include_type::normal;
- else if (*v == "adhoc") r = include_type::adhoc;
- else if (*v == "posthoc") r = include_type::posthoc;
- else
- fail << "invalid " << *ctx.var_include << " variable value "
- << "'" << *v << "' specified for prerequisite " << p;
+ lookup l (p.vars[ctx.var_include]);
+
+ if (l.defined ())
+ {
+ if (l->null)
+ {
+ // @@ TMP (added in 0.16.0).
+ //
+ warn << "null " << *ctx.var_include << " variable value specified "
+ << "for prerequisite " << p <<
+ info << "treated as undefined for backwards compatibility" <<
+ info << "this warning will become error in the future";
+ }
+ else
+ {
+ const string& v (cast<string> (*l));
+
+ if (v == "false") r = include_type::excluded;
+ else if (v == "true") r = include_type::normal;
+ else if (v == "adhoc") r = include_type::adhoc;
+ else if (v == "posthoc") r = include_type::posthoc;
+ else
+ fail << "invalid " << *ctx.var_include << " variable value '"
+ << v << "' specified for prerequisite " << p;
+ }
+ }
}
// Handle operation-specific override (see var_include documentation
@@ -599,31 +640,40 @@ namespace build2
? ctx.current_outer_oif
: ctx.current_inner_oif)->id].ovar;
- if (ovar != nullptr && (l = p.vars[*ovar]))
+ if (ovar != nullptr)
{
- // Maybe we should optimize this for the common cases (bool, path,
- // name)? But then again we don't expect many such overrides. Plus
- // will complicate the diagnostics below.
- //
- ns = reverse (*l, storage);
+ l = p.vars[*ovar];
- if (ns.size () == 1)
+ if (l.defined ())
{
- const name& n (ns[0]);
+ if (l->null)
+ fail << "null " << *ovar << " variable value specified for "
+ << "prerequisite " << p;
- if (n.simple ())
+ // Maybe we should optimize this for the common cases (bool, path,
+ // name)? But then again we don't expect many such overrides. Plus
+ // will complicate the diagnostics below.
+ //
+ ns = reverse (*l, storage, true /* reduce */);
+
+ if (ns.size () == 1)
{
- const string& v (n.value);
+ const name& n (ns[0]);
+
+ if (n.simple ())
+ {
+ const string& v (n.value);
- if (v == "false")
- r1 = false;
- else if (v == "true")
- r1 = true;
+ if (v == "false")
+ r1 = false;
+ else if (v == "true")
+ r1 = true;
+ }
}
- }
- if (r1 && !*r1)
- r = include_type::excluded;
+ if (r1 && !*r1)
+ r = include_type::excluded;
+ }
}
}
@@ -644,8 +694,8 @@ namespace build2
// Note: we have to delay this until the meta-operation callback above
// had a chance to override it.
//
- fail << "unrecognized " << *ovar << " variable value "
- << "'" << ns << "' specified for prerequisite " << p;
+ fail << "unrecognized " << *ovar << " variable value '" << ns
+ << "' specified for prerequisite " << p;
}
}
@@ -976,14 +1026,19 @@ namespace build2
case run_phase::load: break;
case run_phase::match:
{
- // Similar logic to matched_state_impl().
+ // Similar logic to target::matched().
//
const opstate& s (state[action () /* inner */]);
- // Note: already synchronized.
- size_t o (s.task_count.load (memory_order_relaxed) - ctx.count_base ());
+ // Note: use acquire for group_state().
+ //
+ size_t c (s.task_count.load (memory_order_acquire));
+ size_t b (ctx.count_base ()); // Note: cannot do (c - b)!
- if (o != offset_applied && o != offset_executed)
+ if (!(c == (b + offset_applied) ||
+ c == (b + offset_executed) ||
+ (c >= (b + offset_busy) &&
+ s.match_extra.cur_options_.load (memory_order_relaxed) != 0)))
break;
}
// Fall through.
@@ -1106,25 +1161,27 @@ namespace build2
//
const target*
- target_search (const target& t, const prerequisite_key& pk)
+ target_search (context& ctx, const target*, const prerequisite_key& pk)
{
// The default behavior is to look for an existing target in the
// prerequisite's directory scope.
//
- return search_existing_target (t.ctx, pk);
+ return search_existing_target (ctx, pk, true /* out_only */);
}
const target*
- file_search (const target& t, const prerequisite_key& pk)
+ file_search (context& ctx, const target* t, const prerequisite_key& pk)
{
- // First see if there is an existing target.
+ // First see if there is an existing target in the out or src tree.
//
- if (const target* e = search_existing_target (t.ctx, pk))
+ if (const target* e = search_existing_target (ctx,
+ pk,
+ false /* out_only */))
return e;
// Then look for an existing file in the src tree.
//
- return search_existing_file (t.ctx, pk);
+ return t != nullptr ? search_existing_file (ctx, pk) : nullptr;
}
extern const char target_extension_none_[] = "";
@@ -1215,19 +1272,79 @@ namespace build2
target_type::flag::none
};
+ // group
+ //
+ group_view group::
+ group_members (action a) const
+ {
+ if (members_on == 0) // Not yet discovered.
+ return group_view {nullptr, 0};
+
+ // Members discovered during anything other than perform_update are only
+ // good for that operation. For example, we only return the static members
+ // ("representative sample") for perform_configure.
+ //
+ // We also re-discover the members on each update and clean not to
+ // overcomplicate the already twisted adhoc_buildscript_rule::apply()
+ // logic.
+ //
+ if (members_on != ctx.current_on)
+ {
+ if (members_action != perform_update_id ||
+ a == perform_update_id ||
+ a == perform_clean_id)
+ return group_view {nullptr, 0};
+ }
+
+ // Note that we may have no members (e.g., perform_configure and there are
+ // no static members). However, whether std::vector returns a non-NULL
+ // pointer in this case is undefined.
+ //
+ size_t n (members.size ());
+ return group_view {
+ n != 0
+ ? members.data ()
+ : reinterpret_cast<const target* const*> (this),
+ n};
+ }
+
+ const target_type group::static_type
+ {
+ "group",
+ &mtime_target::static_type,
+ &target_factory<group>,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ &target_search,
+ //
+ // Note that the dyn_members semantics is used not only to handle
+ // depdb-dyndep --dyn-target, but also pattern rule-static members.
+ //
+ target_type::flag::group | target_type::flag::dyn_members
+ };
+
+ // alias
+ //
static const target*
- alias_search (const target& t, const prerequisite_key& pk)
+ alias_search (context& ctx, const target* t, const prerequisite_key& pk)
{
// For an alias we don't want to silently create a target since it will do
- // nothing and it most likely not what the user intended.
+ // nothing and it is most likely not what the user intended (but omit this
+ // check when searching for an existing target since presumably a new one
+ // won't be created in this case).
//
// But, allowing implied aliases seems harmless since all the alias does
// is pull its prerequisites. And they are handy to use as metadata
// carriers.
//
- const target* e (search_existing_target (t.ctx, pk));
+ // Doesn't feel like an alias in the src tree makes much sense.
+ //
+ const target* e (search_existing_target (ctx, pk, true /* out_only */));
- if (e == nullptr || !(operator>= (e->decl, target_decl::implied)))
+ if ((e == nullptr ||
+ !(operator>= (e->decl, target_decl::implied))) && t != nullptr)
fail << "no explicit target for " << pk;
return e;
@@ -1253,7 +1370,7 @@ namespace build2
{
try
{
- for (const dir_entry& e: dir_iterator (d, true /* ignore_dangling */))
+ for (const dir_entry& e: dir_iterator (d, dir_iterator::detect_dangling))
{
switch (e.type ())
{
@@ -1271,6 +1388,16 @@ namespace build2
break;
}
+ case entry_type::unknown:
+ {
+ bool sl (e.ltype () == entry_type::symlink);
+
+ warn << "skipping "
+ << (sl ? "dangling symlink" : "inaccessible entry") << ' '
+ << d / e.path ();
+
+ break;
+ }
default:
break;
}
@@ -1292,17 +1419,26 @@ namespace build2
try
{
- for (const dir_entry& e: dir_iterator (d, true /* ignore_dangling */))
+ for (const dir_entry& e: dir_iterator (d, dir_iterator::detect_dangling))
{
if (e.type () == entry_type::directory)
+ {
r.push_back (
- prerequisite (nullopt,
- dir::static_type,
+ prerequisite (dir::static_type,
dir_path (e.path ().representation ()), // Relative.
dir_path (), // In the out tree.
string (),
nullopt,
bs));
+ }
+ else if (e.type () == entry_type::unknown)
+ {
+ bool sl (e.ltype () == entry_type::symlink);
+
+ warn << "skipping "
+ << (sl ? "dangling symlink" : "inaccessible entry") << ' '
+ << d / e.path ();
+ }
}
}
catch (const system_error& e)
@@ -1314,18 +1450,27 @@ namespace build2
}
static const target*
- dir_search (const target& t, const prerequisite_key& pk)
+ dir_search (context& ctx, const target* t, const prerequisite_key& pk)
{
tracer trace ("dir_search");
// The first step is like in alias_search(): looks for an existing target
// (but unlike alias, no implied, think `test/: install=false`).
//
- const target* e (search_existing_target (t.ctx, pk));
+ // Likewise, dir{} in the src tree doesn't make much sense.
+ //
+ const target* e (search_existing_target (ctx, pk, true /* out_only */));
if (e != nullptr && e->decl == target_decl::real)
return e;
+ // The search for an existing target can also be done during execute so
+ // none of the below code applies. Note: return implied instead of NULL
+ // (to be consistent with search_new(), for example).
+ //
+ if (t == nullptr)
+ return e;
+
// If not found (or is implied), then try to load the corresponding
// buildfile (which would normally define this target). Failed that, see
// if we can assume an implied buildfile which would be equivalent to:
@@ -1359,18 +1504,18 @@ namespace build2
//
bool retest (false);
- assert (t.ctx.phase == run_phase::match);
+ assert (ctx.phase == run_phase::match);
{
// Switch the phase to load.
//
- phase_switch ps (t.ctx, run_phase::load);
+ phase_switch ps (ctx, run_phase::load);
// This is subtle: while we were fussing around another thread may have
// loaded the buildfile. So re-test now that we are in an exclusive
// phase.
//
if (e == nullptr)
- e = search_existing_target (t.ctx, pk);
+ e = search_existing_target (ctx, pk, true);
if (e != nullptr && e->decl == target_decl::real)
retest = true;
@@ -1408,14 +1553,14 @@ namespace build2
}
}
- assert (t.ctx.phase == run_phase::match);
+ assert (ctx.phase == run_phase::match);
// If we loaded/implied the buildfile, examine the target again.
//
if (retest)
{
if (e == nullptr)
- e = search_existing_target (t.ctx, pk);
+ e = search_existing_target (ctx, pk, true);
if (e != nullptr && e->decl == target_decl::real)
return e;
@@ -1540,7 +1685,7 @@ namespace build2
nullptr,
#endif
nullptr,
- &file_search,
+ &file_search, // Note: can also be a script in src.
target_type::flag::none
};
@@ -1630,6 +1775,55 @@ namespace build2
target_type::flag::none
};
+ static const char*
+ buildscript_target_extension (const target_key& tk, const scope*)
+ {
+ // If the name is special 'buildscript', then there is no extension,
+ // otherwise it is .buildscript.
+ //
+ return *tk.name == "buildscript" ? "" : "buildscript";
+ }
+
+ static bool
+ buildscript_target_pattern (const target_type&,
+ const scope&,
+ string& v,
+ optional<string>& e,
+ const location& l,
+ bool r)
+ {
+ if (r)
+ {
+ assert (e);
+ e = nullopt;
+ }
+ else
+ {
+ e = target::split_name (v, l);
+
+ if (!e && v != "buildscript")
+ {
+ e = "buildscript";
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ const target_type buildscript::static_type
+ {
+ "buildscript",
+ &file::static_type,
+ &target_factory<buildscript>,
+ &buildscript_target_extension,
+ nullptr, /* default_extension */
+ &buildscript_target_pattern,
+ nullptr,
+ &file_search,
+ target_type::flag::none
+ };
+
const target_type doc::static_type
{
"doc",
diff --git a/libbuild2/target.hxx b/libbuild2/target.hxx
index 26c7208..20cd32d 100644
--- a/libbuild2/target.hxx
+++ b/libbuild2/target.hxx
@@ -4,8 +4,9 @@
#ifndef LIBBUILD2_TARGET_HXX
#define LIBBUILD2_TARGET_HXX
+#include <cstddef> // max_align_t
#include <iterator> // tags, etc.
-#include <type_traits> // aligned_storage
+#include <type_traits> // is_*
#include <unordered_map>
#include <libbutl/multi-index.hxx> // map_iterator_adapter
@@ -88,9 +89,15 @@ namespace build2
prerequisite_target (const target_type* t, bool a = false, uintptr_t d = 0)
: target (t), include (a ? include_adhoc : 0), data (d) {}
+ prerequisite_target (const target_type& t, bool a = false, uintptr_t d = 0)
+ : prerequisite_target (&t, a, d) {}
+
prerequisite_target (const target_type* t, include_type a, uintptr_t d = 0)
: prerequisite_target (t, a == include_type::adhoc, d) {}
+ prerequisite_target (const target_type& t, include_type a, uintptr_t d = 0)
+ : prerequisite_target (&t, a, d) {}
+
const target_type* target;
operator const target_type*& () {return target;}
@@ -110,8 +117,15 @@ namespace build2
// prerequisites that are updated during match should have this bit set
// (see dyndep_rule::*_existing_file() for details).
//
- static const uintptr_t include_adhoc = 0x01;
- static const uintptr_t include_udm = 0x02;
+ // target
+ //
+ // The data member contains the target pointer that has been "blanked
+ // out" for some reason (updated during match, unmatched, etc). See
+ // dyndep_rule::updated_during_match() for details.
+ //
+ static const uintptr_t include_adhoc = 0x01;
+ static const uintptr_t include_udm = 0x02;
+ static const uintptr_t include_target = 0x80;
uintptr_t include;
@@ -169,7 +183,112 @@ namespace build2
//
struct match_extra
{
- bool fallback; // True if matching a fallback rule (see match_rule()).
+ bool locked; // Normally true (see adhoc_rule::match() for background).
+ bool fallback; // True if matching a fallback rule (see match_rule_impl()).
+
+ // When matching a rule, the caller may wish to request a subset of the
+ // full functionality of performing the operation on the target. This is
+ // achieved with match options.
+ //
+ // Since the match caller normally has no control over which rule will be
+ // matched, the options are not specific to a particular rule. Rather,
+ // options are defined for performing a specific operation on a specific
+ // target type and would normally be part of the target type semantics.
+ // To put it another way, when a rule matches a target of a certain type
+ // for a certain operation, there is an expectation of certain semantics,
+ // some parts of which could be made optional.
+ //
+ // As a concrete example, consider installing libs{}, which traditionally
+ // has two parts: runtime (normally just the versioned shared library) and
+ // build-time (non-versioned symlinks, pkg-config files, headers, etc).
+ // The option to install only the runtime files is part of the bin::libs{}
+ // semantics, not of, say, cc::install_rule.
+ //
+ // The match options are specified as a uint64_t mask, which means there
+ // can be a maximum of 64 options per operation/target type. Options are
+ // opt-out rather than opt-in. That is, by default, all the options are
+ // enabled unless the match caller explicitly opted out of some
+ // functionality. Even if the caller opted out, there is no guarantee that
+ // the matching rule will honor this request (for example, because it is a
+ // user-provided ad hoc recipe). To put it another way, support for
+ // options is a quality of implementation matter.
+ //
+ // From the rule implementation's point of view, match options are handled as
+ // follows: On initial match()/apply(), cur_options is initialized to ~0
+ // (all options enabled) and the matching rule is expected to override it
+ // with new_options in apply() (note that match() should not base any
+ // decisions on new_options since they may change between match() and
+ // apply()). This way a rule that does not support any match options does
+ // not need to do anything. Subsequent match calls may add new options
+ // which causes a rematch that manifests in the rule's reapply() call. In
+ // reapply(), cur_options are the currently enabled options and
+ // new_options are the newly requested options. Here the rule is expected
+ // to factor new_options to cur_options as appropriate. Note also that on
+ // rematch, if current options already include new options, then no call
+ // to reapply() is made. This, in particular, means that a rule that does
+ // not adjust cur_options in match() will never get a reapply() call
+ // (because all the options are enabled from the start). Note that
+ // cur_options should only be modified in apply() or reapply().
+ //
+ // If a rematch is triggered after the rule has already been executed, an
+ // error is issued. This means that match options are not usable for
+ // operation/target types that could plausibly be executed during
+ // match. In particular, using match options for update and clean
+ // operations is a bad idea (update of pretty much any target can happen
+ // during match as a result of a tool update while clean might have to be
+ // performed during match to provide the mirror semantics).
+ //
+ // Note also that with rematches the assumption that in the match phase
+ // after matching the target we can MT-safely examine its state (such as
+ // its prerequisite_targets) no longer holds since such state could be
+ // modified during a rematch. As a result, if the target type specifies
+ // options for a certain operation, then you should not rely on this
+ // assumption for targets of this type during this operation.
+ //
+ // A rule that supports match options must also be prepared to handle the
+ // apply() call with new_options set to 0, for example, by using a
+ // minimally supported set of options instead. While 0 usually won't be
+ // passed by the match caller, this value is passed in the following
+ // circumstances:
+ //
+ // - match to resolve group (resolve_group())
+ // - match to resolve members (resolve_members())
+ // - match of ad hoc group via one of its ad hoc members
+ //
+ // Note that the 0 cur_options value is illegal.
+ //
+ // When it comes to match options specified for group members, the
+ // semantics differs between explicit and ad hoc groups. For explicit
+ // groups, the standard semantics described above applies and the group's
+ // reapply() function will be called both for the group itself as well as
+ // for its members and its the responsibility of the rule to decide what
+ // to do with the two sets of options (e.g., factor member's options into
+ // group's options, etc). For ad hoc groups, members are not matched to a
+ // rule but to the group_recipe directly (so there cannot be a call to
+ // reapply()). Currently, ad hoc group members cannot have options (more
+ // precisely, their options should always be ~0). An alternative semantics
+ // where the group rule is called to translate member options to group
+ // options may be implemented in the future (see match_impl_impl() for
+ // details).
+ //
+ // Note: match options are currently not exposed in Buildscript ad hoc
+ // recipes/rules (but are in C++).
+ //
+ static constexpr uint64_t all_options = ~uint64_t (0);
+
+ uint64_t cur_options;
+ uint64_t new_options;
+
+ atomic<uint64_t> cur_options_; // Implementation detail (see lock_impl()).
+
+ // The list of post hoc prerequisite targets for this target. Only not
+ // NULL in rule::apply_posthoc() and rule::reapply() functions and only if
+ // there are post hoc prerequisites. Primarily useful for adjusting match
+ // options for post hoc prerequisites (but can also be used to blank some
+ // of them out).
+ //
+ vector<context::posthoc_target::prerequisite_target>*
+ posthoc_prerequisite_targets;
// Auxiliary data storage.
//
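To make the opt-out protocol described above concrete, here is a standalone mock of the apply()/reapply() handling of cur_options and new_options (simplified stand-in types; opt_runtime and opt_buildtime are hypothetical option bits and this is not the actual build2 rule interface):

  #include <cstdint>

  struct mock_match_extra // Simplified stand-in, not the build2 class.
  {
    std::uint64_t cur_options;
    std::uint64_t new_options;
  };

  constexpr std::uint64_t opt_runtime   = 0x01; // Hypothetical option bits.
  constexpr std::uint64_t opt_buildtime = 0x02;

  // Initial apply(): override the all-enabled default with what was asked
  // for, falling back to a minimal supported subset if 0 was passed.
  //
  void
  apply (mock_match_extra& me)
  {
    me.cur_options = me.new_options != 0 ? me.new_options : opt_runtime;
  }

  // reapply(): a rematch can only add options, so fold the new ones in (and
  // arrange for whatever extra work they imply).
  //
  void
  reapply (mock_match_extra& me)
  {
    me.cur_options |= me.new_options;
  }

  int main ()
  {
    mock_match_extra me {~std::uint64_t (0), opt_runtime};
    apply (me);                     // cur_options == opt_runtime
    me.new_options = opt_buildtime;
    reapply (me);                   // cur_options == opt_runtime | opt_buildtime
  }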
@@ -189,7 +308,7 @@ namespace build2
? sizeof (string)
: sizeof (void*) * 4);
- std::aligned_storage<data_size>::type data_;
+ alignas (std::max_align_t) unsigned char data_[data_size];
void (*data_dtor_) (void*) = nullptr;
template <typename R,
@@ -236,9 +355,17 @@ namespace build2
// Implementation details.
//
+ // NOTE: see match_rule_impl() in algorithm.cxx if changing anything here.
+ //
public:
+ explicit
+ match_extra (bool l = true, bool f = false)
+ : locked (l), fallback (f),
+ cur_options (all_options), new_options (0),
+ posthoc_prerequisite_targets (nullptr) {}
+
void
- init (bool fallback);
+ reinit (bool fallback);
// Force freeing of the dynamically-allocated memory.
//
@@ -271,6 +398,10 @@ namespace build2
// fuzzy: they feel more `real` than `implied`. Maybe introduce
// `synthesized` in-between?
//
+ // @@ There are also now dynamically-discovered targets (ad hoc group
+ // members; see depdb-dyndep --dyn-target) which currently end up
+ // with prereq_new.
+ //
enum class target_decl: uint8_t
{
prereq_new = 1, // Created from prerequisite (create_new_target()).
@@ -369,9 +500,12 @@ namespace build2
//
// Note that the group-member link-up can happen anywhere between the
// member creation and rule matching so reading the group before the
- // member has been matched can be racy.
+ // member has been matched can be racy. However, once the member is linked
+ // up to the group, this relationship is immutable. As a result, one can
+ // atomically query the current value to see if already linked up (can be
+ // used as an optimization, to avoid deadlocks, etc).
//
- const target* group = nullptr;
+ relaxed_atomic<const target*> group = nullptr;
// What has been described above is an "explicit" group. That is, there is
// a dedicated target type that explicitly serves as a group and there is
@@ -404,7 +538,7 @@ namespace build2
// usually needed is to derive its path.
//
// - Unless declared, members are discovered lazily, they are only known
- // after the group's rule's apply() call.
+ // after the matching rule's apply() call.
//
// - Only declared members can be used as prerequisites but all can be
// used as targets (e.g., to set variables, etc).
@@ -434,7 +568,11 @@ namespace build2
// target for the ad hoc members (with a special target type that rules
// like install could recognize). See also the variable lookup semantics.
// We could also probably support see_through via an attribute or some
- // such.
+ // such. Or perhaps such cases should be handled through explicit groups
+ // and the ad hoc semantics is left to the non-see_through "primary
+ // targets with a bunch of subordinates" cases. In other words, if the
+ // members are "equal/symmetrical", then perhaps an explicit group is the
+ // correct approach.
//
const_ptr<target> adhoc_member = nullptr;
@@ -461,7 +599,8 @@ namespace build2
public:
// Normally you should not call this function directly and rather use
- // resolve_members() from <libbuild2/algorithm.hxx>.
+ // resolve_members() from <libbuild2/algorithm.hxx>. Note that action
+ // is always inner.
//
virtual group_view
group_members (action) const;
@@ -563,7 +702,8 @@ namespace build2
prerequisites () const;
// Swap-in a list of prerequisites. Return false if unsuccessful (i.e.,
- // someone beat us to it). Note that it can be called on const target.
+ // someone beat us to it), in which case the passed prerequisites are
+ // not moved. Note that it can be called on const target.
//
bool
prerequisites (prerequisites_type&&) const;
@@ -648,12 +788,14 @@ namespace build2
// If target_only is true, then only look in target and its target group
// without continuing in scopes. As an optimization, the caller can also
- // pass the base scope of the target, if already known.
+ // pass the base scope of the target, if already known. If locked is true,
+ // assume the targets mutex is locked.
//
pair<lookup_type, size_t>
lookup_original (const variable&,
bool target_only = false,
- const scope* bs = nullptr) const;
+ const scope* bs = nullptr,
+ bool locked = false) const;
// Return a value suitable for assignment. See scope for details.
//
@@ -663,11 +805,41 @@ namespace build2
value&
assign (const variable* var) {return vars.assign (var);} // For cached.
+ // Note: variable must already be entered.
+ //
+ value&
+ assign (const string& name)
+ {
+ return vars.assign (base_scope ().var_pool ().find (name));
+ }
+
// Return a value suitable for appending. See scope for details.
//
value&
- append (const variable&);
+ append (const variable&, const scope* bs = nullptr);
+ // Note: variable must already be entered.
+ //
+ value&
+ append (const string& name)
+ {
+ const scope& bs (base_scope ());
+ return append (*bs.var_pool ().find (name), &bs);
+ }
+
+ // As above but assume the targets mutex is locked.
+ //
+ value&
+ append_locked (const variable&, const scope* bs = nullptr);
+
+ // Note: variable must already be entered.
+ //
+ value&
+ append_locked (const string& name)
+ {
+ const scope& bs (base_scope ());
+ return append_locked (*bs.var_pool ().find (name), &bs);
+ }
// Rule hints.
//
@@ -737,7 +909,11 @@ namespace build2
//
mutable atomic_count dependents {0};
- // Match state storage between the match() and apply() calls.
+ // Match state storage between the match() and apply() calls with only
+ // the *_options members extended to reapply().
+ //
+ // Note: in reality, cur_options are used beyond (re)apply() as an
+ // implementation detail.
//
build2::match_extra match_extra;
@@ -761,6 +937,12 @@ namespace build2
//
target_state state;
+ // Set to true (only for the inner action) if this target has been
+ // matched but not executed as a result of the resolve_members() call.
+ // See also context::resolve_count.
+ //
+ bool resolve_counted;
+
// Rule-specific variables.
//
// The rule (for this action) has to be matched before these variables
@@ -843,8 +1025,11 @@ namespace build2
// Return true if the target has been matched for the specified action.
// This function can only be called during the match or execute phases.
//
+ // If you need to observe something in the matched target (e.g., the
+ // matched rule or recipe), use memory_order_acquire.
+ //
bool
- matched (action) const;
+ matched (action, memory_order mo = memory_order_relaxed) const;
// This function can only be called during match if we have observed
// (synchronization-wise) that this target has been matched (i.e., the
@@ -873,6 +1058,12 @@ namespace build2
target_state
executed_state (action, bool fail = true) const;
+ // Return true if the state comes from the group. Target must be at least
+ // matched except for ad hoc group members during the execute phase.
+ //
+ bool
+ group_state (action) const;
+
protected:
// Version that should be used during match after the target has been
// matched for this action.
@@ -889,24 +1080,28 @@ namespace build2
target_state
executed_state_impl (action) const;
- // Return true if the state comes from the group. Target must be at least
- // matched.
- //
- bool
- group_state (action) const;
-
public:
// Targets to which prerequisites resolve for this action. Note that
// unlike prerequisite::target, these can be resolved to group members.
// NULL means the target should be skipped (or the rule may simply not add
// such a target to the list).
//
- // Note also that it is possible the target can vary from action to
- // action, just like recipes. We don't need to keep track of the action
- // here since the targets will be updated if the recipe is updated,
- // normally as part of rule::apply().
- //
- // Note that the recipe may modify this list.
+ // A rule should make sure that the target's prerequisite_targets are in
+ // the "canonical" form (that is, all the prerequisites that need to be
+ // executed are present with prerequisite_target::target pointing to the
+ // corresponding target). This is relied upon in a number of places,
+ // including in dump and to be able to pretend-execute the operation on
+ // this target without actually calling the recipe (see perform_execute(),
+ // resolve_members_impl() for background). Note that a rule should not
+ // store targets that are semantically prerequisites in an ad hoc manner
+ // (e.g., in match data) with a few well-known execeptions (see
+ // group_recipe and inner_recipe).
+ //
+ // Note that the recipe may modify this list during execute. Normally this
+ // would be just blanking out of ad hoc prerequisites, in which case check
+ // for ad hoc first and for not NULL second if accessing prerequisites of
+ // targets that you did not execute (see the library metadata protocol in
+ // cc for an example).
//
mutable action_state<build2::prerequisite_targets> prerequisite_targets;
@@ -1024,13 +1219,28 @@ namespace build2
}
template <typename T>
- typename std::enable_if<!data_invocable<T>::value, T&>::type&
+ typename std::enable_if<!data_invocable<T>::value, T&>::type
data (action a) const
{
using V = typename std::remove_cv<T>::type;
return state[a].recipe.target<data_wrapper<V>> ()->d;
}
+ // Return NULL if there is no data or the data is of a different type.
+ //
+ template <typename T>
+ typename std::enable_if<!data_invocable<T>::value, T*>::type
+ try_data (action a) const
+ {
+ using V = typename std::remove_cv<T>::type;
+
+ if (auto& r = state[a].recipe)
+ if (auto* t = r.target<data_wrapper<V>> ())
+ return &t->d;
+
+ return nullptr;
+ }
+
// Note that in this case we don't strip const (the expectation is that we
// move the recipe in/out of data).
//
@@ -1055,18 +1265,18 @@ namespace build2
}
template <typename T>
- typename std::enable_if<data_invocable<T>::value, T&>::type&
+ typename std::enable_if<data_invocable<T>::value, T&>::type
data (action a) const
{
return *state[a].recipe.target<T> ();
}
- void
- clear_data (action a) const
+ template <typename T>
+ typename std::enable_if<data_invocable<T>::value, T*>::type
+ try_data (action a) const
{
- const opstate& s (state[a]);
- s.recipe = nullptr;
- s.recipe_keep = false;
+ auto& r = state[a].recipe;
+ return r ? r.target<T> () : nullptr;
}
// Target type info and casting.
@@ -1446,9 +1656,7 @@ namespace build2
}
include_type
- include (action, const target&,
- const prerequisite_member&,
- lookup* = nullptr);
+ include (action, const target&, const prerequisite_member&, lookup* = nullptr);
// A "range" that presents a sequence of prerequisites (e.g., from
// group_prerequisites()) as a sequence of prerequisite_member's. For each
@@ -1624,8 +1832,7 @@ namespace build2
group_view g_;
size_t j_; // 1-based index, to support enter_group().
const target* k_; // Current member of ad hoc group or NULL.
- mutable typename std::aligned_storage<sizeof (value_type),
- alignof (value_type)>::type m_;
+ alignas (value_type) mutable unsigned char m_[sizeof (value_type)];
};
iterator
@@ -1896,7 +2103,7 @@ namespace build2
dynamic_type = &static_type;
}
- // Modification time is an "atomic cash". That is, it can be set at any
+ // Modification time is an "atomic cache". That is, it can be set at any
// time (including on a const instance) and we assume everything will be
// ok regardless of the order in which racing updates happen because we do
// not modify the external state (which is the source of timestamps) while
@@ -1929,8 +2136,7 @@ namespace build2
// If the mtime is unknown, then load it from the filesystem also caching
// the result.
//
- // Note: can only be called during executing and must not be used if the
- // target state is group.
+ // Note: must not be used if the target state is group.
//
timestamp
load_mtime (const path&) const;
@@ -1991,7 +2197,7 @@ namespace build2
// Target path. Must be absolute and normalized.
//
- // Target path is an "atomic consistent cash". That is, it can be set at
+ // Target path is an "atomic consistent cache". That is, it can be set at
// any time (including on a const instance) but any subsequent updates
// must set the same path. Or, in other words, once the path is set, it
// never changes.
@@ -2144,6 +2350,54 @@ namespace build2
static const target_type static_type;
};
+ // Mtime-based group target.
+ //
+ // Used to support explicit groups in buildfiles: can be derived from,
+ // populated with static members using the group{foo}<...> syntax, and
+ // matched with an ad hoc recipe/rule, including dynamic member extraction.
+ // Note that it is not see-through but a derived group can be made see-
+ // through via the [see_through] attribute.
+ //
+ // Note also that you shouldn't use it as a base for a custom group defined
+ // in C++, instead deriving from mtime_target directly and using a custom
+ // members layout more appropriate for the group's semantics. To put it
+ // another way, a group-based target should only be matched by an ad hoc
+ // recipe/rule (see match_rule_impl() in algorithm.cxx for details).
+ //
+ class LIBBUILD2_SYMEXPORT group: public mtime_target
+ {
+ public:
+ vector<reference_wrapper<const target>> static_members;
+
+ // Note: we expect no NULL entries in members.
+ //
+ vector<const target*> members; // Layout compatible with group_view.
+ action members_action; // Action on which members were resolved.
+ size_t members_on = 0; // Operation number on which members were resolved.
+ size_t members_static; // Number of static ones in members (always first).
+
+ void
+ reset_members (action a)
+ {
+ members.clear ();
+ members_action = a;
+ members_on = ctx.current_on;
+ members_static = 0;
+ }
+
+ virtual group_view
+ group_members (action) const override;
+
+ group (context& c, dir_path d, dir_path o, string n)
+ : mtime_target (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
+
+ public:
+ static const target_type static_type;
+ };
+
// Alias target. It represents a list of targets (its prerequisites)
// as a single "name".
//
@@ -2270,6 +2524,22 @@ namespace build2
static const target_type static_type;
};
+ // This target type is primarily used for files mentioned in the `recipe`
+ // directive.
+ //
+ class LIBBUILD2_SYMEXPORT buildscript: public file
+ {
+ public:
+ buildscript (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
+
+ public:
+ static const target_type static_type;
+ };
+
// Common documentation file target.
//
class LIBBUILD2_SYMEXPORT doc: public file
@@ -2332,7 +2602,7 @@ namespace build2
// in the generic install rule. @@ This is still a TODO.
//
// Note that handling subsections with man1..9{} is easy, we
- // simply specify the extension explicitly, e.g., man{foo.1p}.
+ // simply specify the extension explicitly, e.g., man1{foo.1p}.
//
class LIBBUILD2_SYMEXPORT man: public doc
{
@@ -2420,7 +2690,7 @@ namespace build2
string&, optional<string>&, const location&,
bool);
- // Target print functions.
+ // Target print functions (target_type::print).
//
// Target type uses the extension but it is fixed and there is no use
@@ -2435,17 +2705,24 @@ namespace build2
LIBBUILD2_SYMEXPORT bool
target_print_1_ext_verb (ostream&, const target_key&, bool);
+ // Target search functions (target_type::search).
+ //
+
// The default behavior, that is, look for an existing target in the
// prerequisite's directory scope.
//
+ // Note that this implementation assumes a target can only be found in the
+ // out tree (targets that can be in the src tree would normally use
+ // file_search() below).
+ //
LIBBUILD2_SYMEXPORT const target*
- target_search (const target&, const prerequisite_key&);
+ target_search (context&, const target*, const prerequisite_key&);
- // First look for an existing target as above. If not found, then look
- // for an existing file in the target-type-specific list of paths.
+ // First look for an existing target both in out and src. If not found, then
+ // look for an existing file in src.
//
LIBBUILD2_SYMEXPORT const target*
- file_search (const target&, const prerequisite_key&);
+ file_search (context&, const target*, const prerequisite_key&);
}
#include <libbuild2/target.ixx>
diff --git a/libbuild2/target.ixx b/libbuild2/target.ixx
index 899e829..39b81e7 100644
--- a/libbuild2/target.ixx
+++ b/libbuild2/target.ixx
@@ -136,10 +136,13 @@ namespace build2
// match_extra
//
inline void match_extra::
- init (bool f)
+ reinit (bool f)
{
clear_data ();
fallback = f;
+ cur_options = all_options;
+ new_options = 0;
+ posthoc_prerequisite_targets = nullptr;
}
inline void match_extra::
@@ -235,22 +238,30 @@ namespace build2
}
inline bool target::
- matched (action a) const
+ matched (action a, memory_order mo) const
{
assert (ctx.phase == run_phase::match ||
ctx.phase == run_phase::execute);
const opstate& s (state[a]);
- size_t c (s.task_count.load (memory_order_relaxed) - ctx.count_base ());
+ size_t c (s.task_count.load (mo));
+ size_t b (ctx.count_base ()); // Note: cannot do (c - b)!
if (ctx.phase == run_phase::match)
{
- // While it will normally be applied, it could also be already executed.
+ // While it will normally be applied, it could also be already executed
+ // or being relocked to reapply match options (see lock_impl() for
+ // background).
//
- // Note that we can't do >= offset_applied since offset_busy means it is
- // being matched.
+ // Note that we can't just do >= offset_applied since offset_busy can
+ // also mean it is being matched.
//
- return c == offset_applied || c == offset_executed;
+ // See also matched_state_impl(), mtime() for similar logic.
+ //
+ return (c == (b + offset_applied) ||
+ c == (b + offset_executed) ||
+ (c >= (b + offset_busy) &&
+ s.match_extra.cur_options_.load (memory_order_relaxed) != 0));
}
else
{
@@ -258,13 +269,10 @@ namespace build2
// least offset_matched since it must have been "achieved" before the
// phase switch.
//
- return c >= offset_matched;
+ return c >= (b + offset_matched);
}
}
- LIBBUILD2_SYMEXPORT target_state
- group_action (action, const target&); // <libbuild2/algorithm.hxx>
-
inline bool target::
group_state (action a) const
{
@@ -278,6 +286,19 @@ namespace build2
// @@ Hm, I wonder why not just return s.recipe_group_action now that we
// cache it.
//
+
+ // This special hack allows us to do things like query an ad hoc member's
+ // state or mtime without matching/executing the member, only the group.
+ // Requiring matching/executing the member would be too burdensome and
+ // this feels harmless (ad hoc membership cannot be changed during the
+ // execute phase).
+ //
+ // Note: this test must come first since the member may not be matched and
+ // thus its state uninitialized.
+ //
+ if (ctx.phase == run_phase::execute && adhoc_group_member ())
+ return true;
+
const opstate& s (state[a]);
if (s.state == target_state::group)
@@ -298,17 +319,22 @@ namespace build2
// Note: already synchronized.
//
- size_t o (s.task_count.load (memory_order_relaxed) - ctx.count_base ());
+ size_t c (s.task_count.load (memory_order_relaxed));
+ size_t b (ctx.count_base ()); // Note: cannot do (c - b)!
- if (o == offset_tried)
+ if (c == (b + offset_tried))
return make_pair (false, target_state::unknown);
else
{
- // Normally applied but can also be already executed. Note that in the
- // latter case we are guaranteed to be synchronized since we are in the
- // match phase.
+ // The same semantics as in target::matched(). Note that in the executed
+ // case we are guaranteed to be synchronized since we are in the match
+ // phase.
//
- assert (o == offset_applied || o == offset_executed);
+ assert (c == (b + offset_applied) ||
+ c == (b + offset_executed) ||
+ (c >= (b + offset_busy) &&
+ s.match_extra.cur_options_.load (memory_order_relaxed) != 0));
+
return make_pair (true, (group_state (a) ? group->state[a] : s).state);
}
}
@@ -720,8 +746,13 @@ namespace build2
inline timestamp mtime_target::
load_mtime (const path& p) const
{
- assert (ctx.phase == run_phase::execute &&
- !group_state (action () /* inner */));
+ // We can only enforce "not group state" during the execute phase. During
+ // match (e.g., the target is being matched), we will just have to pay
+ // attention.
+ //
+ assert (ctx.phase == run_phase::match ||
+ (ctx.phase == run_phase::execute &&
+ !group_state (action () /* inner */)));
duration::rep r (mtime_.load (memory_order_consume));
if (r == timestamp_unknown_rep)
@@ -738,6 +769,8 @@ namespace build2
inline bool mtime_target::
newer (timestamp mt, target_state s) const
{
+ assert (s != target_state::unknown); // Should be executed.
+
timestamp mp (mtime ());
// What do we do if timestamps are equal? This can happen, for example,
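
The "cannot do (c - b)" notes above presumably guard against unsigned wraparound: a target that has not been touched in the current operation can still carry a task count from a previous one, that is, a value below the current count base, so subtracting the base first would wrap around and satisfy the offset checks. A minimal standalone illustration (plain C++, not build2 code):

  #include <cassert>
  #include <cstddef>

  int main ()
  {
    std::size_t base (1000);          // Current operation's count base.
    std::size_t offset_matched (2);

    // A stale task count left over from a previous operation, i.e., below
    // the current base.
    //
    std::size_t c (400);

    // Subtract-then-compare wraps around and yields a false positive...
    //
    assert (c - base >= offset_matched);

    // ...while comparing against the shifted offset does not.
    //
    assert (!(c >= base + offset_matched));
  }
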
diff --git a/libbuild2/test/common.cxx b/libbuild2/test/common.cxx
index 7fdb347..89f3dd6 100644
--- a/libbuild2/test/common.cxx
+++ b/libbuild2/test/common.cxx
@@ -150,8 +150,7 @@ namespace build2
t.name == n->value && // Name matches.
tt.name == n->type && // Target type matches.
d == n->dir && // Directory matches.
- (search_existing (*n, *root_) == &t ||
- search_existing (*n, *root_, d) == &t);
+ search_existing (*n, *root_) == &t;
if (r)
break;
@@ -198,8 +197,7 @@ namespace build2
t.name == n->value &&
tt.name == n->type &&
d == n->dir &&
- (search_existing (*n, *root_) == &t ||
- search_existing (*n, *root_, d) == &t);
+ search_existing (*n, *root_) == &t;
if (!r)
continue; // Not our target.
diff --git a/libbuild2/test/init.cxx b/libbuild2/test/init.cxx
index b7cf25f..32548f4 100644
--- a/libbuild2/test/init.cxx
+++ b/libbuild2/test/init.cxx
@@ -23,6 +23,8 @@ namespace build2
{
namespace test
{
+ static const file_rule file_rule_ (true /* check_type */);
+
void
boot (scope& rs, const location&, module_boot_extra& extra)
{
@@ -300,18 +302,18 @@ namespace build2
{
default_rule& dr (m);
- // Note: register for mtime_target to take priority over the fallback
- // rule below.
- //
- rs.insert_rule<target> (perform_test_id, "test", dr);
- rs.insert_rule<mtime_target> (perform_test_id, "test", dr);
- rs.insert_rule<alias> (perform_test_id, "test", dr);
+ rs.insert_rule<target> (perform_test_id, "test", dr);
+ rs.insert_rule<alias> (perform_test_id, "test", dr);
// Register the fallback file rule for the update-for-test operation,
// similar to update.
//
- rs.global_scope ().insert_rule<mtime_target> (
- perform_test_id, "test.file", file_rule::instance);
+ // Note: use target instead of anything more specific (such as
+ // mtime_target) in order not to take precedence over the "test" rule
+ // above.
+ //
+ rs.global_scope ().insert_rule<target> (
+ perform_test_id, "test.file", file_rule_);
}
return true;
diff --git a/libbuild2/test/operation.cxx b/libbuild2/test/operation.cxx
index 841abb5..2535adb 100644
--- a/libbuild2/test/operation.cxx
+++ b/libbuild2/test/operation.cxx
@@ -17,14 +17,8 @@ namespace build2
namespace test
{
static operation_id
- test_pre (context&,
- const values& params,
- meta_operation_id mo,
- const location& l)
+ pre_test (context&, const values&, meta_operation_id mo, const location&)
{
- if (!params.empty ())
- fail (l) << "unexpected parameters for operation test";
-
// Run update as a pre-operation, unless we are disfiguring.
//
return mo != disfigure_id ? update_id : 0;
@@ -70,7 +64,9 @@ namespace build2
"has nothing to test", // We cannot "be tested".
execution_mode::first,
1 /* concurrency */,
- &test_pre,
+ &pre_test,
+ nullptr,
+ nullptr,
nullptr,
nullptr,
&adhoc_apply
@@ -90,6 +86,8 @@ namespace build2
op_update.concurrency,
op_update.pre_operation,
op_update.post_operation,
+ op_update.operation_pre,
+ op_update.operation_post,
op_update.adhoc_match,
op_update.adhoc_apply
};
diff --git a/libbuild2/test/rule.cxx b/libbuild2/test/rule.cxx
index 0ee7641..28eb35b 100644
--- a/libbuild2/test/rule.cxx
+++ b/libbuild2/test/rule.cxx
@@ -563,22 +563,22 @@ namespace build2
{
scope_state& r (res.back ());
- if (!ctx.sched.async (ctx.count_busy (),
- t[a].task_count,
- [this] (const diag_frame* ds,
- scope_state& r,
- const target& t,
- const testscript& ts,
- const dir_path& wd)
- {
- diag_frame::stack_guard dsg (ds);
- r = perform_script_impl (t, ts, wd, *this);
- },
- diag_frame::stack (),
- ref (r),
- cref (t),
- cref (ts),
- cref (wd)))
+ if (!ctx.sched->async (ctx.count_busy (),
+ t[a].task_count,
+ [this] (const diag_frame* ds,
+ scope_state& r,
+ const target& t,
+ const testscript& ts,
+ const dir_path& wd)
+ {
+ diag_frame::stack_guard dsg (ds);
+ r = perform_script_impl (t, ts, wd, *this);
+ },
+ diag_frame::stack (),
+ ref (r),
+ cref (t),
+ cref (ts),
+ cref (wd)))
{
// Executed synchronously. If failed and we were not asked to
// keep going, bail out.
@@ -668,6 +668,16 @@ namespace build2
//
bool terminated = false;
+ // True if this process has been terminated but we failed to read out
+ // its stderr stream in the reasonable timeframe (2 seconds) after the
+ // termination.
+ //
+ // Note that this may happen if there is a still running child process
+ // of the terminated process which has inherited the parent's stderr
+ // file descriptor.
+ //
+ bool unread_stderr = false;
+
pipe_process* prev; // NULL for the left-most program.
pipe_process* next; // Left-most program for the right-most program.
@@ -780,10 +790,14 @@ namespace build2
// Read out all the pipeline's buffered stderr streams watching for
// the deadline, if specified. If the deadline is reached, then
- // terminate the whole pipeline, reset the deadline to nullopt, and
- // continue reading. Note that the further reading will be performed
- // without timeout. This, however, is fine since all the processes are
- // terminated and we only need to read out the buffered data.
+ // terminate the whole pipeline, move the deadline by another 2
+ // seconds, and continue reading.
+ //
+ // Note that we assume that this timeout increment is normally
+ // sufficient to read out the buffered data written by the already
+ // terminated processes. If, however, that's not the case (see
+ // pipe_process for the possible reasons), then we just set
+ // unread_stderr flag to true for such processes and bail out.
//
// Also note that this implementation is inspired by the
// script::run_pipe::read_pipe() lambda.
@@ -800,6 +814,7 @@ namespace build2
}
optional<timestamp> dl (deadline);
+ bool terminated (false);
for (size_t unread (fds.size ()); unread != 0;)
{
@@ -814,9 +829,38 @@ namespace build2
if (*dl <= now || ifdselect (fds, *dl - now) == 0)
{
- term_pipe (&pp);
- dl = nullopt;
- continue;
+ if (!terminated)
+ {
+ term_pipe (&pp);
+ terminated = true;
+
+ dl = system_clock::now () + chrono::seconds (2);
+ continue;
+ }
+ else
+ {
+ for (fdselect_state& s: fds)
+ {
+ if (s.fd != nullfd)
+ {
+ pipe_process* p (static_cast<pipe_process*> (s.data));
+
+ p->unread_stderr = true;
+
+ // Let's also close the stderr stream not to confuse
+ // diag_buffer::close() (see script::read() for
+ // details).
+ //
+ try
+ {
+ p->dbuf.is.close ();
+ }
+ catch (const io_error&) {}
+ }
+ }
+
+ break;
+ }
}
}
else
@@ -869,9 +913,9 @@ namespace build2
// Iterate over the pipeline processes left to right, printing their
// stderr if buffered and issuing the diagnostics if the exit code is
- // not available (terminated abnormally or due to a deadline) or is
- // non-zero. Afterwards, fail if any of the processes didn't terminate
- // normally with zero code.
+ // not available (terminated abnormally or due to a deadline), is
+ // non-zero, or stderr was not fully read. Afterwards, fail if any
+ // such faulty processes were encountered.
//
// Note that we only issue diagnostics for the first failure.
//
@@ -920,18 +964,44 @@ namespace build2
{
diag_record dr;
- if (!pe || !pe->normal () || pe->code () != 0)
+ // Note that there can be a race, so that the process we have
+ // terminated due to reaching the deadline has in fact exited
+ // normally. Thus, the 'unread stderr' situation can also happen
+ // to a successfully terminated process. If that's the case, we report
+ // this problem as the main error, and as a secondary error otherwise.
+ //
+ if (!pe ||
+ !pe->normal () ||
+ pe->code () != 0 ||
+ p->unread_stderr)
{
fail = true;
dr << error << "test " << t << " failed" // Multi test: test 1.
<< error << "process " << p->args[0] << ' ';
- if (pe)
- dr << *pe;
- else
+ if (!pe)
+ {
dr << "terminated: execution timeout expired";
+ if (p->unread_stderr)
+ dr << error << "stderr not closed after exit";
+ }
+ else if (!pe->normal () || pe->code () != 0)
+ {
+ dr << *pe;
+
+ if (p->unread_stderr)
+ dr << error << "stderr not closed after exit";
+ }
+ else
+ {
+ assert (p->unread_stderr);
+
+ dr << "stderr not closed after exit";
+ }
+
if (verb == 1)
{
dr << info << "test command line: ";
@@ -1091,7 +1161,7 @@ namespace build2
fail << "invalid test executable override: '" << *n << "'";
else
{
- // Must be a target name.
+ // Must be a target name. Could be from src (e.g., a script).
//
// @@ OUT: what if this is a @-qualified pair of names?
//
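
The deadline handling above follows a two-stage pattern: on the first expiration the whole pipeline is terminated and the deadline is extended by a 2-second grace period so that already-buffered stderr output can still be read; on the second expiration the remaining streams are abandoned and unread_stderr is set. A minimal sketch of that pattern, using hypothetical poll_readable() and terminate_all() callbacks in place of ifdselect() and term_pipe() (illustration only, not the actual implementation):

  #include <chrono>
  #include <functional>
  #include <vector>

  // Drain a set of descriptors until they are all done or the deadline
  // (plus a 2-second grace period after termination) expires. Returns false
  // if some output had to be abandoned (the unread_stderr case above).
  //
  static bool
  drain_with_deadline (
    std::vector<int>& fds,
    std::chrono::steady_clock::time_point deadline,
    const std::function<bool (std::vector<int>&,
                              std::chrono::steady_clock::duration)>& poll_readable,
    const std::function<void ()>& terminate_all)
  {
    using namespace std::chrono;

    bool terminated (false);

    while (!fds.empty ()) // poll_readable() removes drained descriptors.
    {
      steady_clock::time_point now (steady_clock::now ());

      if (deadline <= now || !poll_readable (fds, deadline - now))
      {
        if (!terminated)
        {
          // First expiration: kill the pipeline but keep reading for a bit
          // to pick up the already-buffered output.
          //
          terminate_all ();
          terminated = true;
          deadline = steady_clock::now () + seconds (2);
          continue;
        }

        return false; // Second expiration: give up on what's left.
      }
    }

    return true;
  }
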
diff --git a/libbuild2/test/script/lexer.cxx b/libbuild2/test/script/lexer.cxx
index b470d25..aec91fc 100644
--- a/libbuild2/test/script/lexer.cxx
+++ b/libbuild2/test/script/lexer.cxx
@@ -339,15 +339,17 @@ namespace build2
}
token lexer::
- word (state st, bool sep)
+ word (const state& st, bool sep)
{
- lexer_mode m (st.mode);
+ lexer_mode m (st.mode); // Save.
token r (base_lexer::word (st, sep));
if (m == lexer_mode::variable)
{
- if (r.value.size () == 1 && digit (r.value[0])) // $N
+ if (r.type == type::word &&
+ r.value.size () == 1 &&
+ digit (r.value[0])) // $N
{
xchar c (peek ());
diff --git a/libbuild2/test/script/lexer.hxx b/libbuild2/test/script/lexer.hxx
index 993a9db..39b950a 100644
--- a/libbuild2/test/script/lexer.hxx
+++ b/libbuild2/test/script/lexer.hxx
@@ -77,7 +77,7 @@ namespace build2
next_description ();
virtual token
- word (state, bool) override;
+ word (const state&, bool) override;
};
}
}
diff --git a/libbuild2/test/script/parser.cxx b/libbuild2/test/script/parser.cxx
index 60656a1..b712c21 100644
--- a/libbuild2/test/script/parser.cxx
+++ b/libbuild2/test/script/parser.cxx
@@ -1831,19 +1831,19 @@ namespace build2
// UBSan workaround.
//
const diag_frame* df (diag_frame::stack ());
- if (!ctx->sched.async (task_count,
- [] (const diag_frame* ds,
- scope& s,
- script& scr,
- runner& r)
- {
- diag_frame::stack_guard dsg (ds);
- execute_impl (s, scr, r);
- },
- df,
- ref (*chain),
- ref (*script_),
- ref (*runner_)))
+ if (!ctx->sched->async (task_count,
+ [] (const diag_frame* ds,
+ scope& s,
+ script& scr,
+ runner& r)
+ {
+ diag_frame::stack_guard dsg (ds);
+ execute_impl (s, scr, r);
+ },
+ df,
+ ref (*chain),
+ ref (*script_),
+ ref (*runner_)))
{
// Bail out if the scope has failed and we weren't instructed
// to keep going.
diff --git a/libbuild2/test/script/script.cxx b/libbuild2/test/script/script.cxx
index 05dc7b0..f7827f6 100644
--- a/libbuild2/test/script/script.cxx
+++ b/libbuild2/test/script/script.cxx
@@ -268,7 +268,7 @@ namespace build2
v = path (n->dir);
else
{
- // Must be a target name.
+ // Must be a target name. Could be from src (e.g., a script).
//
// @@ OUT: what if this is a @-qualified pair of names?
//
diff --git a/libbuild2/token.cxx b/libbuild2/token.cxx
index ab14388..cc102cc 100644
--- a/libbuild2/token.cxx
+++ b/libbuild2/token.cxx
@@ -29,21 +29,30 @@ namespace build2
os << (r ? "\n" : "<newline>");
break;
}
- case token_type::pair_separator:
+ case token_type::word:
{
if (r)
- os << t.value[0];
+ os << t.value;
else
- os << "<pair separator " << t.value[0] << ">";
+ os << '\'' << t.value << '\'';
break;
}
- case token_type::word:
+ case token_type::escape:
{
if (r)
- os << t.value;
+ os << '\\' << t.value;
else
- os << '\'' << t.value << '\'';
+ os << "<escape sequence \\" << t.value << ">";
+
+ break;
+ }
+ case token_type::pair_separator:
+ {
+ if (r)
+ os << t.value[0];
+ else
+ os << "<pair separator " << t.value[0] << ">";
break;
}
diff --git a/libbuild2/token.hxx b/libbuild2/token.hxx
index fca888c..f9ede65 100644
--- a/libbuild2/token.hxx
+++ b/libbuild2/token.hxx
@@ -30,6 +30,7 @@ namespace build2
eos,
newline,
word,
+ escape, // token::value is <...> in $\<...>
pair_separator, // token::value[0] is the pair separator char.
colon, // :
@@ -159,16 +160,13 @@ namespace build2
token (string v, bool s,
quote_type qt, bool qc, bool qf,
uint64_t l, uint64_t c)
- : token (token_type::word, move (v), s,
- qt, qc, qf,
- l, c,
- &token_printer) {}
+ : token (token_type::word, move (v), s, qt, qc, qf, l, c) {}
token (token_type t,
string v, bool s,
quote_type qt, bool qc, bool qf,
uint64_t l, uint64_t c,
- printer_type* p)
+ printer_type* p = &token_printer)
: type (t), separated (s),
qtype (qt), qcomp (qc), qfirst (qf),
value (move (v)),
diff --git a/libbuild2/types-parsers.cxx b/libbuild2/types-parsers.cxx
index d220541..9c3dc52 100644
--- a/libbuild2/types-parsers.cxx
+++ b/libbuild2/types-parsers.cxx
@@ -52,6 +52,24 @@ namespace build2
parse_path (x, s);
}
+ static names
+ parse_names (const char* o, const char* v)
+ {
+ using build2::parser;
+ using std::istringstream;
+
+ istringstream is (v);
+ is.exceptions (istringstream::failbit | istringstream::badbit);
+
+ // @@ TODO: currently this issues diagnostics to diag_stream.
+ // Perhaps we should redirect it? Also below.
+ //
+ path_name in (o);
+ lexer l (is, in, 1 /* line */, "\'\"\\$("); // Effective.
+ parser p (nullptr);
+ return p.parse_names (l, nullptr, parser::pattern_mode::preserve);
+ }
+
void parser<name>::
parse (name& x, bool& xs, scanner& s)
{
@@ -64,19 +82,7 @@ namespace build2
try
{
- using build2::parser;
- using std::istringstream;
-
- istringstream is (v);
- is.exceptions (istringstream::failbit | istringstream::badbit);
-
- // @@ TODO: currently this issues diagnostics to diag_stream.
- // Perhaps we should redirect it?
- //
- path_name in (o);
- lexer l (is, in, 1 /* line */, "\'\"\\$("); // Effective.
- parser p (nullptr);
- names r (p.parse_names (l, nullptr, parser::pattern_mode::preserve));
+ names r (parse_names (o, v));
if (r.size () != 1)
throw invalid_value (o, v);
@@ -90,6 +96,41 @@ namespace build2
}
}
+ void parser<pair<name, optional<name>>>::
+ parse (pair<name, optional<name>>& x, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (!s.more ())
+ throw missing_value (o);
+
+ const char* v (s.next ());
+
+ try
+ {
+ names r (parse_names (o, v));
+
+ if (r.size () == 1)
+ {
+ x.first = move (r.front ());
+ x.second = nullopt;
+ }
+ else if (r.size () == 2 && r.front ().pair == '@')
+ {
+ x.first = move (r.front ());
+ x.second = move (r.back ());
+ }
+ else
+ throw invalid_value (o, v);
+
+ xs = true;
+ }
+ catch (const failed&)
+ {
+ throw invalid_value (o, v);
+ }
+ }
+
void parser<structured_result_format>::
parse (structured_result_format& x, bool& xs, scanner& s)
{
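
For reference, the two option value forms this parser accepts are a single name ("foo") and an '@'-separated pair ("foo@bar"). A simplified standalone sketch of that shape, using a hypothetical split_pair() helper (the real implementation above goes through parse_names() and name::pair, which also handles quoting and more complex names):

  #include <cstddef>
  #include <optional>
  #include <string>
  #include <utility>

  // Illustration only: split "first[@second]" into its components.
  //
  static std::pair<std::string, std::optional<std::string>>
  split_pair (const std::string& v)
  {
    std::size_t p (v.find ('@'));

    if (p == std::string::npos)
      return {v, std::nullopt};                    // "foo"     -> (foo, -)

    return {v.substr (0, p), v.substr (p + 1)};    // "foo@bar" -> (foo, bar)
  }
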
diff --git a/libbuild2/types-parsers.hxx b/libbuild2/types-parsers.hxx
index ebd2a02..42fc60d 100644
--- a/libbuild2/types-parsers.hxx
+++ b/libbuild2/types-parsers.hxx
@@ -54,6 +54,17 @@ namespace build2
};
template <>
+ struct parser<pair<name, optional<name>>>
+ {
+ static void
+ parse (pair<name, optional<name>>&, bool&, scanner&);
+
+ static void
+ merge (pair<name, optional<name>>& b,
+ const pair<name, optional<name>>& a) {b = a;}
+ };
+
+ template <>
struct parser<structured_result_format>
{
static void
diff --git a/libbuild2/types.hxx b/libbuild2/types.hxx
index bf412a3..ea84701 100644
--- a/libbuild2/types.hxx
+++ b/libbuild2/types.hxx
@@ -351,10 +351,16 @@ namespace build2
// Path printing potentially relative with trailing slash for directories.
//
LIBBUILD2_SYMEXPORT ostream&
- operator<< (ostream&, const ::butl::path&); // utility.cxx
+ operator<< (ostream&, const path&); // utility.cxx
+
+ inline ostream&
+ operator<< (ostream& os, const dir_path& d) // For overload resolution.
+ {
+ return build2::operator<< (os, static_cast<const path&> (d));
+ }
LIBBUILD2_SYMEXPORT ostream&
- operator<< (ostream&, const ::butl::path_name_view&); // utility.cxx
+ operator<< (ostream&, const path_name_view&); // utility.cxx
// <libbutl/timestamp.hxx>
//
@@ -425,7 +431,7 @@ namespace build2
// Print as recall[@effect].
//
LIBBUILD2_SYMEXPORT ostream&
- operator<< (ostream&, const ::butl::process_path&); // utility.cxx
+ operator<< (ostream&, const process_path&); // utility.cxx
// <libbutl/fdstream.hxx>
//
@@ -511,9 +517,9 @@ namespace build2
location_value (const location&);
- location_value (location_value&&);
+ location_value (location_value&&) noexcept;
location_value (const location_value&);
- location_value& operator= (location_value&&);
+ location_value& operator= (location_value&&) noexcept;
location_value& operator= (const location_value&);
};
diff --git a/libbuild2/types.ixx b/libbuild2/types.ixx
index 750c8c7..ee2a605 100644
--- a/libbuild2/types.ixx
+++ b/libbuild2/types.ixx
@@ -10,7 +10,7 @@ namespace build2
{
if (!l.empty ())
{
- o << l.file;
+ build2::operator<< (o, l.file); // Disambiguate.
if (l.line != 0)
{
@@ -43,7 +43,7 @@ namespace build2
}
inline location_value::
- location_value (location_value&& l)
+ location_value (location_value&& l) noexcept
: location (l.line, l.column),
file (std::move (l.file))
{
@@ -58,7 +58,7 @@ namespace build2
}
inline location_value& location_value::
- operator= (location_value&& l)
+ operator= (location_value&& l) noexcept
{
if (this != &l)
{
diff --git a/libbuild2/utility-installed.cxx b/libbuild2/utility-installed.cxx
index 441e31b..e23add1 100644
--- a/libbuild2/utility-installed.cxx
+++ b/libbuild2/utility-installed.cxx
@@ -14,6 +14,14 @@ namespace build2
#ifdef BUILD2_INSTALL_LIB
const dir_path build_install_lib (BUILD2_INSTALL_LIB);
#endif
+
+#ifdef BUILD2_INSTALL_BUILDFILE
+ const dir_path build_install_buildfile (BUILD2_INSTALL_BUILDFILE);
+#endif
+
+#ifdef BUILD2_INSTALL_DATA
+ const dir_path build_install_data (BUILD2_INSTALL_DATA);
+#endif
}
#endif
diff --git a/libbuild2/utility-uninstalled.cxx b/libbuild2/utility-uninstalled.cxx
index a6bad55..f836de6 100644
--- a/libbuild2/utility-uninstalled.cxx
+++ b/libbuild2/utility-uninstalled.cxx
@@ -7,4 +7,16 @@ namespace build2
{
const bool build_installed = false;
const dir_path build_install_lib; // Empty.
+
+#ifdef BUILD2_INSTALL_BUILDFILE
+ const dir_path build_install_buildfile (BUILD2_INSTALL_BUILDFILE);
+#else
+ const dir_path build_install_buildfile; // Empty.
+#endif
+
+#ifdef BUILD2_INSTALL_DATA
+ const dir_path build_install_data (BUILD2_INSTALL_DATA);
+#else
+ const dir_path build_install_data; // Empty (during bootstrap).
+#endif
}
diff --git a/libbuild2/utility.cxx b/libbuild2/utility.cxx
index 5a58287..1135851 100644
--- a/libbuild2/utility.cxx
+++ b/libbuild2/utility.cxx
@@ -42,7 +42,7 @@ namespace build2
}
ostream&
- operator<< (ostream& os, const ::butl::path& p)
+ operator<< (ostream& os, const path& p)
{
using namespace build2;
@@ -53,7 +53,7 @@ namespace build2
}
ostream&
- operator<< (ostream& os, const ::butl::path_name_view& v)
+ operator<< (ostream& os, const path_name_view& v)
{
assert (!v.empty ());
@@ -61,7 +61,7 @@ namespace build2
}
ostream&
- operator<< (ostream& os, const ::butl::process_path& p)
+ operator<< (ostream& os, const process_path& p)
{
using namespace build2;
@@ -86,6 +86,51 @@ namespace build2
//
namespace build2
{
+ static const char hex_digits[] = "0123456789abcdef";
+
+ string
+ to_string (uint64_t i, int b, size_t w)
+ {
+ // One day we can switch to C++17 std::to_chars().
+ //
+ string r;
+ switch (b)
+ {
+ case 10:
+ {
+ r = to_string (i);
+ if (w > r.size ())
+ r.insert (0, w - r.size (), '0');
+ break;
+ }
+ case 16:
+ {
+ r.reserve (18);
+ r += "0x";
+
+ for (size_t j (64); j != 0; )
+ {
+ j -= 4;
+ size_t d ((i >> j) & 0x0f);
+
+ // Omit leading zeros but watch out for the i==0 corner case.
+ //
+ if (d != 0 || r.size () != 2 || j == 0)
+ r += hex_digits[d];
+ }
+
+ if (w > r.size () - 2)
+ r.insert (2, w - (r.size () - 2), '0');
+
+ break;
+ }
+ default:
+ throw invalid_argument ("unsupported base");
+ }
+
+ return r;
+ }
+
void (*terminate) (bool);
process_path argv0;
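
A usage sketch of the to_string() overload added above (assuming the libbuild2 headers and library are available; the results in the comments follow the conversion logic shown):

  #include <libbuild2/types.hxx>
  #include <libbuild2/utility.hxx>

  int main ()
  {
    using namespace build2;

    string a (to_string (uint64_t (255), 16));     // "0xff"
    string b (to_string (uint64_t (255), 16, 4));  // "0x00ff" (padded after "0x")
    string c (to_string (uint64_t (7),   10, 3));  // "007"
    string d (to_string (uint64_t (0),   16));     // "0x0"

    // Any base other than 10 or 16 throws std::invalid_argument.
  }
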
diff --git a/libbuild2/utility.hxx b/libbuild2/utility.hxx
index c12fae7..b534f41 100644
--- a/libbuild2/utility.hxx
+++ b/libbuild2/utility.hxx
@@ -9,7 +9,7 @@
#include <string> // to_string()
#include <utility> // move(), forward(), declval(), make_pair(), swap()
#include <cassert> // assert()
-#include <iterator> // make_move_iterator()
+#include <iterator> // make_move_iterator(), back_inserter()
#include <algorithm> // *
#include <functional> // ref(), cref()
#include <type_traits>
@@ -51,10 +51,17 @@ namespace build2
using std::make_tuple;
using std::make_shared;
using std::make_move_iterator;
- using std::to_string;
+ using std::back_inserter;
using std::stoul;
using std::stoull;
+ using std::to_string;
+
+ // Currently only supports base 10 and 16. Note: adds `0x` if base 16.
+ //
+ LIBBUILD2_SYMEXPORT string
+ to_string (uint64_t, int base, size_t width = 0);
+
// <libbutl/utility.hxx>
//
using butl::reverse_iterate;
@@ -99,6 +106,7 @@ namespace build2
// <libbutl/path-pattern.hxx>
//
using butl::path_pattern;
+ using butl::path_match;
// Perform process-wide initializations/adjustments/workarounds. Should be
// called once early in main(). In particular, besides other things, this
@@ -129,6 +137,7 @@ namespace build2
init_diag (uint16_t verbosity,
bool silent = false,
optional<bool> progress = nullopt,
+ optional<bool> diag_color = nullopt,
bool no_lines = false,
bool no_columns = false,
bool stderr_term = false);
@@ -138,13 +147,21 @@ namespace build2
LIBBUILD2_SYMEXPORT extern bool silent;
// --[no-]progress
+ // --[no-]diag-color
//
LIBBUILD2_SYMEXPORT extern optional<bool> diag_progress_option;
+ LIBBUILD2_SYMEXPORT extern optional<bool> diag_color_option;
LIBBUILD2_SYMEXPORT extern bool diag_no_line; // --no-line
LIBBUILD2_SYMEXPORT extern bool diag_no_column; // --no-column
- LIBBUILD2_SYMEXPORT extern bool stderr_term; // True if stderr is a terminal.
+ // True if stderr is a terminal.
+ //
+ LIBBUILD2_SYMEXPORT extern bool stderr_term;
+
+ // True if the color can be used on the stderr terminal.
+ //
+ LIBBUILD2_SYMEXPORT extern bool stderr_term_color;
// Global state (verbosity, home/work directories, etc).
@@ -173,11 +190,15 @@ namespace build2
LIBBUILD2_SYMEXPORT extern const standard_version build_version;
LIBBUILD2_SYMEXPORT extern const string build_version_interface;
- // Whether running installed build and, if so, the library installation
- // directory (empty otherwise).
+ // Whether we are running an installed build as well as the library
+ // installation directory (only if installed, empty otherwise), the exported
+ // buildfile installation directory (only if configured, empty otherwise),
+ // and the data installation directory (only if installed, src_root otherwise).
//
LIBBUILD2_SYMEXPORT extern const bool build_installed;
LIBBUILD2_SYMEXPORT extern const dir_path build_install_lib; // $install.lib
+ LIBBUILD2_SYMEXPORT extern const dir_path build_install_buildfile; // $install.buildfile
+ LIBBUILD2_SYMEXPORT extern const dir_path build_install_data; // $install.data
// --[no-]mtime-check
//
diff --git a/libbuild2/variable.cxx b/libbuild2/variable.cxx
index 0017633..078c13a 100644
--- a/libbuild2/variable.cxx
+++ b/libbuild2/variable.cxx
@@ -3,10 +3,15 @@
#include <libbuild2/variable.hxx>
-#include <cstring> // memcmp()
+#include <cstring> // memcmp(), memcpy()
#include <libbutl/path-pattern.hxx>
+#ifndef BUILD2_BOOTSTRAP
+# include <libbutl/json/parser.hxx>
+# include <libbutl/json/serializer.hxx>
+#endif
+
#include <libbuild2/target.hxx>
#include <libbuild2/diagnostics.hxx>
@@ -47,7 +52,7 @@ namespace build2
}
value::
- value (value&& v)
+ value (value&& v) noexcept
: type (v.type), null (v.null), extra (v.extra)
{
if (!null)
@@ -57,7 +62,7 @@ namespace build2
else if (type->copy_ctor != nullptr)
type->copy_ctor (*this, v, true);
else
- data_ = v.data_; // Copy as POD.
+ memcpy (data_, v.data_, size_); // Copy as POD.
}
}
@@ -72,7 +77,7 @@ namespace build2
else if (type->copy_ctor != nullptr)
type->copy_ctor (*this, v, false);
else
- data_ = v.data_; // Copy as POD.
+ memcpy (data_, v.data_, size_); // Copy as POD.
}
}
@@ -99,12 +104,14 @@ namespace build2
if (null)
new (&data_) names (move (v).as<names> ());
else
+ // Note: can throw (see small_vector for details).
+ //
as<names> () = move (v).as<names> ();
}
else if (auto f = null ? type->copy_ctor : type->copy_assign)
f (*this, v, true);
else
- data_ = v.data_; // Assign as POD.
+ memcpy (data_, v.data_, size_); // Assign as POD.
null = v.null;
}
@@ -143,7 +150,7 @@ namespace build2
else if (auto f = null ? type->copy_ctor : type->copy_assign)
f (*this, v, false);
else
- data_ = v.data_; // Assign as POD.
+ memcpy (data_, v.data_, size_); // Assign as POD.
null = v.null;
}
@@ -367,8 +374,8 @@ namespace build2
// Typification is kind of like caching so we reuse that mutex shard.
//
shared_mutex& m (
- ctx.mutexes.variable_cache[
- hash<value*> () (&v) % ctx.mutexes.variable_cache_size]);
+ ctx.mutexes->variable_cache[
+ hash<value*> () (&v) % ctx.mutexes->variable_cache_size]);
// Note: v.type is rechecked by typify() under lock.
//
@@ -377,7 +384,7 @@ namespace build2
}
void
- untypify (value& v)
+ untypify (value& v, bool reduce)
{
if (v.type == nullptr)
return;
@@ -389,7 +396,7 @@ namespace build2
}
names ns;
- names_view nv (v.type->reverse (v, ns));
+ names_view nv (v.type->reverse (v, ns, reduce));
if (nv.empty () || nv.data () == ns.data ())
{
@@ -458,7 +465,7 @@ namespace build2
m += "name '" + to_string (n) + '\'';
}
- throw invalid_argument (m);
+ throw invalid_argument (move (m));
}
// names
@@ -493,6 +500,7 @@ namespace build2
type_name,
sizeof (bool),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
nullptr, // No dtor (POD).
nullptr, // No copy_ctor (POD).
@@ -503,7 +511,9 @@ namespace build2
&simple_reverse<bool>,
nullptr, // No cast (cast data_ directly).
nullptr, // No compare (compare as POD).
- nullptr // Never empty.
+ nullptr, // Never empty.
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// int64_t value
@@ -550,6 +560,7 @@ namespace build2
type_name,
sizeof (int64_t),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
nullptr, // No dtor (POD).
nullptr, // No copy_ctor (POD).
@@ -560,7 +571,9 @@ namespace build2
&simple_reverse<int64_t>,
nullptr, // No cast (cast data_ directly).
nullptr, // No compare (compare as POD).
- nullptr // Never empty.
+ nullptr, // Never empty.
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// uint64_t value
@@ -576,6 +589,8 @@ namespace build2
if (!wspace (v[0]))
{
+ // Note: see also similar code in to_json_value().
+ //
int b (v[0] == '0' && (v[1] == 'x' || v[1] == 'X') ? 16 : 10);
// May throw invalid_argument or out_of_range.
@@ -607,6 +622,7 @@ namespace build2
type_name,
sizeof (uint64_t),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
nullptr, // No dtor (POD).
nullptr, // No copy_ctor (POD).
@@ -617,7 +633,9 @@ namespace build2
&simple_reverse<uint64_t>,
nullptr, // No cast (cast data_ directly).
nullptr, // No compare (compare as POD).
- nullptr // Never empty.
+ nullptr, // Never empty.
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// string value
@@ -701,6 +719,7 @@ namespace build2
type_name,
sizeof (string),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
&default_dtor<string>,
&default_copy_ctor<string>,
@@ -711,7 +730,9 @@ namespace build2
&simple_reverse<string>,
nullptr, // No cast (cast data_ directly).
&simple_compare<string>,
- &default_empty<string>
+ &default_empty<string>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// path value
@@ -768,6 +789,7 @@ namespace build2
type_name,
sizeof (path),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
&default_dtor<path>,
&default_copy_ctor<path>,
@@ -778,7 +800,9 @@ namespace build2
&simple_reverse<path>,
nullptr, // No cast (cast data_ directly).
&simple_compare<path>,
- &default_empty<path>
+ &default_empty<path>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// dir_path value
@@ -835,6 +859,7 @@ namespace build2
sizeof (dir_path),
&value_traits<path>::value_type, // Base (assuming direct cast works for
// both).
+ false, // Not container.
nullptr, // No element.
&default_dtor<dir_path>,
&default_copy_ctor<dir_path>,
@@ -845,7 +870,9 @@ namespace build2
&simple_reverse<dir_path>,
nullptr, // No cast (cast data_ directly).
&simple_compare<dir_path>,
- &default_empty<dir_path>
+ &default_empty<dir_path>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// abs_dir_path value
@@ -869,7 +896,14 @@ namespace build2
return abs_dir_path (move (d));
}
- catch (const invalid_path&) {} // Fall through.
+ catch (invalid_path& e)
+ {
+ // We moved from name so reconstruct the path. Let's always make it
+ // simple since we may not be able to construct dir_path. Should be
+ // good enough for diagnostics.
+ //
+ n.value = move (e.path);
+ }
}
throw_invalid_argument (n, r, "abs_dir_path");
@@ -883,6 +917,7 @@ namespace build2
sizeof (abs_dir_path),
&value_traits<dir_path>::value_type, // Base (assuming direct cast works
// for both).
+ false, // Not container.
nullptr, // No element.
&default_dtor<abs_dir_path>,
&default_copy_ctor<abs_dir_path>,
@@ -893,7 +928,9 @@ namespace build2
&simple_reverse<abs_dir_path>,
nullptr, // No cast (cast data_ directly).
&simple_compare<abs_dir_path>,
- &default_empty<abs_dir_path>
+ &default_empty<abs_dir_path>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// name value
@@ -908,10 +945,10 @@ namespace build2
}
static names_view
- name_reverse (const value& v, names&)
+ name_reverse (const value& v, names&, bool reduce)
{
const name& n (v.as<name> ());
- return n.empty () ? names_view (nullptr, 0) : names_view (&n, 1);
+ return reduce && n.empty () ? names_view (nullptr, 0) : names_view (&n, 1);
}
const char* const value_traits<name>::type_name = "name";
@@ -921,6 +958,7 @@ namespace build2
type_name,
sizeof (name),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
&default_dtor<name>,
&default_copy_ctor<name>,
@@ -931,7 +969,9 @@ namespace build2
&name_reverse,
nullptr, // No cast (cast data_ directly).
&simple_compare<name>,
- &default_empty<name>
+ &default_empty<name>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// name_pair
@@ -975,13 +1015,13 @@ namespace build2
}
static names_view
- name_pair_reverse (const value& v, names& ns)
+ name_pair_reverse (const value& v, names& ns, bool reduce)
{
const name_pair& p (v.as<name_pair> ());
const name& f (p.first);
const name& s (p.second);
- if (f.empty () && s.empty ())
+ if (reduce && f.empty () && s.empty ())
return names_view (nullptr, 0);
if (f.empty ())
@@ -1003,6 +1043,7 @@ namespace build2
type_name,
sizeof (name_pair),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
&default_dtor<name_pair>,
&default_copy_ctor<name_pair>,
@@ -1013,7 +1054,9 @@ namespace build2
&name_pair_reverse,
nullptr, // No cast (cast data_ directly).
&simple_compare<name_pair>,
- &default_empty<name_pair>
+ &default_empty<name_pair>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// process_path value
@@ -1133,10 +1176,14 @@ namespace build2
}
static names_view
- process_path_reverse (const value& v, names& s)
+ process_path_reverse (const value& v, names& s, bool)
{
const auto& x (v.as<process_path> ());
+ // Note that strictly speaking process_path doesn't have empty
+ // representation (see convert() above). Thus we always return reduced
+ // representation.
+ //
if (!x.empty ())
{
s.reserve (x.effect.empty () ? 1 : 2);
@@ -1153,6 +1200,7 @@ namespace build2
type_name,
sizeof (process_path),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
&default_dtor<process_path>,
&process_path_copy_ctor<process_path>,
@@ -1163,7 +1211,9 @@ namespace build2
&process_path_reverse,
nullptr, // No cast (cast data_ directly).
&simple_compare<process_path>,
- &default_empty<process_path>
+ &default_empty<process_path>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// process_path_ex value
@@ -1301,10 +1351,13 @@ namespace build2
}
static names_view
- process_path_ex_reverse (const value& v, names& s)
+ process_path_ex_reverse (const value& v, names& s, bool)
{
const auto& x (v.as<process_path_ex> ());
+ // Note that process_path_ex only has reduced empty representation (see
+ // convert() above).
+ //
if (!x.empty ())
{
s.reserve ((x.effect.empty () ? 1 : 2) +
@@ -1348,6 +1401,7 @@ namespace build2
sizeof (process_path_ex),
&value_traits< // Base (assuming direct cast works
process_path>::value_type, // for both).
+ false, // Not container.
nullptr, // No element.
&default_dtor<process_path_ex>,
&process_path_ex_copy_ctor,
@@ -1358,7 +1412,9 @@ namespace build2
&process_path_ex_reverse,
nullptr, // No cast (cast data_ directly).
&simple_compare<process_path>, // For now compare as process_path.
- &default_empty<process_path_ex>
+ &default_empty<process_path_ex>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// target_triplet value
@@ -1389,6 +1445,7 @@ namespace build2
type_name,
sizeof (target_triplet),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
&default_dtor<target_triplet>,
&default_copy_ctor<target_triplet>,
@@ -1399,7 +1456,9 @@ namespace build2
&simple_reverse<target_triplet>,
nullptr, // No cast (cast data_ directly).
&simple_compare<target_triplet>,
- &default_empty<target_triplet>
+ &default_empty<target_triplet>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// project_name value
@@ -1433,6 +1492,7 @@ namespace build2
type_name,
sizeof (project_name),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
&default_dtor<project_name>,
&default_copy_ctor<project_name>,
@@ -1443,7 +1503,970 @@ namespace build2
&simple_reverse<project_name>,
nullptr, // No cast (cast data_ directly).
&simple_compare<project_name>,
- &default_empty<project_name>
+ &default_empty<project_name>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
+ };
+
+ // json
+ //
+ static string
+ to_string_value (name& n, const char* what)
+ {
+ if (n.typed () || n.qualified () || n.pattern)
+ throw_invalid_argument (n, nullptr, what);
+
+ string s;
+
+ if (n.simple ())
+ s.swap (n.value);
+ else
+ {
+ // Note that here we cannot assume what's in dir is really a path (think
+ // s/foo/bar/) so we have to reverse it exactly.
+ //
+ s = move (n.dir).representation (); // Move out of path.
+
+ if (!n.value.empty ())
+ s += n.value; // Separator is already there.
+ }
+
+ return s;
+ }
+
+ static json_value
+ to_json_value (name& n, const char* what)
+ {
+ if (n.typed () || n.qualified () || n.pattern)
+ throw_invalid_argument (n, nullptr, what);
+
+ string s;
+
+ if (n.simple ())
+ s.swap (n.value);
+ else
+ {
+ // Note that here we cannot assume what's in dir is really a path (think
+ // s/foo/bar/) so we have to reverse it exactly.
+ //
+ s = move (n.dir).representation (); // Move out of path.
+
+ if (!n.value.empty ())
+ s += n.value; // Separator is already there.
+
+ // A path is always interpreted as a JSON string.
+ //
+ return json_value (move (s));
+ }
+
+ bool f;
+ if (s.empty ())
+ return json_value (string ());
+ if (s == "null")
+ return json_value ();
+ else if ((f = (s == "true")) || s == "false")
+ return json_value (f);
+ else if (s.find_first_not_of (
+ "0123456789", (f = (s[0] == '-')) ? 1 : 0) == string::npos)
+ {
+ name n (move (s));
+ return f
+ ? json_value (value_traits<int64_t>::convert (n, nullptr))
+ : json_value (value_traits<uint64_t>::convert (n, nullptr));
+ }
+ //
+ // Handle the hex notation similar to <uint64_t>::convert() (and JSON5).
+ //
+ else if (s[0] == '0' &&
+ (s[1] == 'x' || s[1] == 'X') &&
+ s.size () > 2 &&
+ s.find_first_not_of ("0123456789aAbBcCdDeEfF", 2) == string::npos)
+ {
+ return json_value (
+ value_traits<uint64_t>::convert (name (move (s)), nullptr),
+ true /* hex */);
+ }
+ else
+ {
+ // If this is not a JSON representation of string, array, or object,
+ // then treat it as a string.
+ //
+ // Note that the special `"`, `{`, and `[` characters could be preceded
+ // with whitespaces. Note: see similar test in json_object below.
+ //
+ size_t p (s.find_first_not_of (" \t\n\r"));
+
+ if (p == string::npos || (s[p] != '"' && s[p] != '{' && s[p] != '['))
+ return json_value (move (s));
+
+ // Parse as valid JSON input text.
+ //
+#ifndef BUILD2_BOOTSTRAP
+ try
+ {
+ json_parser p (s, nullptr /* name */);
+ return json_value (p);
+ }
+ catch (const invalid_json_input& e)
+ {
+ // It turned out that printing line/column/offset can be misleading since
+ // we could be parsing a single name from a potential list of names. And it
+ // feels like, without also printing the value, this is not of much use.
+ //
+#if 0
+ string m ("invalid json input at line ");
+ m += to_string (e.line);
+ m += ", column ";
+ m += to_string (e.column);
+ m += ", byte offset ";
+ m += to_string (e.position);
+ m += ": ";
+ m += e.what ();
+#else
+ string m ("invalid json input: ");
+ m += e.what ();
+#endif
+ throw invalid_argument (move (m));
+ }
+#else
+ throw invalid_argument ("json parsing requested during bootstrap");
+#endif
+ }
+ }
+
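+
To summarize the classification performed by to_json_value() above, a simplified standalone sketch (illustration only: it covers simple names and ignores the directory/path, pair, and quoting handling):

  #include <cstddef>
  #include <string>

  enum class json_kind {null_, boolean, number, hex_number, string_, json_text};

  // Mirror of the simple-value classification in to_json_value():
  //
  //   classify ("")          == string_     (empty JSON string, not null)
  //   classify ("null")      == null_
  //   classify ("true")      == boolean
  //   classify ("-123")      == number
  //   classify ("0xff")      == hex_number
  //   classify ("abc")       == string_
  //   classify ("{\"a\":1}") == json_text   (parsed as JSON input text)
  //
  static json_kind
  classify (const std::string& s)
  {
    using std::string;

    if (s.empty ())                   return json_kind::string_;
    if (s == "null")                  return json_kind::null_;
    if (s == "true" || s == "false")  return json_kind::boolean;

    bool neg (s[0] == '-');
    if (s.find_first_not_of ("0123456789", neg ? 1 : 0) == string::npos)
      return json_kind::number;

    if (s.size () > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X') &&
        s.find_first_not_of ("0123456789aAbBcCdDeEfF", 2) == string::npos)
      return json_kind::hex_number;

    // Anything that looks like JSON text ("...", {...}, or [...], possibly
    // preceded by whitespace) is parsed as such; everything else is a string.
    //
    std::size_t p (s.find_first_not_of (" \t\n\r"));
    return (p != string::npos && (s[p] == '"' || s[p] == '{' || s[p] == '['))
      ? json_kind::json_text
      : json_kind::string_;
  }
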
+ json_value value_traits<json_value>::
+ convert (name&& l, name* r)
+ {
+ // Here we expect either a simple value or a serialized representation.
+ //
+ if (r != nullptr)
+ throw invalid_argument ("pair in json element value");
+
+ return to_json_value (l, "json element");
+ }
+
+ json_value value_traits<json_value>::
+ convert (names&& ns)
+ {
+ size_t n (ns.size ());
+
+ if (n == 0)
+ {
+ // Note: this is the ([json] ) case, not ([json] ""). See also the
+ // relevant note in json_reverse() below.
+ //
+ return json_value (); // null
+ }
+ else if (n == 1)
+ {
+ return to_json_value (ns.front (), "json");
+ }
+ else
+ {
+ if (ns.front ().pair) // object
+ {
+ json_value r (json_type::object);
+ r.object.reserve (n / 2);
+
+ for (auto i (ns.begin ()); i != ns.end (); ++i)
+ {
+ if (!i->pair)
+ throw invalid_argument (
+ "expected pair in json member value '" + to_string (*i) + '\'');
+
+ // Note that we could support JSON-quoted member names but it's
+ // unclear why would someone want that (and if they do, they can
+ // always specify JSON text instead).
+ //
+ // @@ The empty pair value ([json] one@ ), which is currently an empty
+ // string, is inconsistent with the empty value ([json] ) above, which
+ // is null. Maybe we could distinguish the one@ and one@"" cases
+ // via type hints?
+ //
+ string n (to_string_value (*i, "json member name"));
+ json_value v (to_json_value (*++i, "json member"));
+
+ // Check for duplicates. One can use append/prepend to merge.
+ //
+ if (find_if (r.object.begin (), r.object.end (),
+ [&n] (const json_member& m)
+ {
+ return m.name == n;
+ }) != r.object.end ())
+ {
+ throw invalid_argument (
+ "duplicate json object member '" + n + '\'');
+ }
+
+ r.object.push_back (json_member {move (n), move (v)});
+ }
+
+ return r;
+ }
+ else // array
+ {
+ json_value r (json_type::array);
+ r.array.reserve (n);
+
+ for (name& n: ns)
+ {
+ if (n.pair)
+ throw invalid_argument (
+ "unexpected pair in json array element value '" +
+ to_string (n) + '\'');
+
+ r.array.push_back (to_json_value (n, "json array element"));
+ }
+
+ return r;
+ }
+ }
+ }
+
+ static void
+ json_assign (value& v, names&& ns, const variable* var)
+ {
+ using traits = value_traits<json_value>;
+
+ try
+ {
+ traits::assign (v, traits::convert (move (ns)));
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: ns is not guaranteed to be valid.
+ //
+ diag_record dr (fail);
+ dr << "invalid json value";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+ dr << ": " << e;
+ }
+ }
+
+ static void
+ json_append (value& v, names&& ns, const variable* var)
+ {
+ using traits = value_traits<json_value>;
+
+ try
+ {
+ traits::append (v, traits::convert (move (ns)));
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: ns is not guaranteed to be valid.
+ //
+ diag_record dr (fail);
+ dr << "invalid json value";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+ dr << ": " << e;
+ }
+ }
+
+ static void
+ json_prepend (value& v, names&& ns, const variable* var)
+ {
+ using traits = value_traits<json_value>;
+
+ try
+ {
+ traits::prepend (v, traits::convert (move (ns)));
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: ns is not guaranteed to be valid.
+ //
+ diag_record dr (fail);
+ dr << "invalid json value";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+ dr << ": " << e;
+ }
+ }
+
+ name value_traits<json_value>::
+ reverse (const json_value& v)
+ {
+ switch (v.type)
+ {
+ case json_type::null:
+ {
+ // Note that here we cannot return empty (e.g., to be consistent with
+ // other places) because we treat empty name (as opposed to empty
+ // names) as string, not null (see to_json_value() above).
+ //
+ // Thankfully this version of reverse() is only used when json_value
+ // representation is needed as part of a container. Which means in
+ // "consumption" contexts (e.g., result of subscript) null will still
+ // decay to empty.
+ //
+#if 1
+ return name ("null");
+#else
+ return name ();
+#endif
+ }
+ case json_type::boolean:
+ {
+ return name (v.boolean ? "true" : "false");
+ }
+ case json_type::signed_number:
+ {
+ return value_traits<int64_t>::reverse (v.signed_number);
+ }
+ case json_type::unsigned_number:
+ {
+ return value_traits<uint64_t>::reverse (v.unsigned_number);
+ }
+ case json_type::hexadecimal_number:
+ {
+ return name (to_string (v.unsigned_number, 16));
+ }
+ case json_type::string:
+ //
+ // @@ Hm, it would be nice if this somehow got mapped to unquoted
+ // string but still be round-trippable to JSON value. Perhaps via
+ // the type hint idea? This is pretty bad. See also the subscript, where
+ // we hacked around this somewhat.
+ //
+ // Note that it may be tempting to fix this by only quoting strings
+ // that would otherwise be mis-interpreted (null, true, all digits,
+ // etc). But that would be worse: things would seem to work but
+ // fall apart in the perhaps unlikely event of encountering one of
+ // the problematic values. It is better to produce a consistent
+ // result.
+ //
+ case json_type::array:
+ case json_type::object:
+ {
+ // Serialize as JSON output text.
+ //
+ string o;
+
+#ifndef BUILD2_BOOTSTRAP
+ try
+ {
+ // Disable pretty-printing so that the output is all on the same
+ // line. While it's not going to be easy to read for larger JSON
+ // outputs, it will fit better into the existing model where none of
+ // the value representations use formatting newlines. If a pretty-
+ // printed representation is required, then the $json.serialize()
+ // function can be used to obtain it.
+ //
+ json_buffer_serializer s (o, 0 /* indentation */);
+ v.serialize (s);
+ }
+ catch (const invalid_json_output& e)
+ {
+ // Note that while it feels like value_traits::reverse() should
+ // throw invalid_argument, we don't currently handle it anywhere so
+ // for now let's just fail.
+ //
+ // Note: the same diagnostics as in $json.serialize().
+ //
+ diag_record dr;
+ dr << fail << "invalid json value: " << e;
+
+ if (e.event)
+ dr << info << "while serializing " << to_string (*e.event);
+
+ if (e.offset != string::npos)
+ dr << info << "offending byte offset " << e.offset;
+ }
+#else
+ fail << "json serialization requested during bootstrap";
+#endif
+ return name (move (o));
+ }
+ }
+
+ assert (false);
+ return name ();
+ }
+
+ static names_view
+ json_reverse (const value& x, names& ns, bool reduce)
+ {
+ const json_value& v (x.as<json_value> ());
+
+ // @@ Hm, it would be nice if JSON null somehow got mapped to [null]/empty
+ // but still be round-trippable to JSON null. Perhaps via type hint?
+ //
+ // But won't `print ([json] null)` printing nothing be surprising?
+ // Also, it's not clear that mapping JSON null to our [null] is a good
+ // idea since our [null] means "no value" while JSON null means "null
+ // value".
+ //
+ // Maybe the current semantics is the best: we map our [null] and empty
+ // names to JSON null (naturally) but we always reverse JSON null to
+ // the JSON "null" literal. Or maybe we could reverse it to null but
+ // type-hint it that it's a spelling of [null]/empty. Quite fuzzy,
+ // admittedly. In our model null values decay to empty so JSON null
+ // decaying to "null" literal is strange. Let's try and see how it
+ // goes. See also json_subscript_impl() below.
+ //
+ if (v.type != json_type::null || !reduce)
+ ns.push_back (value_traits<json_value>::reverse (v));
+
+ return ns;
+ }
+
+ static int
+ json_compare (const value& l, const value& r)
+ {
+ return l.as<json_value> ().compare (r.as<json_value> ());
+ }
+
+ // Return the value as well as the indication of whether the index/name is
+ // in range.
+ //
+ static pair<value, bool>
+ json_subscript_impl (const value& val, value* val_data,
+ uint64_t i, const string& n, bool index)
+ {
+ const json_value& jv (val.as<json_value> ());
+
+ json_value jr;
+
+ if (index)
+ {
+ if (i >= (jv.type == json_type::array ? jv.array.size () :
+ jv.type == json_type::object ? jv.object.size () :
+ jv.type == json_type::null ? 0 : 1))
+ return make_pair (value (), false);
+
+ switch (jv.type)
+ {
+ case json_type::boolean:
+ case json_type::signed_number:
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ case json_type::string:
+ {
+ // Steal the value if possible.
+ //
+ jr = (&val == val_data
+ ? json_value (move (const_cast<json_value&> (jv)))
+ : json_value (jv));
+ break;
+ }
+ case json_type::array:
+ {
+ // Steal the value if possible.
+ //
+ const json_value& r (jv.array[i]);
+ jr = (&val == val_data
+ ? json_value (move (const_cast<json_value&> (r)))
+ : json_value (r));
+ break;
+ }
+ case json_type::object:
+ {
+ // Represent as an object with one member.
+ //
+ new (&jr.object) json_value::object_type ();
+ jr.type = json_type::object;
+
+ // Steal the member if possible.
+ //
+ const json_member& m (jv.object[i]);
+ jr.object.push_back (&val == val_data
+ ? json_member (move (const_cast<json_member&> (m)))
+ : json_member (m));
+ break;
+ }
+ case json_type::null:
+ assert (false);
+ }
+ }
+ else
+ {
+ auto i (find_if (jv.object.begin (),
+ jv.object.end (),
+ [&n] (const json_member& m)
+ {
+ return m.name == n;
+ }));
+
+ if (i == jv.object.end ())
+ return make_pair (value (), false);
+
+ // Steal the member value if possible.
+ //
+ jr = (&val == val_data
+ ? json_value (move (const_cast<json_value&> (i->value)))
+ : json_value (i->value));
+ }
+
+ // @@ As a temporary work around for the lack of type hints (see
+ // json_reverse() for background), reverse simple JSON values to the
+ // corresponding fundamental type values. The thinking here is that
+ // subscript (and iteration) is primarily meant for consumption (as
+ // opposed to reverse() where it is used to build up values and thus
+ // needs things to be fully reversible). Once we add type hints, then
+ // this should become unnecessary and we should be able to just always
+ // return json_value. See also $json.member_value() where we do the
+ // same thing.
+ //
+ // @@ TODO: split this function into two (index/name) once we get rid of this.
+ //
+ value r;
+ switch (jr.type)
+ {
+ // Seeing that we are reversing for consumption, it feels natural to
+ // reverse JSON null to our [null] rather than empty. This, in
+ // particular, helps chained subscript.
+ //
+#if 0
+ case json_type::null: r = value (names {}); break;
+#else
+ case json_type::null: r = value (); break;
+#endif
+ case json_type::boolean: r = value (jr.boolean); break;
+ case json_type::signed_number: r = value (jr.signed_number); break;
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number: r = value (jr.unsigned_number); break;
+ case json_type::string: r = value (move (jr.string)); break;
+ case json_type::array:
+ case json_type::object: r = value (move (jr)); break;
+ }
+
+ return make_pair (move (r), true);
+ }
+
+ static value
+ json_subscript (const value& val, value* val_data,
+ value&& sub,
+ const location& sloc,
+ const location& bloc)
+ {
+ const json_value* jv (val.null ? nullptr : &val.as<json_value> ());
+
+ // For consistency with other places treat JSON null value as maybe
+ // missing array/object. In particular, we don't want to fail trying to
+ // lookup by-name on a null value which could have been an object.
+ //
+ if (jv != nullptr && jv->type == json_type::null)
+ jv = nullptr;
+
+ // Process subscript even if the value is null to make sure it is valid.
+ //
+ bool index;
+ uint64_t i (0);
+ string n;
+
+ // Always interpret uint64-typed subscript as index even for objects.
+ // This can be used, for example, to iterate with an index over object
+ // members.
+ //
+ if (!sub.null && sub.type == &value_traits<uint64_t>::value_type)
+ {
+ i = sub.as<uint64_t> ();
+ index = true;
+ }
+ else
+ {
+ // How we interpret the subscript depends on the JSON value type. For
+ // objects we treat it as a string (member name) and for everything else
+ // as an index.
+ //
+ // What if the value is null and we don't have a JSON type? In this case
+ // we treat it as a string since a valid number is also a valid string.
+ //
+ try
+ {
+ if (jv == nullptr || jv->type == json_type::object)
+ {
+ n = convert<string> (move (sub));
+ index = false;
+ }
+ else
+ {
+ i = convert<uint64_t> (move (sub));
+ index = true;
+ }
+ }
+ catch (const invalid_argument& e)
+ {
+ // We will likely be trying to interpret a member name as an integer
+ // due to the incorrect value type so issue appropriate diagnostics.
+ //
+ diag_record dr;
+ dr << fail (sloc) << "invalid json value subscript: " << e;
+
+ if (jv != nullptr && jv->type != json_type::object)
+ dr << info << "json value type is " << jv->type;
+
+ dr << info (bloc) << "use the '\\[' escape sequence if this is a "
+ << "wildcard pattern" << endf;
+ }
+ }
+
+ value r (jv != nullptr
+ ? json_subscript_impl (val, val_data, i, n, index).first
+ : value ());
+
+ // Typify null values so that we get called for chained subscripts.
+ //
+ if (r.null)
+ r.type = &value_traits<json_value>::value_type;
+
+ return r;
+ }
+
+ static void
+ json_iterate (const value& val,
+ const function<void (value&&, bool first)>& f)
+ {
+ // Implement in terms of subscript for consistency (in particular,
+ // iterating over simple values like number, string).
+ //
+ for (uint64_t i (0);; ++i)
+ {
+ pair<value, bool> e (json_subscript_impl (val, nullptr, i, {}, true));
+
+ if (!e.second)
+ break;
+
+ f (move (e.first), i == 0);
+ }
+ }
+
+ const json_value value_traits<json_value>::empty_instance;
+ const char* const value_traits<json_value>::type_name = "json";
+
+ // Note that whether the json value is a container or not depends on its
+ // payload type. However, for our purposes it feels correct to assume it is
+ // a container rather than not, with itself as the element type (see
+ // value_traits::{container, element_type} usage for details).
+ //
+ const value_type value_traits<json_value>::value_type
+ {
+ type_name,
+ sizeof (json_value),
+ nullptr, // No base.
+ true, // Container.
+ &value_traits<json_value>::value_type, // Element (itself).
+ &default_dtor<json_value>,
+ &default_copy_ctor<json_value>,
+ &default_copy_assign<json_value>,
+ &json_assign,
+ json_append,
+ json_prepend,
+ &json_reverse,
+ nullptr, // No cast (cast data_ directly).
+ &json_compare,
+ &default_empty<json_value>,
+ &json_subscript,
+ &json_iterate
+ };
+
+ // json_array
+ //
+ json_array value_traits<json_array>::
+ convert (names&& ns)
+ {
+ json_array r;
+
+ size_t n (ns.size ());
+ if (n == 0)
+ ; // Empty.
+ else if (n == 1)
+ {
+ // Tricky: this can still be JSON input text that is an array. And if
+ // it's not, then make it an element of an array.
+ //
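+      // For example (roughly):
+      //
+      //   a = [json_array] '[1, 2]'   # Array [1,2].
+      //   a = [json_array] 123        # Array [123] (single element).
+      //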
+ // @@ Hm, this is confusing: [json_array] a = null ! Maybe not? But then
+ // this won't work: [json_array] a = ([json_array] null). Maybe
+ // distinguish in assign?
+ //
+ json_value v (to_json_value (ns.front (), "json"));
+
+ if (v.type == json_type::array)
+ r.array = move (v.array);
+ else
+ r.array.push_back (move (v));
+ }
+ else
+ {
+ r.array.reserve (n);
+
+ for (name& n: ns)
+ {
+ if (n.pair)
+ throw invalid_argument (
+ "unexpected pair in json array element value '" +
+ to_string (n) + '\'');
+
+ r.array.push_back (to_json_value (n, "json array element"));
+ }
+ }
+
+ return r;
+ }
+
+ static void
+ json_array_assign (value& v, names&& ns, const variable* var)
+ {
+ using traits = value_traits<json_array>;
+
+ try
+ {
+ traits::assign (v, traits::convert (move (ns)));
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: ns is not guaranteed to be valid.
+ //
+ diag_record dr (fail);
+ dr << "invalid json array";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+ dr << ": " << e;
+ }
+ }
+
+ static void
+ json_array_append (value& v, names&& ns, const variable* var)
+ {
+ using val_traits = value_traits<json_value>;
+ using arr_traits = value_traits<json_array>;
+
+ try
+ {
+ arr_traits::append (v, val_traits::convert (move (ns)));
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: ns is not guaranteed to be valid.
+ //
+ diag_record dr (fail);
+ dr << "invalid json array";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+ dr << ": " << e;
+ }
+ }
+
+ static void
+ json_array_prepend (value& v, names&& ns, const variable* var)
+ {
+ using val_traits = value_traits<json_value>;
+ using arr_traits = value_traits<json_array>;
+
+ try
+ {
+ arr_traits::prepend (v, val_traits::convert (move (ns)));
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: ns is not guaranteed to be valid.
+ //
+ diag_record dr (fail);
+ dr << "invalid json array";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+ dr << ": " << e;
+ }
+ }
+
+ const json_array value_traits<json_array>::empty_instance;
+ const char* const value_traits<json_array>::type_name = "json_array";
+
+ const value_type value_traits<json_array>::value_type
+ {
+ type_name,
+ sizeof (json_array),
+ &value_traits<json_value>::value_type, // Base (assuming direct cast works
+ // for both).
+ true, // Container.
+ &value_traits<json_value>::value_type, // Element (json_value).
+ &default_dtor<json_array>,
+ &default_copy_ctor<json_array>,
+ &default_copy_assign<json_array>,
+ &json_array_assign,
+ &json_array_append,
+ &json_array_prepend,
+ &json_reverse,
+ nullptr, // No cast (cast data_ directly).
+ &json_compare,
+ &default_empty<json_array>,
+ &json_subscript,
+ &json_iterate
+ };
+
+ // json_object
+ //
+ json_object value_traits<json_object>::
+ convert (names&& ns)
+ {
+ json_object r;
+
+ size_t n (ns.size ());
+ if (n == 0)
+ ; // Empty.
+ else if (n == 1)
+ {
+      // Tricky: this can still be JSON input text that is an object. So do
+      // a check similar to the one in to_json_value() above.
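+      //
+      // For example (roughly):
+      //
+      //   o = [json_object] '{"one":1, "two":2}'
+      //   o = [json_object] one@1 two@2   # Same as above.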
+ //
+ name& n (ns.front ());
+
+ if (!n.simple () || n.pattern)
+ throw_invalid_argument (n, nullptr, "json object");
+
+ string& s (n.value);
+ size_t p (s.find_first_not_of (" \t\n\r"));
+
+ if (p == string::npos || s[p] != '{')
+ {
+ // Unlike for array above, we cannot turn any value into a member.
+ //
+ throw invalid_argument ("expected json object instead of '" + s + '\'');
+ }
+
+ json_value v (to_json_value (ns.front (), "json object"));
+ assert (v.type == json_type::object);
+ r.object = move (v.object);
+ }
+ else
+ {
+ r.object.reserve (n / 2);
+
+ for (auto i (ns.begin ()); i != ns.end (); ++i)
+ {
+ if (!i->pair)
+ throw invalid_argument (
+ "expected pair in json member value '" + to_string (*i) + '\'');
+
+ string n (to_string_value (*i, "json member name"));
+ json_value v (to_json_value (*++i, "json member"));
+
+ if (find_if (r.object.begin (), r.object.end (),
+ [&n] (const json_member& m)
+ {
+ return m.name == n;
+ }) != r.object.end ())
+ {
+ throw invalid_argument (
+ "duplicate json object member '" + n + '\'');
+ }
+
+ r.object.push_back (json_member {move (n), move (v)});
+ }
+ }
+
+ return r;
+ }
+
+ static void
+ json_object_assign (value& v, names&& ns, const variable* var)
+ {
+ using traits = value_traits<json_object>;
+
+ try
+ {
+ traits::assign (v, traits::convert (move (ns)));
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: ns is not guaranteed to be valid.
+ //
+ diag_record dr (fail);
+ dr << "invalid json object";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+ dr << ": " << e;
+ }
+ }
+
+ static void
+ json_object_append (value& v, names&& ns, const variable* var)
+ {
+ using val_traits = value_traits<json_value>;
+ using obj_traits = value_traits<json_object>;
+
+ try
+ {
+ obj_traits::append (v, val_traits::convert (move (ns)));
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: ns is not guaranteed to be valid.
+ //
+ diag_record dr (fail);
+ dr << "invalid json object";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+ dr << ": " << e;
+ }
+ }
+
+ static void
+ json_object_prepend (value& v, names&& ns, const variable* var)
+ {
+ using val_traits = value_traits<json_value>;
+ using obj_traits = value_traits<json_object>;
+
+ try
+ {
+ obj_traits::prepend (v, val_traits::convert (move (ns)));
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: ns is not guaranteed to be valid.
+ //
+ diag_record dr (fail);
+ dr << "invalid json object";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+ dr << ": " << e;
+ }
+ }
+
+ const json_object value_traits<json_object>::empty_instance;
+ const char* const value_traits<json_object>::type_name = "json_object";
+
+ const value_type value_traits<json_object>::value_type
+ {
+ type_name,
+ sizeof (json_object),
+ &value_traits<json_value>::value_type, // Base (assuming direct cast works
+ // for both).
+ true, // Container.
+ &value_traits<json_value>::value_type, // Element (json_value).
+ &default_dtor<json_object>,
+ &default_copy_ctor<json_object>,
+ &default_copy_assign<json_object>,
+ &json_object_assign,
+ &json_object_append,
+ &json_object_prepend,
+ &json_reverse,
+ nullptr, // No cast (cast data_ directly).
+ &json_compare,
+ &default_empty<json_object>,
+ &json_subscript,
+ &json_iterate
};
// cmdline
@@ -1500,7 +2523,7 @@ namespace build2
new (&v.data_) cmdline (move (x));
}
- void
+ static void
cmdline_assign (value& v, names&& ns, const variable*)
{
if (!v)
@@ -1513,7 +2536,7 @@ namespace build2
make_move_iterator (ns.end ()));
}
- void
+ static void
cmdline_append (value& v, names&& ns, const variable*)
{
if (!v)
@@ -1528,7 +2551,7 @@ namespace build2
make_move_iterator (ns.end ()));
}
- void
+ static void
cmdline_prepend (value& v, names&& ns, const variable*)
{
if (!v)
@@ -1544,7 +2567,7 @@ namespace build2
}
static names_view
- cmdline_reverse (const value& v, names&)
+ cmdline_reverse (const value& v, names&, bool)
{
const auto& x (v.as<cmdline> ());
return names_view (x.data (), x.size ());
@@ -1565,7 +2588,8 @@ namespace build2
type_name,
sizeof (cmdline),
nullptr, // No base.
- &value_traits<string>::value_type,
+ true, // Container.
+ &value_traits<string>::value_type, // Element type.
&default_dtor<cmdline>,
&default_copy_ctor<cmdline>,
&default_copy_assign<cmdline>,
@@ -1575,7 +2599,9 @@ namespace build2
&cmdline_reverse,
nullptr, // No cast (cast data_ directly).
&cmdline_compare,
- &default_empty<cmdline>
+ &default_empty<cmdline>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// variable_pool
@@ -2296,10 +3322,16 @@ namespace build2
template struct LIBBUILD2_DEFEXPORT
value_traits<vector<pair<string, optional<bool>>>>;
+ template struct LIBBUILD2_DEFEXPORT value_traits<set<string>>;
+ template struct LIBBUILD2_DEFEXPORT value_traits<set<json_value>>;
+
template struct LIBBUILD2_DEFEXPORT
value_traits<map<string, string>>;
template struct LIBBUILD2_DEFEXPORT
+ value_traits<map<json_value, json_value>>;
+
+ template struct LIBBUILD2_DEFEXPORT
value_traits<map<string, optional<string>>>;
template struct LIBBUILD2_DEFEXPORT
diff --git a/libbuild2/variable.hxx b/libbuild2/variable.hxx
index 400aaf1..aed3350 100644
--- a/libbuild2/variable.hxx
+++ b/libbuild2/variable.hxx
@@ -4,7 +4,8 @@
#ifndef LIBBUILD2_VARIABLE_HXX
#define LIBBUILD2_VARIABLE_HXX
-#include <type_traits> // aligned_storage
+#include <cstddef> // max_align_t
+#include <type_traits> // is_*
#include <unordered_map>
#include <libbutl/prefix-map.hxx>
@@ -14,6 +15,8 @@
#include <libbuild2/forward.hxx>
#include <libbuild2/utility.hxx>
+#include <libbuild2/json.hxx>
+
#include <libbuild2/context.hxx>
#include <libbuild2/target-type.hxx>
#include <libbuild2/diagnostics.hxx>
@@ -48,7 +51,11 @@ namespace build2
template <typename T> const value_type* is_a () const;
- // Element type, if this is a vector.
+ // True if the type is a container.
+ //
+ bool container;
+
+ // Element type, if this is a container and the element type is named.
//
const value_type* element_type;
@@ -75,9 +82,11 @@ namespace build2
void (*const prepend) (value&, names&&, const variable*);
// Reverse the value back to a vector of names. Storage can be used by the
- // implementation if necessary. Cannot be NULL.
+ // implementation if necessary. If reduce is true, then for an empty
+ // simple value return an empty list rather than a list of one empty name.
+ // Note that the value cannot be NULL.
//
- names_view (*const reverse) (const value&, names& storage);
+ names_view (*const reverse) (const value&, names& storage, bool reduce);
// Cast value::data_ storage to value type so that the result can be
// static_cast to const T*. If it is NULL, then cast data_ directly. Note
@@ -91,7 +100,33 @@ namespace build2
// If NULL, then the value is never empty.
//
+ // Note that this is "semantically empty", not necessarily
+ // "representationally empty". For example, an empty JSON array is
+ // semantically empty but its representation (`[]`) is not.
+ //
bool (*const empty) (const value&);
+
+ // Custom subscript function. If NULL, then the generic implementation is
+ // used.
+ //
+ // Note that val can be NULL. If val_data points to val, then it can be
+ // moved from. The sloc and bloc arguments are the subscript and brace
+ // locations, respectively.
+ //
+ // Note: should normally be consistent with iterate.
+ //
+ value (*/*const*/ subscript) (const value& val,
+ value* val_data,
+ value&& subscript,
+ const location& sloc,
+ const location& bloc);
+
+  // Custom iteration function. It should invoke the specified function for
+ // each element in order. If NULL, then the generic implementation is
+ // used. The passed value is never NULL.
+ //
+ void (*const iterate) (const value&,
+ const function<void (value&&, bool first)>&);
};
// The order of the enumerators is arranged so that their integral values
@@ -108,8 +143,9 @@ namespace build2
target, // Target and target type/pattern-specific.
prereq // Prerequisite-specific.
- // Note: remember to update the visibility attribute parsing if adding
- // any new values here.
+    // Note: remember to update the visibility attribute parsing if adding
+    // any new values here, as well as the $builtin.visibility() function
+    // documentation.
};
// VC14 reports ambiguity but seems to work if we don't provide any.
@@ -315,6 +351,10 @@ namespace build2
// Check in a type-independent way if the value is empty. The value must
// not be NULL.
//
+ // Note that this is "semantically empty", not necessarily
+ // "representationally empty". For example, an empty JSON array is
+ // semantically empty but its representation (`[]`) is not.
+ //
bool
empty () const;
@@ -352,9 +392,13 @@ namespace build2
value&
operator= (nullptr_t) {if (!null) reset (); return *this;}
- value (value&&);
+ // Note that we have the noexcept specification even though copy_ctor()
+ // could potentially throw (for example, for std::map).
+ //
+ value (value&&) noexcept;
+
explicit value (const value&);
- value& operator= (value&&);
+ value& operator= (value&&); // Note: can throw for untyped RHS.
value& operator= (const value&);
value& operator= (reference_wrapper<value>);
value& operator= (reference_wrapper<const value>);
@@ -363,8 +407,8 @@ namespace build2
//
public:
// Assign/append/prepend a typed value. For assign, LHS should be either
- // of the same type or untyped. For append, LHS should be either of the
- // same type or untyped and NULL.
+ // of the same type or untyped. For append/prepend, LHS should be either
+ // of the same type or untyped and NULL.
//
template <typename T> value& operator= (T);
template <typename T> value& operator+= (T);
@@ -413,8 +457,8 @@ namespace build2
// specialization below). Types that don't fit will have to be handled
// with an extra dynamic allocation.
//
- static constexpr size_t size_ = sizeof (name_pair);
- std::aligned_storage<size_>::type data_;
+ static constexpr size_t size_ = sizeof (name_pair);
+ alignas (std::max_align_t) unsigned char data_[size_];
// Make sure we have sufficient storage for untyped values.
//
@@ -454,37 +498,37 @@ namespace build2
template <typename T> T& cast (value&);
template <typename T> T&& cast (value&&);
template <typename T> const T& cast (const value&);
- template <typename T> const T& cast (const lookup&);
+ template <typename T> const T& cast (lookup);
// As above but returns NULL if the value is NULL (or not defined, in
// case of lookup).
//
template <typename T> T* cast_null (value&);
template <typename T> const T* cast_null (const value&);
- template <typename T> const T* cast_null (const lookup&);
+ template <typename T> const T* cast_null (lookup);
// As above but returns empty value if the value is NULL (or not defined, in
// case of lookup).
//
template <typename T> const T& cast_empty (const value&);
- template <typename T> const T& cast_empty (const lookup&);
+ template <typename T> const T& cast_empty (lookup);
// As above but returns the specified default if the value is NULL (or not
// defined, in case of lookup). Note that the return is by value, not by
// reference.
//
template <typename T> T cast_default (const value&, const T&);
- template <typename T> T cast_default (const lookup&, const T&);
+ template <typename T> T cast_default (lookup, const T&);
// As above but returns false/true if the value is NULL (or not defined,
// in case of lookup). Note that the template argument is only for
// documentation and should be bool (or semantically compatible).
//
template <typename T> T cast_false (const value&);
- template <typename T> T cast_false (const lookup&);
+ template <typename T> T cast_false (lookup);
template <typename T> T cast_true (const value&);
- template <typename T> T cast_true (const lookup&);
+ template <typename T> T cast_true (lookup);
// Assign value type to the value. The variable is optional and is only used
// for diagnostics.
@@ -497,18 +541,20 @@ namespace build2
typify_atomic (context&, value&, const value_type&, const variable*);
// Remove value type from the value reversing it to names. This is similar
- // to reverse() below except that it modifies the value itself.
+ // to reverse() below except that it modifies the value itself. Note that
+ // the reduce semantics applies to empty but not null.
//
- LIBBUILD2_SYMEXPORT void untypify (value&);
+ LIBBUILD2_SYMEXPORT void untypify (value&, bool reduce);
// Reverse the value back to names. The value should not be NULL and storage
- // should be empty.
+ // should be empty. If reduce is true, then for an empty simple value return
+ // an empty list rather than a list of one empty name.
//
vector_view<const name>
- reverse (const value&, names& storage);
+ reverse (const value&, names& storage, bool reduce);
vector_view<name>
- reverse (value&, names& storage);
+ reverse (value&, names& storage, bool reduce);
// Variable lookup result, AKA, binding of a variable to a value.
//
@@ -653,7 +699,7 @@ namespace build2
// case (container) if invalid_argument is thrown, the names are not
// guaranteed to be unchanged.
//
- //template <typename T> T convert (names&&); (declaration causes ambiguity)
+ template <typename T> T convert (names&&);
// Convert value to T. If value is already of type T, then simply cast it.
// Otherwise call convert(names) above. If value is NULL, then throw
@@ -931,7 +977,7 @@ namespace build2
// pair of two empties).
//
// @@ Maybe we should redo this with optional<> to signify which half can
- // be missing?
+ // be missing? See also dump_value(json).
//
template <>
struct LIBBUILD2_SYMEXPORT value_traits<name_pair>
@@ -1139,12 +1185,35 @@ namespace build2
static const pair_vector_value_type<K, V> value_type;
};
+ // set<T>
+ //
+ template <typename T>
+ struct set_value_type;
+
+ template <typename T>
+ struct value_traits<set<T>>
+ {
+ static_assert (sizeof (set<T>) <= value::size_, "insufficient space");
+
+ static set<T> convert (names&&);
+ static void assign (value&, set<T>&&);
+ static void append (value&, set<T>&&);
+ static void prepend (value&, set<T>&&);
+ static bool empty (const set<T>& x) {return x.empty ();}
+
+ static const set<T> empty_instance;
+ static const set_value_type<T> value_type;
+ };
+
// map<K, V>
//
// Either K or V can be optional<T> making the key or value optional.
//
- // Note that append/+= is non-overriding (like insert()) while prepend/=+
- // is (like insert_or_assign()).
+ // Note that append/+= is overriding (like insert_or_assign()) while
+ // prepend/=+ is not (like insert()). In a sense, whatever appears last
+ // (from left to right) is kept, which is consistent with what we expect to
+ // happen when specifying the same key repeatedly in a representation (e.g.,
+ // a@0 a@1).
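+  //
+  // For example (using the string_map alias defined in this change):
+  //
+  //   m = [string_map] a@0 b@1
+  //   m += a@2   # a is now 2.
+  //   m =+ a@3   # a stays 2.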
//
template <typename K, typename V>
struct map_value_type;
@@ -1165,6 +1234,79 @@ namespace build2
static const map_value_type<K, V> value_type;
};
+ // json
+ //
+  // Note that we do not expose json_member as a value type, instead
+  // representing it as an object with one member. While we could expose
+  // member (and reverse it as a pair since there is no valid JSON
+  // representation for a standalone member), this doesn't seem to buy us much
+  // but would cause complications (for example, in supporting append/prepend).
+ // On the other hand, representing a member as an object only requires a bit
+ // of what looks like harmless looseness in a few contexts (such as the
+ // $json.member_*() functions).
+ //
+ // Note that similar to map, JSON object append/+= is overriding while
+ // prepend/=+ is not. In a sense, whatever appears last (from left to right)
+ // is kept, which is consistent with what we expect to happen when
+ // specifying the same name repeatedly (provided it's not considered
+ // invalid) in a representation (e.g., {"a":1,"a":2}).
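+  //
+  // For example (roughly):
+  //
+  //   o = [json_object] one@1 two@2
+  //   o += one@3   # one is now 3.
+  //   o =+ two@4   # two stays 2.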
+ //
+ template <>
+ struct LIBBUILD2_SYMEXPORT value_traits<json_value>
+ {
+ static_assert (sizeof (json_value) <= value::size_, "insufficient space");
+
+ static json_value convert (names&&);
+ static void assign (value&, json_value&&);
+ static void append (value&, json_value&&);
+ static void prepend (value&, json_value&&);
+ static bool empty (const json_value&); // null or empty array/object
+
+ // These are provided to make it possible to use json_value as a container
+ // element.
+ //
+ static json_value convert (name&&, name*);
+ static name reverse (const json_value&);
+ static int compare (const json_value& x, const json_value& y) {
+ return x.compare (y);}
+
+ static const json_value empty_instance; // null
+ static const char* const type_name;
+ static const build2::value_type value_type;
+ };
+
+ template <>
+ struct LIBBUILD2_SYMEXPORT value_traits<json_array>
+ {
+ static_assert (sizeof (json_array) <= value::size_, "insufficient space");
+
+ static json_array convert (names&&);
+ static void assign (value&, json_array&&);
+ static void append (value&, json_value&&); // Note: value, not array.
+ static void prepend (value&, json_value&&);
+ static bool empty (const json_array& v) {return v.array.empty ();}
+
+ static const json_array empty_instance; // empty array
+ static const char* const type_name;
+ static const build2::value_type value_type;
+ };
+
+ template <>
+ struct LIBBUILD2_SYMEXPORT value_traits<json_object>
+ {
+ static_assert (sizeof (json_object) <= value::size_, "insufficient space");
+
+ static json_object convert (names&&);
+ static void assign (value&, json_object&&);
+ static void append (value&, json_value&&); // Note: value, not object.
+ static void prepend (value&, json_value&&);
+ static bool empty (const json_object& v) {return v.object.empty ();}
+
+ static const json_object empty_instance; // empty object
+ static const char* const type_name;
+ static const build2::value_type value_type;
+ };
+
// Canned command line to be re-lexed (used in {Build,Test}scripts).
//
// Note that because the executable can be specific as a target or as
@@ -1200,6 +1342,8 @@ namespace build2
// value type objects for the same traits type (and we use their addressed
// as identity; see cast(const value&) for an example).
//
+ // NOTE: REMEMBER TO UPDATE dump_value(json) IF CHANGING ANYTHING HERE!
+ //
extern template struct LIBBUILD2_DECEXPORT value_traits<strings>;
extern template struct LIBBUILD2_DECEXPORT value_traits<vector<name>>;
extern template struct LIBBUILD2_DECEXPORT value_traits<paths>;
@@ -1219,10 +1363,16 @@ namespace build2
extern template struct LIBBUILD2_DECEXPORT
value_traits<vector<pair<string, optional<bool>>>>;
+ extern template struct LIBBUILD2_DECEXPORT value_traits<set<string>>;
+ extern template struct LIBBUILD2_DECEXPORT value_traits<set<json_value>>;
+
extern template struct LIBBUILD2_DECEXPORT
value_traits<map<string, string>>;
extern template struct LIBBUILD2_DECEXPORT
+ value_traits<map<json_value, json_value>>;
+
+ extern template struct LIBBUILD2_DECEXPORT
value_traits<map<string, optional<string>>>;
extern template struct LIBBUILD2_DECEXPORT
@@ -1851,7 +2001,7 @@ namespace build2
variable_map (const variable_map&, const prerequisite&, bool shared = false);
variable_map&
- operator= (variable_map&& v) {m_ = move (v.m_); return *this;}
+ operator= (variable_map&& v) noexcept {m_ = move (v.m_); return *this;}
variable_map&
operator= (const variable_map& v) {m_ = v.m_; return *this;}
@@ -1862,6 +2012,8 @@ namespace build2
variable_map (context& c, bool shared)
: shared_ (shared), owner_ (owner::context), ctx (&c) {}
+ // Note: std::map's move constructor can throw.
+ //
variable_map (variable_map&& v)
: shared_ (v.shared_), owner_ (v.owner_), ctx (v.ctx), m_ (move (v.m_))
{
diff --git a/libbuild2/variable.ixx b/libbuild2/variable.ixx
index c9dfad4..ca84a33 100644
--- a/libbuild2/variable.ixx
+++ b/libbuild2/variable.ixx
@@ -224,7 +224,7 @@ namespace build2
template <typename T>
inline const T&
- cast (const lookup& l)
+ cast (lookup l)
{
return cast<T> (*l);
}
@@ -245,7 +245,7 @@ namespace build2
template <typename T>
inline const T*
- cast_null (const lookup& l)
+ cast_null (lookup l)
{
return l ? &cast<T> (*l) : nullptr;
}
@@ -259,7 +259,7 @@ namespace build2
template <typename T>
inline const T&
- cast_empty (const lookup& l)
+ cast_empty (lookup l)
{
return l ? cast<T> (l) : value_traits<T>::empty_instance;
}
@@ -273,7 +273,7 @@ namespace build2
template <typename T>
inline T
- cast_default (const lookup& l, const T& d)
+ cast_default (lookup l, const T& d)
{
return l ? cast<T> (l) : d;
}
@@ -287,7 +287,7 @@ namespace build2
template <typename T>
inline T
- cast_false (const lookup& l)
+ cast_false (lookup l)
{
return l && cast<T> (l);
}
@@ -301,7 +301,7 @@ namespace build2
template <typename T>
inline T
- cast_true (const lookup& l)
+ cast_true (lookup l)
{
return !l || cast<T> (l);
}
@@ -326,18 +326,21 @@ namespace build2
}
inline vector_view<const name>
- reverse (const value& v, names& storage)
+ reverse (const value& v, names& storage, bool reduce)
{
assert (v &&
storage.empty () &&
(v.type == nullptr || v.type->reverse != nullptr));
- return v.type == nullptr ? v.as<names> () : v.type->reverse (v, storage);
+
+ return v.type == nullptr
+ ? v.as<names> ()
+ : v.type->reverse (v, storage, reduce);
}
inline vector_view<name>
- reverse (value& v, names& storage)
+ reverse (value& v, names& storage, bool reduce)
{
- names_view cv (reverse (static_cast<const value&> (v), storage));
+ names_view cv (reverse (static_cast<const value&> (v), storage, reduce));
return vector_view<name> (const_cast<name*> (cv.data ()), cv.size ());
}
@@ -359,13 +362,53 @@ namespace build2
// This one will be SFINAE'd out unless T is a container.
//
+ // If T is both (e.g., json_value), then make this version preferable.
+ //
template <typename T>
inline auto
- convert (names&& ns) -> decltype (value_traits<T>::convert (move (ns)))
+ convert_impl (names&& ns, int)
+ -> decltype (value_traits<T>::convert (move (ns)))
{
return value_traits<T>::convert (move (ns));
}
+ // This one will be SFINAE'd out unless T is a simple value.
+ //
+ // If T is both (e.g., json_value), then make this version less preferable.
+ //
+ template <typename T>
+ auto // NOTE: not inline!
+ convert_impl (names&& ns, ...) ->
+ decltype (value_traits<T>::convert (move (ns[0]), nullptr))
+ {
+ size_t n (ns.size ());
+
+ if (n == 0)
+ {
+ if (value_traits<T>::empty_value)
+ return T ();
+ }
+ else if (n == 1)
+ {
+ return convert<T> (move (ns[0]));
+ }
+ else if (n == 2 && ns[0].pair != '\0')
+ {
+ return convert<T> (move (ns[0]), move (ns[1]));
+ }
+
+ throw invalid_argument (
+ string ("invalid ") + value_traits<T>::type_name +
+ (n == 0 ? " value: empty" : " value: multiple names"));
+ }
+
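+  // Note: passing 0 (int) makes the container overload above the preferred
+  // one (exact match) over the simple value overload (ellipsis, lowest
+  // conversion rank) for types like json_value where both are viable.
+  //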
+ template <typename T>
+ inline T
+ convert (names&& ns)
+ {
+ return convert_impl<T> (move (ns), 0);
+ }
+
// bool value
//
inline void value_traits<bool>::
@@ -850,6 +893,44 @@ namespace build2
new (&v.data_) vector<pair<K, V>> (move (x));
}
+ // set<T> value
+ //
+ template <typename T>
+ inline void value_traits<set<T>>::
+ assign (value& v, set<T>&& x)
+ {
+ if (v)
+ v.as<set<T>> () = move (x);
+ else
+ new (&v.data_) set<T> (move (x));
+ }
+
+ template <typename T>
+ inline void value_traits<set<T>>::
+ append (value& v, set<T>&& x)
+ {
+ if (v)
+ {
+ set<T>& p (v.as<set<T>> ());
+
+ if (p.empty ())
+ p.swap (x);
+ else
+ // Keys (being const) can only be copied.
+ //
+ p.insert (x.begin (), x.end ());
+ }
+ else
+ new (&v.data_) set<T> (move (x));
+ }
+
+ template <typename T>
+ inline void value_traits<set<T>>::
+ prepend (value& v, set<T>&& x)
+ {
+ append (v, move (x));
+ }
+
// map<K, V> value
//
template <typename K, typename V>
@@ -903,6 +984,113 @@ namespace build2
new (&v.data_) map<K, V> (move (x));
}
+ // json
+ //
+ inline bool value_traits<json_value>::
+ empty (const json_value& v)
+ {
+ // Note: should be consistent with $json.size().
+ //
+ switch (v.type)
+ {
+ case json_type::null: return true;
+ case json_type::boolean:
+ case json_type::signed_number:
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ case json_type::string: break;
+ case json_type::array: return v.array.empty ();
+ case json_type::object: return v.object.empty ();
+ }
+
+ return false;
+ }
+
+ inline void value_traits<json_value>::
+ assign (value& v, json_value&& x)
+ {
+ if (v)
+ v.as<json_value> () = move (x);
+ else
+ new (&v.data_) json_value (move (x));
+ }
+
+ inline void value_traits<json_value>::
+ append (value& v, json_value&& x)
+ {
+ if (v)
+ v.as<json_value> ().append (move (x));
+ else
+ new (&v.data_) json_value (move (x));
+ }
+
+ inline void value_traits<json_value>::
+ prepend (value& v, json_value&& x)
+ {
+ if (v)
+ v.as<json_value> ().prepend (move (x));
+ else
+ new (&v.data_) json_value (move (x));
+ }
+
+ // json_array
+ //
+ inline void value_traits<json_array>::
+ assign (value& v, json_array&& x)
+ {
+ if (v)
+ v.as<json_array> () = move (x);
+ else
+ new (&v.data_) json_array (move (x));
+ }
+
+ inline void value_traits<json_array>::
+ append (value& v, json_value&& x)
+ {
+ if (!v)
+ new (&v.data_) json_array ();
+
+ v.as<json_array> ().append (move (x));
+ }
+
+ inline void value_traits<json_array>::
+ prepend (value& v, json_value&& x)
+ {
+ if (!v)
+ new (&v.data_) json_array ();
+
+ v.as<json_array> ().prepend (move (x));
+ }
+
+ // json_object
+ //
+ inline void value_traits<json_object>::
+ assign (value& v, json_object&& x)
+ {
+ if (v)
+ v.as<json_object> () = move (x);
+ else
+ new (&v.data_) json_object (move (x));
+ }
+
+ inline void value_traits<json_object>::
+ append (value& v, json_value&& x)
+ {
+ if (!v)
+ new (&v.data_) json_object ();
+
+ v.as<json_object> ().append (move (x));
+ }
+
+ inline void value_traits<json_object>::
+ prepend (value& v, json_value&& x)
+ {
+ if (!v)
+ new (&v.data_) json_object ();
+
+ v.as<json_object> ().prepend (move (x));
+ }
+
// variable_pool
//
inline const variable* variable_pool::
diff --git a/libbuild2/variable.txx b/libbuild2/variable.txx
index fda3486..0b831e9 100644
--- a/libbuild2/variable.txx
+++ b/libbuild2/variable.txx
@@ -27,34 +27,6 @@ namespace build2
return false;
}
- // This one will be SFINAE'd out unless T is a simple value.
- //
- template <typename T>
- auto
- convert (names&& ns) ->
- decltype (value_traits<T>::convert (move (ns[0]), nullptr))
- {
- size_t n (ns.size ());
-
- if (n == 0)
- {
- if (value_traits<T>::empty_value)
- return T ();
- }
- else if (n == 1)
- {
- return convert<T> (move (ns[0]));
- }
- else if (n == 2 && ns[0].pair != '\0')
- {
- return convert<T> (move (ns[0]), move (ns[1]));
- }
-
- throw invalid_argument (
- string ("invalid ") + value_traits<T>::type_name +
- (n == 0 ? " value: empty" : " value: multiple names"));
- }
-
[[noreturn]] LIBBUILD2_SYMEXPORT void
convert_throw (const value_type* from, const value_type& to);
@@ -229,13 +201,13 @@ namespace build2
template <typename T>
names_view
- simple_reverse (const value& v, names& s)
+ simple_reverse (const value& v, names& s, bool reduce)
{
const T& x (v.as<T> ());
- // Represent an empty simple value as empty name sequence rather than
- // a single empty name. This way, for example, during serialization we
- // end up with a much saner looking:
+ // Unless requested otherwise, represent an empty simple value as empty
+ // name sequence rather than a single empty name. This way, for example,
+ // during serialization we end up with a much saner looking:
//
// config.import.foo =
//
@@ -245,6 +217,8 @@ namespace build2
//
if (!value_traits<T>::empty (x))
s.emplace_back (value_traits<T>::reverse (x));
+ else if (!reduce)
+ s.push_back (name ());
return s;
}
@@ -477,6 +451,7 @@ namespace build2
convert (names&& ns)
{
vector<T> v;
+ v.reserve (ns.size ()); // Normally there won't be any pairs.
// Similar to vector_append() below except we throw instead of issuing
// diagnostics.
@@ -509,6 +484,8 @@ namespace build2
? v.as<vector<T>> ()
: *new (&v.data_) vector<T> ());
+ p.reserve (p.size () + ns.size ()); // Normally there won't be any pairs.
+
// Convert each element to T while merging pairs.
//
for (auto i (ns.begin ()); i != ns.end (); ++i)
@@ -589,8 +566,8 @@ namespace build2
}
template <typename T>
- static names_view
- vector_reverse (const value& v, names& s)
+ names_view
+ vector_reverse (const value& v, names& s, bool)
{
auto& vv (v.as<vector<T>> ());
s.reserve (vv.size ());
@@ -602,7 +579,7 @@ namespace build2
}
template <typename T>
- static int
+ int
vector_compare (const value& l, const value& r)
{
auto& lv (l.as<vector<T>> ());
@@ -624,6 +601,68 @@ namespace build2
return 0;
}
+ // Provide subscript for vector<T> for efficiency.
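+  //
+  // For example (with the strings alias, i.e., vector<string>):
+  //
+  //   s = [strings] a b c
+  //   print ($s[1])   # b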
+ //
+ template <typename T>
+ value
+ vector_subscript (const value& val, value* val_data,
+ value&& sub,
+ const location& sloc,
+ const location& bloc)
+ {
+ // Process subscript even if the value is null to make sure it is valid.
+ //
+ size_t i;
+ try
+ {
+ i = static_cast<size_t> (convert<uint64_t> (move (sub)));
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (sloc) << "invalid " << value_traits<vector<T>>::value_type.name
+ << " value subscript: " << e <<
+ info (bloc) << "use the '\\[' escape sequence if this is a "
+ << "wildcard pattern";
+ }
+
+ value r;
+ if (!val.null)
+ {
+ const auto& v (val.as<vector<T>> ());
+ if (i < v.size ())
+ {
+ const T& e (v[i]);
+
+ // Steal the value if possible.
+ //
+ r = &val == val_data ? T (move (const_cast<T&> (e))) : T (e);
+ }
+ }
+
+ // Typify null values so that type-specific subscript (e.g., for
+ // json_value) gets called for chained subscripts.
+ //
+ if (r.null)
+ r.type = &value_traits<T>::value_type;
+
+ return r;
+ }
+
+ // Provide iterate for vector<T> for efficiency.
+ //
+ template <typename T>
+ void
+ vector_iterate (const value& val,
+ const function<void (value&&, bool first)>& f)
+ {
+ const auto& v (val.as<vector<T>> ()); // Never NULL.
+
+ for (auto b (v.begin ()), i (b), e (v.end ()); i != e; ++i)
+ {
+ f (value (*i), i == b);
+ }
+ }
+
// Make sure these are static-initialized together. Failed that VC will make
// sure it's done in the wrong order.
//
@@ -635,6 +674,8 @@ namespace build2
vector_value_type (value_type&& v)
: value_type (move (v))
{
+ // Note: vector<T> always has a convenience alias.
+ //
type_name = value_traits<T>::type_name;
type_name += 's';
name = type_name.c_str ();
@@ -651,7 +692,8 @@ namespace build2
nullptr, // Patched above.
sizeof (vector<T>),
nullptr, // No base.
- &value_traits<T>::value_type,
+ true, // Container.
+ &value_traits<T>::value_type, // Element type.
&default_dtor<vector<T>>,
&default_copy_ctor<vector<T>>,
&default_copy_assign<vector<T>>,
@@ -661,7 +703,9 @@ namespace build2
&vector_reverse<T>,
nullptr, // No cast (cast data_ directly).
&vector_compare<T>,
- &default_empty<vector<T>>
+ &default_empty<vector<T>>,
+ &vector_subscript<T>,
+ &vector_iterate<T>
};
// vector<pair<K, V>> value
@@ -701,8 +745,8 @@ namespace build2
}
template <typename K, typename V>
- static names_view
- pair_vector_reverse (const value& v, names& s)
+ names_view
+ pair_vector_reverse (const value& v, names& s, bool)
{
auto& vv (v.as<vector<pair<K, V>>> ());
s.reserve (2 * vv.size ());
@@ -714,7 +758,7 @@ namespace build2
}
template <typename K, typename V>
- static int
+ int
pair_vector_compare (const value& l, const value& r)
{
auto& lv (l.as<vector<pair<K, V>>> ());
@@ -749,10 +793,13 @@ namespace build2
pair_vector_value_type (value_type&& v)
: value_type (move (v))
{
- type_name = value_traits<K>::type_name;
- type_name += '_';
+ // vector<pair<K,V>>
+ //
+ type_name = "vector<pair<";
+ type_name += value_traits<K>::type_name;
+ type_name += ',';
type_name += value_traits<V>::type_name;
- type_name += "_pair_vector";
+ type_name += ">>";
name = type_name.c_str ();
}
};
@@ -768,10 +815,13 @@ namespace build2
pair_vector_value_type (value_type&& v)
: value_type (move (v))
{
- type_name = value_traits<K>::type_name;
- type_name += "_optional_";
+ // vector<pair<K,optional<V>>>
+ //
+ type_name = "vector<pair<";
+ type_name += value_traits<K>::type_name;
+ type_name += ",optional<";
type_name += value_traits<V>::type_name;
- type_name += "_pair_vector";
+ type_name += ">>>";
name = type_name.c_str ();
}
};
@@ -784,11 +834,13 @@ namespace build2
pair_vector_value_type (value_type&& v)
: value_type (move (v))
{
- type_name = "optional_";
+ // vector<pair<optional<K>,V>>
+ //
+ type_name = "vector<pair<optional<";
type_name += value_traits<K>::type_name;
- type_name += '_';
+ type_name += ">,";
type_name += value_traits<V>::type_name;
- type_name += "_pair_vector";
+ type_name += ">>";
name = type_name.c_str ();
}
};
@@ -803,7 +855,8 @@ namespace build2
nullptr, // Patched above.
sizeof (vector<pair<K, V>>),
nullptr, // No base.
- nullptr, // No element.
+ true, // Container.
+ nullptr, // No element (not named).
&default_dtor<vector<pair<K, V>>>,
&default_copy_ctor<vector<pair<K, V>>>,
&default_copy_assign<vector<pair<K, V>>>,
@@ -813,7 +866,244 @@ namespace build2
&pair_vector_reverse<K, V>,
nullptr, // No cast (cast data_ directly).
&pair_vector_compare<K, V>,
- &default_empty<vector<pair<K, V>>>
+ &default_empty<vector<pair<K, V>>>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
+ };
+
+ // set<T> value
+ //
+ template <typename T>
+ set<T> value_traits<set<T>>::
+ convert (names&& ns)
+ {
+ set<T> s;
+
+ // Similar to set_append() below except we throw instead of issuing
+ // diagnostics.
+ //
+ for (auto i (ns.begin ()); i != ns.end (); ++i)
+ {
+ name& n (*i);
+ name* r (nullptr);
+
+ if (n.pair)
+ {
+ r = &*++i;
+
+ if (n.pair != '@')
+ throw invalid_argument (
+ string ("invalid pair character: '") + n.pair + '\'');
+ }
+
+ s.insert (value_traits<T>::convert (move (n), r));
+ }
+
+ return s;
+ }
+
+ template <typename T>
+ void
+ set_append (value& v, names&& ns, const variable* var)
+ {
+ set<T>& s (v ? v.as<set<T>> () : *new (&v.data_) set<T> ());
+
+ // Convert each element to T while merging pairs.
+ //
+ for (auto i (ns.begin ()); i != ns.end (); ++i)
+ {
+ name& n (*i);
+ name* r (nullptr);
+
+ if (n.pair)
+ {
+ r = &*++i;
+
+ if (n.pair != '@')
+ {
+ diag_record dr (fail);
+
+ dr << "unexpected pair style for "
+ << value_traits<T>::value_type.name << " value "
+ << "'" << n << "'" << n.pair << "'" << *r << "'";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+ }
+ }
+
+ try
+ {
+ s.insert (value_traits<T>::convert (move (n), r));
+ }
+ catch (const invalid_argument& e)
+ {
+ diag_record dr (fail);
+
+ dr << e;
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+        dr << info << "while converting";
+ if (n.pair)
+ dr << " element pair '" << n << "'@'" << *r << "'";
+ else
+ dr << " element '" << n << "'";
+ }
+ }
+ }
+
+ template <typename T>
+ void
+ set_assign (value& v, names&& ns, const variable* var)
+ {
+ if (v)
+ v.as<set<T>> ().clear ();
+
+ set_append<T> (v, move (ns), var);
+ }
+
+ template <typename T>
+ names_view
+ set_reverse (const value& v, names& s, bool)
+ {
+ auto& sv (v.as<set<T>> ());
+ s.reserve (sv.size ());
+
+ for (const T& x: sv)
+ s.push_back (value_traits<T>::reverse (x));
+
+ return s;
+ }
+
+ template <typename T>
+ int
+ set_compare (const value& l, const value& r)
+ {
+ auto& ls (l.as<set<T>> ());
+ auto& rs (r.as<set<T>> ());
+
+ auto li (ls.begin ()), le (ls.end ());
+ auto ri (rs.begin ()), re (rs.end ());
+
+ for (; li != le && ri != re; ++li, ++ri)
+ if (int r = value_traits<T>::compare (*li, *ri))
+ return r;
+
+ if (li == le && ri != re) // l shorter than r.
+ return -1;
+
+ if (ri == re && li != le) // r shorter than l.
+ return 1;
+
+ return 0;
+ }
+
+ // Map subscript to set::contains().
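+  //
+  // For example (with the string_set alias defined below):
+  //
+  //   s = [string_set] a b c
+  //   print ($s[b])   # true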
+ //
+ template <typename T>
+ value
+ set_subscript (const value& val, value*,
+ value&& sub,
+ const location& sloc,
+ const location& bloc)
+ {
+ // Process subscript even if the value is null to make sure it is valid.
+ //
+ T k;
+ try
+ {
+ k = convert<T> (move (sub));
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (sloc) << "invalid " << value_traits<set<T>>::value_type.name
+ << " value subscript: " << e <<
+ info (bloc) << "use the '\\[' escape sequence if this is a "
+ << "wildcard pattern";
+ }
+
+ bool r (false);
+ if (!val.null)
+ {
+ const auto& s (val.as<set<T>> ());
+ r = s.find (k) != s.end ();
+ }
+
+ return value (r);
+ }
+
+ // Provide iterate for set<T> for efficiency.
+ //
+ template <typename T>
+ void
+ set_iterate (const value& val,
+ const function<void (value&&, bool first)>& f)
+ {
+ const auto& v (val.as<set<T>> ()); // Never NULL.
+
+ for (auto b (v.begin ()), i (b), e (v.end ()); i != e; ++i)
+ {
+ f (value (*i), i == b);
+ }
+ }
+
+  // Make sure these are static-initialized together. Failing that, VC will
+  // make sure it's done in the wrong order.
+ //
+ template <typename T>
+ struct set_value_type: value_type
+ {
+ string type_name;
+
+ set_value_type (value_type&& v)
+ : value_type (move (v))
+ {
+ // set<T>
+ //
+ type_name = "set<";
+ type_name += value_traits<T>::type_name;
+ type_name += '>';
+ name = type_name.c_str ();
+ }
+ };
+
+ // Convenience aliases for certain set<T> cases.
+ //
+ template <>
+ struct set_value_type<string>: value_type
+ {
+ set_value_type (value_type&& v)
+ : value_type (move (v))
+ {
+ name = "string_set";
+ }
+ };
+
+ template <typename T>
+ const set<T> value_traits<set<T>>::empty_instance;
+
+ template <typename T>
+ const set_value_type<T>
+ value_traits<set<T>>::value_type = build2::value_type // VC14 wants =.
+ {
+ nullptr, // Patched above.
+ sizeof (set<T>),
+ nullptr, // No base.
+ true, // Container.
+ &value_traits<T>::value_type, // Element type.
+ &default_dtor<set<T>>,
+ &default_copy_ctor<set<T>>,
+ &default_copy_assign<set<T>>,
+ &set_assign<T>,
+ &set_append<T>,
+ &set_append<T>, // Prepend the same as append.
+ &set_reverse<T>,
+ nullptr, // No cast (cast data_ directly).
+ &set_compare<T>,
+ &default_empty<set<T>>,
+ &set_subscript<T>,
+ &set_iterate<T>
};
// map<K, V> value
@@ -839,7 +1129,9 @@ namespace build2
"element",
var));
- p.emplace (move (v.first), move (v.second));
+ // Poor man's emplace_or_assign().
+ //
+ p.emplace (move (v.first), V ()).first->second = move (v.second);
}
}
@@ -864,9 +1156,7 @@ namespace build2
"element",
var));
- // Poor man's emplace_or_assign().
- //
- p.emplace (move (v.first), V ()).first->second = move (v.second);
+ p.emplace (move (v.first), move (v.second));
}
}
@@ -881,8 +1171,8 @@ namespace build2
}
template <typename K, typename V>
- static names_view
- map_reverse (const value& v, names& s)
+ names_view
+ map_reverse (const value& v, names& s, bool)
{
auto& vm (v.as<map<K, V>> ());
s.reserve (2 * vm.size ());
@@ -894,7 +1184,7 @@ namespace build2
}
template <typename K, typename V>
- static int
+ int
map_compare (const value& l, const value& r)
{
auto& lm (l.as<map<K, V>> ());
@@ -918,6 +1208,59 @@ namespace build2
return 0;
}
+ // Note that unlike json_value, we don't provide index support for maps.
+ // There are two reasons for this: Firstly, consider map<uint64_t,...>.
+ // Secondly, even something like map<string,...> may contain integers as
+ // keys (in JSON, there is a strong convention for object member names not
+ // to be integers). Instead, we provide the $keys() function which allows
+ // one to implement an index-based access with a bit of overhead, if needed.
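+  //
+  // For example (using the string_map alias and $keys()):
+  //
+  //   m = [string_map] a@1 b@2
+  //   print ($m[b])    # 2
+  //
+  //   k = $keys($m)    # a b
+  //   n = ($k[0])      # a
+  //   print ($m[$n])   # 1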
+ //
+ template <typename K, typename V>
+ value
+ map_subscript (const value& val, value* val_data,
+ value&& sub,
+ const location& sloc,
+ const location& bloc)
+ {
+ // Process subscript even if the value is null to make sure it is valid.
+ //
+ K k;
+ try
+ {
+ k = convert<K> (move (sub));
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (sloc) << "invalid " << value_traits<map<K, V>>::value_type.name
+ << " value subscript: " << e <<
+ info (bloc) << "use the '\\[' escape sequence if this is a "
+ << "wildcard pattern";
+ }
+
+ value r;
+ if (!val.null)
+ {
+ const auto& m (val.as<map<K, V>> ());
+ auto i (m.find (k));
+ if (i != m.end ())
+ {
+ // Steal the value if possible.
+ //
+ r = (&val == val_data
+ ? V (move (const_cast<V&> (i->second)))
+ : V (i->second));
+ }
+ }
+
+ // Typify null values so that type-specific subscript (e.g., for
+ // json_value) gets called for chained subscripts.
+ //
+ if (r.null)
+ r.type = &value_traits<V>::value_type;
+
+ return r;
+ }
+
// Make sure these are static-initialized together. Failed that VC will make
// sure it's done in the wrong order.
//
@@ -929,11 +1272,15 @@ namespace build2
map_value_type (value_type&& v)
: value_type (move (v))
{
- type_name = value_traits<K>::type_name;
- type_name += '_';
+ // map<K,V>
+ //
+ type_name = "map<";
+ type_name += value_traits<K>::type_name;
+ type_name += ',';
type_name += value_traits<V>::type_name;
- type_name += "_map";
+ type_name += '>';
name = type_name.c_str ();
+ subscript = &map_subscript<K, V>;
}
};
@@ -948,11 +1295,15 @@ namespace build2
map_value_type (value_type&& v)
: value_type (move (v))
{
- type_name = value_traits<K>::type_name;
- type_name += "_optional_";
+ // map<K,optional<V>>
+ //
+ type_name = "map<";
+ type_name += value_traits<K>::type_name;
+ type_name += ",optional<";
type_name += value_traits<V>::type_name;
- type_name += "_map";
+ type_name += ">>";
name = type_name.c_str ();
+ // @@ TODO: subscript
}
};
@@ -964,18 +1315,42 @@ namespace build2
map_value_type (value_type&& v)
: value_type (move (v))
{
- type_name = "optional_";
+ // map<optional<K>,V>
+ //
+ type_name = "map<optional<";
type_name += value_traits<K>::type_name;
- type_name += '_';
+ type_name += ">,";
type_name += value_traits<V>::type_name;
- type_name += "_map";
+ type_name += '>';
name = type_name.c_str ();
+ // @@ TODO: subscript
+ }
+ };
+
+ // Convenience aliases for certain map<T,T> cases.
+ //
+ template <>
+ struct map_value_type<string, string>: value_type
+ {
+ map_value_type (value_type&& v)
+ : value_type (move (v))
+ {
+ name = "string_map";
+ subscript = &map_subscript<string, string>;
}
};
template <typename K, typename V>
const map<K, V> value_traits<map<K, V>>::empty_instance;
+  // Note that custom iteration would be better (more efficient, would return
+  // typed values), but we don't yet have pair<> as a value type so we let the
+  // generic implementation return an untyped pair.
+  //
+  // BTW, one negative consequence of returning an untyped pair is that
+  // $first()/$second() don't return typed values either, which is quite
+  // unfortunate for something like json_map.
+ //
template <typename K, typename V>
const map_value_type<K, V>
value_traits<map<K, V>>::value_type = build2::value_type // VC14 wants =
@@ -983,7 +1358,8 @@ namespace build2
nullptr, // Patched above.
sizeof (map<K, V>),
nullptr, // No base.
- nullptr, // No element.
+ true, // Container.
+ nullptr, // No element (pair<> not a value type yet).
&default_dtor<map<K, V>>,
&default_copy_ctor<map<K, V>>,
&default_copy_assign<map<K, V>>,
@@ -993,7 +1369,9 @@ namespace build2
&map_reverse<K, V>,
nullptr, // No cast (cast data_ directly).
&map_compare<K, V>,
- &default_empty<map<K, V>>
+ &default_empty<map<K, V>>,
+ nullptr, // Subscript (patched in by map_value_type above).
+ nullptr // Iterate.
};
// variable_cache
@@ -1014,8 +1392,8 @@ namespace build2
: 0);
shared_mutex& m (
- ctx.mutexes.variable_cache[
- hash<variable_cache*> () (this) % ctx.mutexes.variable_cache_size]);
+ ctx.mutexes->variable_cache[
+ hash<variable_cache*> () (this) % ctx.mutexes->variable_cache_size]);
slock sl (m);
ulock ul (m, defer_lock);
diff --git a/libbuild2/version/rule.cxx b/libbuild2/version/rule.cxx
index 98dc2da..65c1117 100644
--- a/libbuild2/version/rule.cxx
+++ b/libbuild2/version/rule.cxx
@@ -93,6 +93,12 @@ namespace build2
if (!fi)
l5 ([&]{trace << "no in file prerequisite for target " << t;});
+ // If we match, derive the file name early as recommended by the in
+ // rule.
+ //
+ if (fm && fi)
+ t.derive_path ();
+
return fm && fi;
}
diff --git a/manifest b/manifest
index 02cfd7d..4781eef 100644
--- a/manifest
+++ b/manifest
@@ -1,6 +1,6 @@
: 1
name: build2
-version: 0.16.0-a.0.z
+version: 0.17.0-a.0.z
summary: build2 build system
license: MIT
topics: build system, build toolchain
@@ -12,11 +12,11 @@ doc-url: https://build2.org/doc.xhtml
src-url: https://git.build2.org/cgit/build2/tree/
email: users@build2.org
build-warning-email: builds@build2.org
-builds: host
+builds: all : &host
requires: c++14
-depends: * build2 >= 0.15.0-
-depends: * bpkg >= 0.15.0-
+depends: * build2 >= 0.16.0-
+depends: * bpkg >= 0.16.0-
# @@ DEP Should probably become conditional dependency.
#requires: ? cli ; Only required if changing .cli files.
-depends: libbutl [0.16.0-a.0.1 0.16.0-a.1)
-depends: libpkg-config ~0.1.0
+depends: libbutl [0.17.0-a.0.1 0.17.0-a.1)
+depends: libpkg-config ~0.1.1
diff --git a/old-tests/variable/override/buildfile b/old-tests/variable/override/buildfile
index 87dc273..c0330cb 100644
--- a/old-tests/variable/override/buildfile
+++ b/old-tests/variable/override/buildfile
@@ -3,6 +3,8 @@ if ($p.t != [null])
[$p.t] p.v = [null]
}
+/:
+
print "/ :" $(/: p.v)
if ($p.a == as)
@@ -22,6 +24,8 @@ print ". :" $p.v
d/
{
+ file{t}:
+
if ($p.d_a == as)
{
p.v = x
diff --git a/old-tests/variable/override/p/buildfile b/old-tests/variable/override/p/buildfile
index 166d869..8f4df28 100644
--- a/old-tests/variable/override/p/buildfile
+++ b/old-tests/variable/override/p/buildfile
@@ -15,6 +15,8 @@ print "p :" $p.v
d/
{
+ file{t}:
+
if ($p.p_d_a == as)
{
p.v = x
diff --git a/old-tests/variable/type-pattern-append/buildfile b/old-tests/variable/type-pattern-append/buildfile
index 348f70f..3077c32 100644
--- a/old-tests/variable/type-pattern-append/buildfile
+++ b/old-tests/variable/type-pattern-append/buildfile
@@ -1,3 +1,5 @@
+./ sub/:
+
# Typed append/prepend.
#
#dir{a*}: x += [bool] true
diff --git a/tests/build/root.build b/tests/build/root.build
index 8f9a482..712e73c 100644
--- a/tests/build/root.build
+++ b/tests/build/root.build
@@ -14,9 +14,16 @@ if ($cxx.target.system == 'win32-msvc')
if ($cxx.class == 'msvc')
cxx.coptions += /wd4251 /wd4275 /wd4800
elif ($cxx.id == 'gcc')
+{
cxx.coptions += -Wno-maybe-uninitialized -Wno-free-nonheap-object \
-Wno-stringop-overread # libbutl
+ if ($cxx.version.major >= 13)
+ cxx.coptions += -Wno-dangling-reference
+}
+elif ($cxx.id.type == 'clang' && $cxx.version.major >= 15)
+ cxx.coptions += -Wno-unqualified-std-cast-call
+
# Setup the build system driver that we are testing (which may not be the same
# as our $build.path). We also need to disable importation using the built-in
# path.
diff --git a/tests/cc/modules/common.testscript b/tests/cc/modules/common.testscript
index b383dc1..9883e42 100644
--- a/tests/cc/modules/common.testscript
+++ b/tests/cc/modules/common.testscript
@@ -21,7 +21,15 @@ cxx.std = experimental
cxx.features.symexport = true
# @@ TMP revise
-if ($cxx.id == 'gcc')
+#
+# Note: there are some issues with enabling modules in Apple Clang 15, so
+# for now we only test vanilla Clang.
+#
+if (($cxx.id == 'gcc' && $cxx.version.major >= 11) || \
+ ($cxx.id == 'clang' && $cxx.version.major >= 16) || \
+ ($cxx.id == 'msvc' && ($cxx.version.major > 19 || \
+ ($cxx.version.major == 19 && \
+ $cxx.version.minor >= 36))))
cxx.features.modules = true
using cxx
@@ -38,7 +46,7 @@ if ($cxx.target.class == 'windows')
exe{*}: test = true
EOI
-# Determine if we have modules and header units support.
+# Determine if we have named modules and header units support.
#
+$* noop <<EOI | set modules
print $cxx.features.modules
@@ -48,9 +56,11 @@ EOI
print ($cxx.features.modules && $cxx.id == 'gcc')
EOI
-# @@ TMP: modules support is completely broken in MinGW GCC 11.x.
+# @@ TMP: modules support is broken in MinGW GCC (not just symexport).
+# @@ TMP: try modules with Clang on Windows (symexport seems to work).
#
-if ($cxx.target.class == 'windows' && $cxx.id == 'gcc')
+if ($cxx.target.class == 'windows' && \
+ ($cxx.id == 'gcc' || $cxx.id.type == 'clang'))
modules = false
headers = false
end
diff --git a/tests/cc/modules/modules.testscript b/tests/cc/modules/modules.testscript
index 8762885..c286c1f 100644
--- a/tests/cc/modules/modules.testscript
+++ b/tests/cc/modules/modules.testscript
@@ -205,15 +205,11 @@ $* test clean <<EOI
:
: Test global module fragment/leading module marker (module;).
:
-if ($cxx.id != 'msvc') # Disabled for MSVC due to issue 845845.
-{
cat <<EOI >=g.hxx;
void g ();
EOI
cat <<EOI >=core.mxx;
-#if __cpp_modules >= 201810
module;
-#endif
#include "g.hxx"
EOI
@@ -222,7 +218,6 @@ ln -s ../core.cxx ../driver.cxx ./;
$* test clean <<EOI
exe{test}: cxx{driver} {mxx cxx}{core}
EOI
-}
: re-export
:
@@ -373,20 +368,28 @@ cat <<EOI >=core.mxx;
export __symexport int f (int);
- __symexport int g_impl (int i) {return i - 1;}
+ __symexport int g_impl (int i);
export __symexport inline int g (int i) {return g_impl (i);}
+
+ export __symexport int v1 = 1;
+ export __symexport extern int v2;
EOI
ln -s ../core.cxx core-f.cxx;
cat <<EOI >=core-g.cxx;
module foo.core;
int g_impl (int i) {return i - 1;}
+ int v = 1;
+ EOI
+cat <<EOI >=core-v.cxx;
+ module foo.core;
+ int v2 = -1;
EOI
cat <<EOI >=driver.cxx;
import foo.core;
- int main (int argc, char*[]) {return f (argc) + g (argc);}
+ int main (int argc, char*[]) {return f (argc) + g (argc) + v1 + v2;}
EOI
$* test clean <<EOI
./: lib{foo} exe{test} # Full build.
exe{test}: cxx{driver} lib{foo}
- lib{foo}: mxx{core} cxx{core-f} # @@ VC: core-g
+ lib{foo}: mxx{core} cxx{core-g core-f core-v}
EOI
diff --git a/tests/cc/preprocessed/testscript b/tests/cc/preprocessed/testscript
index 507a92d..53e7755 100644
--- a/tests/cc/preprocessed/testscript
+++ b/tests/cc/preprocessed/testscript
@@ -98,6 +98,7 @@ $* &test* <<EOI 2>>EOE != 0
exe{test}: cxx{test}
EOI
error: modules support required by cxx{test}
+ info: consider enabling modules with cxx.features.modules=true in root.build
EOE
: all
diff --git a/tests/dependency/recipe/testscript b/tests/dependency/recipe/testscript
index f43111e..a581724 100644
--- a/tests/dependency/recipe/testscript
+++ b/tests/dependency/recipe/testscript
@@ -406,7 +406,7 @@ alias{x}:
echo
}
EOI
-<stdin>:3:1: error: expected recipe block instead of '{'
+<stdin>:3:1: error: expected recipe block or 'recipe' instead of '{'
EOE
: duplicate-action-single
diff --git a/tests/directive/config.testscript b/tests/directive/config.testscript
index fba858f..ebdd6ac 100644
--- a/tests/directive/config.testscript
+++ b/tests/directive/config.testscript
@@ -212,12 +212,14 @@ test.arguments =
config [strings, config.report=multiline] config.test.d ?= 1 2 3
config [string, config.report.variable=e] config.test.e ?= abc
config [ config.report] f
+ config [ config.report.variable=g] gg
config [bool] config.test.n ?= [null]
config [bool] config.test.p
config [bool] config.test.p ?= true
e = "'$config.test.e'"
f = ($config.test.b || $config.test.c)
+ g = abc
EOI
@@ -240,6 +242,7 @@ test.arguments =
3
e 'abc'
f true
+ gg abc
n [null]
p true
EOO
@@ -262,6 +265,7 @@ test.arguments =
3
e 'xyz'
f true
+ gg abc
n true
p false
EOO
diff --git a/tests/expansion/escape.testscript b/tests/expansion/escape.testscript
new file mode 100644
index 0000000..1140032
--- /dev/null
+++ b/tests/expansion/escape.testscript
@@ -0,0 +1,17 @@
+# file      : tests/expansion/escape.testscript
+# license : MIT; see accompanying LICENSE file
+
+# Test escape sequence expansion.
+
+.include ../common.testscript
+
+: simple
+:
+$* <<EOI >>EOO
+print "foo$\nbar"
+print $size([string] "foo$\0bar")
+EOI
+foo
+bar
+7
+EOO
diff --git a/tests/function/builtin/testscript b/tests/function/builtin/testscript
index 714a38d..04e8bd8 100644
--- a/tests/function/builtin/testscript
+++ b/tests/function/builtin/testscript
@@ -53,6 +53,34 @@
$* <'print $empty(abc)' >'false' : name
$* <'print $empty(abc cxx{foo})' >'false' : names
$* <'print $empty([bool] false)' >'false' : bool
+ $* <'print $empty([json] null)' >'true' : json-null
+ $* <'print $empty([json] "[]")' >'true' : json-array
+ $* <'print $empty([json] "{}")' >'true' : json-object
+}
+
+: first-second
+:
+{
+ $* <'print $first(a@1)' >'a' : first
+ $* <'print $second(a@1)' >'1' : second
+
+ $* <'print $first(@1)' >'{}' : first-empty
+ $* <'print $second(a@)' >'{}' : second-empty
+
+ $* <'print $first(1)' >'[null]' : first-null
+ $* <'print $second(a)' >'[null]' : second-null
+
+ $* <'print $first(1, true)' >'1' : first-all
+ $* <'print $second(a, true)' >'a' : second-all
+
+ $* <'print $first(0 a@1 b@2 c@ 4)' >'a b c' : firsts
+ $* <'print $second(z a@1 b@2 @3 d)' >'1 2 3' : seconds
+
+ $* <'print $first(0 a@1 b@2 c@ 4, true)' >'0 a b c 4' : firsts-all
+ $* <'print $second(z a@1 b@2 @3 d, true)' >'z 1 2 3 d' : seconds-all
+
+ $* <'print $first([name_pair] a@1)' >'a' : first-typed
+ $* <'print $second([name_pair] a@1)' >'1' : second-typed
}
: identity
diff --git a/tests/function/json/buildfile b/tests/function/json/buildfile
new file mode 100644
index 0000000..45c60d2
--- /dev/null
+++ b/tests/function/json/buildfile
@@ -0,0 +1,4 @@
+# file : tests/function/json/buildfile
+# license : MIT; see accompanying LICENSE file
+
+./: testscript $b
diff --git a/tests/function/json/testscript b/tests/function/json/testscript
new file mode 100644
index 0000000..54e524f
--- /dev/null
+++ b/tests/function/json/testscript
@@ -0,0 +1,257 @@
+# file : tests/function/json/testscript
+# license : MIT; see accompanying LICENSE file
+
+# See also tests in type/json/.
+
+.include ../../common.testscript
+
+: type
+:
+$* <<EOI >>EOO
+print $value_type([json] )
+print $value_type([json] null)
+print $value_type([json] true)
+print $value_type([json] 123)
+print $value_type([json] -123)
+print $value_type([json] 123, true)
+print $value_type([json] -123, true)
+print $value_type([json] 1 2 3)
+print $value_type([json] one@1 two@2 three@3)
+
+j = [json] one@1 two@2 three@3
+i = [uint64] 1
+m = ($j[$i])
+print $value_type($j[$i])
+print $value_type($m)
+EOI
+null
+null
+boolean
+number
+number
+unsigned number
+signed number
+array
+object
+object
+object
+EOO
+
+: value-size
+:
+$* <<EOI >>EOO
+print $value_size([json] null)
+print $value_size([json] true)
+print $value_size([json] 123)
+print $value_size([json] abc)
+print $size([string] ([json] abc)) # @@ Should be 3 (quoted, type hint).
+print $value_size([json] 1 2 3)
+print $value_size([json] one@1 two@2 three@3)
+
+print $array_size([json] 1 2 3)
+print $array_size([json] null)
+EOI
+0
+1
+1
+1
+5
+3
+3
+3
+0
+EOO
+
+: member
+:
+$* <<EOI >>EOO
+j = [json] one@1 two@2 three@3
+i = [uint64] 1
+m = ($j[$i])
+print $member_name($j[$i]) $member_value($j[$i])
+print $member_name($m) $member_value($m)
+for m: $j
+ print $member_name($m) $member_value($m)
+EOI
+two 2
+two 2
+one 1
+two 2
+three 3
+EOO
+
+: names
+:
+$* <<EOI >>EOO
+j = [json] one@1 two@2 three@3
+for n: $object_names($j)
+ print $n ($j[$n])
+
+print $object_names([json] null)
+EOI
+one 1
+two 2
+three 3
+
+EOO
+
+: find
+:
+$* <<EOI >>EOO
+j = [json] 1 ([json] one@1 two@2) 2 true 3 null 4 abc -5 null ([json] 1 2 3)
+print $array_find_index($j, null)
+print $array_find_index($j, true)
+print $array_find_index($j, 3)
+print $array_find_index($j, 0x4)
+print $array_find_index($j, -5)
+print $array_find_index($j, abc)
+print $array_find_index($j, [json] 1 2 3)
+print $array_find_index($j, [json] two@2 one@1)
+print $array_find_index($j, [json] 1 2)
+print $array_find_index($j, [json] one@1)
+print $array_find_index($j, [json] one@1 two@2 three@3)
+print $array_find_index($j, [json] one@1 TWO@3)
+print $array_find_index($j, [json] one@1 two@3)
+print $array_find_index([json] null, 1)
+EOI
+5
+3
+4
+6
+8
+7
+10
+1
+11
+11
+11
+11
+11
+0
+EOO
+
+: parse
+:
+{
+ : basics
+ :
+ $* <<EOI >>EOO
+ print $json.parse('[123, "abc", {"one":1, "two":2}]')
+ EOI
+ [123,"abc",{"one":1,"two":2}]
+ EOO
+
+ : diagnostics-invalid-input
+ :
+ $* <<EOI 2>>EOE != 0
+ print $json.parse('{"one":, "two":2}]')
+ EOI
+ error: invalid json input: unexpected byte ',' in value
+ info: line 1, column 8, byte offset 8
+ <stdin>:1:8: info: while calling json.parse(<untyped>)
+ EOE
+
+ : diagnostics-duplicate-input
+ :
+ $* <<EOI 2>>EOE != 0
+ print $json.parse('{"one":1, "one":2}]')
+ EOI
+ error: invalid json input: duplicate object member 'one'
+ info: line 1, column 11, byte offset 15
+ <stdin>:1:8: info: while calling json.parse(<untyped>)
+ EOE
+}
+
+: serialize
+:
+{
+ : basics
+ :
+ $* <<EOI >>EOO
+ j = [json] 123 abc ([json] one@1 two@2)
+ print $json.serialize($j)
+ print $json.serialize($j, 0)
+ EOI
+ [
+ 123,
+ "abc",
+ {
+ "one": 1,
+ "two": 2
+ }
+ ]
+ [123,"abc",{"one":1,"two":2}]
+ EOO
+
+ : diagnostics
+ :
+ if false
+ {
+    # This is not easy to trigger normally, so we have a normally-disabled
+    # special hack in the $json.serialize() implementation to trigger it.
+ #
+ $* <<EOI 2>>EOE != 0
+ print $json.serialize([json] deadbeef)
+ EOI
+ error: invalid json value: invalid UTF-8 text
+ info: while serializing string value
+ info: offending byte offset 4
+ <stdin>:1:8: info: while calling json.serialize(json)
+ EOE
+ }
+
+}
+
+: load
+:
+{
+ : basics
+ :
+ cat <<EOI >=input.json;
+ {
+ "str":"abc",
+ "num":123,
+ "arr":[1, 2, 3],
+ "obj":{"one":1, "two":2, "three":3}
+ }
+ EOI
+ $* <<EOI >>EOO
+ j = $json.load(input.json)
+ for m: $j
+ print $member_name($m) $member_value($m)
+ EOI
+ str abc
+ num 123
+ arr [1,2,3]
+ obj {"one":1,"two":2,"three":3}
+ EOO
+
+ : diagnostics
+ :
+ cat <<EOI >=input.json;
+ {
+ "str":"abc",
+ "num":,
+ "arr":[1, 2, 3],
+ "obj":{"one":1, "two":2, "three":3}
+ }
+ EOI
+ $* <<EOI 2>>EOE != 0
+ j = $json.load(input.json)
+ EOI
+ input.json:3:9: error: invalid json input: unexpected byte ',' in value
+ info: byte offset 26
+ <stdin>:1:6: info: while calling json.load(<untyped>)
+ EOE
+}
+
+: size
+:
+{
+ $* <'print $size([json_set] a b b)' >'2' : json-set
+ $* <'print $size([json_map] a@1 b@2 b@3)' >'2' : json-map
+}
+
+: keys
+:
+$* <'print $keys([json_map] 2@([json] a@1 b@2 c@3) 1@([json] 1 2 3))' >'[1,2]'
diff --git a/tests/function/path/testscript b/tests/function/path/testscript
index c58bbf8..1ed89ca 100644
--- a/tests/function/path/testscript
+++ b/tests/function/path/testscript
@@ -8,6 +8,78 @@ posix = (!$windows)
s = ($posix ? '/' : '\')
+: posix-string
+:
+{
+ : relative
+ :
+ {
+ s = ($posix ? '/' : '\\')
+
+ $* <"print \$posix_string\([path] a$(s)b)" >'a/b' : path
+ $* <"print \$posix_string\([paths] a$(s)b a$(s)c$(s))" >'a/b a/c' : paths
+ $* <"print \$posix_string\([dir_path] a$(s)b)" >'a/b' : dir-path
+ $* <"print \$posix_string\([dir_paths] a$(s)b a$(s)c$(s))" >'a/b a/c' : dir-paths
+ $* <"print \$path.posix_string\(a$(s)b a$(s)c$(s))" >'a/b a/c' : untyped
+ }
+
+ : absolute
+ :
+ {
+ if $posix
+ {
+ $* <'print $posix_string([paths] /a/b /a/c/)' >'/a/b /a/c' : paths
+ $* <'print $posix_string([dir_paths] /a/b /a/c/)' >'/a/b /a/c' : dir-paths
+ $* <'print $posix_string([dir_path] /)' >'/' : root-dir
+ $* <'print $path.posix_string(/a/b /a/c/)' >'/a/b /a/c' : untyped
+ }
+ else
+ {
+ $* <'print $posix_string([paths] "c:\\a\\b" "C:\\a\\c\\")' >'c:/a/b C:/a/c' : paths
+ $* <'print $posix_string([dir_paths] "c:\\a\\b" "C:\\a\\c\\")' >'c:/a/b C:/a/c' : dir-paths
+ $* <'print $posix_string([dir_paths] "c:\\" "C:")' >'c:/ C:/' : root-dir
+ $* <'print $path.posix_string("c:\\a\\b" "C:\\a\\c\\")' >'c:/a/b C:/a/c' : untyped
+ $* <'print $path.posix_string("c:\\" "C:")' >'c:/ C:/' : untyped-root
+ }
+ }
+}
+
+: posix-representation
+:
+{
+ : relative
+ :
+ {
+ s = ($posix ? '/' : '\\')
+
+ $* <"print \$posix_representation\([path] a$(s)b)" >'a/b' : path
+ $* <"print \$posix_representation\([paths] a$(s)b a$(s)c$(s))" >'a/b a/c/' : paths
+ $* <"print \$posix_representation\([dir_path] a$(s)b)" >'a/b/' : dir-path
+ $* <"print \$posix_representation\([dir_paths] a$(s)b a$(s)c$(s))" >'a/b/ a/c/' : dir-paths
+ $* <"print \$path.posix_representation\(a$(s)b a$(s)c$(s))" >'a/b a/c/' : untyped
+ }
+
+ : absolute
+ :
+ {
+ if $posix
+ {
+ $* <'print $posix_representation([paths] /a/b /a/c/)' >'/a/b /a/c/' : paths
+ $* <'print $posix_representation([dir_paths] /a/b /a/c/)' >'/a/b/ /a/c/' : dir-paths
+ $* <'print $posix_representation([dir_path] /)' >'/' : root-dir
+ $* <'print $path.posix_representation(/a/b /a/c/)' >'/a/b /a/c/' : untyped
+ }
+ else
+ {
+ $* <'print $posix_representation([paths] "c:\\a\\b" "C:\\a\\c\\")' >'c:/a/b C:/a/c/' : paths
+ $* <'print $posix_representation([dir_paths] "c:\\a\\b" "C:\\a\\c\\")' >'c:/a/b/ C:/a/c/' : dir-paths
+ $* <'print $posix_representation([dir_paths] "c:\\" "C:")' >'c:/ C:/' : root-dir
+ $* <'print $path.posix_representation("c:\\a\\b" "C:\\a\\c\\")' >'c:/a/b C:/a/c/' : untyped
+ $* <'print $path.posix_representation("c:\\" "C:")' >'c:/ C:/' : untyped-root
+ }
+ }
+}
+
: canonicalize
:
{
diff --git a/tests/function/regex/testscript b/tests/function/regex/testscript
index 5167390..538bdab 100644
--- a/tests/function/regex/testscript
+++ b/tests/function/regex/testscript
@@ -478,6 +478,64 @@
}
}
+: filter-match
+:
+{
+ : match
+ :
+ {
+ : string
+ :
+ $* <<EOI >'-O2 -O3'
+ print $regex.filter_match(-g -O2 -O3, [string] '-O[23]')
+ EOI
+
+ : untyped
+ :
+ $* <<EOI >'-O2 -O3'
+ print $regex.filter_match(-g -O2 -O3, '-O[23]')
+ EOI
+
+ : strings
+ :
+ $* <<EOI >'-O2 -O3'
+ print $regex.filter_match([strings] -g -O2 -O3, '-O[23]')
+ EOI
+
+ : nomatch
+ :
+ $* <<EOI >''
+ print $regex.filter_match(-g -O1, '-O[23]')
+ EOI
+ }
+
+ : filter-out
+ :
+ {
+ : untyped
+ :
+ $* <<EOI >'-g'
+ print $regex.filter_out_match(-g -O2 -O3, '-O[23]')
+ EOI
+
+ : all-match
+ :
+ $* <<EOI >''
+ print $regex.filter_out_match(-O2 -O3, '-O[23]')
+ EOI
+ }
+
+ : flags
+ :
+ {
+ : icase
+ :
+ $* <<EOI >'Foo.cxx'
+ print $regex.filter_match(Foo.cxx, 'f[^.]+.*', icase)
+ EOI
+ }
+}
+
: find-search
:
{
@@ -520,6 +578,64 @@
}
}
+: filter-search
+:
+{
+ : match
+ :
+ {
+ : string
+ :
+ $* <<EOI >'-O2 -O3'
+ print $regex.filter_search(-g -O2 -O3, [string] '-O')
+ EOI
+
+ : untyped
+ :
+ $* <<EOI >'-O2 -O3'
+ print $regex.filter_search(-g -O2 -O3, '-O')
+ EOI
+
+ : strings
+ :
+ $* <<EOI >'-O2 -O3'
+ print $regex.filter_search([strings] -g -O2 -O3, '-O')
+ EOI
+
+ : nomatch
+ :
+ $* <<EOI >''
+ print $regex.filter_search(-g, '-O')
+ EOI
+ }
+
+ : filter-out
+ :
+ {
+ : untyped
+ :
+ $* <<EOI >'-g'
+ print $regex.filter_out_search(-g -O2 -O3, '-O')
+ EOI
+
+ : all-match
+ :
+ $* <<EOI >''
+ print $regex.filter_out_search(-O2 -O3, '-O')
+ EOI
+ }
+
+ : flags
+ :
+ {
+ : icase
+ :
+ $* <<EOI >'Foo.cxx'
+ print $regex.filter_search(Foo.cxx, 'f', icase)
+ EOI
+ }
+}
+
: merge
:
{
diff --git a/tests/function/string/testscript b/tests/function/string/testscript
index 364ce42..244ace8 100644
--- a/tests/function/string/testscript
+++ b/tests/function/string/testscript
@@ -25,6 +25,51 @@
}
}
+: replace
+:
+{
+ : basics
+ :
+ {
+ $* <'print $string.replace( abcb, b, BB)' >'aBBcBB' : expand
+ $* <'print $string.replace( aabbccbb, bb, B)' >'aaBccB' : shrink
+ $* <'print $replace([string] abc, b, B)' >'aBc' : typed
+ $* <'print $replace([string] "", b, B)' >'' : empty
+ $* <'print $replace([string] bbb, b, "")' >'' : to-empty
+ }
+
+ : icase
+ :
+ {
+ $* <'print $string.replace(abcB, b, X, icase)' >'aXcX'
+ }
+
+ : first
+ :
+ {
+ $* <'print $string.replace(babc, b, B, first_only)' >'Babc' : first
+ $* <'print $string.replace(abcb, b, B, first_only)' >'aBcb' : middle
+ $* <'print $string.replace(b, b, B, first_only)' >'B' : only
+ }
+
+ : last
+ :
+ {
+ $* <'print $string.replace(babc, b, B, last_only)' >'baBc' : middle
+ $* <'print $string.replace(abcb, b, B, last_only)' >'abcB' : last
+ $* <'print $string.replace(b, b, B, last_only)' >'B' : only
+ }
+
+ : first-and-last
+ :
+ {
+ $* <'print $string.replace(ac, b, B, first_only last_only)' >'ac' : zero
+ $* <'print $string.replace(abc, b, B, first_only last_only)' >'aBc' : one
+ $* <'print $string.replace(abcb, b, B, first_only last_only)' >'abcb' : two
+ $* <'print $string.replace(b, b, B, first_only last_only)' >'B' : only
+ }
+}
+
: trim
:
{
@@ -43,8 +88,11 @@
: size
:
{
- $* <'print $size([string] abc)' >'3' : basics
- $* <'print $size([string] )' >'0' : zero
+ $* <'print $size([string] abc)' >'3' : basics
+ $* <'print $size([string] )' >'0' : zero
+ $* <'print $size([strings] a b c)' >'3' : strings
+ $* <'print $size([string_set] a b b)' >'2' : string-set
+ $* <'print $size([string_map] a@1 b@2 b@3)' >'2' : string-map
}
: find
@@ -62,3 +110,7 @@
$* <'print $find_index([strings] x y z, Y)' >'3' : basics-false
$* <'print $find_index([strings] x y z, Y, icase)' >'1' : icase
}
+
+: keys
+:
+$* <'print $keys([string_map] a@1 b@2 c@3)' >'a b c'
diff --git a/tests/test/script/runner/set.testscript b/tests/test/script/runner/set.testscript
index ff77f6f..1800a7d 100644
--- a/tests/test/script/runner/set.testscript
+++ b/tests/test/script/runner/set.testscript
@@ -337,7 +337,10 @@
$c <<EOI && $b
echo "$s" >=f;
timeout --success 2;
- $* -o 'foo' -l 10 | cat f - | set bar
+
+ # Suppress cat's 'broken pipe' diagnostics.
+ #
+ $* -o 'foo' -l 10 | cat f - 2>- | set bar
EOI
}
diff --git a/tests/type/json/buildfile b/tests/type/json/buildfile
new file mode 100644
index 0000000..5bc6bf2
--- /dev/null
+++ b/tests/type/json/buildfile
@@ -0,0 +1,4 @@
+# file : tests/type/json/buildfile
+# license : MIT; see accompanying LICENSE file
+
+./: testscript $b
diff --git a/tests/type/json/testscript b/tests/type/json/testscript
new file mode 100644
index 0000000..36287b7
--- /dev/null
+++ b/tests/type/json/testscript
@@ -0,0 +1,504 @@
+# file : tests/type/json/testscript
+# license : MIT; see accompanying LICENSE file
+
+# See also tests in function/json/.
+
+.include ../../common.testscript
+
+: basics
+:
+{
+ : empty-null
+ :
+ $* <<EOI >>EOO
+ print ([json, null] )
+ print ([json] null)
+ print ([json] )
+ print ([json] "")
+ print ([json_array] )
+ print ([json_object] )
+ print ([json] one@null)
+  print ([json] one@)      # @@ Would be more consistent if it were null (type hints?)
+ print ([json] one@"")
+ EOI
+ [null]
+
+
+ ""
+ []
+ {}
+ {"one":null}
+ {"one":""}
+ {"one":""}
+ EOO
+
+ : reverse
+ :
+ $* <<EOI >>EOO
+ print ([json] null)
+ print ([json] true)
+ print ([json] 123)
+ print ([json] -123)
+ print ([json] 0xdecaf)
+ print ([json] abc) # @@ Ideally we would like this to be reversed unquoted.
+ print ([json] '"abc"') # @@ Ditto.
+ print (([json] abc)[0]) # @@ Workaround.
+ print ([json] dir/{file1 file2})
+ print ([json] ' ["dir/file1", "dir/file2"] ')
+ print ([json] zero@null one@1 two@abc three@([json] x@123 y@-123) four@([json] null true))
+ print ([json] '{"zero":null,"one":1,"two":"abc","three":{"x":123,"y":-123},"four":[null,true]}')
+ EOI
+
+ true
+ 123
+ -123
+ 0xdecaf
+ "abc"
+ "abc"
+ abc
+ ["dir/file1","dir/file2"]
+ ["dir/file1","dir/file2"]
+ {"zero":null,"one":1,"two":"abc","three":{"x":123,"y":-123},"four":[null,true]}
+ {"zero":null,"one":1,"two":"abc","three":{"x":123,"y":-123},"four":[null,true]}
+ EOO
+
+
+ : hex
+ :
+ $* <<EOI >>EOO
+ print ([json] 0xffffFFFF)
+
+  # These should be in hexadecimal notation once we switch to JSON5.
+ #
+ print ([json] 0x0 0x01 0xff 0xFFFF)
+ print ([json] ff@0xff FFFF@0xFFFF)
+
+ # @@ This should start working once we switch to type hints in subscript.
+ #
+ #j = [json] ff@0xff
+ #print $value_type($j[ff], true)
+ print 'hexadecimal number'
+ EOI
+ 0xffffffff
+ [0,1,255,65535]
+ {"ff":255,"FFFF":65535}
+ hexadecimal number
+ EOO
+
+ : diagnostics-reverse-invalid
+ :
+ $* <<EOI 2>>EOE != 0
+ o = [json] '{"one":1, "two":}'
+ EOI
+ error: invalid json value in variable o: invalid json input: unexpected byte '}' in value
+ <stdin>:1:5: info: variable o value is assigned here
+ EOE
+
+ : diagnostics-duplicate-member
+ :
+ $* <<EOI 2>>EOE != 0
+ o = [json] one@1 one@2
+ EOI
+ error: invalid json value in variable o: duplicate json object member 'one'
+ <stdin>:1:5: info: variable o value is assigned here
+ EOE
+}
+
+: compare
+:
+{
+ : type
+ :
+ $* <<EOI >>EOO
+ print (([json] null) < ([json] true))
+ print (([json] true) < ([json] 0))
+ print (([json] 123) < ([json] '"0"'))
+ print (([json] abc) < ([json] xxx yyy))
+ print (([json] xxx yyy) < ([json] xxx@null yyy@null))
+ EOI
+ true
+ true
+ true
+ true
+ true
+ EOO
+
+ : simple
+ :
+ $* <<EOI >>EOO
+ print (([json] false) == ([json] false))
+ print (([json] false) < ([json] true))
+
+ print (([json] 123) == ([json] 123))
+ print (([json] -123) == ([json] -123))
+ print (([json] 0xff) == ([json] 255))
+ print (([json] 0) == ([json] -0))
+ print (([json] -1) < ([json] 0))
+ print (([json] 123) < ([json] 234))
+ print (([json] -234) < ([json] -123))
+
+ print (([json] abc) == ([json] abc))
+ print (([json] abc) < ([json] abz))
+ print (([json] abc) < ([json] abcd))
+ EOI
+ true
+ true
+ true
+ true
+ true
+ true
+ true
+ true
+ true
+ true
+ true
+ true
+ EOO
+
+ : array
+ :
+ $* <<EOI >>EOO
+ print (([json] 1 2 3) == ([json] 1 2 3))
+ print (([json] 1 2 3) < ([json] 1 2 4))
+ print (([json] 1 2 3) < ([json] 1 2 3 4))
+ EOI
+ true
+ true
+ true
+ EOO
+
+ : object
+ :
+ $* <<EOI >>EOO
+ print (([json] one@1 two@2 three@3) == ([json] three@3 one@1 two@2))
+ print (([json] one@1 two@2 three@3) < ([json] three@3 one@1 two@4))
+ print (([json] one@1 three@3) < ([json] three@3 one@1 two@2))
+ EOI
+ true
+ true
+ true
+ EOO
+}
+
+: append-prepend
+:
+{
+ : array
+ :
+ $* <<EOI >'[0,1,2,3,4,5,6,7,8]'
+ a = [json] 2 3
+ a += 4
+ a += 5 6
+ a += [json] 7 8
+ a =+ [json] 0 1
+ print $a
+ EOI
+
+ : array-type
+ :
+ $* <<EOI >'[1,2,3,4,5]'
+ [json_array] a =
+ a += 1
+ a += 2 3
+ a += [json_array] 4 5 # @@ Should be possible to use json.
+ print $a
+ EOI
+
+ : object
+ :
+ $* <<EOI >'{"zero":0,"one":6,"two":8,"three":3,"four":4,"five":5,"seven":7}'
+ o = [json] one@1 two@2 three@3
+ o += four@4
+ o += five@5 one@6
+ o += [json] seven@7 two@8
+ o =+ [json] zero@0 three@9
+ print $o
+ EOI
+
+ : object-type
+ :
+ $* <<EOI >'{"one":1,"two":2,"three":3,"four":4,"five":5}'
+ [json_object] o =
+ o += one@1
+ o += two@2 three@3
+ o += [json_object] four@4 five@5 # @@ Should be possible to use json.
+ print $o
+ EOI
+
+ : boolean
+ :
+ $* <<EOI >>EOO
+ b = [json] false
+ b += [json] true
+ print $b
+ EOI
+ true
+ EOO
+
+ : number
+ :
+ $* <<EOI >>EOO
+ n = [json] -2
+ print $value_type($n, true) $n
+ n += 1
+ print $value_type($n, true) $n
+ n += 1
+ print $value_type($n, true) $n
+ n += 1
+ print $value_type($n, true) $n
+ n += [json] -1
+ print $value_type($n, true) $n
+ n += [json] -1
+ print $value_type($n, true) $n
+ EOI
+ signed number -2
+ signed number -1
+ unsigned number 0
+ unsigned number 1
+ unsigned number 0
+ signed number -1
+ EOO
+
+ : string
+ :
+ $* <<EOI >>EOO
+ s = [json] yyy
+ s += [json] zzz
+ s =+ [json] xxx
+ print $s
+ EOI
+ "xxxyyyzzz"
+ EOO
+
+ : invalid
+ :
+ $* <<EOI 2>>EOE != 0
+ a = [json] 1 2 3
+ s = [json] str
+ s += $a
+ print $s
+ EOI
+ error: invalid json value in variable s: unable to append array to string
+ <stdin>:3:6: info: variable s value is assigned here
+ EOE
+}
+
+: subscript
+:
+{
+ : null
+ :
+ $* <<EOI >>EOO
+ j = [json] null
+ print ($j[0])
+ print ($j[one])
+ EOI
+ [null]
+ [null]
+ EOO
+
+ : array
+ :
+ $* <<EOI >>EOO
+ j = [json] 1 2 3 null
+ print ($j[1])
+ print ($j[3])
+ print ($j[4])
+ EOI
+ 2
+ [null]
+ [null]
+ EOO
+
+ : object-name
+ :
+ $* <<EOI >>EOO
+ j = [json] one@1 two@2 three@3 four@null
+ print ($j[two])
+ print ($j[four])
+ print ($j[five])
+ EOI
+ 2
+ [null]
+ [null]
+ EOO
+
+ : object-index
+ :
+ $* <<EOI >>EOO
+ j = [json] one@1 two@2 three@3
+ print ($j[([uint64] 1)])
+ EOI
+ {"two":2}
+ EOO
+
+ : nested
+ :
+ $* <<EOI >>EOO
+ o = [json] one@([json] 1 2 ([json] a@3 b@4) null) two@([json] x@x y@([json] 5 6))
+ print ($o[one][1])
+ print ($o[one][2][b])
+ print ($o[two][y][1])
+ print ($o[two][bogus][junk])
+ print ($o[two][bogus][junk][garbage])
+ print ($o[one][3][junk]) # JSON null
+ print ($o[one][3][junk][garbage])
+
+ a = [json] ([json] one@1 two@([json] 2 3)) ([json] 4 5) null
+ print ($a[0][one])
+ print ($a[0][two][1])
+ print ($a[1][1])
+ print ($a[1][123][junk])
+ print ($a[1][123][junk][garbage])
+ print ($a[2][junk]) # JSON null
+ print ($a[2][junk][garbage])
+ EOI
+ 2
+ 4
+ 6
+ [null]
+ [null]
+ [null]
+ [null]
+ 1
+ 3
+ 5
+ [null]
+ [null]
+ [null]
+ [null]
+ EOO
+
+ : reverse
+ :
+ $* <<EOI >>EOO
+ print (([json] one@null)[one])
+ print (([json] one@true)[one])
+ print (([json] one@123)[one])
+ print (([json] one@-123)[one])
+ print (([json] one@0xdecaf)[one])
+ print (([json] one@abc)[one])
+ EOI
+ [null]
+ true
+ 123
+ -123
+ 912559
+ abc
+ EOO
+
+ : diagnostics-not-object
+ :
+ $* <<EOI 2>>EOE != 0
+ j = [json] 1 2 3
+ print ($j[one])
+ EOI
+ <stdin>:2:11: error: invalid json value subscript: invalid uint64 value 'one'
+ info: json value type is array
+ <stdin>:2:9: info: use the '\[' escape sequence if this is a wildcard pattern
+ EOE
+}
+
+: iteration
+:
+{
+ : null
+ :
+ $* <<EOI
+ for v: ([json] null)
+ print $v
+ EOI
+
+ : simple
+ :
+ $* <<EOI >>EOO
+ for v: ([json] 123)
+ print $v
+ EOI
+ 123
+ EOO
+
+ : array
+ :
+ $* <<EOI >>EOO
+ for v: ([json] 1 2 3)
+ print $v
+ EOI
+ 1
+ 2
+ 3
+ EOO
+
+ : object
+ :
+ $* <<EOI >>EOO
+ for v: ([json] one@1 two@2 three@3)
+ print $v
+ EOI
+ {"one":1}
+ {"two":2}
+ {"three":3}
+ EOO
+
+ : reverse
+ :
+ $* <<EOI >>EOO
+ for v: ([json] null true 123 -123 0xdecaf abc)
+ print $v
+ EOI
+ [null]
+ true
+ 123
+ -123
+ 912559
+ abc
+ EOO
+}
+
+: json-map
+:
+{
+ : basics
+ :
+ $* <<EOI >>EOO
+ m = [json_map] 2@([json] a@1 b@2) 1@([json] 1 2) 0@([json] null) -1@null
+ print $m
+ for p: $m
+ print $first($p) $second($p)
+ print ($m[1])
+ print $type($m[1])
+ print ($m[2][b])
+ print ($m[0])
+ print ($m[-1])
+ EOI
+ -1@null 0@null 1@[1,2] 2@{"a":1,"b":2}
+ -1 null
+ 0 null
+ 1 [1,2]
+ 2 {"a":1,"b":2}
+ [1,2]
+ json
+ 2
+
+
+ EOO
+}
+
+: json-set
+:
+{
+ : basics
+ :
+ $* <<EOI >>EOO
+ s = [json_set] ([json] x@1 y@2) ([json] a@1 b@2)
+ print $s
+ for v: $s
+ print $type($v) $v
+ print ($s[([json] y@2 x@1)])
+ EOI
+ {"a":1,"b":2} {"x":1,"y":2}
+ json {"a":1,"b":2}
+ json {"x":1,"y":2}
+ true
+ EOO
+}
diff --git a/tests/type/map/buildfile b/tests/type/map/buildfile
new file mode 100644
index 0000000..7f2cdcf
--- /dev/null
+++ b/tests/type/map/buildfile
@@ -0,0 +1,4 @@
+# file : tests/type/map/buildfile
+# license : MIT; see accompanying LICENSE file
+
+./: testscript $b
diff --git a/tests/type/map/testscript b/tests/type/map/testscript
new file mode 100644
index 0000000..29f5ed4
--- /dev/null
+++ b/tests/type/map/testscript
@@ -0,0 +1,70 @@
+# file : tests/type/map/testscript
+# license : MIT; see accompanying LICENSE file
+
+# See also tests in function/*/ (size(), keys()), type/json/ (json_map).
+
+.include ../../common.testscript
+
+: basics
+:
+$* <<EOI >>EOO
+m = [string_map] a@0 b@2 a@1
+print $m
+m += c@3 b@0
+print $m
+m =+ d@4 b@1
+print $m
+EOI
+a@1 b@2
+a@1 b@0 c@3
+a@1 b@0 c@3 d@4
+EOO
+
+: type
+:
+$* <<EOI >>EOO
+m = [string_map]
+print $type($m)
+EOI
+string_map
+EOO
+
+: subscript
+:
+$* <<EOI >>EOO
+m = [string_map] a@1 b@2 c@3
+print ($m[b])
+print $type($m[b])
+print ($m[z])
+EOI
+2
+string
+[null]
+EOO
+
+: iteration
+:
+$* <<EOI >>EOO
+for p: [string_map] a@1 b@2 c@3
+ print $first($p) $second($p)
+
+for p: [string_map, null]
+ fail bad
+EOI
+a 1
+b 2
+c 3
+EOO
+
+: iteration-index
+:
+$* <<EOI >>EOO
+m = [string_map] a@1 b@2 c@3
+k = $keys($m)
+for i: $integer_sequence(0, $size($k))
+ print $i ($k[$i]) ($m[($k[$i])]) # @@ TMP: nested subscript
+EOI
+0 a 1
+1 b 2
+2 c 3
+EOO
diff --git a/tests/type/set/buildfile b/tests/type/set/buildfile
new file mode 100644
index 0000000..55b37bb
--- /dev/null
+++ b/tests/type/set/buildfile
@@ -0,0 +1,4 @@
+# file : tests/type/set/buildfile
+# license : MIT; see accompanying LICENSE file
+
+./: testscript $b
diff --git a/tests/type/set/testscript b/tests/type/set/testscript
new file mode 100644
index 0000000..aca4c2d
--- /dev/null
+++ b/tests/type/set/testscript
@@ -0,0 +1,55 @@
+# file : tests/type/set/testscript
+# license : MIT; see accompanying LICENSE file
+
+# See also tests in function/*/ (size()), type/json/ (json_set).
+
+.include ../../common.testscript
+
+: basics
+:
+$* <<EOI >>EOO
+s = [string_set] a b a
+print $s
+s += c b
+print $s
+s =+ d b
+print $s
+EOI
+a b
+a b c
+a b c d
+EOO
+
+: type
+:
+$* <<EOI >>EOO
+s = [string_set]
+print $type($s)
+EOI
+string_set
+EOO
+
+: subscript
+:
+$* <<EOI >>EOO
+s = [string_set] a b c
+print ($s[b])
+print ($s[z])
+EOI
+true
+false
+EOO
+
+: iteration
+:
+$* <<EOI >>EOO
+for s: [string_set] a b c
+ print $type($s) $s
+
+for s: [string_set, null]
+ fail bad
+EOI
+string a
+string b
+string c
+EOO
diff --git a/tests/type/vector/buildfile b/tests/type/vector/buildfile
new file mode 100644
index 0000000..5b2aa0e
--- /dev/null
+++ b/tests/type/vector/buildfile
@@ -0,0 +1,4 @@
+# file : tests/type/vector/buildfile
+# license : MIT; see accompanying LICENSE file
+
+./: testscript $b
diff --git a/tests/type/vector/testscript b/tests/type/vector/testscript
new file mode 100644
index 0000000..9b3aaba
--- /dev/null
+++ b/tests/type/vector/testscript
@@ -0,0 +1,57 @@
+# file : tests/type/vector/testscript
+# license : MIT; see accompanying LICENSE file
+
+# See also tests in function/*/ (size(), find(), etc).
+
+.include ../../common.testscript
+
+: basics
+:
+$* <<EOI >>EOO
+v = [strings] b c
+print $v
+v += d
+print $v
+v =+ a
+print $v
+EOI
+b c
+b c d
+a b c d
+EOO
+
+: type
+:
+$* <<EOI >>EOO
+v = [strings]
+print $type($v)
+EOI
+strings
+EOO
+
+: subscript
+:
+$* <<EOI >>EOO
+v = [strings] a b c
+print ($v[1])
+print $type($v[1])
+print ($v[3])
+EOI
+b
+string
+[null]
+EOO
+
+: iteration
+:
+$* <<EOI >>EOO
+for s: [strings] a b c
+ print $type($s) $s
+
+for s: [strings, null]
+ fail bad
+EOI
+string a
+string b
+string c
+EOO
diff --git a/tests/value/concat.testscript b/tests/value/concat.testscript
index 97391c4..69ec9fc 100644
--- a/tests/value/concat.testscript
+++ b/tests/value/concat.testscript
@@ -3,6 +3,48 @@
.include ../common.testscript
+: null
+:
+{
+ : untyped
+ :
+ $* <<EOI >>/EOO
+ x = [null]
+
+ print y "$x x"
+ print "x $x" y
+
+ print $x"x"
+ print "x"$x
+ print $x$x
+ EOI
+ y x
+ x y
+ x
+ x
+ {}
+ EOO
+
+ : string
+ :
+ $* <<EOI >>/EOO
+ x = [string,null]
+
+ print y "$x x"
+ print "x $x" y
+
+ print $x"x"
+ print "x"$x
+ print $x$x
+ EOI
+ y x
+ x y
+ x
+ x
+ {}
+ EOO
+}
+
: dir_path
:
{
diff --git a/tests/value/reverse.testscript b/tests/value/reverse.testscript
index 9f73981..921d14b 100644
--- a/tests/value/reverse.testscript
+++ b/tests/value/reverse.testscript
@@ -89,3 +89,58 @@
EOO
}
}
+
+: reduce
+:
+: Test empty simple value reduction heuristics.
+:
+{
+ : typed
+ :
+ $* <<EOI >>"EOO"
+ x = [string]
+ n = [string,null]
+ y = [strings] $x
+ y += $x
+ y += $n
+ print $size($y)
+
+ file{*}: y += $x
+ file{x}:
+ print $size($(file{x}: y))
+
+ for i: $x
+ print iteration
+
+ print $null($x[0])
+ EOI
+ 2
+ 3
+ iteration
+ false
+ EOO
+
+ : untyped
+ :
+ $* <<EOI >>"EOO"
+ x =
+ n = [null]
+ y = $x
+ y += $x
+ y += $n
+ print $size($y)
+
+ file{*}: y += $x
+ file{x}:
+ print $size($(file{x}: y))
+
+ for i: $x
+ print iteration
+
+ print $null($x[0])
+ EOI
+ 0
+ 0
+ true
+ EOO
+}
diff --git a/tests/variable/override/testscript b/tests/variable/override/testscript
index 9ee4643..7b973c0 100644
--- a/tests/variable/override/testscript
+++ b/tests/variable/override/testscript
@@ -63,6 +63,8 @@
p.x = 0
file{*}: p.x += a
+ file{foo}:
+
print $(file{foo}:p.x)
p.x = 1 # Should invalidate both caches.
diff --git a/tests/variable/target-type-pattern-specific/testscript b/tests/variable/target-type-pattern-specific/testscript
index 016380b..9c600ca 100644
--- a/tests/variable/target-type-pattern-specific/testscript
+++ b/tests/variable/target-type-pattern-specific/testscript
@@ -12,6 +12,9 @@ x = x
y = y
dir{*}: x = X
dir{*}: y += Y
+
+./:
+
print $(./: x)
print $(./: y)
EOI
@@ -26,6 +29,7 @@ dir{*}: x = y
x = z
dir{*-foo}: x = $x # 'z'
+bar-foo/:
print $(bar-foo/: x)
x = G
@@ -59,6 +63,7 @@ print $(file{x-foz}: x)
*: x1 = X1
{*}: x2 = X2
target{*}: x3 = X3
+file{x}:
print $(file{x}: x1)
print $(file{x}: x2)
print $(file{x}: x3)
@@ -89,6 +94,9 @@ dir{*}:
y += Y
z = $x # Note: from scope.
}
+
+./:
+
print $(./: x)
print $(./: y)
print $(./: z)
@@ -108,6 +116,9 @@ file{f*} file{b*}:
x = X
y += Y
}
+
+file{foo bar}:
+
print $(file{foo}: x)
print $(file{bar}: y)
EOI
@@ -123,6 +134,8 @@ EOO
$* <<EOI >>EOO
file{~/'.+\.txt'/i}: x = 1
+ file{foo.txt foo.TXT}:
+
print $(file{foo.txt}: x)
print $(file{foo.TXT}: x)
EOI
@@ -140,6 +153,8 @@ EOO
txt{~/'.+\.tx'/e}: x = 2
txt{~/'.+\.txt'/e}: x = 3
+ txt{foo.x foo.tx foo.txt foo.bar...}:
+
print $(txt{foo.x}: x)
print $(txt{foo.tx}: x)
print $(txt{foo.txt}: x)
@@ -157,6 +172,8 @@ EOO
x = 0
file{~/'(.+)-\1'/}: x = 1
+ file{foo-foo foo-bar}:
+
print $(file{foo-foo}: x)
print $(file{foo-bar}: x)
EOI
@@ -169,6 +186,8 @@ EOO
$* <<EOI >>EOO
foo/dir{~/b.+/}: x = 1
+ foo/dir{bar}:
+
print $(foo/dir{bar}: x)
EOI
1