Diffstat (limited to 'libbuild2')
-rw-r--r--  libbuild2/action.hxx | 17
-rw-r--r--  libbuild2/adhoc-rule-buildscript.cxx | 1699
-rw-r--r--  libbuild2/adhoc-rule-buildscript.hxx | 27
-rw-r--r--  libbuild2/adhoc-rule-cxx.cxx | 116
-rw-r--r--  libbuild2/adhoc-rule-cxx.hxx | 30
-rw-r--r--  libbuild2/adhoc-rule-regex-pattern.cxx | 187
-rw-r--r--  libbuild2/adhoc-rule-regex-pattern.hxx | 12
-rw-r--r--  libbuild2/algorithm.cxx | 2830
-rw-r--r--  libbuild2/algorithm.hxx | 557
-rw-r--r--  libbuild2/algorithm.ixx | 455
-rw-r--r--  libbuild2/b-cmdline.cxx | 516
-rw-r--r--  libbuild2/b-cmdline.hxx | 45
-rw-r--r--  libbuild2/b-options.cxx | 1607
-rw-r--r--  libbuild2/b-options.hxx | 366
-rw-r--r--  libbuild2/b-options.ixx | 405
-rw-r--r--  libbuild2/b.cli | 1074
-rw-r--r--  libbuild2/bash/init.cxx | 15
-rw-r--r--  libbuild2/bash/rule.cxx | 155
-rw-r--r--  libbuild2/bash/rule.hxx | 17
-rw-r--r--  libbuild2/bash/target.cxx | 2
-rw-r--r--  libbuild2/bash/target.hxx | 7
-rw-r--r--  libbuild2/bash/utility.hxx | 20
-rw-r--r--  libbuild2/bin/def-rule.cxx | 175
-rw-r--r--  libbuild2/bin/def-rule.hxx | 2
-rw-r--r--  libbuild2/bin/guess.cxx | 73
-rw-r--r--  libbuild2/bin/guess.hxx | 8
-rw-r--r--  libbuild2/bin/init.cxx | 201
-rw-r--r--  libbuild2/bin/init.hxx | 6
-rw-r--r--  libbuild2/bin/rule.cxx | 149
-rw-r--r--  libbuild2/bin/rule.hxx | 31
-rw-r--r--  libbuild2/bin/target.cxx | 61
-rw-r--r--  libbuild2/bin/target.hxx | 208
-rw-r--r--  libbuild2/bin/utility.cxx | 9
-rw-r--r--  libbuild2/build/script/builtin-options.cxx | 594
-rw-r--r--  libbuild2/build/script/builtin-options.hxx | 377
-rw-r--r--  libbuild2/build/script/builtin-options.ixx | 281
-rw-r--r--  libbuild2/build/script/builtin.cli | 74
-rw-r--r--  libbuild2/build/script/lexer+for-loop.test.testscript | 188
-rw-r--r--  libbuild2/build/script/lexer.cxx | 48
-rw-r--r--  libbuild2/build/script/lexer.hxx | 9
-rw-r--r--  libbuild2/build/script/lexer.test.cxx | 1
-rw-r--r--  libbuild2/build/script/parser+command-if.test.testscript | 2
-rw-r--r--  libbuild2/build/script/parser+command-re-parse.test.testscript | 6
-rw-r--r--  libbuild2/build/script/parser+diag.test.testscript | 106
-rw-r--r--  libbuild2/build/script/parser+expansion.test.testscript | 6
-rw-r--r--  libbuild2/build/script/parser+for.test.testscript | 656
-rw-r--r--  libbuild2/build/script/parser+while.test.testscript | 133
-rw-r--r--  libbuild2/build/script/parser.cxx | 1718
-rw-r--r--  libbuild2/build/script/parser.hxx | 137
-rw-r--r--  libbuild2/build/script/parser.test.cxx | 170
-rw-r--r--  libbuild2/build/script/runner.cxx | 59
-rw-r--r--  libbuild2/build/script/runner.hxx | 31
-rw-r--r--  libbuild2/build/script/script.cxx | 54
-rw-r--r--  libbuild2/build/script/script.hxx | 33
-rw-r--r--  libbuild2/build/script/types-parsers.cxx | 56
-rw-r--r--  libbuild2/build/script/types-parsers.hxx | 49
-rw-r--r--  libbuild2/buildfile | 261
-rw-r--r--  libbuild2/buildspec.cxx | 4
-rw-r--r--  libbuild2/c/init.cxx | 338
-rw-r--r--  libbuild2/c/init.hxx | 19
-rw-r--r--  libbuild2/c/target.hxx | 3
-rw-r--r--  libbuild2/cc/buildfile | 30
-rw-r--r--  libbuild2/cc/common.cxx | 908
-rw-r--r--  libbuild2/cc/common.hxx | 148
-rw-r--r--  libbuild2/cc/common.txx | 19
-rw-r--r--  libbuild2/cc/compile-rule.cxx | 2588
-rw-r--r--  libbuild2/cc/compile-rule.hxx | 39
-rw-r--r--  libbuild2/cc/functions.cxx | 144
-rw-r--r--  libbuild2/cc/gcc.cxx | 312
-rw-r--r--  libbuild2/cc/guess.cxx | 404
-rw-r--r--  libbuild2/cc/guess.hxx | 3
-rw-r--r--  libbuild2/cc/init.cxx | 67
-rw-r--r--  libbuild2/cc/install-rule.cxx | 544
-rw-r--r--  libbuild2/cc/install-rule.hxx | 55
-rw-r--r--  libbuild2/cc/lexer+comment.test.testscript | 5
-rw-r--r--  libbuild2/cc/lexer+raw-string-literal.test.testscript | 2
-rw-r--r--  libbuild2/cc/lexer.cxx | 34
-rw-r--r--  libbuild2/cc/lexer.hxx | 30
-rw-r--r--  libbuild2/cc/lexer.test.cxx | 3
-rw-r--r--  libbuild2/cc/link-rule.cxx | 1072
-rw-r--r--  libbuild2/cc/link-rule.hxx | 37
-rw-r--r--  libbuild2/cc/module.cxx | 189
-rw-r--r--  libbuild2/cc/module.hxx | 52
-rw-r--r--  libbuild2/cc/msvc.cxx | 162
-rw-r--r--  libbuild2/cc/parser.cxx | 24
-rw-r--r--  libbuild2/cc/parser.hxx | 11
-rw-r--r--  libbuild2/cc/parser.test.cxx | 2
-rw-r--r--  libbuild2/cc/pkgconfig-libpkg-config.cxx | 271
-rw-r--r--  libbuild2/cc/pkgconfig-libpkgconf.cxx | 355
-rw-r--r--  libbuild2/cc/pkgconfig.cxx | 1675
-rw-r--r--  libbuild2/cc/pkgconfig.hxx | 129
-rw-r--r--  libbuild2/cc/predefs-rule.cxx | 379
-rw-r--r--  libbuild2/cc/predefs-rule.hxx | 45
-rw-r--r--  libbuild2/cc/std.cppm | 6781
-rw-r--r--  libbuild2/cc/target.cxx | 58
-rw-r--r--  libbuild2/cc/target.hxx | 93
-rw-r--r--  libbuild2/cc/types.cxx | 15
-rw-r--r--  libbuild2/cc/types.hxx | 4
-rw-r--r--  libbuild2/cc/windows-rpath.cxx | 42
-rw-r--r--  libbuild2/cli/buildfile | 71
-rw-r--r--  libbuild2/cli/export.hxx | 37
-rw-r--r--  libbuild2/cli/init.cxx | 287
-rw-r--r--  libbuild2/cli/init.hxx | 31
-rw-r--r--  libbuild2/cli/module.hxx | 30
-rw-r--r--  libbuild2/cli/rule.cxx | 340
-rw-r--r--  libbuild2/cli/rule.hxx | 46
-rw-r--r--  libbuild2/cli/target.cxx | 75
-rw-r--r--  libbuild2/cli/target.hxx | 61
-rw-r--r--  libbuild2/common-options.cxx | 809
-rw-r--r--  libbuild2/common-options.hxx | 484
-rw-r--r--  libbuild2/common-options.ixx | 312
-rw-r--r--  libbuild2/common.cli | 9
-rw-r--r--  libbuild2/config/functions.cxx | 57
-rw-r--r--  libbuild2/config/host-config.cxx.in | 3
-rw-r--r--  libbuild2/config/init.cxx | 260
-rw-r--r--  libbuild2/config/module.hxx | 2
-rw-r--r--  libbuild2/config/operation.cxx | 122
-rw-r--r--  libbuild2/config/operation.hxx | 6
-rw-r--r--  libbuild2/config/types.hxx | 25
-rw-r--r--  libbuild2/config/utility.cxx | 67
-rw-r--r--  libbuild2/config/utility.hxx | 55
-rw-r--r--  libbuild2/config/utility.txx | 4
-rw-r--r--  libbuild2/context.cxx | 770
-rw-r--r--  libbuild2/context.hxx | 387
-rw-r--r--  libbuild2/context.ixx | 8
-rw-r--r--  libbuild2/cxx/init.cxx | 626
-rw-r--r--  libbuild2/cxx/init.hxx | 16
-rw-r--r--  libbuild2/cxx/target.cxx | 41
-rw-r--r--  libbuild2/cxx/target.hxx | 71
-rw-r--r--  libbuild2/depdb.cxx | 135
-rw-r--r--  libbuild2/depdb.hxx | 15
-rw-r--r--  libbuild2/depdb.ixx | 6
-rw-r--r--  libbuild2/diagnostics.cxx | 822
-rw-r--r--  libbuild2/diagnostics.hxx | 673
-rw-r--r--  libbuild2/diagnostics.ixx | 126
-rw-r--r--  libbuild2/dist/init.cxx | 74
-rw-r--r--  libbuild2/dist/module.hxx | 14
-rw-r--r--  libbuild2/dist/operation.cxx | 687
-rw-r--r--  libbuild2/dist/rule.cxx | 92
-rw-r--r--  libbuild2/dist/rule.hxx | 22
-rw-r--r--  libbuild2/dist/types.hxx | 41
-rw-r--r--  libbuild2/dump.cxx | 1086
-rw-r--r--  libbuild2/dump.hxx | 32
-rw-r--r--  libbuild2/dyndep.cxx | 508
-rw-r--r--  libbuild2/dyndep.hxx | 112
-rw-r--r--  libbuild2/file-cache.cxx | 2
-rw-r--r--  libbuild2/file-cache.hxx | 19
-rw-r--r--  libbuild2/file-cache.ixx | 24
-rw-r--r--  libbuild2/file.cxx | 898
-rw-r--r--  libbuild2/file.hxx | 154
-rw-r--r--  libbuild2/file.ixx | 38
-rw-r--r--  libbuild2/filesystem.cxx | 83
-rw-r--r--  libbuild2/filesystem.hxx | 8
-rw-r--r--  libbuild2/filesystem.txx | 30
-rw-r--r--  libbuild2/forward.hxx | 1
-rw-r--r--  libbuild2/function.cxx | 13
-rw-r--r--  libbuild2/function.hxx | 12
-rw-r--r--  libbuild2/function.test.cxx | 10
-rw-r--r--  libbuild2/functions-bool.cxx | 26
-rw-r--r--  libbuild2/functions-builtin.cxx | 163
-rw-r--r--  libbuild2/functions-filesystem.cxx | 38
-rw-r--r--  libbuild2/functions-integer.cxx | 156
-rw-r--r--  libbuild2/functions-json.cxx | 335
-rw-r--r--  libbuild2/functions-name.cxx | 356
-rw-r--r--  libbuild2/functions-name.hxx | 30
-rw-r--r--  libbuild2/functions-path.cxx | 354
-rw-r--r--  libbuild2/functions-process-path.cxx | 53
-rw-r--r--  libbuild2/functions-process.cxx | 86
-rw-r--r--  libbuild2/functions-project-name.cxx | 39
-rw-r--r--  libbuild2/functions-regex.cxx | 475
-rw-r--r--  libbuild2/functions-string.cxx | 275
-rw-r--r--  libbuild2/functions-target-triplet.cxx | 30
-rw-r--r--  libbuild2/functions-target.cxx | 108
-rw-r--r--  libbuild2/in/init.cxx | 27
-rw-r--r--  libbuild2/in/rule.cxx | 134
-rw-r--r--  libbuild2/in/rule.hxx | 27
-rw-r--r--  libbuild2/in/target.cxx | 22
-rw-r--r--  libbuild2/in/target.hxx | 7
-rw-r--r--  libbuild2/install/functions.cxx | 116
-rw-r--r--  libbuild2/install/init.cxx | 340
-rw-r--r--  libbuild2/install/operation.cxx | 358
-rw-r--r--  libbuild2/install/operation.hxx | 64
-rw-r--r--  libbuild2/install/rule.cxx | 853
-rw-r--r--  libbuild2/install/rule.hxx | 189
-rw-r--r--  libbuild2/install/utility.cxx | 261
-rw-r--r--  libbuild2/install/utility.hxx | 47
-rw-r--r--  libbuild2/json.cxx | 904
-rw-r--r--  libbuild2/json.hxx | 369
-rw-r--r--  libbuild2/json.ixx | 349
-rw-r--r--  libbuild2/lexer.cxx | 177
-rw-r--r--  libbuild2/lexer.hxx | 70
-rw-r--r--  libbuild2/make-parser.test.cxx | 2
-rw-r--r--  libbuild2/module.cxx | 323
-rw-r--r--  libbuild2/module.hxx | 63
-rw-r--r--  libbuild2/name.cxx | 54
-rw-r--r--  libbuild2/name.hxx | 31
-rw-r--r--  libbuild2/name.test.cxx | 16
-rw-r--r--  libbuild2/operation.cxx | 787
-rw-r--r--  libbuild2/operation.hxx | 124
-rw-r--r--  libbuild2/options-types.hxx | 16
-rw-r--r--  libbuild2/parser.cxx | 4155
-rw-r--r--  libbuild2/parser.hxx | 278
-rw-r--r--  libbuild2/prerequisite.cxx | 6
-rw-r--r--  libbuild2/prerequisite.hxx | 38
-rw-r--r--  libbuild2/recipe.cxx | 9
-rw-r--r--  libbuild2/recipe.hxx | 20
-rw-r--r--  libbuild2/rule-map.hxx | 58
-rw-r--r--  libbuild2/rule.cxx | 158
-rw-r--r--  libbuild2/rule.hxx | 139
-rw-r--r--  libbuild2/scheduler.cxx | 70
-rw-r--r--  libbuild2/scheduler.hxx | 108
-rw-r--r--  libbuild2/scheduler.ixx | 14
-rw-r--r--  libbuild2/scheduler.test.cxx | 1
-rw-r--r--  libbuild2/scheduler.txx | 42
-rw-r--r--  libbuild2/scope.cxx | 133
-rw-r--r--  libbuild2/scope.hxx | 204
-rw-r--r--  libbuild2/scope.ixx | 43
-rw-r--r--  libbuild2/script/builtin-options.cxx | 798
-rw-r--r--  libbuild2/script/builtin-options.hxx | 450
-rw-r--r--  libbuild2/script/builtin-options.ixx | 215
-rw-r--r--  libbuild2/script/builtin.cli | 9
-rw-r--r--  libbuild2/script/lexer.cxx | 11
-rw-r--r--  libbuild2/script/lexer.hxx | 2
-rw-r--r--  libbuild2/script/parser.cxx | 767
-rw-r--r--  libbuild2/script/parser.hxx | 80
-rw-r--r--  libbuild2/script/regex.cxx | 18
-rw-r--r--  libbuild2/script/regex.hxx | 20
-rw-r--r--  libbuild2/script/regex.test.cxx | 5
-rw-r--r--  libbuild2/script/run.cxx | 1704
-rw-r--r--  libbuild2/script/run.hxx | 54
-rw-r--r--  libbuild2/script/script.cxx | 50
-rw-r--r--  libbuild2/script/script.hxx | 56
-rw-r--r--  libbuild2/search.cxx | 126
-rw-r--r--  libbuild2/search.hxx | 9
-rw-r--r--  libbuild2/target-key.hxx | 17
-rw-r--r--  libbuild2/target-state.hxx | 17
-rw-r--r--  libbuild2/target-type.hxx | 82
-rw-r--r--  libbuild2/target.cxx | 595
-rw-r--r--  libbuild2/target.hxx | 1095
-rw-r--r--  libbuild2/target.ixx | 234
-rw-r--r--  libbuild2/target.txx | 35
-rw-r--r--  libbuild2/test/common.cxx | 6
-rw-r--r--  libbuild2/test/init.cxx | 44
-rw-r--r--  libbuild2/test/operation.cxx | 14
-rw-r--r--  libbuild2/test/rule.cxx | 514
-rw-r--r--  libbuild2/test/rule.hxx | 8
-rw-r--r--  libbuild2/test/script/lexer+for-loop.test.testscript | 231
-rw-r--r--  libbuild2/test/script/lexer.cxx | 55
-rw-r--r--  libbuild2/test/script/lexer.hxx | 13
-rw-r--r--  libbuild2/test/script/lexer.test.cxx | 1
-rw-r--r--  libbuild2/test/script/parser+command-if.test.testscript | 6
-rw-r--r--  libbuild2/test/script/parser+command-re-parse.test.testscript | 2
-rw-r--r--  libbuild2/test/script/parser+description.test.testscript | 4
-rw-r--r--  libbuild2/test/script/parser+expansion.test.testscript | 2
-rw-r--r--  libbuild2/test/script/parser+for.test.testscript | 1029
-rw-r--r--  libbuild2/test/script/parser+while.test.testscript | 265
-rw-r--r--  libbuild2/test/script/parser.cxx | 547
-rw-r--r--  libbuild2/test/script/parser.hxx | 18
-rw-r--r--  libbuild2/test/script/parser.test.cxx | 83
-rw-r--r--  libbuild2/test/script/runner.cxx | 55
-rw-r--r--  libbuild2/test/script/runner.hxx | 19
-rw-r--r--  libbuild2/test/script/script.cxx | 119
-rw-r--r--  libbuild2/test/script/script.hxx | 29
-rw-r--r--  libbuild2/test/target.cxx | 2
-rw-r--r--  libbuild2/test/target.hxx | 7
-rw-r--r--  libbuild2/token.cxx | 21
-rw-r--r--  libbuild2/token.hxx | 8
-rw-r--r--  libbuild2/types-parsers.cxx | 153
-rw-r--r--  libbuild2/types-parsers.hxx | 83
-rw-r--r--  libbuild2/types.hxx | 107
-rw-r--r--  libbuild2/types.ixx | 6
-rw-r--r--  libbuild2/utility-installed.cxx | 8
-rw-r--r--  libbuild2/utility-uninstalled.cxx | 12
-rw-r--r--  libbuild2/utility.cxx | 497
-rw-r--r--  libbuild2/utility.hxx | 543
-rw-r--r--  libbuild2/utility.ixx | 177
-rw-r--r--  libbuild2/utility.txx | 64
-rw-r--r--  libbuild2/variable.cxx | 1531
-rw-r--r--  libbuild2/variable.hxx | 652
-rw-r--r--  libbuild2/variable.ixx | 237
-rw-r--r--  libbuild2/variable.txx | 523
-rw-r--r--  libbuild2/version/init.cxx | 131
-rw-r--r--  libbuild2/version/module.hxx | 1
-rw-r--r--  libbuild2/version/rule.cxx | 69
-rw-r--r--  libbuild2/version/rule.hxx | 10
-rw-r--r--  libbuild2/version/snapshot-git.cxx | 19
-rw-r--r--  libbuild2/version/snapshot.cxx | 4
287 files changed, 60046 insertions, 12137 deletions
diff --git a/libbuild2/action.hxx b/libbuild2/action.hxx
index e149574..85012ba 100644
--- a/libbuild2/action.hxx
+++ b/libbuild2/action.hxx
@@ -45,16 +45,17 @@ namespace build2
// inner rule. In particular, it should not replace or override the inner's
// logic.
//
- // While most of the relevant target state is duplicated, certain things are
- // shared among the inner/outer rules, such as the target data pad and the
- // group state. In particular, it is assumed the group state is always
- // determined by the inner rule (see resolve_members()).
+ // While most of the action-specific target state is duplicated (see
+ // target::opstate), certain things are shared among the inner/outer rules,
+ // such as the path, mtime, and group state. In particular, it is assumed
+ // the group state is always determined by the inner rule (see
+ // resolve_members()).
//
// Normally, an outer rule will be responsible for any additional, outer
// operation-specific work. Sometimes, however, the inner rule needs to
// customize its behavior. In this case the outer and inner rules must
- // communicate this explicitly (normally via the target's data pad) and
- // there is a number of restrictions to this approach. See
+ // communicate this explicitly (normally via the target's auxiliary data
+ // storage) and there are a number of restrictions to this approach. See
// cc::{link,install}_rule for details.
//
struct action
@@ -150,6 +151,7 @@ namespace build2
// Id constants for built-in and pre-defined meta/operations.
//
// Note: currently max 15 (see above).
+ // Note: update small_vector in meta_operations if adding more.
//
const meta_operation_id noop_id = 1; // nomop?
const meta_operation_id perform_id = 2;
@@ -164,6 +166,7 @@ namespace build2
// something here remember to update the man page.
//
// Note: currently max 15 (see above).
+ // Note: update small_vector in operations if adding more.
//
const operation_id default_id = 1; // Shall be first.
const operation_id update_id = 2; // Shall be second.
@@ -176,6 +179,8 @@ namespace build2
const operation_id uninstall_id = 7;
const operation_id update_for_install_id = 8; // update(for install) alias.
+ // Commonly-used action ids.
+ //
const action_id perform_update_id = (perform_id << 4) | update_id;
const action_id perform_clean_id = (perform_id << 4) | clean_id;
const action_id perform_test_id = (perform_id << 4) | test_id;
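For reference, these action ids pack the meta-operation id into the high 4 bits and the operation id into the low 4 bits (hence the "currently max 15" notes above). A minimal standalone sketch of the packing and unpacking, using plain uint8_t stand-ins for the id types (the perform/update values mirror the constants above; everything else is illustrative):

#include <cstdint>
#include <cassert>

int main ()
{
  using id = std::uint8_t; // Stand-in for meta_operation_id/operation_id/action_id.

  const id perform_id = 2; // As defined above.
  const id update_id  = 2; // As defined above.

  // perform(update): (2 << 4) | 2 == 0x22 == 34.
  //
  const id perform_update_id = (perform_id << 4) | update_id;

  // Unpacking follows the same 4-bit split.
  //
  assert (perform_update_id >> 4    == perform_id); // Meta-operation part.
  assert ((perform_update_id & 0xf) == update_id);  // Operation part.

  return 0;
}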
diff --git a/libbuild2/adhoc-rule-buildscript.cxx b/libbuild2/adhoc-rule-buildscript.cxx
index c64dbfb..3e868a6 100644
--- a/libbuild2/adhoc-rule-buildscript.cxx
+++ b/libbuild2/adhoc-rule-buildscript.cxx
@@ -5,6 +5,8 @@
#include <sstream>
+#include <libbutl/filesystem.hxx> // try_rm_file(), path_entry()
+
#include <libbuild2/depdb.hxx>
#include <libbuild2/scope.hxx>
#include <libbuild2/target.hxx>
@@ -27,10 +29,11 @@ namespace build2
static inline void
hash_script_vars (sha256& cs,
const build::script::script& s,
+ const scope& bs,
const target& t,
names& storage)
{
- context& ctx (t.ctx);
+ auto& vp (bs.var_pool ());
for (const string& n: s.vars)
{
@@ -38,7 +41,7 @@ namespace build2
lookup l;
- if (const variable* var = ctx.var_pool.find (n))
+ if (const variable* var = vp.find (n))
l = t[var];
cs.append (!l.defined () ? '\x1' : l->null ? '\x2' : '\x3');
@@ -46,7 +49,7 @@ namespace build2
if (l)
{
storage.clear ();
- names_view ns (reverse (*l, storage));
+ names_view ns (reverse (*l, storage, true /* reduce */));
for (const name& n: ns)
to_checksum (cs, n);
@@ -183,7 +186,7 @@ namespace build2
{
os << " [";
os << "diag=";
- to_stream (os, name (*script.diag_name), true /* quote */, '@');
+ to_stream (os, name (*script.diag_name), quote_mode::normal, '@');
os << ']';
}
}
@@ -198,11 +201,7 @@ namespace build2
os << ind << "depdb clear" << endl;
script::dump (os, ind, script.depdb_preamble);
-
- if (script.diag_line)
- {
- os << ind; script::dump (os, *script.diag_line, true /* newline */);
- }
+ script::dump (os, ind, script.diag_preamble);
script::dump (os, ind, script.body);
ind.resize (ind.size () - 2);
@@ -212,22 +211,28 @@ namespace build2
bool adhoc_buildscript_rule::
reverse_fallback (action a, const target_type& tt) const
{
- // We can provide clean for a file target if we are providing update.
+ // We can provide clean for a file or group target if we are providing
+ // update.
//
- return a == perform_clean_id && tt.is_a<file> () &&
- find (actions.begin (), actions.end (),
- perform_update_id) != actions.end ();
+ return (a == perform_clean_id &&
+ (tt.is_a<file> () || tt.is_a<group> ()) &&
+ find (actions.begin (), actions.end (),
+ perform_update_id) != actions.end ());
}
+ using dynamic_target = build::script::parser::dynamic_target;
+ using dynamic_targets = build::script::parser::dynamic_targets;
+
struct adhoc_buildscript_rule::match_data
{
- match_data (action a, const target& t, bool temp_dir)
- : env (a, t, temp_dir) {}
+ match_data (action a, const target& t, const scope& bs, bool temp_dir)
+ : env (a, t, bs, temp_dir) {}
build::script::environment env;
build::script::default_runner run;
path dd;
+ dynamic_targets dyn_targets;
const scope* bs;
timestamp mt;
@@ -236,8 +241,10 @@ namespace build2
struct adhoc_buildscript_rule::match_data_byproduct
{
- match_data_byproduct (action a, const target& t, bool temp_dir)
- : env (a, t, temp_dir) {}
+ match_data_byproduct (action a, const target& t,
+ const scope& bs,
+ bool temp_dir)
+ : env (a, t, bs, temp_dir) {}
build::script::environment env;
build::script::default_runner run;
@@ -253,22 +260,27 @@ namespace build2
};
bool adhoc_buildscript_rule::
- match (action a, target& t, const string& h, match_extra& me) const
+ match (action a, target& xt, const string& h, match_extra& me) const
{
+ const target& t (xt); // See adhoc_rule::match().
+
// We pre-parsed the script with the assumption it will be used on a
- // non/file-based target. Note that this should not be possible with
- // patterns.
+ // non/file-based (or file group-based) target. Note that this should not
+ // be possible with patterns.
//
if (pattern == nullptr)
{
- if ((t.is_a<file> () != nullptr) != ttype->is_a<file> ())
- {
+ // Let's not allow mixing file/group.
+ //
+ if ((t.is_a<file> () != nullptr) == ttype->is_a<file> () ||
+ (t.is_a<group> () != nullptr) == ttype->is_a<group> ())
+ ;
+ else
fail (loc) << "incompatible target types used with shared recipe" <<
- info << "all targets must be file-based or non-file-based";
- }
+ info << "all targets must be file- or file group-based or non";
}
- return adhoc_rule::match (a, t, h, me);
+ return adhoc_rule::match (a, xt, h, me);
}
recipe adhoc_buildscript_rule::
@@ -279,17 +291,28 @@ namespace build2
recipe adhoc_buildscript_rule::
apply (action a,
- target& xt,
+ target& t,
match_extra& me,
- const optional<timestamp>& d) const
+ const optional<timestamp>& deadline) const
{
tracer trace ("adhoc_buildscript_rule::apply");
+ // Handle matching group members (see adhoc_rule::match() for background).
+ //
+ if (const group* g = t.group != nullptr ? t.group->is_a<group> () : nullptr)
+ {
+ // Note: this looks very similar to how we handle ad hoc group members.
+ //
+ match_sync (a, *g, 0 /* options */);
+ return group_recipe; // Execute the group's recipe.
+ }
+
// We don't support deadlines for any of these cases (see below).
//
- if (d && (a.outer () ||
- me.fallback ||
- (a == perform_update_id && xt.is_a<file> ())))
+ if (deadline && (a.outer () ||
+ me.fallback ||
+ (a == perform_update_id &&
+ (t.is_a<file> () || t.is_a<group> ()))))
return empty_recipe;
// If this is an outer operation (e.g., update-for-test), then delegate to
@@ -297,23 +320,98 @@ namespace build2
//
if (a.outer ())
{
- match_inner (a, xt);
- return execute_inner;
+ match_inner (a, t);
+ return inner_recipe;
}
- // Inject pattern's ad hoc group members, if any.
+ context& ctx (t.ctx);
+ const scope& bs (t.base_scope ());
+
+ group* g (t.is_a<group> ()); // Explicit group.
+
+ // Inject pattern's ad hoc group members, if any (explicit group members
+ // are injected after reset below).
//
- if (pattern != nullptr)
- pattern->apply_adhoc_members (a, xt, me);
+ if (g == nullptr && pattern != nullptr)
+ pattern->apply_group_members (a, t, bs, me);
- // Derive file names for the target and its ad hoc group members, if any.
+ // Derive file names for the target and its static/ad hoc group members,
+ // if any.
//
if (a == perform_update_id || a == perform_clean_id)
{
- for (target* m (&xt); m != nullptr; m = m->adhoc_member)
+ if (g != nullptr)
{
- if (auto* p = m->is_a<path_target> ())
- p->derive_path ();
+ g->reset_members (a); // See group::group_members() for background.
+
+ // Note that we rely on the fact that if the group has static members,
+ // then they always come first in members and the first static member
+ // is a file.
+ //
+ for (const target& m: g->static_members)
+ g->members.push_back (&m);
+
+ g->members_static = g->members.size ();
+
+ if (pattern != nullptr)
+ {
+ pattern->apply_group_members (a, *g, bs, me);
+ g->members_static = g->members.size ();
+ }
+
+ if (g->members_static == 0)
+ {
+ if (!script.depdb_dyndep_dyn_target)
+ fail << "group " << *g << " has no static or dynamic members";
+ }
+ else
+ {
+ if (!g->members.front ()->is_a<file> ())
+ {
+ // We use the first static member to derive depdb path, get mtime,
+ // etc. So it must be file-based.
+ //
+ fail << "first static member " << g->members.front ()
+ << " of group " << *g << " is not a file";
+ }
+
+ // Derive paths for all the static members.
+ //
+ for (const target* m: g->members)
+ if (auto* p = m->is_a<path_target> ())
+ p->derive_path ();
+ }
+ }
+ else
+ {
+ for (target* m (&t); m != nullptr; m = m->adhoc_member)
+ {
+ if (auto* p = m->is_a<path_target> ())
+ p->derive_path ();
+ }
+ }
+ }
+ else if (g != nullptr)
+ {
+ // This could be, for example, configure/dist update which could need a
+ // "representative sample" of members (in order to be able to match the
+ // rules). So add static members unless we already have something
+ // cached.
+ //
+ if (g->group_members (a).members == nullptr) // Note: not g->member.
+ {
+ g->reset_members (a);
+
+ for (const target& m: g->static_members)
+ g->members.push_back (&m);
+
+ g->members_static = g->members.size ();
+
+ if (pattern != nullptr)
+ {
+ pattern->apply_group_members (a, *g, bs, me);
+ g->members_static = g->members.size ();
+ }
}
}
@@ -322,46 +420,389 @@ namespace build2
// We do it always instead of only if one of the targets is path-based in
// case the recipe creates temporary files or some such.
//
- const fsdir* dir (inject_fsdir (a, xt));
+ // Note that we disable the prerequisite search for fsdir{} because of the
+ // prerequisites injected by the pattern. So we have to handle this ad hoc
+ // below.
+ //
+ const fsdir* dir (inject_fsdir (a, t, true /*match*/, false /*prereq*/));
// Match prerequisites.
//
- match_prerequisite_members (a, xt);
+ // This is essentially match_prerequisite_members() but with support
+ // for update=unmatch|match.
+ //
+ auto& pts (t.prerequisite_targets[a]);
+ {
+ // Re-create the clean semantics as in match_prerequisite_members().
+ //
+ bool clean (a.operation () == clean_id && !t.is_a<alias> ());
+
+ // Add target's prerequisites.
+ //
+ for (prerequisite_member p: group_prerequisite_members (a, t))
+ {
+ // Note that we have to recognize update=unmatch|match for *(update),
+ // not just perform(update). But only actually do anything about it
+ // for perform(update).
+ //
+ lookup l; // The `update` variable value, if any.
+ include_type pi (
+ include (a, t, p, a.operation () == update_id ? &l : nullptr));
+
+ // Use prerequisite_target::include to signal update during match or
+ // unmatch.
+ //
+ uintptr_t mask (0);
+ if (l)
+ {
+ const string& v (cast<string> (l));
+
+ if (v == "match")
+ {
+ if (a == perform_update_id)
+ mask = prerequisite_target::include_udm;
+ }
+ else if (v == "unmatch")
+ {
+ if (a == perform_update_id)
+ mask = include_unmatch;
+ }
+ else if (v != "false" && v != "true" && v != "execute")
+ {
+ fail << "unrecognized update variable value '" << v
+ << "' specified for prerequisite " << p.prerequisite;
+ }
+ }
+
+ // Skip excluded.
+ //
+ if (!pi)
+ continue;
+
+ const target& pt (p.search (t));
+
+ if (&pt == dir) // Don't add injected fsdir{} twice.
+ continue;
+
+ if (clean && !pt.in (*bs.root_scope ()))
+ continue;
+
+ prerequisite_target pto (&pt, pi);
- // Inject pattern's prerequisites, if any.
+ if (mask != 0)
+ pto.include |= mask;
+
+ pts.push_back (move (pto));
+ }
+
+ // Inject pattern's prerequisites, if any.
+ //
+ if (pattern != nullptr)
+ pattern->apply_prerequisites (a, t, bs, me);
+
+ // Start asynchronous matching of prerequisites. Wait with unlocked
+ // phase to allow phase switching.
+ //
+ wait_guard wg (ctx, ctx.count_busy (), t[a].task_count, true);
+
+ for (const prerequisite_target& pt: pts)
+ {
+ if (pt.target == dir) // Don't match injected fsdir{} twice.
+ continue;
+
+ match_async (a, *pt.target, ctx.count_busy (), t[a].task_count);
+ }
+
+ wg.wait ();
+
+ // Finish matching all the targets that we have started.
+ //
+ for (prerequisite_target& pt: pts)
+ {
+ if (pt.target == dir) // See above.
+ continue;
+
+ // Handle update=unmatch.
+ //
+ unmatch um ((pt.include & include_unmatch) != 0
+ ? unmatch::safe
+ : unmatch::none);
+
+ pair<bool, target_state> mr (match_complete (a, *pt.target, um));
+
+ if (um != unmatch::none)
+ {
+ l6 ([&]{trace << "unmatch " << *pt.target << ": " << mr.first;});
+
+ // If we managed to unmatch, blank it out so that it's not executed,
+ // etc. Otherwise, leave it as is (but we still automatically avoid
+ // hashing it, updating it during match in exec_depdb_dyndep(), and
+ // making us out of date in execute_update_prerequisites()).
+ //
+ // The hashing part is tricky: by not hashing it we won't detect the
+ // case where it was removed as a prerequisite altogether. The
+ // thinking is that it was added with update=unmatch to extract some
+ // information (e.g., poptions from a library) and those will be
+ // change-tracked.
+ //
+ // Note: set the include_target flag for the updated_during_match()
+ // check.
+ //
+ if (mr.first)
+ {
+ pt.data = reinterpret_cast<uintptr_t> (pt.target);
+ pt.target = nullptr;
+ pt.include |= prerequisite_target::include_target;
+
+ // Note that this prerequisite could also be ad hoc and we must
+ // clear that flag if we managed to unmatch (failed that we will
+ // treat it as ordinary ad hoc since it has the target pointer in
+ // data).
+ //
+ // But that makes it impossible to distinguish ad hoc unmatch from
+ // ordinary unmatch prerequisites later when setting $<. Another
+ // flag to the rescue.
+ //
+ if ((pt.include & prerequisite_target::include_adhoc) != 0)
+ {
+ pt.include &= ~prerequisite_target::include_adhoc;
+ pt.include |= include_unmatch_adhoc;
+ }
+ }
+ }
+ }
+ }
+
+ // Read the list of dynamic targets and, optionally, fsdir{} prerequisites
+ // from depdb, if exists (used in a few depdb-dyndep --dyn-target handling
+ // places below).
+ //
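+ // As a rough illustration (contents are made up; the only structural
+ // facts relied upon here are the rule id line and the blank-line
+ // anchors), a depdb written in the --dyn-target mode might look like:
+ //
+ //   <ad hoc buildscript recipe> 1        <- rule id
+ //   <checksums and custom depdb entries>
+ //                                        <- blank: end of custom entries
+ //   /prj/src/foo.in                      <- prerequisite
+ //   /prj/out/gen/                        <- fsdir{} (trailing separator)
+ //                                        <- blank: end of prerequisites
+ //   hxx /prj/out/gen/foo.hxx             <- dynamic target: <type> <path>
+ //                                        <- terminating blank
+ //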
+ auto read_dyn_targets = [] (path ddp, bool fsdir)
+ -> pair<dynamic_targets, dir_paths>
+ {
+ depdb dd (move (ddp), true /* read_only */);
+
+ pair<dynamic_targets, dir_paths> r;
+ while (dd.reading ()) // Breakout loop.
+ {
+ string* l;
+ auto read = [&dd, &l] () -> bool
+ {
+ return (l = dd.read ()) != nullptr;
+ };
+
+ if (!read ()) // Rule id.
+ break;
+
+ // We can omit this for as long as we don't break our blank line
+ // anchors semantics.
+ //
+#if 0
+ if (*l != rule_id_)
+ fail << "unable to clean dynamic target group " << t
+ << " with old depdb";
+#endif
+
+ // Note that we cannot read out expected lines since there can be
+ // custom depdb builtins. So we use the blank lines as anchors to
+ // skip to the parts we need.
+ //
+ // Skip until the first blank that separated custom depdb entries from
+ // the prerequisites list.
+ {
+ bool g;
+ while ((g = read ()) && !l->empty ()) ;
+ if (!g)
+ break;
+ }
+
+ // Next read the prerequisites, detecting fsdir{} entries if asked.
+ //
+ {
+ bool g;
+ while ((g = read ()) && !l->empty ())
+ {
+ if (fsdir)
+ {
+ path p (*l);
+ if (p.to_directory ())
+ r.second.push_back (path_cast<dir_path> (move (p)));
+ }
+ }
+
+ if (!g)
+ break;
+ }
+
+ // Read the dynamic target files. We should always end with a blank
+ // line.
+ //
+ for (;;)
+ {
+ if (!read () || l->empty ())
+ break;
+
+ // Split into type and path.
+ //
+ size_t p (l->find (' '));
+ if (p == string::npos || // Invalid format.
+ p == 0 || // Empty type.
+ p + 1 == l->size ()) // Empty path.
+ break;
+
+ r.first.push_back (
+ dynamic_target {string (*l, 0, p), path (*l, p + 1, string::npos)});
+ }
+
+ break;
+ }
+
+ return r;
+ };
+
+ // Target path to derive the depdb path, query mtime (if file), etc.
+ //
+ // To derive the depdb path for a group with at least one static member we
+ // use the path of the first member. For a group without any static
+ // members we use the group name with the target type name as the
+ // second-level extension.
//
- if (pattern != nullptr)
- pattern->apply_prerequisites (a, xt, me);
+ auto target_path = [&t, g, p = path ()] () mutable -> const path&
+ {
+ return
+ g == nullptr ? t.as<file> ().path () :
+ g->members_static != 0 ? g->members.front ()->as<file> ().path () :
+ (p = g->dir / (g->name + '.' + g->type ().name));
+ };
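+ // For example (names are hypothetical), a group{assets} in directory
+ // out/ with no static members would yield out/assets.group here and,
+ // consequently, out/assets.group.d as the depdb path below.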
// See if we are providing the standard clean as a fallback.
//
if (me.fallback)
- return &perform_clean_file;
+ {
+ // For depdb-dyndep --dyn-target use depdb to clean dynamic targets.
+ //
+ if (script.depdb_dyndep_dyn_target)
+ {
+ // Note that only removing the relevant filesystem entries is not
+ // enough: we actually have to populate the group with members since
+ // this information could be used to clean derived targets (for
+ // example, object files). So we just do that and let the standard
+ // clean logic take care of them the same as static members.
+ //
+ // NOTE that this logic should be consistent with what we have in
+ // exec_depdb_dyndep().
+ //
+ using dyndep = dyndep_rule;
+
+ function<dyndep::group_filter_func> filter;
+ if (g != nullptr)
+ {
+ filter = [] (mtime_target& g, const build2::file& m)
+ {
+ auto& ms (g.as<group> ().members);
+ return find (ms.begin (), ms.end (), &m) == ms.end ();
+ };
+ }
+
+ pair<dynamic_targets, dir_paths> p (
+ read_dyn_targets (target_path () + ".d", true));
+
+ for (dynamic_target& dt: p.first)
+ {
+ path& f (dt.path);
+
+ // Resolve target type. Clean it as file if unable to.
+ //
+ const target_type* tt (bs.find_target_type (dt.type));
+ if (tt == nullptr)
+ tt = &file::static_type;
+
+ if (g != nullptr)
+ {
+ pair<const build2::file&, bool> r (
+ dyndep::inject_group_member (a, bs, *g, move (f), *tt, filter));
+
+ if (r.second)
+ g->members.push_back (&r.first);
+ }
+ else
+ {
+ // Note that here we don't bother cleaning any old dynamic targets
+ // -- the more we can clean, the merrier.
+ //
+ dyndep::inject_adhoc_group_member (a, bs, t, move (f), *tt);
+ }
+ }
- // See if this is not update or not on a file-based target.
+ // Enter fsdir{} prerequisites.
+ //
+ // See the add lambda in exec_depdb_dyndep() for background.
+ //
+ for (dir_path& d: p.second)
+ {
+ dir_path o; string n; // For GCC 13 -Wdangling-reference.
+ const fsdir& dt (search<fsdir> (t,
+ move (d),
+ move (o),
+ move (n), nullptr, nullptr));
+ match_sync (a, dt);
+ pts.push_back (prerequisite_target (&dt, true /* adhoc */));
+ }
+ }
+
+ return g == nullptr ? perform_clean_file : perform_clean_group;
+ }
+
+ // If we have any update during match prerequisites, now is the time to
+ // update them.
+ //
+ // Note that we ignore the result and whether it renders us out of date,
+ // leaving it to the common execute logic in perform_update_*().
+ //
+ // Note also that update_during_match_prerequisites() spoils
+ // prerequisite_target::data.
//
- if (a != perform_update_id || !xt.is_a<file> ())
+ if (a == perform_update_id)
+ update_during_match_prerequisites (trace, a, t);
+
+ // See if this is not update or not on a file/group-based target.
+ //
+ if (a != perform_update_id || !(g != nullptr || t.is_a<file> ()))
{
- return [d, this] (action a, const target& t)
+ // Make sure we get small object optimization.
+ //
+ if (deadline)
{
- return default_action (a, t, d);
- };
+ return [dv = *deadline, this] (action a, const target& t)
+ {
+ return default_action (a, t, dv);
+ };
+ }
+ else
+ {
+ return [this] (action a, const target& t)
+ {
+ return default_action (a, t, nullopt);
+ };
+ }
}
+ // This is a perform update on a file or group target.
+ //
// See if this is the simple case with only static dependencies.
//
if (!script.depdb_dyndep)
{
return [this] (action a, const target& t)
{
- return perform_update_file (a, t);
+ return perform_update_file_or_group (a, t);
};
}
- // This is a perform update on a file target with extraction of dynamic
- // dependency information either in the depdb preamble (depdb-dyndep
- // without --byproduct) or as a byproduct of the recipe body execution
- // (depdb-dyndep with --byproduct).
+ // This is a perform update on a file or group target with extraction of
+ // dynamic dependency information either in the depdb preamble
+ // (depdb-dyndep without --byproduct) or as a byproduct of the recipe body
+ // execution (depdb-dyndep with --byproduct).
//
// For the former case, we may need to add additional prerequisites (or
// even target group members). We also have to save any such additional
@@ -379,37 +820,97 @@ namespace build2
// example and all this logic is based on the prior work in the cc module
// where you can often find more detailed rationale for some of the steps
// performed (like the fsdir update below).
- //
- context& ctx (xt.ctx);
- file& t (xt.as<file> ());
- const path& tp (t.path ());
- const scope& bs (t.base_scope ());
+ // Re-acquire fsdir{} specified by the user, similar to inject_fsdir()
+ // (which we have disabled; see above).
+ //
+ if (dir == nullptr)
+ {
+ for (const target* pt: pts)
+ {
+ if (pt != nullptr)
+ {
+ if (const fsdir* dt = pt->is_a<fsdir> ())
+ {
+ if (dt->dir == t.dir)
+ {
+ dir = dt;
+ break;
+ }
+ }
+ }
+ }
+ }
if (dir != nullptr)
- fsdir_rule::perform_update_direct (a, t);
+ fsdir_rule::perform_update_direct (a, *dir);
// Because the depdb preamble can access $<, we have to blank out all the
// ad hoc prerequisites. Since we will still need them later, we "move"
// them to the auxiliary data member in prerequisite_target (see
// execute_update_prerequisites() for details).
//
- auto& pts (t.prerequisite_targets[a]);
+ // Note: set the include_target flag for the updated_during_match() check.
+ //
for (prerequisite_target& p: pts)
{
// Note that fsdir{} injected above is adhoc.
//
- if (p.target != nullptr && p.adhoc)
+ if (p.target != nullptr && p.adhoc ())
{
p.data = reinterpret_cast<uintptr_t> (p.target);
p.target = nullptr;
+ p.include |= prerequisite_target::include_target;
}
}
+ const path& tp (target_path ());
+
+ // Note that while it's tempting to turn match_data* into recipes, some of
+ // their members are not movable. And in the end we will have the same
+ // result: one dynamic memory allocation.
+ //
+ unique_ptr<match_data> md;
+ unique_ptr<match_data_byproduct> mdb;
+
+ dynamic_targets old_dyn_targets;
+
+ if (script.depdb_dyndep_byproduct)
+ {
+ mdb.reset (new match_data_byproduct (
+ a, t, bs, script.depdb_preamble_temp_dir));
+ }
+ else
+ {
+ md.reset (new match_data (a, t, bs, script.depdb_preamble_temp_dir));
+
+ // If the set of dynamic targets can change based on changes to the
+ // inputs (say, each entity, such as a type, in the input file gets its
+ // own output file), then we can end up with a large number of old
+ // output files laying around because they are not part of the new
+ // dynamic target set. So we try to clean them up based on the old depdb
+ // information, similar to how we do it for perform_clean above (except
+ // here we will just keep the list of old files).
+ //
+ // Note: do before opening depdb, which can start over-writing it.
+ //
+ // We also have to do this speculatively, without knowing whether we
+ // will need to update. Oh, well, being dynamic ain't free.
+ //
+ if (script.depdb_dyndep_dyn_target)
+ old_dyn_targets = read_dyn_targets (tp + ".d", false).first;
+ }
+
depdb dd (tp + ".d");
// NOTE: see the "static dependencies" version (with comments) below.
//
+ // NOTE: We use blank lines as anchors to skip directly to certain entries
+ // (e.g., dynamic targets). So make sure none of the other entries
+ // can be blank (for example, see `depdb string` builtin).
+ //
+ // NOTE: KEEP IN SYNC WITH read_dyn_targets ABOVE!
+ //
if (dd.expect ("<ad hoc buildscript recipe> 1") != nullptr)
l4 ([&]{trace << "rule mismatch forcing update of " << t;});
@@ -426,25 +927,50 @@ namespace build2
{
if (const target* pt =
(p.target != nullptr ? p.target :
- p.adhoc ? reinterpret_cast<target*> (p.data) :
+ p.adhoc () ? reinterpret_cast<target*> (p.data) :
nullptr))
{
+ if ((p.include & include_unmatch) != 0) // Skip update=unmatch.
+ continue;
+
hash_prerequisite_target (prq_cs, exe_cs, env_cs, *pt, storage);
}
}
{
sha256 cs;
- hash_script_vars (cs, script, t, storage);
+ hash_script_vars (cs, script, bs, t, storage);
if (dd.expect (cs.string ()) != nullptr)
l4 ([&]{trace << "recipe variable change forcing update of " << t;});
}
+ // Static targets and prerequisites (there can also be dynamic targets;
+ // see dyndep --dyn-target).
+ //
{
sha256 tcs;
- for (const target* m (&t); m != nullptr; m = m->adhoc_member)
- hash_target (tcs, *m, storage);
+ if (g == nullptr)
+ {
+ // There is a nuance: in an operation batch (e.g., `b update
+ // update`) we will already have the dynamic targets as members on
+ // the subsequent operations and we need to make sure we don't treat
+ // them as static. Using target_decl to distinguish the two seems
+ // like a natural way.
+ //
+ for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ {
+ if (m->decl == target_decl::real)
+ hash_target (tcs, *m, storage);
+ }
+ }
+ else
+ {
+ // Feels like there is not much sense in hashing the group itself.
+ //
+ for (const target* m: g->members)
+ hash_target (tcs, *m, storage);
+ }
if (dd.expect (tcs.string ()) != nullptr)
l4 ([&]{trace << "target set change forcing update of " << t;});
@@ -462,18 +988,8 @@ namespace build2
}
}
- unique_ptr<match_data> md;
- unique_ptr<match_data_byproduct> mdb;
-
- if (script.depdb_dyndep_byproduct)
- {
- mdb.reset (new match_data_byproduct (
- a, t, script.depdb_preamble_temp_dir));
- }
- else
- md.reset (new match_data (a, t, script.depdb_preamble_temp_dir));
-
-
+ // Get ready to run the depdb preamble.
+ //
build::script::environment& env (mdb != nullptr ? mdb->env : md->env);
build::script::default_runner& run (mdb != nullptr ? mdb->run : md->run);
@@ -484,21 +1000,51 @@ namespace build2
{
build::script::parser p (ctx);
p.execute_depdb_preamble (a, bs, t, env, script, run, dd);
+
+ // Write a blank line after the custom depdb entries and before
+ // prerequisites, which we use as an anchor (see read_dyn_targets
+ // above). We only do it for the new --dyn-target mode in order not to
+ // invalidate the existing depdb instances.
+ //
+ if (script.depdb_dyndep_dyn_target)
+ dd.expect ("");
}
// Determine if we need to do an update based on the above checks.
//
- bool update;
+ bool update (false);
timestamp mt;
if (dd.writing ())
update = true;
else
{
- if ((mt = t.mtime ()) == timestamp_unknown)
- t.mtime (mt = mtime (tp)); // Cache.
+ if (g == nullptr)
+ {
+ const file& ft (t.as<file> ());
- update = dd.mtime > mt;
+ if ((mt = ft.mtime ()) == timestamp_unknown)
+ ft.mtime (mt = mtime (tp)); // Cache.
+ }
+ else
+ {
+ // Use static member, old dynamic, or force update.
+ //
+ const path* p (
+ g->members_static != 0
+ ? &tp /* first static member path */
+ : (!old_dyn_targets.empty ()
+ ? &old_dyn_targets.front ().path
+ : nullptr));
+
+ if (p != nullptr)
+ mt = g->load_mtime (*p);
+ else
+ update = true;
+ }
+
+ if (!update)
+ update = dd.mtime > mt;
}
if (update)
@@ -522,7 +1068,8 @@ namespace build2
// update on encountering any non-existent files in depdb, we may
// actually incorrectly "validate" some number of depdb entries while
// having an out-of-date main source file. We could probably avoid the
- // update if we are already updating.
+ // update if we are already updating (or not: there is pre-generation
+ // to consider; see inject_existing_file() for details).
//
{
build::script::parser p (ctx);
@@ -558,7 +1105,7 @@ namespace build2
size_t& skip_count (mdb->skip_count);
auto add = [&trace, what,
- a, &bs, &t,
+ a, &bs, &t, pts_n = mdb->pts_n,
&byp, &map_ext,
&skip_count, mt] (path fp) -> optional<bool>
{
@@ -573,7 +1120,7 @@ namespace build2
//
if (optional<bool> u = dyndep::inject_existing_file (
trace, what,
- a, t,
+ a, t, pts_n,
*ft, mt,
false /* fail */,
false /* adhoc */,
@@ -633,7 +1180,7 @@ namespace build2
// Note that in case of dry run we will have an incomplete (but valid)
// database which will be updated on the next non-dry run.
//
- if (!update || ctx.dry_run)
+ if (!update || ctx.dry_run_option)
dd.close (false /* mtime_check */);
else
mdb->dd = dd.close_to_reopen ();
@@ -643,87 +1190,109 @@ namespace build2
mdb->bs = &bs;
mdb->mt = update ? timestamp_nonexistent : mt;
- // @@ TMP: re-enable once recipe becomes move_only_function.
- //
-#if 0
- return [this, md = move (mdb)] (action a, const target& t) mutable
+ return [this, md = move (mdb)] (action a, const target& t)
{
- auto r (perform_update_file_dyndep_byproduct (a, t, *md));
- md.reset (); // @@ TMP: is this really necessary (+mutable)?
- return r;
+ return perform_update_file_or_group_dyndep_byproduct (a, t, *md);
};
-#else
- t.data (move (mdb));
- return recipe ([this] (action a, const target& t) mutable
- {
- auto md (move (t.data<unique_ptr<match_data_byproduct>> ()));
- return perform_update_file_dyndep_byproduct (a, t, *md);
- });
-#endif
}
else
{
// Run the second half of the preamble (depdb-dyndep commands) to update
- // our prerequisite targets and extract dynamic dependencies.
+ // our prerequisite targets and extract dynamic dependencies (targets
+ // and prerequisites).
//
// Note that this should be the last update to depdb (the invalidation
// order semantics).
//
- bool deferred_failure (false);
+ md->deferred_failure = false;
{
build::script::parser p (ctx);
p.execute_depdb_preamble_dyndep (a, bs, t,
env, script, run,
dd,
+ md->dyn_targets,
update,
mt,
- deferred_failure);
+ md->deferred_failure);
}
- if (update && dd.reading () && !ctx.dry_run)
+ if (update && dd.reading () && !ctx.dry_run_option)
dd.touch = timestamp_unknown;
dd.close (false /* mtime_check */);
- md->dd = move (dd.path);
- // Pass on base scope and update/mtime.
+ // Remove previous dynamic targets since their set may change with
+ // changes to the inputs.
+ //
+ // The dry-run mode complicates things: if we don't remove the old
+ // files, then that information will be gone (since we update depdb even
+ // in the dry-run mode). But if we remove everything in the dry-run
+ // mode, then we may also remove some of the current files, which would
+ // be incorrect. So let's always remove but only files that are not in
+ // the current set.
+ //
+ // Note that we used to do this in perform_update_file_or_group_dyndep()
+ // but that had a tricky issue: if we end up performing match but not
+ // execute (e.g., via the resolve_members() logic), then we will not
+ // clean up old targets but lose this information (since the depdb has
+ // been updated). So now we do it here, which is a bit strange, but it
+ // sort of fits into that dry-run logic above. Note also that we do this
+ // unconditionally, update or not, since if everything is up to date,
+ // then old and new sets should be the same.
+ //
+ for (const dynamic_target& dt: old_dyn_targets)
+ {
+ const path& f (dt.path);
+
+ if (find_if (md->dyn_targets.begin (), md->dyn_targets.end (),
+ [&f] (const dynamic_target& dt)
+ {
+ return dt.path == f;
+ }) == md->dyn_targets.end ())
+ {
+ // This is an optimization so best effort.
+ //
+ if (optional<rmfile_status> s = butl::try_rmfile_ignore_error (f))
+ {
+ if (s == rmfile_status::success && verb >= 2)
+ text << "rm " << f;
+ }
+ }
+ }
+
+ // Pass on the base scope, depdb path, and update/mtime.
//
md->bs = &bs;
+ md->dd = move (dd.path);
md->mt = update ? timestamp_nonexistent : mt;
- md->deferred_failure = deferred_failure;
- // @@ TMP: re-enable once recipe becomes move_only_function.
- //
-#if 0
- return [this, md = move (md)] (action a, const target& t) mutable
+ return [this, md = move (md)] (action a, const target& t)
{
- auto r (perform_update_file_dyndep (a, t, *md));
- md.reset (); // @@ TMP: is this really necessary (+mutable)?
- return r;
+ return perform_update_file_or_group_dyndep (a, t, *md);
};
-#else
- t.data (move (md));
- return recipe ([this] (action a, const target& t) mutable
- {
- auto md (move (t.data<unique_ptr<match_data>> ()));
- return perform_update_file_dyndep (a, t, *md);
- });
-#endif
}
}
target_state adhoc_buildscript_rule::
- perform_update_file_dyndep_byproduct (action a,
- const target& xt,
- match_data_byproduct& md) const
+ perform_update_file_or_group_dyndep_byproduct (
+ action a, const target& t, match_data_byproduct& md) const
{
// Note: using shared function name among the three variants.
//
- tracer trace ("adhoc_buildscript_rule::perform_update_file");
+ tracer trace (
+ "adhoc_buildscript_rule::perform_update_file_or_group_dyndep_byproduct");
- context& ctx (xt.ctx);
+ context& ctx (t.ctx);
- const file& t (xt.as<file> ());
+ // For a group we use the first (for now static) member as a source of
+ // mtime.
+ //
+ // @@ TODO: expl: byproduct: Note that until we support dynamic targets in
+ // the byproduct mode, we verify there is at least one static member in
+ // apply() above. Once we do support this, we will need to verify after
+ // the dependency extraction below.
+ //
+ const group* g (t.is_a<group> ());
// Note that even if we've updated all our prerequisites in apply(), we
// still need to execute them here to keep the dependency counts straight.
@@ -752,7 +1321,14 @@ namespace build2
if (!ctx.dry_run || verb != 0)
{
- execute_update_file (bs, a, t, env, run);
+ if (g == nullptr)
+ execute_update_file (bs, a, t.as<file> (), env, run);
+ else
+ {
+ // Note: no dynamic members yet.
+ //
+ execute_update_group (bs, a, *g, env, run);
+ }
}
// Extract the dynamic dependency information as byproduct of the recipe
@@ -792,15 +1368,25 @@ namespace build2
const auto& pts (t.prerequisite_targets[a]);
auto add = [&trace, what,
- a, &bs, &t, &pts, pts_n = md.pts_n,
+ a, &bs, &t, g, &pts, pts_n = md.pts_n,
&byp, &map_ext, &dd, &skip] (path fp)
{
normalize_external (fp, what);
+ // Note that unless we take into account dynamic targets, the skip
+ // logic below falls apart since we neither see targets entered via
+ // prerequsites (skip static prerequisites) nor by the cache=true code
+ // above (skip depdb entries).
+ //
+ // If this turns out to be racy (which is the reason we would skip
+ // dynamic targets; see the find_file() implementation for details),
+ // then the only answer for now is to not use the byproduct mode.
+ //
if (const build2::file* ft = dyndep::find_file (
trace, what,
a, bs, t,
fp, false /* cache */, true /* normalized */,
+ true /* dynamic */,
map_ext, *byp.default_type).first)
{
// Skip if this is one of the static prerequisites provided it was
@@ -812,23 +1398,33 @@ namespace build2
if (const target* pt =
(p.target != nullptr ? p.target :
- p.adhoc ? reinterpret_cast<target*> (p.data) :
+ p.adhoc () ? reinterpret_cast<target*> (p.data) :
nullptr))
{
- if (ft == pt && (p.adhoc || p.data == 1))
+ if (ft == pt && (p.adhoc () || p.data == 1))
return;
}
}
- // Skip if this is one of the targets.
+ // Skip if this is one of the targets (see the non-byproduct version
+ // for background).
//
if (byp.drop_cycles)
{
- for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ if (g != nullptr)
{
- if (ft == m)
+ auto& ms (g->members);
+ if (find (ms.begin (), ms.end (), ft) != ms.end ())
return;
}
+ else
+ {
+ for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ {
+ if (ft == m)
+ return;
+ }
+ }
}
// Skip until where we left off.
@@ -844,7 +1440,7 @@ namespace build2
// @@ Currently we will issue an imprecise diagnostics if this is
// a static prerequisite that was not updated (see above).
//
- dyndep::verify_existing_file (trace, what, a, t, *ft);
+ dyndep::verify_existing_file (trace, what, a, t, pts_n, *ft);
}
dd.write (fp);
@@ -905,7 +1501,7 @@ namespace build2
if (r.second.empty ())
continue;
- // @@ TODO: what should we do about targets?
+ // Note: no support for dynamic targets in byproduct mode.
//
if (r.first == make_type::target)
continue;
@@ -915,10 +1511,11 @@ namespace build2
if (f.relative ())
{
if (!byp.cwd)
- fail (il) << "relative path '" << f << "' in make dependency"
- << " declaration" <<
+ fail (il) << "relative " << what
+ << " prerequisite path '" << f
+ << "' in make dependency declaration" <<
info << "consider using --cwd to specify relative path "
- << "base";
+ << "base";
f = *byp.cwd / f;
}
@@ -933,6 +1530,52 @@ namespace build2
break;
}
+ case dyndep_format::lines:
+ {
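+ // As an illustration (paths are made up), the byproduct file in this
+ // format lists one prerequisite path per line:
+ //
+ //   /usr/include/zlib.h
+ //   gen/config.h        <- relative, resolved against --cwd
+ //
+ // Leading-space entries (the non-existent prerequisite marker) and
+ // directory (fsdir{}) entries are rejected below.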
+ for (string l;; ++il.line) // Reuse the buffer.
+ {
+ if (eof (getline (is, l)))
+ break;
+
+ if (l.empty ())
+ fail (il) << "blank line in prerequisites list";
+
+ if (l.front () == ' ')
+ fail (il) << "non-existent prerequisite in --byproduct mode";
+
+ path f;
+ try
+ {
+ f = path (l);
+
+ // fsdir{} prerequisites only make sense with dynamic targets.
+ //
+ if (f.to_directory ())
+ throw invalid_path ("");
+
+ if (f.relative ())
+ {
+ if (!byp.cwd)
+ fail (il) << "relative " << what
+ << " prerequisite path '" << f
+ << "' in lines dependency declaration" <<
+ info << "consider using --cwd to specify "
+ << "relative path base";
+
+ f = *byp.cwd / f;
+ }
+ }
+ catch (const invalid_path&)
+ {
+ fail (il) << "invalid " << what << " prerequisite path '"
+ << l << "'";
+ }
+
+ add (move (f));
+ }
+
+ break;
+ }
}
// Add the terminating blank line.
@@ -940,6 +1583,8 @@ namespace build2
dd.expect ("");
dd.close ();
+ //@@ TODO: expl: byproduct: verify have at least one member.
+
md.dd.path = move (dd.path); // For mtime check below.
}
@@ -948,20 +1593,36 @@ namespace build2
timestamp now (system_clock::now ());
if (!ctx.dry_run)
- depdb::check_mtime (start, md.dd.path, t.path (), now);
+ {
+ // Only now we know for sure there must be a member in the group.
+ //
+ const file& ft ((g == nullptr ? t : *g->members.front ()).as<file> ());
+
+ depdb::check_mtime (start, md.dd.path, ft.path (), now);
+ }
+
+ (g == nullptr
+ ? static_cast<const mtime_target&> (t.as<file> ())
+ : static_cast<const mtime_target&> (*g)).mtime (now);
- t.mtime (now);
return target_state::changed;
}
target_state adhoc_buildscript_rule::
- perform_update_file_dyndep (action a, const target& xt, match_data& md) const
+ perform_update_file_or_group_dyndep (
+ action a, const target& t, match_data& md) const
{
- tracer trace ("adhoc_buildscript_rule::perform_update_file");
+ tracer trace (
+ "adhoc_buildscript_rule::perform_update_file_or_group_dyndep");
- context& ctx (xt.ctx);
+ context& ctx (t.ctx);
- const file& t (xt.as<file> ());
+ // For a group we use the first (static or dynamic) member as a source of
+ // mtime. Note that in this case there must be at least one since we fail
+ // if we were unable to extract any dynamic members and there are no
+ // static (see exec_depdb_dyndep()).
+ //
+ const group* g (t.is_a<group> ());
// Note that even if we've updated all our prerequisites in apply(), we
// still need to execute them here to keep the dependency counts straight.
@@ -990,7 +1651,11 @@ namespace build2
if (!ctx.dry_run || verb != 0)
{
- execute_update_file (*md.bs, a, t, env, run, md.deferred_failure);
+ if (g == nullptr)
+ execute_update_file (
+ *md.bs, a, t.as<file> (), env, run, md.deferred_failure);
+ else
+ execute_update_group (*md.bs, a, *g, env, run, md.deferred_failure);
}
run.leave (env, script.end_loc);
@@ -998,26 +1663,67 @@ namespace build2
timestamp now (system_clock::now ());
if (!ctx.dry_run)
- depdb::check_mtime (start, md.dd, t.path (), now);
+ {
+ // Note: in case of deferred failure we may not have any members.
+ //
+ const file& ft ((g == nullptr ? t : *g->members.front ()).as<file> ());
+ depdb::check_mtime (start, md.dd, ft.path (), now);
+ }
+
+ (g == nullptr
+ ? static_cast<const mtime_target&> (t)
+ : static_cast<const mtime_target&> (*g)).mtime (now);
- t.mtime (now);
return target_state::changed;
}
target_state adhoc_buildscript_rule::
- perform_update_file (action a, const target& xt) const
+ perform_update_file_or_group (action a, const target& t) const
{
- tracer trace ("adhoc_buildscript_rule::perform_update_file");
+ tracer trace ("adhoc_buildscript_rule::perform_update_file_or_group");
+
+ context& ctx (t.ctx);
+ const scope& bs (t.base_scope ());
+
+ // For a group we use the first (static) member to derive depdb path, as a
+ // source of mtime, etc. Note that in this case there must be a static
+ // member since in this version of perform_update we don't extract dynamic
+ // dependencies (see apply() details).
+ //
+ const group* g (t.is_a<group> ());
- context& ctx (xt.ctx);
+ const file& ft ((g == nullptr ? t : *g->members.front ()).as<file> ());
+ const path& tp (ft.path ());
- const file& t (xt.as<file> ());
- const path& tp (t.path ());
+ // Support creating file symlinks using ad hoc recipes.
+ //
+ auto path_symlink = [&tp] ()
+ {
+ pair<bool, butl::entry_stat> r (
+ butl::path_entry (tp,
+ false /* follow_symlinks */,
+ true /* ignore_errors */));
+
+ return r.first && r.second.type == butl::entry_type::symlink;
+ };
// Update prerequisites and determine if any of them render this target
// out-of-date.
//
- timestamp mt (t.load_mtime ());
+ // If the file entry exists, check if it's a symlink.
+ //
+ bool symlink (false);
+ timestamp mt;
+
+ if (g == nullptr)
+ {
+ mt = ft.load_mtime ();
+
+ if (mt != timestamp_nonexistent)
+ symlink = path_symlink ();
+ }
+ else
+ mt = g->load_mtime (tp);
// This is essentially ps=execute_prerequisites(a, t, mt) which we
// cannot use because we need to see ad hoc prerequisites.
@@ -1036,9 +1742,12 @@ namespace build2
{
if (const target* pt =
(p.target != nullptr ? p.target :
- p.adhoc ? reinterpret_cast<target*> (p.data)
+ p.adhoc () ? reinterpret_cast<target*> (p.data)
: nullptr))
{
+ if ((p.include & include_unmatch) != 0) // Skip update=unmatch.
+ continue;
+
hash_prerequisite_target (prq_cs, exe_cs, env_cs, *pt, storage);
}
}
@@ -1098,7 +1807,7 @@ namespace build2
//
{
sha256 cs;
- hash_script_vars (cs, script, t, storage);
+ hash_script_vars (cs, script, bs, t, storage);
if (dd.expect (cs.string ()) != nullptr)
l4 ([&]{trace << "recipe variable change forcing update of " << t;});
@@ -1108,8 +1817,18 @@ namespace build2
//
{
sha256 tcs;
- for (const target* m (&t); m != nullptr; m = m->adhoc_member)
- hash_target (tcs, *m, storage);
+ if (g == nullptr)
+ {
+ for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ hash_target (tcs, *m, storage);
+ }
+ else
+ {
+ // Feels like there is not much sense in hashing the group itself.
+ //
+ for (const target* m: g->members)
+ hash_target (tcs, *m, storage);
+ }
if (dd.expect (tcs.string ()) != nullptr)
l4 ([&]{trace << "target set change forcing update of " << t;});
@@ -1129,8 +1848,6 @@ namespace build2
}
}
- const scope* bs (nullptr);
-
// Execute the custom dependency change tracking commands, if present.
//
// Note that we share the environment between the execute_depdb_preamble()
@@ -1145,7 +1862,10 @@ namespace build2
if (!depdb_preamble)
{
- if (dd.writing () || dd.mtime > mt)
+ // If this is a symlink, depdb mtime could be greater than the symlink
+ // target.
+ //
+ if (dd.writing () || (dd.mtime > mt && !symlink))
update = true;
if (!update)
@@ -1155,25 +1875,23 @@ namespace build2
}
}
- build::script::environment env (a, t, false /* temp_dir */);
+ build::script::environment env (a, t, bs, false /* temp_dir */);
build::script::default_runner run;
if (depdb_preamble)
{
- bs = &t.base_scope ();
-
if (script.depdb_preamble_temp_dir)
env.set_temp_dir_variable ();
build::script::parser p (ctx);
run.enter (env, script.start_loc);
- p.execute_depdb_preamble (a, *bs, t, env, script, run, dd);
+ p.execute_depdb_preamble (a, bs, t, env, script, run, dd);
}
// Update if depdb mismatch.
//
- if (dd.writing () || dd.mtime > mt)
+ if (dd.writing () || (dd.mtime > mt && !symlink))
update = true;
dd.close ();
@@ -1195,28 +1913,51 @@ namespace build2
bool r (false);
if (!ctx.dry_run || verb != 0)
{
- // Prepare to execute the script diag line and/or body.
+ // Prepare to execute the script diag preamble and/or body.
//
- if (bs == nullptr)
- bs = &t.base_scope ();
+ r = g == nullptr
+ ? execute_update_file (bs, a, ft, env, run)
+ : execute_update_group (bs, a, *g, env, run);
- if ((r = execute_update_file (*bs, a, t, env, run)))
+ if (r)
{
if (!ctx.dry_run)
- dd.check_mtime (tp);
+ {
+ if (g == nullptr)
+ symlink = path_symlink ();
+
+ // Again, if this is a symlink, depdb mtime will be greater than
+ // the symlink target.
+ //
+ if (!symlink)
+ dd.check_mtime (tp);
+ }
}
}
if (r || depdb_preamble)
run.leave (env, script.end_loc);
- t.mtime (system_clock::now ());
+ // Symlinks don't play well with dry-run: we can't extract an accurate target
+ // timestamp without creating the symlink. Overriding the dry-run doesn't
+ // seem to be an option since we don't know whether it will be a symlink
+ // until it's created. At least we are being pessimistic rather than
+ // optimistic here.
+ //
+ (g == nullptr
+ ? static_cast<const mtime_target&> (ft)
+ : static_cast<const mtime_target&> (*g)).mtime (
+ symlink
+ ? build2::mtime (tp)
+ : system_clock::now ());
+
return target_state::changed;
}
// Update prerequisite targets.
//
- // Each prerequisite target should be in one of the following states:
+ // Each (non-NULL) prerequisite target should be in one of the following
+ // states:
//
// target adhoc data
// --------------------
@@ -1224,6 +1965,7 @@ namespace build2
// !NULL false 1 - normal prerequisite already updated
// !NULL true 0 - ad hoc prerequisite to be updated and blanked
// NULL true !NULL - ad hoc prerequisite already updated and blanked
+ // NULL false !NULL - unmatched prerequisite (ignored by this function)
//
// Note that we still execute already updated prerequisites to keep the
// dependency counts straight. But we don't consider them for the "renders
@@ -1231,6 +1973,8 @@ namespace build2
//
// See also environment::set_special_variables().
//
+ // See also perform_execute() which has to deal with these shenanigans.
+ //
optional<target_state> adhoc_buildscript_rule::
execute_update_prerequisites (action a, const target& t, timestamp mt) const
{
@@ -1239,7 +1983,6 @@ namespace build2
// This is essentially a customized execute_prerequisites(a, t, mt).
//
size_t busy (ctx.count_busy ());
- size_t exec (ctx.count_executed ());
target_state rs (target_state::unchanged);
@@ -1251,7 +1994,7 @@ namespace build2
{
if (const target* pt =
(p.target != nullptr ? p.target :
- p.adhoc ? reinterpret_cast<target*> (p.data) : nullptr))
+ p.adhoc () ? reinterpret_cast<target*> (p.data) : nullptr))
{
target_state s (execute_async (a, *pt, busy, t[a].task_count));
assert (s != target_state::postponed);
@@ -1265,18 +2008,18 @@ namespace build2
{
if (const target* pt =
(p.target != nullptr ? p.target :
- p.adhoc ? reinterpret_cast<target*> (p.data) : nullptr))
+ p.adhoc () ? reinterpret_cast<target*> (p.data) : nullptr))
{
- ctx.sched.wait (exec, (*pt)[a].task_count, scheduler::work_none);
+ target_state s (execute_complete (a, *pt));
if (p.data == 0)
{
- target_state s (pt->executed_state (a));
rs |= s;
- // Compare our timestamp to this prerequisite's.
+ // Compare our timestamp to this prerequisite's, skipping
+ // update=unmatch.
//
- if (!e)
+ if (!e && (p.include & include_unmatch) == 0)
{
// If this is an mtime-based target, then compare timestamps.
//
@@ -1297,10 +2040,14 @@ namespace build2
// Blank out adhoc.
//
- if (p.adhoc)
+ // Note: set the include_target flag for the updated_during_match()
+ // check.
+ //
+ if (p.adhoc ())
{
p.data = reinterpret_cast<uintptr_t> (p.target);
p.target = nullptr;
+ p.include |= prerequisite_target::include_target;
}
}
}
@@ -1309,16 +2056,18 @@ namespace build2
return e ? nullopt : optional<target_state> (rs);
}
- // Return true if execute_body() was called and thus the caller should call
- // run.leave().
+ // Return true if execute_diag_preamble() and/or execute_body() were called
+ // and thus the caller should call run.leave().
//
bool adhoc_buildscript_rule::
execute_update_file (const scope& bs,
- action, const file& t,
+ action a, const file& t,
build::script::environment& env,
build::script::default_runner& run,
bool deferred_failure) const
{
+ // NOTE: similar to execute_update_group() below.
+ //
context& ctx (t.ctx);
const scope& rs (*bs.root_scope ());
@@ -1329,28 +2078,73 @@ namespace build2
//
build::script::parser p (ctx);
- if (verb == 1)
+ bool exec_body (!ctx.dry_run || verb >= 2);
+ bool exec_diag (!script.diag_preamble.empty () && (exec_body || verb == 1));
+ bool exec_depdb (!script.depdb_preamble.empty ());
+
+ if (script.diag_name)
{
- if (script.diag_line)
- {
- text << p.execute_special (rs, bs, env, *script.diag_line);
- }
- else
+ if (verb == 1)
{
- // @@ TODO (and in default_action() below):
+ // By default we print the first non-ad hoc prerequisite target as the
+ // "main" prerequisite, unless there isn't any or it's not file-based,
+ // in which case we fall back to the second form without the
+ // prerequisite. Potential future improvements:
//
- // - we are printing target, not source (like in most other places)
+ // - Somehow detect that the first prerequisite target is a tool being
+ // executed and fall back to the second form. It's tempting to just
+ // exclude all exe{} targets, but this could be a rule for something
+ // like strip.
//
- // - printing of ad hoc target group (the {hxx cxx}{foo} idea)
- //
- // - if we are printing prerequisites, should we print all of them
- // (including tools)?
- //
- text << *script.diag_name << ' ' << t;
+ const file* pt (nullptr);
+ for (const prerequisite_target& p: t.prerequisite_targets[a])
+ {
+ // See execute_update_prerequisites().
+ //
+ if (p.target != nullptr && !p.adhoc ())
+ {
+ pt = p.target->is_a<file> ();
+ break;
+ }
+ }
+
+ if (t.adhoc_member == nullptr)
+ {
+ if (pt != nullptr)
+ print_diag (script.diag_name->c_str (), *pt, t);
+ else
+ print_diag (script.diag_name->c_str (), t);
+ }
+ else
+ {
+ vector<target_key> ts;
+ for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ ts.push_back (m->key ());
+
+ if (pt != nullptr)
+ print_diag (script.diag_name->c_str (), pt->key (), move (ts));
+ else
+ print_diag (script.diag_name->c_str (), move (ts));
+ }
}
}
+ else if (exec_diag)
+ {
+ if (script.diag_preamble_temp_dir && !script.depdb_preamble_temp_dir)
+ env.set_temp_dir_variable ();
+
+ pair<names, location> diag (
+ p.execute_diag_preamble (rs, bs,
+ env, script, run,
+ verb == 1 /* diag */,
+ !exec_depdb /* enter */,
+ false /* leave */));
+
+ if (verb == 1)
+ print_custom_diag (bs, move (diag.first), diag.second);
+ }
- if (!ctx.dry_run || verb >= 2)
+ if (exec_body)
{
// On failure remove the target files that may potentially exist but
// be invalid.
@@ -1366,13 +2160,15 @@ namespace build2
}
}
- if (script.body_temp_dir && !script.depdb_preamble_temp_dir)
+ if (script.body_temp_dir &&
+ !script.depdb_preamble_temp_dir &&
+ !script.diag_preamble_temp_dir)
env.set_temp_dir_variable ();
p.execute_body (rs, bs,
env, script, run,
- script.depdb_preamble.empty () /* enter */,
- false /* leave */);
+ !exec_depdb && !exec_diag /* enter */,
+ false /* leave */);
if (!ctx.dry_run)
{
@@ -1401,11 +2197,131 @@ namespace build2
for (auto& rm: rms)
rm.cancel ();
}
+ }
+
+ return exec_diag || exec_body;
+ }
+
+ bool adhoc_buildscript_rule::
+ execute_update_group (const scope& bs,
+ action a, const group& g,
+ build::script::environment& env,
+ build::script::default_runner& run,
+ bool deferred_failure) const
+ {
+ // Note: similar to execute_update_file() above (see there for comments).
+ //
+ // NOTE: when called from perform_update_file_or_group_dyndep_byproduct(),
+ // the group does not contain dynamic members yet and thus could
+ // have no members at all.
+ //
+ context& ctx (g.ctx);
+
+ const scope& rs (*bs.root_scope ());
+
+ build::script::parser p (ctx);
+
+ bool exec_body (!ctx.dry_run || verb >= 2);
+ bool exec_diag (!script.diag_preamble.empty () && (exec_body || verb == 1));
+ bool exec_depdb (!script.depdb_preamble.empty ());
+
+ if (script.diag_name)
+ {
+ if (verb == 1)
+ {
+ const file* pt (nullptr);
+ for (const prerequisite_target& p: g.prerequisite_targets[a])
+ {
+ if (p.target != nullptr && !p.adhoc ())
+ {
+ pt = p.target->is_a<file> ();
+ break;
+ }
+ }
+
+ if (pt != nullptr)
+ print_diag (script.diag_name->c_str (), *pt, g);
+ else
+ print_diag (script.diag_name->c_str (), g);
+ }
+ }
+ else if (exec_diag)
+ {
+ if (script.diag_preamble_temp_dir && !script.depdb_preamble_temp_dir)
+ env.set_temp_dir_variable ();
- return true;
+ pair<names, location> diag (
+ p.execute_diag_preamble (rs, bs,
+ env, script, run,
+ verb == 1 /* diag */,
+ !exec_depdb /* enter */,
+ false /* leave */));
+ if (verb == 1)
+ print_custom_diag (bs, move (diag.first), diag.second);
}
- else
- return false;
+
+ if (exec_body)
+ {
+ // On failure remove the target files that may potentially exist but
+ // be invalid.
+ //
+ // Note: we may leave dynamic members if we don't know about them yet.
+ // Feels natural enough.
+ //
+ small_vector<auto_rmfile, 8> rms;
+
+ if (!ctx.dry_run)
+ {
+ for (const target* m: g.members)
+ {
+ if (auto* f = m->is_a<file> ())
+ rms.emplace_back (f->path ());
+ }
+ }
+
+ if (script.body_temp_dir &&
+ !script.depdb_preamble_temp_dir &&
+ !script.diag_preamble_temp_dir)
+ env.set_temp_dir_variable ();
+
+ p.execute_body (rs, bs,
+ env, script, run,
+ !exec_depdb && !exec_diag /* enter */,
+ false /* leave */);
+
+ if (!ctx.dry_run)
+ {
+ if (deferred_failure)
+ fail << "expected error exit status from recipe body";
+
+ // @@ TODO: expl: byproduct
+ //
+ // Note: will not work for dynamic members if we don't know about them
+ // yet. Could probably fix by doing this later, after the dynamic
+ // dependency extraction.
+ //
+#ifndef _WIN32
+ auto chmod = [] (const path& p)
+ {
+ path_perms (p,
+ (path_perms (p) |
+ permissions::xu |
+ permissions::xg |
+ permissions::xo));
+ };
+
+ for (const target* m: g.members)
+ {
+ if (auto* p = m->is_a<exe> ())
+ chmod (p->path ());
+ }
+#endif
+ for (auto& rm: rms)
+ rm.cancel ();
+ }
+ }
+
+ return exec_diag || exec_body;
}
target_state adhoc_buildscript_rule::
@@ -1420,7 +2336,41 @@ namespace build2
// temporary directory ($~) is that it's next to other output which makes
// it easier to examine during recipe troubleshooting.
//
- return perform_clean_extra (a, t.as<file> (), {".d", ".t"});
+ // Finally, we print the entire ad hoc group at verbosity level 1, similar
+ // to the default update diagnostics.
+ //
+ // @@ TODO: .t may also be a temporary directory (and below).
+ //
+ return perform_clean_extra (a,
+ t.as<file> (),
+ {".d", ".t"},
+ {},
+ true /* show_adhoc_members */);
+ }
+
+ target_state adhoc_buildscript_rule::
+ perform_clean_group (action a, const target& xt)
+ {
+ const group& g (xt.as<group> ());
+
+ path d, t;
+ if (g.members_static != 0)
+ {
+ const path& p (g.members.front ()->as<file> ().path ());
+ d = p + ".d";
+ t = p + ".t";
+ }
+ else
+ {
+ // See target_path lambda in apply().
+ //
+ t = g.dir / (g.name + '.' + g.type ().name);
+ d = t + ".d";
+ t += ".t";
+ }
+
+ return perform_clean_group_extra (a, g, {d.string ().c_str (),
+ t.string ().c_str ()});
}
target_state adhoc_buildscript_rule::
@@ -1432,37 +2382,340 @@ namespace build2
context& ctx (t.ctx);
- execute_prerequisites (a, t);
+ target_state ts (target_state::unchanged);
- if (!ctx.dry_run || verb != 0)
+ if (ctx.current_mode == execution_mode::first)
+ ts |= straight_execute_prerequisites (a, t);
+
+ bool exec (!ctx.dry_run || verb != 0);
+
+ // Special handling for fsdir{} (which is the recommended if somewhat
+ // hackish way to represent directory symlinks). See fsdir_rule for
+ // background.
+ //
+ // @@ Note that because there is no depdb, we cannot detect the target
+ // directory change (or any other changes in the script).
+ //
+ if (exec &&
+ (a == perform_update_id || a == perform_clean_id) &&
+ t.is_a<fsdir> ())
+ {
+ // For update we only want to skip if it's a directory. For clean we
+ // want to (try to) clean up any filesystem entry, including a dangling
+ // symlink.
+ //
+ exec = a == perform_update_id
+ ? !exists (t.dir, true /* ignore_errors */)
+ : build2::entry_exists (t.dir, false /* follow_symlinks */);
+ }
+
+ if (exec)
{
const scope& bs (t.base_scope ());
const scope& rs (*bs.root_scope ());
- build::script::environment e (a, t, script.body_temp_dir, deadline);
+ build::script::environment e (a, t, bs, false /* temp_dir */, deadline);
build::script::parser p (ctx);
+ build::script::default_runner r;
- if (verb == 1)
+ bool exec_body (!ctx.dry_run || verb >= 2);
+ bool exec_diag (!script.diag_preamble.empty () &&
+ (exec_body || verb == 1));
+
+ if (script.diag_name)
{
- if (script.diag_line)
+ if (verb == 1)
{
- text << p.execute_special (rs, bs, e, *script.diag_line);
+ // For operations other than update (as well as for non-file
+ // targets), we default to the second form (without the
+ // prerequisite). Think test.
+ //
+ if (t.adhoc_member == nullptr)
+ print_diag (script.diag_name->c_str (), t);
+ else
+ {
+ vector<target_key> ts;
+ for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ ts.push_back (m->key ());
+
+ print_diag (script.diag_name->c_str (), move (ts));
+ }
}
- else
+ }
+ else if (exec_diag)
+ {
+ if (script.diag_preamble_temp_dir)
+ e.set_temp_dir_variable ();
+
+ pair<names, location> diag (
+ p.execute_diag_preamble (rs, bs,
+ e, script, r,
+ verb == 1 /* diag */,
+ true /* enter */,
+ !exec_body /* leave */));
+
+ if (verb == 1)
+ print_custom_diag (bs, move (diag.first), diag.second);
+ }
+
+ if (exec_body)
+ {
+ if (script.body_temp_dir && !script.diag_preamble_temp_dir)
+ e.set_temp_dir_variable ();
+
+ p.execute_body (rs, bs, e, script, r, !exec_diag /* enter */);
+ }
+
+ ts |= target_state::changed;
+ }
+
+ if (ctx.current_mode == execution_mode::last)
+ ts |= reverse_execute_prerequisites (a, t);
+
+ return ts;
+ }
+
+ void adhoc_buildscript_rule::
+ print_custom_diag (const scope& bs, names&& ns, const location& l) const
+ {
+ // The straightforward thing to do would be to just print the diagnostics
+ // as specified by the user. But that will make some of the tidying up
+ // done by print_diag() unavailable to custom diagnostics. Things like
+ // omitting the out-qualification as well as compact printing of the
+ // groups. Also, in the future we may want to support colorization of the
+ // diagnostics, which will be difficult to achieve with such a "just print"
+ // approach.
+ //
+ // So instead we are going to parse the custom diagnostics, translate
+ // names back to targets (where appropriate), and call one of the
+ // print_diag() functions. Specifically, we expect the custom diagnostics
+ // to be in one of the following two forms (which correspond to the two
+ // forms of print_diag()):
+ //
+ // diag <prog> <l-target> <comb> <r-target>...
+ // diag <prog> <r-target>...
+ //
+ // And the way we are going to disambiguate this is by analyzing name
+ // types. Specifically, we expect <comb> to be a simple name that also
+ // does not contain any directory separators (so we can distinguish it
+ // from both target names as well as paths, which can be specified on
+ // either side). We will also recognize `-` as the special stdout path
+ // name (so <comb> cannot be `-`). Finally, <l-target> (but not
+ // <r-target>) can be a string (e.g., an argument) but that should not
+ // pose an ambiguity.
+ //
+ // With this approach, the way to re-create the default diagnostics would
+ // be:
+ //
+ // diag <prog> ($<[0]) -> $>
+ // diag <prog> $>
+ //
+ auto i (ns.begin ()), e (ns.end ());
+
+ // <prog>
+ //
+ if (i == e)
+ fail (l) << "missing program name in diag builtin";
+
+ if (!i->simple () || i->empty ())
+ fail (l) << "expected simple name as program name in diag builtin";
+
+ const char* prog (i->value.c_str ());
+ ++i;
+
+ // <l-target>
+ //
+ const target* l_t (nullptr);
+ path l_p;
+ string l_s;
+
+ auto parse_target = [&bs, &l, &i, &e] () -> const target&
+ {
+ name& n (*i++);
+ name o;
+
+ if (n.pair)
+ {
+ if (i == e)
+ fail (l) << "invalid target name pair in diag builtin";
+
+ o = move (*i++);
+ }
+
+ // Similar to to_target() in $target.*().
+ //
+ if (const target* r = search_existing (n, bs, o.dir))
+ return *r;
+
+ fail (l) << "target "
+ << (n.pair ? names {move (n), move (o)} : names {move (n)})
+ << " not found in diag builtin" << endf;
+ };
+
+ auto parse_first = [&l, &i, &e,
+ &parse_target] (const target*& t, path& p, string& s,
+ const char* after)
+ {
+ if (i == e)
+ fail (l) << "missing target after " << after << " in diag builtin";
+
+ try
+ {
+ if (i->typed ())
{
- // @@ TODO: as above (execute_update_file()).
- //
- text << *script.diag_name << ' ' << t;
+ t = &parse_target ();
+ return; // i is already incremented.
+ }
+ else if (!i->dir.empty ())
+ {
+ p = move (i->dir);
+ p /= i->value;
+ }
+ else if (path_traits::find_separator (i->value) != string::npos)
+ {
+ p = path (move (i->value));
}
+ else if (!i->value.empty ())
+ {
+ s = move (i->value);
+ }
+ else
+ fail (l) << "expected target, path, or argument after "
+ << after << " in diag builtin";
+ }
+ catch (const invalid_path& e)
+ {
+ fail (l) << "invalid path '" << e.path << "' after "
+ << after << " in diag builtin";
+ }
+
+ ++i;
+ };
+
+ parse_first (l_t, l_p, l_s, "program name");
+
+ // Now detect which form it is.
+ //
+ if (i != e &&
+ i->simple () &&
+ !i->empty () &&
+ path_traits::find_separator (i->value) == string::npos)
+ {
+ // The first form.
+
+ // <comb>
+ //
+ const char* comb (i->value.c_str ());
+ ++i;
+
+ // <r-target>
+ //
+ const target* r_t (nullptr);
+ path r_p;
+ string r_s;
+
+ parse_first (r_t, r_p, r_s, "combiner");
+
+ path_name r_pn;
+
+ if (r_t != nullptr)
+ ;
+ else if (!r_p.empty ())
+ r_pn = path_name (&r_p);
+ else
+ {
+ if (r_s != "-")
+ fail (l) << "expected target or path instead of '" << r_s
+ << "' after combiner in diag builtin";
+
+ r_pn = path_name (move (r_s));
}
- if (!ctx.dry_run || verb >= 2)
+ if (i == e)
{
- build::script::default_runner r;
- p.execute_body (rs, bs, e, script, r);
+ if (r_t != nullptr)
+ {
+ if (l_t != nullptr) print_diag (prog, *l_t, *r_t, comb);
+ else if (!l_p.empty ()) print_diag (prog, l_p, *r_t, comb);
+ else print_diag (prog, l_s, *r_t, comb);
+ }
+ else
+ {
+ if (l_t != nullptr) print_diag (prog, *l_t, r_pn, comb);
+ else if (!l_p.empty ()) print_diag (prog, l_p, r_pn, comb);
+ else print_diag (prog, l_s, r_pn, comb);
+ }
+
+ return;
}
+
+ // We can only have multiple targets, not paths.
+ //
+ if (r_t == nullptr)
+ fail (l) << "unexpected name after path in diag builtin";
+
+ // <r-target>...
+ //
+ vector<target_key> r_ts {r_t->key ()};
+
+ do r_ts.push_back (parse_target ().key ()); while (i != e);
+
+ if (l_t != nullptr) print_diag (prog, l_t->key (), move (r_ts), comb);
+ else if (!l_p.empty ()) print_diag (prog, l_p, move (r_ts), comb);
+ else print_diag (prog, l_s, move (r_ts), comb);
}
+ else
+ {
+ // The second form.
- return target_state::changed;
+ // First "absorb" the l_* values as the first <r-target>.
+ //
+ const target* r_t (nullptr);
+ path_name r_pn;
+
+ if (l_t != nullptr)
+ r_t = l_t;
+ else if (!l_p.empty ())
+ r_pn = path_name (&l_p);
+ else
+ {
+ if (l_s != "-")
+ {
+ diag_record dr (fail (l));
+
+ dr << "expected target or path instead of '" << l_s
+ << "' after program name in diag builtin";
+
+ if (i != e)
+ dr << info << "alternatively, missing combiner after '"
+ << l_s << "'";
+ }
+
+ r_pn = path_name (move (l_s));
+ }
+
+ if (i == e)
+ {
+ if (r_t != nullptr)
+ print_diag (prog, *r_t);
+ else
+ print_diag (prog, r_pn);
+
+ return;
+ }
+
+ // We can only have multiple targets, not paths.
+ //
+ if (r_t == nullptr)
+ fail (l) << "unexpected name after path in diag builtin";
+
+ // <r-target>...
+ //
+ vector<target_key> r_ts {r_t->key ()};
+
+ do r_ts.push_back (parse_target ().key ()); while (i != e);
+
+ print_diag (prog, move (r_ts));
+ }
}
}
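
Aside: the <comb> disambiguation implemented in print_custom_diag() above boils down to a simple string test. The following standalone sketch (not part of the patch; is_combiner_candidate is a made-up name for illustration) shows that heuristic in isolation:

// Illustrative sketch only. A token is treated as a potential combiner if it
// is a non-empty simple name that is not the special `-` (stdout) and
// contains no directory separators; otherwise it is taken to be a path (or
// target). The real code additionally requires the name to be untyped and
// undirected and uses path_traits::find_separator().
//
#include <cassert>
#include <string>

static bool
is_combiner_candidate (const std::string& s)
{
  return !s.empty ()                             &&
         s != "-"                                && // Stdout path name.
         s.find ('/')  == std::string::npos      &&
         s.find ('\\') == std::string::npos;
}

int main ()
{
  assert ( is_combiner_candidate ("->"));        // First form: combiner.
  assert (!is_combiner_candidate ("-"));         // Stdout, so second form.
  assert (!is_combiner_candidate ("dir/file"));  // Path, so second form.
}
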
diff --git a/libbuild2/adhoc-rule-buildscript.hxx b/libbuild2/adhoc-rule-buildscript.hxx
index e7b18e2..336dceb 100644
--- a/libbuild2/adhoc-rule-buildscript.hxx
+++ b/libbuild2/adhoc-rule-buildscript.hxx
@@ -36,16 +36,17 @@ namespace build2
const optional<timestamp>&) const override;
target_state
- perform_update_file (action, const target&) const;
+ perform_update_file_or_group (action, const target&) const;
struct match_data;
struct match_data_byproduct;
target_state
- perform_update_file_dyndep (action, const target&, match_data&) const;
+ perform_update_file_or_group_dyndep (
+ action, const target&, match_data&) const;
target_state
- perform_update_file_dyndep_byproduct (
+ perform_update_file_or_group_dyndep_byproduct (
action, const target&, match_data_byproduct&) const;
optional<target_state>
@@ -58,9 +59,19 @@ namespace build2
build::script::default_runner&,
bool deferred_failure = false) const;
+ bool
+ execute_update_group (const scope&,
+ action a, const group&,
+ build::script::environment&,
+ build::script::default_runner&,
+ bool deferred_failure = false) const;
+
static target_state
perform_clean_file (action, const target&);
+ static target_state
+ perform_clean_group (action, const target&);
+
target_state
default_action (action, const target&, const optional<timestamp>&) const;
@@ -79,9 +90,19 @@ namespace build2
virtual void
dump_text (ostream&, string&) const override;
+ void
+ print_custom_diag (const scope&, names&&, const location&) const;
+
public:
using script_type = build::script::script;
+ // The prerequisite_target::include bits that indicate update=unmatch and
+ // an ad hoc version of that.
+ //
+ static const uintptr_t include_unmatch = 0x100;
+ static const uintptr_t include_unmatch_adhoc = 0x200;
+
+
script_type script;
string checksum; // Script text hash.
const target_type* ttype; // First target/pattern type.
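
Aside: to illustrate how the include_unmatch and include_unmatch_adhoc bits declared above are combined with and tested against a prerequisite's include mask (as done in the .cxx changes earlier), here is a minimal standalone sketch; the prereq struct is a simplified stand-in, not the real prerequisite_target:

// Illustrative sketch only.
//
#include <cassert>
#include <cstdint>

struct prereq
{
  std::uintptr_t include = 0; // Low bits reserved by the core, high bits by rules.
};

static const std::uintptr_t include_unmatch       = 0x100;
static const std::uintptr_t include_unmatch_adhoc = 0x200;

int main ()
{
  prereq p;
  p.include |= include_unmatch; // Marked update=unmatch during apply().

  // Later, during update, such prerequisites are skipped.
  //
  assert ((p.include & include_unmatch) != 0);
  assert ((p.include & include_unmatch_adhoc) == 0);
}
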
diff --git a/libbuild2/adhoc-rule-cxx.cxx b/libbuild2/adhoc-rule-cxx.cxx
index df6467f..8a91809 100644
--- a/libbuild2/adhoc-rule-cxx.cxx
+++ b/libbuild2/adhoc-rule-cxx.cxx
@@ -10,6 +10,7 @@
#include <libbuild2/target.hxx>
#include <libbuild2/context.hxx>
#include <libbuild2/algorithm.hxx>
+#include <libbuild2/filesystem.hxx>
#include <libbuild2/diagnostics.hxx>
using namespace butl;
@@ -19,11 +20,18 @@ namespace build2
// cxx_rule_v1
//
bool cxx_rule_v1::
- match (action, target&, const string&) const
+ match (action, target&) const
{
return true;
}
+ recipe cxx_rule_v1::
+ apply (action, target&) const
+ {
+ assert (false); // This (or the match_extra version) must be overridden.
+ return empty_recipe;
+ }
+
// adhoc_cxx_rule
//
adhoc_cxx_rule::
@@ -94,8 +102,10 @@ namespace build2
load_module_library (const path& lib, const string& sym, string& err);
bool adhoc_cxx_rule::
- match (action a, target& t, const string& hint, match_extra& me) const
+ match (action a, target& xt, const string& hint, match_extra& me) const
{
+ const target& t (xt); // See adhoc_rule::match() for background.
+
if (pattern != nullptr && !pattern->match (a, t, hint, me))
return false;
@@ -301,9 +311,9 @@ namespace build2
//
auto_thread_env penv (nullptr);
context& ctx (*t.ctx.module_context);
- scheduler::phase_guard pg (ctx.sched);
+ scheduler::phase_guard pg (*ctx.sched);
- const uint16_t verbosity (3); // Project creation command verbosity.
+ uint16_t verbosity (3); // Project creation command verbosity.
// Project and location signatures.
//
@@ -325,6 +335,17 @@ namespace build2
if (!create && (create = !check_sig (bf, psig)))
rmdir_r (ctx, pd, false, verbosity); // Never dry-run.
+ auto diag = [verbosity] (const path& f)
+ {
+ if (verb >= verbosity)
+ {
+ if (verb >= 2)
+ text << "cat >" << f;
+ else if (verb)
+ print_diag ("save", f);
+ }
+ };
+
path of;
ofdstream ofs;
@@ -337,6 +358,46 @@ namespace build2
// This way the configuration will be always in sync with ~build2
// and we can update the recipe manually (e.g., for debugging).
//
+ // Should we use ~build2 or ~build2-no-warnings? This case is similar
+ // to private host/module configurations in that the user doesn't have
+ // any control over the options used, etc. So it would be natural to
+ // use the no-warnings variant. However, unlike with tools/modules
+ // which can be configured in a user-created configuration (and which
+ // will normally be the case during development), for recipes it's
+ // always this automatically-created configuration. It feels like the
+ // best we can do is use ~build2-no-warnings by default but switch to
+ // ~build2 if the project is configured for development
+ // (config.<project>.develop).
+ //
+ string cfg;
+ {
+ const project_name& pn (named_project (rs));
+
+ if (!pn.empty ())
+ {
+ string var ("config." + pn.variable () + ".develop");
+
+ if (lookup l = rs[var])
+ {
+ // The value could be untyped if the project didn't declare this
+ // variable. Let's handle that case gracefully.
+ //
+ try
+ {
+ if (convert<bool> (*l))
+ cfg = "~build2";
+ }
+ catch (const invalid_argument& e)
+ {
+ fail << "invalid " << var << " value: " << e;
+ }
+ }
+ }
+
+ if (cfg.empty ())
+ cfg = "~build2-no-warnings";
+ }
+
create_project (
pd,
dir_path (), /* amalgamation */
@@ -345,7 +406,7 @@ namespace build2
{"cxx."}, /* root_modules */
"", /* root_post */
string ("config"), /* config_module */
- string ("config.config.load = ~build2"), /* config_file */
+ "config.config.load = " + cfg, /* config_file */
false, /* buildfile */
"build2 core", /* who */
verbosity); /* verbosity */
@@ -355,8 +416,7 @@ namespace build2
//
of = path (pd / "rule.cxx");
- if (verb >= verbosity)
- text << (verb >= 2 ? "cat >" : "save ") << of;
+ diag (of);
ofs.open (of);
@@ -376,6 +436,8 @@ namespace build2
<< "#include <libbuild2/depdb.hxx>" << '\n'
<< "#include <libbuild2/scope.hxx>" << '\n'
<< "#include <libbuild2/target.hxx>" << '\n'
+ << "#include <libbuild2/recipe.hxx>" << '\n'
+ << "#include <libbuild2/dyndep.hxx>" << '\n'
<< "#include <libbuild2/context.hxx>" << '\n'
<< "#include <libbuild2/variable.hxx>" << '\n'
<< "#include <libbuild2/algorithm.hxx>" << '\n'
@@ -485,8 +547,7 @@ namespace build2
//
of = bf;
- if (verb >= verbosity)
- text << (verb >= 2 ? "cat >" : "save ") << of;
+ diag (of);
ofs.open (of);
@@ -558,8 +619,7 @@ namespace build2
entry_time et (file_time (of));
- if (verb >= verbosity)
- text << (verb >= 2 ? "cat >" : "save ") << of;
+ diag (of);
ofs.open (of);
@@ -604,10 +664,10 @@ namespace build2
l = find_target ();
phase_switch mp (ctx, run_phase::match);
- if (build2::match (perform_update_id, *l) != target_state::unchanged)
+ if (match_sync (perform_update_id, *l) != target_state::unchanged)
{
phase_switch ep (ctx, run_phase::execute);
- execute (a, *l);
+ execute_sync (a, *l);
}
}
else
@@ -664,13 +724,41 @@ namespace build2
}
}
- return impl->match (a, t, hint, me);
+ return impl->match (a, xt, hint, me);
}
#endif // BUILD2_BOOTSTRAP || LIBBUILD2_STATIC_BUILD
recipe adhoc_cxx_rule::
apply (action a, target& t, match_extra& me) const
{
+ // Handle matching explicit group member (see adhoc_rule::match() for
+ // background).
+ //
+ if (const group* g = (t.group != nullptr
+ ? t.group->is_a<group> ()
+ : nullptr))
+ {
+ // @@ Hm, this looks very similar to how we handle ad hoc group members.
+ // Shouldn't impl be given a chance to translate options or some
+ // such?
+ //
+ match_sync (a, *g, 0 /* options */);
+ return group_recipe; // Execute the group's recipe.
+ }
+
+ // Note that while we probably could call pattern's apply_group_members()
+ // here, apply_group_prerequisites() is normally called after adding
+ // prerequisites but before matching, which can only be done from the
+ // rule's implementation. Also, for apply_group_members(), there is the
+ // explicit group special case which may also require custom logic.
+ // So it feels best to leave both to the implementation.
+
return impl.load (memory_order_relaxed)->apply (a, t, me);
}
+
+ void adhoc_cxx_rule::
+ reapply (action a, target& t, match_extra& me) const
+ {
+ return impl.load (memory_order_relaxed)->reapply (a, t, me);
+ }
}
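
Aside: the ~build2 vs ~build2-no-warnings selection added above reduces to a small decision on the config.<project>.develop value. A minimal standalone sketch of that decision, with select_recipe_config being a hypothetical helper and a string-typed input standing in for the real bool-typed variable lookup:

// Illustrative sketch only, not the real lookup/convert machinery.
//
#include <cassert>
#include <optional>
#include <stdexcept>
#include <string>

static std::string
select_recipe_config (const std::optional<std::string>& develop)
{
  bool d (false);

  if (develop)
  {
    if      (*develop == "true")  d = true;
    else if (*develop == "false") d = false;
    else
      throw std::invalid_argument (
        "invalid config.<project>.develop value: " + *develop);
  }

  return d ? "~build2" : "~build2-no-warnings";
}

int main ()
{
  assert (select_recipe_config (std::nullopt) == "~build2-no-warnings");
  assert (select_recipe_config ("true")       == "~build2");
}
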
diff --git a/libbuild2/adhoc-rule-cxx.hxx b/libbuild2/adhoc-rule-cxx.hxx
index 466c0e5..2ac2281 100644
--- a/libbuild2/adhoc-rule-cxx.hxx
+++ b/libbuild2/adhoc-rule-cxx.hxx
@@ -26,7 +26,7 @@ namespace build2
};
// Note that when used as part of a pattern, the implementation cannot use
- // the match_extra::buffer nor the target auxilary data storage.
+ // the match_extra::data() facility nor the target auxiliary data storage.
//
class LIBBUILD2_SYMEXPORT cxx_rule_v1: public cxx_rule
{
@@ -36,11 +36,6 @@ namespace build2
// cannot be injected as a real prerequisite since it's from a different
// build context).
//
- // If pattern is not NULL then this recipe belongs to an ad hoc pattern
- // rule and apply() may need to call the pattern's apply_*() functions if
- // the pattern has any ad hoc group member substitutions or prerequisite
- // substitutions/non-patterns, respectively.
- //
const location recipe_loc; // Buildfile location of the recipe.
const target_state recipe_state; // State of recipe library target.
const adhoc_rule_pattern* pattern; // Ad hoc pattern rule of recipe.
@@ -52,8 +47,26 @@ namespace build2
// Return true by default.
//
+ // Note: must treat target as const (unless known to match a non-group).
+ // See adhoc_rule::match() for background.
+ //
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
+
+ using simple_rule::match; // Unhide the match_extra version.
+
+ // Either this version or the one with match_extra must be overridden.
+ //
+ // If the pattern member above is not NULL then this recipe belongs to an
+ // ad hoc pattern rule and the implementation may need to call the
+ // pattern's apply_*() functions if the pattern has any ad hoc group
+ // member substitutions or prerequisite substitutions/non-patterns,
+ // respectively.
+ //
+ virtual recipe
+ apply (action, target&) const override;
+
+ using simple_rule::apply; // Unhide the match_extra version.
};
// Note: not exported.
@@ -67,6 +80,9 @@ namespace build2
virtual recipe
apply (action, target&, match_extra&) const override;
+ virtual void
+ reapply (action, target&, match_extra&) const override;
+
adhoc_cxx_rule (string, const location&, size_t,
uint64_t ver,
optional<string> sep);
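
Aside: the using-declarations above (using simple_rule::match, using simple_rule::apply) are needed because declaring one overload in a derived class hides all same-named base overloads. A minimal standalone illustration with made-up class names (base_rule, derived_rule):

// Illustrative sketch only.
//
struct target {};
struct match_extra {};

struct base_rule
{
  virtual ~base_rule () = default;

  virtual bool match (target&) const {return true;}
  virtual bool match (target&, match_extra&) const {return true;}
};

struct derived_rule: base_rule
{
  // Without this using-declaration the two-argument match() would be hidden
  // and derived_rule{}.match (t, me) would fail to compile.
  //
  using base_rule::match;

  bool match (target&) const override {return false;}
};

int main ()
{
  target t;
  match_extra me;
  derived_rule r;

  bool one (r.match (t));      // Calls the derived override (false).
  bool two (r.match (t, me));  // Calls the base overload (true).

  return one == false && two == true ? 0 : 1;
}
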
diff --git a/libbuild2/adhoc-rule-regex-pattern.cxx b/libbuild2/adhoc-rule-regex-pattern.cxx
index b0de827..2d60520 100644
--- a/libbuild2/adhoc-rule-regex-pattern.cxx
+++ b/libbuild2/adhoc-rule-regex-pattern.cxx
@@ -86,7 +86,9 @@ namespace build2
tt = n.untyped () ? &file::static_type : s.find_target_type (n.type);
if (tt == nullptr)
- fail (loc) << "unknown target type " << n.type;
+ fail (loc) << "unknown target type " << n.type <<
+ info << "perhaps the module that defines this target type is "
+ << "not loaded by project " << *s.root_scope ();
}
bool e (n.pattern &&
@@ -126,10 +128,13 @@ namespace build2
}
bool adhoc_rule_regex_pattern::
- match (action a, target& t, const string&, match_extra& me) const
+ match (action a, const target& t, const string&, match_extra& me) const
{
tracer trace ("adhoc_rule_regex_pattern::match");
+ // Note: target may not be locked in which case we should not modify
+ // target or match_extra (see adhoc_rule::match() for background).
+
// The plan is as follows: First check the "type signature" of the target
// and its prerequisites (the primary target type has already been matched
// by the rule matching machinery). If there is a match, then concatenate
@@ -158,11 +163,23 @@ namespace build2
auto find_prereq = [a, &t] (const target_type& tt) -> optional<target_key>
{
// We use the standard logic that one would use in the rule::match()
- // implementation.
+ // implementation. Except we support the unmatch and match values in
+ // the update variable.
+ //
+ // Note: assuming group prerequisites are immutable (not locked).
//
for (prerequisite_member p: group_prerequisite_members (a, t))
{
- if (include (a, t, p) == include_type::normal && p.is_a (tt))
+ // Note that here we don't validate the update operation override
+ // value (since we may not match). Instead the rule does this in
+ // apply().
+ //
+ // Note: assuming include()'s use of target only relied on immutable
+ // data (not locked).
+ //
+ lookup l;
+ if (include (a, t, p, a.operation () == update_id ? &l : nullptr) ==
+ include_type::normal && p.is_a (tt))
return p.key ().tk;
}
return nullopt;
@@ -190,11 +207,21 @@ namespace build2
// iterators pointing to the string being matched. Which means this string
// must be kept around until we are done with replacing the substitutions.
// In fact, we cannot even move it because this may invalidate the
- // iterators (e.g., in case of a small string optimization). So the plan
- // is to store the string in match_extra::buffer and regex_match_results
- // (which we can move) in the auxiliary data storage.
+ // iterators (e.g., in case of a small string optimization). We also
+ // cannot set the data ahead of time because we may not match. Plus,
+ // resorting to a dynamic memory allocation even if we don't match feels
+ // heavy-handed.
+ //
+ // So the plan is to store the string in match_extra::data() and
+ // regex_match_results (which we can move) in the auxiliary data storage.
//
- string& ns (me.buffer);
+ // Note: only cache if locked.
+ //
+ static_assert (sizeof (string) <= match_extra::data_size,
+ "match data too large");
+
+ string tmp;
+ string& ns (me.locked ? me.data (string ()) : tmp);
auto append_name = [&ns,
first = true,
@@ -212,10 +239,12 @@ namespace build2
// Primary target (always a pattern).
//
auto te (targets_.end ()), ti (targets_.begin ());
- append_name (t.key (), *ti);
+ append_name (t.key (), *ti); // Immutable (not locked).
// Match ad hoc group members.
//
+ // Note: shouldn't be in effect for an explicit group (not locked).
+ //
while ((ti = find_if (ti + 1, te, pattern)) != te)
{
const target* at (find_adhoc_member (t, ti->type));
@@ -265,9 +294,8 @@ namespace build2
return false;
}
- static_assert (sizeof (regex_match_results) <= target::data_size,
- "insufficient space");
- t.data (move (mr));
+ if (me.locked)
+ t.data (a, move (mr));
return true;
}
@@ -293,9 +321,15 @@ namespace build2
}
void adhoc_rule_regex_pattern::
- apply_adhoc_members (action, target& t, match_extra&) const
+ apply_group_members (action a, target& t, const scope& bs,
+ match_extra&) const
{
- const auto& mr (t.data<regex_match_results> ());
+ if (targets_.size () == 1) // The group/primary target is always present.
+ return;
+
+ group* g (t.is_a<group> ());
+
+ const auto& mr (t.data<regex_match_results> (a));
for (auto i (targets_.begin () + 1); i != targets_.end (); ++i)
{
@@ -322,39 +356,113 @@ namespace build2
d.normalize ();
}
- // @@ TODO: currently this uses type as the ad hoc member identity.
+ string n (substitute (
+ t,
+ mr,
+ e.name.value,
+ (g != nullptr
+ ? "explicit target group member"
+ : "ad hoc target group member")));
+
+ // @@ TODO: save location in constructor?
//
- add_adhoc_member (
- t,
- e.type,
- move (d),
- dir_path () /* out */,
- substitute (t, mr, e.name.value, "ad hoc target group member"));
+ location loc;
+
+ optional<string> ext (target::split_name (n, loc));
+
+ if (g != nullptr)
+ {
+ auto& ms (g->members);
+
+ // These are conceptually static but they behave more like dynamic in
+ // that we likely need to insert the target, set its group, etc. In a
+ // sense, they are rule-static, but group-dynamic.
+ //
+ // Note: a custom version of the dyndep_rule::inject_group_member()
+ // logic.
+ //
+ auto l (search_new_locked (
+ bs.ctx,
+ e.type,
+ move (d),
+ dir_path (), // Always in out.
+ move (n),
+ ext ? &*ext : nullptr,
+ &bs));
+
+ const target& t (l.first); // Note: non-const only if have lock.
+
+ if (l.second)
+ {
+ l.first.group = g;
+ l.second.unlock ();
+ }
+ else
+ {
+ if (find (ms.begin (), ms.end (), &t) != ms.end ())
+ continue;
+
+ if (t.group != g) // Note: atomic.
+ {
+ // We can only update the group under lock.
+ //
+ target_lock tl (lock (a, t));
+
+ if (!tl)
+ fail << "group " << *g << " member " << t << " is already matched" <<
+ info << "static group members specified by pattern rules cannot "
+ << "be used as prerequisites directly, only via group";
+
+ if (t.group == nullptr)
+ tl.target->group = g;
+ else if (t.group != g)
+ {
+ fail << "group " << *g << " member " << t
+ << " is already member of group " << *t.group;
+ }
+ }
+ }
+
+ ms.push_back (&t);
+ }
+ else
+ {
+ add_adhoc_member_identity (
+ t,
+ e.type,
+ move (d),
+ dir_path (), // Always in out.
+ move (n),
+ move (ext),
+ loc);
+ }
}
}
void adhoc_rule_regex_pattern::
- apply_prerequisites (action a, target& t, match_extra&) const
+ apply_prerequisites (action a, target& t,
+ const scope& bs,
+ match_extra&) const
{
- const auto& mr (t.data<regex_match_results> ());
-
- // Resolve and cache target scope lazily.
- //
- auto base_scope = [&t, bs = (const scope*) nullptr] () mutable
- -> const scope&
- {
- if (bs == nullptr)
- bs = &t.base_scope ();
-
- return *bs;
- };
+ const auto& mr (t.data<regex_match_results> (a));
// Re-create the same clean semantics as in match_prerequisite_members().
//
bool clean (a.operation () == clean_id && !t.is_a<alias> ());
auto& pts (t.prerequisite_targets[a]);
- size_t start (pts.size ());
+
+ // Avoid duplicating fsdir{} that may have already been injected by
+ // inject_fsdir() (in which case it is expected to be first).
+ //
+ const target* dir (nullptr);
+ if (!pts.empty ())
+ {
+ const prerequisite_target& pt (pts.front ());
+
+ if (pt.target != nullptr && pt.adhoc () && pt.target->is_a<fsdir> ())
+ dir = pt.target;
+ }
for (const element& e: prereqs_)
{
@@ -382,7 +490,7 @@ namespace build2
n = name (e.name.dir,
e.name.type,
substitute (t, mr, e.name.value, "prerequisite"));
- s = &base_scope ();
+ s = &bs;
}
else
{
@@ -392,18 +500,15 @@ namespace build2
const target& pt (search (t, move (n), *s, &e.type));
- if (clean && !pt.in (*base_scope ().root_scope ()))
+ if (&pt == dir || (clean && !pt.in (*bs.root_scope ())))
continue;
// @@ TODO: it could be handy to mark a prerequisite (e.g., a tool)
// ad hoc so that it doesn't interfere with the $< list. Also
- // clean=false.
+ // clean=false. Also update=match|unmatch.
//
pts.push_back (prerequisite_target (&pt, false /* adhoc */));
}
-
- if (start != pts.size ())
- match_members (a, t, pts, start);
}
void adhoc_rule_regex_pattern::
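
Aside: the lifetime constraint discussed in the comments above (the name signature string must stay put, and cannot even be moved, while the regex match results are in use) comes from std::match_results holding iterators into the subject string. A standalone illustration of the same constraint using the standard library:

// Illustrative sketch only; mirrors the match_extra::data() arrangement.
//
#include <cassert>
#include <regex>
#include <string>

int main ()
{
  // The subject must live (and not be relocated, e.g., by a move with small
  // string optimization) at least as long as the match results are used.
  //
  std::string subject ("hello.test.cxx");
  std::smatch mr;

  std::regex re ("([^.]+)\\.test\\..+");

  if (std::regex_match (subject, mr, re))
  {
    // mr[1] refers to characters owned by subject.
    //
    assert (mr[1].str () == "hello");
  }
}
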
diff --git a/libbuild2/adhoc-rule-regex-pattern.hxx b/libbuild2/adhoc-rule-regex-pattern.hxx
index 4327e72..9cb7874 100644
--- a/libbuild2/adhoc-rule-regex-pattern.hxx
+++ b/libbuild2/adhoc-rule-regex-pattern.hxx
@@ -14,7 +14,7 @@ namespace build2
{
// Ad hoc rule regex pattern.
//
- // The name signature is stored in match_extra::buffer while the regex
+ // The name signature string is stored in match_extra::data while the regex
// match_results object -- in the target auxiliary data storage. Both must
// remain valid until after the apply_*() calls.
//
@@ -32,13 +32,17 @@ namespace build2
names&&, const location&);
virtual bool
- match (action, target&, const string&, match_extra&) const override;
+ match (action, const target&, const string&, match_extra&) const override;
virtual void
- apply_adhoc_members (action, target&, match_extra&) const override;
+ apply_group_members (action, target&,
+ const scope&,
+ match_extra&) const override;
virtual void
- apply_prerequisites (action, target&, match_extra&) const override;
+ apply_prerequisites (action, target&,
+ const scope&,
+ match_extra&) const override;
virtual void
dump (ostream&) const override;
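
Aside: the signature change above (match() now takes const target&) encodes the "may be called on an unlocked target" contract in the type system: only the apply_*() side, which runs under the target lock, gets a mutable target. A simplified standalone sketch of that split, with stand-in types:

// Illustrative sketch only.
//
struct target
{
  int data = 0;
};

struct rule_iface
{
  virtual ~rule_iface () = default;

  // May be called without the target lock: read-only access.
  //
  virtual bool match (const target&) const = 0;

  // Called with the target locked: mutation is allowed.
  //
  virtual void apply (target&) const = 0;
};

struct example_rule: rule_iface
{
  bool match (const target& t) const override
  {
    // t.data = 1; // Would not compile: target is const here.
    return t.data == 0;
  }

  void apply (target& t) const override {t.data = 1;}
};

int main ()
{
  target t;
  example_rule r;
  return r.match (t) ? (r.apply (t), 0) : 1;
}
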
diff --git a/libbuild2/algorithm.cxx b/libbuild2/algorithm.cxx
index 0370626..62c500d 100644
--- a/libbuild2/algorithm.cxx
+++ b/libbuild2/algorithm.cxx
@@ -54,29 +54,45 @@ namespace build2
const target&
search (const target& t, const prerequisite_key& pk)
{
- assert (t.ctx.phase == run_phase::match);
+ context& ctx (t.ctx);
+
+ assert (ctx.phase == run_phase::match);
// If this is a project-qualified prerequisite, then this is import's
- // business.
+ // business (phase 2).
//
if (pk.proj)
- return import (t.ctx, pk);
+ return import2 (ctx, pk);
- if (const target* pt = pk.tk.type->search (t, pk))
+ if (const target* pt = pk.tk.type->search (ctx, &t, pk))
return *pt;
- return create_new_target (t.ctx, pk);
+ if (pk.tk.out->empty ())
+ return create_new_target (ctx, pk);
+
+ // If this is triggered, then you are probably not passing scope to
+ // search() (which leads to search_existing_file() being skipped).
+ //
+ fail << "no existing source file for prerequisite " << pk << endf;
}
pair<target&, ulock>
search_locked (const target& t, const prerequisite_key& pk)
{
- assert (t.ctx.phase == run_phase::match && !pk.proj);
+ context& ctx (t.ctx);
- if (const target* pt = pk.tk.type->search (t, pk))
+ assert (ctx.phase == run_phase::match && !pk.proj);
+
+ if (const target* pt = pk.tk.type->search (ctx, &t, pk))
return {const_cast<target&> (*pt), ulock ()};
- return create_new_target_locked (t.ctx, pk);
+ if (pk.tk.out->empty ())
+ return create_new_target_locked (ctx, pk);
+
+ // If this is triggered, then you are probably not passing scope to
+ // search() (which leads to search_existing_file() being skipped).
+ //
+ fail << "no existing source file for prerequisite " << pk << endf;
}
const target*
@@ -84,11 +100,33 @@ namespace build2
{
return pk.proj
? import_existing (ctx, pk)
- : search_existing_target (ctx, pk);
+ : pk.tk.type->search (ctx, nullptr /* existing */, pk);
+ }
+
+ const target&
+ search_new (context& ctx, const prerequisite_key& pk)
+ {
+ assert (ctx.phase == run_phase::load || ctx.phase == run_phase::match);
+
+ if (const target* pt = search_existing_target (ctx, pk, true /*out_only*/))
+ return *pt;
+
+ return create_new_target (ctx, pk);
+ }
+
+ pair<target&, ulock>
+ search_new_locked (context& ctx, const prerequisite_key& pk)
+ {
+ assert (ctx.phase == run_phase::load || ctx.phase == run_phase::match);
+
+ if (const target* pt = search_existing_target (ctx, pk, true /*out_only*/))
+ return {const_cast<target&> (*pt), ulock ()};
+
+ return create_new_target_locked (ctx, pk);
}
const target&
- search (const target& t, name n, const scope& s, const target_type* tt)
+ search (const target& t, name&& n, const scope& s, const target_type* tt)
{
assert (t.ctx.phase == run_phase::match);
@@ -142,16 +180,12 @@ namespace build2
}
bool q (cn.qualified ());
-
- // @@ OUT: for now we assume the prerequisite's out is undetermined.
- // Would need to pass a pair of names.
- //
prerequisite_key pk {
n.proj, {tt, &n.dir, q ? &empty_dir_path : &out, &n.value, ext}, &s};
return q
? import_existing (s.ctx, pk)
- : search_existing_target (s.ctx, pk);
+ : tt->search (s.ctx, nullptr /* existing */, pk);
}
const target*
@@ -198,8 +232,14 @@ namespace build2
// If the work_queue is absent, then we don't wait.
//
+ // While already applied or executed targets are normally not locked, if
+ // options contain any bits that are not already in cur_options, then the
+ // target is locked even in these states.
+ //
target_lock
- lock_impl (action a, const target& ct, optional<scheduler::work_queue> wq)
+ lock_impl (action a, const target& ct,
+ optional<scheduler::work_queue> wq,
+ uint64_t options)
{
context& ctx (ct.ctx);
@@ -214,7 +254,8 @@ namespace build2
size_t appl (b + target::offset_applied);
size_t busy (b + target::offset_busy);
- atomic_count& task_count (ct[a].task_count);
+ const target::opstate& cs (ct[a]);
+ atomic_count& task_count (cs.task_count);
while (!task_count.compare_exchange_strong (
e,
@@ -234,7 +275,7 @@ namespace build2
fail << "dependency cycle detected involving target " << ct;
if (!wq)
- return target_lock {a, nullptr, e - b};
+ return target_lock {a, nullptr, e - b, false};
// We also unlock the phase for the duration of the wait. Why?
// Consider this scenario: we are trying to match a dir{} target whose
@@ -243,14 +284,20 @@ namespace build2
// to switch the phase to load. Which would result in a deadlock
// unless we release the phase.
//
- phase_unlock u (ct.ctx, true /* unlock */, true /* delay */);
- e = ctx.sched.wait (busy - 1, task_count, u, *wq);
+ phase_unlock u (ct.ctx, true /* delay */);
+ e = ctx.sched->wait (busy - 1, task_count, u, *wq);
}
- // We don't lock already applied or executed targets.
+ // We don't lock already applied or executed targets unless there
+ // are new options.
+ //
+ // Note: we don't have the lock yet so we must use atomic cur_options.
+ // We also have to re-check this once we've grabbed the lock.
//
- if (e >= appl)
- return target_lock {a, nullptr, e - b};
+ if (e >= appl &&
+ (cs.match_extra.cur_options_.load (memory_order_relaxed) & options)
+ == options)
+ return target_lock {a, nullptr, e - b, false};
}
// We now have the lock. Analyze the old value and decide what to do.
@@ -259,24 +306,41 @@ namespace build2
target::opstate& s (t[a]);
size_t offset;
- if (e <= b)
+ bool first;
+ if ((first = (e <= b)))
{
// First lock for this operation.
//
+ // Note that we use 0 match_extra::cur_options_ as an indication of not
+ // being applied yet. In particular, in the match phase, this is used to
+ // distinguish between the "busy because not applied yet" and "busy
+ // because relocked to reapply match options" cases. See
+ // target::matched() for details.
+ //
s.rule = nullptr;
s.dependents.store (0, memory_order_release);
+ s.match_extra.cur_options_.store (0, memory_order_relaxed);
offset = target::offset_touched;
}
else
{
+ // Re-check the options if already applied or worse.
+ //
+ if (e >= appl && (s.match_extra.cur_options & options) == options)
+ {
+ // Essentially unlock_impl().
+ //
+ task_count.store (e, memory_order_release);
+ ctx.sched->resume (task_count);
+
+ return target_lock {a, nullptr, e - b, false};
+ }
+
offset = e - b;
- assert (offset == target::offset_touched ||
- offset == target::offset_tried ||
- offset == target::offset_matched);
}
- return target_lock {a, &t, offset};
+ return target_lock {a, &t, offset, first};
}
void
@@ -292,7 +356,7 @@ namespace build2
// this target.
//
task_count.store (offset + ctx.count_base (), memory_order_release);
- ctx.sched.resume (task_count);
+ ctx.sched->resume (task_count);
}
target&
@@ -300,7 +364,8 @@ namespace build2
const target_type& tt,
dir_path dir,
dir_path out,
- string n)
+ string n,
+ optional<string> ext)
{
tracer trace ("add_adhoc_member");
@@ -315,101 +380,342 @@ namespace build2
move (dir),
move (out),
move (n),
- nullopt /* ext */,
+ move (ext),
target_decl::implied,
- trace));
-
- assert (r.second);
+ trace,
+ true /* skip_find */));
target& m (r.first);
- *mp = &m;
+
+ if (!r.second)
+ fail << "target " << m << " already exists and cannot be made "
+ << "ad hoc member of group " << t;
+
m.group = &t;
+ *mp = &m;
return m;
};
- // Return the matching rule or NULL if no match and try_match is true.
- //
- const rule_match*
- match_rule (action a, target& t, const rule* skip, bool try_match)
+ pair<target&, bool>
+ add_adhoc_member_identity (target& t,
+ const target_type& tt,
+ dir_path dir,
+ dir_path out,
+ string n,
+ optional<string> ext,
+ const location& loc)
{
- const scope& bs (t.base_scope ());
+ // NOTE: see similar code in parser::enter_adhoc_members().
- // Match rules in project environment.
- //
- auto_project_env penv;
- if (const scope* rs = bs.root_scope ())
- penv = auto_project_env (*rs);
+ tracer trace ("add_adhoc_member_identity");
- match_extra& me (t[a].match_extra);
+ pair<target&, ulock> r (
+ t.ctx.targets.insert_locked (tt,
+ move (dir),
+ move (out),
+ move (n),
+ move (ext),
+ target_decl::implied,
+ trace,
+ true /* skip_find */));
+ target& m (r.first);
- // First check for an ad hoc recipe.
+ // Add as an ad hoc member at the end of the chain skipping duplicates.
//
- // Note that a fallback recipe is preferred over a non-fallback rule.
+ const_ptr<target>* mp (&t.adhoc_member);
+ for (; *mp != nullptr; mp = &(*mp)->adhoc_member)
+ {
+ if (*mp == &m)
+ return {m, false};
+ }
+
+ if (!r.second)
+ fail (loc) << "target " << m << " already exists and cannot be made "
+ << "ad hoc member of group " << t;
+
+ m.group = &t;
+ *mp = &m;
+
+ return {m, true};
+ }
+
+ static bool
+ trace_target (const target& t, const vector<name>& ns)
+ {
+ for (const name& n: ns)
+ {
+ if (n.untyped () || n.qualified () || n.pattern)
+ fail << "unsupported trace target name '" << n << "'" <<
+ info << "unqualified, typed, non-pattern name expected";
+
+ if (!n.dir.empty ())
+ {
+ if (n.dir.relative () || !n.dir.normalized ())
+ fail << "absolute and normalized trace target directory expected";
+
+ if (t.dir != n.dir)
+ continue;
+ }
+
+ if (n.type == t.type ().name && n.value == t.name)
+ return true;
+ }
+
+ return false;
+ }
+
+ void
+ set_rule_trace (target_lock& l, const rule_match* rm)
+ {
+ action a (l.action);
+ target& t (*l.target);
+
+ // Note: see similar code in execute_impl() for execute.
//
- if (!t.adhoc_recipes.empty ())
+ if (trace_target (t, *t.ctx.trace_match))
{
- auto df = make_diag_frame (
- [a, &t](const diag_record& dr)
- {
- if (verb != 0)
- dr << info << "while matching ad hoc recipe to " << diag_do (a, t);
- });
+ diag_record dr (info);
+
+ dr << "matching to " << diag_do (a, t);
- auto match = [a, &t, &me] (const adhoc_rule& r, bool fallback) -> bool
+ if (rm != nullptr)
{
- me.init (fallback);
+ const rule& r (rm->second);
- if (auto* f = (a.outer ()
- ? t.ctx.current_outer_oif
- : t.ctx.current_inner_oif)->adhoc_match)
- return f (r, a, t, string () /* hint */, me);
+ if (const adhoc_rule* ar = dynamic_cast<const adhoc_rule*> (&r))
+ {
+ dr << info (ar->loc);
+
+ if (ar->pattern != nullptr)
+ dr << "using ad hoc pattern rule ";
+ else
+ dr << "using ad hoc recipe ";
+ }
else
- return r.match (a, t, string () /* hint */, me);
- };
+ dr << info << "using rule ";
- // The action could be Y-for-X while the ad hoc recipes are always for
- // X. So strip the Y-for part for comparison (but not for the match()
- // calls; see below for the hairy inner/outer semantics details).
- //
- action ca (a.inner ()
- ? a
- : action (a.meta_operation (), a.outer_operation ()));
+ dr << rm->first;
+ }
+ else
+ dr << info << "using directly-assigned recipe";
+ }
+
+ t[a].rule = rm;
+ }
+ // Note: not static since also called by rule::sub_match().
+ //
+ const rule_match*
+ match_adhoc_recipe (action a, target& t, match_extra& me)
+ {
+ auto df = make_diag_frame (
+ [a, &t](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while matching ad hoc recipe to " << diag_do (a, t);
+ });
+
+ auto match = [a, &t, &me] (const adhoc_rule& r, bool fallback) -> bool
+ {
+ me.reinit (fallback);
+
+ if (auto* f = (a.outer ()
+ ? t.ctx.current_outer_oif
+ : t.ctx.current_inner_oif)->adhoc_match)
+ return f (r, a, t, string () /* hint */, me);
+ else
+ return r.match (a, t, string () /* hint */, me);
+ };
+
+ // The action could be Y-for-X while the ad hoc recipes are always for
+ // X. So strip the Y-for part for comparison (but not for the match()
+ // calls; see below for the hairy inner/outer semantics details).
+ //
+ action ca (a.inner ()
+ ? a
+ : action (a.meta_operation (), a.outer_operation ()));
+
+ // If returned rule_match is NULL, then the second half indicates whether
+ // the rule was found (but did not match).
+ //
+ auto find_match = [&t, &match] (action ca) -> pair<const rule_match*, bool>
+ {
+ // Note that there can be at most one recipe for any action.
+ //
auto b (t.adhoc_recipes.begin ()), e (t.adhoc_recipes.end ());
auto i (find_if (
b, e,
- [&match, ca] (const shared_ptr<adhoc_rule>& r)
+ [ca] (const shared_ptr<adhoc_rule>& r)
{
auto& as (r->actions);
- return (find (as.begin (), as.end (), ca) != as.end () &&
- match (*r, false));
+ return find (as.begin (), as.end (), ca) != as.end ();
}));
- if (i == e)
+ bool f (i != e);
+ if (f)
+ {
+ if (!match (**i, false /* fallback */))
+ i = e;
+ }
+ else
{
// See if we have a fallback implementation.
//
// See the adhoc_rule::reverse_fallback() documentation for details on
// what's going on here.
//
+ // Note that it feels natural not to look for a fallback if a custom
+ // recipe was provided but did not match.
+ //
+ const target_type& tt (t.type ());
i = find_if (
b, e,
- [&match, ca, &t] (const shared_ptr<adhoc_rule>& r)
+ [ca, &tt] (const shared_ptr<adhoc_rule>& r)
{
- auto& as (r->actions);
-
- // Note that the rule could be there but not match (see above),
- // thus this extra check.
+ // Only the rule that provides the "forward" action can provide
+ // "reverse", so there can be at most one such rule.
//
- return (find (as.begin (), as.end (), ca) == as.end () &&
- r->reverse_fallback (ca, t.type ()) &&
- match (*r, true));
+ return r->reverse_fallback (ca, tt);
});
+
+ f = (i != e);
+ if (f)
+ {
+ if (!match (**i, true /* fallback */))
+ i = e;
+ }
+ }
+
+ return pair<const rule_match*, bool> (
+ i != e ? &(*i)->rule_match : nullptr,
+ f);
+ };
+
+ pair<const rule_match*, bool> r (find_match (ca));
+
+ // Provide the "add dist_* and configure_* actions for every perform_*
+ // action unless there is a custom one" semantics (see the equivalent ad
+ // hoc rule registration code in the parser for background).
+ //
+ // Note that handling this in the parser by adding the extra actions is
+ // difficult because we store recipe actions in the recipe itself
+ // (adhoc_rule::actions) and a recipe could be shared among multiple
+ // targets, some of which may provide a "custom one" as another recipe. On
+ // the other hand, handling it here is relatively straightforward.
+ //
+ if (r.first == nullptr && !r.second)
+ {
+ meta_operation_id mo (ca.meta_operation ());
+ if (mo == configure_id || mo == dist_id)
+ {
+ action pa (perform_id, ca.operation ());
+ r = find_match (pa);
+ }
+ }
+
+ return r.first;
+ }
+
+ // Return the matching rule or NULL if no match and try_match is true.
+ //
+ const rule_match*
+ match_rule_impl (action a, target& t,
+ uint64_t options,
+ const rule* skip,
+ bool try_match,
+ match_extra* pme)
+ {
+ using fallback_rule = adhoc_rule_pattern::fallback_rule;
+
+ auto adhoc_rule_match = [] (const rule_match& r)
+ {
+ return dynamic_cast<const adhoc_rule*> (&r.second.get ());
+ };
+
+ auto fallback_rule_match = [] (const rule_match& r)
+ {
+ return dynamic_cast<const fallback_rule*> (&r.second.get ());
+ };
+
+ // Note: we copy the options value to me.new_options after successfully
+ // matching the rule to make sure rule::match() implementations don't rely
+ // on it.
+ //
+ match_extra& me (pme == nullptr ? t[a].match_extra : *pme);
+
+ if (const target* g = t.group)
+ {
+ // If this is a group with dynamic members, then match it with the
+ // group's rule automatically. See dyndep_rule::inject_group_member()
+ // for background.
+ //
+ if ((g->type ().flags & target_type::flag::dyn_members) ==
+ target_type::flag::dyn_members)
+ {
+ if (g->matched (a, memory_order_acquire))
+ {
+ const rule_match* r (g->state[a].rule);
+ assert (r != nullptr); // Shouldn't happen with dyn_members.
+
+ me.new_options = options;
+ return r;
+ }
+
+ // Assume static member and fall through.
+ }
+
+ // If this is a member of a group-based target, then first try to find a
+ // matching ad hoc recipe/rule by matching (to an ad hoc recipe/rule)
+ // the group but applying to the member. See adhoc_rule::match() for
+ // background, including for why const_cast should be safe.
+ //
+ // To put it another way, if a group is matched by an ad hoc
+ // recipe/rule, then we want all the members to be matched to the same
+ // recipe/rule.
+ //
+ // Note that such a group is dyn_members so we would have tried the
+ // "already matched" case above.
+ //
+ if (g->is_a<group> ())
+ {
+ // We cannot init match_extra from the target if it's unlocked so use
+ // a temporary (it shouldn't be modified if unlocked).
+ //
+ match_extra gme (false /* locked */);
+ if (const rule_match* r = match_rule_impl (a, const_cast<target&> (*g),
+ 0 /* options */,
+ skip,
+ true /* try_match */,
+ &gme))
+ {
+ me.new_options = options;
+ return r;
+ }
+
+ // Fall through to normal match of the member.
}
+ }
+
+ const scope& bs (t.base_scope ());
+
+ // Match rules in project environment.
+ //
+ auto_project_env penv;
+ if (const scope* rs = bs.root_scope ())
+ penv = auto_project_env (*rs);
- if (i != e)
- return &(*i)->rule_match;
+ // First check for an ad hoc recipe.
+ //
+ // Note that a fallback recipe is preferred over a non-fallback rule.
+ //
+ if (!t.adhoc_recipes.empty ())
+ {
+ if (const rule_match* r = match_adhoc_recipe (a, t, me))
+ {
+ me.new_options = options;
+ return r;
+ }
}
// If this is an outer operation (Y-for-X), then we look for rules
@@ -425,183 +731,214 @@ namespace build2
meta_operation_id mo (a.meta_operation ());
operation_id o (a.inner () ? a.operation () : a.outer_operation ());
- for (auto tt (&t.type ()); tt != nullptr; tt = tt->base)
+ // Our hint semantics applies regardless of the meta-operation. This works
+ // reasonably well except for the default/fallback rules provided by some
+ // meta-operations (e.g., dist, config), which naturally do not match the
+ // hint.
+ //
+ // The way we solve this problem is by trying a hint-less match as a
+ // fallback for non-perform meta-operations. @@ Ideally we would want to
+ // only consider such default/fallback rules, which we may do in the
+ // future (we could just decorate their names with some special marker,
+ // e.g., `dist.file.*` but that would be visible in diagnostics).
+ //
+ // It seems the only potential problem with this approach is the inability
+    // of the user to specify the hint for this specific meta-operation (e.g.,
+ // to resolve an ambiguity between two rules or override a matched rule),
+ // which seems quite remote at the moment. Maybe/later we can invent a
+ // syntax for that.
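+    //
+    // For example, assuming the [rule_hint=...] dependency attribute syntax,
+    // a hint could be specified in a buildfile along these lines (a
+    // hypothetical fragment for illustration only):
+    //
+    //   [rule_hint=cxx] exe{hello}: c{hello}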
+ //
+ const string* hint;
+ for (bool retry (false);; retry = true)
{
- // Search scopes outwards, stopping at the project root.
- //
- for (const scope* s (&bs);
- s != nullptr;
- s = s->root () ? &s->global_scope () : s->parent_scope ())
- {
- const operation_rule_map* om (s->rules[mo]);
-
- if (om == nullptr)
- continue; // No entry for this meta-operation id.
+ hint = retry
+ ? &empty_string
+ : &t.find_hint (o); // MT-safe (target locked).
- // First try the map for the actual operation. If that doesn't yeld
- // anything, try the wildcard map.
+ for (auto tt (&t.type ()); tt != nullptr; tt = tt->base)
+ {
+ // Search scopes outwards, stopping at the project root. For retry
+ // only look in the root and global scopes.
//
- for (operation_id oi (o), oip (o); oip != 0; oip = oi, oi = 0)
+ for (const scope* s (retry ? bs.root_scope () : &bs);
+ s != nullptr;
+ s = s->root () ? &s->global_scope () : s->parent_scope ())
{
- const target_type_rule_map* ttm ((*om)[oi]);
-
- if (ttm == nullptr)
- continue; // No entry for this operation id.
-
- if (ttm->empty ())
- continue; // Empty map for this operation id.
-
- auto i (ttm->find (tt));
-
- if (i == ttm->end () || i->second.empty ())
- continue; // No rules registered for this target type.
+ const operation_rule_map* om (s->rules[mo]);
- const auto& rules (i->second); // Hint map.
+ if (om == nullptr)
+ continue; // No entry for this meta-operation id.
- // @@ TODO hint
+          // First try the map for the actual operation. If that doesn't yield
+ // anything, try the wildcard map.
//
- // Different rules can be used for different operations (update vs
- // test is a good example). So, at some point, we will probably have
- // to support a list of hints or even an operation-hint map (e.g.,
- // 'hint=cxx test=foo' if cxx supports the test operation but we
- // want the foo rule instead). This is also the place where the
- // '{build clean}=cxx' construct (which we currently do not support)
- // can come handy.
- //
- // Also, ignore the hint (that is most likely ment for a different
- // operation) if this is a unique match.
- //
- string hint;
- auto rs (rules.size () == 1
- ? make_pair (rules.begin (), rules.end ())
- : rules.find_sub (hint));
-
- for (auto i (rs.first); i != rs.second; ++i)
+ for (operation_id oi (o), oip (o); oip != 0; oip = oi, oi = 0)
{
- const rule_match* r (&*i);
+ const target_type_rule_map* ttm ((*om)[oi]);
- // In a somewhat hackish way we reuse operation wildcards to plumb
- // the ad hoc rule's reverse operation fallback logic.
- //
- // The difficulty is two-fold:
- //
- // 1. It's difficult to add the fallback flag to the rule map
- // because of rule_match which is used throughout.
- //
- // 2. Even if we could do that, we pass the reverse action to
- // reverse_fallback() rather than it returning (a list) of
- // reverse actions, which would be necessary to register them.
- //
- using fallback_rule = adhoc_rule_pattern::fallback_rule;
+ if (ttm == nullptr)
+ continue; // No entry for this operation id.
- auto find_fallback = [mo, o, tt] (const fallback_rule& fr)
- -> const rule_match*
- {
- for (const shared_ptr<adhoc_rule>& ar: fr.rules)
- if (ar->reverse_fallback (action (mo, o), *tt))
- return &ar->rule_match;
+ if (ttm->empty ())
+ continue; // Empty map for this operation id.
- return nullptr;
- };
+ auto i (ttm->find (tt));
- if (oi == 0)
- {
- if (auto* fr =
- dynamic_cast<const fallback_rule*> (&r->second.get ()))
- {
- if ((r = find_fallback (*fr)) == nullptr)
- continue;
- }
- }
+ if (i == ttm->end () || i->second.empty ())
+ continue; // No rules registered for this target type.
- const string& n (r->first);
- const rule& ru (r->second);
+ const auto& rules (i->second); // Name map.
- if (&ru == skip)
- continue;
+ // Filter against the hint, if any.
+ //
+ auto rs (hint->empty ()
+ ? make_pair (rules.begin (), rules.end ())
+ : rules.find_sub (*hint));
- me.init (oi == 0 /* fallback */);
+ for (auto i (rs.first); i != rs.second; ++i)
{
- auto df = make_diag_frame (
- [a, &t, &n](const diag_record& dr)
- {
- if (verb != 0)
- dr << info << "while matching rule " << n << " to "
- << diag_do (a, t);
- });
-
- if (!ru.match (a, t, hint, me))
- continue;
- }
+ const rule_match* r (&*i);
- // Do the ambiguity test.
- //
- bool ambig (false);
+ // In a somewhat hackish way we reuse operation wildcards to
+ // plumb the ad hoc rule's reverse operation fallback logic.
+ //
+ // The difficulty is two-fold:
+ //
+ // 1. It's difficult to add the fallback flag to the rule map
+ // because of rule_match which is used throughout.
+ //
+ // 2. Even if we could do that, we pass the reverse action to
+ // reverse_fallback() rather than it returning (a list) of
+ // reverse actions, which would be necessary to register them.
+ //
+ auto find_fallback = [mo, o, tt] (const fallback_rule& fr)
+ -> const rule_match*
+ {
+ for (const shared_ptr<adhoc_rule>& ar: fr.rules)
+ if (ar->reverse_fallback (action (mo, o), *tt))
+ return &ar->rule_match;
- diag_record dr;
- for (++i; i != rs.second; ++i)
- {
- const rule_match* r1 (&*i);
+ return nullptr;
+ };
if (oi == 0)
{
- if (auto* fr =
- dynamic_cast<const fallback_rule*> (&r1->second.get ()))
+ if (const fallback_rule* fr = fallback_rule_match (*r))
{
- if ((r1 = find_fallback (*fr)) == nullptr)
+ if ((r = find_fallback (*fr)) == nullptr)
continue;
}
}
- const string& n1 (r1->first);
- const rule& ru1 (r1->second);
+ // Skip non-ad hoc rules if the target is not locked (see above;
+ // note that in this case match_extra is a temporary which we
+ // can reinit).
+ //
+ if (!me.locked && !adhoc_rule_match (*r))
+ continue;
+ const string& n (r->first);
+ const rule& ru (r->second);
+
+ if (&ru == skip)
+ continue;
+
+ me.reinit (oi == 0 /* fallback */);
{
auto df = make_diag_frame (
- [a, &t, &n1](const diag_record& dr)
+ [a, &t, &n](const diag_record& dr)
{
if (verb != 0)
- dr << info << "while matching rule " << n1 << " to "
+ dr << info << "while matching rule " << n << " to "
<< diag_do (a, t);
});
- // @@ TODO: this makes target state in match() undetermined
- // so need to fortify rules that modify anything in match
- // to clear things.
- //
- // @@ Can't we temporarily swap things out in target?
- //
- match_extra me1;
- me1.init (oi == 0);
- if (!ru1.match (a, t, hint, me1))
+ if (!ru.match (a, t, *hint, me))
continue;
}
- if (!ambig)
+ // Do the ambiguity test.
+ //
+ bool ambig (false);
+
+ diag_record dr;
+ for (++i; i != rs.second; ++i)
{
- dr << fail << "multiple rules matching " << diag_doing (a, t)
- << info << "rule " << n << " matches";
- ambig = true;
+ const rule_match* r1 (&*i);
+
+ if (oi == 0)
+ {
+ if (const fallback_rule* fr = fallback_rule_match (*r1))
+ {
+ if ((r1 = find_fallback (*fr)) == nullptr)
+ continue;
+ }
+ }
+
+ if (!me.locked && !adhoc_rule_match (*r1))
+ continue;
+
+ const string& n1 (r1->first);
+ const rule& ru1 (r1->second);
+
+ {
+ auto df = make_diag_frame (
+ [a, &t, &n1](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while matching rule " << n1 << " to "
+ << diag_do (a, t);
+ });
+
+ // @@ TODO: this makes target state in match() undetermined
+ // so need to fortify rules that modify anything in match
+ // to clear things.
+ //
+ // @@ Can't we temporarily swap things out in target?
+ //
+ match_extra me1 (me.locked, oi == 0 /* fallback */);
+ if (!ru1.match (a, t, *hint, me1))
+ continue;
+ }
+
+ if (!ambig)
+ {
+ dr << fail << "multiple rules matching " << diag_doing (a, t)
+ << info << "rule " << n << " matches";
+ ambig = true;
+ }
+
+ dr << info << "rule " << n1 << " also matches";
}
- dr << info << "rule " << n1 << " also matches";
+ if (!ambig)
+ {
+ me.new_options = options;
+ return r;
+ }
+ else
+ dr << info << "use rule hint to disambiguate this match";
}
-
- if (!ambig)
- return r;
- else
- dr << info << "use rule hint to disambiguate this match";
}
}
}
+
+ if (mo == perform_id || hint->empty () || retry)
+ break;
}
me.free ();
if (!try_match)
{
- diag_record dr;
- dr << fail << "no rule to " << diag_do (a, t);
+ diag_record dr (fail);
+
+ if (hint->empty ())
+ dr << "no rule to ";
+ else
+ dr << "no rule with hint " << *hint << " to ";
+
+ dr << diag_do (a, t);
// Try to give some hints of the common causes.
//
@@ -691,66 +1028,267 @@ namespace build2
recipe re (ar != nullptr ? f (*ar, a, t, me) : ru.apply (a, t, me));
- me.free ();
+ me.free (); // Note: cur_options are still in use.
+ assert (me.cur_options != 0); // Match options cannot be 0 after apply().
+ me.cur_options_.store (me.cur_options, memory_order_relaxed);
return re;
}
+ static void
+ apply_posthoc_impl (
+ action a, target& t,
+ const pair<const string, reference_wrapper<const rule>>& m,
+ context::posthoc_target& pt)
+ {
+ const scope& bs (t.base_scope ());
+
+ // Apply rules in project environment.
+ //
+ auto_project_env penv;
+ if (const scope* rs = bs.root_scope ())
+ penv = auto_project_env (*rs);
+
+ const rule& ru (m.second);
+ match_extra& me (t[a].match_extra);
+ me.posthoc_prerequisite_targets = &pt.prerequisite_targets;
+
+ auto df = make_diag_frame (
+ [a, &t, &m](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while applying rule " << m.first << " to "
+ << diag_do (a, t) << " for post hoc prerequisites";
+ });
+
+ // Note: for now no adhoc_apply_posthoc().
+ //
+ ru.apply_posthoc (a, t, me);
+ }
+
+ static void
+ reapply_impl (action a,
+ target& t,
+ const pair<const string, reference_wrapper<const rule>>& m)
+ {
+ const scope& bs (t.base_scope ());
+
+ // Reapply rules in project environment.
+ //
+ auto_project_env penv;
+ if (const scope* rs = bs.root_scope ())
+ penv = auto_project_env (*rs);
+
+ const rule& ru (m.second);
+ match_extra& me (t[a].match_extra);
+ // Note: me.posthoc_prerequisite_targets carried over.
+
+ auto df = make_diag_frame (
+ [a, &t, &m](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while reapplying rule " << m.first << " to "
+ << diag_do (a, t);
+ });
+
+ // Note: for now no adhoc_reapply().
+ //
+ ru.reapply (a, t, me);
+ assert (me.cur_options != 0); // Match options cannot be 0 after reapply().
+ me.cur_options_.store (me.cur_options, memory_order_relaxed);
+ }
+
+ // If anything goes wrong, set target state to failed and return nullopt.
+ // Otherwise return the pointer to the new posthoc_target entry if any post
+ // hoc prerequisites were present or NULL otherwise. Note that the returned
+ // entry is stable (because we use a list) and should only be accessed
+  // during the match phase while holding the target lock.
+ //
+ // Note: must be called while holding target_lock.
+ //
+ static optional<context::posthoc_target*>
+ match_posthoc (action a, target& t)
+ {
+ // The plan is to, while holding the lock, search and collect all the post
+    // hoc prerequisites and add an entry to context::current_posthoc_targets.
+ // The actual matching happens as post-pass in the meta-operation's match
+ // function.
+ //
+ // While it may seem like we could do matching here by unlocking (or
+ // unstacking) the lock for this target, that will only work for simple
+ // cases. In particular, consider:
+ //
+ // lib{foo}: ...
+ // lib{plug}: ... lib{foo}
+ // libs{foo}: libs{plug}: include = posthoc
+ //
+ // The chain we will end up with:
+ //
+ // lib{foo}->libs{foo}=>libs{plug}->lib{foo}
+ //
+ // This will trip up the cycle detection for group lib{foo}, not for
+ // libs{foo}.
+ //
+ // In the end, matching (and execution) "inline" (i.e., as we match/
+ // execute the corresponding target) appears to be unworkable in the
+ // face of cycles.
+ //
+    // Note also that this delayed match helps with allowing the rule to
+ // adjust match options of post hoc prerequisites without needing the
+ // rematch support (see match_extra::posthoc_prerequisites).
+ //
+ // @@ Anything we need to do for group members (see through)? Feels quite
+ // far-fetched.
+ //
+ using posthoc_target = context::posthoc_target;
+ using posthoc_prerequisite_target = posthoc_target::prerequisite_target;
+
+ vector<posthoc_prerequisite_target> pts;
+ try
+ {
+ for (const prerequisite& p: group_prerequisites (t))
+ {
+ // Note that we have to ignore any operation-specific values for
+ // non-posthoc prerequisites. See include_impl() for details.
+ //
+ lookup l;
+ if (include (a, t, p, &l) == include_type::posthoc)
+ {
+ if (l)
+ {
+ const string& v (cast<string> (l));
+
+ // The only valid values are true and false and the latter would
+ // have been translated to include_type::exclude.
+ //
+ if (v != "true")
+ {
+ fail << "unrecognized " << *l.var << " variable value "
+ << "'" << v << "' specified for prerequisite " << p;
+ }
+ }
+
+ pts.push_back (
+ posthoc_prerequisite_target {
+ &search (t, p), // May fail.
+ match_extra::all_options});
+ }
+ }
+ }
+ catch (const failed&)
+ {
+ t.state[a].state = target_state::failed;
+ return nullopt;
+ }
+
+ if (!pts.empty ())
+ {
+ context& ctx (t.ctx);
+
+ mlock l (ctx.current_posthoc_targets_mutex);
+ ctx.current_posthoc_targets.push_back (posthoc_target {a, t, move (pts)});
+ return &ctx.current_posthoc_targets.back (); // Stable.
+ }
+
+ return nullptr;
+ }
+
// If step is true then perform only one step of the match/apply sequence.
//
// If try_match is true, then indicate whether there is a rule match with
// the first half of the result.
//
static pair<bool, target_state>
- match_impl (target_lock& l,
- bool step = false,
- bool try_match = false)
+ match_impl_impl (target_lock& l,
+ uint64_t options,
+ bool step = false,
+ bool try_match = false)
{
+    // With regard to options, the semantics that we need to achieve for each
+    // target::offset_*:
+ //
+ // tried -- nothing to do (no match)
+ // touched -- set to new_options
+ // matched -- add to new_options
+ // applied -- reapply if any new options
+ // executed -- check and fail if any new options
+ // busy -- postpone until *_complete() call
+ //
+ // Note that if options is 0 (see resolve_{members,group}_impl()), then
+ // all this can be skipped.
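+    //
+    // For example (illustrative only): if one rule matched this target with
+    // option bit A and another later requests bit B, that second match finds
+    // the target at offset_matched or offset_applied and either merges B into
+    // new_options or triggers a reapply with just the new bits.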
+
assert (l.target != nullptr);
action a (l.action);
target& t (*l.target);
target::opstate& s (t[a]);
- // Intercept and handle matching an ad hoc group member.
- //
- if (t.adhoc_group_member ())
+ try
{
- assert (!step);
+ // Intercept and handle matching an ad hoc group member.
+ //
+ if (t.adhoc_group_member ())
+ {
+ // It feels natural to "convert" this call to the one for the group,
+ // including the try_match part. Semantically, we want to achieve the
+ // following:
+ //
+ // [try_]match (a, g);
+ // match_recipe (l, group_recipe);
+ //
+ // Currently, ad hoc group members cannot have options. An alternative
+        // semantics could be to call the group's rule to translate member
+ // options to group options and then (re)match the group with that.
+ // The implementation of this semantics could look like this:
+ //
+ // 1. Lock the group.
+ // 2. If not already offset_matched, do one step to get the rule.
+ // 3. Call the rule to translate options.
+ // 4. Continue matching the group passing the translated options.
+ // 5. Keep track of member options in member's cur_options to handle
+ // member rematches (if already offset_{applied,executed}).
+ //
+ // Note: see also similar semantics but for explicit groups in
+ // adhoc-rule-*.cxx.
- const target& g (*t.group);
+ assert (!step && options == match_extra::all_options);
- // It feels natural to "convert" this call to the one for the group,
- // including the try_match part. Semantically, we want to achieve the
- // following:
- //
- // [try_]match (a, g);
- // match_recipe (l, group_recipe);
- //
- auto df = make_diag_frame (
- [a, &t](const diag_record& dr)
- {
- if (verb != 0)
- dr << info << "while matching group rule to " << diag_do (a, t);
- });
+ const target& g (*t.group);
- pair<bool, target_state> r (match (a, g, 0, nullptr, try_match));
+        // What should we do with options? After some rumination it feels most
+ // natural to treat options for the group and for its ad hoc member as
+ // the same entity ... or not.
+ //
+ auto df = make_diag_frame (
+ [a, &t](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while matching group rule to " << diag_do (a, t);
+ });
- if (r.first)
- {
- if (r.second != target_state::failed)
+ pair<bool, target_state> r (
+ match_impl (a, g, 0 /* options */, 0, nullptr, try_match));
+
+ if (r.first)
{
- match_inc_dependens (a, g);
- match_recipe (l, group_recipe);
+ if (r.second != target_state::failed)
+ {
+ // Note: in particular, passing all_options makes sure we will
+ // never re-lock this member if already applied/executed.
+ //
+ match_inc_dependents (a, g);
+ match_recipe (l, group_recipe, match_extra::all_options);
+
+ // Note: no need to call match_posthoc() since an ad hoc member
+ // has no own prerequisites and the group's ones will be matched
+ // by the group.
+ }
}
- }
- else
- l.offset = target::offset_tried;
+ else
+ l.offset = target::offset_tried;
- return r; // Group state (must be consistent with matched_state()).
- }
+ return r; // Group state (must be consistent with matched_state()).
+ }
- try
- {
// Continue from where the target has been left off.
//
switch (l.offset)
@@ -769,13 +1307,14 @@ namespace build2
//
// Clear the rule-specific variables, resolved targets list, and the
- // data pad before calling match(). The rule is free to modify these
- // in its match() (provided that it matches) in order to, for
- // example, convey some information to apply().
+ // auxiliary data storage before calling match(). The rule is free
+ // to modify these in its match() (provided that it matches) in
+ // order to, for example, convey some information to apply().
//
clear_target (a, t);
- const rule_match* r (match_rule (a, t, nullptr, try_match));
+ const rule_match* r (
+ match_rule_impl (a, t, options, nullptr, try_match));
assert (l.offset != target::offset_tried); // Should have failed.
@@ -785,7 +1324,7 @@ namespace build2
return make_pair (false, target_state::unknown);
}
- s.rule = r;
+ set_rule (l, r);
l.offset = target::offset_matched;
if (step)
@@ -797,25 +1336,86 @@ namespace build2
// Fall through.
case target::offset_matched:
{
+ // Add any new options.
+ //
+ s.match_extra.new_options |= options;
+
// Apply.
//
set_recipe (l, apply_impl (a, t, *s.rule));
l.offset = target::offset_applied;
+
+ if (t.has_group_prerequisites ()) // Ok since already matched.
+ {
+ if (optional<context::posthoc_target*> p = match_posthoc (a, t))
+ {
+ if (*p != nullptr)
+ {
+ // It would have been more elegant to do this before calling
+ // apply_impl() and then expose the post hoc prerequisites to
+ // apply(). The problem is the group may not be resolved until
+ // the call to apply(). And so we resort to the separate
+ // apply_posthoc() function.
+ //
+ apply_posthoc_impl (a, t, *s.rule, **p);
+ }
+ }
+ else
+ s.state = target_state::failed;
+ }
+
break;
}
+ case target::offset_applied:
+ {
+ // Reapply if any new options.
+ //
+ match_extra& me (s.match_extra);
+ me.new_options = options & ~me.cur_options; // Clear existing.
+ assert (me.new_options != 0); // Otherwise should not have locked.
+
+ // Feels like this can only be a logic bug since to end up with a
+ // subset of options requires a rule (see match_extra for details).
+ //
+ assert (s.rule != nullptr);
+
+ reapply_impl (a, t, *s.rule);
+ break;
+ }
+ case target::offset_executed:
+ {
+ // Diagnose new options after execute.
+ //
+ match_extra& me (s.match_extra);
+ assert ((me.cur_options & options) != options); // Otherwise no lock.
+
+ fail << "change of match options after " << diag_do (a, t)
+ << " has been executed" <<
+ info << "executed options 0x" << hex << me.cur_options <<
+ info << "requested options 0x" << hex << options << endf;
+ }
default:
assert (false);
}
}
catch (const failed&)
{
+ s.state = target_state::failed;
+ l.offset = target::offset_applied;
+
+ // Make sure we don't relock a failed target.
+ //
+ match_extra& me (s.match_extra);
+ me.cur_options = match_extra::all_options;
+ me.cur_options_.store (me.cur_options, memory_order_relaxed);
+ }
+
+ if (s.state == target_state::failed)
+ {
// As a sanity measure clear the target data since it can be incomplete
// or invalid (mark()/unmark() should give you some ideas).
//
clear_target (a, t);
-
- s.state = target_state::failed;
- l.offset = target::offset_applied;
}
return make_pair (true, s.state);
@@ -825,11 +1425,10 @@ namespace build2
// the first half of the result.
//
pair<bool, target_state>
- match (action a,
- const target& ct,
- size_t start_count,
- atomic_count* task_count,
- bool try_match)
+ match_impl (action a, const target& ct,
+ uint64_t options,
+ size_t start_count, atomic_count* task_count,
+ bool try_match)
{
// If we are blocking then work our own queue one task at a time. The
// logic here is that we may have already queued other tasks before this
@@ -850,17 +1449,16 @@ namespace build2
ct,
task_count == nullptr
? optional<scheduler::work_queue> (scheduler::work_none)
- : nullopt));
+ : nullopt,
+ options));
if (l.target != nullptr)
{
- assert (l.offset < target::offset_applied); // Shouldn't lock otherwise.
-
if (try_match && l.offset == target::offset_tried)
return make_pair (false, target_state::unknown);
if (task_count == nullptr)
- return match_impl (l, false /* step */, try_match);
+ return match_impl_impl (l, options, false /* step */, try_match);
// Pass "disassembled" lock since the scheduler queue doesn't support
// task destruction.
@@ -870,12 +1468,18 @@ namespace build2
// Also pass our diagnostics and lock stacks (this is safe since we
// expect the caller to wait for completion before unwinding its stack).
//
- if (ct.ctx.sched.async (
+ // Note: pack captures and arguments a bit to reduce the storage space
+      // requirements.
+ //
+ bool first (ld.first);
+
+ if (ct.ctx.sched->async (
start_count,
*task_count,
- [a, try_match] (const diag_frame* ds,
- const target_lock* ls,
- target& t, size_t offset)
+ [a, try_match, first] (const diag_frame* ds,
+ const target_lock* ls,
+ target& t, size_t offset,
+ uint64_t options)
{
// Switch to caller's diag and lock stacks.
//
@@ -886,17 +1490,18 @@ namespace build2
{
phase_lock pl (t.ctx, run_phase::match); // Throws.
{
- target_lock l {a, &t, offset}; // Reassemble.
- match_impl (l, false /* step */, try_match);
- // Unlock within the match phase.
+ // Note: target_lock must be unlocked within the match phase.
+ //
+ target_lock l {a, &t, offset, first}; // Reassemble.
+ match_impl_impl (l, options, false /* step */, try_match);
}
}
catch (const failed&) {} // Phase lock failure.
},
diag_frame::stack (),
target_lock::stack (),
- ref (*ld.target),
- ld.offset))
+ ref (*ld.target), ld.offset,
+ options))
return make_pair (true, target_state::postponed); // Queued.
// Matched synchronously, fall through.
@@ -914,9 +1519,39 @@ namespace build2
return ct.try_matched_state (a, false);
}
+ void
+ match_only_sync (action a, const target& t, uint64_t options)
+ {
+ assert (t.ctx.phase == run_phase::match);
+
+ target_lock l (lock_impl (a, t, scheduler::work_none, options));
+
+ if (l.target != nullptr)
+ {
+ if (l.offset != target::offset_matched)
+ {
+ if (match_impl_impl (l,
+ options,
+ true /* step */).second == target_state::failed)
+ throw failed ();
+ }
+ else
+ {
+ // If the target is already matched, then we need to add any new
+ // options but not call apply() (thus cannot use match_impl_impl()).
+ //
+ (*l.target)[a].match_extra.new_options |= options;
+ }
+ }
+ }
+
+ // Note: lock is a reference to avoid the stacking overhead.
+ //
static group_view
- resolve_members_impl (action a, const target& g, target_lock l)
+ resolve_members_impl (action a, const target& g, target_lock&& l)
{
+ assert (a.inner ());
+
// Note that we will be unlocked if the target is already applied.
//
group_view r;
@@ -930,7 +1565,9 @@ namespace build2
{
// Match (locked).
//
- if (match_impl (l, true).second == target_state::failed)
+ if (match_impl_impl (l,
+ 0 /* options */,
+ true /* step */).second == target_state::failed)
throw failed ();
if ((r = g.group_members (a)).members != nullptr)
@@ -941,22 +1578,52 @@ namespace build2
// Fall through.
case target::offset_matched:
{
- // @@ Doing match without execute messes up our target_count. Does
- // not seem like it will be easy to fix (we don't know whether
- // someone else will execute this target).
- //
- // @@ What if we always do match & execute together? After all,
- // if a group can be resolved in apply(), then it can be
- // resolved in match()!
- //
-
// Apply (locked).
//
- if (match_impl (l, true).second == target_state::failed)
+ pair<bool, target_state> s (
+ match_impl_impl (l, 0 /* options */, true /* step */));
+
+ if (s.second == target_state::failed)
throw failed ();
if ((r = g.group_members (a)).members != nullptr)
+ {
+ // Doing match without execute messes up our target_count. There
+ // doesn't seem to be a clean way to solve this. Well, just always
+ // executing if we've done the match would have been clean but quite
+ // heavy-handed (it would be especially surprising if otherwise
+ // there is nothing else to do, which can happen, for example,
+ // during update-for-test when there are no tests to run).
+ //
+ // So our solution is as follows:
+ //
+ // 1. Keep track both of the targets that ended up in this situation
+ // (the target::resolve_counted flag) as well as their total
+ // count (the context::resolve_count member). Only do this if
+ // set_recipe() (called by match_impl()) would have incremented
+ // target_count.
+ //
+ // 2. If we happen to execute such a target (common case), then
+ // clear the flag and decrement the count.
+ //
+ // 3. When it's time to assert that target_count==0 (i.e., all the
+ // matched targets have been executed), check if resolve_count is
+ // 0. If it's not, then find every target with the flag set,
+ // pretend-execute it, and decrement both counts. See
+ // perform_execute() for further details on this step.
+ //
+ if (s.second != target_state::unchanged)
+ {
+ target::opstate& s (l.target->state[a]); // Inner.
+
+ if (!s.recipe_group_action)
+ {
+ s.resolve_counted = true;
+ g.ctx.resolve_count.fetch_add (1, memory_order_relaxed);
+ }
+ }
break;
+ }
// Unlock and to execute ...
//
@@ -967,15 +1634,19 @@ namespace build2
{
// Execute (unlocked).
//
- // Note that we use execute_direct() rather than execute() here to
- // sidestep the dependents count logic. In this context, this is by
- // definition the first attempt to execute this rule (otherwise we
- // would have already known the members list) and we really do need
+ // Note that we use execute_direct_sync() rather than execute_sync()
+ // here to sidestep the dependents count logic. In this context, this
+ // is by definition the first attempt to execute this rule (otherwise
+ // we would have already known the members list) and we really do need
// to execute it now.
//
+ // Note that while it might be tempting to decrement resolve_count
+ // here, there is no guarantee that we were the ones who have matched
+ // this target.
+ //
{
phase_switch ps (g.ctx, run_phase::execute);
- execute_direct (a, g);
+ execute_direct_sync (a, g);
}
r = g.group_members (a);
@@ -1021,10 +1692,23 @@ namespace build2
return r;
}
+ // Note: lock is a reference to avoid the stacking overhead.
+ //
void
- resolve_group_impl (action, const target&, target_lock l)
+ resolve_group_impl (target_lock&& l)
{
- match_impl (l, true /* step */, true /* try_match */);
+ assert (l.action.inner ());
+
+ pair<bool, target_state> r (
+ match_impl_impl (l,
+ 0 /* options */,
+ true /* step */,
+ true /* try_match */));
+
+ l.unlock ();
+
+ if (r.first && r.second == target_state::failed)
+ throw failed ();
}
template <typename R, typename S>
@@ -1032,16 +1716,33 @@ namespace build2
match_prerequisite_range (action a, target& t,
R&& r,
const S& ms,
- const scope* s)
+ const scope* s,
+ bool search_only)
{
auto& pts (t.prerequisite_targets[a]);
+ size_t i (pts.size ()); // Index of the first to be added.
+
+ // Avoid duplicating fsdir{} that may have already been injected by
+ // inject_fsdir() (in which case it is expected to be first).
+ //
+ const target* dir (nullptr);
+ if (i != 0)
+ {
+ const prerequisite_target& pt (pts.front ());
+
+ if (pt.target != nullptr && pt.adhoc () && pt.target->is_a<fsdir> ())
+ dir = pt.target;
+ }
+
// Start asynchronous matching of prerequisites. Wait with unlocked phase
// to allow phase switching.
//
- wait_guard wg (t.ctx, t.ctx.count_busy (), t[a].task_count, true);
+ wait_guard wg (
+ search_only
+ ? wait_guard ()
+ : wait_guard (t.ctx, t.ctx.count_busy (), t[a].task_count, true));
- size_t i (pts.size ()); // Index of the first to be added.
for (auto&& p: forward<R> (r))
{
// Ignore excluded.
@@ -1055,13 +1756,20 @@ namespace build2
? ms (a, t, p, pi)
: prerequisite_target (&search (t, p), pi));
- if (pt.target == nullptr || (s != nullptr && !pt.target->in (*s)))
+ if (pt.target == nullptr ||
+ pt.target == dir ||
+ (s != nullptr && !pt.target->in (*s)))
continue;
- match_async (a, *pt.target, t.ctx.count_busy (), t[a].task_count);
+ if (!search_only)
+ match_async (a, *pt.target, t.ctx.count_busy (), t[a].task_count);
+
pts.push_back (move (pt));
}
+ if (search_only)
+ return;
+
wg.wait ();
// Finish matching all the targets that we have started.
@@ -1069,29 +1777,38 @@ namespace build2
for (size_t n (pts.size ()); i != n; ++i)
{
const target& pt (*pts[i]);
- match (a, pt);
+ match_complete (a, pt);
}
}
void
match_prerequisites (action a, target& t,
const match_search& ms,
- const scope* s)
+ const scope* s,
+ bool search_only)
{
- match_prerequisite_range (a, t, group_prerequisites (t), ms, s);
+ match_prerequisite_range (a, t,
+ group_prerequisites (t),
+ ms,
+ s,
+ search_only);
}
void
match_prerequisite_members (action a, target& t,
const match_search_member& msm,
- const scope* s)
+ const scope* s,
+ bool search_only)
{
- match_prerequisite_range (a, t, group_prerequisite_members (a, t), msm, s);
+ match_prerequisite_range (a, t,
+ group_prerequisite_members (a, t),
+ msm,
+ s,
+ search_only);
}
- template <typename T>
void
- match_members (action a, target& t, T const* ts, size_t n)
+ match_members (action a, const target& t, const target* const* ts, size_t n)
{
// Pretty much identical to match_prerequisite_range() except we don't
// search.
@@ -1119,22 +1836,52 @@ namespace build2
if (m == nullptr || marked (m))
continue;
- match (a, *m);
+ match_complete (a, *m);
}
}
- // Instantiate only for what we need.
- //
- template LIBBUILD2_SYMEXPORT void
- match_members<const target*> (action, target&,
- const target* const*, size_t);
+ void
+ match_members (action a,
+ const target& t,
+ prerequisite_targets& ts,
+ size_t s,
+ pair<uintptr_t, uintptr_t> imv)
+ {
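+    // Note: imv is an include mask/value pair; only members whose include
+    // flags satisfy (include & imv.first) == imv.second are considered (a
+    // zero mask means consider all members).
+    //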
+ size_t n (ts.size ());
+
+ wait_guard wg (t.ctx, t.ctx.count_busy (), t[a].task_count, true);
+
+ for (size_t i (s); i != n; ++i)
+ {
+ const prerequisite_target& pt (ts[i]);
+ const target* m (pt.target);
- template LIBBUILD2_SYMEXPORT void
- match_members<prerequisite_target> (action, target&,
- prerequisite_target const*, size_t);
+ if (m == nullptr ||
+ marked (m) ||
+ (imv.first != 0 && (pt.include & imv.first) != imv.second))
+ continue;
+
+ match_async (a, *m, t.ctx.count_busy (), t[a].task_count);
+ }
+
+ wg.wait ();
+
+ for (size_t i (s); i != n; ++i)
+ {
+ const prerequisite_target& pt (ts[i]);
+ const target* m (pt.target);
+
+ if (m == nullptr ||
+ marked (m) ||
+ (imv.first != 0 && (pt.include & imv.first) != imv.second))
+ continue;
+
+ match_complete (a, *m);
+ }
+ }
const fsdir*
- inject_fsdir (action a, target& t, bool parent)
+ inject_fsdir_impl (target& t, bool prereq, bool parent)
{
tracer trace ("inject_fsdir");
@@ -1155,6 +1902,7 @@ namespace build2
// subprojects (e.g., tests/).
//
const fsdir* r (nullptr);
+
if (rs != nullptr && !d.sub (rs->src_path ()))
{
l6 ([&]{trace << d << " for " << t;});
@@ -1163,7 +1911,7 @@ namespace build2
//
r = &search<fsdir> (t, d, dir_path (), string (), nullptr, nullptr);
}
- else
+ else if (prereq)
{
// See if one was mentioned explicitly.
//
@@ -1182,13 +1930,45 @@ namespace build2
}
}
+ return r;
+ }
+
+ const fsdir*
+ inject_fsdir (action a, target& t, bool match, bool prereq, bool parent)
+ {
+ auto& pts (t.prerequisite_targets[a]);
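+    // Note: if match is true, also match the injected fsdir{} target. If
+    // prereq is false, do not fall back to an explicitly-specified fsdir{}
+    // prerequisite (see inject_fsdir_impl() above).
+    //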
+
+ assert (!prereq || pts.empty ()); // This prerequisite target must be first.
+
+ const fsdir* r (inject_fsdir_impl (t, prereq, parent));
+
if (r != nullptr)
{
+ if (match)
+ match_sync (a, *r);
+
// Make it ad hoc so that it doesn't end up in prerequisite_targets
// after execution.
//
- match (a, *r);
- t.prerequisite_targets[a].emplace_back (r, include_type::adhoc);
+ pts.emplace_back (r, include_type::adhoc);
+ }
+
+ return r;
+ }
+
+ const fsdir*
+ inject_fsdir_direct (action a, target& t, bool prereq, bool parent)
+ {
+ auto& pts (t.prerequisite_targets[a]);
+
+ assert (!prereq || pts.empty ()); // This prerequisite target must be first.
+
+ const fsdir* r (inject_fsdir_impl (t, prereq, parent));
+
+ if (r != nullptr)
+ {
+ match_direct_sync (a, *r);
+ pts.emplace_back (r, include_type::adhoc);
}
return r;
@@ -1301,11 +2081,26 @@ namespace build2
return ts;
}
- void
- update_backlink (const file& f, const path& l, bool changed, backlink_mode m)
+ static inline const char*
+ update_backlink_name (backlink_mode m, bool to_dir)
{
using mode = backlink_mode;
+ const char* r (nullptr);
+ switch (m)
+ {
+ case mode::link:
+ case mode::symbolic: r = verb >= 3 ? "ln -sf" : verb >= 2 ? "ln -s" : "ln"; break;
+ case mode::hard: r = verb >= 3 ? "ln -f" : "ln"; break;
+ case mode::copy:
+ case mode::overwrite: r = to_dir ? "cp -r" : "cp"; break;
+ }
+ return r;
+ }
+
+ void
+ update_backlink (const file& f, const path& l, bool changed, backlink_mode m)
+ {
const path& p (f.path ());
dir_path d (l.directory ());
@@ -1317,28 +2112,20 @@ namespace build2
// actually updated to signal to the user that the updated out target is
// now available in src.
//
- if (verb <= 2)
+ if (verb == 1 || verb == 2)
{
if (changed || !butl::entry_exists (l,
false /* follow_symlinks */,
true /* ignore_errors */))
{
- const char* c (nullptr);
- switch (m)
- {
- case mode::link:
- case mode::symbolic: c = verb >= 2 ? "ln -s" : "ln"; break;
- case mode::hard: c = "ln"; break;
- case mode::copy:
- case mode::overwrite: c = l.to_directory () ? "cp -r" : "cp"; break;
- }
+ const char* c (update_backlink_name (m, l.to_directory ()));
- // Note: 'ln foo/ bar/' means a different thing.
+ // Note: 'ln foo/ bar/' means a different thing (and below).
//
- if (verb >= 2)
+ if (verb == 2)
text << c << ' ' << p.string () << ' ' << l.string ();
else
- text << c << ' ' << f << " -> " << d;
+ print_diag (c, f, d);
}
}
@@ -1358,30 +2145,25 @@ namespace build2
{
// As above but with a slightly different diagnostics.
- using mode = backlink_mode;
-
dir_path d (l.directory ());
- if (verb <= 2)
+ if (verb == 1 || verb == 2)
{
if (changed || !butl::entry_exists (l,
false /* follow_symlinks */,
true /* ignore_errors */))
{
- const char* c (nullptr);
- switch (m)
- {
- case mode::link:
- case mode::symbolic: c = verb >= 2 ? "ln -s" : "ln"; break;
- case mode::hard: c = "ln"; break;
- case mode::copy:
- case mode::overwrite: c = l.to_directory () ? "cp -r" : "cp"; break;
- }
+ const char* c (update_backlink_name (m, l.to_directory ()));
+ // Note: 'ln foo/ bar/' means a different thing (and above) so strip
+ // trailing directory separator (but keep as path for relative).
+ //
if (verb >= 2)
text << c << ' ' << p.string () << ' ' << l.string ();
else
- text << c << ' ' << p.string () << " -> " << d;
+ print_diag (c,
+ p.to_directory () ? path (p.string ()) : p,
+ d);
}
}
@@ -1433,6 +2215,8 @@ namespace build2
const path& p, const path& l, backlink_mode om,
uint16_t verbosity)
{
+ assert (verbosity >= 2);
+
using mode = backlink_mode;
bool d (l.to_directory ());
@@ -1442,17 +2226,8 @@ namespace build2
{
if (verb >= verbosity)
{
- const char* c (nullptr);
- switch (m)
- {
- case mode::link:
- case mode::symbolic: c = "ln -sf"; break;
- case mode::hard: c = "ln -f"; break;
- case mode::copy:
- case mode::overwrite: c = d ? "cp -r" : "cp"; break;
- }
-
- text << c << ' ' << p.string () << ' ' << l.string ();
+ text << update_backlink_name (m, d) << ' ' << p.string () << ' '
+ << l.string ();
}
};
@@ -1514,8 +2289,7 @@ namespace build2
try_mkdir (to);
- for (const auto& de:
- dir_iterator (fr, false /* ignore_dangling */))
+ for (const auto& de: dir_iterator (fr, dir_iterator::no_follow))
{
path f (fr / de.path ());
path t (to / de.path ());
@@ -1568,6 +2342,11 @@ namespace build2
//
// Note that here the dry-run mode is handled by the filesystem functions.
+ // Note that if we ever need to support level 1 for some reason, maybe
+ // consider showing the target, for example, `unlink exe{hello} <- dir/`?
+ //
+ assert (v >= 2);
+
using mode = backlink_mode;
if (l.to_directory ())
@@ -1602,9 +2381,15 @@ namespace build2
struct backlink: auto_rm<path>
{
using path_type = build2::path;
+ using target_type = build2::target;
reference_wrapper<const path_type> target;
- backlink_mode mode;
+ backlink_mode mode;
+
+ // Ad hoc group-specific information for diagnostics (see below).
+ //
+ const target_type* member = nullptr;
+ bool print = true;
backlink (const path_type& t, path_type&& l, backlink_mode m, bool active)
: auto_rm<path_type> (move (l), active), target (t), mode (m)
@@ -1626,33 +2411,65 @@ namespace build2
};
// Normally (i.e., on sane platforms that don't have things like PDBs, etc)
- // there will be just one backlink so optimize for that.
+ // there will be just one or two backlinks so optimize for that.
//
- using backlinks = small_vector<backlink, 1>;
+ using backlinks = small_vector<backlink, 2>;
- static optional<backlink_mode>
- backlink_test (const target& t, const lookup& l)
+ static optional<pair<backlink_mode, bool>>
+ backlink_test (const target& t, const lookup& l, optional<backlink_mode> gm)
{
using mode = backlink_mode;
- optional<mode> r;
- const string& v (cast<string> (l));
+ const names& ns (cast<names> (l));
- if (v == "true") r = mode::link;
- else if (v == "symbolic") r = mode::symbolic;
- else if (v == "hard") r = mode::hard;
- else if (v == "copy") r = mode::copy;
- else if (v == "overwrite") r = mode::overwrite;
- else if (v != "false")
- fail << "invalid backlink variable value '" << v << "' "
+ if (ns.size () != 1 && ns.size () != 2)
+ {
+ fail << "invalid backlink variable value '" << ns << "' "
<< "specified for target " << t;
+ }
- return r;
+ optional<mode> m;
+ for (;;) // Breakout loop.
+ {
+ const name& n (ns.front ());
+
+ if (n.simple ())
+ {
+ const string& v (n.value);
+
+ if (v == "true") {m = mode::link; break;}
+ else if (v == "symbolic") {m = mode::symbolic; break;}
+ else if (v == "hard") {m = mode::hard; break;}
+ else if (v == "copy") {m = mode::copy; break;}
+ else if (v == "overwrite") {m = mode::overwrite; break;}
+ else if (v == "false") { break;}
+ else if (v == "group") {if ((m = gm)) break;}
+ }
+
+ fail << "invalid backlink variable value mode component '" << n << "' "
+ << "specified for target " << t << endf;
+ }
+
+ bool np (false); // "not print"
+ if (ns.size () == 2)
+ {
+ const name& n (ns.back ());
+
+ if (n.simple () && (n.value == "true" || (np = (n.value == "false"))))
+ ;
+ else
+ fail << "invalid backlink variable value print component '" << n
+ << "' specified for target " << t;
+ }
+
+ return m ? optional<pair<mode, bool>> (make_pair (*m, !np)) : nullopt;
}
static optional<backlink_mode>
backlink_test (action a, target& t)
{
+ using mode = backlink_mode;
+
context& ctx (t.ctx);
// Note: the order of these checks is from the least to most expensive.
@@ -1662,9 +2479,20 @@ namespace build2
if (a.outer () || (a != perform_update_id && a != perform_clean_id))
return nullopt;
- // Only file-based targets in the out tree can be backlinked.
+ // Only targets in the out tree can be backlinked.
//
- if (!t.out.empty () || !t.is_a<file> ())
+ if (!t.out.empty ())
+ return nullopt;
+
+ // Only file-based targets or groups containing file-based targets can be
+ // backlinked. Note that we don't do the "file-based" check of the latter
+    // case here since they can still be excluded. So instead we are prepared
+ // to handle the empty backlinks list.
+ //
+ // @@ Potentially members could only be resolved in execute. I guess we
+ // don't support backlink for such groups at the moment.
+ //
+ if (!t.is_a<file> () && t.group_members (a).members == nullptr)
return nullopt;
// Neither an out-of-project nor in-src configuration can be forwarded.
@@ -1688,7 +2516,13 @@ namespace build2
if (!l.defined ())
l = ctx.global_scope.lookup (*ctx.var_backlink, t.key ());
- return l ? backlink_test (t, l) : nullopt;
+ optional<pair<mode, bool>> r (l ? backlink_test (t, l, nullopt) : nullopt);
+
+ if (r && !r->second)
+ fail << "backlink variable value print component cannot be false "
+ << "for primary target " << t;
+
+ return r ? optional<mode> (r->first) : nullopt;
}
static backlinks
@@ -1696,58 +2530,104 @@ namespace build2
{
using mode = backlink_mode;
+ context& ctx (t.ctx);
const scope& s (t.base_scope ());
backlinks bls;
- auto add = [&bls, &s] (const path& p, mode m)
+ auto add = [&bls, &s] (const path& p,
+ mode m,
+ const target* mt = nullptr,
+ bool print = true)
{
bls.emplace_back (p,
s.src_path () / p.leaf (s.out_path ()),
m,
!s.ctx.dry_run /* active */);
+
+ if (mt != nullptr)
+ {
+ backlink& bl (bls.back ());
+ bl.member = mt;
+ bl.print = print;
+ }
};
- // First the target itself.
+ // Check for a custom backlink mode for this member. If none, then
+ // inherit the one from the group (so if the user asked to copy
+ // .exe, we will also copy .pdb).
//
- add (t.as<file> ().path (), m);
+    // Note that we want to avoid group or tt/pattern-spec lookup. And
+ // since this is an ad hoc member (which means it was either declared
+ // in the buildfile or added by the rule), we assume that the value,
+ // if any, will be set as a target or rule-specific variable.
+ //
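+    //
+    // For example, the following (hypothetical) values illustrate the two
+    // components parsed by backlink_test() above:
+    //
+    //   backlink = copy          # Copy, print in diagnostics.
+    //   backlink = group false   # Group's mode, don't print.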
+ auto member_mode = [a, m, &ctx] (const target& mt)
+ -> optional<pair<mode, bool>>
+ {
+ lookup l (mt.state[a].vars[ctx.var_backlink]);
+
+ if (!l)
+ l = mt.vars[ctx.var_backlink];
+
+ return l ? backlink_test (mt, l, m) : make_pair (m, true);
+ };
- // Then ad hoc group file/fsdir members, if any.
+ // @@ Currently we don't handle the following cases:
//
- for (const target* mt (t.adhoc_member);
- mt != nullptr;
- mt = mt->adhoc_member)
+ // 1. File-based explicit groups.
+ //
+ // 2. Ad hoc subgroups in explicit groups.
+ //
+ // Note: see also the corresponding code in backlink_update_post().
+ //
+ if (file* f = t.is_a<file> ())
{
- const path* p (nullptr);
+ // First the target itself.
+ //
+ add (f->path (), m, f, true); // Note: always printed.
- if (const file* f = mt->is_a<file> ())
+ // Then ad hoc group file/fsdir members, if any.
+ //
+ for (const target* mt (t.adhoc_member);
+ mt != nullptr;
+ mt = mt->adhoc_member)
{
- p = &f->path ();
+ const path* p (nullptr);
- if (p->empty ()) // The "trust me, it's somewhere" case.
- p = nullptr;
- }
- else if (const fsdir* d = mt->is_a<fsdir> ())
- p = &d->dir;
+ if (const file* f = mt->is_a<file> ())
+ {
+ p = &f->path ();
- if (p != nullptr)
- {
- // Check for a custom backlink mode for this member. If none, then
- // inherit the one from the group (so if the user asked to copy .exe,
- // we will also copy .pdb).
- //
- // Note that we want to avoid group or tt/patter-spec lookup. And
- // since this is an ad hoc member (which means it was either declared
- // in the buildfile or added by the rule), we assume that the value,
- // if any, will be set as a rule-specific variable (since setting it
- // as a target-specific wouldn't be MT-safe). @@ Don't think this
- // applies to declared ad hoc members.
- //
- lookup l (mt->state[a].vars[t.ctx.var_backlink]);
+ if (p->empty ()) // The "trust me, it's somewhere" case.
+ p = nullptr;
+ }
+ else if (const fsdir* d = mt->is_a<fsdir> ())
+ p = &d->dir;
- optional<mode> bm (l ? backlink_test (*mt, l) : m);
+ if (p != nullptr)
+ {
+ if (optional<pair<mode, bool>> m = member_mode (*mt))
+ add (*p, m->first, mt, m->second);
+ }
+ }
+ }
+ else
+ {
+ // Explicit group.
+ //
+ group_view gv (t.group_members (a));
+ assert (gv.members != nullptr);
- if (bm)
- add (*p, *bm);
+ for (size_t i (0); i != gv.count; ++i)
+ {
+ if (const target* mt = gv.members[i])
+ {
+ if (const file* f = mt->is_a<file> ())
+ {
+ if (optional<pair<mode, bool>> m = member_mode (*mt))
+ add (f->path (), m->first);
+ }
+ }
}
}
@@ -1761,29 +2641,89 @@ namespace build2
}
static void
- backlink_update_post (target& t, target_state ts, backlinks& bls)
+ backlink_update_post (target& t, target_state ts,
+ backlink_mode m, backlinks& bls)
{
if (ts == target_state::failed)
return; // Let auto rm clean things up.
- // Make backlinks.
- //
- for (auto b (bls.begin ()), i (b); i != bls.end (); ++i)
+ context& ctx (t.ctx);
+
+ file* ft (t.is_a<file> ());
+
+ if (ft != nullptr && bls.size () == 1)
{
- const backlink& bl (*i);
+ // Single file-based target.
+ //
+ const backlink& bl (bls.front ());
- if (i == b)
- update_backlink (t.as<file> (),
- bl.path,
- ts == target_state::changed,
- bl.mode);
- else
- update_backlink (t.ctx, bl.target, bl.path, bl.mode);
+ update_backlink (*ft,
+ bl.path,
+ ts == target_state::changed,
+ bl.mode);
+ }
+ else
+ {
+ // Explicit or ad hoc group.
+ //
+ // What we have below is a custom variant of update_backlink(file).
+ //
+ dir_path d (bls.front ().path.directory ());
+
+ // First print the verbosity level 1 diagnostics. Level 2 and higher are
+ // handled by the update_backlink() calls below.
+ //
+ if (verb == 1)
+ {
+ bool changed (ts == target_state::changed);
+
+ if (!changed)
+ {
+ for (const backlink& bl: bls)
+ {
+ changed = !butl::entry_exists (bl.path,
+ false /* follow_symlinks */,
+ true /* ignore_errors */);
+ if (changed)
+ break;
+ }
+ }
+
+ if (changed)
+ {
+ const char* c (update_backlink_name (m, false /* to_dir */));
+
+ // For explicit groups we only print the group target. For ad hoc
+ // groups we print all the members except those explicitly excluded.
+ //
+ if (ft == nullptr)
+ print_diag (c, t, d);
+ else
+ {
+ vector<target_key> tks;
+ tks.reserve (bls.size ());
+
+ for (const backlink& bl: bls)
+ if (bl.print)
+ tks.push_back (bl.member->key ());
+
+ print_diag (c, move (tks), d);
+ }
+ }
+ }
+
+ if (!exists (d))
+ mkdir_p (d, 2 /* verbosity */);
+
+ // Make backlinks.
+ //
+ for (const backlink& bl: bls)
+ update_backlink (ctx, bl.target, bl.path, bl.mode, 2 /* verbosity */);
}
// Cancel removal.
//
- if (!t.ctx.dry_run)
+ if (!ctx.dry_run)
{
for (backlink& bl: bls)
bl.cancel ();
@@ -1824,15 +2764,57 @@ namespace build2
// which is ok since such targets are probably not interesting for
// backlinking.
//
+ // Note also that for group members (both ad hoc and non) backlinking
+ // is handled when updating/cleaning the group.
+ //
backlinks bls;
- optional<backlink_mode> blm (backlink_test (a, t));
+ optional<backlink_mode> blm;
- if (blm)
+ if (t.group == nullptr) // Matched so must be already resolved.
{
- if (a == perform_update_id)
- bls = backlink_update_pre (a, t, *blm);
+ blm = backlink_test (a, t);
+
+ if (blm)
+ {
+ if (a == perform_update_id)
+ {
+ bls = backlink_update_pre (a, t, *blm);
+ if (bls.empty ())
+ blm = nullopt;
+ }
+ else
+ backlink_clean_pre (a, t, *blm);
+ }
+ }
+
+ // Note: see similar code in set_rule_trace() for match.
+ //
+ if (ctx.trace_execute != nullptr && trace_target (t, *ctx.trace_execute))
+ {
+ diag_record dr (info);
+
+ dr << diag_doing (a, t);
+
+ if (s.rule != nullptr)
+ {
+ const rule& r (s.rule->second);
+
+ if (const adhoc_rule* ar = dynamic_cast<const adhoc_rule*> (&r))
+ {
+ dr << info (ar->loc);
+
+ if (ar->pattern != nullptr)
+ dr << "using ad hoc pattern rule ";
+ else
+ dr << "using ad hoc recipe ";
+ }
+ else
+ dr << info << "using rule ";
+
+ dr << s.rule->first;
+ }
else
- backlink_clean_pre (a, t, *blm);
+ dr << info << "using directly-assigned recipe";
}
ts = execute_recipe (a, t, s.recipe);
@@ -1840,7 +2822,7 @@ namespace build2
if (blm)
{
if (a == perform_update_id)
- backlink_update_post (t, ts, bls);
+ backlink_update_post (t, ts, *blm, bls);
}
}
catch (const failed&)
@@ -1851,13 +2833,29 @@ namespace build2
ts = s.state = target_state::failed;
}
+ // Clear the recipe to release any associated memory. Note that
+ // s.recipe_group_action may be used further (see, for example,
+ // group_state()) and should retain its value.
+ //
+ if (!s.recipe_keep)
+ s.recipe = nullptr;
+
// Decrement the target count (see set_recipe() for details).
//
- if (a.inner ())
+ // Note that here we cannot rely on s.state being group because of the
+    // postponement logic (see execute_recipe() for details).
+ //
+ if (a.inner () && !s.recipe_group_action)
{
- recipe_function** f (s.recipe.target<recipe_function*> ());
- if (f == nullptr || *f != &group_action)
- ctx.target_count.fetch_sub (1, memory_order_relaxed);
+ // See resolve_members_impl() for background.
+ //
+ if (s.resolve_counted)
+ {
+ s.resolve_counted = false;
+ ctx.resolve_count.fetch_sub (1, memory_order_relaxed);
+ }
+
+ ctx.target_count.fetch_sub (1, memory_order_relaxed);
}
// Decrement the task count (to count_executed) and wake up any threads
@@ -1867,17 +2865,19 @@ namespace build2
target::offset_busy - target::offset_executed,
memory_order_release));
assert (tc == ctx.count_busy ());
- ctx.sched.resume (s.task_count);
+ ctx.sched->resume (s.task_count);
return ts;
}
target_state
- execute (action a,
- const target& ct,
- size_t start_count,
- atomic_count* task_count)
+ execute_impl (action a,
+ const target& ct,
+ size_t start_count,
+ atomic_count* task_count)
{
+ // NOTE: see also pretend_execute lambda in perform_execute().
+
target& t (const_cast<target&> (ct)); // MT-aware.
target::opstate& s (t[a]);
@@ -1888,7 +2888,6 @@ namespace build2
size_t gd (ctx.dependency_count.fetch_sub (1, memory_order_relaxed));
size_t td (s.dependents.fetch_sub (1, memory_order_release));
assert (td != 0 && gd != 0);
- td--;
// Handle the "last" execution mode.
//
@@ -1911,7 +2910,7 @@ namespace build2
// thread. For other threads the state will still be unknown (until they
// try to execute it).
//
- if (ctx.current_mode == execution_mode::last && td != 0)
+ if (ctx.current_mode == execution_mode::last && --td != 0)
return target_state::postponed;
// Try to atomically change applied to busy.
@@ -1921,6 +2920,7 @@ namespace build2
size_t exec (ctx.count_executed ());
size_t busy (ctx.count_busy ());
+ optional<target_state> r;
if (s.task_count.compare_exchange_strong (
tc,
busy,
@@ -1933,32 +2933,35 @@ namespace build2
{
// There could still be scope operations.
//
- if (t.is_a<dir> ())
- execute_recipe (a, t, nullptr /* recipe */);
+ r = t.is_a<dir> ()
+ ? execute_recipe (a, t, nullptr /* recipe */)
+ : s.state;
s.task_count.store (exec, memory_order_release);
- ctx.sched.resume (s.task_count);
+ ctx.sched->resume (s.task_count);
}
else
{
if (task_count == nullptr)
- return execute_impl (a, t);
-
- // Pass our diagnostics stack (this is safe since we expect the
- // caller to wait for completion before unwinding its diag stack).
- //
- if (ctx.sched.async (start_count,
- *task_count,
- [a] (const diag_frame* ds, target& t)
- {
- diag_frame::stack_guard dsg (ds);
- execute_impl (a, t);
- },
- diag_frame::stack (),
- ref (t)))
- return target_state::unknown; // Queued.
-
- // Executed synchronously, fall through.
+ r = execute_impl (a, t);
+ else
+ {
+ // Pass our diagnostics stack (this is safe since we expect the
+ // caller to wait for completion before unwinding its diag stack).
+ //
+ if (ctx.sched->async (start_count,
+ *task_count,
+ [a] (const diag_frame* ds, target& t)
+ {
+ diag_frame::stack_guard dsg (ds);
+ execute_impl (a, t);
+ },
+ diag_frame::stack (),
+ ref (t)))
+ return target_state::unknown; // Queued.
+
+ // Executed synchronously, fall through.
+ }
}
}
else
@@ -1969,24 +2972,28 @@ namespace build2
else assert (tc == exec);
}
- return t.executed_state (a, false);
+ return r ? *r : t.executed_state (a, false /* fail */);
}
target_state
- execute_direct (action a, const target& ct)
+ execute_direct_impl (action a,
+ const target& ct,
+ size_t start_count,
+ atomic_count* task_count)
{
context& ctx (ct.ctx);
target& t (const_cast<target&> (ct)); // MT-aware.
target::opstate& s (t[a]);
- // Similar logic to match() above except we execute synchronously.
+ // Similar logic to execute_impl() above.
//
size_t tc (ctx.count_applied ());
size_t exec (ctx.count_executed ());
size_t busy (ctx.count_busy ());
+ optional<target_state> r;
if (s.task_count.compare_exchange_strong (
tc,
busy,
@@ -1994,33 +3001,436 @@ namespace build2
memory_order_acquire)) // Synchronize on failure.
{
if (s.state == target_state::unknown)
- execute_impl (a, t);
+ {
+ if (task_count == nullptr)
+ r = execute_impl (a, t);
+ else
+ {
+ if (ctx.sched->async (start_count,
+ *task_count,
+ [a] (const diag_frame* ds, target& t)
+ {
+ diag_frame::stack_guard dsg (ds);
+ execute_impl (a, t);
+ },
+ diag_frame::stack (),
+ ref (t)))
+ return target_state::unknown; // Queued.
+
+ // Executed synchronously, fall through.
+ }
+ }
else
{
assert (s.state == target_state::unchanged ||
s.state == target_state::failed);
- if (s.state == target_state::unchanged)
- {
- if (t.is_a<dir> ())
- execute_recipe (a, t, nullptr /* recipe */);
- }
+ r = s.state == target_state::unchanged && t.is_a<dir> ()
+ ? execute_recipe (a, t, nullptr /* recipe */)
+ : s.state;
s.task_count.store (exec, memory_order_release);
- ctx.sched.resume (s.task_count);
+ ctx.sched->resume (s.task_count);
}
}
else
{
- // If the target is busy, wait for it.
+ // Either busy or already executed.
+ //
+ if (tc >= busy) return target_state::busy;
+ else assert (tc == exec);
+ }
+
+ return r ? *r : t.executed_state (a, false /* fail */);
+ }
+
+ bool
+ update_during_match (tracer& trace, action a, const target& t, timestamp ts)
+ {
+ // NOTE: see also clean_during_match() if changing anything here.
+
+ assert (a == perform_update_id);
+
+ // Note: this function is used to make sure header dependencies are up to
+    // date (which is where it originated).
+ //
+ // There would normally be a lot of headers for every source file (think
+ // all the system headers) and just calling execute_direct_sync() on all
+ // of them can get expensive. At the same time, most of these headers are
+ // existing files that we will never be updating (again, system headers,
+ // for example) and the rule that will match them is the fallback
+ // file_rule. That rule has an optimization: it returns noop_recipe (which
+ // causes the target state to be automatically set to unchanged) if the
+ // file is known to be up to date. So we do the update "smartly".
+ //
+ // Also, now that we do header pre-generation by default, there is a good
+ // chance the header has already been updated. So we also detect that and
+ // avoid switching the phase.
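+    //
+    // A hypothetical usage sketch (e.g., from a rule's dynamic dependency
+    // extraction logic; the names here are illustrative):
+    //
+    //   if (update_during_match (trace, a, *ht, mt))
+    //     updated = true; // Header was updated; recompile.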
+ //
+ const path_target* pt (t.is_a<path_target> ());
+
+ if (pt == nullptr)
+ ts = timestamp_unknown;
+
+ target_state os (t.matched_state (a));
+
+ if (os == target_state::unchanged)
+ {
+ if (ts == timestamp_unknown)
+ return false;
+ else
+ {
+ // We expect the timestamp to be known (i.e., existing file).
//
- if (tc >= busy)
- ctx.sched.wait (exec, s.task_count, scheduler::work_none);
- else
- assert (tc == exec);
+ timestamp mt (pt->mtime ());
+ assert (mt != timestamp_unknown);
+ return mt > ts;
+ }
+ }
+ else
+ {
+ // We only want to return true if our call to execute() actually caused
+ // an update. In particular, the target could already have been in
+ // target_state::changed because of the dynamic dependency extraction
+ // run for some other target.
+ //
+ target_state ns;
+ if (os != target_state::changed)
+ {
+ phase_switch ps (t.ctx, run_phase::execute);
+ ns = execute_direct_sync (a, t);
+ }
+ else
+ ns = os;
+
+ if (ns != os && ns != target_state::unchanged)
+ {
+ l6 ([&]{trace << "updated " << t
+ << "; old state " << os
+ << "; new state " << ns;});
+ return true;
+ }
+ else
+ return ts != timestamp_unknown ? pt->newer (ts, ns) : false;
+ }
+ }
+
+ bool
+ update_during_match_prerequisites (tracer& trace,
+ action a, target& t,
+ uintptr_t mask)
+ {
+ // NOTE: see also clean_during_match_prerequisites() if changing anything
+ // here.
+
+ assert (a == perform_update_id);
+
+ prerequisite_targets& pts (t.prerequisite_targets[a]);
+
+    // On the first pass detect and handle unchanged targets. Note that we
+ // have to do it in a separate pass since we cannot call matched_state()
+ // once we've switched the phase.
+ //
+ size_t n (0);
+
+ for (prerequisite_target& p: pts)
+ {
+ if (mask == 0 || (p.include & mask) != 0)
+ {
+ if (p.target != nullptr)
+ {
+ const target& pt (*p.target);
+
+ target_state os (pt.matched_state (a));
+
+ if (os != target_state::unchanged)
+ {
+ ++n;
+ p.data = static_cast<uintptr_t> (os);
+ continue;
+ }
+ }
+
+ p.data = 0;
+ }
+ }
+
+ // If all unchanged, we are done.
+ //
+ if (n == 0)
+ return false;
+
+ // Provide additional information on what's going on.
+ //
+ auto df = make_diag_frame (
+ [&t](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while updating during match prerequisites of "
+ << "target " << t;
+ });
+
+ context& ctx (t.ctx);
+
+ phase_switch ps (ctx, run_phase::execute);
+
+ bool r (false);
+
+ // @@ Maybe we should optimize for n == 1? Maybe we should just call
+ // smarter update_during_match() in this case?
+ //
+#if 0
+ for (prerequisite_target& p: pts)
+ {
+ if ((mask == 0 || (p.include & mask) != 0) && p.data != 0)
+ {
+ const target& pt (*p.target);
+
+ target_state os (static_cast<target_state> (p.data));
+ target_state ns (execute_direct_sync (a, pt));
+
+ if (ns != os && ns != target_state::unchanged)
+ {
+ l6 ([&]{trace << "updated " << pt
+ << "; old state " << os
+ << "; new state " << ns;});
+ r = true;
+ }
+
+ p.data = 0;
+ }
+ }
+#else
+
+ // Start asynchronous execution of prerequisites. Similar logic to
+ // straight_execute_members().
+ //
+ // Note that the target's task count is expected to be busy (since this
+ // function is called during match). And there don't seem to be any
+ // problems in using it for execute.
+ //
+ atomic_count& tc (t[a].task_count);
+
+ size_t busy (ctx.count_busy ());
+
+ wait_guard wg (ctx, busy, tc);
+
+ for (prerequisite_target& p: pts)
+ {
+ if ((mask == 0 || (p.include & mask) != 0) && p.data != 0)
+ {
+ execute_direct_async (a, *p.target, busy, tc);
+ }
+ }
+
+ wg.wait ();
+
+ // Finish execution and process the result.
+ //
+ for (prerequisite_target& p: pts)
+ {
+ if ((mask == 0 || (p.include & mask) != 0) && p.data != 0)
+ {
+ const target& pt (*p.target);
+ target_state ns (execute_complete (a, pt));
+ target_state os (static_cast<target_state> (p.data));
+
+ if (ns != os && ns != target_state::unchanged)
+ {
+ l6 ([&]{trace << "updated " << pt
+ << "; old state " << os
+ << "; new state " << ns;});
+ r = true;
+ }
+
+ p.data = 0;
+ }
+ }
+#endif
+
+ return r;
+ }
+
+ bool
+ clean_during_match (tracer& trace, action a, const target& t)
+ {
+ // Let's keep this as close to update_during_match() semantically as
+ // possible until we see a clear reason to deviate.
+
+ // We have a problem with fsdir{}: if the directory is not empty because
+ // there are other targets that depend on it and we execute it here and
+ // now, it will not remove the directory (because it's not yet empty) but
+ // will cause the target to be in the executed state, which means that
+ // when other targets try to execute it, it will be a noop and the
+ // directory will be left behind.
+
+ assert (a == perform_clean_id && !t.is_a<fsdir> ());
+
+ target_state os (t.matched_state (a));
+
+ if (os == target_state::unchanged)
+ return false;
+ else
+ {
+ target_state ns;
+ if (os != target_state::changed)
+ {
+ phase_switch ps (t.ctx, run_phase::execute);
+ ns = execute_direct_sync (a, t);
+ }
+ else
+ ns = os;
+
+ if (ns != os && ns != target_state::unchanged)
+ {
+ l6 ([&]{trace << "cleaned " << t
+ << "; old state " << os
+ << "; new state " << ns;});
+ return true;
+ }
+ else
+ return false;
+ }
+ }
+
+ bool
+ clean_during_match_prerequisites (tracer& trace,
+ action a, target& t,
+ uintptr_t mask)
+ {
+ // Let's keep this as close to update_during_match_prerequisites()
+ // semantically as possible until we see a clear reason to deviate.
+ //
+ // Currently the only substantial change is the reverse iteration order.
+
+ assert (a == perform_clean_id);
+
+ prerequisite_targets& pts (t.prerequisite_targets[a]);
+
+    // On the first pass detect and handle unchanged targets. Note that we
+ // have to do it in a separate pass since we cannot call matched_state()
+ // once we've switched the phase.
+ //
+ size_t n (0);
+
+ for (prerequisite_target& p: pts)
+ {
+ if (mask == 0 || (p.include & mask) != 0)
+ {
+ if (p.target != nullptr)
+ {
+ const target& pt (*p.target);
+
+ assert (!pt.is_a<fsdir> ()); // See above.
+
+ target_state os (pt.matched_state (a));
+
+ if (os != target_state::unchanged)
+ {
+ ++n;
+ p.data = static_cast<uintptr_t> (os);
+ continue;
+ }
+ }
+
+ p.data = 0;
+ }
+ }
+
+ // If all unchanged, we are done.
+ //
+ if (n == 0)
+ return false;
+
+ // Provide additional information on what's going on.
+ //
+ auto df = make_diag_frame (
+ [&t](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while cleaning during match prerequisites of "
+ << "target " << t;
+ });
+
+ context& ctx (t.ctx);
+
+ phase_switch ps (ctx, run_phase::execute);
+
+ bool r (false);
+
+ // @@ Maybe we should optimize for n == 1? Maybe we should just call
+ // smarter clean_during_match() in this case?
+ //
+#if 0
+ for (prerequisite_target& p: reverse_iterate (pts))
+ {
+ if ((mask == 0 || (p.include & mask) != 0) && p.data != 0)
+ {
+ const target& pt (*p.target);
+
+ target_state os (static_cast<target_state> (p.data));
+ target_state ns (execute_direct_sync (a, pt));
+
+ if (ns != os && ns != target_state::unchanged)
+ {
+ l6 ([&]{trace << "cleaned " << pt
+ << "; old state " << os
+ << "; new state " << ns;});
+ r = true;
+ }
+
+ p.data = 0;
+ }
+ }
+#else
+
+ // Start asynchronous execution of prerequisites. Similar logic to
+ // straight_execute_members().
+ //
+ // Note that the target's task count is expected to be busy (since this
+ // function is called during match). And there don't seem to be any
+ // problems in using it for execute.
+ //
+ atomic_count& tc (t[a].task_count);
+
+ size_t busy (ctx.count_busy ());
+
+ wait_guard wg (ctx, busy, tc);
+
+ for (prerequisite_target& p: reverse_iterate (pts))
+ {
+ if ((mask == 0 || (p.include & mask) != 0) && p.data != 0)
+ {
+ execute_direct_async (a, *p.target, busy, tc);
+ }
+ }
+
+ wg.wait ();
+
+ // Finish execution and process the result.
+ //
+ for (prerequisite_target& p: reverse_iterate (pts))
+ {
+ if ((mask == 0 || (p.include & mask) != 0) && p.data != 0)
+ {
+ const target& pt (*p.target);
+ target_state ns (execute_complete (a, pt));
+ target_state os (static_cast<target_state> (p.data));
+
+ if (ns != os && ns != target_state::unchanged)
+ {
+ l6 ([&]{trace << "cleaned " << pt
+ << "; old state " << os
+ << "; new state " << ns;});
+ r = true;
+ }
+
+ p.data = 0;
+ }
}
+#endif
- return t.executed_state (a);
+ return r;
}
static inline void
@@ -2031,7 +3441,7 @@ namespace build2
static inline void
blank_adhoc_member (prerequisite_target& pt)
{
- if (pt.adhoc)
+ if (pt.adhoc ())
pt.target = nullptr;
}
@@ -2043,7 +3453,6 @@ namespace build2
target_state r (target_state::unchanged);
size_t busy (ctx.count_busy ());
- size_t exec (ctx.count_executed ());
// Start asynchronous execution of prerequisites.
//
@@ -2078,12 +3487,7 @@ namespace build2
continue;
const target& mt (*ts[i]);
-
- // If the target is still busy, wait for its completion.
- //
- ctx.sched.wait (exec, mt[a].task_count, scheduler::work_none);
-
- r |= mt.executed_state (a);
+ r |= execute_complete (a, mt);
blank_adhoc_member (ts[i]);
}
@@ -2101,7 +3505,6 @@ namespace build2
target_state r (target_state::unchanged);
size_t busy (ctx.count_busy ());
- size_t exec (ctx.count_executed ());
wait_guard wg (ctx, busy, tc);
@@ -2130,10 +3533,7 @@ namespace build2
continue;
const target& mt (*ts[i]);
-
- ctx.sched.wait (exec, mt[a].task_count, scheduler::work_none);
-
- r |= mt.executed_state (a);
+ r |= execute_complete (a, mt);
blank_adhoc_member (ts[i]);
}
@@ -2165,12 +3565,11 @@ namespace build2
const timestamp& mt, const execute_filter& ef,
size_t n)
{
- context& ctx (t.ctx);
+ assert (a == perform_update_id);
- assert (ctx.current_mode == execution_mode::first);
+ context& ctx (t.ctx);
size_t busy (ctx.count_busy ());
- size_t exec (ctx.count_executed ());
auto& pts (t.prerequisite_targets[a]);
@@ -2202,7 +3601,7 @@ namespace build2
wg.wait ();
bool e (mt == timestamp_nonexistent);
- const target* rt (tt != nullptr ? nullptr : &t);
+ const target* rt (nullptr);
for (size_t i (0); i != n; ++i)
{
@@ -2212,15 +3611,102 @@ namespace build2
continue;
const target& pt (*p.target);
+ target_state s (execute_complete (a, pt));
+ rs |= s;
+
+ // Should we compare the timestamp to this target's?
+ //
+ if (!e && (p.adhoc () || !ef || ef (pt, i)))
+ {
+ // If this is an mtime-based target, then compare timestamps.
+ //
+ if (const mtime_target* mpt = pt.is_a<mtime_target> ())
+ {
+ if (mpt->newer (mt, s))
+ e = true;
+ }
+ else
+ {
+ // Otherwise we assume the prerequisite is newer if it was changed.
+ //
+ if (s == target_state::changed)
+ e = true;
+ }
+ }
+
+ if (p.adhoc ())
+ p.target = nullptr; // Blank out.
+ else if (tt != nullptr)
+ {
+ if (rt == nullptr && pt.is_a (*tt))
+ rt = &pt;
+ }
+ }
- ctx.sched.wait (exec, pt[a].task_count, scheduler::work_none);
+ assert (tt == nullptr || rt != nullptr);
- target_state s (pt.executed_state (a));
+ return pair<optional<target_state>, const target*> (
+ e ? optional<target_state> () : rs, rt);
+ }
+
+ pair<optional<target_state>, const target*>
+ reverse_execute_prerequisites (const target_type* tt,
+ action a, const target& t,
+ const timestamp& mt, const execute_filter& ef,
+ size_t n)
+ {
+ assert (a == perform_update_id);
+
+ context& ctx (t.ctx);
+
+ size_t busy (ctx.count_busy ());
+
+ auto& pts (t.prerequisite_targets[a]);
+
+ if (n == 0)
+ n = pts.size ();
+
+ // Pretty much as reverse_execute_members() but hairier.
+ //
+ target_state rs (target_state::unchanged);
+
+ wait_guard wg (ctx, busy, t[a].task_count);
+
+ for (size_t i (n); i != 0; )
+ {
+ const target*& pt (pts[--i]);
+
+ if (pt == nullptr) // Skipped.
+ continue;
+
+ target_state s (execute_async (a, *pt, busy, t[a].task_count));
+
+ if (s == target_state::postponed)
+ {
+ rs |= s;
+ pt = nullptr;
+ }
+ }
+
+ wg.wait ();
+
+ bool e (mt == timestamp_nonexistent);
+ const target* rt (nullptr);
+
+ for (size_t i (n); i != 0; )
+ {
+ prerequisite_target& p (pts[--i]);
+
+ if (p == nullptr)
+ continue;
+
+ const target& pt (*p.target);
+ target_state s (execute_complete (a, pt));
rs |= s;
// Should we compare the timestamp to this target's?
//
- if (!e && (p.adhoc || !ef || ef (pt, i)))
+ if (!e && (p.adhoc () || !ef || ef (pt, i)))
{
// If this is an mtime-based target, then compare timestamps.
//
@@ -2238,26 +3724,27 @@ namespace build2
}
}
- if (p.adhoc)
+ if (p.adhoc ())
p.target = nullptr; // Blank out.
- else
+ else if (tt != nullptr)
{
- if (rt == nullptr && pt.is_a (*tt))
+        // Note that here we need the last one.
+ //
+ if (pt.is_a (*tt))
rt = &pt;
}
}
- assert (rt != nullptr);
+ assert (tt == nullptr || rt != nullptr);
return pair<optional<target_state>, const target*> (
- e ? optional<target_state> () : rs,
- tt != nullptr ? rt : nullptr);
+ e ? optional<target_state> () : rs, rt);
}
target_state
noop_action (action a, const target& t)
{
- text << "noop action triggered for " << diag_doing (a, t);
+ error << "noop action triggered for " << diag_doing (a, t);
assert (false); // We shouldn't be called (see set_recipe()).
return target_state::unchanged;
}
@@ -2271,10 +3758,13 @@ namespace build2
//
const target& g (*t.group);
- target_state gs (execute (a, g));
+    // This is equivalent to execute_sync (a, t, false) but saves a call to
+    // executed_state() (which we don't need).
+ //
+ target_state gs (execute_impl (a, g, 0, nullptr));
if (gs == target_state::busy)
- ctx.sched.wait (ctx.count_executed (),
+ ctx.sched->wait (ctx.count_executed (),
g[a].task_count,
scheduler::work_none);
@@ -2302,102 +3792,105 @@ namespace build2
return execute_prerequisites (a, t);
}
- target_state
- perform_clean_extra (action a, const file& ft,
- const clean_extras& extras,
- const clean_adhoc_extras& adhoc_extras)
+ static target_state
+ clean_extra (context& ctx,
+ const path& fp,
+ const clean_extras& es,
+ path& ep, bool& ed)
{
- // Clean the extras first and don't print the commands at verbosity level
- // below 3. Note the first extra file/directory that actually got removed
- // for diagnostics below.
- //
- // Note that dry-run is taken care of by the filesystem functions.
- //
- target_state er (target_state::unchanged);
- bool ed (false);
- path ep;
+ assert (!fp.empty ()); // Must be assigned.
- context& ctx (ft.ctx);
+ target_state er (target_state::unchanged);
- auto clean_extra = [&er, &ed, &ep, &ctx] (const file& f,
- const path* fp,
- const clean_extras& es)
+ for (const char* e: es)
{
- for (const char* e: es)
- {
- size_t n;
- if (e == nullptr || (n = strlen (e)) == 0)
- continue;
+ size_t n;
+ if (e == nullptr || (n = strlen (e)) == 0)
+ continue;
- path p;
- bool d;
+ path p;
+ bool d;
- if (path::traits_type::absolute (e))
- {
- p = path (e);
- d = p.to_directory ();
- }
- else
- {
- if ((d = (e[n - 1] == '/')))
- --n;
+ if (path::traits_type::absolute (e))
+ {
+ p = path (e);
+ d = p.to_directory ();
+ }
+ else
+ {
+ if ((d = (e[n - 1] == '/')))
+ --n;
- if (fp == nullptr)
- {
- fp = &f.path ();
- assert (!fp->empty ()); // Must be assigned.
- }
+ p = fp;
+ for (; *e == '-'; ++e)
+ p = p.base ();
- p = *fp;
- for (; *e == '-'; ++e)
- p = p.base ();
+ p.append (e, n);
+ }
- p.append (e, n);
- }
+ target_state r (target_state::unchanged);
- target_state r (target_state::unchanged);
+ if (d)
+ {
+ dir_path dp (path_cast<dir_path> (p));
- if (d)
+ switch (rmdir_r (ctx, dp, true, 3))
{
- dir_path dp (path_cast<dir_path> (p));
-
- switch (rmdir_r (ctx, dp, true, 3))
+ case rmdir_status::success:
{
- case rmdir_status::success:
- {
- r = target_state::changed;
- break;
- }
- case rmdir_status::not_empty:
- {
- if (verb >= 3)
- text << dp << " is current working directory, not removing";
- break;
- }
- case rmdir_status::not_exist:
+ r = target_state::changed;
break;
}
+ case rmdir_status::not_empty:
+ {
+ if (verb >= 3)
+ info << dp << " is current working directory, not removing";
+ break;
+ }
+ case rmdir_status::not_exist:
+ break;
}
- else
- {
- if (rmfile (ctx, p, 3))
- r = target_state::changed;
- }
-
- if (r == target_state::changed && ep.empty ())
- {
- ed = d;
- ep = move (p);
- }
+ }
+ else
+ {
+ if (rmfile (ctx, p, 3))
+ r = target_state::changed;
+ }
- er |= r;
+ if (r == target_state::changed && ep.empty ())
+ {
+ ed = d;
+ ep = move (p);
}
- };
+
+ er |= r;
+ }
+
+ return er;
+ }
+
+ target_state
+ perform_clean_extra (action a, const file& ft,
+ const clean_extras& extras,
+ const clean_adhoc_extras& adhoc_extras,
+ bool show_adhoc)
+ {
+ context& ctx (ft.ctx);
+
+ // Clean the extras first and don't print the commands at verbosity level
+ // below 3. Note the first extra file/directory that actually got removed
+ // for diagnostics below.
+ //
+ // Note that dry-run is taken care of by the filesystem functions.
+ //
+ target_state er (target_state::unchanged);
+ bool ed (false);
+ path ep;
const path& fp (ft.path ());
if (!fp.empty () && !extras.empty ())
- clean_extra (ft, nullptr, extras);
+ er |= clean_extra (ctx, fp, extras, ep, ed);
target_state tr (target_state::unchanged);
@@ -2412,6 +3905,12 @@ namespace build2
// Now clean the ad hoc group file members, if any.
//
+ // While at it, also collect the group target keys if we are showing
+ // the members. But only those that exist (since we don't want to
+ // print any diagnostics if none of them exist).
+ //
+ vector<target_key> tks;
+
for (const target* m (ft.adhoc_member);
m != nullptr;
m = m->adhoc_member)
@@ -2432,7 +3931,7 @@ namespace build2
}));
if (i != adhoc_extras.end ())
- clean_extra (*mf, mp, i->extras);
+ er |= clean_extra (ctx, *mp, i->extras, ep, ed);
}
if (!clean)
@@ -2452,18 +3951,38 @@ namespace build2
? target_state::changed
: target_state::unchanged);
- if (r == target_state::changed && ep.empty ())
- ep = *mp;
-
- er |= r;
+ if (r == target_state::changed)
+ {
+ if (show_adhoc && verb == 1)
+ tks.push_back (mf->key ());
+ else if (ep.empty ())
+ {
+ ep = *mp;
+ er |= r;
+ }
+ }
}
}
    // Now clean the primary target and its prerequisites in the reverse order
// of update: first remove the file, then clean the prerequisites.
//
- if (clean && !fp.empty () && rmfile (fp, ft))
- tr = target_state::changed;
+ if (clean && !fp.empty ())
+ {
+ if (show_adhoc && verb == 1 && !tks.empty ())
+ {
+ if (rmfile (fp, ft, 2 /* verbosity */))
+ tks.insert (tks.begin (), ft.key ());
+
+ print_diag ("rm", move (tks));
+ tr = target_state::changed;
+ }
+ else
+ {
+ if (rmfile (fp, ft))
+ tr = target_state::changed;
+ }
+ }
// Update timestamp in case there are operations after us that could use
// the information.
@@ -2483,10 +4002,20 @@ namespace build2
{
if (verb > (ctx.current_diag_noise ? 0 : 1) && verb < 3)
{
- if (ed)
- text << "rm -r " << path_cast<dir_path> (ep);
- else
- text << "rm " << ep;
+ if (verb >= 2)
+ {
+ if (ed)
+ text << "rm -r " << path_cast<dir_path> (ep);
+ else
+ text << "rm " << ep;
+ }
+ else if (verb)
+ {
+ if (ed)
+ print_diag ("rm -r", path_cast<dir_path> (ep));
+ else
+ print_diag ("rm", ep);
+ }
}
}
@@ -2499,29 +4028,19 @@ namespace build2
}
target_state
- perform_clean (action a, const target& t)
+ perform_clean_group_extra (action a, const mtime_target& g,
+ const clean_extras& extras)
{
- const file& f (t.as<file> ());
- assert (!f.path ().empty ());
- return perform_clean_extra (a, f, {});
- }
+ context& ctx (g.ctx);
- target_state
- perform_clean_depdb (action a, const target& t)
- {
- const file& f (t.as<file> ());
- assert (!f.path ().empty ());
- return perform_clean_extra (a, f, {".d"});
- }
+ target_state er (target_state::unchanged);
+ bool ed (false);
+ path ep;
- target_state
- perform_clean_group (action a, const target& xg)
- {
- const mtime_target& g (xg.as<mtime_target> ());
+ if (!extras.empty ())
+ er |= clean_extra (ctx, g.dir / path (g.name), extras, ep, ed);
- // Similar logic to perform_clean_extra() above.
- //
- target_state r (target_state::unchanged);
+ target_state tr (target_state::unchanged);
if (cast_true<bool> (g[g.ctx.var_clean]))
{
@@ -2529,54 +4048,93 @@ namespace build2
{
if (const target* m = gv.members[gv.count - 1])
{
- if (rmfile (m->as<file> ().path (), *m))
- r |= target_state::changed;
+ // Note that at the verbosity level 1 we don't show the removal of
+ // each group member. This is consistent with what is normally shown
+ // during update.
+ //
+ if (rmfile (m->as<file> ().path (), *m, 2 /* verbosity */))
+ tr |= target_state::changed;
}
}
+
+ if (tr == target_state::changed && verb == 1)
+ print_diag ("rm", g);
}
g.mtime (timestamp_nonexistent);
- r |= reverse_execute_prerequisites (a, g);
- return r;
+ if (tr != target_state::changed && er == target_state::changed)
+ {
+ if (verb > (ctx.current_diag_noise ? 0 : 1) && verb < 3)
+ {
+ if (verb >= 2)
+ {
+ if (ed)
+ text << "rm -r " << path_cast<dir_path> (ep);
+ else
+ text << "rm " << ep;
+ }
+ else if (verb)
+ {
+ if (ed)
+ print_diag ("rm -r", path_cast<dir_path> (ep));
+ else
+ print_diag ("rm", ep);
+ }
+ }
+ }
+
+ tr |= reverse_execute_prerequisites (a, g);
+
+ tr |= er;
+ return tr;
}
target_state
- perform_clean_group_depdb (action a, const target& g)
+ perform_clean (action a, const target& t)
{
- context& ctx (g.ctx);
+ const file& f (t.as<file> ());
+ assert (!f.path ().empty ());
+ return perform_clean_extra (a, f, {});
+ }
- // The same twisted target state merging logic as in perform_clean_extra().
- //
- target_state er (target_state::unchanged);
- path ep;
+ target_state
+ perform_clean_depdb (action a, const target& t)
+ {
+ const file& f (t.as<file> ());
+ assert (!f.path ().empty ());
+ return perform_clean_extra (a, f, {".d"});
+ }
+
+ target_state
+ perform_clean_group (action a, const target& t)
+ {
+ return perform_clean_group_extra (a, t.as<mtime_target> (), {});
+ }
- group_view gv (g.group_members (a));
- if (gv.count != 0)
+ target_state
+ perform_clean_group_depdb (action a, const target& t)
+ {
+ path d;
+ clean_extras extras;
{
- for (size_t i (0); i != gv.count; ++i)
+ group_view gv (t.group_members (a));
+ if (gv.count != 0)
{
- if (const target* m = gv.members[i])
+ for (size_t i (0); i != gv.count; ++i)
{
- ep = m->as<file> ().path () + ".d";
- break;
+ if (const target* m = gv.members[i])
+ {
+ d = m->as<file> ().path () + ".d";
+ break;
+ }
}
- }
- assert (!ep.empty ());
- if (rmfile (ctx, ep, 3))
- er = target_state::changed;
- }
-
- target_state tr (perform_clean_group (a, g));
-
- if (tr != target_state::changed && er == target_state::changed)
- {
- if (verb > (ctx.current_diag_noise ? 0 : 1) && verb < 3)
- text << "rm " << ep;
+ assert (!d.empty ());
+ extras.push_back (d.string ().c_str ());
+ }
}
- tr |= er;
- return tr;
+ return perform_clean_group_extra (a, t.as<mtime_target> (), extras);
}
}
diff --git a/libbuild2/algorithm.hxx b/libbuild2/algorithm.hxx
index 01b69f2..a4feaea 100644
--- a/libbuild2/algorithm.hxx
+++ b/libbuild2/algorithm.hxx
@@ -17,7 +17,7 @@
namespace build2
{
// The default prerequisite search implementation. It first calls the
- // prerequisite-type-specific search function. If that doesn't yeld
+ // prerequisite-type-specific search function. If that doesn't yield
// anything, it creates a new target.
//
LIBBUILD2_SYMEXPORT const target&
@@ -45,20 +45,32 @@ namespace build2
LIBBUILD2_SYMEXPORT pair<target&, ulock>
search_locked (const target&, const prerequisite_key&);
- // Note that unlike the above version, this one can be called during the
- // load and execute phases.
+ // As above but this one can be called during the load and execute phases.
//
LIBBUILD2_SYMEXPORT const target*
search_existing (context&, const prerequisite_key&);
+ // First search for an existing target and if that doesn't yield anything,
+  // create a new target, bypassing any prerequisite-type-specific search.
+ // Can be called during the load and match phases but only on project-
+ // unqualified prerequisites. This version is suitable for cases where you
+  // know the target is in out and cannot possibly be found in src.
+ //
+ LIBBUILD2_SYMEXPORT const target&
+ search_new (context&, const prerequisite_key&);
+
+ // As above but return the lock if the target was newly created.
+ //
+ LIBBUILD2_SYMEXPORT pair<target&, ulock>
+ search_new_locked (context&, const prerequisite_key&);
+
// Uniform search interface for prerequisite/prerequisite_member.
//
inline const target&
search (const target& t, const prerequisite_member& p) {return p.search (t);}
- // As above but override the target type. Useful for searching for
- // target group members where we need to search for a different
- // target type.
+ // As above but override the target type. Useful for searching for target
+ // group members where we need to search for a different target type.
//
const target&
search (const target&, const target_type&, const prerequisite_key&);
@@ -66,6 +78,15 @@ namespace build2
pair<target&, ulock>
search_locked (const target&, const target_type&, const prerequisite_key&);
+ const target*
+ search_existing (context&, const target_type&, const prerequisite_key&);
+
+ const target&
+ search_new (context&, const target_type&, const prerequisite_key&);
+
+ pair<target&, ulock>
+ search_new_locked (context&, const target_type&, const prerequisite_key&);
+
// As above but specify the prerequisite to search as individual key
// components. Scope can be NULL if the directory is absolute.
//
@@ -85,8 +106,8 @@ namespace build2
const dir_path& dir,
const dir_path& out,
const string& name,
- const string* ext = nullptr, // NULL means unspecified.
- const scope* = nullptr); // NULL means dir is absolute.
+ const string* ext = nullptr,
+ const scope* = nullptr);
const target*
search_existing (context&,
@@ -98,6 +119,24 @@ namespace build2
const scope* = nullptr,
const optional<project_name>& proj = nullopt);
+ const target&
+ search_new (context&,
+ const target_type&,
+ const dir_path& dir,
+ const dir_path& out,
+ const string& name,
+ const string* ext = nullptr,
+ const scope* = nullptr);
+
+ pair<target&, ulock>
+ search_new_locked (context&,
+ const target_type&,
+ const dir_path& dir,
+ const dir_path& out,
+ const string& name,
+ const string* ext = nullptr,
+ const scope* = nullptr);
+
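  // For example (an illustrative sketch, not part of this patch; out_dir is
  // a made-up name for an absolute directory known to be in out), a rule
  // could get-or-create an out-only file target like this:
  //
  //   const target& t (
  //     search_new (ctx,
  //                 file::static_type,
  //                 out_dir,       // dir (absolute, in out)
  //                 dir_path (),   // out (empty since dir is in out)
  //                 "name"));
  //
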
// As above but specify the target type as template argument.
//
template <typename T>
@@ -109,6 +148,15 @@ namespace build2
const string* ext = nullptr,
const scope* = nullptr);
+ template <typename T>
+ const T*
+ search_existing (context&,
+ const dir_path& dir,
+ const dir_path& out,
+ const string& name,
+ const string* ext = nullptr,
+ const scope* = nullptr);
+
// Search for a target identified by the name. The semantics is "as if" we
// first created a prerequisite based on this name in exactly the same way
// as the parser would and then searched based on this prerequisite. If the
@@ -116,15 +164,13 @@ namespace build2
// argument.
//
LIBBUILD2_SYMEXPORT const target&
- search (const target&, name, const scope&, const target_type* = nullptr);
+ search (const target&, name&&, const scope&, const target_type* = nullptr);
- // Note: returns NULL for unknown target types. Note that unlike the above
- // version, these ones can be called during the load and execute phases.
+ // Note: returns NULL for unknown target types. Note also that unlike the
+ // above version, these can be called during the load and execute phases.
//
LIBBUILD2_SYMEXPORT const target*
- search_existing (const name&,
- const scope&,
- const dir_path& out = dir_path ());
+ search_existing (const name&, const scope&, const dir_path& out = dir_path ());
LIBBUILD2_SYMEXPORT const target*
search_existing (const names&, const scope&);
@@ -143,17 +189,20 @@ namespace build2
action_type action;
target_type* target = nullptr;
size_t offset = 0;
+ bool first;
explicit operator bool () const {return target != nullptr;}
+ // Note: achieved offset is preserved.
+ //
void
unlock ();
// Movable-only type with move-assignment only to NULL lock.
//
target_lock () = default;
- target_lock (target_lock&&);
- target_lock& operator= (target_lock&&);
+ target_lock (target_lock&&) noexcept;
+ target_lock& operator= (target_lock&&) noexcept;
target_lock (const target_lock&) = delete;
target_lock& operator= (const target_lock&) = delete;
@@ -161,13 +210,14 @@ namespace build2
// Implementation details.
//
~target_lock ();
- target_lock (action_type, target_type*, size_t);
+ target_lock (action_type, target_type*, size_t, bool);
struct data
{
action_type action;
target_type* target;
size_t offset;
+ bool first;
};
data
@@ -205,10 +255,10 @@ namespace build2
// If the target is already applied (for this action) or executed, then no
// lock is acquired. Otherwise, unless matched is true, the target must not
- // be matched but not yet applied for this action (and if that's the case
- // and matched is true, then you get a locked target that you should
- // probably check for consistency, for exmaple, by comparing the matched
- // rule).
+ // be in the matched but not yet applied state for this action (and if
+ // that's the case and matched is true, then you get a locked target that
+ // you should probably check for consistency, for example, by comparing the
+ // matched rule).
//
// @@ MT fuzzy: what if it is already in the desired state, why assert?
// Currently we only use it with match_recipe/rule() and if it is matched
@@ -224,21 +274,27 @@ namespace build2
//
// Note that here and in find_adhoc_member() below (as well as in
// perform_clean_extra()) we use target type (as opposed to, say, type and
- // name) as the member's identity. This fits our current needs where every
+ // name) as the member's identity. This fits common needs where every
// (rule-managed) ad hoc member has a unique target type and we have no need
// for multiple members of the same type. This also allows us to support
// things like changing the ad hoc member name by declaring it in a
- // buildfile.
+ // buildfile. However, if this semantics is not appropriate, use the
+ // add_adhoc_member_identity() version below.
+ //
+ // Note that the current implementation asserts if the member target already
+ // exists but is not already a member.
//
LIBBUILD2_SYMEXPORT target&
add_adhoc_member (target&,
const target_type&,
dir_path dir,
dir_path out,
- string name);
+ string name,
+ optional<string> ext);
// If the extension is specified then it is added to the member's target
- // name.
+ // name as a second-level extension (the first-level extension, if any,
+ // comes from the target type).
//
target&
add_adhoc_member (target&, const target_type&, const char* ext = nullptr);
@@ -257,6 +313,24 @@ namespace build2
return add_adhoc_member<T> (g, T::static_type, e);
}
+ // Add an ad hoc member using the member identity (as opposed to only its
+  // type as in add_adhoc_member() above) to suppress duplicates. See also
+ // dyndep::inject_adhoc_group_member().
+ //
+ // Return the member target as well as an indication of whether it was added
+ // or was already a member. Fail if the member target already exists but is
+ // not a member since it's not possible to make it a member in an MT-safe
+ // manner.
+ //
+ LIBBUILD2_SYMEXPORT pair<target&, bool>
+ add_adhoc_member_identity (target&,
+ const target_type&,
+ dir_path dir,
+ dir_path out,
+ string name,
+ optional<string> ext,
+ const location& = location ());
+
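  // For example (an illustrative sketch, not part of this patch; the member
  // type, name, and extension are made up), a rule that discovers an extra
  // output dynamically could do:
  //
  //   pair<target&, bool> p (
  //     add_adhoc_member_identity (t,
  //                                file::static_type,
  //                                t.dir, t.out,
  //                                "extra-output",
  //                                string ("txt"),   // ext
  //                                location ()));
  //
  //   if (p.second)
  //   {
  //     // Newly added (as opposed to already a member): initialize any
  //     // member-specific state here.
  //   }
  //
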
// Find an ad hoc member of the specified target type returning NULL if not
// found.
//
@@ -295,78 +369,171 @@ namespace build2
}
// Match and apply a rule to the action/target with ambiguity detection.
- // Increment the target's dependents count, which means that you should call
- // this function with the intent to also call execute(). Return the target
- // state translating target_state::failed to the failed exception unless
- // instructed otherwise.
- //
- // The try_match() version doesn't issue diagnostics if there is no rule
- // match (but fails as match() for all other errors, like rule ambiguity,
- // inability to apply, etc). The first half of the result indicated whether
- // there was a rule match.
- //
- // The unmatch argument allows optimizations that avoid calling execute().
+  // This is the synchronous match implementation that waits for completion
+ // if the target is already being matched. Increment the target's dependents
+ // count, which means that you should call this function with the intent to
+  // also call execute*(). Translate target_state::failed to the failed
+  // exception unless instructed otherwise.
+ //
+ // The try_match_sync() version doesn't issue diagnostics if there is no
+ // rule match (but fails as match_sync() for all other errors, like rule
+ // ambiguity, inability to apply, etc). The first half of the result
+ // indicated whether there was a rule match.
+ //
+ // The unmatch argument allows optimizations that avoid calling execute*().
// If it is unmatch::unchanged then only unmatch the target if it is known
// to be unchanged after match. If it is unmatch::safe, then unmatch the
// target if it is safe (this includes unchanged or if we know that someone
// else will execute this target). Return true in first half of the pair if
- // unmatch succeeded. Always throw if failed.
+ // unmatch succeeded. Always throw if failed. Note that unmatching may not
+ // play well with options -- if unmatch succeeds, the options that have been
+ // passed to match will not be cleared.
//
enum class unmatch {none, unchanged, safe};
target_state
- match (action, const target&, bool fail = true);
+ match_sync (action, const target&,
+ uint64_t options = match_extra::all_options,
+ bool fail = true);
pair<bool, target_state>
- try_match (action, const target&, bool fail = true);
+ try_match_sync (action, const target&,
+ uint64_t options = match_extra::all_options,
+ bool fail = true);
pair<bool, target_state>
- match (action, const target&, unmatch);
+ match_sync (action, const target&,
+ unmatch,
+ uint64_t options = match_extra::all_options);
+
+ // As above but only match the target (unless already matched) without
+ // applying the match (which is normally done with match_sync()). You will
+ // most likely regret using this function.
+ //
+ LIBBUILD2_SYMEXPORT void
+ match_only_sync (action, const target&,
+ uint64_t options = match_extra::all_options);
// Start asynchronous match. Return target_state::postponed if the
- // asynchrounous operation has been started and target_state::busy if the
- // target has already been busy. Regardless of the result, match() must be
- // called in order to complete the operation (except target_state::failed).
+ // asynchronous operation has been started and target_state::busy if the
+ // target has already been busy. Regardless of the result, match_complete()
+ // must be called in order to complete the operation (except if the result
+ // is target_state::failed), which has the result semantics of match_sync().
//
// If fail is false, then return target_state::failed if the target match
// failed. Otherwise, throw the failed exception if keep_going is false and
// return target_state::failed otherwise.
//
+ // Note: same options must be passed to match_async() and match_complete().
+ //
target_state
match_async (action, const target&,
size_t start_count, atomic_count& task_count,
+ uint64_t options = match_extra::all_options,
bool fail = true);
+ target_state
+ match_complete (action, const target&,
+ uint64_t options = match_extra::all_options,
+ bool fail = true);
+
+ pair<bool, target_state>
+ match_complete (action, const target&,
+ unmatch,
+ uint64_t options = match_extra::all_options);
+
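  // The typical asynchronous pattern (an illustrative sketch, not part of
  // this patch; a, t, and the prerequisite target list pts are assumed to
  // be available and error handling is omitted):
  //
  //   context& ctx (t.ctx);
  //   size_t busy (ctx.count_busy ());
  //
  //   wait_guard wg (ctx, busy, t[a].task_count);
  //
  //   for (const target* pt: pts)
  //     match_async (a, *pt, busy, t[a].task_count);
  //
  //   wg.wait ();
  //
  //   for (const target* pt: pts)
  //     match_complete (a, *pt);
  //
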
+ // As above but without incrementing the target's dependents count. Should
+ // be executed with execute_direct_*().
+ //
+ // For async, call match_async() followed by match_direct_complete().
+ //
+ target_state
+ match_direct_sync (action, const target&,
+ uint64_t options = match_extra::all_options,
+ bool fail = true);
+
+ target_state
+ match_direct_complete (action, const target&,
+ uint64_t options = match_extra::all_options,
+ bool fail = true);
+
// Apply the specified recipe directly and without incrementing the
- // dependency counts. The target must be locked.
+ // dependency counts. The target must be locked (and it remains locked
+ // after this function returns).
+ //
+ // Note that there will be no way to rematch on options change (since there
+ // is no rule), so passing anything other than all_options is most likely a
+ // bad idea. Passing 0 for options is illegal.
//
void
- match_recipe (target_lock&, recipe);
+ match_recipe (target_lock&,
+ recipe,
+ uint64_t options = match_extra::all_options);
// Match (but do not apply) the specified rule directly and without
- // incrementing the dependency counts. The target must be locked.
+ // incrementing the dependency counts. The target must be locked (and it
+ // remains locked after this function returns).
//
void
- match_rule (target_lock&, const rule_match&);
+ match_rule (target_lock&,
+ const rule_match&,
+ uint64_t options = match_extra::all_options);
  // Match a "delegate rule" from within another rule's apply() function
// avoiding recursive matches (thus the third argument). Unless try_match is
// true, fail if no rule is found. Otherwise return empty recipe. Note that
- // unlike match(), this function does not increment the dependents count and
- // the two rules must coordinate who is using the target's data pad and/or
- // prerequisite_targets. See also the companion execute_delegate().
+ // unlike match(), this function does not increment the dependents count.
+ // See also the companion execute_delegate().
//
recipe
- match_delegate (action, target&, const rule&, bool try_match = false);
+ match_delegate (action, target&,
+ const rule&,
+ uint64_t options = match_extra::all_options,
+ bool try_match = false);
+
+  // Increment the dependency counts of the specified target.
+ //
+ void
+ match_inc_dependents (action, const target&);
- // Match a rule for the inner operation from withing the outer rule's
- // apply() function. See also the companion execute_inner().
+  // Match (synchronously) a rule for the inner operation from within the
+ // outer rule's apply() function. See also the companion execute_inner()
+ // and inner_recipe.
//
target_state
- match_inner (action, const target&);
+ match_inner (action, const target&,
+ uint64_t options = match_extra::all_options);
pair<bool, target_state>
- match_inner (action, const target&, unmatch);
+ match_inner (action, const target&,
+ unmatch,
+ uint64_t options = match_extra::all_options);
+
+ // Re-match with new options a target that has already been matched with one
+  // of the match_*() functions. Note that naturally you cannot rematch a
+ // target that you have unmatched.
+ //
+ // Note also that there is no way to check if the rematch is unnecessary
+ // (i.e., because the target is already matched with this option) because
+ // that would require MT-safety considerations (since there could be a
+ // concurrent rematch). Instead, you should rematch unconditionally and if
+ // the option is already present, it will be a cheap noop.
+ //
+ target_state
+ rematch_sync (action, const target&,
+ uint64_t options,
+ bool fail = true);
+
+ target_state
+ rematch_async (action, const target&,
+ size_t start_count, atomic_count& task_count,
+ uint64_t options,
+ bool fail = true);
+
+ target_state
+ rematch_complete (action, const target&,
+ uint64_t options,
+ bool fail = true);
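
  // For example (an illustrative sketch, not part of this patch; lt and the
  // option constant are made up), a rule that initially matched a library
  // target with a reduced set of options could later request more:
  //
  //   const uint64_t lib_deps_option = 0x2; // Hypothetical rule option.
  //
  //   rematch_sync (a, lt, lib_deps_option);
  //
  // Or, asynchronously, rematch_async() followed by rematch_complete() with
  // the same options, analogously to match_async()/match_complete() above.
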
// The standard prerequisite search and match implementations. They call
// search() (unless a custom is provided) and then match() (unless custom
@@ -396,6 +563,19 @@ namespace build2
void
match_prerequisites (action, target&, const match_search& = nullptr);
+ // As above but only do search. The match part can be performed later, for
+ // example, with the match_members() function below. The typical call
+ // sequence would be:
+ //
+ // inject_fsdir (a, t, false /* match */);
+ // search_prerequisite_members (a, t); // Potentially with filter.
+ // pattern->apply_prerequisites (a, t, bs, me); // If ad hoc pattern.
+ // <dependency synthesis> // Optional.
+ // match_members (a, t, t.prerequisite_targets[a]);
+ //
+ void
+ search_prerequisites (action, target&, const match_search& = nullptr);
+
// As above but go into group members.
//
// Note that if we are cleaning, this function doesn't go into group
@@ -411,39 +591,48 @@ namespace build2
match_prerequisite_members (action, target&,
const match_search_member& = nullptr);
+ void
+ search_prerequisite_members (action, target&,
+ const match_search_member& = nullptr);
+
// As above but omit prerequisites that are not in the specified scope.
//
void
match_prerequisites (action, target&, const scope&);
void
+ search_prerequisites (action, target&, const scope&);
+
+ void
match_prerequisite_members (action, target&, const scope&);
+ void
+ search_prerequisite_members (action, target&, const scope&);
+
// Match (already searched) members of a group or similar prerequisite-like
// dependencies. Similar in semantics to match_prerequisites(). Any marked
// target pointers are skipped.
//
- // T can only be const target* or prerequisite_target.
- //
- template <typename T>
- void
- match_members (action, target&, T const*, size_t);
+ LIBBUILD2_SYMEXPORT void
+ match_members (action, const target&, const target* const*, size_t);
template <size_t N>
inline void
- match_members (action a, target& t, const target* (&ts)[N])
+ match_members (action a, const target& t, const target* (&ts)[N])
{
match_members (a, t, ts, N);
}
- inline void
- match_members (action a,
- target& t,
- prerequisite_targets& ts,
- size_t start = 0)
- {
- match_members (a, t, ts.data () + start, ts.size () - start);
- }
+ // As above plus if the include mask (first) and value (second) are
+ // specified, then only match prerequisites that satisfy the
+ // ((prerequisite_target::include & mask) == value) condition.
+ //
+ LIBBUILD2_SYMEXPORT void
+ match_members (action,
+ const target&,
+ prerequisite_targets&,
+ size_t start = 0,
+ pair<uintptr_t, uintptr_t> include = {0, 0});
// Unless already known, match, and, if necessary, execute the group in
// order to resolve its members list. Note that even after that the member's
@@ -473,8 +662,9 @@ namespace build2
resolve_group (action, const target&);
// Inject a target as a "prerequisite target" (note: not a prerequisite) of
- // another target. Specifically, first match the prerequisite target and
- // then add it to the back of the dependent target's prerequisite_targets.
+ // another target. Specifically, match (synchronously) the prerequisite
+ // target and then add it to the back of the dependent target's
+ // prerequisite_targets.
//
void
inject (action, target&, const target& prereq);
@@ -482,60 +672,78 @@ namespace build2
// Inject dependency on the target's directory fsdir{}, unless it is in the
// src tree or is outside of any project (say, for example, an installation
// directory). If the parent argument is true, then inject the parent
- // directory of a target that is itself a directory (name is empty). Return
- // the injected target or NULL. Normally this function is called from the
- // rule's apply() function.
+ // directory of a target that is itself a directory (name is empty). Match
+ // unless match is false and return the injected target or NULL. Normally
+ // this function is called from the rule's apply() function.
//
- // As an extension, this function will also search for an existing fsdir{}
- // prerequisite for the directory and if one exists, return that (even if
- // the target is in src tree). This can be used, for example, to place
- // output into an otherwise non-existent directory.
+ // The match=false semantics is useful when you wish to first collect all
+ // the prerequisites targets and then match them all as a separate step, for
+ // example, with match_members().
+ //
+ // As an extension, unless prereq is false, this function will also search
+ // for an existing fsdir{} prerequisite for the directory and if one exists,
+ // return that (even if the target is in the src tree). In this case, the
+ // injected fsdir{} (if any) must be the first prerequisite in this target's
+ // prerequisite_targets, which is relied upon by the match_prerequisite*()
+  // family of functions to suppress the duplicate addition.
+ //
+  // Note that the explicit fsdir{} prerequisite is used to place output
+  // into an otherwise non-existent (in src) directory.
//
LIBBUILD2_SYMEXPORT const fsdir*
- inject_fsdir (action, target&, bool parent = true);
+ inject_fsdir (action, target&,
+ bool match = true,
+ bool prereq = true,
+ bool parent = true);
- // Execute the action on target, assuming a rule has been matched and the
- // recipe for this action has been set. This is the synchrounous executor
- // implementation (but may still return target_state::busy if the target
- // is already being executed). Decrements the dependents count.
- //
- // Note: does not translate target_state::failed to the failed exception.
+ // As above, but match the injected fsdir{} target directly (that is,
+ // without incrementing the dependency counts).
//
- target_state
- execute (action, const target&);
+ LIBBUILD2_SYMEXPORT const fsdir*
+ inject_fsdir_direct (action, target&, bool prereq = true, bool parent = true);
- // As above but wait for completion if the target is busy and translate
- // target_state::failed to the failed exception.
+ // Execute the action on target, assuming a rule has been matched and the
+  // recipe for this action has been set. This is the synchronous executor
+ // implementation that waits for completion if the target is already being
+ // executed. Translate target_state::failed to the failed exception unless
+ // fail is false.
//
target_state
- execute_wait (action, const target&);
+ execute_sync (action, const target&, bool fail = true);
// As above but start asynchronous execution. Return target_state::unknown
  // if the asynchronous execution has been started and target_state::busy if
// the target has already been busy.
//
- // If fail is false, then return target_state::failed if the target match
- // failed. Otherwise, throw the failed exception if keep_going is false and
- // return target_state::failed otherwise.
+ // If fail is false, then return target_state::failed if the target
+ // execution failed. Otherwise, throw the failed exception if keep_going is
+ // false and return target_state::failed otherwise. Regardless of the
+ // result, execute_complete() must be called in order to complete the
+ // operation (except if the result is target_state::failed), which has the
+ // result semantics of execute_sync().
//
target_state
execute_async (action, const target&,
size_t start_count, atomic_count& task_count,
bool fail = true);
- // Execute the recipe obtained with match_delegate(). Note that the target's
- // state is neither checked nor updated by this function. In other words,
- // the appropriate usage is to call this function from another recipe and to
- // factor the obtained state into the one returned.
+ target_state
+ execute_complete (action, const target&);
+
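  // The asynchronous pattern mirrors match_async()/match_complete() (an
  // illustrative sketch, not part of this patch; ctx, busy, pts, and the
  // accumulated state r are assumed as in the surrounding implementations):
  //
  //   wait_guard wg (ctx, busy, t[a].task_count);
  //
  //   for (const target* pt: pts)
  //     execute_async (a, *pt, busy, t[a].task_count);
  //
  //   wg.wait ();
  //
  //   for (const target* pt: pts)
  //     r |= execute_complete (a, *pt);
  //
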
+ // Execute (synchronously) the recipe obtained with match_delegate(). Note
+ // that the target's state is neither checked nor updated by this function.
+ // In other words, the appropriate usage is to call this function from
+ // another recipe and to factor the obtained state into the one returned.
//
target_state
execute_delegate (const recipe&, action, const target&);
- // Execute the inner operation matched with match_inner(). Note that the
- // returned target state is for the inner operation. The appropriate usage
- // is to call this function from the outer operation's recipe and to factor
- // the obtained state into the one returned (similar to how we do it for
- // prerequisites).
+ // Execute (synchronously) the inner operation matched with match_inner().
+ // Note that the returned target state is for the inner operation. The
+ // appropriate usage is to call this function from the outer operation's
+ // recipe and to factor the obtained state into the one returned (similar to
+ // how we do it for prerequisites). Or, if factoring is not needed, simply
+ // return inner_recipe as outer recipe.
//
// Note: waits for the completion if the target is busy and translates
// target_state::failed to the failed exception.
@@ -548,11 +756,63 @@ namespace build2
// relationship (so no dependents count is decremented) and execution order
// (so this function never returns the postponed target state).
//
- // Note: waits for the completion if the target is busy and translates
- // target_state::failed to the failed exception.
+ // The first version waits for the completion if the target is busy and
+ // translates target_state::failed to the failed exception.
//
- LIBBUILD2_SYMEXPORT target_state
- execute_direct (action, const target&);
+ target_state
+ execute_direct_sync (action, const target&, bool fail = true);
+
+ target_state
+ execute_direct_async (action, const target&,
+ size_t start_count, atomic_count& task_count,
+ bool fail = true);
+
+ // Update the target during the match phase (by switching the phase and
+ // calling execute_direct()). Return true if the target has changed or, if
+ // the passed timestamp is not timestamp_unknown, it is older than the
+ // target.
+ //
+ // Note that such a target must still be updated normally during the execute
+ // phase in order to keep the dependency counts straight (at which point the
+ // target state/timestamp will be re-incorporated into the result). Unless
+ // it was matched direct.
+ //
+ LIBBUILD2_SYMEXPORT bool
+ update_during_match (tracer&,
+ action, const target&,
+ timestamp = timestamp_unknown);
+
+ // As above, but update all the targets in prerequisite_targets that have
+ // the specified mask in prerequisite_target::include. Return true if any of
+ // them have changed. If mask is 0, then update all the targets.
+ //
+ // Note that this function spoils prerequisite_target::data (which is used
+ // for temporary storage). But it resets data to 0 once done.
+ //
+ LIBBUILD2_SYMEXPORT bool
+ update_during_match_prerequisites (
+ tracer&,
+ action, target&,
+ uintptr_t mask = prerequisite_target::include_udm);
+
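  // For example (an illustrative sketch, not part of this patch; the tracer
  // name is made up), a rule that extracts dynamic dependencies during match
  // could do, from its apply() function:
  //
  //   tracer trace ("mymodule::myrule::apply");
  //
  //   if (update_during_match_prerequisites (trace, a, t))
  //   {
  //     // One of the marked prerequisites has changed: the target is out
  //     // of date regardless of its own timestamp.
  //   }
  //
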
+ // Equivalent functions for clean. Note that if possible you should leave
+ // cleaning to normal execute and these functions should only be used in
+ // special cases where this is not possible.
+ //
+ // Note also that neither function should be called on fsdir{} since it's
+ // hard to guarantee such an execution won't be too early (see the
+ // implementation for details). If you do need to clean fsdir{} during
+ // match, use fsdir_rule::perform_clean_direct() instead.
+ //
+ LIBBUILD2_SYMEXPORT bool
+ clean_during_match (tracer&,
+ action, const target&);
+
+ LIBBUILD2_SYMEXPORT bool
+ clean_during_match_prerequisites (
+ tracer&,
+ action, target&,
+ uintptr_t mask = prerequisite_target::include_udm);
// The default prerequisite execute implementation. Call execute_async() on
// each non-ignored (non-NULL) prerequisite target in a loop and then wait
@@ -615,8 +875,8 @@ namespace build2
// case if they are up to something tricky (like recursively linking liba{}
// prerequisites).
//
- // Note that because we use mtime, this function should normally only be
- // used in the perform_update action (which is straight).
+ // Note that because we use mtime, this function can only be used for the
+ // perform_update action.
//
using execute_filter = function<bool (const target&, size_t pos)>;
@@ -626,6 +886,18 @@ namespace build2
const execute_filter& = nullptr,
size_t count = 0);
+ // As above, but execute prerequisites in reverse.
+ //
+  // Sometimes it may be advantageous to execute prerequisites in reverse, for
+ // example, to have more immediate incremental compilation or more accurate
+ // progress. See cc::link_rule for background.
+ //
+ optional<target_state>
+ reverse_execute_prerequisites (action, const target&,
+ const timestamp&,
+ const execute_filter& = nullptr,
+ size_t count = 0);
+
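  // A perform_update() recipe that prefers the reverse order might look
  // along these lines (an illustrative sketch, not part of this patch):
  //
  //   target_state
  //   perform_update (action a, const target& xt)
  //   {
  //     const file& t (xt.as<file> ());
  //
  //     optional<target_state> ps (
  //       reverse_execute_prerequisites (a, t, t.load_mtime ()));
  //
  //     if (!ps)
  //     {
  //       // Out of date: run the update commands and set the new mtime.
  //     }
  //
  //     return ps ? *ps : target_state::changed;
  //   }
  //
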
// Another version of the above that does two extra things for the caller:
// it determines whether the action needs to be executed on the target based
// on the passed timestamp and finds a prerequisite of the specified type
@@ -690,8 +962,9 @@ namespace build2
// Call straight or reverse depending on the current mode.
//
+ template <typename T>
target_state
- execute_members (action, const target&, const target*[], size_t);
+ execute_members (action, const target&, T[], size_t);
template <size_t N>
inline target_state
@@ -731,8 +1004,8 @@ namespace build2
LIBBUILD2_SYMEXPORT target_state
group_action (action, const target&);
- // Standard perform(clean) action implementation for the file target
- // (or derived).
+ // Standard perform(clean) action implementation for the file target (or
+ // derived). Note: also cleans ad hoc group members, if any.
//
LIBBUILD2_SYMEXPORT target_state
perform_clean (action, const target&);
@@ -742,8 +1015,8 @@ namespace build2
LIBBUILD2_SYMEXPORT target_state
perform_clean_depdb (action, const target&);
- // As above but clean the target group. The group should be an mtime_target
- // and members should be files.
+ // As above but clean the (non-ad hoc) target group. The group should be an
+ // mtime_target and members should be files.
//
LIBBUILD2_SYMEXPORT target_state
perform_clean_group (action, const target&);
@@ -754,21 +1027,22 @@ namespace build2
LIBBUILD2_SYMEXPORT target_state
perform_clean_group_depdb (action, const target&);
- // Helper for custom perform(clean) implementations that cleans extra files
- // and directories (recursively) specified as a list of either absolute
- // paths or "path derivation directives". The directive string can be NULL,
- // or empty in which case it is ignored. If the last character in a
- // directive is '/', then the resulting path is treated as a directory
- // rather than a file. The directive can start with zero or more '-'
- // characters which indicate the number of extensions that should be
- // stripped before the new extension (if any) is added (so if you want to
- // strip the extension, specify just "-"). For example:
+ // Helpers for custom perform(clean) implementations that, besides the
+ // target and group members, can also clean extra files and directories
+ // (recursively) specified as a list of either absolute paths or "path
+ // derivation directives". The directive string can be NULL, or empty in
+ // which case it is ignored. If the last character in a directive is '/',
+ // then the resulting path is treated as a directory rather than a file. The
+ // directive can start with zero or more '-' characters which indicate the
+ // number of extensions that should be stripped before the new extension (if
+ // any) is added (so if you want to strip the extension, specify just
+ // "-"). For example:
//
// perform_clean_extra (a, t, {".d", ".dlls/", "-.dll"});
//
// The extra files/directories are removed first in the specified order
- // followed by the ad hoc group member, then target itself, and, finally,
- // the prerequisites in the reverse order.
+ // followed by the group member, then target itself, and, finally, the
+ // prerequisites in the reverse order.
//
// You can also clean extra files derived from ad hoc group members that are
// "indexed" using their target types (see add/find_adhoc_member() for
@@ -787,21 +1061,46 @@ namespace build2
using clean_adhoc_extras = small_vector<clean_adhoc_extra, 2>;
+ // If show_adhoc_members is true, then print the entire ad hoc group instead
+ // of just the primary member at verbosity level 1 (see print_diag() for
+ // details). Note that the default is false because normally a rule
+  // implemented in C++ would only use an ad hoc group for subordinate
+  // members (.pdb, etc) and would use a dedicated target group type if the
+  // members are equal.
+ //
LIBBUILD2_SYMEXPORT target_state
perform_clean_extra (action, const file&,
const clean_extras&,
- const clean_adhoc_extras& = {});
+ const clean_adhoc_extras& = {},
+ bool show_adhoc_members = false);
inline target_state
perform_clean_extra (action a, const file& f,
- initializer_list<const char*> e)
+ initializer_list<const char*> e,
+ bool show_adhoc_members = false)
+ {
+ return perform_clean_extra (a, f, clean_extras (e), {}, show_adhoc_members);
+ }
+
+ // Similar to perform_clean_group() but with extras similar to
+ // perform_clean_extra(). Note that the extras are derived from the group
+ // "path" (g.dir / g.name).
+ //
+ LIBBUILD2_SYMEXPORT target_state
+ perform_clean_group_extra (action, const mtime_target&, const clean_extras&);
+
+ inline target_state
+ perform_clean_group_extra (action a, const mtime_target& g,
+ initializer_list<const char*> e)
{
- return perform_clean_extra (a, f, clean_extras (e));
+ return perform_clean_group_extra (a, g, clean_extras (e));
}
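
  // For example (an illustrative sketch, not part of this patch), a group
  // whose members share a single .d file derived from the group "path"
  // could implement its clean recipe as:
  //
  //   static target_state
  //   clean_group (action a, const target& t)
  //   {
  //     return perform_clean_group_extra (a, t.as<mtime_target> (), {".d"});
  //   }
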
// Update/clean a backlink issuing appropriate diagnostics at appropriate
// levels depending on the overload and the changed argument.
//
+  // Note that these functions assume (target.leaf () == link.leaf ()).
+ //
enum class backlink_mode
{
link, // Make a symbolic link if possible, hard otherwise.
@@ -824,6 +1123,8 @@ namespace build2
bool changed,
backlink_mode = backlink_mode::link);
+ // Note: verbosity should be 2 or greater.
+ //
LIBBUILD2_SYMEXPORT void
update_backlink (context&,
const path& target,
@@ -831,6 +1132,8 @@ namespace build2
backlink_mode = backlink_mode::link,
uint16_t verbosity = 3);
+ // Note: verbosity should be 2 or greater.
+ //
LIBBUILD2_SYMEXPORT void
clean_backlink (context&,
const path& link,
diff --git a/libbuild2/algorithm.ixx b/libbuild2/algorithm.ixx
index d64747d..836dbed 100644
--- a/libbuild2/algorithm.ixx
+++ b/libbuild2/algorithm.ixx
@@ -45,6 +45,39 @@ namespace build2
k.proj, {&tt, k.tk.dir, k.tk.out, k.tk.name, k.tk.ext}, k.scope});
}
+ inline const target*
+ search_existing (context& ctx,
+ const target_type& tt,
+ const prerequisite_key& k)
+ {
+ return search_existing (
+ ctx,
+ prerequisite_key {
+ k.proj, {&tt, k.tk.dir, k.tk.out, k.tk.name, k.tk.ext}, k.scope});
+ }
+
+ inline const target&
+ search_new (context& ctx,
+ const target_type& tt,
+ const prerequisite_key& k)
+ {
+ return search_new (
+ ctx,
+ prerequisite_key {
+ k.proj, {&tt, k.tk.dir, k.tk.out, k.tk.name, k.tk.ext}, k.scope});
+ }
+
+ inline pair<target&, ulock>
+ search_new_locked (context& ctx,
+ const target_type& tt,
+ const prerequisite_key& k)
+ {
+ return search_new_locked (
+ ctx,
+ prerequisite_key {
+ k.proj, {&tt, k.tk.dir, k.tk.out, k.tk.name, k.tk.ext}, k.scope});
+ }
+
inline const target&
search (const target& t,
const target_type& type,
@@ -110,6 +143,48 @@ namespace build2
scope});
}
+ inline const target&
+ search_new (context& ctx,
+ const target_type& type,
+ const dir_path& dir,
+ const dir_path& out,
+ const string& name,
+ const string* ext,
+ const scope* scope)
+ {
+ return search_new (
+ ctx,
+ prerequisite_key {
+ nullopt,
+ {
+ &type,
+ &dir, &out, &name,
+ ext != nullptr ? optional<string> (*ext) : nullopt
+ },
+ scope});
+ }
+
+ inline pair<target&, ulock>
+ search_new_locked (context& ctx,
+ const target_type& type,
+ const dir_path& dir,
+ const dir_path& out,
+ const string& name,
+ const string* ext,
+ const scope* scope)
+ {
+ return search_new_locked (
+ ctx,
+ prerequisite_key {
+ nullopt,
+ {
+ &type,
+ &dir, &out, &name,
+ ext != nullptr ? optional<string> (*ext) : nullopt
+ },
+ scope});
+ }
+
template <typename T>
inline const T&
search (const target& t,
@@ -123,15 +198,32 @@ namespace build2
t, T::static_type, dir, out, name, ext, scope).template as<T> ();
}
+ template <typename T>
+ inline const T*
+ search_existing (context& ctx,
+ const dir_path& dir,
+ const dir_path& out,
+ const string& name,
+ const string* ext,
+ const scope* scope)
+ {
+ const target* r (
+ search_existing (
+ ctx, T::static_type, dir, out, name, ext, scope));
+ return r != nullptr ? &r->template as<T> () : nullptr;
+ }
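// Editorial example (not part of the original patch): a hypothetical typed
// lookup of an already-existing file{} target, assuming a context ctx and a
// directory d:
//
//   const file* f (
//     search_existing<file> (ctx, d, dir_path () /* out */, "foo",
//                            nullptr /* ext */, nullptr /* scope */));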
+
LIBBUILD2_SYMEXPORT target_lock
- lock_impl (action, const target&, optional<scheduler::work_queue>);
+ lock_impl (action, const target&,
+ optional<scheduler::work_queue>,
+ uint64_t = 0);
LIBBUILD2_SYMEXPORT void
unlock_impl (action, target&, size_t);
inline target_lock::
- target_lock (action_type a, target_type* t, size_t o)
- : action (a), target (t), offset (o)
+ target_lock (action_type a, target_type* t, size_t o, bool f)
+ : action (a), target (t), offset (o), first (f)
{
if (target != nullptr)
prev = stack (this);
@@ -170,7 +262,7 @@ namespace build2
inline auto target_lock::
release () -> data
{
- data r {action, target, offset};
+ data r {action, target, offset, first};
if (target != nullptr)
{
@@ -194,7 +286,7 @@ namespace build2
}
inline target_lock::
- target_lock (target_lock&& x)
+ target_lock (target_lock&& x) noexcept
: action (x.action), target (x.target), offset (x.offset)
{
if (target != nullptr)
@@ -214,7 +306,7 @@ namespace build2
}
inline target_lock& target_lock::
- operator= (target_lock&& x)
+ operator= (target_lock&& x) noexcept
{
if (this != &x)
{
@@ -282,7 +374,7 @@ namespace build2
n += e;
}
- return add_adhoc_member (t, tt, t.dir, t.out, move (n));
+ return add_adhoc_member (t, tt, t.dir, t.out, move (n), nullopt /* ext */);
}
inline target*
@@ -302,30 +394,37 @@ namespace build2
}
LIBBUILD2_SYMEXPORT const rule_match*
- match_rule (action, target&, const rule* skip, bool try_match = false);
+ match_rule_impl (action, target&,
+ uint64_t options,
+ const rule* skip,
+ bool try_match = false,
+ match_extra* = nullptr);
LIBBUILD2_SYMEXPORT recipe
apply_impl (action, target&, const rule_match&);
LIBBUILD2_SYMEXPORT pair<bool, target_state>
- match (action, const target&, size_t, atomic_count*, bool try_match = false);
+ match_impl (action, const target&,
+ uint64_t options,
+ size_t, atomic_count*,
+ bool try_match = false);
inline void
- match_inc_dependens (action a, const target& t)
+ match_inc_dependents (action a, const target& t)
{
t.ctx.dependency_count.fetch_add (1, memory_order_relaxed);
t[a].dependents.fetch_add (1, memory_order_release);
}
inline target_state
- match (action a, const target& t, bool fail)
+ match_sync (action a, const target& t, uint64_t options, bool fail)
{
assert (t.ctx.phase == run_phase::match);
- target_state r (match (a, t, 0, nullptr).second);
+ target_state r (match_impl (a, t, options, 0, nullptr).second);
if (r != target_state::failed)
- match_inc_dependens (a, t);
+ match_inc_dependents (a, t);
else if (fail)
throw failed ();
@@ -333,17 +432,17 @@ namespace build2
}
inline pair<bool, target_state>
- try_match (action a, const target& t, bool fail)
+ try_match_sync (action a, const target& t, uint64_t options, bool fail)
{
assert (t.ctx.phase == run_phase::match);
pair<bool, target_state> r (
- match (a, t, 0, nullptr, true /* try_match */));
+ match_impl (a, t, options, 0, nullptr, true /* try_match */));
if (r.first)
{
if (r.second != target_state::failed)
- match_inc_dependens (a, t);
+ match_inc_dependents (a, t);
else if (fail)
throw failed ();
}
@@ -352,11 +451,11 @@ namespace build2
}
inline pair<bool, target_state>
- match (action a, const target& t, unmatch um)
+ match_sync (action a, const target& t, unmatch um, uint64_t options)
{
assert (t.ctx.phase == run_phase::match);
- target_state s (match (a, t, 0, nullptr).second);
+ target_state s (match_impl (a, t, options, 0, nullptr).second);
if (s == target_state::failed)
throw failed ();
@@ -383,42 +482,90 @@ namespace build2
// cannot change their mind).
//
if ((s == target_state::unchanged && t.group == nullptr) ||
- t[a].dependents.load (memory_order_consume) != 0)
+ t[a].dependents.load (memory_order_relaxed) != 0)
return make_pair (true, s);
break;
}
}
- match_inc_dependens (a, t);
+ match_inc_dependents (a, t);
return make_pair (false, s);
}
inline target_state
match_async (action a, const target& t,
size_t sc, atomic_count& tc,
+ uint64_t options,
bool fail)
{
context& ctx (t.ctx);
assert (ctx.phase == run_phase::match);
- target_state r (match (a, t, sc, &tc).second);
+ target_state r (match_impl (a, t, options, sc, &tc).second);
+
+ if (r == target_state::failed && fail && !ctx.keep_going)
+ throw failed ();
+
+ return r;
+ }
+
+ inline target_state
+ match_complete (action a, const target& t, uint64_t options, bool fail)
+ {
+ return match_sync (a, t, options, fail);
+ }
+
+ inline pair<bool, target_state>
+ match_complete (action a, const target& t, unmatch um, uint64_t options)
+ {
+ return match_sync (a, t, um, options);
+ }
+
+ inline target_state
+ match_direct_sync (action a, const target& t, uint64_t options, bool fail)
+ {
+ assert (t.ctx.phase == run_phase::match);
- if (fail && !ctx.keep_going && r == target_state::failed)
+ target_state r (match_impl (a, t, options, 0, nullptr).second);
+
+ if (r == target_state::failed && fail)
throw failed ();
return r;
}
- // Clear rule match-specific target data.
+ inline target_state
+ match_direct_complete (action a, const target& t,
+ uint64_t options,
+ bool fail)
+ {
+ return match_direct_sync (a, t, options, fail);
+ }
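// Editorial example (not part of the original patch): with these primitives a
// rule would typically start matching its prerequisite targets asynchronously
// and then complete them, along these lines (pts being the already-searched
// prerequisite targets; default options/fail arguments assumed):
//
//   wait_guard wg (ctx, ctx.count_busy (), t[a].task_count);
//
//   for (const target* pt: pts)
//     match_async (a, *pt, ctx.count_busy (), t[a].task_count);
//
//   wg.wait ();
//
//   for (const target* pt: pts)
//     match_complete (a, *pt);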
+
+ // Clear rule match-specific target data (except match_extra).
//
inline void
clear_target (action a, target& t)
{
- t[a].vars.clear ();
+ target::opstate& s (t.state[a]);
+ s.recipe = nullptr;
+ s.recipe_keep = false;
+ s.resolve_counted = false;
+ s.vars.clear ();
t.prerequisite_targets[a].clear ();
- if (a.inner ())
- t.clear_data ();
+ }
+
+ LIBBUILD2_SYMEXPORT void
+ set_rule_trace (target_lock&, const rule_match*);
+
+ inline void
+ set_rule (target_lock& l, const rule_match* r)
+ {
+ if (l.target->ctx.trace_match == nullptr)
+ (*l.target)[l.action].rule = r;
+ else
+ set_rule_trace (l, r);
}
inline void
@@ -428,6 +575,7 @@ namespace build2
target::opstate& s (t[l.action]);
s.recipe = move (r);
+ s.recipe_group_action = false;
// If this is a noop recipe, then mark the target unchanged to allow for
// some optimizations.
@@ -453,69 +601,112 @@ namespace build2
// likely. The alternative (trying to "merge" the count keeping track of
// whether inner and/or outer is noop) gets hairy rather quickly.
//
- if (l.action.inner ())
+ if (f != nullptr && *f == &group_action)
+ s.recipe_group_action = true;
+ else
{
- if (f == nullptr || *f != &group_action)
+ if (l.action.inner ())
t.ctx.target_count.fetch_add (1, memory_order_relaxed);
}
}
}
inline void
- match_recipe (target_lock& l, recipe r)
+ match_recipe (target_lock& l, recipe r, uint64_t options)
{
- assert (l.target != nullptr &&
- l.offset != target::offset_matched &&
+ assert (options != 0 &&
+ l.target != nullptr &&
+ l.offset < target::offset_matched &&
l.target->ctx.phase == run_phase::match);
+ match_extra& me ((*l.target)[l.action].match_extra);
+
+ me.reinit (false /* fallback */);
+ me.cur_options = options; // Already applied, so cur_, not new_options.
+ me.cur_options_.store (me.cur_options, memory_order_relaxed);
clear_target (l.action, *l.target);
- (*l.target)[l.action].rule = nullptr; // No rule.
+ set_rule (l, nullptr); // No rule.
set_recipe (l, move (r));
l.offset = target::offset_applied;
}
inline void
- match_rule (target_lock& l, const rule_match& r)
+ match_rule (target_lock& l, const rule_match& r, uint64_t options)
{
assert (l.target != nullptr &&
- l.offset != target::offset_matched &&
+ l.offset < target::offset_matched &&
l.target->ctx.phase == run_phase::match);
+ match_extra& me ((*l.target)[l.action].match_extra);
+
+ me.reinit (false /* fallback */);
+ me.new_options = options;
clear_target (l.action, *l.target);
- (*l.target)[l.action].rule = &r;
+ set_rule (l, &r);
l.offset = target::offset_matched;
}
inline recipe
- match_delegate (action a, target& t, const rule& dr, bool try_match)
+ match_delegate (action a, target& t,
+ const rule& dr,
+ uint64_t options,
+ bool try_match)
{
assert (t.ctx.phase == run_phase::match);
// Note: we don't touch any of the t[a] state since that was/will be set
// for the delegating rule.
//
- const rule_match* r (match_rule (a, t, &dr, try_match));
+ const rule_match* r (match_rule_impl (a, t, options, &dr, try_match));
return r != nullptr ? apply_impl (a, t, *r) : empty_recipe;
}
inline target_state
- match_inner (action a, const target& t)
+ match_inner (action a, const target& t, uint64_t options)
{
// In a sense this is like any other dependency.
//
assert (a.outer ());
- return match (a.inner_action (), t);
+ return match_sync (a.inner_action (), t, options);
}
inline pair<bool, target_state>
- match_inner (action a, const target& t, unmatch um)
+ match_inner (action a, const target& t, unmatch um, uint64_t options)
{
assert (a.outer ());
- return match (a.inner_action (), t, um);
+ return match_sync (a.inner_action (), t, um, options);
+ }
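// Editorial example (not part of the original patch): an outer operation rule
// (for example, install) would normally match and later execute the inner
// (update) action on the same target, roughly:
//
//   recipe
//   apply (action a, target& t) const
//   {
//     match_inner (a, t);
//     return [] (action a, const target& t)
//     {
//       return execute_inner (a, t);
//     };
//   }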
+
+ // Note: rematch is basically normal match but without the counts increment,
+ // so we just delegate to match_direct_*().
+ //
+ inline target_state
+ rematch_sync (action a, const target& t,
+ uint64_t options,
+ bool fail)
+ {
+ return match_direct_sync (a, t, options, fail);
+ }
+
+ inline target_state
+ rematch_async (action a, const target& t,
+ size_t start_count, atomic_count& task_count,
+ uint64_t options,
+ bool fail)
+ {
+ return match_async (a, t, start_count, task_count, options, fail);
+ }
+
+ inline target_state
+ rematch_complete (action a, const target& t,
+ uint64_t options,
+ bool fail)
+ {
+ return match_direct_complete (a, t, options, fail);
}
LIBBUILD2_SYMEXPORT void
- resolve_group_impl (action, const target&, target_lock);
+ resolve_group_impl (target_lock&&);
inline const target*
resolve_group (action a, const target& t)
@@ -535,7 +726,7 @@ namespace build2
// then unlock and return.
//
if (t.group == nullptr && l.offset < target::offset_tried)
- resolve_group_impl (a, t, move (l));
+ resolve_group_impl (move (l));
break;
}
@@ -549,17 +740,21 @@ namespace build2
inline void
inject (action a, target& t, const target& p)
{
- match (a, p);
+ match_sync (a, p);
t.prerequisite_targets[a].emplace_back (&p);
}
LIBBUILD2_SYMEXPORT void
- match_prerequisites (action, target&, const match_search&, const scope*);
+ match_prerequisites (action, target&,
+ const match_search&,
+ const scope*,
+ bool search_only);
LIBBUILD2_SYMEXPORT void
match_prerequisite_members (action, target&,
const match_search_member&,
- const scope*);
+ const scope*,
+ bool search_only);
inline void
match_prerequisites (action a, target& t, const match_search& ms)
@@ -570,7 +765,21 @@ namespace build2
ms,
(a.operation () != clean_id || t.is_a<alias> ()
? nullptr
- : &t.root_scope ()));
+ : &t.root_scope ()),
+ false);
+ }
+
+ inline void
+ search_prerequisites (action a, target& t, const match_search& ms)
+ {
+ match_prerequisites (
+ a,
+ t,
+ ms,
+ (a.operation () != clean_id || t.is_a<alias> ()
+ ? nullptr
+ : &t.root_scope ()),
+ true);
}
inline void
@@ -578,13 +787,46 @@ namespace build2
const match_search_member& msm)
{
if (a.operation () != clean_id || t.is_a<alias> ())
- match_prerequisite_members (a, t, msm, nullptr);
+ match_prerequisite_members (a, t, msm, nullptr, false);
+ else
+ {
+ // Note that here we don't iterate over members even for see-through
+ // groups since the group target should clean everything up. A bit of an
+ // optimization.
+ //
+ // @@ TMP: I wonder if this still holds for the new group semantics
+ // we have in Qt automoc? Also below.
+ //
+ match_search ms (
+ msm
+ ? [&msm] (action a,
+ const target& t,
+ const prerequisite& p,
+ include_type i)
+ {
+ return msm (a, t, prerequisite_member {p, nullptr}, i);
+ }
+ : match_search ());
+
+ match_prerequisites (a, t, ms, &t.root_scope (), false);
+ }
+ }
+
+ inline void
+ search_prerequisite_members (action a, target& t,
+ const match_search_member& msm)
+ {
+ if (a.operation () != clean_id || t.is_a<alias> ())
+ match_prerequisite_members (a, t, msm, nullptr, true);
else
{
// Note that here we don't iterate over members even for see-through
// groups since the group target should clean everything up. A bit of an
// optimization.
//
+ // @@ TMP: I wonder if this still holds for the new group semantics
+ // we have in Qt automoc? Also above.
+ //
match_search ms (
msm
? [&msm] (action a,
@@ -596,40 +838,55 @@ namespace build2
}
: match_search ());
- match_prerequisites (a, t, ms, &t.root_scope ());
+ match_prerequisites (a, t, ms, &t.root_scope (), true);
}
}
inline void
match_prerequisites (action a, target& t, const scope& s)
{
- match_prerequisites (a, t, nullptr, &s);
+ match_prerequisites (a, t, nullptr, &s, false);
}
inline void
- match_prerequisite_members (action a, target& t, const scope& s)
+ search_prerequisites (action a, target& t, const scope& s)
{
- match_prerequisite_members (a, t, nullptr, &s);
+ match_prerequisites (a, t, nullptr, &s, true);
}
- LIBBUILD2_SYMEXPORT target_state
- execute (action, const target&, size_t, atomic_count*);
+ inline void
+ match_prerequisite_members (action a, target& t, const scope& s)
+ {
+ match_prerequisite_members (a, t, nullptr, &s, false);
+ }
- inline target_state
- execute (action a, const target& t)
+ inline void
+ search_prerequisite_members (action a, target& t, const scope& s)
{
- return execute (a, t, 0, nullptr);
+ match_prerequisite_members (a, t, nullptr, &s, true);
}
+ LIBBUILD2_SYMEXPORT target_state
+ execute_impl (action, const target&, size_t, atomic_count*);
+
inline target_state
- execute_wait (action a, const target& t)
+ execute_sync (action a, const target& t, bool fail)
{
- if (execute (a, t) == target_state::busy)
- t.ctx.sched.wait (t.ctx.count_executed (),
+ target_state r (execute_impl (a, t, 0, nullptr));
+
+ if (r == target_state::busy)
+ {
+ t.ctx.sched->wait (t.ctx.count_executed (),
t[a].task_count,
scheduler::work_none);
- return t.executed_state (a);
+ r = t.executed_state (a, false);
+ }
+
+ if (r == target_state::failed && fail)
+ throw failed ();
+
+ return r;
}
inline target_state
@@ -637,9 +894,62 @@ namespace build2
size_t sc, atomic_count& tc,
bool fail)
{
- target_state r (execute (a, t, sc, &tc));
+ target_state r (execute_impl (a, t, sc, &tc));
+
+ if (r == target_state::failed && fail && !t.ctx.keep_going)
+ throw failed ();
+
+ return r;
+ }
+
+ inline target_state
+ execute_complete (action a, const target& t)
+ {
+ // Note: standard operation execute() sidesteps this and calls
+ // executed_state() directly.
+
+ context& ctx (t.ctx);
+
+ // If the target is still busy, wait for its completion.
+ //
+ ctx.sched->wait (ctx.count_executed (),
+ t[a].task_count,
+ scheduler::work_none);
+
+ return t.executed_state (a);
+ }
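// Editorial example (not part of the original patch): mirroring the match
// phase, a recipe can start prerequisite execution asynchronously and then
// wait for and accumulate the results (pts being the prerequisite targets):
//
//   for (const target* pt: pts)
//     execute_async (a, *pt, ctx.count_busy (), t[a].task_count);
//
//   target_state r (target_state::unchanged);
//   for (const target* pt: pts)
//     r |= execute_complete (a, *pt);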
+
+ LIBBUILD2_SYMEXPORT target_state
+ execute_direct_impl (action, const target&, size_t, atomic_count*);
+
+ inline target_state
+ execute_direct_sync (action a, const target& t, bool fail)
+ {
+ target_state r (execute_direct_impl (a, t, 0, nullptr));
- if (fail && !t.ctx.keep_going && r == target_state::failed)
+ if (r == target_state::busy)
+ {
+ t.ctx.sched->wait (t.ctx.count_executed (),
+ t[a].task_count,
+ scheduler::work_none);
+
+ r = t.executed_state (a, false);
+ }
+
+ if (r == target_state::failed && fail)
+ throw failed ();
+
+ return r;
+ }
+
+ inline target_state
+ execute_direct_async (action a, const target& t,
+ size_t sc, atomic_count& tc,
+ bool fail)
+ {
+ target_state r (execute_direct_impl (a, t, sc, &tc));
+
+ if (r == target_state::failed && fail && !t.ctx.keep_going)
throw failed ();
return r;
@@ -655,7 +965,7 @@ namespace build2
execute_inner (action a, const target& t)
{
assert (a.outer ());
- return execute_wait (a.inner_action (), t);
+ return execute_sync (a.inner_action (), t);
}
inline target_state
@@ -731,6 +1041,12 @@ namespace build2
const timestamp&, const execute_filter&,
size_t);
+ LIBBUILD2_SYMEXPORT pair<optional<target_state>, const target*>
+ reverse_execute_prerequisites (const target_type*,
+ action, const target&,
+ const timestamp&, const execute_filter&,
+ size_t);
+
inline optional<target_state>
execute_prerequisites (action a, const target& t,
const timestamp& mt, const execute_filter& ef,
@@ -739,6 +1055,14 @@ namespace build2
return execute_prerequisites (nullptr, a, t, mt, ef, n).first;
}
+ inline optional<target_state>
+ reverse_execute_prerequisites (action a, const target& t,
+ const timestamp& mt, const execute_filter& ef,
+ size_t n)
+ {
+ return reverse_execute_prerequisites (nullptr, a, t, mt, ef, n).first;
+ }
+
template <typename T>
inline pair<optional<target_state>, const T&>
execute_prerequisites (action a, const target& t,
@@ -772,8 +1096,9 @@ namespace build2
p.first, static_cast<const T&> (p.second));
}
+ template <typename T>
inline target_state
- execute_members (action a, const target& t, const target* ts[], size_t n)
+ execute_members (action a, const target& t, T ts[], size_t n)
{
return t.ctx.current_mode == execution_mode::first
? straight_execute_members (a, t, ts, n, 0)
diff --git a/libbuild2/b-cmdline.cxx b/libbuild2/b-cmdline.cxx
new file mode 100644
index 0000000..206c9de
--- /dev/null
+++ b/libbuild2/b-cmdline.cxx
@@ -0,0 +1,516 @@
+// file : libbuild2/b-cmdline.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <libbuild2/b-cmdline.hxx>
+
+#include <limits>
+#include <cstring> // strcmp(), strchr()
+
+#include <libbutl/default-options.hxx>
+
+#include <libbuild2/b-options.hxx>
+#include <libbuild2/scheduler.hxx>
+#include <libbuild2/diagnostics.hxx>
+
+using namespace std;
+using namespace butl;
+
+namespace cli = build2::build::cli;
+
+namespace build2
+{
+ b_cmdline
+ parse_b_cmdline (tracer& trace,
+ int argc, char* argv[],
+ b_options& ops,
+ uint16_t def_verb,
+ size_t def_jobs)
+ {
+ // Note that the diagnostics verbosity level can only be calculated after
+ // default options are loaded and merged (see below). Thus, until then we
+ // refer to the verbosity level specified on the command line.
+ //
+ auto verbosity = [&ops, def_verb] ()
+ {
+ uint16_t v (
+ ops.verbose_specified ()
+ ? ops.verbose ()
+ : (ops.V () ? 3 :
+ ops.v () ? 2 :
+ ops.quiet () || ops.silent () ? 0 : def_verb));
+ return v;
+ };
+
+ b_cmdline r;
+
+ // We want to be able to specify options, vars, and buildspecs in any
+ // order (it is really handy to just add -v at the end of the command
+ // line).
+ //
+ try
+ {
+ // Command line arguments starting position.
+ //
+ // We want the positions of the command line arguments to be after the
+ // default options files. Normally that would be achieved by passing the
+ // last position of the previous scanner to the next. The problem is
+ // that we parse the command line arguments first (for good reasons).
+ // Also the default options files parsing machinery needs the maximum
+ // number of arguments to be specified and assigns the positions below
+ // this value (see load_default_options() for details). So we are going
+ // to "reserve" the first half of the size_t value range for the default
+ // options positions and the second half for the command line arguments
+ // positions.
+ //
+ size_t args_pos (numeric_limits<size_t>::max () / 2);
+ cli::argv_file_scanner scan (argc, argv, "--options-file", args_pos);
+
+ size_t argn (0); // Argument count.
+ bool shortcut (false); // True if the shortcut syntax is used.
+
+ for (bool opt (true), var (true); scan.more (); )
+ {
+ if (opt)
+ {
+ // Parse the next chunk of options until we reach an argument (or
+ // eos).
+ //
+ if (ops.parse (scan) && !scan.more ())
+ break;
+
+ // If we see first "--", then we are done parsing options.
+ //
+ if (strcmp (scan.peek (), "--") == 0)
+ {
+ scan.next ();
+ opt = false;
+ continue;
+ }
+
+ // Fall through.
+ }
+
+ const char* s (scan.next ());
+
+ // See if this is a command line variable. What if someone needs to
+ // pass a buildspec that contains '='? One way to support this would
+ // be to quote such a buildspec (e.g., "'/tmp/foo=bar/'"). Or invent
+ // another separator. Or use a second "--". Actually, let's just do
+ // the second "--".
+ //
+ if (var)
+ {
+ // If we see second "--", then we are also done parsing variables.
+ //
+ if (strcmp (s, "--") == 0)
+ {
+ var = false;
+ continue;
+ }
+
+ if (const char* p = strchr (s, '=')) // Covers =, +=, and =+.
+ {
+ // Diagnose the empty variable name situation. Note that we don't
+ // allow "partially broken down" assignments (as in foo =bar)
+ // since foo= bar would be ambiguous.
+ //
+ if (p == s || (p == s + 1 && *s == '+'))
+ fail << "missing variable name in '" << s << "'";
+
+ r.cmd_vars.push_back (s);
+ continue;
+ }
+
+ // Handle the "broken down" variable assignments (i.e., foo = bar
+ // instead of foo=bar).
+ //
+ if (scan.more ())
+ {
+ const char* a (scan.peek ());
+
+ if (strcmp (a, "=" ) == 0 ||
+ strcmp (a, "+=") == 0 ||
+ strcmp (a, "=+") == 0)
+ {
+ string v (s);
+ v += a;
+
+ scan.next ();
+
+ if (scan.more ())
+ v += scan.next ();
+
+ r.cmd_vars.push_back (move (v));
+ continue;
+ }
+ }
+
+ // Fall through.
+ }
+
+ // Merge all the individual buildspec arguments into a single string.
+ // We use newlines to separate arguments so that line numbers in
+ // diagnostics signify argument numbers. Clever, huh?
+ //
+ if (argn != 0)
+ r.buildspec += '\n';
+
+ r.buildspec += s;
+
+ // See if we are using the shortcut syntax.
+ //
+ if (argn == 0 && r.buildspec.back () == ':')
+ {
+ r.buildspec.back () = '(';
+ shortcut = true;
+ }
+
+ argn++;
+ }
+
+ // Add the closing parenthesis unless there wasn't anything in between
+ // in which case pop the opening one.
+ //
+ if (shortcut)
+ {
+ if (argn == 1)
+ r.buildspec.pop_back ();
+ else
+ r.buildspec += ')';
+ }
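// Editorial note (not part of the original patch): for example, the command
// line `b update: foo/ bar/` arrives here as the arguments `update:`, `foo/`,
// and `bar/` and is merged into the buildspec string "update(\nfoo/\nbar/)".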
+
+ // Get/set an environment variable tracing the operation.
+ //
+ auto get_env = [&verbosity, &trace] (const char* nm)
+ {
+ optional<string> r (getenv (nm));
+
+ if (verbosity () >= 5)
+ {
+ if (r)
+ trace << nm << ": '" << *r << "'";
+ else
+ trace << nm << ": <NULL>";
+ }
+
+ return r;
+ };
+
+ auto set_env = [&verbosity, &trace] (const char* nm, const string& vl)
+ {
+ try
+ {
+ if (verbosity () >= 5)
+ trace << "setting " << nm << "='" << vl << "'";
+
+ setenv (nm, vl);
+ }
+ catch (const system_error& e)
+ {
+ // The variable value can potentially be long/multi-line, so let's
+ // print it last.
+ //
+ fail << "unable to set environment variable " << nm << ": " << e <<
+ info << "value: '" << vl << "'";
+ }
+ };
+
+ // If the BUILD2_VAR_OVR environment variable is present, then parse its
+ // value as newline-separated global variable overrides and prepend them
+ // to the overrides specified on the command line.
+ //
+ // Note that this means global overrides may not contain a newline.
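// Editorial example (not part of the original patch): a value such as
//
//   BUILD2_VAR_OVR='!config.cxx=g++
//   !config.cc.coptions=-O2'
//
// results in the global overrides !config.cxx=g++ and !config.cc.coptions=-O2
// being prepended to cmd_vars.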
+
+ // Verify that the string is a valid global override. Uses the file name
+ // and the options flag for diagnostics only.
+ //
+ auto verify_glb_ovr = [] (const string& v, const path_name& fn, bool opt)
+ {
+ size_t p (v.find ('=', 1));
+ if (p == string::npos || v[0] != '!')
+ {
+ diag_record dr (fail (fn));
+ dr << "expected " << (opt ? "option or " : "") << "global "
+ << "variable override instead of '" << v << "'";
+
+ if (p != string::npos)
+ dr << info << "prefix variable assignment with '!'";
+ }
+
+ if (p == 1 || (p == 2 && v[1] == '+')) // '!=' or '!+=' ?
+ fail (fn) << "missing variable name in '" << v << "'";
+ };
+
+ optional<string> env_ovr (get_env ("BUILD2_VAR_OVR"));
+ if (env_ovr)
+ {
+ path_name fn ("<BUILD2_VAR_OVR>");
+
+ auto i (r.cmd_vars.begin ());
+ for (size_t b (0), e (0); next_word (*env_ovr, b, e, '\n', '\r'); )
+ {
+ // Extract the override from the current line, stripping the leading
+ // and trailing spaces.
+ //
+ string s (*env_ovr, b, e - b);
+ trim (s);
+
+ // Verify and save the override, unless the line is empty.
+ //
+ if (!s.empty ())
+ {
+ verify_glb_ovr (s, fn, false /* opt */);
+ i = r.cmd_vars.insert (i, move (s)) + 1;
+ }
+ }
+ }
+
+ // Load the default options files, unless --no-default-options is
+ // specified on the command line or the BUILD2_DEF_OPT environment
+ // variable is set to a value other than 'true' or '1'.
+ //
+ // If loaded, prepend the default global overrides to the variables
+ // specified on the command line, unless BUILD2_VAR_OVR is set in which
+ // case just ignore them.
+ //
+ optional<string> env_def (get_env ("BUILD2_DEF_OPT"));
+
+ // False if --no-default-options is specified on the command line. Note
+ // that we cache the flag since it can be overridden by a default
+ // options file.
+ //
+ bool cmd_def (!ops.no_default_options ());
+
+ if (cmd_def && (!env_def || *env_def == "true" || *env_def == "1"))
+ try
+ {
+ optional<dir_path> extra;
+ if (ops.default_options_specified ())
+ {
+ extra = ops.default_options ();
+
+ // Note that load_default_options() expects an absolute and normalized
+ // directory.
+ //
+ try
+ {
+ if (extra->relative ())
+ extra->complete ();
+
+ extra->normalize ();
+ }
+ catch (const invalid_path& e)
+ {
+ fail << "invalid --default-options value " << e.path;
+ }
+ }
+
+ // Load default options files.
+ //
+ default_options<b_options> def_ops (
+ load_default_options<b_options,
+ cli::argv_file_scanner,
+ cli::unknown_mode> (
+ nullopt /* sys_dir */,
+ path::home_directory (), // The home variable is not assigned yet.
+ extra,
+ default_options_files {{path ("b.options")},
+ nullopt /* start */},
+ [&trace, &verbosity] (const path& f, bool r, bool o)
+ {
+ if (verbosity () >= 3)
+ {
+ if (o)
+ trace << "treating " << f << " as "
+ << (r ? "remote" : "local");
+ else
+ trace << "loading " << (r ? "remote " : "local ") << f;
+ }
+ },
+ "--options-file",
+ args_pos,
+ 1024,
+ true /* args */));
+
+ // Merge the default and command line options.
+ //
+ ops = merge_default_options (def_ops, ops);
+
+ // Merge the default and command line global overrides, unless
+ // BUILD2_VAR_OVR is already set (in which case we assume this has
+ // already been done).
+ //
+ // Note that the "broken down" variable assignments occupying a single
+ // line are naturally supported.
+ //
+ if (!env_ovr)
+ r.cmd_vars =
+ merge_default_arguments (
+ def_ops,
+ r.cmd_vars,
+ [&verify_glb_ovr] (const default_options_entry<b_options>& e,
+ const strings&)
+ {
+ path_name fn (e.file);
+
+ // Verify that all arguments are global overrides.
+ //
+ for (const string& a: e.arguments)
+ verify_glb_ovr (a, fn, true /* opt */);
+ });
+ }
+ catch (const invalid_argument& e)
+ {
+ fail << "unable to load default options files: " << e;
+ }
+ catch (const pair<path, system_error>& e)
+ {
+ fail << "unable to load default options files: " << e.first << ": "
+ << e.second;
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to obtain home directory: " << e;
+ }
+
+ // Verify and save the global overrides present in cmd_vars (default,
+ // from the command line, etc), if any, into the BUILD2_VAR_OVR
+ // environment variable.
+ //
+ if (!r.cmd_vars.empty ())
+ {
+ string ovr;
+ for (const string& v: r.cmd_vars)
+ {
+ if (v[0] == '!')
+ {
+ if (v.find_first_of ("\n\r") != string::npos)
+ fail << "newline in global variable override '" << v << "'";
+
+ if (!ovr.empty ())
+ ovr += '\n';
+
+ ovr += v;
+ }
+ }
+
+ // Optimize for the common case.
+ //
+ // Note: cmd_vars may contain non-global overrides.
+ //
+ if (!ovr.empty () && (!env_ovr || *env_ovr != ovr))
+ set_env ("BUILD2_VAR_OVR", ovr);
+ }
+
+ // Propagate disabling of the default options files to the potential
+ // nested invocations.
+ //
+ if (!cmd_def && (!env_def || *env_def != "0"))
+ set_env ("BUILD2_DEF_OPT", "0");
+
+ // Validate options.
+ //
+ if (ops.progress () && ops.no_progress ())
+ fail << "both --progress and --no-progress specified";
+
+ if (ops.diag_color () && ops.no_diag_color ())
+ fail << "both --diag-color and --no-diag-color specified";
+
+ if (ops.mtime_check () && ops.no_mtime_check ())
+ fail << "both --mtime-check and --no-mtime-check specified";
+
+ if (ops.match_only () && ops.load_only ())
+ fail << "both --match-only and --load-only specified";
+
+ if (!ops.dump_specified ())
+ {
+ // Note: let's allow specifying --dump-format without --dump in case
+ // it comes from a default options file or some such.
+
+ if (ops.dump_target_specified ())
+ fail << "--dump-target requires --dump";
+
+ if (ops.dump_scope_specified ())
+ fail << "--dump-scope requires --dump";
+ }
+ }
+ catch (const cli::exception& e)
+ {
+ fail << e;
+ }
+
+ if (ops.help () || ops.version ())
+ return r;
+
+ r.verbosity = verbosity ();
+
+ if (ops.silent () && r.verbosity != 0)
+ fail << "specified with -v, -V, or --verbose verbosity level "
+ << r.verbosity << " is incompatible with --silent";
+
+ r.progress = (ops.progress () ? optional<bool> (true) :
+ ops.no_progress () ? optional<bool> (false) : nullopt);
+
+ r.diag_color = (ops.diag_color () ? optional<bool> (true) :
+ ops.no_diag_color () ? optional<bool> (false) : nullopt);
+
+ r.mtime_check = (ops.mtime_check () ? optional<bool> (true) :
+ ops.no_mtime_check () ? optional<bool> (false) : nullopt);
+
+
+ r.config_sub = (ops.config_sub_specified ()
+ ? optional<path> (ops.config_sub ())
+ : nullopt);
+
+ r.config_guess = (ops.config_guess_specified ()
+ ? optional<path> (ops.config_guess ())
+ : nullopt);
+
+ if (ops.jobs_specified ())
+ r.jobs = ops.jobs ();
+ else if (ops.serial_stop ())
+ r.jobs = 1;
+
+ if (def_jobs != 0)
+ r.jobs = def_jobs;
+ else
+ {
+ if (r.jobs == 0)
+ r.jobs = scheduler::hardware_concurrency ();
+
+ if (r.jobs == 0)
+ {
+ warn << "unable to determine the number of hardware threads" <<
+ info << "falling back to serial execution" <<
+ info << "use --jobs|-j to override";
+
+ r.jobs = 1;
+ }
+ }
+
+ if (ops.max_jobs_specified ())
+ {
+ r.max_jobs = ops.max_jobs ();
+
+ if (r.max_jobs != 0 && r.max_jobs < r.jobs)
+ fail << "invalid --max-jobs|-J value";
+ }
+
+ r.max_stack = (ops.max_stack_specified ()
+ ? optional<size_t> (ops.max_stack () * 1024)
+ : nullopt);
+
+ if (ops.file_cache_specified ())
+ {
+ const string& v (ops.file_cache ());
+ if (v == "noop" || v == "none")
+ r.fcache_compress = false;
+ else if (v == "sync-lz4")
+ r.fcache_compress = true;
+ else
+ fail << "invalid --file-cache value '" << v << "'";
+ }
+
+ return r;
+ }
+}
diff --git a/libbuild2/b-cmdline.hxx b/libbuild2/b-cmdline.hxx
new file mode 100644
index 0000000..8ccbb20
--- /dev/null
+++ b/libbuild2/b-cmdline.hxx
@@ -0,0 +1,45 @@
+// file : libbuild2/b-cmdline.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef LIBBUILD2_B_CMDLINE_HXX
+#define LIBBUILD2_B_CMDLINE_HXX
+
+#include <libbuild2/types.hxx>
+#include <libbuild2/forward.hxx>
+#include <libbuild2/utility.hxx>
+
+#include <libbuild2/b-options.hxx>
+#include <libbuild2/diagnostics.hxx>
+
+#include <libbuild2/export.hxx>
+
+namespace build2
+{
+ struct b_cmdline
+ {
+ strings cmd_vars;
+ string buildspec;
+
+ // Processed/merged option values (unless --help or --version specified).
+ //
+ uint16_t verbosity = 1;
+ optional<bool> progress;
+ optional<bool> diag_color;
+ optional<bool> mtime_check;
+ optional<path> config_sub;
+ optional<path> config_guess;
+ size_t jobs = 0;
+ size_t max_jobs = 0;
+ optional<size_t> max_stack;
+ bool fcache_compress = true;
+ };
+
+ LIBBUILD2_SYMEXPORT b_cmdline
+ parse_b_cmdline (tracer&,
+ int argc, char* argv[],
+ b_options&,
+ uint16_t default_verbosity = 1,
+ size_t default_jobs = 0);
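
// Editorial example (not part of the original patch): a minimal driver-style
// use of this API, assuming a tracer named trace:
//
//   b_options ops;
//   b_cmdline cmd (parse_b_cmdline (trace, argc, argv, ops));
//
//   // cmd.buildspec, cmd.cmd_vars, cmd.verbosity, etc., are now ready and
//   // ops contains the merged option values.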
+}
+
+#endif // LIBBUILD2_B_CMDLINE_HXX
diff --git a/libbuild2/b-options.cxx b/libbuild2/b-options.cxx
new file mode 100644
index 0000000..c107b44
--- /dev/null
+++ b/libbuild2/b-options.cxx
@@ -0,0 +1,1607 @@
+// -*- C++ -*-
+//
+// This file was generated by CLI, a command line interface
+// compiler for C++.
+//
+
+// Begin prologue.
+//
+#include <libbuild2/types-parsers.hxx>
+//
+// End prologue.
+
+#include <libbuild2/b-options.hxx>
+
+#include <map>
+#include <set>
+#include <string>
+#include <vector>
+#include <utility>
+#include <ostream>
+#include <sstream>
+#include <cstring>
+
+namespace build2
+{
+ namespace build
+ {
+ namespace cli
+ {
+ template <typename X>
+ struct parser
+ {
+ static void
+ parse (X& x, bool& xs, scanner& s)
+ {
+ using namespace std;
+
+ const char* o (s.next ());
+ if (s.more ())
+ {
+ string v (s.next ());
+ istringstream is (v);
+ if (!(is >> x && is.peek () == istringstream::traits_type::eof ()))
+ throw invalid_value (o, v);
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+
+ static void
+ merge (X& b, const X& a)
+ {
+ b = a;
+ }
+ };
+
+ template <>
+ struct parser<bool>
+ {
+ static void
+ parse (bool& x, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ const char* v (s.next ());
+
+ if (std::strcmp (v, "1") == 0 ||
+ std::strcmp (v, "true") == 0 ||
+ std::strcmp (v, "TRUE") == 0 ||
+ std::strcmp (v, "True") == 0)
+ x = true;
+ else if (std::strcmp (v, "0") == 0 ||
+ std::strcmp (v, "false") == 0 ||
+ std::strcmp (v, "FALSE") == 0 ||
+ std::strcmp (v, "False") == 0)
+ x = false;
+ else
+ throw invalid_value (o, v);
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+
+ static void
+ merge (bool& b, const bool&)
+ {
+ b = true;
+ }
+ };
+
+ template <>
+ struct parser<std::string>
+ {
+ static void
+ parse (std::string& x, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ x = s.next ();
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+
+ static void
+ merge (std::string& b, const std::string& a)
+ {
+ b = a;
+ }
+ };
+
+ template <typename X>
+ struct parser<std::pair<X, std::size_t> >
+ {
+ static void
+ parse (std::pair<X, std::size_t>& x, bool& xs, scanner& s)
+ {
+ x.second = s.position ();
+ parser<X>::parse (x.first, xs, s);
+ }
+
+ static void
+ merge (std::pair<X, std::size_t>& b, const std::pair<X, std::size_t>& a)
+ {
+ b = a;
+ }
+ };
+
+ template <typename X>
+ struct parser<std::vector<X> >
+ {
+ static void
+ parse (std::vector<X>& c, bool& xs, scanner& s)
+ {
+ X x;
+ bool dummy;
+ parser<X>::parse (x, dummy, s);
+ c.push_back (x);
+ xs = true;
+ }
+
+ static void
+ merge (std::vector<X>& b, const std::vector<X>& a)
+ {
+ b.insert (b.end (), a.begin (), a.end ());
+ }
+ };
+
+ template <typename X, typename C>
+ struct parser<std::set<X, C> >
+ {
+ static void
+ parse (std::set<X, C>& c, bool& xs, scanner& s)
+ {
+ X x;
+ bool dummy;
+ parser<X>::parse (x, dummy, s);
+ c.insert (x);
+ xs = true;
+ }
+
+ static void
+ merge (std::set<X, C>& b, const std::set<X, C>& a)
+ {
+ b.insert (a.begin (), a.end ());
+ }
+ };
+
+ template <typename K, typename V, typename C>
+ struct parser<std::map<K, V, C> >
+ {
+ static void
+ parse (std::map<K, V, C>& m, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ std::size_t pos (s.position ());
+ std::string ov (s.next ());
+ std::string::size_type p = ov.find ('=');
+
+ K k = K ();
+ V v = V ();
+ std::string kstr (ov, 0, p);
+ std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (o),
+ 0
+ };
+
+ bool dummy;
+ if (!kstr.empty ())
+ {
+ av[1] = const_cast<char*> (kstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<K>::parse (k, dummy, s);
+ }
+
+ if (!vstr.empty ())
+ {
+ av[1] = const_cast<char*> (vstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<V>::parse (v, dummy, s);
+ }
+
+ m[k] = v;
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+
+ static void
+ merge (std::map<K, V, C>& b, const std::map<K, V, C>& a)
+ {
+ for (typename std::map<K, V, C>::const_iterator i (a.begin ());
+ i != a.end ();
+ ++i)
+ b[i->first] = i->second;
+ }
+ };
+
+ template <typename K, typename V, typename C>
+ struct parser<std::multimap<K, V, C> >
+ {
+ static void
+ parse (std::multimap<K, V, C>& m, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ std::size_t pos (s.position ());
+ std::string ov (s.next ());
+ std::string::size_type p = ov.find ('=');
+
+ K k = K ();
+ V v = V ();
+ std::string kstr (ov, 0, p);
+ std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (o),
+ 0
+ };
+
+ bool dummy;
+ if (!kstr.empty ())
+ {
+ av[1] = const_cast<char*> (kstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<K>::parse (k, dummy, s);
+ }
+
+ if (!vstr.empty ())
+ {
+ av[1] = const_cast<char*> (vstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<V>::parse (v, dummy, s);
+ }
+
+ m.insert (typename std::multimap<K, V, C>::value_type (k, v));
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+
+ static void
+ merge (std::multimap<K, V, C>& b, const std::multimap<K, V, C>& a)
+ {
+ for (typename std::multimap<K, V, C>::const_iterator i (a.begin ());
+ i != a.end ();
+ ++i)
+ b.insert (typename std::multimap<K, V, C>::value_type (i->first,
+ i->second));
+ }
+ };
+
+ template <typename X, typename T, T X::*M>
+ void
+ thunk (X& x, scanner& s)
+ {
+ parser<T>::parse (x.*M, s);
+ }
+
+ template <typename X, bool X::*M>
+ void
+ thunk (X& x, scanner& s)
+ {
+ s.next ();
+ x.*M = true;
+ }
+
+ template <typename X, typename T, T X::*M, bool X::*S>
+ void
+ thunk (X& x, scanner& s)
+ {
+ parser<T>::parse (x.*M, x.*S, s);
+ }
+ }
+ }
+}
+
+#include <map>
+
+namespace build2
+{
+ // b_options
+ //
+
+ b_options::
+ b_options ()
+ : build2_metadata_ (),
+ build2_metadata_specified_ (false),
+ v_ (),
+ V_ (),
+ quiet_ (),
+ silent_ (),
+ verbose_ (1),
+ verbose_specified_ (false),
+ stat_ (),
+ progress_ (),
+ no_progress_ (),
+ diag_color_ (),
+ no_diag_color_ (),
+ jobs_ (),
+ jobs_specified_ (false),
+ max_jobs_ (),
+ max_jobs_specified_ (false),
+ queue_depth_ (4),
+ queue_depth_specified_ (false),
+ file_cache_ (),
+ file_cache_specified_ (false),
+ max_stack_ (),
+ max_stack_specified_ (false),
+ serial_stop_ (),
+ dry_run_ (),
+ no_diag_buffer_ (),
+ match_only_ (),
+ load_only_ (),
+ no_external_modules_ (),
+ structured_result_ (),
+ structured_result_specified_ (false),
+ mtime_check_ (),
+ no_mtime_check_ (),
+ dump_ (),
+ dump_specified_ (false),
+ dump_format_ (),
+ dump_format_specified_ (false),
+ dump_scope_ (),
+ dump_scope_specified_ (false),
+ dump_target_ (),
+ dump_target_specified_ (false),
+ trace_match_ (),
+ trace_match_specified_ (false),
+ trace_execute_ (),
+ trace_execute_specified_ (false),
+ no_column_ (),
+ no_line_ (),
+ buildfile_ (),
+ buildfile_specified_ (false),
+ config_guess_ (),
+ config_guess_specified_ (false),
+ config_sub_ (),
+ config_sub_specified_ (false),
+ pager_ (),
+ pager_specified_ (false),
+ pager_option_ (),
+ pager_option_specified_ (false),
+ options_file_ (),
+ options_file_specified_ (false),
+ default_options_ (),
+ default_options_specified_ (false),
+ no_default_options_ (),
+ help_ (),
+ version_ ()
+ {
+ }
+
+ bool b_options::
+ parse (int& argc,
+ char** argv,
+ bool erase,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ {
+ ::build2::build::cli::argv_scanner s (argc, argv, erase);
+ bool r = _parse (s, opt, arg);
+ return r;
+ }
+
+ bool b_options::
+ parse (int start,
+ int& argc,
+ char** argv,
+ bool erase,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ {
+ ::build2::build::cli::argv_scanner s (start, argc, argv, erase);
+ bool r = _parse (s, opt, arg);
+ return r;
+ }
+
+ bool b_options::
+ parse (int& argc,
+ char** argv,
+ int& end,
+ bool erase,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ {
+ ::build2::build::cli::argv_scanner s (argc, argv, erase);
+ bool r = _parse (s, opt, arg);
+ end = s.end ();
+ return r;
+ }
+
+ bool b_options::
+ parse (int start,
+ int& argc,
+ char** argv,
+ int& end,
+ bool erase,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ {
+ ::build2::build::cli::argv_scanner s (start, argc, argv, erase);
+ bool r = _parse (s, opt, arg);
+ end = s.end ();
+ return r;
+ }
+
+ bool b_options::
+ parse (::build2::build::cli::scanner& s,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ {
+ bool r = _parse (s, opt, arg);
+ return r;
+ }
+
+ void b_options::
+ merge (const b_options& a)
+ {
+ CLI_POTENTIALLY_UNUSED (a);
+
+ if (a.build2_metadata_specified_)
+ {
+ ::build2::build::cli::parser< uint64_t>::merge (
+ this->build2_metadata_, a.build2_metadata_);
+ this->build2_metadata_specified_ = true;
+ }
+
+ if (a.v_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->v_, a.v_);
+ }
+
+ if (a.V_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->V_, a.V_);
+ }
+
+ if (a.quiet_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->quiet_, a.quiet_);
+ }
+
+ if (a.silent_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->silent_, a.silent_);
+ }
+
+ if (a.verbose_specified_)
+ {
+ ::build2::build::cli::parser< uint16_t>::merge (
+ this->verbose_, a.verbose_);
+ this->verbose_specified_ = true;
+ }
+
+ if (a.stat_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->stat_, a.stat_);
+ }
+
+ if (a.progress_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->progress_, a.progress_);
+ }
+
+ if (a.no_progress_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->no_progress_, a.no_progress_);
+ }
+
+ if (a.diag_color_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->diag_color_, a.diag_color_);
+ }
+
+ if (a.no_diag_color_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->no_diag_color_, a.no_diag_color_);
+ }
+
+ if (a.jobs_specified_)
+ {
+ ::build2::build::cli::parser< size_t>::merge (
+ this->jobs_, a.jobs_);
+ this->jobs_specified_ = true;
+ }
+
+ if (a.max_jobs_specified_)
+ {
+ ::build2::build::cli::parser< size_t>::merge (
+ this->max_jobs_, a.max_jobs_);
+ this->max_jobs_specified_ = true;
+ }
+
+ if (a.queue_depth_specified_)
+ {
+ ::build2::build::cli::parser< size_t>::merge (
+ this->queue_depth_, a.queue_depth_);
+ this->queue_depth_specified_ = true;
+ }
+
+ if (a.file_cache_specified_)
+ {
+ ::build2::build::cli::parser< string>::merge (
+ this->file_cache_, a.file_cache_);
+ this->file_cache_specified_ = true;
+ }
+
+ if (a.max_stack_specified_)
+ {
+ ::build2::build::cli::parser< size_t>::merge (
+ this->max_stack_, a.max_stack_);
+ this->max_stack_specified_ = true;
+ }
+
+ if (a.serial_stop_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->serial_stop_, a.serial_stop_);
+ }
+
+ if (a.dry_run_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->dry_run_, a.dry_run_);
+ }
+
+ if (a.no_diag_buffer_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->no_diag_buffer_, a.no_diag_buffer_);
+ }
+
+ if (a.match_only_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->match_only_, a.match_only_);
+ }
+
+ if (a.load_only_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->load_only_, a.load_only_);
+ }
+
+ if (a.no_external_modules_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->no_external_modules_, a.no_external_modules_);
+ }
+
+ if (a.structured_result_specified_)
+ {
+ ::build2::build::cli::parser< structured_result_format>::merge (
+ this->structured_result_, a.structured_result_);
+ this->structured_result_specified_ = true;
+ }
+
+ if (a.mtime_check_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->mtime_check_, a.mtime_check_);
+ }
+
+ if (a.no_mtime_check_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->no_mtime_check_, a.no_mtime_check_);
+ }
+
+ if (a.dump_specified_)
+ {
+ ::build2::build::cli::parser< strings>::merge (
+ this->dump_, a.dump_);
+ this->dump_specified_ = true;
+ }
+
+ if (a.dump_format_specified_)
+ {
+ ::build2::build::cli::parser< string>::merge (
+ this->dump_format_, a.dump_format_);
+ this->dump_format_specified_ = true;
+ }
+
+ if (a.dump_scope_specified_)
+ {
+ ::build2::build::cli::parser< dir_paths>::merge (
+ this->dump_scope_, a.dump_scope_);
+ this->dump_scope_specified_ = true;
+ }
+
+ if (a.dump_target_specified_)
+ {
+ ::build2::build::cli::parser< vector<pair<name, optional<name>>>>::merge (
+ this->dump_target_, a.dump_target_);
+ this->dump_target_specified_ = true;
+ }
+
+ if (a.trace_match_specified_)
+ {
+ ::build2::build::cli::parser< vector<name>>::merge (
+ this->trace_match_, a.trace_match_);
+ this->trace_match_specified_ = true;
+ }
+
+ if (a.trace_execute_specified_)
+ {
+ ::build2::build::cli::parser< vector<name>>::merge (
+ this->trace_execute_, a.trace_execute_);
+ this->trace_execute_specified_ = true;
+ }
+
+ if (a.no_column_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->no_column_, a.no_column_);
+ }
+
+ if (a.no_line_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->no_line_, a.no_line_);
+ }
+
+ if (a.buildfile_specified_)
+ {
+ ::build2::build::cli::parser< path>::merge (
+ this->buildfile_, a.buildfile_);
+ this->buildfile_specified_ = true;
+ }
+
+ if (a.config_guess_specified_)
+ {
+ ::build2::build::cli::parser< path>::merge (
+ this->config_guess_, a.config_guess_);
+ this->config_guess_specified_ = true;
+ }
+
+ if (a.config_sub_specified_)
+ {
+ ::build2::build::cli::parser< path>::merge (
+ this->config_sub_, a.config_sub_);
+ this->config_sub_specified_ = true;
+ }
+
+ if (a.pager_specified_)
+ {
+ ::build2::build::cli::parser< string>::merge (
+ this->pager_, a.pager_);
+ this->pager_specified_ = true;
+ }
+
+ if (a.pager_option_specified_)
+ {
+ ::build2::build::cli::parser< strings>::merge (
+ this->pager_option_, a.pager_option_);
+ this->pager_option_specified_ = true;
+ }
+
+ if (a.options_file_specified_)
+ {
+ ::build2::build::cli::parser< string>::merge (
+ this->options_file_, a.options_file_);
+ this->options_file_specified_ = true;
+ }
+
+ if (a.default_options_specified_)
+ {
+ ::build2::build::cli::parser< dir_path>::merge (
+ this->default_options_, a.default_options_);
+ this->default_options_specified_ = true;
+ }
+
+ if (a.no_default_options_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->no_default_options_, a.no_default_options_);
+ }
+
+ if (a.help_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->help_, a.help_);
+ }
+
+ if (a.version_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->version_, a.version_);
+ }
+ }
+
+ ::build2::build::cli::usage_para b_options::
+ print_usage (::std::ostream& os, ::build2::build::cli::usage_para p)
+ {
+ CLI_POTENTIALLY_UNUSED (os);
+
+ if (p != ::build2::build::cli::usage_para::none)
+ os << ::std::endl;
+
+ os << "\033[1mOPTIONS\033[0m" << ::std::endl;
+
+ os << std::endl
+ << "\033[1m-v\033[0m Print actual commands being executed. This options is" << ::std::endl
+ << " equivalent to \033[1m--verbose 2\033[0m." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m-V\033[0m Print all underlying commands being executed. This" << ::std::endl
+ << " options is equivalent to \033[1m--verbose 3\033[0m." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--quiet\033[0m|\033[1m-q\033[0m Run quietly, only printing error messages in most" << ::std::endl
+ << " contexts. In certain contexts (for example, while" << ::std::endl
+ << " updating build system modules) this verbosity level may" << ::std::endl
+ << " be ignored. Use \033[1m--silent\033[0m to run quietly in all" << ::std::endl
+ << " contexts. This option is equivalent to \033[1m--verbose 0\033[0m." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--silent\033[0m Run quietly, only printing error messages in all" << ::std::endl
+ << " contexts." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--verbose\033[0m \033[4mlevel\033[0m Set the diagnostics verbosity to \033[4mlevel\033[0m between 0 and 6." << ::std::endl
+ << " Level 0 disables any non-error messages (but see the" << ::std::endl
+ << " difference between \033[1m--quiet\033[0m and \033[1m--silent\033[0m) while level 6" << ::std::endl
+ << " produces lots of information, with level 1 being the" << ::std::endl
+ << " default. The following additional types of diagnostics" << ::std::endl
+ << " are produced at each level:" << ::std::endl
+ << ::std::endl
+ << " 1. High-level information messages." << ::std::endl
+ << " 2. Essential underlying commands being executed." << ::std::endl
+ << " 3. All underlying commands being executed." << ::std::endl
+ << " 4. Information that could be helpful to the user." << ::std::endl
+ << " 5. Information that could be helpful to the developer." << ::std::endl
+ << " 6. Even more detailed information." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--stat\033[0m Display build statistics." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--progress\033[0m Display build progress. If printing to a terminal the" << ::std::endl
+ << " progress is displayed by default for low verbosity" << ::std::endl
+ << " levels. Use \033[1m--no-progress\033[0m to suppress." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--no-progress\033[0m Don't display build progress." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--diag-color\033[0m Use color in diagnostics. If printing to a terminal the" << ::std::endl
+ << " color is used by default provided the terminal is not" << ::std::endl
+ << " dumb. Use \033[1m--no-diag-color\033[0m to suppress." << ::std::endl
+ << ::std::endl
+ << " This option affects the diagnostics printed by the" << ::std::endl
+ << " build system itself. Some rules may also choose to" << ::std::endl
+ << " propagate its value to tools (such as compilers) that" << ::std::endl
+ << " they invoke." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--no-diag-color\033[0m Don't use color in diagnostics." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--jobs\033[0m|\033[1m-j\033[0m \033[4mnum\033[0m Number of active jobs to perform in parallel. This" << ::std::endl
+ << " includes both the number of active threads inside the" << ::std::endl
+ << " build system as well as the number of external commands" << ::std::endl
+ << " (compilers, linkers, etc) started but not yet finished." << ::std::endl
+ << " If this option is not specified or specified with the" << ::std::endl
+ << " \033[1m0\033[0m value, then the number of available hardware threads" << ::std::endl
+ << " is used." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--max-jobs\033[0m|\033[1m-J\033[0m \033[4mnum\033[0m Maximum number of jobs (threads) to create. The default" << ::std::endl
+ << " is 8x the number of active jobs (\033[1m--jobs|j\033[0m) on 32-bit" << ::std::endl
+ << " architectures and 32x on 64-bit. See the build system" << ::std::endl
+ << " scheduler implementation for details." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--queue-depth\033[0m|\033[1m-Q\033[0m \033[4mnum\033[0m The queue depth as a multiplier over the number of" << ::std::endl
+ << " active jobs. Normally we want a deeper queue if the" << ::std::endl
+ << " jobs take long (for example, compilation) and shorter" << ::std::endl
+ << " if they are quick (for example, simple tests). The" << ::std::endl
+ << " default is 4. See the build system scheduler" << ::std::endl
+ << " implementation for details." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--file-cache\033[0m \033[4mimpl\033[0m File cache implementation to use for intermediate build" << ::std::endl
+ << " results. Valid values are \033[1mnoop\033[0m (no caching or" << ::std::endl
+ << " compression) and \033[1msync-lz4\033[0m (no caching with synchronous" << ::std::endl
+ << " LZ4 on-disk compression). If this option is not" << ::std::endl
+ << " specified, then a suitable default implementation is" << ::std::endl
+ << " used (currently \033[1msync-lz4\033[0m)." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--max-stack\033[0m \033[4mnum\033[0m The maximum stack size in KBytes to allow for newly" << ::std::endl
+ << " created threads. For \033[4mpthreads\033[0m-based systems the driver" << ::std::endl
+ << " queries the stack size of the main thread and uses the" << ::std::endl
+ << " same size for creating additional threads. This allows" << ::std::endl
+ << " adjusting the stack size using familiar mechanisms," << ::std::endl
+ << " such as \033[1mulimit\033[0m. Sometimes, however, the stack size of" << ::std::endl
+ << " the main thread is excessively large. As a result, the" << ::std::endl
+ << " driver checks if it is greater than a predefined limit" << ::std::endl
+ << " (64MB on 64-bit systems and 32MB on 32-bit ones) and" << ::std::endl
+ << " caps it to a more sensible value (8MB) if that's the" << ::std::endl
+ << " case. This option allows you to override this check" << ::std::endl
+ << " with the special zero value indicating that the main" << ::std::endl
+ << " thread stack size should be used as is." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--serial-stop\033[0m|\033[1m-s\033[0m Run serially and stop at the first error. This mode is" << ::std::endl
+ << " useful to investigate build failures that are caused by" << ::std::endl
+ << " build system errors rather than compilation errors." << ::std::endl
+ << " Note that if you don't want to keep going but still" << ::std::endl
+ << " want parallel execution, add \033[1m--jobs|-j\033[0m (for example \033[1m-j" << ::std::endl
+ << " 0\033[0m for default concurrency). Note also that during" << ::std::endl
+ << " serial execution there is no diagnostics buffering and" << ::std::endl
+ << " child process' \033[1mstderr\033[0m is a terminal (unless redirected;" << ::std::endl
+ << " see \033[1m--no-diag-buffer\033[0m for details)." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--dry-run\033[0m|\033[1m-n\033[0m Print commands without actually executing them. Note" << ::std::endl
+ << " that commands that are required to create an accurate" << ::std::endl
+ << " build state will still be executed and the extracted" << ::std::endl
+ << " auxiliary dependency information saved. In other words," << ::std::endl
+ << " this is not the \033[4m\"don't touch the filesystem\"\033[0m mode but" << ::std::endl
+ << " rather \033[4m\"do minimum amount of work to show what needs to" << ::std::endl
+ << " be done\"\033[0m. Note also that only the \033[1mperform\033[0m" << ::std::endl
+ << " meta-operation supports this mode." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--no-diag-buffer\033[0m Do not buffer diagnostics from child processes. By" << ::std::endl
+ << " default, unless running serially, such diagnostics is" << ::std::endl
+ << " buffered and printed all at once after each child exits" << ::std::endl
+ << " in order to prevent interleaving. However, this can" << ::std::endl
+ << " have side-effects since the child process' \033[1mstderr\033[0m is no" << ::std::endl
+ << " longer a terminal. Most notably, the use of color in" << ::std::endl
+ << " diagnostics may be disabled by some programs. On the" << ::std::endl
+ << " other hand, depending on the platform and programs" << ::std::endl
+ << " invoked, the interleaving diagnostics may not break" << ::std::endl
+ << " lines and thus could be tolerable." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--match-only\033[0m Match the rules without executing the operation. This" << ::std::endl
+ << " mode is primarily useful for profiling and dumping the" << ::std::endl
+ << " build system state." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--load-only\033[0m Match the rules only to \033[1malias{}\033[0m targets ignoring other" << ::std::endl
+ << " targets and without executing the operation. In" << ::std::endl
+ << " particular, this has the effect of loading all the" << ::std::endl
+ << " subdirectory \033[1mbuildfiles\033[0m that are not explicitly" << ::std::endl
+ << " included. Note that this option can only be used with" << ::std::endl
+ << " the \033[1mperform(update)\033[0m action on an \033[1malias{}\033[0m target," << ::std::endl
+ << " usually \033[1mdir{}\033[0m." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--no-external-modules\033[0m Don't load external modules during project bootstrap." << ::std::endl
+ << " Note that this option can only be used with" << ::std::endl
+ << " meta-operations that do not load the project's" << ::std::endl
+ << " \033[1mbuildfiles\033[0m, such as \033[1minfo\033[0m." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--structured-result\033[0m \033[4mfmt\033[0m Write the result of execution in a structured form. In" << ::std::endl
+ << " this mode, instead of printing to \033[1mstderr\033[0m diagnostics" << ::std::endl
+ << " messages about the outcome of executing actions on" << ::std::endl
+ << " targets, the driver writes to \033[1mstdout\033[0m a machine-readable" << ::std::endl
+ << " result description in the specified format. Valid" << ::std::endl
+ << " values for this option are \033[1mlines\033[0m and \033[1mjson\033[0m. Note that" << ::std::endl
+ << " currently only the \033[1mperform\033[0m meta-operation supports the" << ::std::endl
+ << " structured result output." << ::std::endl
+ << ::std::endl
+ << " If the output format is \033[1mlines\033[0m, then the result is" << ::std::endl
+         << "                          written as one line per buildspec action/target pair." << ::std::endl
+ << " Each line has the following form:" << ::std::endl
+ << ::std::endl
+ << " \033[4mstate\033[0m \033[4mmeta-operation\033[0m \033[4moperation\033[0m \033[4mtarget\033[0m\033[0m" << ::std::endl
+ << ::std::endl
+ << " Where \033[4mstate\033[0m can be one of \033[1munchanged\033[0m, \033[1mchanged\033[0m, or" << ::std::endl
+ << " \033[1mfailed\033[0m. If the action is a pre or post operation, then" << ::std::endl
+ << " the outer operation is specified in parenthesis. For" << ::std::endl
+ << " example:" << ::std::endl
+ << ::std::endl
+ << " unchanged perform update(test)" << ::std::endl
+ << " /tmp/hello/hello/exe{hello}" << ::std::endl
+ << " changed perform test /tmp/hello/hello/exe{hello}" << ::std::endl
+ << ::std::endl
+ << " If the output format is \033[1mjson\033[0m, then the output is a JSON" << ::std::endl
+ << " array of objects which are the serialized" << ::std::endl
+ << " representation of the following C++ \033[1mstruct\033[0m" << ::std::endl
+ << " \033[1mtarget_action_result\033[0m:" << ::std::endl
+ << ::std::endl
+ << " struct target_action_result" << ::std::endl
+ << " {" << ::std::endl
+ << " string target;" << ::std::endl
+ << " string display_target;" << ::std::endl
+ << " string target_type;" << ::std::endl
+ << " optional<string> target_path;" << ::std::endl
+ << " string meta_operation;" << ::std::endl
+ << " string operation;" << ::std::endl
+ << " optional<string> outer_operation;" << ::std::endl
+ << " string state;" << ::std::endl
+ << " };" << ::std::endl
+ << ::std::endl
+ << " For example:" << ::std::endl
+ << ::std::endl
+ << " [" << ::std::endl
+ << " {" << ::std::endl
+ << " \"target\": \"/tmp/hello/hello/exe{hello.}\"," << ::std::endl
+ << " \"display_target\": \"/tmp/hello/hello/exe{hello}\"," << ::std::endl
+ << " \"target_type\": \"exe\"," << ::std::endl
+ << " \"target_path\": \"/tmp/hello/hello/hello\"," << ::std::endl
+ << " \"meta_operation\": \"perform\"," << ::std::endl
+ << " \"operation\": \"update\"," << ::std::endl
+ << " \"outer_operation\": \"test\"," << ::std::endl
+ << " \"state\": \"unchanged\"" << ::std::endl
+ << " }," << ::std::endl
+ << " {" << ::std::endl
+ << " \"target\": \"/tmp/hello/hello/exe{hello.}\"," << ::std::endl
+ << " \"display_target\": \"/tmp/hello/hello/exe{hello}\"," << ::std::endl
+ << " \"target_type\": \"exe\"," << ::std::endl
+ << " \"target_path\": \"/tmp/hello/hello/hello\"," << ::std::endl
+ << " \"meta_operation\": \"perform\"," << ::std::endl
+ << " \"operation\": \"test\"," << ::std::endl
+ << " \"state\": \"changed\"" << ::std::endl
+ << " }" << ::std::endl
+ << " ]" << ::std::endl
+ << ::std::endl
+ << " See the JSON OUTPUT section below for details on the" << ::std::endl
+ << " overall properties of this format and the semantics of" << ::std::endl
+ << " the \033[1mstruct\033[0m serialization." << ::std::endl
+ << ::std::endl
+ << " The \033[1mtarget\033[0m member is the target name that is qualified" << ::std::endl
+ << " with the extension (if applicable) and, if required, is" << ::std::endl
+ << " quoted so that it can be passed back to the build" << ::std::endl
+ << " system driver on the command line. The \033[1mdisplay_target\033[0m" << ::std::endl
+ << " member is the unqualified and unquoted \"display\" target" << ::std::endl
+ << " name, the same as in the \033[1mlines\033[0m format. The \033[1mtarget_type\033[0m" << ::std::endl
+ << " member is the type of target. The \033[1mtarget_path\033[0m member" << ::std::endl
+ << " is an absolute path to the target if the target type is" << ::std::endl
+ << " path-based or \033[1mdir\033[0m." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--mtime-check\033[0m Perform file modification time sanity checks. These" << ::std::endl
+ << " checks can be helpful in diagnosing spurious rebuilds" << ::std::endl
+ << " and are enabled by default on Windows (which is known" << ::std::endl
+ << " not to guarantee monotonically increasing mtimes) and" << ::std::endl
+ << " for the staged version of the build system on other" << ::std::endl
+ << " platforms. Use \033[1m--no-mtime-check\033[0m to disable." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--no-mtime-check\033[0m Don't perform file modification time sanity checks. See" << ::std::endl
+ << " \033[1m--mtime-check\033[0m for details." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--dump\033[0m \033[4mphase\033[0m Dump the build system state after the specified phase." << ::std::endl
+ << " Valid \033[4mphase\033[0m values are \033[1mload\033[0m (after loading \033[1mbuildfiles\033[0m)" << ::std::endl
+ << " and \033[1mmatch\033[0m (after matching rules to targets). The \033[1mmatch\033[0m" << ::std::endl
+ << " value also has the \033[1mmatch-pre\033[0m and \033[1mmatch-post\033[0m variants to" << ::std::endl
+ << " dump the state for the pre/post-operations (\033[1mmatch\033[0m dumps" << ::std::endl
+ << " the main operation only). Repeat this option to dump" << ::std::endl
+ << " the state after multiple phases/variants. By default" << ::std::endl
+ << " the entire build state is dumped but this behavior can" << ::std::endl
+ << " be altered with the \033[1m--dump-scope\033[0m and \033[1m--dump-target\033[0m" << ::std::endl
+ << " options. See also the \033[1m--match-only\033[0m and \033[1m--load-only\033[0m" << ::std::endl
+ << " options." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--dump-format\033[0m \033[4mformat\033[0m Representation format and output stream to use when" << ::std::endl
+ << " dumping the build system state. Valid values for this" << ::std::endl
+ << " option are \033[1mbuildfile\033[0m (a human-readable, Buildfile-like" << ::std::endl
+ << " format written to \033[1mstderr\033[0m; this is the default), and" << ::std::endl
+ << " \033[1mjson-v0.1\033[0m (machine-readable, JSON-based format written" << ::std::endl
+ << " to \033[1mstdout\033[0m). For details on the \033[1mbuildfile\033[0m format, see" << ::std::endl
+ << " Diagnostics and Debugging (b#intro-diag-debug). For" << ::std::endl
+ << " details on the \033[1mjson-v0.1\033[0m format, see the JSON OUTPUT" << ::std::endl
+ << " section below (overall properties) and JSON Dump Format" << ::std::endl
+ << " (b#json-dump) (format specifics). Note that the JSON" << ::std::endl
+ << " format is currently unstable (thus the temporary \033[1m-v0.1\033[0m" << ::std::endl
+ << " suffix)." << ::std::endl
+ << ::std::endl
+ << " Note that because it's possible to end up with multiple" << ::std::endl
+ << " dumps (for example, by specifying the \033[1m--dump-scope\033[0m" << ::std::endl
+ << " and/or \033[1m--dump-target\033[0m options multiple times), the JSON" << ::std::endl
+ << " output is in the \"JSON Lines\" form, that is, without" << ::std::endl
+ << " pretty-printing and with the top-level JSON objects" << ::std::endl
+ << " delimited by newlines. Note also that if the JSON dump" << ::std::endl
+ << " output is combined with \033[1m--structured-result=json\033[0m, then" << ::std::endl
+ << " the structured result is the last line." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--dump-scope\033[0m \033[4mdir\033[0m Dump the build system state for the specified scope" << ::std::endl
+ << " only. Repeat this option to dump the state of multiple" << ::std::endl
+ << " scopes." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--dump-target\033[0m \033[4mtarget\033[0m Dump the build system state for the specified target" << ::std::endl
+ << " only. Repeat this option to dump the state of multiple" << ::std::endl
+ << " targets." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--trace-match\033[0m \033[4mtarget\033[0m Trace rule matching for the specified target. This is" << ::std::endl
+ << " primarily useful during troubleshooting. Repeat this" << ::std::endl
+ << " option to trace multiple targets." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--trace-execute\033[0m \033[4mtarget\033[0m Trace rule execution for the specified target. This is" << ::std::endl
+ << " primarily useful during troubleshooting. Repeat this" << ::std::endl
+ << " option to trace multiple targets." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--no-column\033[0m Don't print column numbers in diagnostics." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--no-line\033[0m Don't print line and column numbers in diagnostics." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--buildfile\033[0m \033[4mpath\033[0m The alternative file to read build information from." << ::std::endl
+ << " The default is \033[1mbuildfile\033[0m or \033[1mbuild2file\033[0m, depending on" << ::std::endl
+ << " the project's build file/directory naming scheme. If" << ::std::endl
+ << " \033[4mpath\033[0m is '\033[1m-\033[0m', then read from \033[1mstdin\033[0m. Note that this" << ::std::endl
+ << " option only affects the files read as part of the" << ::std::endl
+ << " buildspec processing. Specifically, it has no effect on" << ::std::endl
+ << " the \033[1msource\033[0m and \033[1minclude\033[0m directives. As a result, this" << ::std::endl
+ << " option is primarily intended for testing rather than" << ::std::endl
+ << " changing the build file names in real projects." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--config-guess\033[0m \033[4mpath\033[0m The path to the \033[1mconfig.guess(1)\033[0m script that should be" << ::std::endl
+ << " used to guess the host machine triplet. If this option" << ::std::endl
+         << "                          is not specified, then \033[1mb\033[0m will fall back to using the" << ::std::endl
+ << " target it was built for as host." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--config-sub\033[0m \033[4mpath\033[0m The path to the \033[1mconfig.sub(1)\033[0m script that should be" << ::std::endl
+ << " used to canonicalize machine triplets. If this option" << ::std::endl
+ << " is not specified, then \033[1mb\033[0m will use its built-in" << ::std::endl
+ << " canonicalization support which should be sufficient for" << ::std::endl
+ << " commonly-used platforms." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--pager\033[0m \033[4mpath\033[0m The pager program to be used to show long text." << ::std::endl
+ << " Commonly used pager programs are \033[1mless\033[0m and \033[1mmore\033[0m. You can" << ::std::endl
+ << " also specify additional options that should be passed" << ::std::endl
+ << " to the pager program with \033[1m--pager-option\033[0m. If an empty" << ::std::endl
+ << " string is specified as the pager program, then no pager" << ::std::endl
+ << " will be used. If the pager program is not explicitly" << ::std::endl
+ << " specified, then \033[1mb\033[0m will try to use \033[1mless\033[0m. If it is not" << ::std::endl
+ << " available, then no pager will be used." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--pager-option\033[0m \033[4mopt\033[0m Additional option to be passed to the pager program." << ::std::endl
+ << " See \033[1m--pager\033[0m for more information on the pager program." << ::std::endl
+ << " Repeat this option to specify multiple pager options." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--options-file\033[0m \033[4mfile\033[0m Read additional options from \033[4mfile\033[0m. Each option should" << ::std::endl
+ << " appear on a separate line optionally followed by space" << ::std::endl
+ << " or equal sign (\033[1m=\033[0m) and an option value. Empty lines and" << ::std::endl
+ << " lines starting with \033[1m#\033[0m are ignored. Option values can be" << ::std::endl
+ << " enclosed in double (\033[1m\"\033[0m) or single (\033[1m'\033[0m) quotes to preserve" << ::std::endl
+ << " leading and trailing whitespaces as well as to specify" << ::std::endl
+ << " empty values. If the value itself contains trailing or" << ::std::endl
+ << " leading quotes, enclose it with an extra pair of" << ::std::endl
+ << " quotes, for example \033[1m'\"x\"'\033[0m. Non-leading and non-trailing" << ::std::endl
+ << " quotes are interpreted as being part of the option" << ::std::endl
+ << " value." << ::std::endl
+ << ::std::endl
+ << " The semantics of providing options in a file is" << ::std::endl
+ << " equivalent to providing the same set of options in the" << ::std::endl
+ << " same order on the command line at the point where the" << ::std::endl
+ << " \033[1m--options-file\033[0m option is specified except that the" << ::std::endl
+ << " shell escaping and quoting is not required. Repeat this" << ::std::endl
+ << " option to specify more than one options file." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--default-options\033[0m \033[4mdir\033[0m The directory to load additional default options files" << ::std::endl
+ << " from." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--no-default-options\033[0m Don't load default options files." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--help\033[0m Print usage information and exit." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--version\033[0m Print version and exit." << ::std::endl;
+
+ p = ::build2::build::cli::usage_para::option;
+
+ return p;
+ }
+
+ typedef
+ std::map<std::string, void (*) (b_options&, ::build2::build::cli::scanner&)>
+ _cli_b_options_map;
+
+ static _cli_b_options_map _cli_b_options_map_;
+
+ struct _cli_b_options_map_init
+ {
+ _cli_b_options_map_init ()
+ {
+ _cli_b_options_map_["--build2-metadata"] =
+ &::build2::build::cli::thunk< b_options, uint64_t, &b_options::build2_metadata_,
+ &b_options::build2_metadata_specified_ >;
+ _cli_b_options_map_["-v"] =
+ &::build2::build::cli::thunk< b_options, &b_options::v_ >;
+ _cli_b_options_map_["-V"] =
+ &::build2::build::cli::thunk< b_options, &b_options::V_ >;
+ _cli_b_options_map_["--quiet"] =
+ &::build2::build::cli::thunk< b_options, &b_options::quiet_ >;
+ _cli_b_options_map_["-q"] =
+ &::build2::build::cli::thunk< b_options, &b_options::quiet_ >;
+ _cli_b_options_map_["--silent"] =
+ &::build2::build::cli::thunk< b_options, &b_options::silent_ >;
+ _cli_b_options_map_["--verbose"] =
+ &::build2::build::cli::thunk< b_options, uint16_t, &b_options::verbose_,
+ &b_options::verbose_specified_ >;
+ _cli_b_options_map_["--stat"] =
+ &::build2::build::cli::thunk< b_options, &b_options::stat_ >;
+ _cli_b_options_map_["--progress"] =
+ &::build2::build::cli::thunk< b_options, &b_options::progress_ >;
+ _cli_b_options_map_["--no-progress"] =
+ &::build2::build::cli::thunk< b_options, &b_options::no_progress_ >;
+ _cli_b_options_map_["--diag-color"] =
+ &::build2::build::cli::thunk< b_options, &b_options::diag_color_ >;
+ _cli_b_options_map_["--no-diag-color"] =
+ &::build2::build::cli::thunk< b_options, &b_options::no_diag_color_ >;
+ _cli_b_options_map_["--jobs"] =
+ &::build2::build::cli::thunk< b_options, size_t, &b_options::jobs_,
+ &b_options::jobs_specified_ >;
+ _cli_b_options_map_["-j"] =
+ &::build2::build::cli::thunk< b_options, size_t, &b_options::jobs_,
+ &b_options::jobs_specified_ >;
+ _cli_b_options_map_["--max-jobs"] =
+ &::build2::build::cli::thunk< b_options, size_t, &b_options::max_jobs_,
+ &b_options::max_jobs_specified_ >;
+ _cli_b_options_map_["-J"] =
+ &::build2::build::cli::thunk< b_options, size_t, &b_options::max_jobs_,
+ &b_options::max_jobs_specified_ >;
+ _cli_b_options_map_["--queue-depth"] =
+ &::build2::build::cli::thunk< b_options, size_t, &b_options::queue_depth_,
+ &b_options::queue_depth_specified_ >;
+ _cli_b_options_map_["-Q"] =
+ &::build2::build::cli::thunk< b_options, size_t, &b_options::queue_depth_,
+ &b_options::queue_depth_specified_ >;
+ _cli_b_options_map_["--file-cache"] =
+ &::build2::build::cli::thunk< b_options, string, &b_options::file_cache_,
+ &b_options::file_cache_specified_ >;
+ _cli_b_options_map_["--max-stack"] =
+ &::build2::build::cli::thunk< b_options, size_t, &b_options::max_stack_,
+ &b_options::max_stack_specified_ >;
+ _cli_b_options_map_["--serial-stop"] =
+ &::build2::build::cli::thunk< b_options, &b_options::serial_stop_ >;
+ _cli_b_options_map_["-s"] =
+ &::build2::build::cli::thunk< b_options, &b_options::serial_stop_ >;
+ _cli_b_options_map_["--dry-run"] =
+ &::build2::build::cli::thunk< b_options, &b_options::dry_run_ >;
+ _cli_b_options_map_["-n"] =
+ &::build2::build::cli::thunk< b_options, &b_options::dry_run_ >;
+ _cli_b_options_map_["--no-diag-buffer"] =
+ &::build2::build::cli::thunk< b_options, &b_options::no_diag_buffer_ >;
+ _cli_b_options_map_["--match-only"] =
+ &::build2::build::cli::thunk< b_options, &b_options::match_only_ >;
+ _cli_b_options_map_["--load-only"] =
+ &::build2::build::cli::thunk< b_options, &b_options::load_only_ >;
+ _cli_b_options_map_["--no-external-modules"] =
+ &::build2::build::cli::thunk< b_options, &b_options::no_external_modules_ >;
+ _cli_b_options_map_["--structured-result"] =
+ &::build2::build::cli::thunk< b_options, structured_result_format, &b_options::structured_result_,
+ &b_options::structured_result_specified_ >;
+ _cli_b_options_map_["--mtime-check"] =
+ &::build2::build::cli::thunk< b_options, &b_options::mtime_check_ >;
+ _cli_b_options_map_["--no-mtime-check"] =
+ &::build2::build::cli::thunk< b_options, &b_options::no_mtime_check_ >;
+ _cli_b_options_map_["--dump"] =
+ &::build2::build::cli::thunk< b_options, strings, &b_options::dump_,
+ &b_options::dump_specified_ >;
+ _cli_b_options_map_["--dump-format"] =
+ &::build2::build::cli::thunk< b_options, string, &b_options::dump_format_,
+ &b_options::dump_format_specified_ >;
+ _cli_b_options_map_["--dump-scope"] =
+ &::build2::build::cli::thunk< b_options, dir_paths, &b_options::dump_scope_,
+ &b_options::dump_scope_specified_ >;
+ _cli_b_options_map_["--dump-target"] =
+ &::build2::build::cli::thunk< b_options, vector<pair<name, optional<name>>>, &b_options::dump_target_,
+ &b_options::dump_target_specified_ >;
+ _cli_b_options_map_["--trace-match"] =
+ &::build2::build::cli::thunk< b_options, vector<name>, &b_options::trace_match_,
+ &b_options::trace_match_specified_ >;
+ _cli_b_options_map_["--trace-execute"] =
+ &::build2::build::cli::thunk< b_options, vector<name>, &b_options::trace_execute_,
+ &b_options::trace_execute_specified_ >;
+ _cli_b_options_map_["--no-column"] =
+ &::build2::build::cli::thunk< b_options, &b_options::no_column_ >;
+ _cli_b_options_map_["--no-line"] =
+ &::build2::build::cli::thunk< b_options, &b_options::no_line_ >;
+ _cli_b_options_map_["--buildfile"] =
+ &::build2::build::cli::thunk< b_options, path, &b_options::buildfile_,
+ &b_options::buildfile_specified_ >;
+ _cli_b_options_map_["--config-guess"] =
+ &::build2::build::cli::thunk< b_options, path, &b_options::config_guess_,
+ &b_options::config_guess_specified_ >;
+ _cli_b_options_map_["--config-sub"] =
+ &::build2::build::cli::thunk< b_options, path, &b_options::config_sub_,
+ &b_options::config_sub_specified_ >;
+ _cli_b_options_map_["--pager"] =
+ &::build2::build::cli::thunk< b_options, string, &b_options::pager_,
+ &b_options::pager_specified_ >;
+ _cli_b_options_map_["--pager-option"] =
+ &::build2::build::cli::thunk< b_options, strings, &b_options::pager_option_,
+ &b_options::pager_option_specified_ >;
+ _cli_b_options_map_["--options-file"] =
+ &::build2::build::cli::thunk< b_options, string, &b_options::options_file_,
+ &b_options::options_file_specified_ >;
+ _cli_b_options_map_["--default-options"] =
+ &::build2::build::cli::thunk< b_options, dir_path, &b_options::default_options_,
+ &b_options::default_options_specified_ >;
+ _cli_b_options_map_["--no-default-options"] =
+ &::build2::build::cli::thunk< b_options, &b_options::no_default_options_ >;
+ _cli_b_options_map_["--help"] =
+ &::build2::build::cli::thunk< b_options, &b_options::help_ >;
+ _cli_b_options_map_["--version"] =
+ &::build2::build::cli::thunk< b_options, &b_options::version_ >;
+ }
+ };
+
+ static _cli_b_options_map_init _cli_b_options_map_init_;
+
+ bool b_options::
+ _parse (const char* o, ::build2::build::cli::scanner& s)
+ {
+ _cli_b_options_map::const_iterator i (_cli_b_options_map_.find (o));
+
+ if (i != _cli_b_options_map_.end ())
+ {
+ (*(i->second)) (*this, s);
+ return true;
+ }
+
+ return false;
+ }
+
+ bool b_options::
+ _parse (::build2::build::cli::scanner& s,
+ ::build2::build::cli::unknown_mode opt_mode,
+ ::build2::build::cli::unknown_mode arg_mode)
+ {
+ // Can't skip combined flags (--no-combined-flags).
+ //
+ assert (opt_mode != ::build2::build::cli::unknown_mode::skip);
+
+ bool r = false;
+ bool opt = true;
+
+ while (s.more ())
+ {
+ const char* o = s.peek ();
+
+ if (std::strcmp (o, "--") == 0)
+ {
+ opt = false;
+ }
+
+ if (opt)
+ {
+ if (_parse (o, s))
+ {
+ r = true;
+ continue;
+ }
+
+ if (std::strncmp (o, "-", 1) == 0 && o[1] != '\0')
+ {
+ // Handle combined option values.
+ //
+ std::string co;
+ if (const char* v = std::strchr (o, '='))
+ {
+ co.assign (o, 0, v - o);
+ ++v;
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (co.c_str ()),
+ const_cast<char*> (v)
+ };
+
+ ::build2::build::cli::argv_scanner ns (0, ac, av);
+
+ if (_parse (co.c_str (), ns))
+ {
+ // Parsed the option but not its value?
+ //
+ if (ns.end () != 2)
+ throw ::build2::build::cli::invalid_value (co, v);
+
+ s.next ();
+ r = true;
+ continue;
+ }
+ else
+ {
+ // Set the unknown option and fall through.
+ //
+ o = co.c_str ();
+ }
+ }
+
+ // Handle combined flags.
+ //
+ char cf[3];
+ {
+ const char* p = o + 1;
+ for (; *p != '\0'; ++p)
+ {
+ if (!((*p >= 'a' && *p <= 'z') ||
+ (*p >= 'A' && *p <= 'Z') ||
+ (*p >= '0' && *p <= '9')))
+ break;
+ }
+
+ if (*p == '\0')
+ {
+ for (p = o + 1; *p != '\0'; ++p)
+ {
+ std::strcpy (cf, "-");
+ cf[1] = *p;
+ cf[2] = '\0';
+
+ int ac (1);
+ char* av[] =
+ {
+ cf
+ };
+
+ ::build2::build::cli::argv_scanner ns (0, ac, av);
+
+ if (!_parse (cf, ns))
+ break;
+ }
+
+ if (*p == '\0')
+ {
+ // All handled.
+ //
+ s.next ();
+ r = true;
+ continue;
+ }
+ else
+ {
+ // Set the unknown option and fall through.
+ //
+ o = cf;
+ }
+ }
+ }
+
+ switch (opt_mode)
+ {
+ case ::build2::build::cli::unknown_mode::skip:
+ {
+ s.skip ();
+ r = true;
+ continue;
+ }
+ case ::build2::build::cli::unknown_mode::stop:
+ {
+ break;
+ }
+ case ::build2::build::cli::unknown_mode::fail:
+ {
+ throw ::build2::build::cli::unknown_option (o);
+ }
+ }
+
+ break;
+ }
+ }
+
+ switch (arg_mode)
+ {
+ case ::build2::build::cli::unknown_mode::skip:
+ {
+ s.skip ();
+ r = true;
+ continue;
+ }
+ case ::build2::build::cli::unknown_mode::stop:
+ {
+ break;
+ }
+ case ::build2::build::cli::unknown_mode::fail:
+ {
+ throw ::build2::build::cli::unknown_argument (o);
+ }
+ }
+
+ break;
+ }
+
+ return r;
+ }
+}
+
+namespace build2
+{
+ ::build2::build::cli::usage_para
+ print_b_usage (::std::ostream& os, ::build2::build::cli::usage_para p)
+ {
+ CLI_POTENTIALLY_UNUSED (os);
+
+ if (p != ::build2::build::cli::usage_para::none)
+ os << ::std::endl;
+
+ os << "\033[1mSYNOPSIS\033[0m" << ::std::endl
+ << ::std::endl
+ << "\033[1mb --help\033[0m" << ::std::endl
+ << "\033[1mb --version\033[0m" << ::std::endl
+ << "\033[1mb\033[0m [\033[4moptions\033[0m] [\033[4mvariables\033[0m] [\033[4mbuildspec\033[0m]\033[0m" << ::std::endl
+ << ::std::endl
+ << "\033[4mbuildspec\033[0m = \033[4mmeta-operation\033[0m\033[1m(\033[0m\033[4moperation\033[0m\033[1m(\033[0m\033[4mtarget\033[0m...[\033[1m,\033[0m\033[4mparameters\033[0m]\033[1m)\033[0m...\033[1m)\033[0m...\033[0m" << ::std::endl
+ << ::std::endl
+ << "\033[1mDESCRIPTION\033[0m" << ::std::endl
+ << ::std::endl
+ << "The \033[1mbuild2\033[0m build system driver executes a set of meta-operations on operations" << ::std::endl
+ << "on targets according to the build specification, or \033[4mbuildspec\033[0m for short. This" << ::std::endl
+ << "process can be controlled by specifying driver \033[4moptions\033[0m and build system" << ::std::endl
+ << "\033[4mvariables\033[0m." << ::std::endl
+ << ::std::endl
+ << "Note that \033[4moptions\033[0m, \033[4mvariables\033[0m, and \033[4mbuildspec\033[0m fragments can be specified in any" << ::std::endl
+ << "order. To avoid treating an argument that starts with \033[1m'-'\033[0m as an option, add the" << ::std::endl
+ << "\033[1m'--'\033[0m separator. To avoid treating an argument that contains \033[1m'='\033[0m as a variable," << ::std::endl
+ << "add the second \033[1m'--'\033[0m separator." << ::std::endl;
+
+ p = ::build2::b_options::print_usage (os, ::build2::build::cli::usage_para::text);
+
+ if (p != ::build2::build::cli::usage_para::none)
+ os << ::std::endl;
+
+ os << "\033[1mDEFAULT OPTIONS FILES\033[0m" << ::std::endl
+ << ::std::endl
+ << "Instead of having a separate config file format for tool configuration, the" << ::std::endl
+ << "\033[1mbuild2\033[0m toolchain uses \033[4mdefault options files\033[0m which contain the same options as" << ::std::endl
+ << "what can be specified on the command line. The default options files are like" << ::std::endl
+ << "options files that one can specify with \033[1m--options-file\033[0m except that they are" << ::std::endl
+ << "loaded by default." << ::std::endl
+ << ::std::endl
+ << "The default options files for the build system driver are called \033[1mb.options\033[0m and" << ::std::endl
+ << "are searched for in the \033[1m.build2/\033[0m subdirectory of the home directory and in the" << ::std::endl
+ << "system directory (for example, \033[1m/etc/build2/\033[0m) if configured. Note that besides" << ::std::endl
+ << "options these files can also contain global variable overrides." << ::std::endl
+ << ::std::endl
+ << "Once the search is complete, the files are loaded in the reverse order, that" << ::std::endl
+ << "is, beginning from the system directory (if any), followed by the home" << ::std::endl
+ << "directory, and finishing off with the options specified on the command line. In" << ::std::endl
+ << "other words, the files are loaded from the more generic to the more specific" << ::std::endl
+ << "with the command line options having the ability to override any values" << ::std::endl
+ << "specified in the default options files." << ::std::endl
+ << ::std::endl
+ << "If a default options file contains \033[1m--no-default-options\033[0m, then the search is" << ::std::endl
+ << "stopped at the directory containing this file and no outer files are loaded. If" << ::std::endl
+ << "this option is specified on the command line, then none of the default options" << ::std::endl
+ << "files are searched for or loaded." << ::std::endl
+ << ::std::endl
+ << "An additional directory containing default options files can be specified with" << ::std::endl
+ << "\033[1m--default-options\033[0m. Its configuration files are loaded after the home directory." << ::std::endl
+ << ::std::endl
+ << "The order in which default options files are loaded is traced at the verbosity" << ::std::endl
+ << "level 3 (\033[1m-V\033[0m option) or higher." << ::std::endl
+ << ::std::endl
+ << "\033[1mJSON OUTPUT\033[0m" << ::std::endl
+ << ::std::endl
+ << "Commands that support the JSON output specify their formats as a serialized" << ::std::endl
+ << "representation of a C++ \033[1mstruct\033[0m or an array thereof. For example:" << ::std::endl
+ << ::std::endl
+ << "struct package" << ::std::endl
+ << "{" << ::std::endl
+ << " string name;" << ::std::endl
+ << "};" << ::std::endl
+ << ::std::endl
+ << "struct configuration" << ::std::endl
+ << "{" << ::std::endl
+ << " uint64_t id;" << ::std::endl
+ << " string path;" << ::std::endl
+ << " optional<string> name;" << ::std::endl
+ << " bool default;" << ::std::endl
+ << " vector<package> packages;" << ::std::endl
+ << "};" << ::std::endl
+ << ::std::endl
+ << "An example of the serialized JSON representation of \033[1mstruct\033[0m \033[1mconfiguration\033[0m:" << ::std::endl
+ << ::std::endl
+ << "{" << ::std::endl
+ << " \"id\": 1," << ::std::endl
+ << " \"path\": \"/tmp/hello-gcc\"," << ::std::endl
+ << " \"name\": \"gcc\"," << ::std::endl
+ << " \"default\": true," << ::std::endl
+ << " \"packages\": [" << ::std::endl
+ << " {" << ::std::endl
+ << " \"name\": \"hello\"" << ::std::endl
+ << " }" << ::std::endl
+ << " ]" << ::std::endl
+ << "}" << ::std::endl
+ << ::std::endl
+       << "This section provides details on the overall properties of such formats and" << ::std::endl
+ << "the semantics of the \033[1mstruct\033[0m serialization." << ::std::endl
+ << ::std::endl
+ << "The order of members in a JSON object is fixed as specified in the" << ::std::endl
+ << "corresponding \033[1mstruct\033[0m. While new members may be added in the future (and should" << ::std::endl
+ << "be ignored by older consumers), the semantics of the existing members" << ::std::endl
+ << "(including whether the top-level entry is an object or array) may not change." << ::std::endl
+ << ::std::endl
+ << "An object member is required unless its type is \033[1moptional<>\033[0m, \033[1mbool\033[0m, or \033[1mvector<>\033[0m" << ::std::endl
+ << "(array). For \033[1mbool\033[0m members absent means \033[1mfalse\033[0m. For \033[1mvector<>\033[0m members absent means" << ::std::endl
+ << "empty. An empty top-level array is always present." << ::std::endl
+ << ::std::endl
+ << "For example, the following JSON text is a possible serialization of the above" << ::std::endl
+ << "\033[1mstruct\033[0m \033[1mconfiguration\033[0m:" << ::std::endl
+ << ::std::endl
+ << "{" << ::std::endl
+ << " \"id\": 1," << ::std::endl
+ << " \"path\": \"/tmp/hello-gcc\"" << ::std::endl
+ << "}" << ::std::endl
+ << ::std::endl
+ << "\033[1mEXIT STATUS\033[0m" << ::std::endl
+ << ::std::endl
+ << "Non-zero exit status is returned in case of an error." << ::std::endl;
+
+ os << std::endl
+ << "\033[1mENVIRONMENT\033[0m" << ::std::endl
+ << ::std::endl
+ << "The \033[1mHOME\033[0m environment variable is used to determine the user's home directory." << ::std::endl
+ << "If it is not set, then \033[1mgetpwuid(3)\033[0m is used instead. This value is used to" << ::std::endl
+ << "shorten paths printed in diagnostics by replacing the home directory with \033[1m~/\033[0m." << ::std::endl
+ << "It is also made available to \033[1mbuildfile\033[0m's as the \033[1mbuild.home\033[0m variable." << ::std::endl
+ << ::std::endl
+ << "The \033[1mBUILD2_VAR_OVR\033[0m environment variable is used to propagate global variable" << ::std::endl
+ << "overrides to nested build system driver invocations. Its value is a list of" << ::std::endl
+ << "global variable assignments separated with newlines." << ::std::endl
+ << ::std::endl
+ << "The \033[1mBUILD2_DEF_OPT\033[0m environment variable is used to suppress loading of default" << ::std::endl
+ << "options files in nested build system driver invocations. Its values are \033[1mfalse\033[0m" << ::std::endl
+ << "or \033[1m0\033[0m to suppress and \033[1mtrue\033[0m or \033[1m1\033[0m to load." << ::std::endl;
+
+ p = ::build2::build::cli::usage_para::text;
+
+ return p;
+ }
+}
+
+// Begin epilogue.
+//
+//
+// End epilogue.
+
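The _parse(const char*, scanner&) overload above dispatches through the static name-to-thunk map populated by _cli_b_options_map_init_, while the scanner-driven _parse() overload additionally recognizes two combined spellings before treating an argument as unknown: --option=value is split at the first '=' and re-fed through a two-element argv_scanner, and a run of single-character flags such as -sn is expanded and parsed one flag at a time. The standalone sketch below (not part of this commit) illustrates the resulting equivalence through the public parse() interface declared in b-options.hxx next; the parse_args() helper is made up for the example and the usual CLI runtime convention of skipping argv[0] is assumed.

// Hypothetical sketch: the combined spellings "-sn" and "--jobs=8" should
// parse the same as their expanded forms "-s -n" and "--jobs 8".
//
#include <cassert>
#include <vector>
#include <initializer_list>

#include <libbuild2/b-options.hxx>

static build2::b_options
parse_args (std::initializer_list<const char*> args)
{
  std::vector<char*> argv;
  for (const char* a: args)
    argv.push_back (const_cast<char*> (a));

  int argc (static_cast<int> (argv.size ()));

  build2::b_options r;
  r.parse (argc, argv.data ()); // argv[0] assumed to be the program name.
  return r;
}

int
main ()
{
  build2::b_options a (parse_args ({"b", "-s", "-n", "--jobs", "8"}));
  build2::b_options b (parse_args ({"b", "-sn", "--jobs=8"}));

  assert (a.serial_stop () && b.serial_stop ());
  assert (a.dry_run ()     && b.dry_run ());
  assert (a.jobs () == 8   && b.jobs () == 8);
}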
diff --git a/libbuild2/b-options.hxx b/libbuild2/b-options.hxx
new file mode 100644
index 0000000..48dd35f
--- /dev/null
+++ b/libbuild2/b-options.hxx
@@ -0,0 +1,366 @@
+// -*- C++ -*-
+//
+// This file was generated by CLI, a command line interface
+// compiler for C++.
+//
+
+#ifndef LIBBUILD2_B_OPTIONS_HXX
+#define LIBBUILD2_B_OPTIONS_HXX
+
+// Begin prologue.
+//
+#include <libbuild2/export.hxx>
+//
+// End prologue.
+
+#include <libbuild2/common-options.hxx>
+
+namespace build2
+{
+ class LIBBUILD2_SYMEXPORT b_options
+ {
+ public:
+ b_options ();
+
+ // Return true if anything has been parsed.
+ //
+ bool
+ parse (int& argc,
+ char** argv,
+ bool erase = false,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ bool
+ parse (int start,
+ int& argc,
+ char** argv,
+ bool erase = false,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ bool
+ parse (int& argc,
+ char** argv,
+ int& end,
+ bool erase = false,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ bool
+ parse (int start,
+ int& argc,
+ char** argv,
+ int& end,
+ bool erase = false,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ bool
+ parse (::build2::build::cli::scanner&,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ // Merge options from the specified instance appending/overriding
+ // them as if they appeared after options in this instance.
+ //
+ void
+ merge (const b_options&);
+
+ // Option accessors.
+ //
+ const uint64_t&
+ build2_metadata () const;
+
+ bool
+ build2_metadata_specified () const;
+
+ const bool&
+ v () const;
+
+ const bool&
+ V () const;
+
+ const bool&
+ quiet () const;
+
+ const bool&
+ silent () const;
+
+ const uint16_t&
+ verbose () const;
+
+ bool
+ verbose_specified () const;
+
+ const bool&
+ stat () const;
+
+ const bool&
+ progress () const;
+
+ const bool&
+ no_progress () const;
+
+ const bool&
+ diag_color () const;
+
+ const bool&
+ no_diag_color () const;
+
+ const size_t&
+ jobs () const;
+
+ bool
+ jobs_specified () const;
+
+ const size_t&
+ max_jobs () const;
+
+ bool
+ max_jobs_specified () const;
+
+ const size_t&
+ queue_depth () const;
+
+ bool
+ queue_depth_specified () const;
+
+ const string&
+ file_cache () const;
+
+ bool
+ file_cache_specified () const;
+
+ const size_t&
+ max_stack () const;
+
+ bool
+ max_stack_specified () const;
+
+ const bool&
+ serial_stop () const;
+
+ const bool&
+ dry_run () const;
+
+ const bool&
+ no_diag_buffer () const;
+
+ const bool&
+ match_only () const;
+
+ const bool&
+ load_only () const;
+
+ const bool&
+ no_external_modules () const;
+
+ const structured_result_format&
+ structured_result () const;
+
+ bool
+ structured_result_specified () const;
+
+ const bool&
+ mtime_check () const;
+
+ const bool&
+ no_mtime_check () const;
+
+ const strings&
+ dump () const;
+
+ bool
+ dump_specified () const;
+
+ const string&
+ dump_format () const;
+
+ bool
+ dump_format_specified () const;
+
+ const dir_paths&
+ dump_scope () const;
+
+ bool
+ dump_scope_specified () const;
+
+ const vector<pair<name, optional<name>>>&
+ dump_target () const;
+
+ bool
+ dump_target_specified () const;
+
+ const vector<name>&
+ trace_match () const;
+
+ bool
+ trace_match_specified () const;
+
+ const vector<name>&
+ trace_execute () const;
+
+ bool
+ trace_execute_specified () const;
+
+ const bool&
+ no_column () const;
+
+ const bool&
+ no_line () const;
+
+ const path&
+ buildfile () const;
+
+ bool
+ buildfile_specified () const;
+
+ const path&
+ config_guess () const;
+
+ bool
+ config_guess_specified () const;
+
+ const path&
+ config_sub () const;
+
+ bool
+ config_sub_specified () const;
+
+ const string&
+ pager () const;
+
+ bool
+ pager_specified () const;
+
+ const strings&
+ pager_option () const;
+
+ bool
+ pager_option_specified () const;
+
+ const string&
+ options_file () const;
+
+ bool
+ options_file_specified () const;
+
+ const dir_path&
+ default_options () const;
+
+ bool
+ default_options_specified () const;
+
+ const bool&
+ no_default_options () const;
+
+ const bool&
+ help () const;
+
+ const bool&
+ version () const;
+
+ // Print usage information.
+ //
+ static ::build2::build::cli::usage_para
+ print_usage (::std::ostream&,
+ ::build2::build::cli::usage_para = ::build2::build::cli::usage_para::none);
+
+ // Implementation details.
+ //
+ protected:
+ bool
+ _parse (const char*, ::build2::build::cli::scanner&);
+
+ private:
+ bool
+ _parse (::build2::build::cli::scanner&,
+ ::build2::build::cli::unknown_mode option,
+ ::build2::build::cli::unknown_mode argument);
+
+ public:
+ uint64_t build2_metadata_;
+ bool build2_metadata_specified_;
+ bool v_;
+ bool V_;
+ bool quiet_;
+ bool silent_;
+ uint16_t verbose_;
+ bool verbose_specified_;
+ bool stat_;
+ bool progress_;
+ bool no_progress_;
+ bool diag_color_;
+ bool no_diag_color_;
+ size_t jobs_;
+ bool jobs_specified_;
+ size_t max_jobs_;
+ bool max_jobs_specified_;
+ size_t queue_depth_;
+ bool queue_depth_specified_;
+ string file_cache_;
+ bool file_cache_specified_;
+ size_t max_stack_;
+ bool max_stack_specified_;
+ bool serial_stop_;
+ bool dry_run_;
+ bool no_diag_buffer_;
+ bool match_only_;
+ bool load_only_;
+ bool no_external_modules_;
+ structured_result_format structured_result_;
+ bool structured_result_specified_;
+ bool mtime_check_;
+ bool no_mtime_check_;
+ strings dump_;
+ bool dump_specified_;
+ string dump_format_;
+ bool dump_format_specified_;
+ dir_paths dump_scope_;
+ bool dump_scope_specified_;
+ vector<pair<name, optional<name>>> dump_target_;
+ bool dump_target_specified_;
+ vector<name> trace_match_;
+ bool trace_match_specified_;
+ vector<name> trace_execute_;
+ bool trace_execute_specified_;
+ bool no_column_;
+ bool no_line_;
+ path buildfile_;
+ bool buildfile_specified_;
+ path config_guess_;
+ bool config_guess_specified_;
+ path config_sub_;
+ bool config_sub_specified_;
+ string pager_;
+ bool pager_specified_;
+ strings pager_option_;
+ bool pager_option_specified_;
+ string options_file_;
+ bool options_file_specified_;
+ dir_path default_options_;
+ bool default_options_specified_;
+ bool no_default_options_;
+ bool help_;
+ bool version_;
+ };
+}
+
+// Print page usage information.
+//
+namespace build2
+{
+ LIBBUILD2_SYMEXPORT ::build2::build::cli::usage_para
+ print_b_usage (::std::ostream&,
+ ::build2::build::cli::usage_para = ::build2::build::cli::usage_para::none);
+}
+
+#include <libbuild2/b-options.ixx>
+
+// Begin epilogue.
+//
+//
+// End epilogue.
+
+#endif // LIBBUILD2_B_OPTIONS_HXX
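As a rough usage sketch (hypothetical, not taken from the actual driver sources), the interface declared above is exercised by parsing the command line, printing usage on --help via print_b_usage(), and then reading values through the const accessors; parsing errors surface as CLI runtime exceptions, assumed here, as in the stock CLI runtime, to derive from std::exception.

// Hypothetical sketch, not the actual build2 driver.
//
#include <iostream>
#include <exception>

#include <libbuild2/b-options.hxx>

int
main (int argc, char* argv[])
{
  using namespace build2;

  try
  {
    b_options ops;

    // Unknown options fail, unknown (non-option) arguments stop parsing;
    // these are the defaults declared above.
    //
    ops.parse (argc, argv);

    if (ops.help ())
    {
      print_b_usage (std::cout);
      return 0;
    }

    if (ops.jobs_specified ())
      std::cerr << "requested concurrency: " << ops.jobs () << std::endl;
  }
  catch (const std::exception& e)
  {
    std::cerr << "error: " << e.what () << std::endl;
    return 1;
  }

  return 0;
}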
diff --git a/libbuild2/b-options.ixx b/libbuild2/b-options.ixx
new file mode 100644
index 0000000..34b0d39
--- /dev/null
+++ b/libbuild2/b-options.ixx
@@ -0,0 +1,405 @@
+// -*- C++ -*-
+//
+// This file was generated by CLI, a command line interface
+// compiler for C++.
+//
+
+// Begin prologue.
+//
+//
+// End prologue.
+
+namespace build2
+{
+ // b_options
+ //
+
+ inline const uint64_t& b_options::
+ build2_metadata () const
+ {
+ return this->build2_metadata_;
+ }
+
+ inline bool b_options::
+ build2_metadata_specified () const
+ {
+ return this->build2_metadata_specified_;
+ }
+
+ inline const bool& b_options::
+ v () const
+ {
+ return this->v_;
+ }
+
+ inline const bool& b_options::
+ V () const
+ {
+ return this->V_;
+ }
+
+ inline const bool& b_options::
+ quiet () const
+ {
+ return this->quiet_;
+ }
+
+ inline const bool& b_options::
+ silent () const
+ {
+ return this->silent_;
+ }
+
+ inline const uint16_t& b_options::
+ verbose () const
+ {
+ return this->verbose_;
+ }
+
+ inline bool b_options::
+ verbose_specified () const
+ {
+ return this->verbose_specified_;
+ }
+
+ inline const bool& b_options::
+ stat () const
+ {
+ return this->stat_;
+ }
+
+ inline const bool& b_options::
+ progress () const
+ {
+ return this->progress_;
+ }
+
+ inline const bool& b_options::
+ no_progress () const
+ {
+ return this->no_progress_;
+ }
+
+ inline const bool& b_options::
+ diag_color () const
+ {
+ return this->diag_color_;
+ }
+
+ inline const bool& b_options::
+ no_diag_color () const
+ {
+ return this->no_diag_color_;
+ }
+
+ inline const size_t& b_options::
+ jobs () const
+ {
+ return this->jobs_;
+ }
+
+ inline bool b_options::
+ jobs_specified () const
+ {
+ return this->jobs_specified_;
+ }
+
+ inline const size_t& b_options::
+ max_jobs () const
+ {
+ return this->max_jobs_;
+ }
+
+ inline bool b_options::
+ max_jobs_specified () const
+ {
+ return this->max_jobs_specified_;
+ }
+
+ inline const size_t& b_options::
+ queue_depth () const
+ {
+ return this->queue_depth_;
+ }
+
+ inline bool b_options::
+ queue_depth_specified () const
+ {
+ return this->queue_depth_specified_;
+ }
+
+ inline const string& b_options::
+ file_cache () const
+ {
+ return this->file_cache_;
+ }
+
+ inline bool b_options::
+ file_cache_specified () const
+ {
+ return this->file_cache_specified_;
+ }
+
+ inline const size_t& b_options::
+ max_stack () const
+ {
+ return this->max_stack_;
+ }
+
+ inline bool b_options::
+ max_stack_specified () const
+ {
+ return this->max_stack_specified_;
+ }
+
+ inline const bool& b_options::
+ serial_stop () const
+ {
+ return this->serial_stop_;
+ }
+
+ inline const bool& b_options::
+ dry_run () const
+ {
+ return this->dry_run_;
+ }
+
+ inline const bool& b_options::
+ no_diag_buffer () const
+ {
+ return this->no_diag_buffer_;
+ }
+
+ inline const bool& b_options::
+ match_only () const
+ {
+ return this->match_only_;
+ }
+
+ inline const bool& b_options::
+ load_only () const
+ {
+ return this->load_only_;
+ }
+
+ inline const bool& b_options::
+ no_external_modules () const
+ {
+ return this->no_external_modules_;
+ }
+
+ inline const structured_result_format& b_options::
+ structured_result () const
+ {
+ return this->structured_result_;
+ }
+
+ inline bool b_options::
+ structured_result_specified () const
+ {
+ return this->structured_result_specified_;
+ }
+
+ inline const bool& b_options::
+ mtime_check () const
+ {
+ return this->mtime_check_;
+ }
+
+ inline const bool& b_options::
+ no_mtime_check () const
+ {
+ return this->no_mtime_check_;
+ }
+
+ inline const strings& b_options::
+ dump () const
+ {
+ return this->dump_;
+ }
+
+ inline bool b_options::
+ dump_specified () const
+ {
+ return this->dump_specified_;
+ }
+
+ inline const string& b_options::
+ dump_format () const
+ {
+ return this->dump_format_;
+ }
+
+ inline bool b_options::
+ dump_format_specified () const
+ {
+ return this->dump_format_specified_;
+ }
+
+ inline const dir_paths& b_options::
+ dump_scope () const
+ {
+ return this->dump_scope_;
+ }
+
+ inline bool b_options::
+ dump_scope_specified () const
+ {
+ return this->dump_scope_specified_;
+ }
+
+ inline const vector<pair<name, optional<name>>>& b_options::
+ dump_target () const
+ {
+ return this->dump_target_;
+ }
+
+ inline bool b_options::
+ dump_target_specified () const
+ {
+ return this->dump_target_specified_;
+ }
+
+ inline const vector<name>& b_options::
+ trace_match () const
+ {
+ return this->trace_match_;
+ }
+
+ inline bool b_options::
+ trace_match_specified () const
+ {
+ return this->trace_match_specified_;
+ }
+
+ inline const vector<name>& b_options::
+ trace_execute () const
+ {
+ return this->trace_execute_;
+ }
+
+ inline bool b_options::
+ trace_execute_specified () const
+ {
+ return this->trace_execute_specified_;
+ }
+
+ inline const bool& b_options::
+ no_column () const
+ {
+ return this->no_column_;
+ }
+
+ inline const bool& b_options::
+ no_line () const
+ {
+ return this->no_line_;
+ }
+
+ inline const path& b_options::
+ buildfile () const
+ {
+ return this->buildfile_;
+ }
+
+ inline bool b_options::
+ buildfile_specified () const
+ {
+ return this->buildfile_specified_;
+ }
+
+ inline const path& b_options::
+ config_guess () const
+ {
+ return this->config_guess_;
+ }
+
+ inline bool b_options::
+ config_guess_specified () const
+ {
+ return this->config_guess_specified_;
+ }
+
+ inline const path& b_options::
+ config_sub () const
+ {
+ return this->config_sub_;
+ }
+
+ inline bool b_options::
+ config_sub_specified () const
+ {
+ return this->config_sub_specified_;
+ }
+
+ inline const string& b_options::
+ pager () const
+ {
+ return this->pager_;
+ }
+
+ inline bool b_options::
+ pager_specified () const
+ {
+ return this->pager_specified_;
+ }
+
+ inline const strings& b_options::
+ pager_option () const
+ {
+ return this->pager_option_;
+ }
+
+ inline bool b_options::
+ pager_option_specified () const
+ {
+ return this->pager_option_specified_;
+ }
+
+ inline const string& b_options::
+ options_file () const
+ {
+ return this->options_file_;
+ }
+
+ inline bool b_options::
+ options_file_specified () const
+ {
+ return this->options_file_specified_;
+ }
+
+ inline const dir_path& b_options::
+ default_options () const
+ {
+ return this->default_options_;
+ }
+
+ inline bool b_options::
+ default_options_specified () const
+ {
+ return this->default_options_specified_;
+ }
+
+ inline const bool& b_options::
+ no_default_options () const
+ {
+ return this->no_default_options_;
+ }
+
+ inline const bool& b_options::
+ help () const
+ {
+ return this->help_;
+ }
+
+ inline const bool& b_options::
+ version () const
+ {
+ return this->version_;
+ }
+}
+
+// Begin epilogue.
+//
+//
+// End epilogue.
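The --structured-result help text in b-options.cxx above documents the lines format as one 'state meta-operation operation target' line per buildspec action/target pair written to stdout. The filter below is a hypothetical consumer of that output (it is not part of this commit and assumes the first three whitespace-separated fields are single tokens, with the rest of the line being the target).

// Hypothetical consumer of `b --structured-result=lines <buildspec>` output
// piped to stdin: tally result states and list failed targets.
//
#include <iostream>
#include <map>
#include <sstream>
#include <string>
#include <vector>

int
main ()
{
  std::map<std::string, unsigned long> counts; // state -> number of targets
  std::vector<std::string> failed;             // targets in the failed state

  std::string line;
  while (std::getline (std::cin, line))
  {
    std::istringstream is (line);

    std::string state, mo, op;
    if (!(is >> state >> mo >> op))
      continue; // Not a result line; ignore.

    std::string target;
    std::getline (is >> std::ws, target); // The rest of the line.

    counts[state]++;

    if (state == "failed")
      failed.push_back (target);
  }

  for (const auto& c: counts)
    std::cout << c.first << ": " << c.second << std::endl;

  for (const std::string& t: failed)
    std::cout << "failed: " << t << std::endl;

  return failed.empty () ? 0 : 1;
}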
diff --git a/libbuild2/b.cli b/libbuild2/b.cli
new file mode 100644
index 0000000..f58b869
--- /dev/null
+++ b/libbuild2/b.cli
@@ -0,0 +1,1074 @@
+// file : libbuild2/b.cli
+// license : MIT; see accompanying LICENSE file
+
+include <libbuild2/common.cli>;
+
+"\section=1"
+"\name=b"
+"\summary=build system driver"
+
+namespace build2
+{
+ {
+ "<options>
+ <variables>
+ <buildspec> <meta-operation> <operation> <target> <parameters>",
+
+ "\h|SYNOPSIS|
+
+ \c{\b{b --help}\n
+ \b{b --version}\n
+ \b{b} [<options>] [<variables>] [<buildspec>]}
+
+ \c{<buildspec> = <meta-operation>\b{(}<operation>\b{(}<target>...[\b{,}<parameters>]\b{)}...\b{)}...}
+
+ \h|DESCRIPTION|
+
+ The \cb{build2} build system driver executes a set of meta-operations on
+ operations on targets according to the build specification, or
+ \i{buildspec} for short. This process can be controlled by specifying
+ driver <options> and build system <variables>.
+
+ Note that <options>, <variables>, and <buildspec> fragments can be
+ specified in any order. To avoid treating an argument that starts with
+ \cb{'-'} as an option, add the \cb{'--'} separator. To avoid treating an
+ argument that contains \cb{'='} as a variable, add the second \cb{'--'}
+ separator."
+ }
+
+ // For usage it's nice to see the list of options on the first page. So
+ // let's not put this "extended" description into usage.
+ //
+ {
+ "<meta-operation> <operation> <target> <parameters> <src-base>",
+ "",
+
+ "All components in the buildspec can be omitted. If <meta-operation> is
+ omitted, then it defaults to \cb{perform}. If <operation> is omitted,
+ then it defaults to the default operation for this meta-operation. For
+ \cb{perform} it is \cb{update}. Finally, if <target> is omitted, then it
+ defaults to the current working directory. A meta-operation on operation
+ is called an \i{action}. Some operations and meta-operations may take
+ additional <parameters>. For example:
+
+ \
+ $ b # perform(update(./))
+ $ b foo/ # perform(update(foo/))
+ $ b foo/ bar/ # perform(update(foo/ bar/))
+ $ b update # perform(update(./))
+ $ b 'clean(../)' # perform(clean(../))
+ $ b perform # perform(update(./))
+ $ b configure # configure(?(./))
+ $ b 'configure(../)' # configure(?(../))
+ $ b clean update # perform(clean(./) update(./))
+ $ b configure update # configure(?(./)) perform(update(./))
+ $ b 'create(conf/, cxx)' # create(?(conf/), cxx)
+ \
+
+ Notice the question mark used to show the (imaginary) default operation
+ for the \cb{configure} meta-operation. For \cb{configure} the default
+ operation is \"all operations\". That is, it will configure all the
+ operations for the specified target.
+
+ You can also \"generate\" multiple operations for the same set of targets.
+ Compare:
+
+ \
+ $ b 'clean(foo/ bar/)' 'update(foo/ bar/)'
+ $ b '{clean update}(foo/ bar/)'
+ \
+
+ Some more useful buildspec examples:
+
+ \
+ $ b '{clean update}(...)' # rebuild
+ $ b '{clean update clean}(...)' # make sure builds
+ $ b '{clean test clean}(...)' # make sure passes tests
+ $ b '{clean disfigure}(...)' # similar to distclean
+ \
+
+    In POSIX shells parentheses are special characters and must be quoted
+ when used in a buildspec. Besides being an inconvenience in itself,
+ quoting also inhibits path auto-completion. To help with this situation a
+ shortcut syntax is available for executing a single operation or
+ meta-operation, for example:
+
+ \
+ $ b clean: foo/ bar/ # clean(foo/ bar/)
+ $ b configure: src/@out/ # configure(src/@out/)
+ $ b create: conf/, cxx # create(conf/, cxx)
+ $ b configure: config.cxx=g++ src/ # configure(src/) config.cxx=g++
+ \
+
+ To activate the shortcut syntax the first buildspec argument must start
+ with an operation or meta-operation name and end with a colon (\cb{:}).
+ To transform the shortcut syntax to the normal buildspec syntax the colon
+ is replaced with the opening parenthesis ('\cb{(}'), the rest of the
+ buildspec arguments are treated as is, and the final closing parenthesis
+ ('\cb{)}') is added.
+
+ For each <target> the driver expects to find \cb{buildfile} either in the
+ target's directory or, if the directory is part of the \cb{out} tree
+ (\cb{out_base}), in the corresponding \cb{src} directory (\cb{src_base}).
+
+ For example, assuming \cb{foo/} is the source directory of a project:
+
+ \
+ $ b foo/ # out_base=src_base=foo/
+ $ b foo-out/ # out_base=foo-out/ src_base=foo/
+ $ b foo-out/exe{foo} # out_base=foo-out/ src_base=foo/
+ \
+
+ An exception to this requirement is a directory target in which case,
+ provided the directory has subdirectories, an \i{implied} \cb{buildfile}
+ with the following content is assumed:
+
+ \
+ # Implied directory buildfile: build all subdirectories.
+ #
+ ./: */
+ \
+
+ In the above example, we assumed that the build system driver was able to
+ determine the association between \cb{out_base} and \cb{src_base}. In
+ case \cb{src_base} and \cb{out_base} are not the same directory, this is
+ achieved in one of two ways: the \cb{config} module (which implements the
+ \cb{configure}, \cb{disfigure}, and \cb{create} meta-operations) saves
+ this association as part of the configuration process. If, however, the
+ association hasn't been saved, then we have to specify \cb{src_base}
+ explicitly using the following extended <target> syntax:
+
+ \c{<src-base>/@<target>}
+
+ Continuing with the previous example:
+
+ \
+ $ b foo/@foo-out/exe{foo} # out_base=foo-out/ src_base=foo/
+ \
+
+ Normally, you would need to specify \cb{src_base} explicitly only once,
+ during configuration. For example, a typical usage would be:
+
+ \
+ $ b configure: foo/@foo-out/ # src_base is saved
+ $ b foo-out/ # no need to specify src_base
+ $ b clean: foo-out/exe{foo} # no need to specify src_base
+ \
+
+ Besides in and out of source builds, \cb{build2} also supports
+ configuring a project's source directory as \i{forwarded} to an out of
+ source build. With such a forwarded configuration in place, if we run the
+ build system driver from the source directory, it will automatically
+ build in the output directory and \i{backlink} (using symlinks or another
+ suitable mechanism) certain \"interesting\" targets (executables,
+ documentation, etc) to the source directory for easy access. Continuing
+ with the previous example:
+
+ \
+ $ b configure: foo/@foo-out/,forward # foo/ forwarded to foo-out/
+ $ cd foo/
+ $ b # build in foo-out/
+ $ ./foo # symlink to foo-out/foo
+ \
+
+ The ability to specify \cb{build2} variables as part of the command line
+ is normally used to pass configuration values, for example:
+
+ \
+ $ b config.cxx=clang++ config.cxx.coptions=-O3
+ \
+
+ Similar to buildspec, POSIX shells often inhibit path auto-completion on
+ the right hand side of a variable assignment. To help with this situation
+ the assignment can be broken down into three separate command line
+ arguments, for example:
+
+ \
+ $ b config.import.libhello = ../libhello/
+ \
+
+ The build system has the following built-in and pre-defined
+ meta-operations:
+
+ \dl|
+
+ \li|\cb{perform}
+
+ Perform an operation.|
+
+ \li|\cb{configure}
+
+ Configure all operations supported by a project and save the result
+ in the project's \cb{build/config.build} file. Implemented by the
+ \cb{config} module. For example:
+
+ \
+ $ b configure \
+ config.cxx=clang++ \
+ config.cxx.coptions=-O3 \
+ config.install.root=/usr/local \
+ config.install.root.sudo=sudo
+ \
+
+ Use the \cb{forward} parameter to instead configure a source
+ directory as forwarded to an out of source build. For example:
+
+ \
+ $ b configure: src/@out/,forward
+ \
+
+ |
+
+ \li|\cb{disfigure}
+
+ Disfigure all operations supported by a project and remove the
+ project's \cb{build/config.build} file. Implemented by the
+ \cb{config} module.
+
+ Use the \cb{forward} parameter to instead disfigure forwarding of a
+ source directory to an out of source build. For example:
+
+ \
+ $ b disfigure: src/,forward
+ \
+
+ |
+
+ \li|\cb{create}
+
+ Create and configure a \i{configuration} project. Implemented by the
+ \cb{config} module.
+
+ Normally a \cb{build2} project is created manually by writing the
+ \cb{bootstrap.build} and \cb{config.build} files, adding source
+ files, and so on. However, a special kind of project, which we call
+ \i{configuration}, is often useful. Such a project doesn't have any
+ source files of its own. Instead, it serves as an amalgamation for
+ building other projects as part of it. Doing it this way has two
+ major benefits: sub-projects automatically resolve their imports
+      to other projects in the amalgamation and sub-projects inherit their
+ configuration from the amalgamation (which means if we want to change
+ something, we only need to do it in one place).
+
+ As an example, let's assume we have two C++ projects: the
+ \cb{libhello} library in \cb{libhello/} and the \cb{hello} executable
+ that imports it in \cb{hello/}. And we want to build \cb{hello} with
+ \cb{clang++}.
+
+ One way to do it would be to configure and build each project in its
+ own directory, for example:
+
+ \
+ $ b configure: libhello/@libhello-clang/ config.cxx=clang++
+ $ b configure: hello/@hello-clang/ config.cxx=clang++ \
+ config.import.libhello=libhello-clang/
+ \
+
+ The two drawbacks, as mentioned above, are the need to explicitly
+ resolve the import and having to make changes in multiple places
+      should we, for example, want to switch from \cb{clang++} to \cb{g++}.
+
+ We can, however, achieve the same end result but without any of the
+ drawbacks using the configuration project:
+
+ \
+ $ b create: clang/,cxx config.cxx=clang++ # Creates clang/.
+ $ b configure: libhello/@clang/libhello/
+ $ b configure: hello/@clang/hello/
+ \
+
+ The targets passed to the \cb{create} meta-operation must be
+ directories which should either not exist or be empty. For each
+ such directory \cb{create} first initializes a project as described
+ below and then configures it by executing the \cb{configure}
+ meta-operation.
+
+ The first optional parameter to \cb{create} is the list of modules to
+ load in \cb{root.build}. By default, \cb{create} appends \cb{.config}
+ to the names of these modules so that only their configurations are
+ loaded. You can override this behavior by specifying the period
+ (\cb{.}) after the module name. You can also instruct \cb{create} to
+ use the optional module load by prefixing the module name with the
+ question mark (\cb{?}).
+
+ The second optional parameter is the list of modules to load in
+ \cb{bootstrap.build}. If not specified, then the \cb{test},
+ \cb{dist}, and \cb{install} modules are loaded by default. The
+ \cb{config} module is always loaded first.
+
+ Besides creating the project's \cb{bootstrap.build} and \cb{root.build},
+ \cb{create} also writes the root \cb{buildfile} with the following
+ contents:
+
+ \
+ ./: {*/ -build/}
+ \
+
+ If used, this \cb{buildfile} will build all the sub-projects
+ currently present in the configuration.|
+
+ \li|\cb{dist}
+
+ Prepare a distribution containing all files necessary to perform all
+ operations in a project. Implemented by the \cb{dist} module.|
+
+ \li|\cb{info}
+
+ Print basic information (name, version, source and output
+ directories, etc) about one or more projects to \cb{stdout},
+ separating multiple projects with a blank line. Each project is
+ identified by its root directory target. For example (some output
+ is omitted):
+
+ \
+ $ b info: libfoo/ libbar/
+ project: libfoo
+ version: 1.0.0
+ src_root: /tmp/libfoo
+ out_root: /tmp/libfoo
+ subprojects: @tests
+
+ project: libbar
+ version: 2.0.0
+ src_root: /tmp/libbar
+ out_root: /tmp/libbar-out
+ subprojects: @tests
+ \
+
+ To omit discovering and printing subprojects information, use the
+ \cb{no_subprojects} parameter, for example:
+
+ \
+ $ b info: libfoo/,no_subprojects
+ \
+
+ To instead print this information in the JSON format, use the
+ \cb{json} parameter, for example:
+
+ \
+ $ b info: libfoo/,json
+ \
+
+ In this case the output is a JSON array of objects which are the
+ serialized representation of the following C++ \cb{struct}
+ \cb{project_info}:
+
+ \
+ struct subproject
+ {
+ string path;
+ optional<string> name;
+ };
+
+ struct project_info
+ {
+ optional<string> project;
+ optional<string> version;
+ optional<string> summary;
+ optional<string> url;
+ string src_root;
+ string out_root;
+ optional<string> amalgamation;
+ vector<subproject> subprojects;
+ vector<string> operations;
+ vector<string> meta_operations;
+ vector<string> modules;
+ };
+ \
+
+ For example:
+
+ \
+ [
+ {
+ \"project\": \"libfoo\",
+ \"version\": \"1.0.0\",
+ \"summary\": \"libfoo C++ library\",
+ \"src_root\": \"/tmp/libfoo\",
+ \"out_root\": \"/tmp/gcc-debug/libfoo\",
+ \"amalgamation\": \"..\",
+ \"subprojects\": [
+ {
+ \"path\": \"tests\"
+ }
+ ],
+ \"operations\": [
+ \"update\",
+ \"clean\",
+ \"test\",
+ \"update-for-test\",
+ \"install\",
+ \"uninstall\",
+ \"update-for-install\"
+ ],
+ \"meta-operations\": [
+ \"perform\",
+ \"configure\",
+ \"disfigure\",
+ \"dist\",
+ \"info\"
+ ],
+ \"modules\": [
+ \"version\",
+ \"config\",
+ \"test\",
+ \"install\",
+ \"dist\"
+ ]
+ }
+ ]
+ \
+
+ See the JSON OUTPUT section below for details on the overall
+ properties of this format and the semantics of the \cb{struct}
+ serialization.
+
+ ||
+
+ The build system has the following built-in and pre-defined operations:
+
+ \dl|
+
+ \li|\cb{update}
+
+ Update a target.|
+
+ \li|\cb{clean}
+
+ Clean a target.|
+
+ \li|\cb{test}
+
+ Test a target. Performs \cb{update} as a pre-operation. Implemented by
+ the \cb{test} module.|
+
+ \li|\cb{update-for-test}
+
+ Update a target for testing. This operation is equivalent to the
+ \cb{update} pre-operation as executed by the \cb{test} operation and
+ can be used to only update what is necessary for testing. Implemented
+ by the \cb{test} module.|
+
+ \li|\cb{install}
+
+ Install a target. Performs \cb{update} as a pre-operation. Implemented
+ by the \cb{install} module.|
+
+
+ \li|\cb{uninstall}
+
+ Uninstall a target. Performs \cb{update} as a pre-operation.
+ Implemented by the \cb{install} module.|
+
+ \li|\cb{update-for-install}
+
+ Update a target for installation. This operation is equivalent to the
+ \cb{update} pre-operation as executed by the \cb{install} operation
+ and can be used to only update what is necessary for
+ installation. Implemented by the \cb{install} module.||
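+
+ For example, assuming an illustrative project in \cb{hello/}, these
+ operations can be invoked as follows:
+
+ \
+ $ b                          # perform(update) on ./ by default
+ $ b update: hello/
+ $ b test: hello/exe{hello}   # update and run tests
+ $ b clean: hello/
+ \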
+
+ Note that buildspec and command line variable values are treated as
+ \cb{buildfile} fragments and so can use quoting and escaping as well as
+ contain variable expansions and evaluation contexts. However, to be more
+ usable on various platforms, escaping in these two situations is limited
+ to the \i{effective sequences} of \cb{\\'}, \cb{\\\"}, \cb{\\\\},
+ \cb{\\$}, and \cb{\\(} with all other sequences interpreted as is.
+ Together with double-quoting this is sufficient to represent any value.
+ For example:
+
+ \
+ $ b config.install.root=c:\projects\install
+ $ b \"config.install.root='c:\Program Files\test\'\"
+ $ b 'config.cxx.poptions=-DFOO_STR=\"foo\"'
+ \
+ "
+ }
+
+ class b_options
+ {
+ "\h#options|OPTIONS|"
+
+ uint64_t --build2-metadata; // Leave undocumented/hidden.
+
+ bool -v
+ {
+ "Print actual commands being executed. This options is equivalent to
+ \cb{--verbose 2}."
+ }
+
+ bool -V
+ {
+ "Print all underlying commands being executed. This options is
+ equivalent to \cb{--verbose 3}."
+ }
+
+ bool --quiet|-q
+ {
+ "Run quietly, only printing error messages in most contexts. In certain
+ contexts (for example, while updating build system modules) this
+ verbosity level may be ignored. Use \cb{--silent} to run quietly in all
+ contexts. This option is equivalent to \cb{--verbose 0}."
+ }
+
+ bool --silent
+ {
+ "Run quietly, only printing error messages in all contexts."
+ }
+
+ uint16_t --verbose = 1
+ {
+ "<level>",
+ "Set the diagnostics verbosity to <level> between 0 and 6. Level 0
+ disables any non-error messages (but see the difference between
+ \cb{--quiet} and \cb{--silent}) while level 6 produces lots of
+ information, with level 1 being the default. The following additional
+ types of diagnostics are produced at each level:
+
+ \ol|
+
+ \li|High-level information messages.|
+
+ \li|Essential underlying commands being executed.|
+
+ \li|All underlying commands being executed.|
+
+ \li|Information that could be helpful to the user.|
+
+ \li|Information that could be helpful to the developer.|
+
+ \li|Even more detailed information.||"
+ }
+
+ bool --stat
+ {
+ "Display build statistics."
+ }
+
+ bool --progress
+ {
+ "Display build progress. If printing to a terminal the progress is
+ displayed by default for low verbosity levels. Use \cb{--no-progress}
+ to suppress."
+ }
+
+ bool --no-progress
+ {
+ "Don't display build progress."
+ }
+
+ bool --diag-color
+ {
+ "Use color in diagnostics. If printing to a terminal the color is used
+ by default provided the terminal is not dumb. Use \cb{--no-diag-color}
+ to suppress.
+
+ This option affects the diagnostics printed by the build system itself.
+ Some rules may also choose to propagate its value to tools (such as
+ compilers) that they invoke."
+ }
+
+ bool --no-diag-color
+ {
+ "Don't use color in diagnostics."
+ }
+
+ size_t --jobs|-j
+ {
+ "<num>",
+ "Number of active jobs to perform in parallel. This includes both the
+ number of active threads inside the build system as well as the number
+ of external commands (compilers, linkers, etc) started but not yet
+ finished. If this option is not specified or specified with the \cb{0}
+ value, then the number of available hardware threads is used."
+ }
+
+ size_t --max-jobs|-J
+ {
+ "<num>",
+ "Maximum number of jobs (threads) to create. The default is 8x the
+ number of active jobs (\cb{--jobs|-j}) on 32-bit architectures and 32x
+ on 64-bit. See the build system scheduler implementation for details."
+ }
+
+ size_t --queue-depth|-Q = 4
+ {
+ "<num>",
+ "The queue depth as a multiplier over the number of active jobs.
+ Normally we want a deeper queue if the jobs take long (for example,
+ compilation) and shorter if they are quick (for example, simple tests).
+ The default is 4. See the build system scheduler implementation for
+ details."
+ }
+
+ string --file-cache
+ {
+ "<impl>",
+ "File cache implementation to use for intermediate build results. Valid
+ values are \cb{noop} (no caching or compression) and \cb{sync-lz4} (no
+ caching with synchronous LZ4 on-disk compression). If this option is
+ not specified, then a suitable default implementation is used
+ (currently \cb{sync-lz4})."
+ }
+
+ size_t --max-stack
+ {
+ "<num>",
+ "The maximum stack size in KBytes to allow for newly created threads.
+ For \i{pthreads}-based systems the driver queries the stack size of
+ the main thread and uses the same size for creating additional threads.
+ This allows adjusting the stack size using familiar mechanisms, such
+ as \cb{ulimit}. Sometimes, however, the stack size of the main thread
+ is excessively large. As a result, the driver checks if it is greater
+ than a predefined limit (64MB on 64-bit systems and 32MB on 32-bit
+ ones) and caps it to a more sensible value (8MB) if that's the case.
+ This option allows you to override this check with the special zero
+ value indicating that the main thread stack size should be used as is."
+ }
+
+ bool --serial-stop|-s
+ {
+ "Run serially and stop at the first error. This mode is useful to
+ investigate build failures that are caused by build system errors
+ rather than compilation errors. Note that if you don't want to keep
+ going but still want parallel execution, add \cb{--jobs|-j} (for
+ example \cb{-j\ 0} for default concurrency). Note also that during
+ serial execution there is no diagnostics buffering and child
+ process' \cb{stderr} is a terminal (unless redirected; see
+ \cb{--no-diag-buffer} for details)."
+ }
+
+ bool --dry-run|-n
+ {
+ "Print commands without actually executing them. Note that commands
+ that are required to create an accurate build state will still be
+ executed and the extracted auxiliary dependency information saved. In
+ other words, this is not the \i{\"don't touch the filesystem\"} mode
+ but rather \i{\"do minimum amount of work to show what needs to be
+ done\"}. Note also that only the \cb{perform} meta-operation supports
+ this mode."
+ }
+
+ bool --no-diag-buffer
+ {
+ "Do not buffer diagnostics from child processes. By default, unless
+ running serially, such diagnostics is buffered and printed all at
+ once after each child exits in order to prevent interleaving.
+ However, this can have side-effects since the child process'
+ \cb{stderr} is no longer a terminal. Most notably, the use of
+ color in diagnostics may be disabled by some programs. On the
+ other hand, depending on the platform and programs invoked, the
+ interleaving diagnostics may not break lines and thus could be
+ tolerable."
+ }
+
+ bool --match-only
+ {
+ "Match the rules without executing the operation. This mode is primarily
+ useful for profiling and dumping the build system state."
+ }
+
+ bool --load-only
+ {
+ "Match the rules only to \cb{alias{\}} targets ignoring other targets
+ and without executing the operation. In particular, this has the
+ effect of loading all the subdirectory \cb{buildfiles} that are not
+ explicitly included. Note that this option can only be used with the
+ \cb{perform(update)} action on an \cb{alias{\}} target, usually
+ \cb{dir{\}}."
+ }
+
+ bool --no-external-modules
+ {
+ "Don't load external modules during project bootstrap. Note that this
+ option can only be used with meta-operations that do not load the
+ project's \cb{buildfiles}, such as \cb{info}."
+ }
+
+ structured_result_format --structured-result
+ {
+ "<fmt>",
+
+ "Write the result of execution in a structured form. In this mode,
+ instead of printing to \cb{stderr} diagnostics messages about the
+ outcome of executing actions on targets, the driver writes to
+ \cb{stdout} a machine-readable result description in the specified
+ format. Valid values for this option are \cb{lines} and \cb{json}.
+ Note that currently only the \cb{perform} meta-operation supports
+ the structured result output.
+
+ If the output format is \cb{lines}, then the result is written one line
+ per buildspec action/target pair. Each line has the following form:
+
+ \c{\i{state} \i{meta-operation} \i{operation} \i{target}}
+
+ Where \ci{state} can be one of \cb{unchanged}, \cb{changed}, or
+ \cb{failed}. If the action is a pre or post operation, then the
+ outer operation is specified in parentheses. For example:
+
+ \
+ unchanged perform update(test) /tmp/hello/hello/exe{hello}
+ changed perform test /tmp/hello/hello/exe{hello}
+ \
+
+ If the output format is \cb{json}, then the output is a JSON array of
+ objects which are the serialized representation of the following C++
+ \cb{struct} \cb{target_action_result}:
+
+ \
+ struct target_action_result
+ {
+ string target;
+ string display_target;
+ string target_type;
+ optional<string> target_path;
+ string meta_operation;
+ string operation;
+ optional<string> outer_operation;
+ string state;
+ };
+ \
+
+ For example:
+
+ \
+ [
+ {
+ \"target\": \"/tmp/hello/hello/exe{hello.}\",
+ \"display_target\": \"/tmp/hello/hello/exe{hello}\",
+ \"target_type\": \"exe\",
+ \"target_path\": \"/tmp/hello/hello/hello\",
+ \"meta_operation\": \"perform\",
+ \"operation\": \"update\",
+ \"outer_operation\": \"test\",
+ \"state\": \"unchanged\"
+ },
+ {
+ \"target\": \"/tmp/hello/hello/exe{hello.}\",
+ \"display_target\": \"/tmp/hello/hello/exe{hello}\",
+ \"target_type\": \"exe\",
+ \"target_path\": \"/tmp/hello/hello/hello\",
+ \"meta_operation\": \"perform\",
+ \"operation\": \"test\",
+ \"state\": \"changed\"
+ }
+ ]
+ \
+
+ See the JSON OUTPUT section below for details on the overall
+ properties of this format and the semantics of the \cb{struct}
+ serialization.
+
+ The \cb{target} member is the target name that is qualified with the
+ extension (if applicable) and, if required, is quoted so that it can be
+ passed back to the build system driver on the command line. The
+ \cb{display_target} member is the unqualified and unquoted \"display\"
+ target name, the same as in the \cb{lines} format. The \cb{target_type}
+ member is the type of target. The \cb{target_path} member is an
+ absolute path to the target if the target type is path-based or
+ \cb{dir}.
+ "
+ }
+
+ bool --mtime-check
+ {
+ "Perform file modification time sanity checks. These checks can be
+ helpful in diagnosing spurious rebuilds and are enabled by default
+ on Windows (which is known not to guarantee monotonically increasing
+ mtimes) and for the staged version of the build system on other
+ platforms. Use \cb{--no-mtime-check} to disable."
+ }
+
+ bool --no-mtime-check
+ {
+ "Don't perform file modification time sanity checks. See
+ \cb{--mtime-check} for details."
+ }
+
+ strings --dump
+ {
+ "<phase>",
+ "Dump the build system state after the specified phase. Valid <phase>
+ values are \cb{load} (after loading \cb{buildfiles}) and \cb{match}
+ (after matching rules to targets). The \cb{match} value also has the
+ \cb{match-pre} and \cb{match-post} variants to dump the state for the
+ pre/post-operations (\cb{match} dumps the main operation only). Repeat
+ this option to dump the state after multiple phases/variants. By
+ default the entire build state is dumped but this behavior can be
+ altered with the \cb{--dump-scope} and \cb{--dump-target} options.
+ See also the \cb{--match-only} and \cb{--load-only} options."
+ }
+
+ string --dump-format
+ {
+ // NOTE: fix all references to json-v0.1, including the manual.
+ //
+ "<format>",
+ "Representation format and output stream to use when dumping the build
+ system state. Valid values for this option are \cb{buildfile} (a
+ human-readable, Buildfile-like format written to \cb{stderr}; this is
+ the default), and \cb{json-v0.1} (machine-readable, JSON-based format
+ written to \cb{stdout}). For details on the \cb{buildfile} format, see
+ \l{b#intro-diag-debug Diagnostics and Debugging}. For details on the
+ \cb{json-v0.1} format, see the JSON OUTPUT section below (overall
+ properties) and \l{b#json-dump JSON Dump Format} (format specifics).
+ Note that the JSON format is currently unstable (thus the temporary
+ \cb{-v0.1} suffix).
+
+ Note that because it's possible to end up with multiple dumps (for
+ example, by specifying the \cb{--dump-scope} and/or \cb{--dump-target}
+ options multiple times), the JSON output is in the \"JSON Lines\" form,
+ that is, without pretty-printing and with the top-level JSON objects
+ delimited by newlines. Note also that if the JSON dump output is
+ combined with \cb{--structured-result=json}, then the structured
+ result is the last line."
+ }
+
+ dir_paths --dump-scope
+ {
+ "<dir>",
+ "Dump the build system state for the specified scope only. Repeat this
+ option to dump the state of multiple scopes."
+ }
+
+ vector<pair<name, optional<name>>> --dump-target
+ {
+ "<target>",
+ "Dump the build system state for the specified target only. Repeat this
+ option to dump the state of multiple targets."
+ }
+
+ vector<name> --trace-match
+ {
+ "<target>",
+ "Trace rule matching for the specified target. This is primarily useful
+ during troubleshooting. Repeat this option to trace multiple targets."
+ }
+
+ vector<name> --trace-execute
+ {
+ "<target>",
+ "Trace rule execution for the specified target. This is primarily useful
+ during troubleshooting. Repeat this option to trace multiple targets."
+ }
+
+ bool --no-column
+ {
+ "Don't print column numbers in diagnostics."
+ }
+
+ bool --no-line
+ {
+ "Don't print line and column numbers in diagnostics."
+ }
+
+ path --buildfile
+ {
+ "<path>",
+ "The alternative file to read build information from. The default is
+ \cb{buildfile} or \cb{build2file}, depending on the project's build
+ file/directory naming scheme. If <path> is '\cb{-}', then read from
+ \cb{stdin}. Note that this option only affects the files read as part
+ of the buildspec processing. Specifically, it has no effect on the
+ \cb{source} and \cb{include} directives. As a result, this option is
+ primarily intended for testing rather than changing the build file
+ names in real projects."
+ }
+
+ path --config-guess
+ {
+ "<path>",
+ "The path to the \cb{config.guess(1)} script that should be used to
+ guess the host machine triplet. If this option is not specified, then
+ \cb{b} will fall back to using the target it was built for as host."
+ }
+
+ path --config-sub
+ {
+ "<path>",
+ "The path to the \cb{config.sub(1)} script that should be used to
+ canonicalize machine triplets. If this option is not specified, then
+ \cb{b} will use its built-in canonicalization support which should
+ be sufficient for commonly-used platforms."
+ }
+
+ string --pager // String to allow empty value.
+ {
+ "<path>",
+ "The pager program to be used to show long text. Commonly used pager
+ programs are \cb{less} and \cb{more}. You can also specify additional
+ options that should be passed to the pager program with
+ \cb{--pager-option}. If an empty string is specified as the pager
+ program, then no pager will be used. If the pager program is not
+ explicitly specified, then \cb{b} will try to use \cb{less}. If it
+ is not available, then no pager will be used."
+ }
+
+ strings --pager-option
+ {
+ "<opt>",
+ "Additional option to be passed to the pager program. See \cb{--pager}
+ for more information on the pager program. Repeat this option to
+ specify multiple pager options."
+ }
+
+ // The following option is "fake" in that it is actually handled by
+ // argv_file_scanner. We have it here for documentation.
+ //
+ string --options-file
+ {
+ "<file>",
+ "Read additional options from <file>. Each option should appear on a
+ separate line optionally followed by space or equal sign (\cb{=}) and
+ an option value. Empty lines and lines starting with \cb{#} are
+ ignored. Option values can be enclosed in double (\cb{\"}) or single
+ (\cb{'}) quotes to preserve leading and trailing whitespaces as well as
+ to specify empty values. If the value itself contains trailing or
+ leading quotes, enclose it with an extra pair of quotes, for example
+ \cb{'\"x\"'}. Non-leading and non-trailing quotes are interpreted as
+ being part of the option value.
+
+ The semantics of providing options in a file is equivalent to providing
+ the same set of options in the same order on the command line at the
+ point where the \cb{--options-file} option is specified except that
+ the shell escaping and quoting is not required. Repeat this option
+ to specify more than one options file."
+ }
+
+ dir_path --default-options
+ {
+ "<dir>",
+ "The directory to load additional default options files from."
+ }
+
+ bool --no-default-options
+ {
+ "Don't load default options files."
+ }
+
+ bool --help {"Print usage information and exit."}
+ bool --version {"Print version and exit."}
+ };
+
+ "
+ \h|DEFAULT OPTIONS FILES|
+
+ Instead of having a separate config file format for tool configuration, the
+ \cb{build2} toolchain uses \i{default options files} which contain the same
+ options as what can be specified on the command line. The default options
+ files are like options files that one can specify with \cb{--options-file}
+ except that they are loaded by default.
+
+ The default options files for the build system driver are called
+ \cb{b.options} and are searched for in the \cb{.build2/} subdirectory of the
+ home directory and in the system directory (for example, \cb{/etc/build2/})
+ if configured. Note that besides options these files can also contain global
+ variable overrides.
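+
+ For example, a \cb{~/.build2/b.options} file with the following
+ (illustrative) contents would make these options the defaults for all
+ build system driver invocations:
+
+ \
+ # Personal defaults for the build system driver.
+ #
+ --jobs 8
+ --no-progress
+ \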
+
+ Once the search is complete, the files are loaded in the reverse order, that
+ is, beginning from the system directory (if any), followed by the home
+ directory, and finishing off with the options specified on the command line.
+ In other words, the files are loaded from the more generic to the more
+ specific with the command line options having the ability to override any
+ values specified in the default options files.
+
+ If a default options file contains \cb{--no-default-options}, then the
+ search is stopped at the directory containing this file and no outer files
+ are loaded. If this option is specified on the command line, then none of
+ the default options files are searched for or loaded.
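+
+ For example, adding the following (illustrative) line to
+ \cb{~/.build2/b.options} would stop the search at the home directory and
+ thus prevent the system directory options files from being loaded:
+
+ \
+ --no-default-options
+ \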
+
+ An additional directory containing default options files can be specified
+ with \cb{--default-options}. Its configuration files are loaded after the
+ home directory.
+
+ The order in which default options files are loaded is traced at the
+ verbosity level 3 (\cb{-V} option) or higher.
+
+ \h#json-output|JSON OUTPUT|
+
+ Commands that support the JSON output specify their formats as a
+ serialized representation of a C++ \cb{struct} or an array thereof. For
+ example:
+
+ \
+ struct package
+ {
+ string name;
+ };
+
+ struct configuration
+ {
+ uint64_t id;
+ string path;
+ optional<string> name;
+ bool default;
+ vector<package> packages;
+ };
+ \
+
+ An example of the serialized JSON representation of \cb{struct}
+ \cb{configuration}:
+
+ \
+ {
+ \"id\": 1,
+ \"path\": \"/tmp/hello-gcc\",
+ \"name\": \"gcc\",
+ \"default\": true,
+ \"packages\": [
+ {
+ \"name\": \"hello\"
+ }
+ ]
+ }
+ \
+
+ This section provides details on the overall properties of such formats
+ and the semantics of the \cb{struct} serialization.
+
+ The order of members in a JSON object is fixed as specified in the
+ corresponding \cb{struct}. While new members may be added in the
+ future (and should be ignored by older consumers), the semantics of the
+ existing members (including whether the top-level entry is an object or
+ array) may not change.
+
+ An object member is required unless its type is \cb{optional<>},
+ \cb{bool}, or \cb{vector<>} (array). For \cb{bool} members absent means
+ \cb{false}. For \cb{vector<>} members absent means empty. An empty
+ top-level array is always present.
+
+ For example, the following JSON text is a possible serialization of
+ the above \cb{struct} \cb{configuration}:
+
+ \
+ {
+ \"id\": 1,
+ \"path\": \"/tmp/hello-gcc\"
+ }
+ \
+
+ \h|EXIT STATUS|
+
+ Non-zero exit status is returned in case of an error.
+ "
+
+ // NOTE: remember to update --build2-metadata output if adding any relevant
+ // new environment variables.
+ //
+ "
+ \h|ENVIRONMENT|
+
+ The \cb{HOME} environment variable is used to determine the user's home
+ directory. If it is not set, then \cb{getpwuid(3)} is used instead. This
+ value is used to shorten paths printed in diagnostics by replacing the home
+ directory with \cb{~/}. It is also made available to \cb{buildfile}s as the
+ \cb{build.home} variable.
+
+ The \cb{BUILD2_VAR_OVR} environment variable is used to propagate global
+ variable overrides to nested build system driver invocations. Its value is a
+ list of global variable assignments separated with newlines.
+
+ The \cb{BUILD2_DEF_OPT} environment variable is used to suppress loading of
+ default options files in nested build system driver invocations. Its values
+ are \cb{false} or \cb{0} to suppress and \cb{true} or \cb{1} to load.
+ "
+}
diff --git a/libbuild2/bash/init.cxx b/libbuild2/bash/init.cxx
index cf5307f..88c88ba 100644
--- a/libbuild2/bash/init.cxx
+++ b/libbuild2/bash/init.cxx
@@ -20,7 +20,7 @@ namespace build2
namespace bash
{
static const in_rule in_rule_;
- static const install_rule install_rule_ (in_rule_);
+ static const install_rule install_rule_ (in_rule_, "bash.in");
bool
init (scope& rs,
@@ -48,14 +48,13 @@ namespace build2
{
using namespace install;
- // Install into bin/<project>/ by default stripping the .bash
- // extension from <project> if present.
+ // Install bash{} into bin/<project>.bash/ by default.
//
const project_name& p (project (rs));
if (!p.empty ())
{
- install_path<bash> (bs, dir_path ("bin") /= project_base (p));
+ install_path<bash> (bs, dir_path ("bin") /= modules_install_dir (p));
install_mode<bash> (bs, "644");
}
}
@@ -72,11 +71,11 @@ namespace build2
if (install_loaded)
{
- bs.insert_rule<exe> (perform_install_id, "bash.install", install_rule_);
- bs.insert_rule<exe> (perform_uninstall_id, "bash.uninstall", install_rule_);
+ bs.insert_rule<exe> (perform_install_id, "bash.install", install_rule_);
+ bs.insert_rule<exe> (perform_uninstall_id, "bash.install", install_rule_);
- bs.insert_rule<bash> (perform_install_id, "bash.install", install_rule_);
- bs.insert_rule<bash> (perform_uninstall_id, "bash.uninstall", install_rule_);
+ bs.insert_rule<bash> (perform_install_id, "bash.install", install_rule_);
+ bs.insert_rule<bash> (perform_uninstall_id, "bash.install", install_rule_);
}
return true;
diff --git a/libbuild2/bash/rule.cxx b/libbuild2/bash/rule.cxx
index f2c1eae..6e96b34 100644
--- a/libbuild2/bash/rule.cxx
+++ b/libbuild2/bash/rule.cxx
@@ -26,6 +26,9 @@ namespace build2
struct match_data
{
+ explicit
+ match_data (const in_rule& r): rule (r) {}
+
// The "for install" condition is signalled to us by install_rule when
// it is matched for the update operation. It also verifies that if we
// have already been executed, then it was for install.
@@ -33,24 +36,45 @@ namespace build2
// See cc::link_rule for a discussion of some subtleties in this logic.
//
optional<bool> for_install;
+
+ const in_rule& rule;
+
+ target_state
+ operator() (action a, const target& t)
+ {
+ // Unless the outer install rule signalled that this is update for
+ // install, signal back that we've performed plain update.
+ //
+ if (!for_install)
+ for_install = false;
+
+ //@@ TODO: need to verify all the modules we depend on are compatible
+ // with our for_install value, similar to cc::link_rule's
+ // append_libraries() (and which is the other half of the check
+ // in install_rule).
+
+ return rule.perform_update (a, t);
+ }
};
- static_assert (sizeof (match_data) <= target::data_size,
- "insufficient space");
+ static_assert (sizeof (match_data) <= target::small_data_size,
+ "match data requires dynamic allocation");
// in_rule
//
bool in_rule::
- match (action a, target& t, const string&) const
+ match (action a, target& xt, const string& hint, match_extra&) const
{
tracer trace ("bash::in_rule::match");
- // Note that for bash{} we match even if the target does not depend on
- // any modules (while it could have been handled by the in module, that
- // would require loading it).
+ file& t (xt.as<file> ()); // Only registered for exe{} and bash{}.
+
+ // Note that for bash{} and for exe{} with hint we match even if the
+ // target does not depend on any modules (while it could have been
+ // handled by the in module, that would require loading it).
//
- bool fi (false); // Found in.
- bool fm (t.is_a<bash> ()); // Found module.
+ bool fi (false); // Found in.
+ bool fm (!hint.empty () || t.is_a<bash> ()); // Found module.
for (prerequisite_member p: group_prerequisite_members (a, t))
{
if (include (a, t, p) != include_type::normal) // Excluded/ad hoc.
@@ -64,39 +88,32 @@ namespace build2
l4 ([&]{trace << "no in file prerequisite for target " << t;});
if (!fm)
- l4 ([&]{trace << "no bash module prerequisite for target " << t;});
-
- return (fi && fm);
- }
+ l4 ([&]{trace << "no bash module prerequisite or hint for target "
+ << t;});
- recipe in_rule::
- apply (action a, target& t) const
- {
- // Note that for-install is signalled by install_rule and therefore
- // can only be relied upon during execute.
+ // If we match, derive the file name early as recommended by the in
+ // rule.
//
- t.data (match_data ());
+ if (fi && fm)
+ t.derive_path ();
- return rule::apply (a, t);
+ return fi && fm;
}
- target_state in_rule::
- perform_update (action a, const target& t) const
+ recipe in_rule::
+ apply (action a, target& t) const
{
- // Unless the outer install rule signalled that this is update for
- // install, signal back that we've performed plain update.
- //
- match_data& md (t.data<match_data> ());
-
- if (!md.for_install)
- md.for_install = false;
+ recipe r (rule::apply (a, t));
- //@@ TODO: need to verify all the modules we depend on are compatible
- // with our for_install value, similar to cc::link_rule's
- // append_libraries() (and which is the other half of the check
- // in install_rule).
+ if (a == perform_update_id)
+ {
+ // Note that for-install is signalled by install_rule and therefore
+ // can only be relied upon during execute.
+ //
+ return match_data (*this);
+ }
- return rule::perform_update (a, t);
+ return r;
}
prerequisite_target in_rule::
@@ -126,7 +143,7 @@ namespace build2
// apply).
//
string ext (p.ext ? *p.ext : "bash");
- path ip (dir_path (project_base (*p.proj)) / p.dir / p.name);
+ path ip (dir_path (modules_install_dir (*p.proj)) / p.dir / p.name);
if (!ext.empty ())
{
@@ -161,6 +178,9 @@ namespace build2
if (mt != timestamp_nonexistent)
{
+ // @@ Do we actually need _locked(), isn't path_mtime()
+ // atomic?
+ //
auto rp (t.ctx.targets.insert_locked (bash::static_type,
ap.directory (),
dir_path () /* out */,
@@ -202,13 +222,14 @@ namespace build2
const string& n,
optional<uint64_t> flags,
bool strict,
+ const substitution_map* smap,
const optional<string>& null) const
{
assert (!flags);
return n.compare (0, 6, "import") == 0 && (n[6] == ' ' || n[6] == '\t')
? substitute_import (l, a, t, trim (string (n, 7)))
- : rule::substitute (l, a, t, n, nullopt, strict, null);
+ : rule::substitute (l, a, t, n, nullopt, strict, smap, null);
}
string in_rule::
@@ -217,21 +238,45 @@ namespace build2
const target& t,
const string& n) const
{
- // Derive (relative) import path from the import name.
+ // Derive (relative) import path from the import name. And derive import
+ // installed path from that by adding the .bash extension to the first
+ // component.
//
- path ip;
+ path ip, iip;
+ project_name pn;
try
{
ip = path (n);
- if (ip.empty () || ip.absolute ())
+ if (ip.empty () || ip.simple () || ip.absolute ())
throw invalid_path (n);
if (ip.extension_cstring () == nullptr)
ip += ".bash";
ip.normalize ();
+
+ auto b (ip.begin ()), e (ip.end ());
+
+ try
+ {
+ pn = project_name (*b);
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (l) << "invalid import path '" << n << "': " << e.what ();
+ }
+
+ char s (b++.separator ());
+
+ iip = path (modules_install_dir (pn) + s) / path (b, e);
+
+ // Strip the .bash extension from the project name in this path to
+ // be able to compare it to paths inside the project (see below).
+ //
+ if (pn.extension () == "bash")
+ ip = path (pn.base ("bash") + s) / path (b, e);
}
catch (const invalid_path&)
{
@@ -243,7 +288,7 @@ namespace build2
const path* ap (nullptr);
for (const prerequisite_target& pt: t.prerequisite_targets[a])
{
- if (pt.adhoc || pt.target == nullptr)
+ if (pt.target == nullptr || pt.adhoc ())
continue;
if (const bash* b = pt.target->is_a<bash> ())
@@ -261,19 +306,19 @@ namespace build2
//
// But we still do a simple match first since it can quickly weed
// out candidates that cannot possibly match.
- //
- if (!pp.sup (ip))
- continue;
- // See if this is import-installed target (refer to search() for
- // details).
+ // See if this is import-installed target (refer to search() above
+ // for details).
//
if (size_t n = pt.data)
{
+ if (!pp.sup (iip))
+ continue;
+
// Both are normalized so we can compare the "tails".
//
const string& ps (pp.string ());
- const string& is (ip.string ());
+ const string& is (iip.string ());
if (path::traits_type::compare (
ps.c_str () + ps.size () - n, n,
@@ -288,6 +333,9 @@ namespace build2
if (const scope* rs = b->base_scope ().root_scope ())
{
+ if (!pp.sup (ip) || project (*rs) != pn)
+ continue;
+
const dir_path& d (pp.sub (rs->src_path ())
? rs->src_path ()
: rs->out_path ());
@@ -308,7 +356,7 @@ namespace build2
if (ap == nullptr)
fail (l) << "unable to resolve import path " << ip;
- match_data& md (t.data<match_data> ());
+ match_data& md (t.data<match_data> (a));
assert (md.for_install);
if (*md.for_install)
@@ -366,7 +414,7 @@ namespace build2
"source \"$(dirname"
" \"$(readlink -f"
" \"${BASH_SOURCE[0]}\")\")/"
- + ip.string () + "\"";
+ + iip.string () + '"';
}
else
{
@@ -387,7 +435,7 @@ namespace build2
return
"source \"$(dirname"
" \"${BASH_SOURCE[0]}\")/"
- + o + ip.string () + "\"";
+ + o + iip.string () + '"';
}
}
else
@@ -397,18 +445,19 @@ namespace build2
// install_rule
//
bool install_rule::
- match (action a, target& t, const string& hint) const
+ match (action a, target& t) const
{
// We only want to handle installation if we are also the ones building
// this target. So first run in's match().
//
- return in_.match (a, t, hint) && file_rule::match (a, t, "");
+ return in_.sub_match (in_name_, update_id, a, t) &&
+ file_rule::match (a, t);
}
recipe install_rule::
- apply (action a, target& t) const
+ apply (action a, target& t, match_extra& me) const
{
- recipe r (file_rule::apply_impl (a, t));
+ recipe r (file_rule::apply_impl (a, t, me));
if (r == nullptr)
return noop_recipe;
@@ -418,7 +467,7 @@ namespace build2
// Signal to the in rule that this is update for install. And if the
// update has already been executed, verify it was done for install.
//
- auto& md (t.data<match_data> ());
+ auto& md (t.data<match_data> (a.inner_action ()));
if (md.for_install)
{
diff --git a/libbuild2/bash/rule.hxx b/libbuild2/bash/rule.hxx
index f69ac3b..3f9618f 100644
--- a/libbuild2/bash/rule.hxx
+++ b/libbuild2/bash/rule.hxx
@@ -29,17 +29,16 @@ namespace build2
class LIBBUILD2_BASH_SYMEXPORT in_rule: public in::rule
{
public:
- in_rule (): rule ("bash.in 1", "bash.in", '@', false /* strict */) {}
+ in_rule (): rule ("bash.in 1", "bash", '@', false /* strict */) {}
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&, const string&, match_extra&) const override;
+
+ using in::rule::match; // Make Clang happy.
virtual recipe
apply (action, target&) const override;
- virtual target_state
- perform_update (action, const target&) const override;
-
virtual prerequisite_target
search (action,
const target&,
@@ -53,6 +52,7 @@ namespace build2
const string&,
optional<uint64_t>,
bool,
+ const substitution_map*,
const optional<string>&) const override;
string
@@ -68,16 +68,17 @@ namespace build2
class LIBBUILD2_BASH_SYMEXPORT install_rule: public install::file_rule
{
public:
- install_rule (const in_rule& in): in_ (in) {}
+ install_rule (const in_rule& r, const char* n): in_ (r), in_name_ (n) {}
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
virtual recipe
- apply (action, target&) const override;
+ apply (action, target&, match_extra&) const override;
protected:
const in_rule& in_;
+ const string in_name_;
};
}
}
diff --git a/libbuild2/bash/target.cxx b/libbuild2/bash/target.cxx
index 6fa7cf4..5240fed 100644
--- a/libbuild2/bash/target.cxx
+++ b/libbuild2/bash/target.cxx
@@ -23,7 +23,7 @@ namespace build2
&target_pattern_var<bash_ext_def>,
nullptr,
&file_search,
- false
+ target_type::flag::none
};
}
}
diff --git a/libbuild2/bash/target.hxx b/libbuild2/bash/target.hxx
index f0af967..ad926bd 100644
--- a/libbuild2/bash/target.hxx
+++ b/libbuild2/bash/target.hxx
@@ -21,11 +21,14 @@ namespace build2
class LIBBUILD2_BASH_SYMEXPORT bash: public file
{
public:
- using file::file;
+ bash (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
}
}
diff --git a/libbuild2/bash/utility.hxx b/libbuild2/bash/utility.hxx
index 087fc38..e5e4377 100644
--- a/libbuild2/bash/utility.hxx
+++ b/libbuild2/bash/utility.hxx
@@ -11,14 +11,26 @@ namespace build2
{
namespace bash
{
- // Strip the .bash extension from the project name.
+ // Return the bash{} modules installation directory under bin/.
//
- // Note that the result may not be a valid project name.
+ // Note that we used to install into bin/<project>/ but that has a good
+ // chance of clashing with the project's executable. Adding the .bash
+ // extension feels like a good idea since in our model the executables
+ // should not use the .bash extension (only modules) and therefore are
+ // unlikely to clash with this name.
+ //
+ // One drawback of this approach is that in case of a project like
+ // libbutl.bash we now have different module directories inside the
+ // project (libbutl/) and when installed (libbutl.bash/). Also, the
+ // installation directory will be shared with the libbutl project but
+ // that's probably ok (and we had the same issue before).
//
inline string
- project_base (const project_name& pn)
+ modules_install_dir (const project_name& pn)
{
- return pn.base ("bash");
+ // Strip the .bash extension if present not to duplicate it.
+ //
+ return pn.base ("bash") + ".bash";
}
}
}
diff --git a/libbuild2/bin/def-rule.cxx b/libbuild2/bin/def-rule.cxx
index 032d521..143cc35 100644
--- a/libbuild2/bin/def-rule.cxx
+++ b/libbuild2/bin/def-rule.cxx
@@ -7,6 +7,7 @@
#include <libbuild2/scope.hxx>
#include <libbuild2/target.hxx>
#include <libbuild2/algorithm.hxx>
+#include <libbuild2/filesystem.hxx>
#include <libbuild2/diagnostics.hxx>
#include <libbuild2/bin/target.hxx>
@@ -32,8 +33,10 @@ namespace build2
};
static void
- read_dumpbin (istream& is, symbols& syms)
+ read_dumpbin (diag_buffer& dbuf, ifdstream& is, symbols& syms)
{
+ // Note: io_error is handled by the caller.
+
// Lines that describe symbols look like:
//
// 0 1 2 3 4 5 6
@@ -71,29 +74,27 @@ namespace build2
//
// Note that an UNDEF data symbol with non-zero OFFSET is a "common
// symbol", equivalent to the nm `C` type.
-
- // Map of read-only (.rdata, .xdata) and uninitialized (.bss) sections
- // to their types (R and B, respectively). If a section is not found in
- // this map, then it's assumed to be normal data (.data).
//
- map<string, char> sections;
-
- string l;
- while (!eof (getline (is, l)))
+ // We keep a map of read-only (.rdata, .xdata) and uninitialized (.bss)
+ // sections to their types (R and B, respectively). If a section is not
+ // found in this map, then it's assumed to be normal data (.data).
+ //
+ auto parse_line = [&syms,
+ secs = map<string, char> ()] (const string& l) mutable
{
size_t b (0), e (0), n;
// IDX (note that it can be more than 3 characters).
//
if (next_word (l, b, e) == 0)
- continue;
+ return;
// OFFSET (always 8 characters).
//
n = next_word (l, b, e);
if (n != 8)
- continue;
+ return;
string off (l, b, n);
@@ -102,7 +103,7 @@ namespace build2
n = next_word (l, b, e);
if (n == 0)
- continue;
+ return;
string sec (l, b, n);
@@ -111,7 +112,7 @@ namespace build2
n = next_word (l, b, e);
if (l.compare (b, n, "notype") != 0)
- continue;
+ return;
bool dat;
if (l[e] == ' ' && l[e + 1] == '(' && l[e + 2] == ')')
@@ -127,7 +128,7 @@ namespace build2
n = next_word (l, b, e);
if (n == 0)
- continue;
+ return;
string vis (l, b, n);
@@ -136,14 +137,14 @@ namespace build2
n = next_word (l, b, e);
if (n != 1 || l[b] != '|')
- continue;
+ return;
// SYMNAME
//
n = next_word (l, b, e);
if (n == 0)
- continue;
+ return;
string s (l, b, n);
@@ -161,23 +162,23 @@ namespace build2
};
if (cmp (".rdata", 6) ||
- cmp (".xdata", 6)) sections.emplace (move (sec), 'R');
- else if (cmp (".bss", 4)) sections.emplace (move (sec), 'B');
+ cmp (".xdata", 6)) secs.emplace (move (sec), 'R');
+ else if (cmp (".bss", 4)) secs.emplace (move (sec), 'B');
- continue;
+ return;
}
// We can only export extern symbols.
//
if (vis != "External")
- continue;
+ return;
if (dat)
{
if (sec != "UNDEF")
{
- auto i (sections.find (sec));
- switch (i == sections.end () ? 'D' : i->second)
+ auto i (secs.find (sec));
+ switch (i == secs.end () ? 'D' : i->second)
{
case 'D': syms.d.insert (move (s)); break;
case 'R': syms.r.insert (move (s)); break;
@@ -195,20 +196,54 @@ namespace build2
if (sec != "UNDEF")
syms.t.insert (move (s));
}
+ };
+
+ // Read until we reach EOF on all streams.
+ //
+ // Note that if dbuf is not opened, then we automatically get an
+ // inactive nullfd entry.
+ //
+ fdselect_set fds {is.fd (), dbuf.is.fd ()};
+ fdselect_state& ist (fds[0]);
+ fdselect_state& dst (fds[1]);
+
+ for (string l; ist.fd != nullfd || dst.fd != nullfd; )
+ {
+ if (ist.fd != nullfd && getline_non_blocking (is, l))
+ {
+ if (eof (is))
+ ist.fd = nullfd;
+ else
+ {
+ parse_line (l);
+ l.clear ();
+ }
+
+ continue;
+ }
+
+ ifdselect (fds);
+
+ if (dst.ready)
+ {
+ if (!dbuf.read ())
+ dst.fd = nullfd;
+ }
}
}
static void
- read_posix_nm (istream& is, symbols& syms)
+ read_posix_nm (diag_buffer& dbuf, ifdstream& is, symbols& syms)
{
+ // Note: io_error is handled by the caller.
+
// Lines that describe symbols look like:
//
// <NAME> <TYPE> <VALUE> <SIZE>
//
// The types that we are interested in are T, D, R, and B.
//
- string l;
- while (!eof (getline (is, l)))
+ auto parse_line = [&syms] (const string& l)
{
size_t b (0), e (0), n;
@@ -217,7 +252,7 @@ namespace build2
n = next_word (l, b, e);
if (n == 0)
- continue;
+ return;
string s (l, b, n);
@@ -226,7 +261,7 @@ namespace build2
n = next_word (l, b, e);
if (n != 1)
- continue;
+ return;
switch (l[b])
{
@@ -237,6 +272,39 @@ namespace build2
case 'C': syms.c.insert (move (s)); break;
case 'T': syms.t.insert (move (s)); break;
}
+ };
+
+ // Read until we reach EOF on all streams.
+ //
+ // Note that if dbuf is not opened, then we automatically get an
+ // inactive nullfd entry.
+ //
+ fdselect_set fds {is.fd (), dbuf.is.fd ()};
+ fdselect_state& ist (fds[0]);
+ fdselect_state& dst (fds[1]);
+
+ for (string l; ist.fd != nullfd || dst.fd != nullfd; )
+ {
+ if (ist.fd != nullfd && getline_non_blocking (is, l))
+ {
+ if (eof (is))
+ ist.fd = nullfd;
+ else
+ {
+ parse_line (l);
+ l.clear ();
+ }
+
+ continue;
+ }
+
+ ifdselect (fds);
+
+ if (dst.ready)
+ {
+ if (!dbuf.read ())
+ dst.fd = nullfd;
+ }
}
}
@@ -349,6 +417,8 @@ namespace build2
// we will try to recognize C/C++ identifiers plus the special symbols
// that we need to export (e.g., vtable).
//
+ // Note that it looks like rdata should not be declared DATA. It is
+ // known to break ??_7 (vtable) exporting (see GH issue 315).
//
for (const string& s: syms.r)
{
@@ -356,7 +426,7 @@ namespace build2
(s[0] == '?' && s[1] != '?') || // C++
s.compare (0, 4, "??_7") == 0) // vtable
{
- os << " " << strip (s) << " DATA\n";
+ os << " " << strip (s) << '\n';
}
}
}
@@ -428,6 +498,12 @@ namespace build2
// we will try to recognize C/C++ identifiers plus the special symbols
// that we need to export (e.g., vtable and typeinfo).
//
+ // For the description of GNU binutils .def format, see:
+ //
+ // https://sourceware.org/binutils/docs/binutils/def-file-format.html
+ //
+ // @@ Maybe CONSTANT is more appropriate than DATA?
+ //
for (const string& s: syms.r)
{
if (s.find_first_of (".") != string::npos) // Special (.refptr.*)
@@ -448,7 +524,7 @@ namespace build2
}
bool def_rule::
- match (action a, target& t, const string&) const
+ match (action a, target& t) const
{
tracer trace ("bin::def_rule::match");
@@ -652,8 +728,12 @@ namespace build2
const char*& arg (*(args.end () - 2));
+ // We could print the prerequisite if it's a single obj{}/libu{} (with
+ // the latter being the common case). But it doesn't feel like that's
+ // worth the variability and the associated possibility of confusion.
+ //
if (verb == 1)
- text << "def " << t;
+ print_diag ("def", t);
// Extract symbols from each object file.
//
@@ -673,22 +753,37 @@ namespace build2
// Both dumpbin.exe and nm send their output to stdout. While nm sends
// diagnostics to stderr, dumpbin sends it to stdout together with the
- // output.
+ // output. To keep things uniform we will buffer stderr in both cases.
+ //
+ process pr (
+ run_start (nm,
+ args,
+ 0 /* stdin */,
+ -1 /* stdout */,
+ diag_buffer::pipe (ctx) /* stderr */));
+
+ // Note that while we read both streams until eof in the normal
+ // circumstances, we cannot use fdstream_mode::skip for the exception
+ // case on both of them: we may end up being blocked trying to read
+ // one stream while the process may be blocked writing to the other.
+ // So in case of an exception we only skip the diagnostics and close
+ // stdout hard. The latter should happen first so the order of the
+ // dbuf/is variables is important.
//
- process pr (run_start (nm,
- args,
- 0 /* stdin */,
- -1 /* stdout */));
+ diag_buffer dbuf (ctx, args[0], pr, (fdstream_mode::non_blocking |
+ fdstream_mode::skip));
+
bool io (false);
try
{
- ifdstream is (
- move (pr.in_ofd), fdstream_mode::skip, ifdstream::badbit);
+ ifdstream is (move (pr.in_ofd),
+ fdstream_mode::non_blocking,
+ ifdstream::badbit);
if (lid == "msvc" || nid == "msvc")
- read_dumpbin (is, syms);
+ read_dumpbin (dbuf, is, syms);
else
- read_posix_nm (is, syms);
+ read_posix_nm (dbuf, is, syms);
is.close ();
}
@@ -700,7 +795,7 @@ namespace build2
io = true;
}
- if (!run_finish_code (args.data (), pr) || io)
+ if (!run_finish_code (dbuf, args, pr, 1 /* verbosity */) || io)
fail << "unable to extract symbols from " << arg;
}
diff --git a/libbuild2/bin/def-rule.hxx b/libbuild2/bin/def-rule.hxx
index 32423a0..acdf841 100644
--- a/libbuild2/bin/def-rule.hxx
+++ b/libbuild2/bin/def-rule.hxx
@@ -24,7 +24,7 @@ namespace build2
def_rule () {}
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
virtual recipe
apply (action, target&) const override;
diff --git a/libbuild2/bin/guess.cxx b/libbuild2/bin/guess.cxx
index 905bd0a..e9759b8 100644
--- a/libbuild2/bin/guess.cxx
+++ b/libbuild2/bin/guess.cxx
@@ -34,9 +34,12 @@ namespace build2
// Return 0-version if the version is invalid.
//
static inline semantic_version
- parse_version (const string& s, size_t p = 0, const char* bs = ".-+~ ")
+ parse_version (const string& s, size_t p = 0,
+ semantic_version::flags f = semantic_version::allow_omit_patch |
+ semantic_version::allow_build,
+ const char* bs = ".-+~ ")
{
- optional<semantic_version> v (parse_semantic_version (s, p, bs));
+ optional<semantic_version> v (parse_semantic_version (s, p, f, bs));
return v ? *v : semantic_version ();
}
@@ -89,7 +92,7 @@ namespace build2
static global_cache<ar_info> ar_cache;
const ar_info&
- guess_ar (const path& ar, const path* rl, const char* paths)
+ guess_ar (context& ctx, const path& ar, const path* rl, const char* paths)
{
tracer trace ("bin::guess_ar");
@@ -177,7 +180,11 @@ namespace build2
// "LLVM version 3.5.2"
// "LLVM version 5.0.0"
//
- if (l.compare (0, 13, "LLVM version ") == 0)
+ // But it can also be prefixed with some stuff, for example:
+ //
+ // "Debian LLVM version 14.0.6"
+ //
+ if (l.find ("LLVM version ") != string::npos)
{
semantic_version v (parse_version (l, l.rfind (' ') + 1));
return guess_result ("llvm", move (l), move (v));
@@ -227,7 +234,11 @@ namespace build2
// (yes, it goes to stdout) but that seems harmless.
//
sha256 cs;
- arr = run<guess_result> (3, are, "--version", f, false, false, &cs);
+ arr = run<guess_result> (ctx,
+ 3,
+ are, "--version",
+ f,
+ false , false, &cs);
if (!arr.empty ())
arr.checksum = cs.string ();
@@ -247,10 +258,10 @@ namespace build2
: guess_result ();
};
- // Redirect STDERR to STDOUT and ignore exit status.
+ // Redirect stderr to stdout and ignore exit status.
//
sha256 cs;
- arr = run<guess_result> (3, are, f, false, true, &cs);
+ arr = run<guess_result> (ctx, 3, are, f, false, true, &cs);
if (!arr.empty ())
{
@@ -280,7 +291,7 @@ namespace build2
// "LLVM version ".
//
- if (l.compare (0, 13, "LLVM version ") == 0)
+ if (l.find ("LLVM version ") != string::npos)
return guess_result ("llvm", move (l), semantic_version ());
// On FreeBSD we get "ranlib" rather than "BSD ranlib" for some
@@ -293,7 +304,11 @@ namespace build2
};
sha256 cs;
- rlr = run<guess_result> (3, rle, "--version", f, false, false, &cs);
+ rlr = run<guess_result> (ctx,
+ 3,
+ rle, "--version",
+ f,
+ false, false, &cs);
if (!rlr.empty ())
rlr.checksum = cs.string ();
@@ -310,10 +325,10 @@ namespace build2
: guess_result ();
};
- // Redirect STDERR to STDOUT and ignore exit status.
+ // Redirect stderr to stdout and ignore exit status.
//
sha256 cs;
- rlr = run<guess_result> (3, rle, f, false, true, &cs);
+ rlr = run<guess_result> (ctx, 3, rle, f, false, true, &cs);
if (!rlr.empty ())
{
@@ -378,7 +393,7 @@ namespace build2
static global_cache<ld_info> ld_cache;
const ld_info&
- guess_ld (const path& ld, const char* paths)
+ guess_ld (context& ctx, const path& ld, const char* paths)
{
tracer trace ("bin::guess_ld");
@@ -437,17 +452,22 @@ namespace build2
string id;
optional<semantic_version> ver;
+ size_t p;
+
// Microsoft link.exe output starts with "Microsoft (R) ".
//
if (l.compare (0, 14, "Microsoft (R) ") == 0)
{
id = "msvc";
}
- // LLD prints a line in the form "LLD X.Y.Z ...".
+ // LLD prints a line in the form "LLD X.Y.Z ...". But it can also
+ // be prefixed with some stuff, for example:
//
- else if (l.compare (0, 4, "LLD ") == 0)
+ // Debian LLD 14.0.6 (compatible with GNU linkers)
+ //
+ else if ((p = l.find ("LLD ")) != string::npos)
{
- ver = parse_version (l, 4);
+ ver = parse_version (l, p + 4);
// The only way to distinguish between various LLD drivers is via
// their name. Handle potential prefixes (say a target) and
@@ -485,12 +505,12 @@ namespace build2
: guess_result (move (id), move (l), move (ver)));
};
- // Redirect STDERR to STDOUT and ignore exit status. Note that in case
+ // Redirect stderr to stdout and ignore exit status. Note that in case
// of link.exe we will hash the diagnostics (yes, it goes to stdout)
// but that seems harmless.
//
sha256 cs;
- r = run<guess_result> (3, env, "--version", f, false, true, &cs);
+ r = run<guess_result> (ctx, 3, env, "--version", f, false, true, &cs);
if (!r.empty ())
r.checksum = cs.string ();
@@ -521,7 +541,7 @@ namespace build2
};
sha256 cs;
- r = run<guess_result> (3, env, "-v", f, false, false, &cs);
+ r = run<guess_result> (ctx, 3, env, "-v", f, false, false, &cs);
if (!r.empty ())
r.checksum = cs.string ();
@@ -548,7 +568,7 @@ namespace build2
// option.
//
sha256 cs;
- r = run<guess_result> (3, env, "-version", f, false, false, &cs);
+ r = run<guess_result> (ctx, 3, env, "-version", f, false, false, &cs);
if (!r.empty ())
r.checksum = cs.string ();
@@ -586,7 +606,7 @@ namespace build2
static global_cache<rc_info> rc_cache;
const rc_info&
- guess_rc (const path& rc, const char* paths)
+ guess_rc (context& ctx, const path& rc, const char* paths)
{
tracer trace ("bin::guess_rc");
@@ -642,7 +662,7 @@ namespace build2
// option.
//
sha256 cs;
- r = run<guess_result> (3, env, "--version", f, false, false, &cs);
+ r = run<guess_result> (ctx, 3, env, "--version", f, false, false, &cs);
if (!r.empty ())
r.checksum = cs.string ();
@@ -675,7 +695,7 @@ namespace build2
};
sha256 cs;
- r = run<guess_result> (3, env, "/?", f, false, false, &cs);
+ r = run<guess_result> (ctx, 3, env, "/?", f, false, false, &cs);
if (!r.empty ())
r.checksum = cs.string ();
@@ -703,7 +723,7 @@ namespace build2
static global_cache<nm_info> nm_cache;
const nm_info&
- guess_nm (const path& nm, const char* paths)
+ guess_nm (context& ctx, const path& nm, const char* paths)
{
tracer trace ("bin::guess_nm");
@@ -764,7 +784,10 @@ namespace build2
// LLVM nm --version output has a line that starts with
// "LLVM version" followed by a version.
//
- if (l.compare (0, 13, "LLVM version ") == 0)
+ // But let's assume it can be prefixed with some stuff like the rest
+ // of the LLVM tools (see above).
+ //
+ if (l.find ("LLVM version ") != string::npos)
return guess_result ("llvm", move (l), semantic_version ());
if (l.compare (0, 14, "Microsoft (R) ") == 0)
@@ -784,7 +807,7 @@ namespace build2
// option.
//
sha256 cs;
- r = run<guess_result> (3, env, "--version", f, false, false, &cs);
+ r = run<guess_result> (ctx, 3, env, "--version", f, false, false, &cs);
if (!r.empty ())
r.checksum = cs.string ();
diff --git a/libbuild2/bin/guess.hxx b/libbuild2/bin/guess.hxx
index 52c0e1b..7dc7b33 100644
--- a/libbuild2/bin/guess.hxx
+++ b/libbuild2/bin/guess.hxx
@@ -54,7 +54,7 @@ namespace build2
// attempted and the returned ranlib_* members will be left empty.
//
const ar_info&
- guess_ar (const path& ar, const path* ranlib, const char* paths);
+ guess_ar (context&, const path& ar, const path* ranlib, const char* paths);
// ld information.
//
@@ -100,7 +100,7 @@ namespace build2
};
const ld_info&
- guess_ld (const path& ld, const char* paths);
+ guess_ld (context&, const path& ld, const char* paths);
// rc information.
//
@@ -132,7 +132,7 @@ namespace build2
};
const rc_info&
- guess_rc (const path& rc, const char* paths);
+ guess_rc (context&, const path& rc, const char* paths);
// nm information.
//
@@ -166,7 +166,7 @@ namespace build2
};
const nm_info&
- guess_nm (const path& nm, const char* paths);
+ guess_nm (context&, const path& nm, const char* paths);
}
}
diff --git a/libbuild2/bin/init.cxx b/libbuild2/bin/init.cxx
index ab3980a..610082e 100644
--- a/libbuild2/bin/init.cxx
+++ b/libbuild2/bin/init.cxx
@@ -41,24 +41,30 @@ namespace build2
bool
vars_init (scope& rs,
- scope&,
- const location&,
- bool first,
+ scope& bs,
+ const location& loc,
+ bool,
bool,
module_init_extra&)
{
tracer trace ("bin::vars_init");
l5 ([&]{trace << "for " << rs;});
- assert (first);
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "bin.vars module must be loaded in project root";
// Enter variables.
//
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
+
// Target is a string and not target_triplet because it can be
// specified by the user.
//
- auto& vp (rs.var_pool ());
-
vp.insert<string> ("config.bin.target");
vp.insert<string> ("config.bin.pattern");
@@ -76,6 +82,9 @@ namespace build2
// example, addition of rpaths for prerequisite libraries (see the cc
// module for an example). Default is true.
//
+ // Note also that a rule may need to make rpath relative if
+ // install.relocatable is true.
+ //
vp.insert<dir_paths> ("config.bin.rpath");
vp.insert<bool> ("config.bin.rpath.auto");
@@ -104,12 +113,12 @@ namespace build2
// Link whole archive. Note: with target visibility.
//
// The lookup semantics is as follows: we first look for a prerequisite-
- // specific value, then for a target-specific value in the library being
- // linked, and then for target type/pattern-specific value starting from
- // the scope of the target being linked-to. In that final lookup we do
- // not look in the target being linked-to itself since that is used to
- // indicate how this target should be linked to other targets. For
- // example:
+ // specific value, then for a target-specific value in the prerequisite
+ // library, and then for target type/pattern-specific value starting
+ // from the scope of the target being linked. In that final lookup we do
+ // not look in the target being linked itself since that is used to
+ // indicate how this target should be used as a prerequisite of other
+ // targets. For example:
//
// exe{test}: liba{foo}
// liba{foo}: libua{foo1 foo2}
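
The three-step lookup described in the comment above can be modeled with a small standalone function. This is only an illustrative sketch of the fallback order (prerequisite-specific value, then the prerequisite library's own target-specific value, then a type/pattern-specific value from the scope of the target being linked); the map-based types and names are hypothetical, not the build2 lookup API.

#include <map>
#include <string>
#include <utility>
#include <optional>

// Toy model of the bin.whole lookup order (hypothetical types, not build2).
//
using prereq_key = std::pair<std::string, std::string>; // (target, library)

std::optional<bool>
whole_archive (const std::map<prereq_key, bool>& prereq_specific,
               const std::map<std::string, bool>& library_specific,
               const std::optional<bool>& scope_pattern,
               const std::string& target,
               const std::string& library)
{
  // 1. Prerequisite-specific value (exe{test}: liba{foo}: bin.whole = ...).
  //
  auto i (prereq_specific.find (prereq_key (target, library)));
  if (i != prereq_specific.end ())
    return i->second;

  // 2. Target-specific value on the prerequisite library itself.
  //
  auto j (library_specific.find (library));
  if (j != library_specific.end ())
    return j->second;

  // 3. Type/pattern-specific value starting from the scope of the target
  //    being linked (the library itself is deliberately not consulted here).
  //
  return scope_pattern;
}
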
@@ -150,6 +159,68 @@ namespace build2
return true;
}
+ bool
+ types_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("bin::types_init");
+ l5 ([&]{trace << "for " << rs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "bin.types module must be loaded in project root";
+
+ // Register target types.
+ //
+ // Note that certain platform-specific and toolchain-specific types are
+ // registered in bin and bin.ld.
+ //
+ // Note also that it would make sense to configure their default
+ // "installability" here but that requires the knowledge of the platform
+ // in some cases. So we do it all in bin for now. One way to support
+ // both use-cases would be to detect if we are loaded after bin.guess
+ // and then decide whether to do it here or delay to bin.
+ //
+ // NOTE: remember to update the documentation if changing anything here!
+ //
+ rs.insert_target_type<obj> ();
+ rs.insert_target_type<obje> ();
+ rs.insert_target_type<obja> ();
+ rs.insert_target_type<objs> ();
+
+ rs.insert_target_type<bmi> ();
+ rs.insert_target_type<bmie> ();
+ rs.insert_target_type<bmia> ();
+ rs.insert_target_type<bmis> ();
+
+ rs.insert_target_type<hbmi> ();
+ rs.insert_target_type<hbmie> ();
+ rs.insert_target_type<hbmia> ();
+ rs.insert_target_type<hbmis> ();
+
+ rs.insert_target_type<libul> ();
+ rs.insert_target_type<libue> ();
+ rs.insert_target_type<libua> ();
+ rs.insert_target_type<libus> ();
+
+ rs.insert_target_type<lib> ();
+ rs.insert_target_type<liba> ();
+ rs.insert_target_type<libs> ();
+
+ // Register the def{} target type. Note that we do it here since it is
+ // input and can be specified unconditionally (i.e., not only when
+ // building for Windows).
+ //
+ rs.insert_target_type<def> ();
+
+ return true;
+ }
+
void
functions (function_map&); // functions.cxx
@@ -195,6 +266,8 @@ namespace build2
//
const target_triplet* tgt (nullptr);
{
+ // Note: go straight for the public variable pool.
+ //
const variable& var (ctx.var_pool["config.bin.target"]);
// We first see if the value was specified via the configuration
@@ -231,9 +304,9 @@ namespace build2
//
if (!hint && config_sub)
{
- s = run<string> (3,
- *config_sub,
- s.c_str (),
+ s = run<string> (ctx,
+ 3,
+ *config_sub, s.c_str (),
[] (string& l, bool) {return move (l);});
l5 ([&]{trace << "config.sub target: '" << s << "'";});
}
@@ -272,6 +345,8 @@ namespace build2
//
const string* pat (nullptr);
{
+ // Note: go straight for the public variable pool.
+ //
const variable& var (ctx.var_pool["config.bin.pattern"]);
// We first see if the value was specified via the configuration
@@ -440,53 +515,22 @@ namespace build2
tracer trace ("bin::init");
l5 ([&]{trace << "for " << bs;});
- // Load bin.config.
+ // Load bin.{config,types}.
//
load_module (rs, rs, "bin.config", loc, extra.hints);
+ load_module (rs, rs, "bin.types", loc);
// Cache some config values we will be needing below.
//
const target_triplet& tgt (cast<target_triplet> (rs["bin.target"]));
- // Register target types and configure their default "installability".
+ // Configure target type default "installability". Also register
+ // additional platform-specific types.
//
bool install_loaded (cast_false<bool> (rs["install.loaded"]));
{
using namespace install;
- if (first)
- {
- rs.insert_target_type<obj> ();
- rs.insert_target_type<obje> ();
- rs.insert_target_type<obja> ();
- rs.insert_target_type<objs> ();
-
- rs.insert_target_type<bmi> ();
- rs.insert_target_type<bmie> ();
- rs.insert_target_type<bmia> ();
- rs.insert_target_type<bmis> ();
-
- rs.insert_target_type<hbmi> ();
- rs.insert_target_type<hbmie> ();
- rs.insert_target_type<hbmia> ();
- rs.insert_target_type<hbmis> ();
-
- rs.insert_target_type<libul> ();
- rs.insert_target_type<libue> ();
- rs.insert_target_type<libua> ();
- rs.insert_target_type<libus> ();
-
- rs.insert_target_type<lib> ();
- rs.insert_target_type<liba> ();
- rs.insert_target_type<libs> ();
-
- // Register the def{} target type. Note that we do it here since it
- // is input and can be specified unconditionally (i.e., not only
- // when building for Windows).
- //
- rs.insert_target_type<def> ();
- }
-
// Note: libu*{} members are not installable.
//
if (install_loaded)
@@ -536,6 +580,8 @@ namespace build2
if (tgt.cpu == "wasm32" || tgt.cpu == "wasm64")
{
+ // @@ TODO: shouldn't this be wrapped in if(first) somehow?
+
const target_type& wasm (
rs.derive_target_type(
target_type {
@@ -546,8 +592,8 @@ namespace build2
nullptr, /* default_extension */
&target_pattern_fix<wasm_ext>,
&target_print_0_ext_verb, // Fixed extension, no use printing.
- &file_search,
- false /* see_through */}));
+ &target_search, // Note: don't look for an existing file.
+ target_type::flag::none}));
if (install_loaded)
{
@@ -578,8 +624,6 @@ namespace build2
// Similar to alias.
//
-
- //@@ outer
r.insert<lib> (perform_id, 0, "bin.lib", lib_);
r.insert<lib> (configure_id, 0, "bin.lib", lib_);
@@ -600,6 +644,18 @@ namespace build2
if (rs.find_module ("dist"))
{
+ // Note that without custom dist rules in setups along the following
+ // lines the source file will be unreachable by dist:
+ //
+ // lib{foo}: obj{foo}
+ // obja{foo}: cxx{foo}
+ // objs{foo}: cxx{foo}
+ //
+ r.insert<obj> (dist_id, 0, "bin.obj", obj_);
+ r.insert<bmi> (dist_id, 0, "bin.bmi", obj_);
+ r.insert<hbmi> (dist_id, 0, "bin.hbmi", obj_);
+ r.insert<libul> (dist_id, 0, "bin.libul", libul_);
+
r.insert<lib> (dist_id, 0, "bin.lib", lib_);
}
}
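
The reachability issue mentioned in the comment above boils down to a graph walk: dist follows prerequisites from the targets it matched, so if only the lib{foo} -> obj{foo} group edge is matched and the obja{}/objs{} members never are, cxx{foo} is never visited. A minimal, generic sketch of that walk (not the build2 dist implementation):

#include <map>
#include <set>
#include <string>
#include <vector>

// Toy model: collect everything reachable from a root via prerequisites.
//
std::set<std::string>
reachable (const std::map<std::string, std::vector<std::string>>& prereqs,
           const std::string& root)
{
  std::set<std::string> seen {root};
  std::vector<std::string> todo {root};

  while (!todo.empty ())
  {
    std::string t (todo.back ());
    todo.pop_back ();

    auto i (prereqs.find (t));
    if (i == prereqs.end ())
      continue;

    for (const std::string& p: i->second)
      if (seen.insert (p).second)
        todo.push_back (p);
  }

  return seen;
}

// With only {"lib{foo}", {"obj{foo}"}} in the graph, "cxx{foo}" is not
// reachable; the dist registrations above make the rules also match the
// obja{foo}/objs{foo} members so their cxx{foo} prerequisite edges are walked.
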
@@ -626,7 +682,10 @@ namespace build2
//
if (first)
{
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
vp.insert<path> ("config.bin.ar");
vp.insert<path> ("config.bin.ranlib");
@@ -684,7 +743,7 @@ namespace build2
nullptr,
config::save_default_commented)));
- const ar_info& ari (guess_ar (ar, ranlib, pat.paths));
+ const ar_info& ari (guess_ar (rs.ctx, ar, ranlib, pat.paths));
// If this is a configuration with new values, then print the report
// at verbosity level 2 and up (-v).
@@ -800,7 +859,10 @@ namespace build2
//
if (first)
{
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
vp.insert<path> ("config.bin.ld");
}
@@ -832,7 +894,7 @@ namespace build2
path (apply_pattern (ld_d, pat.pattern)),
config::save_default_commented)));
- const ld_info& ldi (guess_ld (ld, pat.paths));
+ const ld_info& ldi (guess_ld (rs.ctx, ld, pat.paths));
// If this is a configuration with new values, then print the report
// at verbosity level 2 and up (-v).
@@ -916,6 +978,8 @@ namespace build2
if (lid == "msvc")
{
+ // @@ TODO: shouldn't this be wrapped in if(first) somehow?
+
const target_type& pdb (
rs.derive_target_type(
target_type {
@@ -926,8 +990,8 @@ namespace build2
nullptr, /* default_extension */
&target_pattern_fix<pdb_ext>,
&target_print_0_ext_verb, // Fixed extension, no use printing.
- &file_search,
- false /* see_through */}));
+ &target_search, // Note: don't look for an existing file.
+ target_type::flag::none}));
if (cast_false<bool> (rs["install.loaded"]))
{
@@ -958,7 +1022,10 @@ namespace build2
//
if (first)
{
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
vp.insert<path> ("config.bin.rc");
}
@@ -990,7 +1057,7 @@ namespace build2
path (apply_pattern (rc_d, pat.pattern)),
config::save_default_commented)));
- const rc_info& rci (guess_rc (rc, pat.paths));
+ const rc_info& rci (guess_rc (rs.ctx, rc, pat.paths));
// If this is a configuration with new values, then print the report
// at verbosity level 2 and up (-v).
@@ -1057,7 +1124,10 @@ namespace build2
//
if (first)
{
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
vp.insert<path> ("config.bin.nm");
}
@@ -1099,7 +1169,7 @@ namespace build2
path (apply_pattern (nm_d, pat.pattern)),
config::save_default_commented)));
- const nm_info& nmi (guess_nm (nm, pat.paths));
+ const nm_info& nmi (guess_nm (rs.ctx, nm, pat.paths));
// If this is a configuration with new values, then print the report
// at verbosity level 2 and up (-v).
@@ -1184,8 +1254,8 @@ namespace build2
// changing anything here.
{"bin.vars", nullptr, vars_init},
+ {"bin.types", nullptr, types_init},
{"bin.config", nullptr, config_init},
- {"bin", nullptr, init},
{"bin.ar.config", nullptr, ar_config_init},
{"bin.ar", nullptr, ar_init},
{"bin.ld.config", nullptr, ld_config_init},
@@ -1195,6 +1265,7 @@ namespace build2
{"bin.nm.config", nullptr, nm_config_init},
{"bin.nm", nullptr, nm_init},
{"bin.def", nullptr, def_init},
+ {"bin", nullptr, init},
{nullptr, nullptr, nullptr}
};
diff --git a/libbuild2/bin/init.hxx b/libbuild2/bin/init.hxx
index 4eb0f10..b163bf5 100644
--- a/libbuild2/bin/init.hxx
+++ b/libbuild2/bin/init.hxx
@@ -20,9 +20,11 @@ namespace build2
// Submodules:
//
// `bin.vars` -- registers some variables.
+ // `bin.types` -- registers target types.
// `bin.config` -- loads bin.vars and sets some variables.
- // `bin` -- loads bin.config and registers target types and
- // rules.
+ // `bin` -- loads bin.{types,config} and registers rules and
+ // functions.
+ //
// `bin.ar.config` -- loads bin.config and registers/sets more variables.
// `bin.ar` -- loads bin and bin.ar.config.
//
diff --git a/libbuild2/bin/rule.cxx b/libbuild2/bin/rule.cxx
index 021a768..c7147bf 100644
--- a/libbuild2/bin/rule.cxx
+++ b/libbuild2/bin/rule.cxx
@@ -17,12 +17,30 @@ namespace build2
{
namespace bin
{
+ // Search for an existing (declared real) member and match it if found.
+ //
+ static void
+ dist_match (action a, target& t, const target_type& tt)
+ {
+ if (const target* m = search_existing (t.ctx, tt, t.dir, t.out, t.name))
+ {
+ // Only a real target declaration can have prerequisites (which is
+ // the reason we are doing this).
+ //
+ if (m->decl == target_decl::real)
+ match_sync (a, *m);
+ }
+ }
+
// obj_rule
//
bool obj_rule::
- match (action a, target& t, const string&) const
+ match (action a, target& t) const
{
- const char* n (t.dynamic_type ().name); // Ignore derived type.
+ if (a.meta_operation () == dist_id)
+ return true;
+
+ const char* n (t.dynamic_type->name); // Ignore derived type.
fail << diag_doing (a, t) << " target group" <<
info << "explicitly select " << n << "e{}, " << n << "a{}, or "
@@ -30,27 +48,142 @@ namespace build2
}
recipe obj_rule::
- apply (action, target&) const {return empty_recipe;}
+ apply (action a, target& t) const
+ {
+ // We only get here for dist.
+ //
+ const target_type* ett (nullptr);
+ const target_type* att (nullptr);
+ const target_type* stt (nullptr);
+
+ if (t.is_a<obj> ())
+ {
+ ett = &obje::static_type;
+ att = &obja::static_type;
+ stt = &objs::static_type;
+ }
+ else if (t.is_a<bmi> ())
+ {
+ ett = &bmie::static_type;
+ att = &bmia::static_type;
+ stt = &bmis::static_type;
+ }
+ else if (t.is_a<hbmi> ())
+ {
+ ett = &hbmie::static_type;
+ att = &hbmia::static_type;
+ stt = &hbmis::static_type;
+ }
+ else
+ assert (false);
+
+ dist_match (a, t, *ett);
+ dist_match (a, t, *att);
+ dist_match (a, t, *stt);
+
+ // Delegate to the default dist rule to match prerequisites.
+ //
+ return dist::rule::apply (a, t);
+ }
// libul_rule
//
bool libul_rule::
- match (action a, target& t, const string&) const
+ match (action, target&) const
{
- fail << diag_doing (a, t) << " target group" <<
- info << "explicitly select libua{} or libus{} member" << endf;
+ return true;
}
recipe libul_rule::
- apply (action, target&) const {return empty_recipe;}
+ apply (action a, target& t) const
+ {
+ if (a.meta_operation () == dist_id)
+ {
+ dist_match (a, t, libua::static_type);
+ dist_match (a, t, libus::static_type);
+
+ // Delegate to the default dist rule to match prerequisites.
+ //
+ return dist::rule::apply (a, t);
+ }
+
+ // Pick one of the members. First looking for the one already matched.
+ //
+ const target* m (nullptr);
+
+ const libus* ls (nullptr);
+ {
+ ls = search_existing<libus> (t.ctx, t.dir, t.out, t.name);
+
+ if (ls != nullptr && ls->matched (a))
+ m = ls;
+ }
+
+ const libua* la (nullptr);
+ if (m == nullptr)
+ {
+ la = search_existing<libua> (t.ctx, t.dir, t.out, t.name);
+
+ if (la != nullptr && la->matched (a))
+ m = la;
+ }
+
+ if (m == nullptr)
+ {
+ const scope& bs (t.base_scope ());
+
+ lmembers lm (link_members (*bs.root_scope ()));
+
+ if (lm.s && lm.a)
+ {
+ // Use the bin.exe.lib order as a heuristic to pick the library
+ // (i.e., the most likely utility library to be built is the one
+ // most likely to be linked).
+ //
+ lorder lo (link_order (bs, otype::e));
+
+ (lo == lorder::s_a || lo == lorder::s ? lm.a : lm.s) = false;
+ }
+
+ if (lm.s)
+ m = ls != nullptr ? ls : &search<libus> (t, t.dir, t.out, t.name);
+ else
+ m = la != nullptr ? la : &search<libua> (t, t.dir, t.out, t.name);
+ }
+
+ // Save the member we picked in case others (e.g., $x.lib_poptions())
+ // need this information.
+ //
+ t.prerequisite_targets[a].push_back (m);
+
+ if (match_sync (a, *m, unmatch::safe).first)
+ return noop_recipe;
+
+ return [] (action a, const target& t)
+ {
+ const target* m (t.prerequisite_targets[a].back ());
+
+ // For update always return unchanged so we are consistent whether we
+ // managed to unmatch or not. Note that for clean we may get postponed
+ // so let's return the actual target state.
+ //
+ target_state r (execute_sync (a, *m));
+ return a == perform_update_id ? target_state::unchanged : r;
+ };
+ }
// lib_rule
//
// The whole logic is pretty much as if we had our two group members as
// our prerequisites.
//
+ // Note also that unlike the obj and libul rules above, we don't need to
+ // delegate to the default dist rule since any group prerequisites will be
+ // matched by one of the members (the key difference here is that unlike
+ // those rules, we insert and match members unconditionally).
+ //
bool lib_rule::
- match (action a, target& xt, const string&) const
+ match (action a, target& xt) const
{
lib& t (xt.as<lib> ());
diff --git a/libbuild2/bin/rule.hxx b/libbuild2/bin/rule.hxx
index ffb975d..9dd1d14 100644
--- a/libbuild2/bin/rule.hxx
+++ b/libbuild2/bin/rule.hxx
@@ -9,6 +9,8 @@
#include <libbuild2/rule.hxx>
+#include <libbuild2/dist/rule.hxx>
+
#include <libbuild2/bin/export.hxx>
namespace build2
@@ -18,28 +20,41 @@ namespace build2
// "Fail rule" for obj{} and [h]bmi{} that issues diagnostics if someone
// tries to build these groups directly.
//
- class obj_rule: public simple_rule
+ // Note that for dist it acts as a pass-through to all existing (declared)
+ // members.
+ //
+ class obj_rule: public dist::rule
{
public:
obj_rule () {}
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
virtual recipe
apply (action, target&) const override;
};
- // "Fail rule" for libul{} that issues diagnostics if someone tries to
- // build this group directly.
+ // This rule picks, matches, and unmatches (if possible) a member for the
+ // purpose of making its metadata (for example, library's poptions, if
+ // it's one of the cc libraries) available.
+ //
+ // The underlying idea here is that someone else (e.g., cc::link_rule)
+ // makes a more informed choice and we piggy back on that decision,
+ // falling back to making our own based on bin.lib and bin.exe.lib. Note
+ // that for update this rule always returns target_state::unchanged.
//
- class libul_rule: public simple_rule
+ // Note also that for dist it acts as a pass-through to all existing
+ // (declared) members.
+ //
+ class libul_rule: public dist::rule
{
public:
+ explicit
libul_rule () {}
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
virtual recipe
apply (action, target&) const override;
@@ -47,13 +62,15 @@ namespace build2
// Pass-through to group members rule, similar to alias.
//
+ // Note that for dist it always passes to both members.
+ //
class LIBBUILD2_BIN_SYMEXPORT lib_rule: public simple_rule
{
public:
lib_rule () {}
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
virtual recipe
apply (action, target&) const override;
diff --git a/libbuild2/bin/target.cxx b/libbuild2/bin/target.cxx
index bf701c9..7e4875a 100644
--- a/libbuild2/bin/target.cxx
+++ b/libbuild2/bin/target.cxx
@@ -21,7 +21,7 @@ namespace build2
nullptr,
nullptr,
&target_search,
- false
+ target_type::flag::none
};
const target_type bmix::static_type
@@ -34,7 +34,7 @@ namespace build2
nullptr,
nullptr,
&target_search,
- false
+ target_type::flag::none
};
const target_type hbmix::static_type
@@ -47,7 +47,7 @@ namespace build2
nullptr,
nullptr,
&target_search,
- false
+ target_type::flag::none
};
const target_type libx::static_type
@@ -60,7 +60,7 @@ namespace build2
nullptr,
nullptr,
&target_search,
- false
+ target_type::flag::member_hint // Use untyped hint for group members.
};
const target_type libux::static_type
@@ -73,7 +73,7 @@ namespace build2
nullptr,
nullptr,
&target_search,
- false
+ target_type::flag::none
};
// Note that we link groups during the load phase since this is often
@@ -108,7 +108,7 @@ namespace build2
&target_pattern_var<nullptr>,
nullptr,
&target_search, // Note: not _file(); don't look for an existing file.
- false
+ target_type::flag::none
};
const target_type bmie::static_type
@@ -121,7 +121,7 @@ namespace build2
&target_pattern_var<nullptr>,
nullptr,
&target_search, // Note: not _file(); don't look for an existing file.
- false
+ target_type::flag::none
};
const target_type hbmie::static_type
@@ -134,7 +134,7 @@ namespace build2
&target_pattern_var<nullptr>,
nullptr,
&target_search, // Note: not _file(); don't look for an existing file.
- false
+ target_type::flag::none
};
const target_type obja::static_type
@@ -147,7 +147,7 @@ namespace build2
&target_pattern_var<nullptr>,
nullptr,
&target_search, // Note: not _file(); don't look for an existing file.
- false
+ target_type::flag::none
};
const target_type bmia::static_type
@@ -160,7 +160,7 @@ namespace build2
&target_pattern_var<nullptr>,
nullptr,
&target_search, // Note: not _file(); don't look for an existing file.
- false
+ target_type::flag::none
};
const target_type hbmia::static_type
@@ -173,7 +173,7 @@ namespace build2
&target_pattern_var<nullptr>,
nullptr,
&target_search, // Note: not _file(); don't look for an existing file.
- false
+ target_type::flag::none
};
const target_type objs::static_type
@@ -186,7 +186,7 @@ namespace build2
&target_pattern_var<nullptr>,
nullptr,
&target_search, // Note: not _file(); don't look for an existing file.
- false
+ target_type::flag::none
};
const target_type bmis::static_type
@@ -199,7 +199,7 @@ namespace build2
&target_pattern_var<nullptr>,
nullptr,
&target_search, // Note: not _file(); don't look for an existing file.
- false
+ target_type::flag::none
};
const target_type hbmis::static_type
@@ -212,7 +212,7 @@ namespace build2
&target_pattern_var<nullptr>,
nullptr,
&target_search, // Note: not _file(); don't look for an existing file.
- false
+ target_type::flag::none
};
const target_type libue::static_type
@@ -225,7 +225,7 @@ namespace build2
&target_pattern_var<nullptr>,
nullptr,
&target_search, // Note: not _file(); don't look for an existing file.
- false
+ target_type::flag::none
};
const target_type libua::static_type
@@ -238,7 +238,7 @@ namespace build2
&target_pattern_var<nullptr>,
nullptr,
&target_search, // Note: not _file(); don't look for an existing file.
- false
+ target_type::flag::none
};
const target_type libus::static_type
@@ -251,7 +251,7 @@ namespace build2
&target_pattern_var<nullptr>,
nullptr,
&target_search, // Note: not _file(); don't look for an existing file.
- false
+ target_type::flag::none
};
// obj{}, [h]bmi{}, and libu{} group factory.
@@ -292,7 +292,7 @@ namespace build2
nullptr,
nullptr,
&target_search,
- false
+ target_type::flag::member_hint // Use untyped hint for group members.
};
const target_type bmi::static_type
@@ -305,7 +305,7 @@ namespace build2
nullptr,
nullptr,
&target_search,
- false
+ target_type::flag::member_hint // Use untyped hint for group members.
};
const target_type hbmi::static_type
@@ -318,7 +318,7 @@ namespace build2
nullptr,
nullptr,
&target_search,
- false
+ target_type::flag::member_hint // Use untyped hint for group members.
};
// The same as g_factory() but without E.
@@ -352,7 +352,7 @@ namespace build2
nullptr,
nullptr,
&target_search,
- false
+ target_type::flag::member_hint // Use untyped hint for group members.
};
// What extensions should we use? At the outset, this is platform-
@@ -374,8 +374,8 @@ namespace build2
&target_extension_var<nullptr>,
&target_pattern_var<nullptr>,
nullptr,
- &file_search,
- false
+ &target_search, // Note: not _file(); don't look for an existing file.
+ target_type::flag::none
};
const target_type libs::static_type
@@ -387,8 +387,8 @@ namespace build2
&target_extension_var<nullptr>,
&target_pattern_var<nullptr>,
nullptr,
- &file_search,
- false
+ &target_search, // Note: not _file(); don't look for an existing file.
+ target_type::flag::none
};
// lib
@@ -435,7 +435,10 @@ namespace build2
nullptr,
nullptr,
&target_search,
- false // Note: not see-through ("alternatives" group).
+
+ // Note: not see-through ("alternatives" group).
+ //
+ target_type::flag::member_hint // Use untyped hint for group members.
};
// libi
@@ -449,8 +452,8 @@ namespace build2
&target_extension_var<nullptr>,
&target_pattern_var<nullptr>,
nullptr,
- &file_search,
- false
+ &target_search, // Note: not _file(); don't look for an existing file.
+ target_type::flag::none
};
// def
@@ -467,7 +470,7 @@ namespace build2
&target_pattern_fix<def_ext>,
nullptr,
&file_search,
- false
+ target_type::flag::none
};
}
}
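
Throughout this file the trailing see_through bool in each target_type definition is replaced with a flags field (target_type::flag::none or member_hint above). Below is a standalone sketch of the general pattern of widening a bool into an extensible flags enum; the see_through value and the struct name are hypothetical, only none and member_hint actually appear in this diff.

#include <cstdint>

// Hypothetical sketch of a bool generalized into a flags field.
//
struct target_type_sketch
{
  enum class flag: std::uint64_t
  {
    none        = 0x00,
    member_hint = 0x01, // Use untyped hint for group members (seen above).
    see_through = 0x02  // Hypothetical bit standing in for the old bool.
  };

  flag flags;
};

inline bool
has_flag (target_type_sketch::flag fs, target_type_sketch::flag f)
{
  return (static_cast<std::uint64_t> (fs) &
          static_cast<std::uint64_t> (f)) != 0;
}

// has_flag (target_type_sketch::flag::member_hint,
//           target_type_sketch::flag::member_hint) == true
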
diff --git a/libbuild2/bin/target.hxx b/libbuild2/bin/target.hxx
index f8d2dd0..8f2a92e 100644
--- a/libbuild2/bin/target.hxx
+++ b/libbuild2/bin/target.hxx
@@ -22,7 +22,11 @@ namespace build2
class LIBBUILD2_BIN_SYMEXPORT objx: public file
{
public:
- using file::file;
+ objx (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
@@ -31,41 +35,55 @@ namespace build2
class LIBBUILD2_BIN_SYMEXPORT obje: public objx
{
public:
- using objx::objx;
+ obje (context& c, dir_path d, dir_path o, string n)
+ : objx (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_BIN_SYMEXPORT obja: public objx
{
public:
- using objx::objx;
+ obja (context& c, dir_path d, dir_path o, string n)
+ : objx (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_BIN_SYMEXPORT objs: public objx
{
public:
- using objx::objx;
+ objs (context& c, dir_path d, dir_path o, string n)
+ : objx (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
+ // Note: this is a "choice" target group.
+ //
class LIBBUILD2_BIN_SYMEXPORT obj: public target
{
public:
- using target::target;
+ obj (context& c, dir_path d, dir_path o, string n)
+ : target (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
// Binary module interface (BMI).
@@ -100,7 +118,11 @@ namespace build2
class LIBBUILD2_BIN_SYMEXPORT bmix: public file
{
public:
- using file::file;
+ bmix (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
@@ -111,7 +133,11 @@ namespace build2
class LIBBUILD2_BIN_SYMEXPORT hbmix: public bmix
{
public:
- using bmix::bmix;
+ hbmix (context& c, dir_path d, dir_path o, string n)
+ : bmix (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
@@ -120,84 +146,111 @@ namespace build2
class LIBBUILD2_BIN_SYMEXPORT bmie: public bmix
{
public:
- using bmix::bmix;
+ bmie (context& c, dir_path d, dir_path o, string n)
+ : bmix (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_BIN_SYMEXPORT hbmie: public hbmix
{
public:
- using hbmix::hbmix;
+ hbmie (context& c, dir_path d, dir_path o, string n)
+ : hbmix (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_BIN_SYMEXPORT bmia: public bmix
{
public:
- using bmix::bmix;
+ bmia (context& c, dir_path d, dir_path o, string n)
+ : bmix (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_BIN_SYMEXPORT hbmia: public hbmix
{
public:
- using hbmix::hbmix;
+ hbmia (context& c, dir_path d, dir_path o, string n)
+ : hbmix (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_BIN_SYMEXPORT bmis: public bmix
{
public:
- using bmix::bmix;
+ bmis (context& c, dir_path d, dir_path o, string n)
+ : bmix (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_BIN_SYMEXPORT hbmis: public hbmix
{
public:
- using hbmix::hbmix;
+ hbmis (context& c, dir_path d, dir_path o, string n)
+ : hbmix (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
+ // Note: this is a "choice" target group (similar to obj{}).
+ //
class LIBBUILD2_BIN_SYMEXPORT bmi: public target
{
public:
- using target::target;
+ bmi (context& c, dir_path d, dir_path o, string n)
+ : target (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
+ // Note: this is a "choice" target group (similar to bmi{} and obj{}).
+ //
class LIBBUILD2_BIN_SYMEXPORT hbmi: public target
{
public:
- using target::target;
+ hbmi (context& c, dir_path d, dir_path o, string n)
+ : target (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
-
// Common base for lib{} and libul{} groups.
//
// Use mtime_target as a base for the "trust me it exists" functionality
@@ -207,7 +260,11 @@ namespace build2
class LIBBUILD2_BIN_SYMEXPORT libx: public mtime_target
{
public:
- using mtime_target::mtime_target;
+ libx (context& c, dir_path d, dir_path o, string n)
+ : mtime_target (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
@@ -240,7 +297,11 @@ namespace build2
class LIBBUILD2_BIN_SYMEXPORT libux: public file
{
public:
- using file::file;
+ libux (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
@@ -249,41 +310,58 @@ namespace build2
class LIBBUILD2_BIN_SYMEXPORT libue: public libux
{
public:
- using libux::libux;
+ libue (context& c, dir_path d, dir_path o, string n)
+ : libux (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_BIN_SYMEXPORT libua: public libux
{
public:
- using libux::libux;
+ libua (context& c, dir_path d, dir_path o, string n)
+ : libux (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_BIN_SYMEXPORT libus: public libux
{
public:
- using libux::libux;
+ libus (context& c, dir_path d, dir_path o, string n)
+ : libux (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
+ // Note: this is a "choice" target group.
+ //
+ // @@ Ideally this shouldn't derive from mtime_target (via libx). Maybe
+ // get rid of libx?
+ //
class LIBBUILD2_BIN_SYMEXPORT libul: public libx
{
public:
- using libx::libx;
+ libul (context& c, dir_path d, dir_path o, string n)
+ : libx (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
// The lib{} target group.
@@ -291,23 +369,27 @@ namespace build2
class LIBBUILD2_BIN_SYMEXPORT liba: public file
{
public:
- using file::file;
+ liba (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_BIN_SYMEXPORT libs: public file
{
public:
- using file::file;
+ libs (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
-
- virtual const target_type&
- dynamic_type () const override {return static_type;}
};
// Standard layout type compatible with group_view's const target*[2].
@@ -321,16 +403,32 @@ namespace build2
class LIBBUILD2_BIN_SYMEXPORT lib: public libx, public lib_members
{
public:
- using libx::libx;
+ lib (context& c, dir_path d, dir_path o, string n)
+ : libx (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
virtual group_view
group_members (action) const override;
+ // Match options for the install operation on the liba{}/libs{} and
+ // libua{}/libus{} target types (note: not lib{}/libul{} nor libue{}).
+ //
+ // If only the install_runtime option is specified, then only install the
+ // runtime files omitting everything buildtime (headers, pkg-config
+ // files, shared library version-related symlinks, etc).
+ //
+ // Note that it's either runtime-only or runtime and buildtime (i.e.,
+ // everything), so match with install_all instead of install_buildtime
+ // (the latter is only useful in the rule implementations).
+ //
+ static constexpr uint64_t option_install_runtime = 0x01;
+ static constexpr uint64_t option_install_buildtime = 0x02;
+ static constexpr uint64_t option_install_all = match_extra::all_options;
+
public:
static const target_type static_type;
-
- virtual const target_type&
- dynamic_type () const override {return static_type;}
};
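
The three constants above encode the choice described in the comment: a match is either runtime-only or "everything". A small sketch of the bit test a rule might perform; match_extra::all_options is assumed here to behave like an all-bits mask, and the helper name is hypothetical:

#include <cstdint>

constexpr std::uint64_t option_install_runtime   = 0x01;
constexpr std::uint64_t option_install_buildtime = 0x02;
constexpr std::uint64_t option_install_all       = ~std::uint64_t (0); // Assumed.

// Hypothetical helper: should buildtime files (headers, pkg-config files,
// version symlinks, etc.) be installed for this match?
//
inline bool
install_buildtime_files (std::uint64_t options)
{
  return (options & option_install_buildtime) != 0;
}

// install_buildtime_files (option_install_runtime) == false
// install_buildtime_files (option_install_all)     == true
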
// Windows import library.
@@ -338,11 +436,14 @@ namespace build2
class LIBBUILD2_BIN_SYMEXPORT libi: public file
{
public:
- using file::file;
+ libi (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
// Windows module definition (.def).
@@ -350,11 +451,14 @@ namespace build2
class LIBBUILD2_BIN_SYMEXPORT def: public file
{
public:
- using file::file;
+ def (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
}
}
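
The recurring change in this header replaces the per-class virtual dynamic_type() accessor with a dynamic_type data member that each constructor points at its class's static_type (the most derived constructor runs last and wins). A simplified standalone sketch of that pattern, with hypothetical types rather than the libbuild2 hierarchy:

#include <string>
#include <utility>

struct type_info_sketch { const char* name; };

struct target_sketch
{
  // Replaces a virtual dynamic_type() call: set once per constructor.
  //
  const type_info_sketch* dynamic_type;

  std::string dir, out, name;

  target_sketch (std::string d, std::string o, std::string n)
      : dynamic_type (&static_type),
        dir (std::move (d)), out (std::move (o)), name (std::move (n)) {}

  static const type_info_sketch static_type;
};

struct file_sketch: target_sketch
{
  file_sketch (std::string d, std::string o, std::string n)
      : target_sketch (std::move (d), std::move (o), std::move (n))
  {
    dynamic_type = &static_type; // Most-derived constructor wins.
  }

  static const type_info_sketch static_type;
};

const type_info_sketch target_sketch::static_type {"target"};
const type_info_sketch file_sketch::static_type {"file"};
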
diff --git a/libbuild2/bin/utility.cxx b/libbuild2/bin/utility.cxx
index cb06287..a03ea50 100644
--- a/libbuild2/bin/utility.cxx
+++ b/libbuild2/bin/utility.cxx
@@ -57,6 +57,11 @@ namespace build2
// prefer static over shared since it could be faster (but I am sure
// someone will probably want this configurable).
//
+ // Maybe we should use the bin.exe.lib order as a heuristic (i.e.,
+ // the most likely utility library to be built is the one most likely
+ // to be linked)? Will need the variables rs-only, similar to
+ // bin.lib, which probably is a good thing. See also libul_rule.
+ //
if (li.type == otype::e)
{
// Utility libraries are project-local which means the primarily
@@ -84,7 +89,9 @@ namespace build2
// Make sure group members are resolved.
//
group_view gv (resolve_members (a, l));
- assert (gv.members != nullptr);
+
+ if (gv.members == nullptr)
+ fail << "group " << l << " has no members";
pair<otype, bool> p (
link_member (lmembers {l.a != nullptr, l.s != nullptr}, li.order));
diff --git a/libbuild2/build/script/builtin-options.cxx b/libbuild2/build/script/builtin-options.cxx
index 04cd1c2..dba3c59 100644
--- a/libbuild2/build/script/builtin-options.cxx
+++ b/libbuild2/build/script/builtin-options.cxx
@@ -6,7 +6,7 @@
// Begin prologue.
//
-#include <libbuild2/build/script/types-parsers.hxx>
+#include <libbuild2/types-parsers.hxx>
//
// End prologue.
@@ -19,377 +19,251 @@
#include <utility>
#include <ostream>
#include <sstream>
+#include <cstring>
namespace build2
{
namespace build
{
- namespace script
+ namespace cli
{
- namespace cli
+ template <typename X>
+ struct parser
{
- // unknown_option
- //
- unknown_option::
- ~unknown_option () throw ()
- {
- }
-
- void unknown_option::
- print (::std::ostream& os) const
+ static void
+ parse (X& x, bool& xs, scanner& s)
{
- os << "unknown option '" << option ().c_str () << "'";
- }
-
- const char* unknown_option::
- what () const throw ()
- {
- return "unknown option";
- }
+ using namespace std;
- // unknown_argument
- //
- unknown_argument::
- ~unknown_argument () throw ()
- {
- }
-
- void unknown_argument::
- print (::std::ostream& os) const
- {
- os << "unknown argument '" << argument ().c_str () << "'";
- }
-
- const char* unknown_argument::
- what () const throw ()
- {
- return "unknown argument";
- }
+ const char* o (s.next ());
+ if (s.more ())
+ {
+ string v (s.next ());
+ istringstream is (v);
+ if (!(is >> x && is.peek () == istringstream::traits_type::eof ()))
+ throw invalid_value (o, v);
+ }
+ else
+ throw missing_value (o);
- // missing_value
- //
- missing_value::
- ~missing_value () throw ()
- {
+ xs = true;
}
+ };
- void missing_value::
- print (::std::ostream& os) const
+ template <>
+ struct parser<bool>
+ {
+ static void
+ parse (bool& x, bool& xs, scanner& s)
{
- os << "missing value for option '" << option ().c_str () << "'";
- }
+ const char* o (s.next ());
- const char* missing_value::
- what () const throw ()
- {
- return "missing option value";
- }
+ if (s.more ())
+ {
+ const char* v (s.next ());
+
+ if (std::strcmp (v, "1") == 0 ||
+ std::strcmp (v, "true") == 0 ||
+ std::strcmp (v, "TRUE") == 0 ||
+ std::strcmp (v, "True") == 0)
+ x = true;
+ else if (std::strcmp (v, "0") == 0 ||
+ std::strcmp (v, "false") == 0 ||
+ std::strcmp (v, "FALSE") == 0 ||
+ std::strcmp (v, "False") == 0)
+ x = false;
+ else
+ throw invalid_value (o, v);
+ }
+ else
+ throw missing_value (o);
- // invalid_value
- //
- invalid_value::
- ~invalid_value () throw ()
- {
+ xs = true;
}
+ };
- void invalid_value::
- print (::std::ostream& os) const
+ template <>
+ struct parser<std::string>
+ {
+ static void
+ parse (std::string& x, bool& xs, scanner& s)
{
- os << "invalid value '" << value ().c_str () << "' for option '"
- << option ().c_str () << "'";
+ const char* o (s.next ());
- if (!message ().empty ())
- os << ": " << message ().c_str ();
- }
+ if (s.more ())
+ x = s.next ();
+ else
+ throw missing_value (o);
- const char* invalid_value::
- what () const throw ()
- {
- return "invalid option value";
+ xs = true;
}
+ };
- // eos_reached
- //
- void eos_reached::
- print (::std::ostream& os) const
+ template <typename X>
+ struct parser<std::pair<X, std::size_t> >
+ {
+ static void
+ parse (std::pair<X, std::size_t>& x, bool& xs, scanner& s)
{
- os << what ();
+ x.second = s.position ();
+ parser<X>::parse (x.first, xs, s);
}
+ };
- const char* eos_reached::
- what () const throw ()
- {
- return "end of argument stream reached";
+ template <typename X>
+ struct parser<std::vector<X> >
+ {
+ static void
+ parse (std::vector<X>& c, bool& xs, scanner& s)
+ {
+ X x;
+ bool dummy;
+ parser<X>::parse (x, dummy, s);
+ c.push_back (x);
+ xs = true;
}
+ };
- // scanner
- //
- scanner::
- ~scanner ()
- {
+ template <typename X, typename C>
+ struct parser<std::set<X, C> >
+ {
+ static void
+ parse (std::set<X, C>& c, bool& xs, scanner& s)
+ {
+ X x;
+ bool dummy;
+ parser<X>::parse (x, dummy, s);
+ c.insert (x);
+ xs = true;
}
+ };
- // argv_scanner
- //
- bool argv_scanner::
- more ()
+ template <typename K, typename V, typename C>
+ struct parser<std::map<K, V, C> >
+ {
+ static void
+ parse (std::map<K, V, C>& m, bool& xs, scanner& s)
{
- return i_ < argc_;
- }
+ const char* o (s.next ());
- const char* argv_scanner::
- peek ()
- {
- if (i_ < argc_)
- return argv_[i_];
- else
- throw eos_reached ();
- }
-
- const char* argv_scanner::
- next ()
- {
- if (i_ < argc_)
+ if (s.more ())
{
- const char* r (argv_[i_]);
+ std::size_t pos (s.position ());
+ std::string ov (s.next ());
+ std::string::size_type p = ov.find ('=');
+
+ K k = K ();
+ V v = V ();
+ std::string kstr (ov, 0, p);
+ std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
- if (erase_)
+ int ac (2);
+ char* av[] =
{
- for (int i (i_ + 1); i < argc_; ++i)
- argv_[i - 1] = argv_[i];
+ const_cast<char*> (o),
+ 0
+ };
- --argc_;
- argv_[argc_] = 0;
+ bool dummy;
+ if (!kstr.empty ())
+ {
+ av[1] = const_cast<char*> (kstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<K>::parse (k, dummy, s);
}
- else
- ++i_;
- ++start_position_;
- return r;
- }
- else
- throw eos_reached ();
- }
+ if (!vstr.empty ())
+ {
+ av[1] = const_cast<char*> (vstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<V>::parse (v, dummy, s);
+ }
- void argv_scanner::
- skip ()
- {
- if (i_ < argc_)
- {
- ++i_;
- ++start_position_;
+ m[k] = v;
}
else
- throw eos_reached ();
- }
+ throw missing_value (o);
- std::size_t argv_scanner::
- position ()
- {
- return start_position_;
+ xs = true;
}
+ };
- // vector_scanner
- //
- bool vector_scanner::
- more ()
- {
- return i_ < v_.size ();
- }
-
- const char* vector_scanner::
- peek ()
- {
- if (i_ < v_.size ())
- return v_[i_].c_str ();
- else
- throw eos_reached ();
- }
-
- const char* vector_scanner::
- next ()
- {
- if (i_ < v_.size ())
- return v_[i_++].c_str ();
- else
- throw eos_reached ();
- }
-
- void vector_scanner::
- skip ()
- {
- if (i_ < v_.size ())
- ++i_;
- else
- throw eos_reached ();
- }
-
- std::size_t vector_scanner::
- position ()
- {
- return start_position_ + i_;
- }
-
- template <typename X>
- struct parser
- {
- static void
- parse (X& x, bool& xs, scanner& s)
- {
- using namespace std;
-
- const char* o (s.next ());
- if (s.more ())
- {
- string v (s.next ());
- istringstream is (v);
- if (!(is >> x && is.peek () == istringstream::traits_type::eof ()))
- throw invalid_value (o, v);
- }
- else
- throw missing_value (o);
-
- xs = true;
- }
- };
-
- template <>
- struct parser<bool>
+ template <typename K, typename V, typename C>
+ struct parser<std::multimap<K, V, C> >
+ {
+ static void
+ parse (std::multimap<K, V, C>& m, bool& xs, scanner& s)
{
- static void
- parse (bool& x, scanner& s)
- {
- s.next ();
- x = true;
- }
- };
+ const char* o (s.next ());
- template <>
- struct parser<std::string>
- {
- static void
- parse (std::string& x, bool& xs, scanner& s)
+ if (s.more ())
{
- const char* o (s.next ());
+ std::size_t pos (s.position ());
+ std::string ov (s.next ());
+ std::string::size_type p = ov.find ('=');
- if (s.more ())
- x = s.next ();
- else
- throw missing_value (o);
-
- xs = true;
- }
- };
-
- template <typename X>
- struct parser<std::pair<X, std::size_t> >
- {
- static void
- parse (std::pair<X, std::size_t>& x, bool& xs, scanner& s)
- {
- x.second = s.position ();
- parser<X>::parse (x.first, xs, s);
- }
- };
+ K k = K ();
+ V v = V ();
+ std::string kstr (ov, 0, p);
+ std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
- template <typename X>
- struct parser<std::vector<X> >
- {
- static void
- parse (std::vector<X>& c, bool& xs, scanner& s)
- {
- X x;
- bool dummy;
- parser<X>::parse (x, dummy, s);
- c.push_back (x);
- xs = true;
- }
- };
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (o),
+ 0
+ };
- template <typename X, typename C>
- struct parser<std::set<X, C> >
- {
- static void
- parse (std::set<X, C>& c, bool& xs, scanner& s)
- {
- X x;
bool dummy;
- parser<X>::parse (x, dummy, s);
- c.insert (x);
- xs = true;
- }
- };
-
- template <typename K, typename V, typename C>
- struct parser<std::map<K, V, C> >
- {
- static void
- parse (std::map<K, V, C>& m, bool& xs, scanner& s)
- {
- const char* o (s.next ());
-
- if (s.more ())
+ if (!kstr.empty ())
{
- std::size_t pos (s.position ());
- std::string ov (s.next ());
- std::string::size_type p = ov.find ('=');
-
- K k = K ();
- V v = V ();
- std::string kstr (ov, 0, p);
- std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
-
- int ac (2);
- char* av[] =
- {
- const_cast<char*> (o),
- 0
- };
-
- bool dummy;
- if (!kstr.empty ())
- {
- av[1] = const_cast<char*> (kstr.c_str ());
- argv_scanner s (0, ac, av, false, pos);
- parser<K>::parse (k, dummy, s);
- }
-
- if (!vstr.empty ())
- {
- av[1] = const_cast<char*> (vstr.c_str ());
- argv_scanner s (0, ac, av, false, pos);
- parser<V>::parse (v, dummy, s);
- }
+ av[1] = const_cast<char*> (kstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<K>::parse (k, dummy, s);
+ }
- m[k] = v;
+ if (!vstr.empty ())
+ {
+ av[1] = const_cast<char*> (vstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<V>::parse (v, dummy, s);
}
- else
- throw missing_value (o);
- xs = true;
+ m.insert (typename std::multimap<K, V, C>::value_type (k, v));
}
- };
+ else
+ throw missing_value (o);
- template <typename X, typename T, T X::*M>
- void
- thunk (X& x, scanner& s)
- {
- parser<T>::parse (x.*M, s);
+ xs = true;
}
+ };
- template <typename X, typename T, T X::*M, bool X::*S>
- void
- thunk (X& x, scanner& s)
- {
- parser<T>::parse (x.*M, x.*S, s);
- }
+ template <typename X, typename T, T X::*M>
+ void
+ thunk (X& x, scanner& s)
+ {
+ parser<T>::parse (x.*M, s);
+ }
+
+ template <typename X, bool X::*M>
+ void
+ thunk (X& x, scanner& s)
+ {
+ s.next ();
+ x.*M = true;
+ }
+
+ template <typename X, typename T, T X::*M, bool X::*S>
+ void
+ thunk (X& x, scanner& s)
+ {
+ parser<T>::parse (x.*M, x.*S, s);
}
}
}
}
#include <map>
-#include <cstring>
namespace build2
{
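
The relocated parser templates above carry the value handling for these options: a generic istringstream-based parse that requires the whole value to be consumed, plus key=value splitting for map-typed options such as the new --target-extension-type. A standalone sketch of both behaviors (generic helpers, not the generated cli classes):

#include <map>
#include <string>
#include <sstream>
#include <utility>
#include <stdexcept>

// Mirrors parser<X>: the entire string must parse as an X.
//
template <typename X>
X
parse_value (const std::string& v)
{
  X x;
  std::istringstream is (v);
  if (!(is >> x && is.peek () == std::istringstream::traits_type::eof ()))
    throw std::invalid_argument ("invalid value '" + v + "'");
  return x;
}

// Mirrors parser<std::map<K, V>>: split on the first '='; a missing '='
// yields an empty value.
//
std::pair<std::string, std::string>
parse_key_value (const std::string& ov)
{
  std::string::size_type p (ov.find ('='));
  return {std::string (ov, 0, p),
          std::string (ov, p != std::string::npos ? p + 1 : ov.size ())};
}

// parse_value<unsigned int> ("123") == 123; parse_value<unsigned int> ("12x") throws.
// parse_key_value ("h=hxx") == {"h", "hxx"} (as used by --target-extension-type).
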
@@ -415,7 +289,15 @@ namespace build2
adhoc_ (),
cwd_ (),
cwd_specified_ (false),
- drop_cycles_ ()
+ drop_cycles_ (),
+ target_what_ (),
+ target_what_specified_ (false),
+ target_default_type_ (),
+ target_default_type_specified_ (false),
+ target_extension_type_ (),
+ target_extension_type_specified_ (false),
+ target_cwd_ (),
+ target_cwd_specified_ (false)
{
}
@@ -423,10 +305,10 @@ namespace build2
parse (int& argc,
char** argv,
bool erase,
- ::build2::build::script::cli::unknown_mode opt,
- ::build2::build::script::cli::unknown_mode arg)
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
{
- ::build2::build::script::cli::argv_scanner s (argc, argv, erase);
+ ::build2::build::cli::argv_scanner s (argc, argv, erase);
bool r = _parse (s, opt, arg);
return r;
}
@@ -436,10 +318,10 @@ namespace build2
int& argc,
char** argv,
bool erase,
- ::build2::build::script::cli::unknown_mode opt,
- ::build2::build::script::cli::unknown_mode arg)
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
{
- ::build2::build::script::cli::argv_scanner s (start, argc, argv, erase);
+ ::build2::build::cli::argv_scanner s (start, argc, argv, erase);
bool r = _parse (s, opt, arg);
return r;
}
@@ -449,10 +331,10 @@ namespace build2
char** argv,
int& end,
bool erase,
- ::build2::build::script::cli::unknown_mode opt,
- ::build2::build::script::cli::unknown_mode arg)
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
{
- ::build2::build::script::cli::argv_scanner s (argc, argv, erase);
+ ::build2::build::cli::argv_scanner s (argc, argv, erase);
bool r = _parse (s, opt, arg);
end = s.end ();
return r;
@@ -464,26 +346,26 @@ namespace build2
char** argv,
int& end,
bool erase,
- ::build2::build::script::cli::unknown_mode opt,
- ::build2::build::script::cli::unknown_mode arg)
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
{
- ::build2::build::script::cli::argv_scanner s (start, argc, argv, erase);
+ ::build2::build::cli::argv_scanner s (start, argc, argv, erase);
bool r = _parse (s, opt, arg);
end = s.end ();
return r;
}
bool depdb_dyndep_options::
- parse (::build2::build::script::cli::scanner& s,
- ::build2::build::script::cli::unknown_mode opt,
- ::build2::build::script::cli::unknown_mode arg)
+ parse (::build2::build::cli::scanner& s,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
{
bool r = _parse (s, opt, arg);
return r;
}
typedef
- std::map<std::string, void (*) (depdb_dyndep_options&, ::build2::build::script::cli::scanner&)>
+ std::map<std::string, void (*) (depdb_dyndep_options&, ::build2::build::cli::scanner&)>
_cli_depdb_dyndep_options_map;
static _cli_depdb_dyndep_options_map _cli_depdb_dyndep_options_map_;
@@ -493,37 +375,49 @@ namespace build2
_cli_depdb_dyndep_options_map_init ()
{
_cli_depdb_dyndep_options_map_["--file"] =
- &::build2::build::script::cli::thunk< depdb_dyndep_options, path, &depdb_dyndep_options::file_,
+ &::build2::build::cli::thunk< depdb_dyndep_options, path, &depdb_dyndep_options::file_,
&depdb_dyndep_options::file_specified_ >;
_cli_depdb_dyndep_options_map_["--format"] =
- &::build2::build::script::cli::thunk< depdb_dyndep_options, string, &depdb_dyndep_options::format_,
+ &::build2::build::cli::thunk< depdb_dyndep_options, string, &depdb_dyndep_options::format_,
&depdb_dyndep_options::format_specified_ >;
_cli_depdb_dyndep_options_map_["--what"] =
- &::build2::build::script::cli::thunk< depdb_dyndep_options, string, &depdb_dyndep_options::what_,
+ &::build2::build::cli::thunk< depdb_dyndep_options, string, &depdb_dyndep_options::what_,
&depdb_dyndep_options::what_specified_ >;
_cli_depdb_dyndep_options_map_["--include-path"] =
- &::build2::build::script::cli::thunk< depdb_dyndep_options, dir_paths, &depdb_dyndep_options::include_path_,
+ &::build2::build::cli::thunk< depdb_dyndep_options, dir_paths, &depdb_dyndep_options::include_path_,
&depdb_dyndep_options::include_path_specified_ >;
_cli_depdb_dyndep_options_map_["-I"] =
- &::build2::build::script::cli::thunk< depdb_dyndep_options, dir_paths, &depdb_dyndep_options::include_path_,
+ &::build2::build::cli::thunk< depdb_dyndep_options, dir_paths, &depdb_dyndep_options::include_path_,
&depdb_dyndep_options::include_path_specified_ >;
_cli_depdb_dyndep_options_map_["--default-type"] =
- &::build2::build::script::cli::thunk< depdb_dyndep_options, string, &depdb_dyndep_options::default_type_,
+ &::build2::build::cli::thunk< depdb_dyndep_options, string, &depdb_dyndep_options::default_type_,
&depdb_dyndep_options::default_type_specified_ >;
_cli_depdb_dyndep_options_map_["--adhoc"] =
- &::build2::build::script::cli::thunk< depdb_dyndep_options, bool, &depdb_dyndep_options::adhoc_ >;
+ &::build2::build::cli::thunk< depdb_dyndep_options, &depdb_dyndep_options::adhoc_ >;
_cli_depdb_dyndep_options_map_["--cwd"] =
- &::build2::build::script::cli::thunk< depdb_dyndep_options, dir_path, &depdb_dyndep_options::cwd_,
+ &::build2::build::cli::thunk< depdb_dyndep_options, dir_path, &depdb_dyndep_options::cwd_,
&depdb_dyndep_options::cwd_specified_ >;
_cli_depdb_dyndep_options_map_["--drop-cycles"] =
- &::build2::build::script::cli::thunk< depdb_dyndep_options, bool, &depdb_dyndep_options::drop_cycles_ >;
+ &::build2::build::cli::thunk< depdb_dyndep_options, &depdb_dyndep_options::drop_cycles_ >;
+ _cli_depdb_dyndep_options_map_["--target-what"] =
+ &::build2::build::cli::thunk< depdb_dyndep_options, string, &depdb_dyndep_options::target_what_,
+ &depdb_dyndep_options::target_what_specified_ >;
+ _cli_depdb_dyndep_options_map_["--target-default-type"] =
+ &::build2::build::cli::thunk< depdb_dyndep_options, string, &depdb_dyndep_options::target_default_type_,
+ &depdb_dyndep_options::target_default_type_specified_ >;
+ _cli_depdb_dyndep_options_map_["--target-extension-type"] =
+ &::build2::build::cli::thunk< depdb_dyndep_options, map<string, string>, &depdb_dyndep_options::target_extension_type_,
+ &depdb_dyndep_options::target_extension_type_specified_ >;
+ _cli_depdb_dyndep_options_map_["--target-cwd"] =
+ &::build2::build::cli::thunk< depdb_dyndep_options, dir_path, &depdb_dyndep_options::target_cwd_,
+ &depdb_dyndep_options::target_cwd_specified_ >;
}
};
static _cli_depdb_dyndep_options_map_init _cli_depdb_dyndep_options_map_init_;
bool depdb_dyndep_options::
- _parse (const char* o, ::build2::build::script::cli::scanner& s)
+ _parse (const char* o, ::build2::build::cli::scanner& s)
{
_cli_depdb_dyndep_options_map::const_iterator i (_cli_depdb_dyndep_options_map_.find (o));
@@ -537,13 +431,13 @@ namespace build2
}
bool depdb_dyndep_options::
- _parse (::build2::build::script::cli::scanner& s,
- ::build2::build::script::cli::unknown_mode opt_mode,
- ::build2::build::script::cli::unknown_mode arg_mode)
+ _parse (::build2::build::cli::scanner& s,
+ ::build2::build::cli::unknown_mode opt_mode,
+ ::build2::build::cli::unknown_mode arg_mode)
{
// Can't skip combined flags (--no-combined-flags).
//
- assert (opt_mode != ::build2::build::script::cli::unknown_mode::skip);
+ assert (opt_mode != ::build2::build::cli::unknown_mode::skip);
bool r = false;
bool opt = true;
@@ -585,14 +479,14 @@ namespace build2
const_cast<char*> (v)
};
- ::build2::build::script::cli::argv_scanner ns (0, ac, av);
+ ::build2::build::cli::argv_scanner ns (0, ac, av);
if (_parse (co.c_str (), ns))
{
// Parsed the option but not its value?
//
if (ns.end () != 2)
- throw ::build2::build::script::cli::invalid_value (co, v);
+ throw ::build2::build::cli::invalid_value (co, v);
s.next ();
r = true;
@@ -633,7 +527,7 @@ namespace build2
cf
};
- ::build2::build::script::cli::argv_scanner ns (0, ac, av);
+ ::build2::build::cli::argv_scanner ns (0, ac, av);
if (!_parse (cf, ns))
break;
@@ -658,19 +552,19 @@ namespace build2
switch (opt_mode)
{
- case ::build2::build::script::cli::unknown_mode::skip:
+ case ::build2::build::cli::unknown_mode::skip:
{
s.skip ();
r = true;
continue;
}
- case ::build2::build::script::cli::unknown_mode::stop:
+ case ::build2::build::cli::unknown_mode::stop:
{
break;
}
- case ::build2::build::script::cli::unknown_mode::fail:
+ case ::build2::build::cli::unknown_mode::fail:
{
- throw ::build2::build::script::cli::unknown_option (o);
+ throw ::build2::build::cli::unknown_option (o);
}
}
@@ -680,19 +574,19 @@ namespace build2
switch (arg_mode)
{
- case ::build2::build::script::cli::unknown_mode::skip:
+ case ::build2::build::cli::unknown_mode::skip:
{
s.skip ();
r = true;
continue;
}
- case ::build2::build::script::cli::unknown_mode::stop:
+ case ::build2::build::cli::unknown_mode::stop:
{
break;
}
- case ::build2::build::script::cli::unknown_mode::fail:
+ case ::build2::build::cli::unknown_mode::fail:
{
- throw ::build2::build::script::cli::unknown_argument (o);
+ throw ::build2::build::cli::unknown_argument (o);
}
}
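
For reference, here is a rough sketch of driving the regenerated depdb_dyndep_options directly, based only on the parse() overload and the --target-* accessors visible in this diff. The standalone driver, the assumption that the scanner skips element 0 like a program name, and the option values are all illustrative; this is not how the depdb dyndep builtin actually invokes the parser.

#include <libbuild2/build/script/builtin-options.hxx>

int
main ()
{
  using namespace build2::build::script;

  // Hypothetical argument vector.
  //
  const char* args[] = {
    "depdb-dyndep",
    "--target-what", "header",
    "--target-default-type", "h"};

  int argc (5);

  depdb_dyndep_options o;
  o.parse (argc, const_cast<char**> (args)); // Defaults: fail on unknown option.

  if (o.target_what_specified ())
  {
    // o.target_what () == "header", o.target_default_type () == "h".
  }
}
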
diff --git a/libbuild2/build/script/builtin-options.hxx b/libbuild2/build/script/builtin-options.hxx
index 60020c9..a8c3440 100644
--- a/libbuild2/build/script/builtin-options.hxx
+++ b/libbuild2/build/script/builtin-options.hxx
@@ -12,284 +12,7 @@
//
// End prologue.
-#include <vector>
-#include <iosfwd>
-#include <string>
-#include <cstddef>
-#include <exception>
-
-#ifndef CLI_POTENTIALLY_UNUSED
-# if defined(_MSC_VER) || defined(__xlC__)
-# define CLI_POTENTIALLY_UNUSED(x) (void*)&x
-# else
-# define CLI_POTENTIALLY_UNUSED(x) (void)x
-# endif
-#endif
-
-namespace build2
-{
- namespace build
- {
- namespace script
- {
- namespace cli
- {
- class unknown_mode
- {
- public:
- enum value
- {
- skip,
- stop,
- fail
- };
-
- unknown_mode (value);
-
- operator value () const
- {
- return v_;
- }
-
- private:
- value v_;
- };
-
- // Exceptions.
- //
-
- class exception: public std::exception
- {
- public:
- virtual void
- print (::std::ostream&) const = 0;
- };
-
- ::std::ostream&
- operator<< (::std::ostream&, const exception&);
-
- class unknown_option: public exception
- {
- public:
- virtual
- ~unknown_option () throw ();
-
- unknown_option (const std::string& option);
-
- const std::string&
- option () const;
-
- virtual void
- print (::std::ostream&) const;
-
- virtual const char*
- what () const throw ();
-
- private:
- std::string option_;
- };
-
- class unknown_argument: public exception
- {
- public:
- virtual
- ~unknown_argument () throw ();
-
- unknown_argument (const std::string& argument);
-
- const std::string&
- argument () const;
-
- virtual void
- print (::std::ostream&) const;
-
- virtual const char*
- what () const throw ();
-
- private:
- std::string argument_;
- };
-
- class missing_value: public exception
- {
- public:
- virtual
- ~missing_value () throw ();
-
- missing_value (const std::string& option);
-
- const std::string&
- option () const;
-
- virtual void
- print (::std::ostream&) const;
-
- virtual const char*
- what () const throw ();
-
- private:
- std::string option_;
- };
-
- class invalid_value: public exception
- {
- public:
- virtual
- ~invalid_value () throw ();
-
- invalid_value (const std::string& option,
- const std::string& value,
- const std::string& message = std::string ());
-
- const std::string&
- option () const;
-
- const std::string&
- value () const;
-
- const std::string&
- message () const;
-
- virtual void
- print (::std::ostream&) const;
-
- virtual const char*
- what () const throw ();
-
- private:
- std::string option_;
- std::string value_;
- std::string message_;
- };
-
- class eos_reached: public exception
- {
- public:
- virtual void
- print (::std::ostream&) const;
-
- virtual const char*
- what () const throw ();
- };
-
- // Command line argument scanner interface.
- //
- // The values returned by next() are guaranteed to be valid
- // for the two previous arguments up until a call to a third
- // peek() or next().
- //
- // The position() function returns a monotonically-increasing
- // number which, if stored, can later be used to determine the
- // relative position of the argument returned by the following
- // call to next(). Note that if multiple scanners are used to
- // extract arguments from multiple sources, then the end
- // position of the previous scanner should be used as the
- // start position of the next.
- //
- class scanner
- {
- public:
- virtual
- ~scanner ();
-
- virtual bool
- more () = 0;
-
- virtual const char*
- peek () = 0;
-
- virtual const char*
- next () = 0;
-
- virtual void
- skip () = 0;
-
- virtual std::size_t
- position () = 0;
- };
-
- class argv_scanner: public scanner
- {
- public:
- argv_scanner (int& argc,
- char** argv,
- bool erase = false,
- std::size_t start_position = 0);
-
- argv_scanner (int start,
- int& argc,
- char** argv,
- bool erase = false,
- std::size_t start_position = 0);
-
- int
- end () const;
-
- virtual bool
- more ();
-
- virtual const char*
- peek ();
-
- virtual const char*
- next ();
-
- virtual void
- skip ();
-
- virtual std::size_t
- position ();
-
- protected:
- std::size_t start_position_;
- int i_;
- int& argc_;
- char** argv_;
- bool erase_;
- };
-
- class vector_scanner: public scanner
- {
- public:
- vector_scanner (const std::vector<std::string>&,
- std::size_t start = 0,
- std::size_t start_position = 0);
-
- std::size_t
- end () const;
-
- void
- reset (std::size_t start = 0, std::size_t start_position = 0);
-
- virtual bool
- more ();
-
- virtual const char*
- peek ();
-
- virtual const char*
- next ();
-
- virtual void
- skip ();
-
- virtual std::size_t
- position ();
-
- private:
- std::size_t start_position_;
- const std::vector<std::string>& v_;
- std::size_t i_;
- };
-
- template <typename X>
- struct parser;
- }
- }
- }
-}
-
-#include <libbuild2/types.hxx>
+#include <libbuild2/common-options.hxx>
namespace build2
{
@@ -308,24 +31,24 @@ namespace build2
parse (int& argc,
char** argv,
bool erase = false,
- ::build2::build::script::cli::unknown_mode option = ::build2::build::script::cli::unknown_mode::fail,
- ::build2::build::script::cli::unknown_mode argument = ::build2::build::script::cli::unknown_mode::stop);
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
bool
parse (int start,
int& argc,
char** argv,
bool erase = false,
- ::build2::build::script::cli::unknown_mode option = ::build2::build::script::cli::unknown_mode::fail,
- ::build2::build::script::cli::unknown_mode argument = ::build2::build::script::cli::unknown_mode::stop);
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
bool
parse (int& argc,
char** argv,
int& end,
bool erase = false,
- ::build2::build::script::cli::unknown_mode option = ::build2::build::script::cli::unknown_mode::fail,
- ::build2::build::script::cli::unknown_mode argument = ::build2::build::script::cli::unknown_mode::stop);
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
bool
parse (int start,
@@ -333,13 +56,13 @@ namespace build2
char** argv,
int& end,
bool erase = false,
- ::build2::build::script::cli::unknown_mode option = ::build2::build::script::cli::unknown_mode::fail,
- ::build2::build::script::cli::unknown_mode argument = ::build2::build::script::cli::unknown_mode::stop);
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
bool
- parse (::build2::build::script::cli::scanner&,
- ::build2::build::script::cli::unknown_mode option = ::build2::build::script::cli::unknown_mode::fail,
- ::build2::build::script::cli::unknown_mode argument = ::build2::build::script::cli::unknown_mode::stop);
+ parse (::build2::build::cli::scanner&,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
// Option accessors and modifiers.
//
@@ -451,17 +174,77 @@ namespace build2
void
drop_cycles (const bool&);
+ const string&
+ target_what () const;
+
+ string&
+ target_what ();
+
+ void
+ target_what (const string&);
+
+ bool
+ target_what_specified () const;
+
+ void
+ target_what_specified (bool);
+
+ const string&
+ target_default_type () const;
+
+ string&
+ target_default_type ();
+
+ void
+ target_default_type (const string&);
+
+ bool
+ target_default_type_specified () const;
+
+ void
+ target_default_type_specified (bool);
+
+ const map<string, string>&
+ target_extension_type () const;
+
+ map<string, string>&
+ target_extension_type ();
+
+ void
+ target_extension_type (const map<string, string>&);
+
+ bool
+ target_extension_type_specified () const;
+
+ void
+ target_extension_type_specified (bool);
+
+ const dir_path&
+ target_cwd () const;
+
+ dir_path&
+ target_cwd ();
+
+ void
+ target_cwd (const dir_path&);
+
+ bool
+ target_cwd_specified () const;
+
+ void
+ target_cwd_specified (bool);
+
// Implementation details.
//
protected:
bool
- _parse (const char*, ::build2::build::script::cli::scanner&);
+ _parse (const char*, ::build2::build::cli::scanner&);
private:
bool
- _parse (::build2::build::script::cli::scanner&,
- ::build2::build::script::cli::unknown_mode option,
- ::build2::build::script::cli::unknown_mode argument);
+ _parse (::build2::build::cli::scanner&,
+ ::build2::build::cli::unknown_mode option,
+ ::build2::build::cli::unknown_mode argument);
public:
path file_;
@@ -478,6 +261,14 @@ namespace build2
dir_path cwd_;
bool cwd_specified_;
bool drop_cycles_;
+ string target_what_;
+ bool target_what_specified_;
+ string target_default_type_;
+ bool target_default_type_specified_;
+ map<string, string> target_extension_type_;
+ bool target_extension_type_specified_;
+ dir_path target_cwd_;
+ bool target_cwd_specified_;
};
}
}
diff --git a/libbuild2/build/script/builtin-options.ixx b/libbuild2/build/script/builtin-options.ixx
index 6f91b2c..20847c2 100644
--- a/libbuild2/build/script/builtin-options.ixx
+++ b/libbuild2/build/script/builtin-options.ixx
@@ -9,167 +9,6 @@
//
// End prologue.
-#include <cassert>
-
-namespace build2
-{
- namespace build
- {
- namespace script
- {
- namespace cli
- {
- // unknown_mode
- //
- inline unknown_mode::
- unknown_mode (value v)
- : v_ (v)
- {
- }
-
- // exception
- //
- inline ::std::ostream&
- operator<< (::std::ostream& os, const exception& e)
- {
- e.print (os);
- return os;
- }
-
- // unknown_option
- //
- inline unknown_option::
- unknown_option (const std::string& option)
- : option_ (option)
- {
- }
-
- inline const std::string& unknown_option::
- option () const
- {
- return option_;
- }
-
- // unknown_argument
- //
- inline unknown_argument::
- unknown_argument (const std::string& argument)
- : argument_ (argument)
- {
- }
-
- inline const std::string& unknown_argument::
- argument () const
- {
- return argument_;
- }
-
- // missing_value
- //
- inline missing_value::
- missing_value (const std::string& option)
- : option_ (option)
- {
- }
-
- inline const std::string& missing_value::
- option () const
- {
- return option_;
- }
-
- // invalid_value
- //
- inline invalid_value::
- invalid_value (const std::string& option,
- const std::string& value,
- const std::string& message)
- : option_ (option),
- value_ (value),
- message_ (message)
- {
- }
-
- inline const std::string& invalid_value::
- option () const
- {
- return option_;
- }
-
- inline const std::string& invalid_value::
- value () const
- {
- return value_;
- }
-
- inline const std::string& invalid_value::
- message () const
- {
- return message_;
- }
-
- // argv_scanner
- //
- inline argv_scanner::
- argv_scanner (int& argc,
- char** argv,
- bool erase,
- std::size_t sp)
- : start_position_ (sp + 1),
- i_ (1),
- argc_ (argc),
- argv_ (argv),
- erase_ (erase)
- {
- }
-
- inline argv_scanner::
- argv_scanner (int start,
- int& argc,
- char** argv,
- bool erase,
- std::size_t sp)
- : start_position_ (sp + static_cast<std::size_t> (start)),
- i_ (start),
- argc_ (argc),
- argv_ (argv),
- erase_ (erase)
- {
- }
-
- inline int argv_scanner::
- end () const
- {
- return i_;
- }
-
- // vector_scanner
- //
- inline vector_scanner::
- vector_scanner (const std::vector<std::string>& v,
- std::size_t i,
- std::size_t sp)
- : start_position_ (sp), v_ (v), i_ (i)
- {
- }
-
- inline std::size_t vector_scanner::
- end () const
- {
- return i_;
- }
-
- inline void vector_scanner::
- reset (std::size_t i, std::size_t sp)
- {
- i_ = i;
- start_position_ = sp;
- }
- }
- }
- }
-}
-
namespace build2
{
namespace build
@@ -394,6 +233,126 @@ namespace build2
{
this->drop_cycles_ = x;
}
+
+ inline const string& depdb_dyndep_options::
+ target_what () const
+ {
+ return this->target_what_;
+ }
+
+ inline string& depdb_dyndep_options::
+ target_what ()
+ {
+ return this->target_what_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_what (const string& x)
+ {
+ this->target_what_ = x;
+ }
+
+ inline bool depdb_dyndep_options::
+ target_what_specified () const
+ {
+ return this->target_what_specified_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_what_specified (bool x)
+ {
+ this->target_what_specified_ = x;
+ }
+
+ inline const string& depdb_dyndep_options::
+ target_default_type () const
+ {
+ return this->target_default_type_;
+ }
+
+ inline string& depdb_dyndep_options::
+ target_default_type ()
+ {
+ return this->target_default_type_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_default_type (const string& x)
+ {
+ this->target_default_type_ = x;
+ }
+
+ inline bool depdb_dyndep_options::
+ target_default_type_specified () const
+ {
+ return this->target_default_type_specified_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_default_type_specified (bool x)
+ {
+ this->target_default_type_specified_ = x;
+ }
+
+ inline const map<string, string>& depdb_dyndep_options::
+ target_extension_type () const
+ {
+ return this->target_extension_type_;
+ }
+
+ inline map<string, string>& depdb_dyndep_options::
+ target_extension_type ()
+ {
+ return this->target_extension_type_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_extension_type (const map<string, string>& x)
+ {
+ this->target_extension_type_ = x;
+ }
+
+ inline bool depdb_dyndep_options::
+ target_extension_type_specified () const
+ {
+ return this->target_extension_type_specified_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_extension_type_specified (bool x)
+ {
+ this->target_extension_type_specified_ = x;
+ }
+
+ inline const dir_path& depdb_dyndep_options::
+ target_cwd () const
+ {
+ return this->target_cwd_;
+ }
+
+ inline dir_path& depdb_dyndep_options::
+ target_cwd ()
+ {
+ return this->target_cwd_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_cwd (const dir_path& x)
+ {
+ this->target_cwd_ = x;
+ }
+
+ inline bool depdb_dyndep_options::
+ target_cwd_specified () const
+ {
+ return this->target_cwd_specified_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_cwd_specified (bool x)
+ {
+ this->target_cwd_specified_ = x;
+ }
}
}
}
diff --git a/libbuild2/build/script/builtin.cli b/libbuild2/build/script/builtin.cli
index 6292f48..5aea034 100644
--- a/libbuild2/build/script/builtin.cli
+++ b/libbuild2/build/script/builtin.cli
@@ -1,7 +1,7 @@
// file : libbuild2/build/script/builtin.cli
// license : MIT; see accompanying LICENSE file
-include <libbuild2/types.hxx>;
+include <libbuild2/common.cli>;
// Note that options in this file are undocumented because we generate neither
// the usage printing code nor man pages. Instead, they are documented in the
@@ -17,8 +17,8 @@ namespace build2
//
class depdb_dyndep_options
{
- // Note that --byproduct, if any, must be the first option and is
- // handled ad hoc, kind of as a sub-command.
+ // Note that --byproduct or --dyn-target, if any, must be the first
+ // option and is handled ad hoc.
//
// Similarly, --update-{include,exclude} are handled ad hoc and must
// be literals, similar to the -- separator. They specify prerequisite
@@ -40,23 +40,48 @@ namespace build2
// with support for generated files (and thus -I) at least in the make
// format where we use relative paths for non-existent files.
//
+ // Currently supported dependency formats (--format) are `make`
+ // (default) and `lines`.
+ //
+ // The `make` format is the make dependency declaration in the
+ // `<target>...: [<prerequisite>...]` form. In the non-byproduct mode
+ // a relative prerequisite path is considered non-existent.
+ //
+ // The `lines` format lists targets and/or prerequisites one per line.
+ // If the --dyn-target option is specified then the target list is
+ // expected to come first separated from the prerequisites list with a
+ // blank line. If there are no prerequisites, then the blank line can
+ // be omitted. If the --dyn-target option is not specified, then all
+ // lines are treated as prerequisites and there should be no blank
+ // lines. In the non-byproduct mode a prerequisite line that starts
+ // with a leading space is considered a non-existent prerequisite.
+ // Currently only relative non-existent prerequisites are supported.
+ // Finally, in this mode, if the prerequisite is syntactically a
+ // directory (that is, it ends with a trailing directory separator),
+ // then it is added as fsdir{}. This can be used to handle situations
+ // where the dynamic targets are placed into subdirectories.
+ //
// Note on naming: whenever we (may) have two options, one for target
// and the other for prerequisite, we omit "prerequisite" as that's
// what we extract by default and most commonly. For example:
//
- // --what --what-target
- // --default-type --default-target-type
+ // --what --target-what
+ // --default-type --target-default-type
//
path --file; // Read from file rather than stdin.
- string --format; // Dependency format: make (default).
+ string --format; // Dependency format: `make` (default),
+ // or `lines`.
- string --what; // Dependency kind, e.g., "header".
+ // Dynamic dependency extraction options.
+ //
+ string --what; // Prerequisite kind, e.g., "header".
- dir_paths --include-path|-I; // Search paths for generated files.
+ dir_paths --include-path|-I; // Search paths for generated
+ // prerequisites.
- string --default-type; // Default prerequisite type to use
- // if none could be derived from ext.
+ string --default-type; // Default prerequisite type to use if
+ // none could be derived from extension.
bool --adhoc; // Treat dynamically discovered
// prerequisites as ad hoc (so they
@@ -64,14 +89,39 @@ namespace build2
// normal mode).
dir_path --cwd; // Builtin's working directory used
- // to complete relative paths (only
- // in --byproduct mode).
+ // to complete relative paths of
+ // prerequisites (only in --byproduct
+ // mode, lines format for existing
+ // paths).
bool --drop-cycles; // Drop prerequisites that are also
// targets. Only use if you are sure
// such cycles are harmless, that is,
// the output is not affected by such
// prerequisites' content.
+
+ // Dynamic target extraction options.
+ //
+ // This functionality is enabled with the --dyn-target option. Only
+ // the make format is supported, where the listed targets are added as
+ // ad hoc group members (unless already specified as static members).
+ // This functionality is not available in the byproduct mode.
+ //
+ string --target-what; // Target kind, e.g., "source".
+
+ string --target-default-type; // Default target type to use if none
+ // could be derived from extension.
+
+ map<string, string> // Extension to target type mapping in
+ --target-extension-type; // the <ext>=<type> form, for example,
+ // h=hxx. This mapping is considered
+ // before attempting to automatically
+ // map the extension and so can be used
+ // to resolve ambiguities.
+
+ dir_path --target-cwd; // Builtin's working directory used to
+ // complete relative paths of targets.
+
};
}
}
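
As a rough illustration of how these options fit together, here is a
hypothetical ad hoc recipe sketch (the hxx{foo}/cxx{foo} group, file{foo.def},
and the $gen program with its --deps/-o flags are all made up; only the depdb
dyndep option names come from the interface above):

<hxx{foo} cxx{foo}>: file{foo.def}
{{
  diag gen ($<[0])

  depdb dyndep --dyn-target --what=header --default-type=h --target-what=source --target-default-type=cxx -- $gen --deps $path($<[0])

  $gen -o $path($>[0]) $path($<[0])
}}

Here $gen is assumed to print a make-style dependency declaration on stdout,
which depdb dyndep parses to add the discovered headers as prerequisites and,
with --dyn-target, the listed outputs as ad hoc group members.
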
diff --git a/libbuild2/build/script/lexer+for-loop.test.testscript b/libbuild2/build/script/lexer+for-loop.test.testscript
new file mode 100644
index 0000000..3f8e6b5
--- /dev/null
+++ b/libbuild2/build/script/lexer+for-loop.test.testscript
@@ -0,0 +1,188 @@
+# file : libbuild2/build/script/lexer+for-loop.test.testscript
+# license : MIT; see accompanying LICENSE file
+
+test.arguments = for-loop
+
+: redirect
+:
+{
+ : pass
+ :
+ $* <"cmd <| 1>|" >>EOO
+ 'cmd'
+ <|
+ '1'
+ >|
+ <newline>
+ EOO
+
+ : null
+ :
+ $* <"cmd <- 1>-" >>EOO
+ 'cmd'
+ <-
+ '1'
+ >-
+ <newline>
+ EOO
+
+ : trace
+ :
+ $* <"cmd 1>!" >>EOO
+ 'cmd'
+ '1'
+ >!
+ <newline>
+ EOO
+
+ : merge
+ :
+ $* <"cmd 1>&2" >>EOO
+ 'cmd'
+ '1'
+ >&
+ '2'
+ <newline>
+ EOO
+
+ : str
+ :
+ $* <"cmd <<<=a 1>>>?b" >>EOO
+ 'cmd'
+ <<<=
+ 'a'
+ '1'
+ >>>?
+ 'b'
+ <newline>
+ EOO
+
+ : str-nn
+ :
+ $* <"cmd <<<=:a 1>>>?:b" >>EOO
+ 'cmd'
+ <<<=:
+ 'a'
+ '1'
+ >>>?:
+ 'b'
+ <newline>
+ EOO
+
+ : str-nn-alias
+ :
+ $* <"cmd <<<:a 1>>>?:b" >>EOO
+ 'cmd'
+ <<<:
+ 'a'
+ '1'
+ >>>?:
+ 'b'
+ <newline>
+ EOO
+
+ : doc
+ :
+ $* <"cmd <<EOI 1>>EOO" >>EOO
+ 'cmd'
+ <<
+ 'EOI'
+ '1'
+ >>
+ 'EOO'
+ <newline>
+ EOO
+
+ : doc-nn
+ :
+ $* <"cmd <<:EOI 1>>?:EOO" >>EOO
+ 'cmd'
+ <<:
+ 'EOI'
+ '1'
+ >>?:
+ 'EOO'
+ <newline>
+ EOO
+
+ : file-cmp
+ :
+ $* <"cmd <=in >?out 2>?err" >>EOO
+ 'cmd'
+ <=
+ 'in'
+ >?
+ 'out'
+ '2'
+ >?
+ 'err'
+ <newline>
+ EOO
+
+ : file-write
+ :
+ $* <"cmd >=out 2>+err" >>EOO
+ 'cmd'
+ >=
+ 'out'
+ '2'
+ >+
+ 'err'
+ <newline>
+ EOO
+}
+
+: cleanup
+:
+{
+ : always
+ :
+ $* <"cmd &file" >>EOO
+ 'cmd'
+ &
+ 'file'
+ <newline>
+ EOO
+
+ : maybe
+ :
+ $* <"cmd &?file" >>EOO
+ 'cmd'
+ &?
+ 'file'
+ <newline>
+ EOO
+
+ : never
+ :
+ $* <"cmd &!file" >>EOO
+ 'cmd'
+ &!
+ 'file'
+ <newline>
+ EOO
+}
+
+: for
+:
+{
+ : form-1
+ :
+ $* <"for x: a" >>EOO
+ 'for'
+ 'x'
+ :
+ 'a'
+ <newline>
+ EOO
+
+ : form-3
+ :
+ $* <"for <<<a x" >>EOO
+ 'for'
+ <<<
+ 'a'
+ 'x'
+ <newline>
+ EOO
+}
diff --git a/libbuild2/build/script/lexer.cxx b/libbuild2/build/script/lexer.cxx
index d849ac9..e0d87fe 100644
--- a/libbuild2/build/script/lexer.cxx
+++ b/libbuild2/build/script/lexer.cxx
@@ -35,10 +35,7 @@ namespace build2
bool q (true); // quotes
if (!esc)
- {
- assert (!state_.empty ());
- esc = state_.top ().escapes;
- }
+ esc = current_state ().escapes;
switch (m)
{
@@ -78,6 +75,19 @@ namespace build2
s2 = " ";
break;
}
+ case lexer_mode::for_loop:
+ {
+ // Leading tokens of the for-loop. Like command_line but
+ // recognizes colon as a separator and lsbrace like value.
+ //
+ // Note that while sensing the form of the for-loop (`for x:...`
+ // vs `for x <...`) we need to make sure that the pre-parsed token
+ // types are valid for the execution phase.
+ //
+ s1 = ":=!|&<> $(#\t\n";
+ s2 = " == ";
+ break;
+ }
default:
{
// Recognize special variable names ($>, $<, $~).
@@ -94,7 +104,7 @@ namespace build2
}
assert (ps == '\0');
- state_.push (
+ mode_impl (
state {m, data, nullopt, false, false, ps, s, n, q, *esc, s1, s2});
}
@@ -103,12 +113,13 @@ namespace build2
{
token r;
- switch (state_.top ().mode)
+ switch (mode ())
{
case lexer_mode::command_line:
case lexer_mode::first_token:
case lexer_mode::second_token:
case lexer_mode::variable_line:
+ case lexer_mode::for_loop:
r = next_line ();
break;
default: return base_lexer::next ();
@@ -128,7 +139,7 @@ namespace build2
xchar c (get ());
uint64_t ln (c.line), cn (c.column);
- state st (state_.top ()); // Make copy (see first/second_token).
+ state st (current_state ()); // Make copy (see first/second_token).
lexer_mode m (st.mode);
auto make_token = [&sep, ln, cn] (type t)
@@ -141,9 +152,10 @@ namespace build2
//
if (st.lsbrace)
{
- assert (m == lexer_mode::variable_line);
+ assert (m == lexer_mode::variable_line ||
+ m == lexer_mode::for_loop);
- state_.top ().lsbrace = false; // Note: st is a copy.
+ current_state ().lsbrace = false; // Note: st is a copy.
if (c == '[' && (!st.lsbrace_unsep || !sep))
return make_token (type::lsbrace);
@@ -156,7 +168,7 @@ namespace build2
// we push any new mode (e.g., double quote).
//
if (m == lexer_mode::first_token || m == lexer_mode::second_token)
- state_.pop ();
+ expire_mode ();
// NOTE: remember to update mode() if adding new special characters.
@@ -167,7 +179,7 @@ namespace build2
// Expire variable value mode at the end of the line.
//
if (m == lexer_mode::variable_line)
- state_.pop ();
+ expire_mode ();
sep = true; // Treat newline as always separated.
return make_token (type::newline);
@@ -179,11 +191,20 @@ namespace build2
case '(': return make_token (type::lparen);
}
+ if (m == lexer_mode::for_loop)
+ {
+ switch (c)
+ {
+ case ':': return make_token (type::colon);
+ }
+ }
+
// Command line operator/separators.
//
if (m == lexer_mode::command_line ||
m == lexer_mode::first_token ||
- m == lexer_mode::second_token)
+ m == lexer_mode::second_token ||
+ m == lexer_mode::for_loop)
{
switch (c)
{
@@ -205,7 +226,8 @@ namespace build2
//
if (m == lexer_mode::command_line ||
m == lexer_mode::first_token ||
- m == lexer_mode::second_token)
+ m == lexer_mode::second_token ||
+ m == lexer_mode::for_loop)
{
if (optional<token> t = next_cmd_op (c, sep))
return move (*t);
diff --git a/libbuild2/build/script/lexer.hxx b/libbuild2/build/script/lexer.hxx
index 646d3b9..3f51493 100644
--- a/libbuild2/build/script/lexer.hxx
+++ b/libbuild2/build/script/lexer.hxx
@@ -24,9 +24,10 @@ namespace build2
enum
{
command_line = base_type::value_next,
- first_token, // Expires at the end of the token.
- second_token, // Expires at the end of the token.
- variable_line // Expires at the end of the line.
+ first_token, // Expires at the end of the token.
+ second_token, // Expires at the end of the token.
+ variable_line, // Expires at the end of the line.
+ for_loop // Used for sensing the for-loop leading tokens.
};
lexer_mode () = default;
@@ -67,6 +68,8 @@ namespace build2
static redirect_aliases_type redirect_aliases;
private:
+ using build2::script::lexer::mode; // Getter.
+
token
next_line ();
};
diff --git a/libbuild2/build/script/lexer.test.cxx b/libbuild2/build/script/lexer.test.cxx
index e496f94..d8733ba 100644
--- a/libbuild2/build/script/lexer.test.cxx
+++ b/libbuild2/build/script/lexer.test.cxx
@@ -35,6 +35,7 @@ namespace build2
else if (s == "second-token") m = lexer_mode::second_token;
else if (s == "variable-line") m = lexer_mode::variable_line;
else if (s == "variable") m = lexer_mode::variable;
+ else if (s == "for-loop") m = lexer_mode::for_loop;
else assert (false);
}
diff --git a/libbuild2/build/script/parser+command-if.test.testscript b/libbuild2/build/script/parser+command-if.test.testscript
index a18a885..8b19186 100644
--- a/libbuild2/build/script/parser+command-if.test.testscript
+++ b/libbuild2/build/script/parser+command-if.test.testscript
@@ -279,7 +279,7 @@
cmd
end
EOI
- buildfile:12:1: error: 'end' without preceding 'if'
+ buildfile:12:1: error: 'end' without preceding 'if', 'for', or 'while'
EOE
: before
diff --git a/libbuild2/build/script/parser+command-re-parse.test.testscript b/libbuild2/build/script/parser+command-re-parse.test.testscript
index 56e05b5..3dbdc16 100644
--- a/libbuild2/build/script/parser+command-re-parse.test.testscript
+++ b/libbuild2/build/script/parser+command-re-parse.test.testscript
@@ -1,18 +1,14 @@
# file : libbuild2/build/script/parser+command-re-parse.test.testscript
# license : MIT; see accompanying LICENSE file
-# @@ TMP
-#
-#\
: double-quote
:
$* <<EOI >>EOO
-x = [cmd_line] cmd \">-\" "'<-'"
+x = [cmdline] cmd \">-\" "'<-'"
$x
EOI
cmd '>-' '<-'
EOO
-#\
: literal-re-parse
:
diff --git a/libbuild2/build/script/parser+diag.test.testscript b/libbuild2/build/script/parser+diag.test.testscript
index 30eb859..504c9a4 100644
--- a/libbuild2/build/script/parser+diag.test.testscript
+++ b/libbuild2/build/script/parser+diag.test.testscript
@@ -19,17 +19,99 @@ $* <<EOI >>EOO
name: echo
EOO
-: diag
+: name-operation
:
-$* <<EOI >>~%EOO%
- echo abc
- cat abc
- diag copy >= $>
- cp <- $>
+$* <<EOI >>EOO
+ a = 'b'
EOI
- %diag: copy >= .+file\{driver\.\}%
+ name: update
EOO
+: preamble
+:
+{
+ : disambiguate
+ :
+ $* <<EOI >>~%EOO%
+ echo abc | set v
+ cat abc | set v
+ diag copy >= $>
+ cp <- $>
+ EOI
+ echo abc | set v
+ cat abc | set v
+ %diag: copy >= .+file\{driver\.\}%
+ EOO
+
+ : name
+ :
+ $* <<EOI >>EOO
+ n = foo
+ diag copy $n
+ cp $n $>
+ EOI
+ diag: copy foo
+ EOO
+
+ : quoted
+ :
+ $* <<EOI >'diag: foo'
+ f = foo
+ diag "$f"
+ EOI
+
+ : quoted-eval
+ :
+ $* <<EOI >'diag: foo'
+ f = foo
+ diag "($f)"
+ EOI
+
+ : temp_dir
+ :
+ {
+ test.options += -t
+
+ : no
+ :
+ $* <<EOI >false
+ f = foo
+ diag $f
+ f = $~/f
+ foo "$f"
+ EOI
+
+ : no-depdb
+ :
+ $* <<EOI >false
+ f = $~/f
+ depdb hash "$f"
+ diag $f
+ f = $~/f
+ foo "$f"
+ EOI
+
+ : yes
+ :
+ $* <<EOI >true
+ f = $~/f
+ diag $f
+ foo $f
+ EOI
+
+ : yes-depdb
+ :
+ $* <<EOI >true
+ f = $~/f
+ depdb hash "$f"
+ f = $~/t
+ diag $f
+ f = $~/f
+ foo "$f"
+ EOI
+ }
+}
+
: ambiguity
:
{
@@ -67,16 +149,6 @@ $* <<EOI >>~%EOO%
info: consider specifying it explicitly with the 'diag' recipe attribute
info: or provide custom low-verbosity diagnostics with the 'diag' builtin
EOE
-
- : none
- :
- $* <<EOI 2>>EOE != 0
- a = 'b'
- EOI
- buildfile:11:1: error: unable to deduce low-verbosity script diagnostics name
- info: consider specifying it explicitly with the 'diag' recipe attribute
- info: or provide custom low-verbosity diagnostics with the 'diag' builtin
- EOE
}
: inside-if
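
In an actual recipe the cases tested above would look along these lines (a
sketch with made-up target names and commands): any commands preceding the
diag call become the diagnostics preamble, which is executed separately from
the body to obtain the custom low-verbosity diagnostics line.

file{out}: file{in}
{{
  cat $path($<[0]) | set v
  diag generate $path($>[0])

  # ... commands that produce file{out} from $v ...
}}
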
diff --git a/libbuild2/build/script/parser+expansion.test.testscript b/libbuild2/build/script/parser+expansion.test.testscript
index 086ec8f..eb99ae2 100644
--- a/libbuild2/build/script/parser+expansion.test.testscript
+++ b/libbuild2/build/script/parser+expansion.test.testscript
@@ -24,19 +24,15 @@ EOI
buildfile:12:5: info: while parsing string 'xy'a bc'
EOE
-# @@ TMP
-#
-#\
: invalid-redirect
:
$* <<EOI 2>>EOE != 0
-x = [cmd_line] "1>&a"
+x = [cmdline] "1>&a"
cmd $x
EOI
<string>:1:4: error: stdout merge redirect file descriptor must be 2
buildfile:12:5: info: while parsing string '1>&a'
EOE
-#\
: expansion-re-parse
:
diff --git a/libbuild2/build/script/parser+for.test.testscript b/libbuild2/build/script/parser+for.test.testscript
new file mode 100644
index 0000000..847b253
--- /dev/null
+++ b/libbuild2/build/script/parser+for.test.testscript
@@ -0,0 +1,656 @@
+# file : libbuild2/build/script/parser+for.test.testscript
+# license : MIT; see accompanying LICENSE file
+
+: form-1
+:
+: for x: ...
+:
+{
+ : for
+ :
+ {
+ : no-var
+ :
+ $* <<EOI 2>>EOE != 0
+ for
+ cmd
+ end
+ EOI
+ buildfile:11:1: error: for: missing variable name
+ EOE
+
+ : untyped
+ :
+ $* <<EOI >>EOO
+ for x: a b
+ cmd $x
+ end
+ EOI
+ cmd a
+ cmd b
+ EOO
+
+ : null
+ :
+ $* <<EOI >:''
+ for x: [null]
+ cmd $x
+ end
+ EOI
+
+ : empty
+ :
+ $* <<EOI >:''
+ for x:
+ cmd $x
+ end
+ EOI
+
+ : expansion
+ :
+ $* <<EOI >>EOO
+ vs = a b
+ for x: $vs
+ cmd $x
+ end
+ EOI
+ cmd a
+ cmd b
+ EOO
+
+ : typed-values
+ :
+ $* <<EOI >>~%EOO%
+ for x: [dir_paths] a b
+ cmd $x
+ end
+ EOI
+ %cmd (a/|'a\\')%
+ %cmd (b/|'b\\')%
+ EOO
+
+ : typed-elem
+ :
+ $* <<EOI >>~%EOO%
+ for x [dir_path]: a b
+ cmd $x
+ end
+ EOI
+ %cmd (a/|'a\\')%
+ %cmd (b/|'b\\')%
+ EOO
+
+ : typed-elem-value
+ :
+ $* <<EOI >>~%EOO%
+ for x [dir_path]: [strings] a b
+ cmd $x
+ end
+ EOI
+ %cmd (a/|'a\\')%
+ %cmd (b/|'b\\')%
+ EOO
+
+ : defined-var
+ :
+ $* <<EOI >>EOO
+ x = x
+
+ for x: a b
+ cmd $x
+ end
+
+ cmd $x
+ EOI
+ cmd a
+ cmd b
+ cmd b
+ EOO
+ }
+
+ : end
+ :
+ {
+ : without-end
+ :
+ $* <<EOI 2>>EOE != 0
+ for x: a b
+ cmd
+ EOI
+ buildfile:13:1: error: expected closing 'end'
+ EOE
+ }
+
+ : elif
+ :
+ {
+ : without-if
+ :
+ $* <<EOI 2>>EOE != 0
+ for x: a b
+ elif true
+ cmd
+ end
+ end
+ EOI
+ buildfile:12:3: error: 'elif' without preceding 'if'
+ EOE
+ }
+
+ : nested
+ :
+ {
+ $* -l -r <<EOI >>EOO
+ for x: a b
+ cmd1 $x # 1
+ if ($x == "a") # 2
+ cmd2 # 3
+ for y: x y
+ cmd3 # 4
+ end
+ else
+ cmd4 # 5
+ end
+ cmd5 # 6
+ end
+ cmd6 # 7
+ EOI
+ cmd1 a # 1 i1
+ ? true # 2 i1
+ cmd2 # 3 i1
+ cmd3 # 4 i1 i1
+ cmd3 # 4 i1 i2
+ cmd5 # 6 i1
+ cmd1 b # 1 i2
+ ? false # 2 i2
+ cmd4 # 5 i2
+ cmd5 # 6 i2
+ cmd6 # 7
+ EOO
+ }
+
+ : contained
+ :
+ {
+ : eos
+ :
+ $* <<EOI 2>>EOE != 0
+ for x:
+ EOI
+ buildfile:12:1: error: expected closing 'end'
+ EOE
+ }
+}
+
+: form-2
+:
+: ... | for x
+:
+{
+ : for
+ :
+ {
+ : status
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x != 0
+ cmd
+ end
+ EOI
+ buildfile:11:20: error: for-loop exit code cannot be checked
+ EOE
+
+ : not-last
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x | echo x
+ cmd
+ end
+ EOI
+ buildfile:11:20: error: for-loop must be last command in a pipe
+ EOE
+
+ : not-last-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x|echo x
+ cmd
+ end
+ EOI
+ buildfile:11:19: error: for-loop must be last command in a pipe
+ EOE
+
+ : expression-after
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x && echo x
+ cmd
+ end
+ EOI
+ buildfile:11:20: error: command expression involving for-loop
+ EOE
+
+ : expression-after-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x&&echo x
+ cmd
+ end
+ EOI
+ buildfile:11:19: error: command expression involving for-loop
+ EOE
+
+ : expression-before
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' && echo x | for x
+ cmd
+ end
+ EOI
+ buildfile:11:24: error: command expression involving for-loop
+ EOE
+
+ : expression-before-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' && echo x|for x
+ cmd
+ end
+ EOI
+ buildfile:11:22: error: command expression involving for-loop
+ EOE
+
+ : cleanup
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x &f
+ cmd
+ end
+ EOI
+ buildfile:11:20: error: cleanup in for-loop
+ EOE
+
+ : cleanup-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x&f
+ cmd
+ end
+ EOI
+ buildfile:11:19: error: cleanup in for-loop
+ EOE
+
+ : stdout-redirect
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x >a
+ cmd
+ end
+ EOI
+ buildfile:11:20: error: output redirect in for-loop
+ EOE
+
+ : stdout-redirect-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x>a
+ cmd
+ end
+ EOI
+ buildfile:11:19: error: output redirect in for-loop
+ EOE
+
+ : stdin-redirect
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x <a
+ cmd
+ end
+ EOI
+ buildfile:11:20: error: stdin is both piped and redirected
+ EOE
+
+ : no-var
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for
+ cmd
+ end
+ EOI
+ buildfile:11:1: error: for: missing variable name
+ EOE
+
+ : untyped
+ :
+ $* <<EOI >>EOO
+ echo 'a b' | for -w x
+ cmd $x
+ end
+ EOI
+ echo 'a b' | for -w x
+ EOO
+
+ : expansion
+ :
+ $* <<EOI >>EOO
+ vs = a b
+ echo $vs | for x
+ cmd $x
+ end
+ EOI
+ echo a b | for x
+ EOO
+
+ : typed-elem
+ :
+ $* <<EOI >>EOO
+ echo 'a b' | for -w x [dir_path]
+ cmd $x
+ end
+ EOI
+ echo 'a b' | for -w x [dir_path]
+ EOO
+ }
+
+ : end
+ :
+ {
+ : without-end
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ cmd
+ EOI
+ buildfile:13:1: error: expected closing 'end'
+ EOE
+ }
+
+ : elif
+ :
+ {
+ : without-if
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ elif true
+ cmd
+ end
+ end
+ EOI
+ buildfile:12:3: error: 'elif' without preceding 'if'
+ EOE
+ }
+
+ : nested
+ :
+ {
+ $* -l -r <<EOI >>EOO
+ echo 'a b' | for x # 1
+ cmd1 $x # 2
+ if ($x == "a") # 3
+ cmd2 # 4
+ echo x y | for y # 5
+ cmd3 # 6
+ end
+ else
+ cmd4 # 7
+ end
+ cmd5 # 8
+ end
+ cmd6 # 9
+ EOI
+ echo 'a b' | for x # 1
+ cmd6 # 9
+ EOO
+ }
+}
+
+: form-3
+:
+: for x <...
+:
+{
+ : for
+ :
+ {
+ : status
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <a != 0
+ cmd
+ end
+ EOI
+ buildfile:11:10: error: for-loop exit code cannot be checked
+ EOE
+
+ : not-last
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <a | echo x
+ cmd
+ end
+ EOI
+ buildfile:11:10: error: for-loop must be last command in a pipe
+ EOE
+
+ : not-last-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ for <a x|echo x
+ cmd
+ end
+ EOI
+ buildfile:11:9: error: for-loop must be last command in a pipe
+ EOE
+
+ : expression-after
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <a && echo x
+ cmd
+ end
+ EOI
+ buildfile:11:10: error: command expression involving for-loop
+ EOE
+
+ : expression-after-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ for <a x&&echo x
+ cmd
+ end
+ EOI
+ buildfile:11:9: error: command expression involving for-loop
+ EOE
+
+ : expression-before
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' && for x <a
+ cmd
+ end
+ EOI
+ buildfile:11:15: error: command expression involving for-loop
+ EOE
+
+ : cleanup
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <a &f
+ cmd
+ end
+ EOI
+ buildfile:11:10: error: cleanup in for-loop
+ EOE
+
+ : cleanup-before-var
+ :
+ $* <<EOI 2>>EOE != 0
+ for &f x <a
+ cmd
+ end
+ EOI
+ buildfile:11:5: error: cleanup in for-loop
+ EOE
+
+ : cleanup-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ for <a x&f
+ cmd
+ end
+ EOI
+ buildfile:11:9: error: cleanup in for-loop
+ EOE
+
+ : stdout-redirect
+ :
+ $* <<EOI 2>>EOE != 0
+ for x >a
+ cmd
+ end
+ EOI
+ buildfile:11:7: error: output redirect in for-loop
+ EOE
+
+ : stdout-redirect-before-var
+ :
+ $* <<EOI 2>>EOE != 0
+ for >a x
+ cmd
+ end
+ EOI
+ buildfile:11:5: error: output redirect in for-loop
+ EOE
+
+ : stdout-redirect-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ for x>a
+ cmd
+ end
+ EOI
+ buildfile:11:6: error: output redirect in for-loop
+ EOE
+
+ : no-var
+ :
+ $* <<EOI 2>>EOE != 0
+ for <a
+ cmd
+ end
+ EOI
+ buildfile:11:1: error: for: missing variable name
+ EOE
+
+ : quoted-opt
+ :
+ $* <<EOI >>EOO
+ o = -w
+ for "$o" x <'a b'
+ cmd $x
+ end
+ for "($o)" x <'a b'
+ cmd $x
+ end
+ EOI
+ for -w x <'a b'
+ for -w x <'a b'
+ EOO
+
+ : untyped
+ :
+ $* <<EOI >>EOO
+ for -w x <'a b'
+ cmd $x
+ end
+ EOI
+ for -w x <'a b'
+ EOO
+
+ : expansion
+ :
+ $* <<EOI >>EOO
+ vs = a b
+ for x <$vs
+ cmd $x
+ end
+ EOI
+ for x b <a
+ EOO
+
+ : typed-elem
+ :
+ $* <<EOI >>EOO
+ for -w x [dir_path] <'a b'
+ cmd $x
+ end
+ EOI
+ for -w x [dir_path] <'a b'
+ EOO
+ }
+
+ : end
+ :
+ {
+ : without-end
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ cmd
+ EOI
+ buildfile:13:1: error: expected closing 'end'
+ EOE
+ }
+
+ : elif
+ :
+ {
+ : without-if
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ elif true
+ cmd
+ end
+ end
+ EOI
+ buildfile:12:3: error: 'elif' without preceding 'if'
+ EOE
+ }
+
+ : nested
+ :
+ {
+ $* -l -r <<EOI >>EOO
+ for -w x <'a b' # 1
+ cmd1 $x # 2
+ if ($x == "a") # 3
+ cmd2 # 4
+ for -w y <'x y' # 5
+ cmd3 # 6
+ end
+ else
+ cmd4 # 7
+ end
+ cmd5 # 8
+ end
+ cmd6 # 9
+ EOI
+ for -w x <'a b' # 1
+ cmd6 # 9
+ EOO
+ }
+
+ : contained
+ :
+ {
+ : eos
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ EOI
+ buildfile:12:1: error: expected closing 'end'
+ EOE
+ }
+}
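
For reference, the three for-loop forms exercised by these tests would appear
in a recipe roughly as follows (cmd1/cmd2/cmd3, $inputs, and the literal
values are placeholders; -w is used as in the tests above):

# Form 1: iterate over the elements of a value (options are not supported
# in this form).
#
for f: $inputs
  cmd1 $f
end

# Form 2: iterate over words read from a pipe; the for-loop must be the
# last command in the pipe and cannot have redirects or cleanups.
#
echo 'a b' | for -w x
  cmd2 $x
end

# Form 3: iterate over words read from a redirect.
#
for -w y <'c d'
  cmd3 $y
end
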
diff --git a/libbuild2/build/script/parser+while.test.testscript b/libbuild2/build/script/parser+while.test.testscript
new file mode 100644
index 0000000..5587291
--- /dev/null
+++ b/libbuild2/build/script/parser+while.test.testscript
@@ -0,0 +1,133 @@
+# file : libbuild2/build/script/parser+while.test.testscript
+# license : MIT; see accompanying LICENSE file
+
+: while
+:
+{
+ : true
+ :
+ $* <<EOI >>EOO
+ while ($v != "aa")
+ cmd "$v"
+ v = "$(v)a"
+ end
+ EOI
+ ? true
+ cmd ''
+ ? true
+ cmd a
+ ? false
+ EOO
+
+ : false
+ :
+ $* <<EOI >>EOO
+ while ($v == "aa")
+ cmd "$v"
+ v = "$(v)a"
+ end
+ EOI
+ ? false
+ EOO
+
+ : without-command
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ cmd
+ end
+ EOI
+ buildfile:11:6: error: missing program
+ EOE
+}
+
+: end
+:
+{
+ : without-end
+ :
+ $* <<EOI 2>>EOE != 0
+ while true
+ cmd
+ EOI
+ buildfile:13:1: error: expected closing 'end'
+ EOE
+}
+
+: elif
+:
+{
+ : without-if
+ :
+ $* <<EOI 2>>EOE != 0
+ while false
+ elif true
+ cmd
+ end
+ end
+ EOI
+ buildfile:12:3: error: 'elif' without preceding 'if'
+ EOE
+}
+
+: nested
+:
+{
+ $* -l -r <<EOI >>EOO
+ while ($v != "aa") # 1
+ cmd1 "$v" # 2
+ if ($v == "a") # 3
+ cmd2 # 4
+ while ($v2 != "$v") # 5
+ cmd3 # 6
+ v2=$v
+ end
+ else
+ cmd4 # 7
+ end
+ cmd5 # 8
+ v = "$(v)a"
+ end
+ EOI
+ ? true # 1 i1
+ cmd1 '' # 2 i1
+ ? false # 3 i1
+ cmd4 # 7 i1
+ cmd5 # 8 i1
+ ? true # 1 i2
+ cmd1 a # 2 i2
+ ? true # 3 i2
+ cmd2 # 4 i2
+ ? true # 5 i2 i1
+ cmd3 # 6 i2 i1
+ ? false # 5 i2 i2
+ cmd5 # 8 i2
+ ? false # 1 i3
+ EOO
+}
+
+: contained
+:
+{
+ : eos
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ EOI
+ buildfile:12:1: error: expected closing 'end'
+ EOE
+}
+
+: var
+:
+$* <<EOI >>EOO
+while ($v1 != "a")
+ v1 = "$(v1)a"
+ v2 = "$v1"
+end
+cmd $v1
+EOI
+? true
+? false
+cmd a
+EOO
diff --git a/libbuild2/build/script/parser.cxx b/libbuild2/build/script/parser.cxx
index dd6fa2d..3ecf23d 100644
--- a/libbuild2/build/script/parser.cxx
+++ b/libbuild2/build/script/parser.cxx
@@ -15,6 +15,8 @@
#include <libbuild2/algorithm.hxx>
#include <libbuild2/make-parser.hxx>
+#include <libbuild2/adhoc-rule-buildscript.hxx>
+
#include <libbuild2/script/run.hxx>
#include <libbuild2/build/script/lexer.hxx>
@@ -45,7 +47,7 @@ namespace build2
{
path_ = &pn;
- pre_parse_ = true;
+ top_pre_parse_ = pre_parse_ = true;
lexer l (is, *path_, line, lexer_mode::command_line);
set_lexer (&l);
@@ -59,7 +61,7 @@ namespace build2
pbase_ = scope_->src_path_;
- file_based_ = tt.is_a<file> ();
+ file_based_ = tt.is_a<file> () || tt.is_a<group> ();
perform_update_ = find (as.begin (), as.end (), perform_update_id) !=
as.end ();
@@ -91,15 +93,31 @@ namespace build2
info << "consider using 'depdb' builtin to track its result "
<< "changes";
- // Diagnose absent/ambigous script name.
+ // Diagnose computed variable expansions.
+ //
+ if (computed_var_)
+ fail (*computed_var_)
+ << "expansion of computed variable is only allowed in depdb "
+ << "preamble" <<
+ info << "consider using 'depdb' builtin to track its value "
+ << "changes";
+
+ // Diagnose absent/ambiguous script name. But try to deduce an absent
+ // name from the script operation first.
//
{
diag_record dr;
- if (!diag_name_ && !diag_line_)
+ if (!diag_name_ && diag_preamble_.empty ())
{
- dr << fail (s.start_loc)
- << "unable to deduce low-verbosity script diagnostics name";
+ if (as.size () == 1)
+ {
+ diag_name_ = make_pair (ctx->operation_table[as[0].operation ()],
+ location ());
+ }
+ else
+ dr << fail (s.start_loc)
+ << "unable to deduce low-verbosity script diagnostics name";
}
else if (diag_name2_)
{
@@ -125,20 +143,22 @@ namespace build2
// Save the script name or custom diagnostics line.
//
- assert (diag_name_.has_value () != diag_line_.has_value ());
+ assert (diag_name_.has_value () == diag_preamble_.empty ());
if (diag_name_)
s.diag_name = move (diag_name_->first);
else
- s.diag_line = move (diag_line_->first);
+ s.diag_preamble = move (diag_preamble_);
// Save the custom dependency change tracking lines, if present.
//
s.depdb_clear = depdb_clear_.has_value ();
+ s.depdb_value = depdb_value_;
if (depdb_dyndep_)
{
s.depdb_dyndep = depdb_dyndep_->second;
s.depdb_dyndep_byproduct = depdb_dyndep_byproduct_;
+ s.depdb_dyndep_dyn_target = depdb_dyndep_dyn_target_;
}
s.depdb_preamble = move (depdb_preamble_);
@@ -181,13 +201,27 @@ namespace build2
}
}
+ // Parse a logical line, handling the flow control constructs
+ // recursively.
+ //
+ // If the flow control construct type is specified, then this line is
+ // assumed to belong to such a construct.
+ //
void parser::
- pre_parse_line (token& t, type& tt, bool if_line)
+ pre_parse_line (token& t, type& tt, optional<line_type> fct)
{
+ // enter: next token is peeked at (type in tt)
+ // leave: newline
+
+ assert (!fct ||
+ *fct == line_type::cmd_if ||
+ *fct == line_type::cmd_while ||
+ *fct == line_type::cmd_for_stream ||
+ *fct == line_type::cmd_for_args);
+
// Determine the line type/start token.
//
- line_type lt (
- pre_parse_line_start (t, tt, lexer_mode::second_token));
+ line_type lt (pre_parse_line_start (t, tt, lexer_mode::second_token));
line ln;
@@ -220,22 +254,148 @@ namespace build2
break;
}
+ //
+ // See pre_parse_line_start() for details.
+ //
+ case line_type::cmd_for_args: assert (false); break;
+ case line_type::cmd_for_stream:
+ {
+ // First we need to sense the next few tokens and detect which
+ // form of the loop we are dealing with, the first (for x: ...)
+ // or the third (x <...) one. Note that the second form (... | for
+ // x) is handled separately.
+ //
+ // If the next token doesn't look like a variable name, then this
+ // is the third form. Otherwise, if colon follows the variable
+ // name, potentially after the attributes, then this is the first
+ // form and the third form otherwise.
+ //
+ // Note that for the third form we will need to pass the 'for'
+ // token as a program name to the command expression parsing
+ // function since it will be gone from the token stream by that
+ // time. Thus, we save it. We also need to make sure the sensing
+ // always leaves the variable name token in t/tt.
+ //
+ // Note also that in this model it won't be possible to support
+ // options in the first form.
+ //
+ token pt (t);
+ assert (pt.type == type::word && pt.value == "for");
+
+ mode (lexer_mode::for_loop);
+ next (t, tt);
+
+ // Note that we also consider special variable names (those that
+ // don't clash with the command line elements like redirects, etc)
+ // to later fail gracefully.
+ //
+ string& n (t.value);
+
+ if (tt == type::word && t.qtype == quote_type::unquoted &&
+ (n[0] == '_' || alpha (n[0]) || // Variable.
+ n == "~")) // Special variable.
+ {
+ // Detect patterns analogous to parse_variable_name() (so we
+ // diagnose `for x[string]: ...`).
+ //
+ if (n.find_first_of ("[*?") != string::npos)
+ fail (t) << "expected variable name instead of " << n;
+
+ if (special_variable (n))
+ fail (t) << "attempt to set '" << n << "' special variable";
+
+ // Parse out the element attributes, if present.
+ //
+ if (lexer_->peek_char ().first == '[')
+ {
+ // Save the variable name token before the attributes parsing
+ // and restore it afterwards. Also make sure that the token
+ // which follows the attributes stays in the stream.
+ //
+ token vt (move (t));
+ next_with_attributes (t, tt);
+
+ attributes_push (t, tt,
+ true /* standalone */,
+ false /* next_token */);
+
+ t = move (vt);
+ tt = t.type;
+ }
+
+ if (lexer_->peek_char ().first == ':')
+ lt = line_type::cmd_for_args;
+ }
+
+ if (lt == line_type::cmd_for_stream) // for x <...
+ {
+ // At this point t/tt contains the variable name token. Now
+ // pre-parse the command expression in the command_line lexer
+ // mode starting from this position and also passing the 'for'
+ // token as a program name.
+ //
+ // Note that the fact that the potential attributes are already
+ // parsed doesn't affect the command expression pre-parsing.
+ // Also note that they will be available during the execution
+ // phase being replayed.
+ //
+ expire_mode (); // Expire the for-loop lexer mode.
+
+ parse_command_expr_result r (
+ parse_command_expr (t, tt,
+ lexer::redirect_aliases,
+ move (pt)));
+
+ assert (r.for_loop);
+
+ if (tt != type::newline)
+ fail (t) << "expected newline instead of " << t;
+
+ parse_here_documents (t, tt, r);
+ }
+ else // for x: ...
+ {
+ next (t, tt);
+
+ assert (tt == type::colon);
+
+ expire_mode (); // Expire the for-loop lexer mode.
+
+ // Parse the value similar to the var line type (see above).
+ //
+ mode (lexer_mode::variable_line);
+ parse_variable_line (t, tt);
+
+ if (tt != type::newline)
+ fail (t) << "expected newline instead of " << t << " after for";
+ }
+
+ ln.var = nullptr;
+ ++level_;
+ break;
+ }
case line_type::cmd_elif:
case line_type::cmd_elifn:
case line_type::cmd_else:
- case line_type::cmd_end:
{
- if (!if_line)
- {
+ if (!fct || *fct != line_type::cmd_if)
fail (t) << lt << " without preceding 'if'";
- }
+ }
+ // Fall through.
+ case line_type::cmd_end:
+ {
+ if (!fct)
+ fail (t) << lt << " without preceding 'if', 'for', or 'while'";
}
// Fall through.
case line_type::cmd_if:
case line_type::cmd_ifn:
+ case line_type::cmd_while:
next (t, tt); // Skip to start of command.
- if (lt == line_type::cmd_if || lt == line_type::cmd_ifn)
+ if (lt == line_type::cmd_if ||
+ lt == line_type::cmd_ifn ||
+ lt == line_type::cmd_while)
++level_;
else if (lt == line_type::cmd_end)
--level_;
@@ -243,15 +403,24 @@ namespace build2
// Fall through.
case line_type::cmd:
{
- pair<command_expr, here_docs> p;
+ parse_command_expr_result r;
if (lt != line_type::cmd_else && lt != line_type::cmd_end)
- p = parse_command_expr (t, tt, lexer::redirect_aliases);
+ r = parse_command_expr (t, tt, lexer::redirect_aliases);
+
+ if (r.for_loop)
+ {
+ lt = line_type::cmd_for_stream;
+ ln.var = nullptr;
+
+ ++level_;
+ }
if (tt != type::newline)
fail (t) << "expected newline instead of " << t;
- parse_here_documents (t, tt, p);
+ parse_here_documents (t, tt, r);
+
break;
}
}
@@ -269,12 +438,67 @@ namespace build2
*save_line_ = move (ln);
}
- if (lt == line_type::cmd_if || lt == line_type::cmd_ifn)
+ switch (lt)
{
- tt = peek (lexer_mode::first_token);
+ case line_type::cmd_if:
+ case line_type::cmd_ifn:
+ {
+ tt = peek (lexer_mode::first_token);
+
+ pre_parse_if_else (t, tt);
+ break;
+ }
+ case line_type::cmd_while:
+ case line_type::cmd_for_stream:
+ case line_type::cmd_for_args:
+ {
+ tt = peek (lexer_mode::first_token);
+
+ pre_parse_loop (t, tt, lt);
+ break;
+ }
+ default: break;
+ }
+ }
+
+ // Pre-parse the flow control construct block line.
+ //
+ void parser::
+ pre_parse_block_line (token& t, type& tt, line_type bt)
+ {
+ // enter: peeked first token of the line (type in tt)
+ // leave: newline
+
+ const location ll (get_location (peeked ()));
+
+ if (tt == type::eos)
+ fail (ll) << "expected closing 'end'";
+
+ line_type fct; // Flow control type the block type relates to.
- pre_parse_if_else (t, tt);
+ switch (bt)
+ {
+ case line_type::cmd_if:
+ case line_type::cmd_ifn:
+ case line_type::cmd_elif:
+ case line_type::cmd_elifn:
+ case line_type::cmd_else:
+ {
+ fct = line_type::cmd_if;
+ break;
+ }
+ case line_type::cmd_while:
+ case line_type::cmd_for_stream:
+ case line_type::cmd_for_args:
+ {
+ fct = bt;
+ break;
+ }
+ default: assert (false);
}
+
+ pre_parse_line (t, tt, fct);
+ assert (tt == type::newline);
}
void parser::
@@ -283,8 +507,7 @@ namespace build2
// enter: peeked first token of next line (type in tt)
// leave: newline
- // Parse lines until we see closing 'end'. Nested if-else blocks are
- // handled recursively.
+ // Parse lines until we see closing 'end'.
//
for (line_type bt (line_type::cmd_if); // Current block.
;
@@ -292,25 +515,21 @@ namespace build2
{
const location ll (get_location (peeked ()));
- if (tt == type::eos)
- fail (ll) << "expected closing 'end'";
-
// Parse one line. Note that this one line can still be multiple
- // lines in case of if-else. In this case we want to view it as
- // cmd_if, not cmd_end. Thus remember the start position of the
- // next logical line.
+ // lines in case of a flow control construct. In this case we want
+ // to view it as cmd_if, not cmd_end. Thus remember the start
+ // position of the next logical line.
//
size_t i (script_->body.size ());
- pre_parse_line (t, tt, true /* if_line */);
- assert (tt == type::newline);
+ pre_parse_block_line (t, tt, bt);
line_type lt (script_->body[i].type);
// First take care of 'end'.
//
if (lt == line_type::cmd_end)
- return;
+ break;
// Check if-else block sequencing.
//
@@ -334,6 +553,29 @@ namespace build2
}
}
+ void parser::
+ pre_parse_loop (token& t, type& tt, line_type lt)
+ {
+ // enter: peeked first token of next line (type in tt)
+ // leave: newline
+
+ assert (lt == line_type::cmd_while ||
+ lt == line_type::cmd_for_stream ||
+ lt == line_type::cmd_for_args);
+
+ // Parse lines until we see closing 'end'.
+ //
+ for (;; tt = peek (lexer_mode::first_token))
+ {
+ size_t i (script_->body.size ());
+
+ pre_parse_block_line (t, tt, lt);
+
+ if (script_->body[i].type == line_type::cmd_end)
+ break;
+ }
+ }
+
command_expr parser::
parse_command_line (token& t, type& tt)
{
@@ -344,12 +586,12 @@ namespace build2
//
assert (!pre_parse_);
- pair<command_expr, here_docs> p (
+ parse_command_expr_result pr (
parse_command_expr (t, tt, lexer::redirect_aliases));
assert (tt == type::newline);
- parse_here_documents (t, tt, p);
+ parse_here_documents (t, tt, pr);
assert (tt == type::newline);
// @@ Note that currently running programs via a runner (e.g., see
@@ -362,7 +604,7 @@ namespace build2
// passed to the environment constructor, similar to passing the
// script deadline.
//
- return move (p.first);
+ return move (pr.expr);
}
//
@@ -423,6 +665,12 @@ namespace build2
fail (l) << "'" << v << "' call via 'env' builtin";
};
+ auto diag_loc = [this] ()
+ {
+ assert (!diag_preamble_.empty ());
+ return diag_preamble_.back ().tokens[0].location ();
+ };
+
if (v == "diag")
{
verify ();
@@ -439,24 +687,41 @@ namespace build2
}
else // Custom diagnostics.
{
- assert (diag_line_);
-
fail (l) << "multiple 'diag' builtin calls" <<
- info (diag_line_->second) << "previous call is here";
+ info (diag_loc ()) << "previous call is here";
}
}
- // Instruct the parser to save the diag builtin line separately
- // from the script lines, when it is fully parsed. Note that it
- // will be executed prior to the script body execution to obtain
- // the custom diagnostics.
+ // Move the script body to the end of the diag preamble.
//
- diag_line_ = make_pair (line (), l);
- save_line_ = &diag_line_->first;
- diag_weight_ = 4;
+ // Note that we move into the preamble whatever is there and delay
+ // the check until the execution (see the depdb preamble
+ // collecting for the reasoning).
+ //
+ lines& ls (script_->body);
+ diag_preamble_.insert (diag_preamble_.end (),
+ make_move_iterator (ls.begin ()),
+ make_move_iterator (ls.end ()));
+ ls.clear ();
- diag_name_ = nullopt;
- diag_name2_ = nullopt;
+ // Also move the body_temp_dir flag, if it is true.
+ //
+ if (script_->body_temp_dir)
+ {
+ script_->diag_preamble_temp_dir = true;
+ script_->body_temp_dir = false;
+ }
+
+ // Similar to the depdb preamble collection, instruct the parser
+ // to save the diag builtin line separately from the script
+ // lines.
+ //
+ diag_preamble_.push_back (line ());
+ save_line_ = &diag_preamble_.back ();
+
+ diag_weight_ = 4;
+ diag_name_ = nullopt;
+ diag_name2_ = nullopt;
// Note that the rest of the line contains the builtin argument to
// be printed, thus we parse it in the value lexer mode.
@@ -478,17 +743,16 @@ namespace build2
{
if (a != perform_update_id)
fail (l) << "'depdb' builtin cannot be used to "
- << ctx.meta_operation_table[a.meta_operation ()].name
- << ' ' << ctx.operation_table[a.operation ()];
+ << ctx->meta_operation_table[a.meta_operation ()].name
+ << ' ' << ctx->operation_table[a.operation ()];
}
if (!file_based_)
- fail (l) << "'depdb' builtin can only be used for file-based "
- << "targets";
+ fail (l) << "'depdb' builtin can only be used for file- or "
+ << "file group-based targets";
- if (diag_line_)
- fail (diag_line_->second)
- << "'diag' builtin call before 'depdb' call" <<
+ if (!diag_preamble_.empty ())
+ fail (diag_loc ()) << "'diag' builtin call before 'depdb' call" <<
info (l) << "'depdb' call is here";
// Note that the rest of the line contains the builtin command
@@ -567,8 +831,18 @@ namespace build2
fail (l) << "multiple 'depdb dyndep' calls" <<
info (depdb_dyndep_->first) << "previous call is here";
- if (peek () == type::word && peeked ().value == "--byproduct")
- depdb_dyndep_byproduct_ = true;
+ if (peek () == type::word)
+ {
+ const string& v (peeked ().value);
+
+ // Note: --byproduct and --dyn-target are mutually
+ // exclusive.
+ //
+ if (v == "--byproduct")
+ depdb_dyndep_byproduct_ = true;
+ else if (v == "--dyn-target")
+ depdb_dyndep_dyn_target_ = true;
+ }
}
else
{
@@ -577,6 +851,8 @@ namespace build2
info (depdb_dyndep_->first) << "'depdb dyndep' call is here";
}
+ depdb_value_ = depdb_value_ || (v == "string" || v == "hash");
+
// Move the script body to the end of the depdb preamble.
//
// Note that at this (pre-parsing) stage we cannot evaluate if
@@ -601,10 +877,12 @@ namespace build2
script_->body_temp_dir = false;
}
- // Reset the impure function call info since it's valid for the
- // depdb preamble.
+ // Reset the impure function call and computed variable
+ // expansion tracking since both are valid for the depdb
+ // preamble.
//
impure_func_ = nullopt;
+ computed_var_ = nullopt;
// Instruct the parser to save the depdb builtin line separately
// from the script lines, when it is fully parsed. Note that the
@@ -656,10 +934,53 @@ namespace build2
//
// This is also the reason why we add a diag frame.
//
+ // The problem turned out to be worse than originally thought: we
+ // may call a function (for example, as part of if) with invalid
+ // arguments. And this could happen in the depdb preamble, which
+ // means we cannot fix this by moving the depdb builtin (which must
+ // come after the preamble). So let's peek at what's ahead and omit
+ // the expansion if it's anything iffy, namely, eval context or
+ // function call.
+ //
+ bool skip_diag (false);
if (pre_parse_ && diag_weight_ != 4)
{
- pre_parse_ = false; // Make parse_names() perform expansions.
- pre_parse_suspended_ = true;
+ // Based on the buildfile expansion parsing logic.
+ //
+ if (tt == type::lparen) // Evaluation context.
+ skip_diag = true;
+ else if (tt == type::dollar)
+ {
+ type ptt (peek (lexer_mode::variable));
+
+ if (!peeked ().separated)
+ {
+ if (ptt == type::lparen)
+ {
+ // While strictly speaking this can also be a function call,
+ // this is highly unusual and we will assume it's a variable
+ // expansion.
+ }
+ else if (ptt == type::word)
+ {
+ pair<char, bool> r (lexer_->peek_char ());
+
+ if (r.first == '(' && !r.second) // Function call.
+ skip_diag = true;
+ }
+ }
+ }
+
+ if (!skip_diag)
+ {
+ // Sanity check: we should not be suspending the pre-parse mode
+ // turned on by the base parser.
+ //
+ assert (top_pre_parse_);
+
+ pre_parse_ = false; // Make parse_names() perform expansions.
+ pre_parse_suspended_ = true;
+ }
}
auto df = make_diag_frame (
@@ -687,7 +1008,7 @@ namespace build2
pre_parse_ = true;
}
- if (pre_parse_ && diag_weight_ == 4)
+ if (pre_parse_ && (diag_weight_ == 4 || skip_diag))
return nullopt;
}
@@ -709,6 +1030,19 @@ namespace build2
return nullopt;
}
+ // If this is a value of the special cmdline type, then only do
+ // certain tests below if the value is not quoted and doesn't contain
+ // any characters that would be consumed by re-lexing.
+ //
+ // This is somewhat of a hack but handling this properly would not
+ // only require unquoting but also keeping track of which special
+ // characters were quoted (and thus should be treated literally) and
+ // which were not (and thus should act as separators, etc).
+ //
+ bool qs (pr.type != nullptr &&
+ pr.type->is_a<cmdline> () &&
+ need_cmdline_relex (ns[0].value));
+
// We have to handle process_path[_ex] and executable target. The
// process_path[_ex] we may have to recognize syntactically because
// of the loss of type, for example:
@@ -742,10 +1076,14 @@ namespace build2
pp_vt = pr.type;
ns.clear ();
}
- else if (ns[0].file ())
+ else if (ns[0].file () && !qs)
{
// Find the end of the value.
//
+ // Note that here we ignore the whole cmdline issue (see above)
+ // for the further values assuming that they are unquoted and
+ // don't contain any special characters.
+ //
auto b (ns.begin ());
auto i (value_traits<process_path_ex>::find_end (ns));
@@ -812,40 +1150,45 @@ namespace build2
//
else if (!ns[0].simple ())
{
- if (const target* t = search_existing (
- ns[0], *scope_, ns[0].pair ? ns[1].dir : empty_dir_path))
+ if (!qs)
{
- if (const auto* et = t->is_a<exe> ())
+ // This could be a script from src so search like a prerequisite.
+ //
+ if (const target* t = search_existing (
+ ns[0], *scope_, ns[0].pair ? ns[1].dir : empty_dir_path))
{
- if (pre_parse_)
+ if (const auto* et = t->is_a<exe> ())
{
- if (auto* n = et->lookup_metadata<string> ("name"))
+ if (pre_parse_)
{
- set_diag (*n, 3);
- return nullopt;
+ if (auto* n = et->lookup_metadata<string> ("name"))
+ {
+ set_diag (*n, 3);
+ return nullopt;
+ }
+ // Fall through.
}
- // Fall through.
- }
- else
- {
- process_path pp (et->process_path ());
+ else
+ {
+ process_path pp (et->process_path ());
- if (pp.empty ())
- fail (l) << "target " << *et << " is out of date" <<
- info << "consider specifying it as a prerequisite of "
- << environment_->target;
+ if (pp.empty ())
+ fail (l) << "target " << *et << " is out of date" <<
+ info << "consider specifying it as a prerequisite of "
+ << environment_->target;
- ns.erase (ns.begin (), ns.begin () + (ns[0].pair ? 2 : 1));
- return optional<process_path> (move (pp));
+ ns.erase (ns.begin (), ns.begin () + (ns[0].pair ? 2 : 1));
+ return optional<process_path> (move (pp));
+ }
}
- }
- if (pre_parse_)
- {
- diag_record dr (fail (l));
- dr << "unable to deduce low-verbosity script diagnostics name "
- << "from target " << *t;
- suggest_diag (dr);
+ if (pre_parse_)
+ {
+ diag_record dr (fail (l));
+ dr << "unable to deduce low-verbosity script diagnostics name "
+ << "from target " << *t;
+ suggest_diag (dr);
+ }
}
}
@@ -863,26 +1206,29 @@ namespace build2
{
// If we are here, the name is simple and is not part of a pair.
//
- string& v (ns[0].value);
+ if (!qs)
+ {
+ string& v (ns[0].value);
- // Try to interpret the name as a builtin.
- //
- const builtin_info* bi (builtins.find (v));
+ // Try to interpret the name as a builtin.
+ //
+ const builtin_info* bi (builtins.find (v));
- if (bi != nullptr)
- {
- set_diag (move (v), bi->weight);
- return nullopt;
- }
- //
- // Try to interpret the name as a pseudo-builtin.
- //
- // Note that both of them have the zero weight and cannot be picked
- // up as a script name.
- //
- else if (v == "set" || v == "exit")
- {
- return nullopt;
+ if (bi != nullptr)
+ {
+ set_diag (move (v), bi->weight);
+ return nullopt;
+ }
+ //
+ // Try to interpret the name as a pseudo-builtin.
+ //
+ // Note that both of them have the zero weight and cannot be picked
+ // up as a script name.
+ //
+ else if (v == "set" || v == "exit")
+ {
+ return nullopt;
+ }
}
diag_record dr (fail (l));
@@ -907,8 +1253,9 @@ namespace build2
// Note that we rely on "small function object" optimization here.
//
auto exec_cmd = [this] (token& t, build2::script::token_type& tt,
- size_t li,
+ const iteration_index* ii, size_t li,
bool single,
+ const function<command_function>& cf,
const location& ll)
{
// We use the 0 index to signal that this is the only command.
@@ -919,7 +1266,7 @@ namespace build2
command_expr ce (
parse_command_line (t, static_cast<token_type&> (tt)));
- runner_->run (*environment_, ce, li, ll);
+ runner_->run (*environment_, ce, ii, li, cf, ll);
};
exec_lines (s.body, exec_cmd);
@@ -928,11 +1275,30 @@ namespace build2
runner_->leave (e, s.end_loc);
}
+ // Return true if the specified expression executes the set builtin or
+ // is a for-loop.
+ //
+ static bool
+ valid_preamble_cmd (const command_expr& ce,
+ const function<command_function>& cf)
+ {
+ return find_if (
+ ce.begin (), ce.end (),
+ [&cf] (const expr_term& et)
+ {
+ const process_path& p (et.pipe.back ().program);
+ return p.initial == nullptr &&
+ (p.recall.string () == "set" ||
+ (cf != nullptr && p.recall.string () == "for"));
+ }) != ce.end ();
+ }
+
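
The valid_preamble_cmd() predicate above is what restricts depdb and diag
preambles to variable assignments: plain assignments, the set builtin, and
(when a command function is supplied) for-loops that wrap them. A rough
buildscript sketch of what this permits (the gen tool and the $modes
variable are hypothetical; the loop syntax is assumed from the accompanying
for-loop tests):

  file{output}: file{input}
  {{
     flags = --strict                  # Plain variable assignment.
     cat $path($<[0]) | set version    # set builtin.
     for m: $modes                     # for-loop wrapping an assignment.
       flags += --mode=$m
     end
     depdb hash $flags $version

     gen $flags -o $path($>[0]) $path($<[0])  # Body, not preamble.
  }}

Any other command before the final depdb (or diag) line would be rejected
with the "disallowed command in ... preamble" diagnostics.
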
void parser::
- exec_depdb_preamble (action a, const scope& bs, const file& t,
+ exec_depdb_preamble (action a, const scope& bs, const target& t,
environment& e, const script& s, runner& r,
lines_iterator begin, lines_iterator end,
depdb& dd,
+ dynamic_targets* dyn_targets,
bool* update,
optional<timestamp> mt,
bool* deferred_failure,
@@ -955,28 +1321,34 @@ namespace build2
action a;
const scope& bs;
- const file& t;
+ const target& t;
environment& env;
const script& scr;
depdb& dd;
+ dynamic_targets* dyn_targets;
bool* update;
bool* deferred_failure;
optional<timestamp> mt;
dyndep_byproduct* byp;
- } data {trace, a, bs, t, e, s, dd, update, deferred_failure, mt, byp};
+ } data {
+ trace,
+ a, bs, t,
+ e, s,
+ dd, dyn_targets, update, deferred_failure, mt, byp};
auto exec_cmd = [this, &data] (token& t,
build2::script::token_type& tt,
- size_t li,
+ const iteration_index* ii, size_t li,
bool /* single */,
+ const function<command_function>& cf,
const location& ll)
{
// Note that we never reset the line index to zero (as we do in
- // execute_body()) assuming that there are some script body
- // commands to follow.
+ // execute_body()) assuming that there are some script body commands
+ // to follow.
//
if (tt == type::word && t.value == "depdb")
{
@@ -995,8 +1367,9 @@ namespace build2
//
exec_depdb_dyndep (t, tt,
li, ll,
- data.a, data.bs, const_cast<file&> (data.t),
+ data.a, data.bs, const_cast<target&> (data.t),
data.dd,
+ *data.dyn_targets,
*data.update,
*data.mt,
*data.deferred_failure,
@@ -1006,35 +1379,29 @@ namespace build2
{
names ns (exec_special (t, tt, true /* skip <cmd> */));
+ string v;
+ const char* w (nullptr);
if (cmd == "hash")
{
sha256 cs;
for (const name& n: ns)
to_checksum (cs, n);
- if (data.dd.expect (cs.string ()) != nullptr)
- l4 ([&] {
- data.trace (ll)
- << "'depdb hash' argument change forcing update of "
- << data.t;});
+ v = cs.string ();
+ w = "argument";
}
else if (cmd == "string")
{
- string s;
try
{
- s = convert<string> (move (ns));
+ v = convert<string> (move (ns));
}
catch (const invalid_argument& e)
{
fail (ll) << "invalid 'depdb string' argument: " << e;
}
- if (data.dd.expect (s) != nullptr)
- l4 ([&] {
- data.trace (ll)
- << "'depdb string' argument change forcing update of "
- << data.t;});
+ w = "argument";
}
else if (cmd == "env")
{
@@ -1055,14 +1422,32 @@ namespace build2
fail (ll) << pf << e;
}
- if (data.dd.expect (cs.string ()) != nullptr)
- l4 ([&] {
- data.trace (ll)
- << "'depdb env' environment change forcing update of "
- << data.t;});
+ v = cs.string ();
+ w = "environment";
}
else
assert (false);
+
+ // Prefix the value with the type letter. This serves two
+ // purposes:
+ //
+ // 1. It makes sure the result is never a blank line. We use
+ // blank lines as anchors to skip directly to certain entries
+ // (e.g., dynamic targets).
+ //
+ // 2. It allows us to detect the beginning of prerequisites
+ // since an absolute path will be distinguishable from these
+ // entries (in the future we may want to add an explicit
+ // blank after such custom entries to make this easier).
+ //
+ v.insert (0, 1, ' ');
+ v.insert (0, 1, cmd[0]); // `h`, `s`, or `e`
+
+ if (data.dd.expect (v) != nullptr)
+ l4 ([&] {
+ data.trace (ll)
+ << "'depdb " << cmd << "' " << w << " change forcing "
+ << "update of " << data.t;});
}
}
else
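
For reference, with the type letter prefixed above, the custom entries
written to depdb by these builtins come out as `h <hash>`, `s <string>`,
and `e <hash>`. A hypothetical preamble fragment and the entries it would
produce (values abbreviated, paths made up):

  depdb hash $flags        # h 2c26b46b68ffc68f...
  depdb string $out_base   # s /tmp/proj/out
  depdb env PATH SDKROOT   # e 9f86d081884c7d65...
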
@@ -1070,15 +1455,7 @@ namespace build2
command_expr ce (
parse_command_line (t, static_cast<token_type&> (tt)));
- // Verify that this expression executes the set builtin.
- //
- if (find_if (ce.begin (), ce.end (),
- [] (const expr_term& et)
- {
- const process_path& p (et.pipe.back ().program);
- return p.initial == nullptr &&
- p.recall.string () == "set";
- }) == ce.end ())
+ if (!valid_preamble_cmd (ce, cf))
{
const replay_tokens& rt (data.scr.depdb_preamble.back ().tokens);
assert (!rt.empty ());
@@ -1089,20 +1466,93 @@ namespace build2
info (rt[0].location ()) << "depdb preamble ends here";
}
- runner_->run (*environment_, ce, li, ll);
+ runner_->run (*environment_, ce, ii, li, cf, ll);
}
};
exec_lines (begin, end, exec_cmd);
}
+ pair<names, location> parser::
+ execute_diag_preamble (const scope& rs, const scope& bs,
+ environment& e, const script& s, runner& r,
+ bool diag, bool enter, bool leave)
+ {
+ tracer trace ("execute_diag_preamble");
+
+ assert (!s.diag_preamble.empty ());
+
+ const line& dl (s.diag_preamble.back ()); // Diag builtin line.
+
+ pre_exec (rs, bs, e, &s, &r);
+
+ if (enter)
+ runner_->enter (e, s.start_loc);
+
+ // Perform the variable assignments.
+ //
+ auto exec_cmd = [&dl, this] (token& t,
+ build2::script::token_type& tt,
+ const iteration_index* ii, size_t li,
+ bool /* single */,
+ const function<command_function>& cf,
+ const location& ll)
+ {
+ // Note that we never reset the line index to zero (as we do in
+ // execute_body()) assuming that there are some script body commands
+ // to follow.
+ //
+ command_expr ce (
+ parse_command_line (t, static_cast<token_type&> (tt)));
+
+ if (!valid_preamble_cmd (ce, cf))
+ {
+ const replay_tokens& rt (dl.tokens);
+ assert (!rt.empty ());
+
+ fail (ll) << "disallowed command in diag preamble" <<
+ info << "only variable assignments are allowed in diag preamble"
+ << info (rt[0].location ()) << "diag preamble ends here";
+ }
+
+ runner_->run (*environment_, ce, ii, li, cf, ll);
+ };
+
+ exec_lines (s.diag_preamble.begin (), s.diag_preamble.end () - 1,
+ exec_cmd);
+
+ // Execute the diag line, if requested.
+ //
+ names ns;
+
+ if (diag)
+ {
+ // Copy the tokens and start playing.
+ //
+ replay_data (replay_tokens (dl.tokens));
+
+ token t;
+ build2::script::token_type tt;
+ next (t, tt);
+
+ ns = exec_special (t, tt, true /* skip_first */);
+
+ replay_stop ();
+ }
+
+ if (leave)
+ runner_->leave (e, s.end_loc);
+
+ return make_pair (ns, dl.tokens.front ().location ());
+ }
+
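
Tying this back to the recipe syntax: the diag preamble executed above is
the sequence of variable assignment lines that precede the diag builtin,
with the diag line itself executed only when requested. A minimal sketch
(the gen tool is hypothetical):

  file{output}: file{input}
  {{
     what = $name($<[0])   # Assignment lines: the diag preamble proper.
     diag gen $what        # The trailing diag line.

     gen -o $path($>[0]) $path($<[0])
  }}
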
void parser::
pre_exec (const scope& rs, const scope& bs,
environment& e, const script* s, runner* r)
{
path_ = nullptr; // Set by replays.
- pre_parse_ = false;
+ top_pre_parse_ = pre_parse_ = false;
set_lexer (nullptr);
@@ -1152,22 +1602,37 @@ namespace build2
apply_value_attributes (&var, lhs, move (rhs), kind);
};
- auto exec_if = [this] (token& t, build2::script::token_type& tt,
- size_t li,
- const location& ll)
+ auto exec_cond = [this] (token& t, build2::script::token_type& tt,
+ const iteration_index* ii, size_t li,
+ const location& ll)
{
command_expr ce (
parse_command_line (t, static_cast<token_type&> (tt)));
- // Assume if-else always involves multiple commands.
+ // Assume a flow control construct always involves multiple
+ // commands.
//
- return runner_->run_if (*environment_, ce, li, ll);
+ return runner_->run_cond (*environment_, ce, ii, li, ll);
+ };
+
+ auto exec_for = [this] (const variable& var,
+ value&& val,
+ const attributes& val_attrs,
+ const location&)
+ {
+ value& lhs (environment_->assign (var));
+
+ attributes_.push_back (val_attrs);
+
+ apply_value_attributes (&var, lhs, move (val), type::assign);
};
- build2::script::parser::exec_lines (begin, end,
- exec_set, exec_cmd, exec_if,
- environment_->exec_line,
- &environment_->var_pool);
+ build2::script::parser::exec_lines (
+ begin, end,
+ exec_set, exec_cmd, exec_cond, exec_for,
+ nullptr /* iteration_index */,
+ environment_->exec_line,
+ &environment_->var_pool);
}
names parser::
@@ -1184,33 +1649,12 @@ namespace build2
: names ();
}
- names parser::
- execute_special (const scope& rs, const scope& bs,
- environment& e,
- const line& ln,
- bool omit_builtin)
- {
- pre_exec (rs, bs, e, nullptr /* script */, nullptr /* runner */);
-
- // Copy the tokens and start playing.
- //
- replay_data (replay_tokens (ln.tokens));
-
- token t;
- build2::script::token_type tt;
- next (t, tt);
-
- names r (exec_special (t, tt, omit_builtin));
-
- replay_stop ();
- return r;
- }
-
void parser::
exec_depdb_dyndep (token& lt, build2::script::token_type& ltt,
size_t li, const location& ll,
- action a, const scope& bs, file& t,
+ action a, const scope& bs, target& t,
depdb& dd,
+ dynamic_targets& dyn_targets,
bool& update,
timestamp mt,
bool& deferred_failure,
@@ -1223,6 +1667,7 @@ namespace build2
depdb_dyndep_options ops;
bool prog (false);
bool byprod (false);
+ bool dyn_tgt (false);
// Prerequisite update filter (--update-*).
//
@@ -1265,11 +1710,9 @@ namespace build2
next (t, tt); // Skip the 'dyndep' command.
- if (tt == type::word && t.value == "--byproduct")
- {
- byprod = true;
+ if (tt == type::word && ((byprod = (t.value == "--byproduct")) ||
+ (dyn_tgt = (t.value == "--dyn-target"))))
next (t, tt);
- }
assert (byprod == (byprod_result != nullptr));
@@ -1439,7 +1882,7 @@ namespace build2
{
diag_record dr (fail (l));
dr << "depdb dyndep: invalid string value ";
- to_stream (dr.os, n, true /* quote */);
+ to_stream (dr.os, n, quote_mode::normal);
}
}
}
@@ -1488,10 +1931,23 @@ namespace build2
continue;
}
- // Handle --byproduct in the wrong place.
+ // Handle --byproduct and --dyn-target in the wrong place.
//
if (strcmp (a, "--byproduct") == 0)
- fail (ll) << "depdb dyndep: --byproduct must be first option";
+ {
+ fail (ll) << "depdb dyndep: "
+ << (dyn_tgt
+ ? "--byproduct specified with --dyn-target"
+ : "--byproduct must be first option");
+ }
+
+ if (strcmp (a, "--dyn-target") == 0)
+ {
+ fail (ll) << "depdb dyndep: "
+ << (byprod
+ ? "--dyn-target specified with --byproduct"
+ : "--dyn-target must be first option");
+ }
// Handle non-literal --update-*.
//
@@ -1516,29 +1972,31 @@ namespace build2
}
}
- // --what
- //
- const char* what (ops.what_specified ()
- ? ops.what ().c_str ()
- : "file");
-
// --format
//
dyndep_format format (dyndep_format::make);
-
if (ops.format_specified ())
{
const string& f (ops.format ());
- if (f != "make")
+ if (f == "lines") format = dyndep_format::lines;
+ else if (f != "make")
fail (ll) << "depdb dyndep: invalid --format option value '"
<< f << "'";
}
+ // Prerequisite-specific options.
+ //
+
+ // --what
+ //
+ const char* what (ops.what_specified ()
+ ? ops.what ().c_str ()
+ : "file");
+
// --cwd
//
optional<dir_path> cwd;
-
if (ops.cwd_specified ())
{
if (!byprod)
@@ -1558,28 +2016,6 @@ namespace build2
fail (ll) << "depdb dyndep: -I specified with --byproduct";
}
- // --file
- //
- // Note that if --file is specified without a program, then we assume
- // it is one of the static prerequisites.
- //
- optional<path> file;
-
- if (ops.file_specified ())
- {
- file = move (ops.file ());
-
- if (file->relative ())
- {
- if (!cwd)
- fail (ll) << "depdb dyndep: relative path specified with --file";
-
- *file = *cwd / *file;
- }
- }
- else if (!prog)
- fail (ll) << "depdb dyndep: program or --file expected";
-
// --default-type
//
// Get the default prerequisite type falling back to file{} if not
@@ -1591,7 +2027,7 @@ namespace build2
// translation unit would want to make sure it resolves extracted
// system headers to h{} targets analogous to the c module's rule.
//
- const target_type* def_pt;
+ const target_type* def_pt (&file::static_type);
if (ops.default_type_specified ())
{
const string& t (ops.default_type ());
@@ -1599,10 +2035,8 @@ namespace build2
def_pt = bs.find_target_type (t);
if (def_pt == nullptr)
fail (ll) << "depdb dyndep: unknown target type '" << t
- << "' specific with --default-type";
+ << "' specified with --default-type";
}
- else
- def_pt = &file::static_type;
// --adhoc
//
@@ -1612,6 +2046,93 @@ namespace build2
fail (ll) << "depdb dyndep: --adhoc specified with --byproduct";
}
+ // Target-specific options.
+ //
+
+ // --target-what
+ //
+ const char* what_tgt ("file");
+ if (ops.target_what_specified ())
+ {
+ if (!dyn_tgt)
+ fail (ll) << "depdb dyndep: --target-what specified without "
+ << "--dyn-target";
+
+ what_tgt = ops.target_what ().c_str ();
+ }
+
+ // --target-cwd
+ //
+ optional<dir_path> cwd_tgt;
+ if (ops.target_cwd_specified ())
+ {
+ if (!dyn_tgt)
+ fail (ll) << "depdb dyndep: --target-cwd specified without "
+ << "--dyn-target";
+
+ cwd_tgt = move (ops.target_cwd ());
+
+ if (cwd_tgt->relative ())
+ fail (ll) << "depdb dyndep: relative path specified with "
+ << "--target-cwd";
+ }
+
+ // --target-default-type
+ //
+ const target_type* def_tt (&file::static_type);
+ if (ops.target_default_type_specified ())
+ {
+ if (!dyn_tgt)
+ fail (ll) << "depdb dyndep: --target-default-type specified "
+ << "without --dyn-target";
+
+ const string& t (ops.target_default_type ());
+
+ def_tt = bs.find_target_type (t);
+ if (def_tt == nullptr)
+ fail (ll) << "depdb dyndep: unknown target type '" << t
+ << "' specified with --target-default-type";
+ }
+
+ map<string, const target_type*> map_tt;
+ if (ops.target_extension_type_specified ())
+ {
+ if (!dyn_tgt)
+ fail (ll) << "depdb dyndep: --target-extension-type specified "
+ << "without --dyn-target";
+
+ for (pair<const string, string>& p: ops.target_extension_type ())
+ {
+ const target_type* tt (bs.find_target_type (p.second));
+ if (tt == nullptr)
+ fail (ll) << "depdb dyndep: unknown target type '" << p.second
+ << "' specified with --target-extension-type";
+
+ map_tt[p.first] = tt;
+ }
+ }
+
+ // --file (last since need --*cwd)
+ //
+ // Note that if --file is specified without a program, then we assume
+ // it is one of the static prerequisites.
+ //
+ optional<path> file;
+ if (ops.file_specified ())
+ {
+ file = move (ops.file ());
+
+ if (file->relative ())
+ {
+ if (!cwd && !cwd_tgt)
+ fail (ll) << "depdb dyndep: relative path specified with --file";
+
+ *file = (cwd ? *cwd : *cwd_tgt) / *file;
+ }
+ }
+ else if (!prog)
+ fail (ll) << "depdb dyndep: program or --file expected";
+
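
Putting the target-side options together, a recipe that reads both its
dynamic targets and prerequisites from a manifest provided as a static
prerequisite might invoke the builtin roughly as follows (the manifest
position and option values are made up; --target-what only affects
diagnostics, while --target-default-type and --target-extension-type tune
the path to target type mapping):

  {{
     # The second static prerequisite is a manifest listing targets, a
     # blank line, then prerequisites (see the lines format below).
     #
     depdb dyndep --dyn-target --format=lines --target-cwd=$out_base --file=$path($<[1])
  }}
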
// Update prerequisite targets.
//
using dyndep = dyndep_rule;
@@ -1622,12 +2143,25 @@ namespace build2
{
if (const target* pt =
(p.target != nullptr ? p.target :
- p.adhoc ? reinterpret_cast<target*> (p.data)
+ p.adhoc () ? reinterpret_cast<target*> (p.data)
: nullptr))
{
+ // Automatically skip update=unmatch that we could not unmatch.
+ //
+ // Note that we don't skip update=match here (unless filtered out)
+ // in order to incorporate the result into our out-of-date'ness.
+ // So there is a nuanced interaction between update=match and
+ // --update-*.
+ //
+ if ((p.include & adhoc_buildscript_rule::include_unmatch) != 0)
+ {
+ l6 ([&]{trace << "skipping unmatched " << *pt;});
+ continue;
+ }
+
// Apply the --update-* filter.
//
- if (!p.adhoc && !filters.empty ())
+ if (!p.adhoc () && !filters.empty ())
{
// Compute and cache "effective" name that we will be pattern-
// matching (similar code to variable_type_map::find()).
@@ -1692,10 +2226,15 @@ namespace build2
update = dyndep::update (
trace, a, *pt, update ? timestamp_unknown : mt) || update;
+ // While implicit, it is for a static prerequisite, so marking it
+ // feels correct.
+ //
+ p.include |= prerequisite_target::include_udm;
+
// Mark as updated (see execute_update_prerequisites() for
// details.
//
- if (!p.adhoc)
+ if (!p.adhoc ())
p.data = 1;
}
}
@@ -1724,6 +2263,10 @@ namespace build2
return;
}
+ const scope& rs (*bs.root_scope ());
+
+ group* g (t.is_a<group> ()); // If not group then file.
+
// This code is based on the prior work in the cc module (specifically
// extract_headers()) where you can often find more detailed rationale
// for some of the steps performed.
@@ -1821,9 +2364,29 @@ namespace build2
command_expr cmd;
srcout_map so_map;
+ // Save/restore script cleanups.
+ //
+ struct cleanups
+ {
+ build2::script::cleanups ordinary;
+ paths special;
+ };
+ optional<cleanups> script_cleanups;
+
+ auto cleanups_guard = make_guard (
+ [this, &script_cleanups] ()
+ {
+ if (script_cleanups)
+ {
+ swap (environment_->cleanups, script_cleanups->ordinary);
+ swap (environment_->special_cleanups, script_cleanups->special);
+ }
+ });
+
auto init_run = [this, &ctx,
&lt, &ltt, &ll,
- prog, &file, &ops, &cmd, &so_map] ()
+ prog, &file, &ops,
+ &cmd, &so_map, &script_cleanups] ()
{
// Populate the srcout map with the -I$out_base -I$src_base pairs.
//
@@ -1836,6 +2399,10 @@ namespace build2
if (prog)
{
+ script_cleanups = cleanups {};
+ swap (environment_->cleanups, script_cleanups->ordinary);
+ swap (environment_->special_cleanups, script_cleanups->special);
+
cmd = parse_command_line (lt, static_cast<token_type&> (ltt));
// If the output goes to stdout, then this should be a single
@@ -1851,15 +2418,10 @@ namespace build2
// they include the line index in their names to avoid clashes
// between lines).
//
- // Cleanups are not an issue, they will simply replaced. And
+ // Cleanups are not an issue, they will simply be replaced. And
// overriding the contents of the special files seems harmless and
// consistent with what would happen if the command redirects its
// output to a non-special file.
- //
- if (file)
- environment_->clean (
- {build2::script::cleanup_type::always, *file},
- true /* implicit */);
}
};
@@ -1869,7 +2431,7 @@ namespace build2
size_t skip_count (0);
auto add = [this, &trace, what,
- a, &bs, &t, &pts, pts_n = pts.size (),
+ a, &bs, &t, g, &pts, pts_n = pts.size (),
&ops, &map_ext, def_pt, &pfx_map, &so_map,
&dd, &skip_count] (path fp,
size_t* skip,
@@ -1879,6 +2441,61 @@ namespace build2
bool cache (skip == nullptr);
+ // Handle fsdir{} prerequisite separately.
+ //
+ // Note: inspired by inject_fsdir().
+ //
+ if (fp.to_directory ())
+ {
+ if (!cache)
+ {
+ // Note: already absolute since cannot be non-existent.
+ //
+ fp.normalize ();
+ }
+
+ const fsdir* dt (&search<fsdir> (t,
+ path_cast<dir_path> (fp),
+ dir_path (),
+ string (), nullptr, nullptr));
+
+ // Subset of code for file below.
+ //
+ if (!cache)
+ {
+ for (size_t i (0); i != pts_n; ++i)
+ {
+ const prerequisite_target& p (pts[i]);
+
+ if (const target* pt =
+ (p.target != nullptr ? p.target :
+ p.adhoc () ? reinterpret_cast<target*> (p.data) :
+ nullptr))
+ {
+ if (dt == pt)
+ return false;
+ }
+ }
+
+ if (*skip != 0)
+ {
+ --(*skip);
+ return false;
+ }
+ }
+
+ match_sync (a, *dt);
+ pts.push_back (
+ prerequisite_target (
+ nullptr, true /* adhoc */, reinterpret_cast<uintptr_t> (dt)));
+
+ if (!cache)
+ dd.expect (fp.representation ());
+
+ skip_count++;
+ return false;
+ }
+
// We can only defer the failure if we will be running the recipe
// body.
//
@@ -1923,23 +2540,36 @@ namespace build2
if (const target* pt =
(p.target != nullptr ? p.target :
- p.adhoc ? reinterpret_cast<target*> (p.data) :
+ p.adhoc () ? reinterpret_cast<target*> (p.data) :
nullptr))
{
- if (ft == pt && (p.adhoc || p.data == 1))
+ if (ft == pt && (p.adhoc () || p.data == 1))
return false;
}
}
// Skip if this is one of the targets.
//
+ // Note that for dynamic targets this only works if we see the
+ // targets before prerequisites (like in the make dependency
+ // format).
+ //
if (ops.drop_cycles ())
{
- for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ if (g != nullptr)
{
- if (ft == m)
+ auto& ms (g->members);
+ if (find (ms.begin (), ms.end (), ft) != ms.end ())
return false;
}
+ else
+ {
+ for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ {
+ if (ft == m)
+ return false;
+ }
+ }
}
// Skip until where we left off.
@@ -1968,10 +2598,15 @@ namespace build2
{
prerequisite_target& pt (pts.back ());
- if (pt.adhoc)
+ // Note: set the include_target flag for consistency (the
+ // updated_during_match() check does not apply since it's a
+ // dynamic prerequisite).
+ //
+ if (pt.adhoc ())
{
pt.data = reinterpret_cast<uintptr_t> (pt.target);
pt.target = nullptr;
+ pt.include |= prerequisite_target::include_target;
}
else
pt.data = 1; // Already updated.
@@ -2005,13 +2640,27 @@ namespace build2
<< t;
});
+ // While in the make format targets come before prerequisites, in
+ // depdb we store them after since any change to prerequisites can
+ // invalidate the set of targets. So we save them first and process
+ // later.
+ //
+ // Note also that we need to return them to the caller in case we are
+ // updating.
+
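
Concretely, the dyndep-related tail of depdb then consists of any custom
h/s/e entries from the preamble, the dynamic prerequisite paths, a blank
separator line, and (with --dyn-target) the <type> <path> target entries
followed by their own blank separator. A made-up excerpt:

  h 2c26b46b68ffc68f...
  /usr/include/stdio.h
  /tmp/proj/out/gen/config.h

  doc /tmp/proj/out/docs/foo.html
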
// If nothing so far has invalidated the dependency database, then try
// the cached data before running the program.
//
bool cache (!update);
+ bool skip_blank (false);
for (bool restart (true), first_run (true); restart; cache = false)
{
+ // Clear the state in case we are restarting.
+ //
+ if (dyn_tgt)
+ dyn_targets.clear ();
+
restart = false;
if (cache)
@@ -2020,7 +2669,8 @@ namespace build2
//
assert (skip_count == 0);
- // We should always end with a blank line.
+ // We should always end with a blank line after the list of
+ // dynamic prerequisites.
//
for (;;)
{
@@ -2034,8 +2684,11 @@ namespace build2
break;
}
- if (l->empty ()) // Done, nothing changed.
- return;
+ if (l->empty ()) // Done with prerequisites, nothing changed.
+ {
+ skip_blank = true;
+ break;
+ }
if (optional<bool> r = add (path (move (*l)), nullptr, mt))
{
@@ -2057,6 +2710,52 @@ namespace build2
return;
}
}
+
+ if (!restart) // Nothing changed.
+ {
+ if (dyn_tgt)
+ {
+ // We should always end with a blank line after the list of
+ // dynamic targets.
+ //
+ for (;;)
+ {
+ string* l (dd.read ());
+
+ // If the line is invalid, run the compiler.
+ //
+ if (l == nullptr)
+ {
+ restart = true;
+ break;
+ }
+
+ if (l->empty ()) // Done with targets.
+ break;
+
+ // Split into type and path (see below for background).
+ //
+ size_t p (l->find (' '));
+ if (p == string::npos || // Invalid format.
+ p == 0 || // Empty type.
+ p + 1 == l->size ()) // Empty path.
+ {
+ dd.write (); // Invalidate this line.
+ restart = true;
+ break;
+ }
+
+ string t (*l, 0, p);
+ l->erase (0, p + 1);
+
+ dyn_targets.push_back (
+ dynamic_target {move (t), path (move (*l))});
+ }
+ }
+
+ if (!restart) // Done, nothing changed.
+ break; // Break early to keep cache=true.
+ }
}
else
{
@@ -2065,9 +2764,16 @@ namespace build2
init_run ();
first_run = false;
}
- else if (!prog)
+ else
{
- fail (ll) << "generated " << what << " without program to retry";
+ if (!prog)
+ fail (ll) << "generated " << what << " without program to retry";
+
+ // Drop dyndep cleanups accumulated on the previous run.
+ //
+ assert (script_cleanups); // Sanity check.
+ environment_->cleanups.clear ();
+ environment_->special_cleanups.clear ();
}
// Save the timestamp just before we run the command. If we depend
@@ -2088,18 +2794,50 @@ namespace build2
istringstream iss;
if (prog)
{
- string s;
- build2::script::run (*environment_,
- cmd,
- li,
- ll,
- !file ? &s : nullptr);
-
+ // Note: depdb is disallowed inside flow control constructs.
+ //
if (!file)
{
- iss.str (move (s));
+ function<command_function> cf (
+ [&iss]
+ (build2::script::environment&,
+ const strings&,
+ auto_fd in,
+ pipe_command* pipe,
+ const optional<deadline>& dl,
+ const location& ll)
+ {
+ read (move (in),
+ false /* whitespace */,
+ false /* newline */,
+ true /* exact */,
+ [&iss] (string&& s) {iss.str (move (s));},
+ pipe,
+ dl,
+ ll,
+ "depdb-dyndep");
+ });
+
+ build2::script::run (*environment_,
+ cmd,
+ nullptr /* iteration_index */, li,
+ ll,
+ cf, false /* last_cmd */);
+
iss.exceptions (istream::badbit);
}
+ else
+ {
+ build2::script::run (
+ *environment_, cmd, nullptr /* iteration_index */, li, ll);
+
+ // Note: make it a maybe-cleanup in case the command cleans it
+ // up itself.
+ //
+ environment_->clean (
+ {build2::script::cleanup_type::maybe, *file},
+ true /* implicit */);
+ }
}
ifdstream ifs (ifdstream::badbit);
@@ -2167,32 +2905,72 @@ namespace build2
if (r.second.empty ())
continue;
- // @@ TODO: what should we do about targets?
+ // Skip targets unless requested to extract.
//
- // Note that if we take GCC as an example, things are
+ // BTW, if you are wondering why don't we extract targets
+ // by default, take GCC as an example, where things are
// quite messed up: by default it ignores -o and just
// takes the source file name and replaces the extension
// with a platform-appropriate object file extension. One
// can specify a custom target (or even multiple targets)
- // with -MT or with -MQ (quoting). Though MinGW GCC still
- // does not quote `:` with -MQ. So in this case it's
+ // with -MT or with -MQ (quoting). So in this case it's
// definitely easier for the user to ignore the targets
// and just specify everything in the buildfile.
//
- // On the other hand, other tools are likely to produce
- // more sensible output (except perhaps for quoting).
- //
- // @@ Maybe in the lax mode we should only recognize `:`
- // if it's separated on at least one side?
- //
- // Alternatively, we could detect Windows drives in
- // paths and "handle" them (I believe this is what GNU
- // make does). Maybe we should have three formats:
- // make-lax, make, make-strict?
- //
if (r.first == make_type::target)
+ {
+ // NOTE: similar code below.
+ //
+ if (dyn_tgt)
+ {
+ path& f (r.second);
+
+ if (f.relative ())
+ {
+ if (!cwd_tgt)
+ fail (il) << "relative " << what_tgt
+ << " target path '" << f
+ << "' in make dependency declaration" <<
+ info << "consider using --target-cwd to specify "
+ << "relative path base";
+
+ f = *cwd_tgt / f;
+ }
+
+ // Note that unlike prerequisites, here we don't need
+ // normalize_external() since we expect the targets to
+ // be within this project.
+ //
+ try
+ {
+ f.normalize ();
+ }
+ catch (const invalid_path&)
+ {
+ fail (il) << "invalid " << what_tgt << " target "
+ << "path '" << f.string () << "'";
+ }
+
+ // The target must be within this project.
+ //
+ if (!f.sub (rs.out_path ()))
+ {
+ fail (il) << what_tgt << " target path " << f
+ << " must be inside project output "
+ << "directory " << rs.out_path ();
+ }
+
+ // Note: type is resolved later.
+ //
+ dyn_targets.push_back (
+ dynamic_target {string (), move (f)});
+ }
+
continue;
+ }
+ // NOTE: similar code below.
+ //
if (optional<bool> u = add (move (r.second), &skip, rmt))
{
restart = *u;
@@ -2220,20 +2998,380 @@ namespace build2
break;
}
- break;
+ break; // case
+ }
+ case dyndep_format::lines:
+ {
+ bool tgt (dyn_tgt); // Reading targets or prerequisites.
+
+ for (string l; !restart; ++il.line) // Reuse the buffer.
+ {
+ if (eof (getline (is, l)))
+ break;
+
+ if (l.empty ())
+ {
+ if (!tgt)
+ fail (il) << "blank line in prerequisites list";
+
+ tgt = false; // Targets/prerequisites separating blank.
+ continue;
+ }
+
+ // See if this line starts with a space to indicate a non-
+ // existent prerequisite. This variable serves both as a
+ // flag and as a position of the beginning of the path.
+ //
+ size_t n (l.front () == ' ' ? 1 : 0);
+
+ if (tgt)
+ {
+ // NOTE: similar code above.
+ //
+ path f;
+ try
+ {
+ // Non-existent target doesn't make sense.
+ //
+ if (n)
+ throw invalid_path ("");
+
+ f = path (l);
+
+ if (f.relative ())
+ {
+ if (!cwd_tgt)
+ fail (il) << "relative " << what_tgt
+ << " target path '" << f
+ << "' in lines dependency declaration" <<
+ info << "consider using --target-cwd to specify "
+ << "relative path base";
+
+ f = *cwd_tgt / f;
+ }
+
+ // Note that unlike prerequisites, here we don't need
+ // normalize_external() since we expect the targets to
+ // be within this project.
+ //
+ f.normalize ();
+ }
+ catch (const invalid_path&)
+ {
+ fail (il) << "invalid " << what_tgt << " target path '"
+ << l << "'";
+ }
+
+ // The target must be within this project.
+ //
+ if (!f.sub (rs.out_path ()))
+ {
+ fail (il) << what_tgt << " target path " << f
+ << " must be inside project output directory "
+ << rs.out_path ();
+ }
+
+ // Note: type is resolved later.
+ //
+ dyn_targets.push_back (
+ dynamic_target {string (), move (f)});
+ }
+ else
+ {
+ path f;
+ try
+ {
+ f = path (l.c_str () + n, l.size () - n);
+
+ if (f.empty () ||
+ (n && f.to_directory ())) // Non-existent fsdir{}.
+ throw invalid_path ("");
+
+ if (f.relative ())
+ {
+ if (!n)
+ {
+ if (!cwd)
+ fail (il) << "relative " << what
+ << " prerequisite path '" << f
+ << "' in lines dependency declaration" <<
+ info << "consider using --cwd to specify "
+ << "relative path base";
+
+ f = *cwd / f;
+ }
+ }
+ else if (n)
+ {
+ // @@ TODO: non-existent absolute paths.
+ //
+ throw invalid_path ("");
+ }
+ }
+ catch (const invalid_path&)
+ {
+ fail (il) << "invalid " << what << " prerequisite path '"
+ << l << "'";
+ }
+
+ // NOTE: similar code above.
+ //
+ if (optional<bool> u = add (move (f), &skip, rmt))
+ {
+ restart = *u;
+
+ if (restart)
+ {
+ update = true;
+ l6 ([&]{trace << "restarting";});
+ }
+ }
+ else
+ {
+ // Trigger recompilation, mark as expected to fail, and
+ // bail out.
+ //
+ update = true;
+ deferred_failure = true;
+ break;
+ }
+ }
+ }
+
+ break; // case
}
}
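
For reference, the lines format parsed above is one path per line: with
--dyn-target the target paths come first, then a blank separator, then the
prerequisite paths, where a leading space marks a (necessarily relative)
prerequisite that does not exist yet and a trailing directory separator
makes an existing entry an fsdir{} prerequisite. A made-up example of such
input:

  /tmp/proj/out/docs/foo.html

  /tmp/proj/src/docs/foo.in
   gen/version.h

Without --dyn-target there is no target section and a blank line in the
prerequisite list is an error.
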
+ if (file)
+ ifs.close ();
+
// Bail out early if we have deferred a failure.
//
if (deferred_failure)
return;
+
+ // Clean after each depdb-dyndep execution.
+ //
+ if (prog)
+ clean (*environment_, ll);
}
}
- // Add the terminating blank line (we are updating depdb).
+ // Add the dynamic prerequisites terminating blank line if we are
+ // updating depdb and unless it's already there.
+ //
+ if (!cache && !skip_blank)
+ dd.expect ("");
+
+ // Handle dynamic targets.
//
- dd.expect ("");
+ if (dyn_tgt)
+ {
+ if (g != nullptr && g->members_static == 0 && dyn_targets.empty ())
+ fail (ll) << "group " << *g << " has no static or dynamic members";
+
+ // There is one more level (at least that we know of) to this rabbit
+ // hole: if the set of dynamic targets changes between clean and
+ // update and we do a `clean update` batch, then we will end up with
+ // old targets (as entered by clean from old depdb information)
+ // being present during update. So we need to clean them out.
+ //
+ // Optimize this for a first/single batch (common case) by noticing
+ // that there are only real targets to start with.
+ //
+ // Note that this doesn't affect explicit groups where we reset the
+ // members on each update (see adhoc_rule_buildscript::apply()).
+ //
+ optional<vector<const target*>> dts;
+ if (g == nullptr)
+ {
+ for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ {
+ if (m->decl != target_decl::real)
+ dts = vector<const target*> ();
+ }
+ }
+
+ struct map_ext_data
+ {
+ const char* what_tgt;
+ const map<string, const target_type*>& map_tt;
+ const path* f; // Updated on each iteration.
+ } d {what_tgt, map_tt, nullptr};
+
+ function<dyndep::map_extension_func> map_ext (
+ [this, &d] (const scope& bs, const string& n, const string& e)
+ {
+ small_vector<const target_type*, 2> tts;
+
+ // Check the custom mapping first.
+ //
+ auto i (d.map_tt.find (e));
+ if (i != d.map_tt.end ())
+ tts.push_back (i->second);
+ else
+ {
+ tts = dyndep::map_extension (bs, n, e, nullptr);
+
+ // Issue custom diagnostics suggesting --target-extension-type.
+ //
+ if (tts.size () > 1)
+ {
+ diag_record dr (fail);
+
+ dr << "mapping of " << d.what_tgt << " target path " << *d.f
+ << " to target type is ambiguous";
+
+ for (const target_type* tt: tts)
+ dr << info << "can be " << tt->name << "{}";
+
+ dr << info << "use --target-extension-type to provide custom "
+ << "mapping";
+ }
+ }
+
+ return tts;
+ });
+
+ function<dyndep::group_filter_func> filter;
+ if (g != nullptr)
+ {
+ // Skip static/duplicate members in explicit group.
+ //
+ filter = [] (mtime_target& g, const build2::file& m)
+ {
+ auto& ms (g.as<group> ().members);
+ return find (ms.begin (), ms.end (), &m) == ms.end ();
+ };
+ }
+
+ // Unlike for prerequisites, for targets we store in depdb both the
+ // resolved target type and path. The target type is used in clean
+ // (see adhoc_rule_buildscript::apply()) where we cannot easily get
+ // hold of all the dyndep options to map the path to target type.
+ // So the format of the target line is:
+ //
+ // <type> <path>
+ //
+ string l; // Reuse the buffer.
+ for (dynamic_target& dt: dyn_targets)
+ {
+ const path& f (dt.path);
+
+ d.f = &f; // Current file being mapped.
+
+ // Note that this logic should be consistent with what we have in
+ // adhoc_buildscript_rule::apply() for perform_clean.
+ //
+ const build2::file* ft (nullptr);
+ if (g != nullptr)
+ {
+ pair<const build2::file&, bool> r (
+ dyndep::inject_group_member (
+ what_tgt,
+ a, bs, *g,
+ f, // Can't move since need to return dyn_targets.
+ map_ext, *def_tt, filter));
+
+ // Note: no target_decl shenanigans since reset the members on
+ // each update.
+ //
+ if (!r.second)
+ {
+ dt.type.clear (); // Static indicator.
+ continue;
+ }
+
+ ft = &r.first;
+
+ // Note: we only currently support dynamic file members so it
+ // will be file if first.
+ //
+ g->members.push_back (ft);
+ }
+ else
+ {
+ pair<const build2::file&, bool> r (
+ dyndep::inject_adhoc_group_member (
+ what_tgt,
+ a, bs, t,
+ f, // Can't move since need to return dyn_targets.
+ map_ext, *def_tt));
+
+ // Note that we have to track the dynamic target even if it was
+ // already a member (think `b update && b clean update`).
+ //
+ if (!r.second && r.first.decl == target_decl::real)
+ {
+ dt.type.clear (); // Static indicator.
+ continue;
+ }
+
+ ft = &r.first;
+
+ if (dts)
+ dts->push_back (ft);
+ }
+
+ const char* tn (ft->type ().name);
+
+ if (dt.type.empty ())
+ dt.type = tn;
+ else if (dt.type != tn)
+ {
+ // This can, for example, happen if the user changed the
+ // extension to target type mapping. Say swapped extension
+ // variable values of two target types.
+ //
+ fail << "mapping of " << what_tgt << " target path " << f
+ << " to target type has changed" <<
+ info << "previously mapped to " << dt.type << "{}" <<
+ info << "now mapped to " << tn << "{}" <<
+ info << "perform from scratch rebuild of " << t;
+ }
+
+ if (!cache)
+ {
+ l = dt.type;
+ l += ' ';
+ l += f.string ();
+ dd.expect (l);
+ }
+ }
+
+ // Add the dynamic targets terminating blank line.
+ //
+ if (!cache)
+ dd.expect ("");
+
+ // Clean out old dynamic targets (skip the primary member).
+ //
+ if (dts)
+ {
+ assert (g == nullptr);
+
+ for (target* p (&t); p->adhoc_member != nullptr; )
+ {
+ target* m (p->adhoc_member);
+
+ if (m->decl != target_decl::real)
+ {
+ // While there could be quite a few dynamic targets (think
+ // something like Doxygen), this will hopefully be optimized
+ // down to a contiguous memory region scan for an integer and
+ // so should be fast.
+ //
+ if (find (dts->begin (), dts->end (), m) == dts->end ())
+ {
+ p->adhoc_member = m->adhoc_member; // Drop m.
+ continue;
+ }
+ }
+
+ p = m;
+ }
+ }
+ }
// Reload $< and $> to make sure they contain the newly discovered
// prerequisites and targets.
@@ -2242,7 +3380,8 @@ namespace build2
environment_->set_special_variables (a);
}
- // When add a special variable don't forget to update lexer::word().
+ // When adding a special variable don't forget to update lexer::word() and
+ // for-loop parsing in pre_parse_line().
//
bool parser::
special_variable (const string& n) noexcept
@@ -2251,15 +3390,24 @@ namespace build2
}
lookup parser::
- lookup_variable (name&& qual, string&& name, const location& loc)
+ lookup_variable (names&& qual, string&& name, const location& loc)
{
// In the pre-parse mode collect the referenced variable names for the
// script semantics change tracking.
//
+ // Note that during pre-parse a computed (including qualified) name
+ // is signalled as an empty name.
+ //
if (pre_parse_ || pre_parse_suspended_)
{
lookup r;
+ // Note that pre-parse can be switched on by the base parser even
+ // during execute.
+ //
+ if (!top_pre_parse_)
+ return r;
+
// Add the variable name skipping special variables and suppressing
// duplicates, unless the default variables change tracking is
// canceled with `depdb clear`. While at it, check if the script
@@ -2275,10 +3423,8 @@ namespace build2
{
if (pre_parse_suspended_)
{
- const variable* pvar (scope_->ctx.var_pool.find (name));
-
- if (pvar != nullptr)
- r = (*scope_)[*pvar];
+ if (const variable* var = scope_->var_pool ().find (name))
+ r = (*scope_)[*var];
}
if (!depdb_clear_)
@@ -2289,12 +3435,27 @@ namespace build2
vars.push_back (move (name));
}
}
+ else
+ {
+ // What about pre_parse_suspended_? Don't think it makes sense to
+ // diagnose this since it can be indirect (that is, via an
+ // intermediate variable).
+ //
+ if (perform_update_ && file_based_ && !computed_var_)
+ computed_var_ = loc;
+ }
return r;
}
if (!qual.empty ())
- fail (loc) << "qualified variable name";
+ {
+ // Qualified variable is computed and we expect the user to track
+ // its changes manually.
+ //
+ return build2::script::parser::lookup_variable (
+ move (qual), move (name), loc);
+ }
lookup r (environment_->lookup (name));
@@ -2305,13 +3466,13 @@ namespace build2
// diag builtin argument change (which can be affected by such a
// variable expansion) doesn't affect the script semantics and the
// depdb argument is specifically used for the script semantics change
- // tracking. We also omit this check it the depdb builtin is used in
- // the script, assuming that such variables are tracked manually, if
- // required.
+ // tracking. We also omit this check if the depdb "value" (string,
+ // hash) builtin is used in the script, assuming that such variables
+ // are tracked manually, if required.
//
if (script_ != nullptr &&
!script_->depdb_clear &&
- script_->depdb_preamble.empty ())
+ !script_->depdb_value)
{
if (r.defined () && !r.belongs (*environment_))
{
@@ -2329,9 +3490,12 @@ namespace build2
void parser::
lookup_function (string&& name, const location& loc)
{
- if (perform_update_ && file_based_ && !impure_func_)
+ // Note that pre-parse can be switched on by the base parser even
+ // during execute.
+ //
+ if (top_pre_parse_ && perform_update_ && file_based_ && !impure_func_)
{
- const function_overloads* f (ctx.functions.find (name));
+ const function_overloads* f (ctx->functions.find (name));
if (f != nullptr && !f->pure)
impure_func_ = make_pair (move (name), loc);
diff --git a/libbuild2/build/script/parser.hxx b/libbuild2/build/script/parser.hxx
index 362c834..8f86b24 100644
--- a/libbuild2/build/script/parser.hxx
+++ b/libbuild2/build/script/parser.hxx
@@ -28,7 +28,7 @@ namespace build2
// Pre-parse. Issue diagnostics and throw failed in case of an error.
//
public:
- parser (context& c): build2::script::parser (c, false /* relex */) {}
+ parser (context& c): build2::script::parser (c) {}
// Note that the returned script object references the passed path
// name.
@@ -65,11 +65,18 @@ namespace build2
pre_parse_script ();
void
- pre_parse_line (token&, token_type&, bool if_line = false);
+ pre_parse_line (token&, token_type&,
+ optional<line_type> flow_control_type = nullopt);
+
+ void
+ pre_parse_block_line (token&, token_type&, line_type block_type);
void
pre_parse_if_else (token&, token_type&);
+ void
+ pre_parse_loop (token&, token_type&, line_type);
+
command_expr
parse_command_line (token&, token_type&);
@@ -89,7 +96,6 @@ namespace build2
environment&, const script&, runner&,
bool enter = true, bool leave = true);
-
// Execute the first or the second (dyndep) half of the depdb
// preamble.
//
@@ -97,8 +103,10 @@ namespace build2
// runner's enter() function is called before the first preamble/body
// command execution and leave() -- after the last command.
//
+ // Note: target must be file or group.
+ //
void
- execute_depdb_preamble (action a, const scope& base, const file& t,
+ execute_depdb_preamble (action a, const scope& base, const target& t,
environment& e, const script& s, runner& r,
depdb& dd)
{
@@ -113,18 +121,28 @@ namespace build2
dd);
}
+ struct dynamic_target
+ {
+ string type; // Target type name (absent if static member).
+ build2::path path;
+ };
+
+ using dynamic_targets = vector<dynamic_target>;
+
void
execute_depdb_preamble_dyndep (
- action a, const scope& base, file& t,
+ action a, const scope& base, target& t,
environment& e, const script& s, runner& r,
- depdb& dd, bool& update, timestamp mt, bool& deferred_failure)
+ depdb& dd,
+ dynamic_targets& dyn_targets,
+ bool& update, timestamp mt, bool& deferred_failure)
{
exec_depdb_preamble (
a, base, t,
e, s, r,
s.depdb_preamble.begin () + *s.depdb_dyndep,
s.depdb_preamble.end (),
- dd, &update, mt, &deferred_failure);
+ dd, &dyn_targets, &update, mt, &deferred_failure);
}
// This version doesn't actually execute the depdb-dyndep builtin (but
@@ -133,7 +151,7 @@ namespace build2
// depdb-dyndep --byproduct logic (which fits better into the rule
// implementation).
//
- enum class dyndep_format {make};
+ enum class dyndep_format {make, lines};
struct dyndep_byproduct
{
@@ -148,14 +166,17 @@ namespace build2
dyndep_byproduct
execute_depdb_preamble_dyndep_byproduct (
- action a, const scope& base, const file& t,
+ action a, const scope& base, const target& t,
environment& e, const script& s, runner& r,
depdb& dd, bool& update, timestamp mt)
{
+ // Dummies.
+ //
// This is getting a bit ugly (we also don't really need to pass
// depdb here). One day we will find a better way...
//
- bool deferred_failure; // Dymmy.
+ dynamic_targets dyn_targets;
+ bool deferred_failure;
dyndep_byproduct v;
exec_depdb_preamble (
@@ -163,19 +184,26 @@ namespace build2
e, s, r,
s.depdb_preamble.begin () + *s.depdb_dyndep,
s.depdb_preamble.end (),
- dd, &update, mt, &deferred_failure, &v);
+ dd, &dyn_targets, &update, mt, &deferred_failure, &v);
return v;
}
- // Parse a special builtin line into names, performing the variable
- // and pattern expansions. If omit_builtin is true, then omit the
- // builtin name from the result.
+ // If the diag argument is true, then execute the preamble including
+ // the (trailing) diagnostics line and return the resulting names and
+ // its location (see exec_special() for the diagnostics line execution
+ // semantics). Otherwise, execute the preamble excluding the
+ // diagnostics line and return an empty names list and location. If
+ // requested, call the runner's enter() and leave() functions that
+ // initialize/clean up the environment before/after the preamble
+ // execution.
//
- names
- execute_special (const scope& root, const scope& base,
- environment&,
- const line&,
- bool omit_builtin = true);
+ // Note: having both root and base scopes for testing (where we pass
+ // global scope for both).
+ //
+ pair<names, location>
+ execute_diag_preamble (const scope& root, const scope& base,
+ environment&, const script&, runner&,
+ bool diag, bool enter, bool leave);
protected:
// Setup the parser for subsequent exec_*() function calls.
@@ -196,24 +224,34 @@ namespace build2
exec_lines (l.begin (), l.end (), c);
}
+ // Parse a special builtin line into names, performing the variable
+ // and pattern expansions. Optionally, skip the first token (builtin
+ // name, etc).
+ //
names
exec_special (token&, build2::script::token_type&, bool skip_first);
+ // Note: target must be file or group.
+ //
void
- exec_depdb_preamble (action, const scope& base, const file&,
+ exec_depdb_preamble (action, const scope& base, const target&,
environment&, const script&, runner&,
lines_iterator begin, lines_iterator end,
depdb&,
+ dynamic_targets* dyn_targets = nullptr,
bool* update = nullptr,
optional<timestamp> mt = nullopt,
bool* deferred_failure = nullptr,
dyndep_byproduct* = nullptr);
+ // Note: target must be file or group.
+ //
void
exec_depdb_dyndep (token&, build2::script::token_type&,
size_t line_index, const location&,
- action, const scope& base, file&,
+ action, const scope& base, target&,
depdb&,
+ dynamic_targets& dyn_targets,
bool& update,
timestamp,
bool& deferred_failure,
@@ -229,7 +267,7 @@ namespace build2
//
protected:
virtual lookup
- lookup_variable (name&&, string&&, const location&) override;
+ lookup_variable (names&&, string&&, const location&) override;
virtual void
lookup_function (string&&, const location&) override;
@@ -254,9 +292,9 @@ namespace build2
script* script_;
const small_vector<action, 1>* actions_; // Non-NULL during pre-parse.
- // True if this script is for file-based targets and performing update
- // is one of the actions, respectively. Only set for the pre-parse
- // mode.
+ // True if this script is for file- or file group-based targets and
+ // performing update is one of the actions, respectively. Only set for
+ // the pre-parse mode.
//
bool file_based_;
bool perform_update_;
@@ -286,18 +324,24 @@ namespace build2
//
// If the diag builtin is encountered, then its whole line is saved
// (including the leading 'diag' word) for later execution and the
- // diagnostics weight is set to 4.
+ // diagnostics weight is set to 4. The preceding lines, which can only
+ // contain variable assignments (including via the set builtin,
+ // potentially inside the flow control constructs), are also saved.
//
// Any attempt to manually set the custom diagnostics twice (the diag
// builtin after the script name or after another diag builtin) is
// reported as ambiguity.
//
- // At the end of pre-parsing either diag_name_ or diag_line_ (but not
- // both) are present.
+ // If no script name is deduced by the end of pre-parsing and the
+ // script is used for a single operation, then use this operation's
+ // name as a script name.
+ //
+ // At the end of pre-parsing either diag_name_ is present or
+ // diag_preamble_ is not empty (but not both).
//
optional<pair<string, location>> diag_name_;
optional<pair<string, location>> diag_name2_; // Ambiguous script name.
- optional<pair<line, location>> diag_line_;
+ lines diag_preamble_;
uint8_t diag_weight_ = 0;
// Custom dependency change tracking.
@@ -327,10 +371,12 @@ namespace build2
// recipe should be provided.
//
//
- optional<location> depdb_clear_; // depdb-clear location.
+ optional<location> depdb_clear_; // depdb-clear location.
+ bool depdb_value_ = false; // depdb-{string,hash}
optional<pair<location, size_t>>
depdb_dyndep_; // depdb-dyndep location/position.
bool depdb_dyndep_byproduct_ = false; // --byproduct
+ bool depdb_dyndep_dyn_target_ = false; // --dyn-target
lines depdb_preamble_; // Note: excluding depdb-clear.
// If present, the first impure function called in the body of the
@@ -344,7 +390,18 @@ namespace build2
//
optional<pair<string, location>> impure_func_;
- // True during pre-parsing when the pre-parse mode is temporarily
+ // Similar to the impure function above but for a computed (e.g.,
+ // target-qualified) variable expansion. In this case we don't have a
+ // name (it's computed).
+ //
+ optional<location> computed_var_;
+
+ // True if we (rather than the base parser) turned on the pre-parse
+ // mode.
+ //
+ bool top_pre_parse_;
+
+ // True during top-pre-parsing when the pre-parse mode is temporarily
// suspended to perform expansion.
//
bool pre_parse_suspended_ = false;
@@ -354,19 +411,19 @@ namespace build2
// Before the script line gets parsed, it is set to a temporary value
// that will by default be appended to the script. However,
// parse_program() can point it to a different location where the line
- // should be saved instead (e.g., diag_line_, etc) or set it to NULL
- // if the line is handled in an ad-hoc way and should be dropped
- // (e.g., depdb_clear_, etc).
+ // should be saved instead (e.g., diag_preamble_ back, etc) or set it
+ // to NULL if the line is handled in an ad-hoc way and should be
+ // dropped (e.g., depdb_clear_, etc).
//
line* save_line_;
- // The if-else nesting level (and in the future for other flow
- // control constructs).
+ // The flow control constructs nesting level.
//
- // Maintained during pre-parsing and is incremented when the cmd_if or
- // cmd_ifn lines are encountered, which in particular means that it is
- // already incremented by the time the if-condition expression is
- // pre-parsed. Decremented when the cmd_end line is encountered.
+ // Maintained during pre-parsing and is incremented when flow control
+ // construct condition lines are encountered, which in particular
+ // means that it is already incremented by the time the condition
+ // expression is pre-parsed. Decremented when the cmd_end line is
+ // encountered.
//
size_t level_ = 0;
diff --git a/libbuild2/build/script/parser.test.cxx b/libbuild2/build/script/parser.test.cxx
index 4089efa..97eac22 100644
--- a/libbuild2/build/script/parser.test.cxx
+++ b/libbuild2/build/script/parser.test.cxx
@@ -29,35 +29,58 @@ namespace build2
class print_runner: public runner
{
public:
- print_runner (bool line): line_ (line) {}
+ print_runner (bool line, bool iterations):
+ line_ (line),
+ iterations_ (iterations) {}
virtual void
enter (environment&, const location&) override {}
virtual void
- run (environment&,
+ run (environment& env,
const command_expr& e,
- size_t i,
- const location&) override
+ const iteration_index* ii, size_t i,
+ const function<command_function>& cf,
+ const location& ll) override
{
+ // If the function is specified, then just execute it with an empty
+ // stdin so it can perform the housekeeping (stop replaying tokens,
+ // increment line index, etc).
+ //
+ if (cf != nullptr)
+ {
+ assert (e.size () == 1 && !e[0].pipe.empty ());
+
+ const command& c (e[0].pipe.back ());
+
+ // Must be enforced by the caller.
+ //
+ assert (!c.out && !c.err && !c.exit);
+
+ cf (env, c.arguments,
+ fdopen_null (), nullptr /* pipe */,
+ nullopt /* deadline */,
+ ll);
+ }
+
cout << e;
- if (line_)
- cout << " # " << i;
+ if (line_ || iterations_)
+ print_line_info (ii, i);
cout << endl;
}
virtual bool
- run_if (environment&,
- const command_expr& e,
- size_t i,
- const location&) override
+ run_cond (environment&,
+ const command_expr& e,
+ const iteration_index* ii, size_t i,
+ const location&) override
{
cout << "? " << e;
- if (line_)
- cout << " # " << i;
+ if (line_ || iterations_)
+ print_line_info (ii, i);
cout << endl;
@@ -68,16 +91,36 @@ namespace build2
leave (environment&, const location&) override {}
private:
+ void
+ print_line_info (const iteration_index* ii, size_t i) const
+ {
+ cout << " #";
+
+ if (line_)
+ cout << ' ' << i;
+
+ if (iterations_ && ii != nullptr)
+ {
+ string s;
+ for (const iteration_index* i (ii); i != nullptr; i = i->prev)
+ s.insert (0, " i" + to_string (i->index));
+
+ cout << s;
+ }
+ }
+
+ private:
bool line_;
+ bool iterations_;
};
// Usages:
//
- // argv[0] [-l]
+ // argv[0] [-l] [-r]
// argv[0] -b [-t]
// argv[0] -d [-t]
+ // argv[0] -g [-t] [<diag-name>]
// argv[0] -q
- // argv[0] -g [<diag-name>]
//
// In the first form read the script from stdin and trace the script
// body execution to stdout using the custom print runner.
@@ -88,26 +131,33 @@ namespace build2
// In the third form read the script from stdin, parse it and dump the
// depdb preamble lines to stdout.
//
- // In the forth form read the script from stdin, parse it and print
- // line tokens quoting information to stdout.
- //
- // In the fifth form read the script from stdin, parse it and print the
+ // In the fourth form read the script from stdin, parse it and print the
// low-verbosity script diagnostics name or custom low-verbosity
// diagnostics to stdout. If the script doesn't deduce any of them, then
// print the diagnostics and exit with non-zero code.
//
+ // In the fifth form read the script from stdin, parse it and print
+ // line tokens quoting information to stdout.
+ //
// -l
// Print the script line number for each executed expression.
//
+ // -r
+ // Print the loop iteration numbers for each executed expression.
+ //
// -b
// Dump the parsed script body to stdout.
//
// -d
// Dump the parsed script depdb preamble to stdout.
//
+ // -g
+ // Dump the low-verbosity script diagnostics name or custom
+ // low-verbosity diagnostics to stdout.
+ //
// -t
- // Print true if the body (-b) or depdb preamble (-d) references the
- // temporary directory and false otherwise.
+ // Print true if the body (-b), depdb preamble (-d), or diag preamble
+ // (-g) references the temporary directory and false otherwise.
//
// -q
// Print the parsed script tokens quoting information to sdout. If a
@@ -117,10 +167,6 @@ namespace build2
// <quoting> := 'S' | 'D' | 'M'
// <completeness> := 'C' | 'P'
//
- // -g
- // Dump the low-verbosity script diagnostics name or custom
- // low-verbosity diagnostics to stdout.
- //
int
main (int argc, char* argv[])
{
@@ -131,11 +177,12 @@ namespace build2
run,
body,
depdb_preamble,
- quoting,
- diag
+ diag,
+ quoting
} m (mode::run);
bool print_line (false);
+ bool print_iterations (false);
optional<string> diag_name;
bool temp_dir (false);
@@ -145,19 +192,23 @@ namespace build2
if (a == "-l")
print_line = true;
+ else if (a == "-r")
+ print_iterations = true;
else if (a == "-b")
m = mode::body;
else if (a == "-d")
m = mode::depdb_preamble;
+ else if (a == "-g")
+ m = mode::diag;
else if (a == "-t")
{
- assert (m == mode::body || m == mode::depdb_preamble);
+ assert (m == mode::body ||
+ m == mode::depdb_preamble ||
+ m == mode::diag);
temp_dir = true;
}
else if (a == "-q")
m = mode::quoting;
- else if (a == "-g")
- m = mode::diag;
else
{
if (m == mode::diag)
@@ -170,19 +221,20 @@ namespace build2
}
}
- assert (!print_line || m == mode::run);
- assert (!diag_name || m == mode::diag);
+ assert (!print_line || m == mode::run || m == mode::diag);
+ assert (!print_iterations || m == mode::run || m == mode::diag);
+ assert (!diag_name || m == mode::diag);
// Fake build system driver, default verbosity.
//
init_diag (1);
- init (nullptr, argv[0]);
+ init (nullptr, argv[0], true);
// Serial execution.
//
scheduler sched (1);
global_mutexes mutexes (1);
- file_cache fcache;
+ file_cache fcache (true);
context ctx (sched, mutexes, fcache);
try
@@ -203,6 +255,8 @@ namespace build2
tt.path (path ("driver"));
+ const scope& bs (tt.base_scope ());
+
small_vector<action, 1> acts {perform_update_id};
// Parse and run.
@@ -210,7 +264,7 @@ namespace build2
parser p (ctx);
path_name nm ("buildfile");
- script s (p.pre_parse (tt.base_scope (), tt.type (), acts,
+ script s (p.pre_parse (bs, tt.type (), acts,
cin, nm,
11 /* line */,
(m != mode::diag
@@ -222,9 +276,29 @@ namespace build2
{
case mode::run:
{
- environment e (perform_update_id, tt, s.body_temp_dir);
- print_runner r (print_line);
- p.execute_body (ctx.global_scope, ctx.global_scope, e, s, r);
+ environment e (perform_update_id, tt, bs, false /* temp_dir */);
+ print_runner r (print_line, print_iterations);
+
+ bool exec_diag (!s.diag_preamble.empty ());
+
+ if (exec_diag)
+ {
+ if (s.diag_preamble_temp_dir)
+ e.set_temp_dir_variable ();
+
+ p.execute_diag_preamble (ctx.global_scope, ctx.global_scope,
+ e, s, r,
+ false /* diag */,
+ true /* enter */,
+ false /* leave */);
+ }
+
+ if (s.body_temp_dir && !s.diag_preamble_temp_dir)
+ e.set_temp_dir_variable ();
+
+ p.execute_body (ctx.global_scope, ctx.global_scope,
+ e, s, r,
+ !exec_diag /* enter */);
break;
}
case mode::diag:
@@ -235,14 +309,26 @@ namespace build2
}
else
{
- assert (s.diag_line);
+ if (!temp_dir)
+ {
+ environment e (perform_update_id,
+ tt,
+ bs,
+ s.diag_preamble_temp_dir);
- environment e (perform_update_id, tt, false /* temp_dir */);
+ print_runner r (print_line, print_iterations);
- cout << "diag: " << p.execute_special (ctx.global_scope,
+ names diag (p.execute_diag_preamble (ctx.global_scope,
ctx.global_scope,
- e,
- *s.diag_line) << endl;
+ e, s, r,
+ true /* diag */,
+ true /* enter */,
+ true /* leave */).first);
+
+ cout << "diag: " << diag << endl;
+ }
+ else
+ cout << (s.diag_preamble_temp_dir ? "true" : "false") << endl;
}
break;
diff --git a/libbuild2/build/script/runner.cxx b/libbuild2/build/script/runner.cxx
index 51139d4..5d9764b 100644
--- a/libbuild2/build/script/runner.cxx
+++ b/libbuild2/build/script/runner.cxx
@@ -28,12 +28,37 @@ namespace build2
//
for (auto i (env.cleanups.begin ()); i != env.cleanups.end (); )
{
- const target* m (&env.target);
- for (; m != nullptr; m = m->adhoc_member)
+ const target* m (nullptr);
+ if (const group* g = env.target.is_a<group> ())
{
- if (const path_target* pm = m->is_a<path_target> ())
- if (i->path == pm->path ())
- break;
+ for (const target* gm: g->members)
+ {
+ if (const path_target* pm = gm->is_a<path_target> ())
+ {
+ if (i->path == pm->path ())
+ {
+ m = gm;
+ break;
+ }
+ }
+ }
+ }
+ else if (const fsdir* fd = env.target.is_a<fsdir> ())
+ {
+ // Compare ignoring the trailing directory separator.
+ //
+ if (path_traits::compare (i->path.string (),
+ fd->dir.string ()) == 0)
+ m = fd;
+ }
+ else
+ {
+ for (m = &env.target; m != nullptr; m = m->adhoc_member)
+ {
+ if (const path_target* pm = m->is_a<path_target> ())
+ if (i->path == pm->path ())
+ break;
+ }
}
if (m != nullptr)
@@ -96,39 +121,43 @@ namespace build2
void default_runner::
run (environment& env,
const command_expr& expr,
- size_t li,
+ const iteration_index* ii, size_t li,
+ const function<command_function>& cf,
const location& ll)
{
if (verb >= 3)
text << ": " << expr;
// Run the expression if we are not in the dry-run mode or if it
- // executes the set or exit builtin and just print the expression
- // otherwise at verbosity level 2 and up.
+ // executes the set or exit builtin or it is a for-loop. Otherwise,
+ // just print the expression at verbosity level 2 and up.
//
if (!env.context.dry_run ||
find_if (expr.begin (), expr.end (),
- [] (const expr_term& et)
+ [&cf] (const expr_term& et)
{
const process_path& p (et.pipe.back ().program);
return p.initial == nullptr &&
(p.recall.string () == "set" ||
- p.recall.string () == "exit");
+ p.recall.string () == "exit" ||
+ (cf != nullptr &&
+ p.recall.string () == "for"));
}) != expr.end ())
- build2::script::run (env, expr, li, ll);
+ build2::script::run (env, expr, ii, li, ll, cf);
else if (verb >= 2)
text << expr;
}
bool default_runner::
- run_if (environment& env,
- const command_expr& expr,
- size_t li, const location& ll)
+ run_cond (environment& env,
+ const command_expr& expr,
+ const iteration_index* ii, size_t li,
+ const location& ll)
{
if (verb >= 3)
text << ": ?" << expr;
- return build2::script::run_if (env, expr, li, ll);
+ return build2::script::run_cond (env, expr, ii, li, ll);
}
}
}
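
The dry-run logic in default_runner::run() above boils down to a predicate over the last program of each pipe in the expression. Here is a stand-alone approximation of that predicate, using plain strings instead of process_path/expr_term (an assumption for illustration, not the actual types).

#include <algorithm>
#include <functional>
#include <string>
#include <vector>

// Hypothetical stand-in for build2's command_function.
//
using command_function = std::function<void ()>;

// Return true if the expression should still be executed in dry-run mode:
// some pipe ends with the set or exit builtin, or with the for pseudo-builtin
// when a command function is supplied.
//
static bool
run_in_dry_run (const std::vector<std::string>& last_programs,
                const command_function& cf)
{
  return std::find_if (last_programs.begin (), last_programs.end (),
                       [&cf] (const std::string& p)
                       {
                         return p == "set" ||
                                p == "exit" ||
                                (cf != nullptr && p == "for");
                       }) != last_programs.end ();
}
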
diff --git a/libbuild2/build/script/runner.hxx b/libbuild2/build/script/runner.hxx
index 558de9b..ec8a948 100644
--- a/libbuild2/build/script/runner.hxx
+++ b/libbuild2/build/script/runner.hxx
@@ -32,17 +32,21 @@ namespace build2
// Location is the start position of this command line in the script.
// It can be used in diagnostics.
//
+ // Optionally, execute the specified function instead of the last
+ // pipe command.
+ //
virtual void
run (environment&,
const command_expr&,
- size_t index,
+ const iteration_index*, size_t index,
+ const function<command_function>&,
const location&) = 0;
virtual bool
- run_if (environment&,
- const command_expr&,
- size_t,
- const location&) = 0;
+ run_cond (environment&,
+ const command_expr&,
+ const iteration_index*, size_t,
+ const location&) = 0;
// Location is the script end location (for diagnostics, etc).
//
@@ -52,9 +56,9 @@ namespace build2
// Run command expressions.
//
- // In dry-run mode don't run the expressions unless they are if-
- // conditions or execute the set or exit builtins, but print them at
- // verbosity level 2 and up.
+ // In dry-run mode don't run the expressions unless they are flow
+ // control construct conditions or execute the set or exit builtins, but
+ // print them at verbosity level 2 and up.
//
class default_runner: public runner
{
@@ -65,14 +69,15 @@ namespace build2
virtual void
run (environment&,
const command_expr&,
- size_t,
+ const iteration_index*, size_t,
+ const function<command_function>&,
const location&) override;
virtual bool
- run_if (environment&,
- const command_expr&,
- size_t,
- const location&) override;
+ run_cond (environment&,
+ const command_expr&,
+ const iteration_index*, size_t,
+ const location&) override;
virtual void
leave (environment&, const location&) override;
diff --git a/libbuild2/build/script/script.cxx b/libbuild2/build/script/script.cxx
index 480903e..0d96cc3 100644
--- a/libbuild2/build/script/script.cxx
+++ b/libbuild2/build/script/script.cxx
@@ -7,6 +7,8 @@
#include <libbuild2/target.hxx>
+#include <libbuild2/adhoc-rule-buildscript.hxx> // include_unmatch*
+
#include <libbuild2/script/timeout.hxx>
#include <libbuild2/build/script/parser.hxx>
@@ -28,18 +30,20 @@ namespace build2
environment::
environment (action a,
const target_type& t,
+ const scope_type& s,
bool temp,
const optional<timestamp>& dl)
: build2::script::environment (
t.ctx,
- cast<target_triplet> (t.ctx.global_scope["build.host"]),
+ *t.ctx.build_host,
dir_name_view (&work, &wd_name),
temp_dir.path, false /* temp_dir_keep */,
redirect (redirect_type::none),
redirect (redirect_type::merge, 2),
redirect (redirect_type::pass)),
target (t),
- vars (context, false /* global */),
+ scope (s),
+ vars (context, false /* shared */), // Note: managed.
var_ts (var_pool.insert (">")),
var_ps (var_pool.insert ("<")),
script_deadline (to_deadline (dl, false /* success */))
@@ -56,11 +60,27 @@ namespace build2
{
// $>
//
+ // What should it contain for an explicit group? While it may seem
+ // that just the members should be enough (and analogous to the ad
+ // hoc case), this won't let us get the group name for diagnostics.
+ // So the group name followed by all the members seems like the
+ // logical choice.
+ //
names ns;
- for (const target_type* m (&target);
- m != nullptr;
- m = m->adhoc_member)
- m->as_name (ns);
+
+ if (const group* g = target.is_a<group> ())
+ {
+ g->as_name (ns);
+ for (const target_type* m: g->members)
+ m->as_name (ns);
+ }
+ else
+ {
+ for (const target_type* m (&target);
+ m != nullptr;
+ m = m->adhoc_member)
+ m->as_name (ns);
+ }
assign (var_ts) = move (ns);
}
@@ -73,13 +93,25 @@ namespace build2
// much sense, they could be handy to exclude certain prerequisites
// from $< while still treating them as such, especially in rule.
//
+ // While initially we treated update=unmatch prerequisites as
+ // implicitly ad hoc, this turned out to be not quite correct, so
+ // now we add them unless they are explicitly marked ad hoc.
+ //
names ns;
- for (const prerequisite_target& pt: target.prerequisite_targets[a])
+ for (const prerequisite_target& p: target.prerequisite_targets[a])
{
// See adhoc_buildscript_rule::execute_update_prerequisites().
//
- if (pt.target != nullptr && !pt.adhoc)
- pt.target->as_name (ns);
+ if (const target_type* pt =
+ p.target != nullptr ? (p.adhoc () ? nullptr : p.target) :
+ (p.include & adhoc_buildscript_rule::include_unmatch) != 0 &&
+ (p.include & prerequisite_target::include_adhoc) == 0 &&
+ (p.include & adhoc_buildscript_rule::include_unmatch_adhoc) == 0
+ ? reinterpret_cast<target_type*> (p.data)
+ : nullptr)
+ {
+ pt->as_name (ns);
+ }
}
assign (var_ps) = move (ns);
@@ -154,7 +186,7 @@ namespace build2
}
void environment::
- set_variable (string&& nm,
+ set_variable (string nm,
names&& val,
const string& attrs,
const location& ll)
@@ -233,7 +265,7 @@ namespace build2
// in parallel). Plus, if there is no such variable, then we cannot
// possibly find any value.
//
- const variable* pvar (context.var_pool.find (n));
+ const variable* pvar (scope.var_pool ().find (n));
if (pvar == nullptr)
return lookup_type ();
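
The $> logic above selects between two shapes: an explicit group contributes its own name followed by all member names, while any other target contributes itself plus its ad hoc member chain. A stand-alone sketch of that selection using a much-simplified stand-in structure (not build2's target model):

#include <string>
#include <vector>

// Hypothetical, much-simplified stand-in for a target.
//
struct target
{
  std::string name;
  const target* adhoc_member = nullptr;   // Ad hoc member chain.
  std::vector<const target*> members;     // Members of an explicit group.
  bool is_group = false;
};

// Build the value of $> for the given primary target.
//
static std::vector<std::string>
targets_value (const target& t)
{
  std::vector<std::string> ns;

  if (t.is_group)
  {
    ns.push_back (t.name);                // Group name for diagnostics.
    for (const target* m: t.members)
      ns.push_back (m->name);
  }
  else
  {
    for (const target* m (&t); m != nullptr; m = m->adhoc_member)
      ns.push_back (m->name);
  }

  return ns;
}
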
diff --git a/libbuild2/build/script/script.hxx b/libbuild2/build/script/script.hxx
index 0619253..08f1bf4 100644
--- a/libbuild2/build/script/script.hxx
+++ b/libbuild2/build/script/script.hxx
@@ -20,14 +20,18 @@ namespace build2
namespace script
{
using build2::script::line;
- using build2::script::lines;
using build2::script::line_type;
+ using build2::script::lines;
using build2::script::redirect;
using build2::script::redirect_type;
+ using build2::script::command;
using build2::script::expr_term;
using build2::script::command_expr;
+ using build2::script::iteration_index;
using build2::script::deadline;
using build2::script::timeout;
+ using build2::script::pipe_command;
+ using build2::script::command_function;
// Forward declarations.
//
@@ -44,13 +48,11 @@ namespace build2
class script
{
public:
- using lines_type = build::script::lines;
-
// Note that the variables are not pre-entered into a pool during the
// parsing phase, so the line variable pointers are NULL.
//
- lines_type body;
- bool body_temp_dir = false; // True if the body references $~.
+ lines body;
+ bool body_temp_dir = false; // True if the body references $~.
// Referenced ordinary (non-special) variables.
//
@@ -65,20 +67,24 @@ namespace build2
small_vector<string, 2> vars; // 2 for command and options.
// Command name for low-verbosity diagnostics and custom low-verbosity
- // diagnostics line. Note: cannot be both (see the script parser for
+ // diagnostics line, potentially preceded with the variable
+ // assignments. Note: cannot be both (see the script parser for
// details).
//
optional<string> diag_name;
- optional<line> diag_line;
+ lines diag_preamble;
+ bool diag_preamble_temp_dir = false; // True if refs $~.
// The script's custom dependency change tracking lines (see the
// script parser for details).
//
bool depdb_clear;
+ bool depdb_value; // String or hash.
optional<size_t> depdb_dyndep; // Pos of first dyndep.
bool depdb_dyndep_byproduct = false; // dyndep --byproduct
- lines_type depdb_preamble;
- bool depdb_preamble_temp_dir = false; // True if refs $~.
+ bool depdb_dyndep_dyn_target = false;// dyndep --dyn-target
+ lines depdb_preamble; // Note include vars.
+ bool depdb_preamble_temp_dir = false;// True if refs $~.
location start_loc;
location end_loc;
@@ -87,10 +93,12 @@ namespace build2
class environment: public build2::script::environment
{
public:
+ using scope_type = build2::scope;
using target_type = build2::target;
environment (action,
const target_type&,
+ const scope_type&,
bool temp_dir,
const optional<timestamp>& deadline = nullopt);
@@ -111,11 +119,12 @@ namespace build2
environment& operator= (const environment&) = delete;
public:
- // Primary target this environment is for.
+ // Primary target this environment is for and its base scope.
//
const target_type& target;
+ const scope_type& scope;
- // Script-local variable pool and map.
+ // Script-private variable pool and map.
//
// Note that it may be tempting to reuse the rule-specific variables
// for this but they should not be modified during execution (i.e.,
@@ -161,7 +170,7 @@ namespace build2
size_t exec_line = 1;
virtual void
- set_variable (string&& name,
+ set_variable (string name,
names&&,
const string& attrs,
const location&) override;
diff --git a/libbuild2/build/script/types-parsers.cxx b/libbuild2/build/script/types-parsers.cxx
deleted file mode 100644
index 9ecfa13..0000000
--- a/libbuild2/build/script/types-parsers.cxx
+++ /dev/null
@@ -1,56 +0,0 @@
-// file : libbuild2/build/script/types-parsers.cxx -*- C++ -*-
-// license : MIT; see accompanying LICENSE file
-
-#include <libbuild2/build/script/types-parsers.hxx>
-
-#include <libbuild2/build/script/builtin-options.hxx> // cli namespace
-
-namespace build2
-{
- namespace build
- {
- namespace script
- {
- namespace cli
- {
- template <typename T>
- static void
- parse_path (T& x, scanner& s)
- {
- const char* o (s.next ());
-
- if (!s.more ())
- throw missing_value (o);
-
- const char* v (s.next ());
-
- try
- {
- x = T (v);
-
- if (x.empty ())
- throw invalid_value (o, v);
- }
- catch (const invalid_path&)
- {
- throw invalid_value (o, v);
- }
- }
-
- void parser<path>::
- parse (path& x, bool& xs, scanner& s)
- {
- xs = true;
- parse_path (x, s);
- }
-
- void parser<dir_path>::
- parse (dir_path& x, bool& xs, scanner& s)
- {
- xs = true;
- parse_path (x, s);
- }
- }
- }
- }
-}
diff --git a/libbuild2/build/script/types-parsers.hxx b/libbuild2/build/script/types-parsers.hxx
deleted file mode 100644
index a42dab7..0000000
--- a/libbuild2/build/script/types-parsers.hxx
+++ /dev/null
@@ -1,49 +0,0 @@
-// file : libbuild2/build/script/types-parsers.hxx -*- C++ -*-
-// license : MIT; see accompanying LICENSE file
-
-// CLI parsers, included into the generated source files.
-//
-
-#ifndef LIBBUILD2_BUILD_SCRIPT_TYPES_PARSERS_HXX
-#define LIBBUILD2_BUILD_SCRIPT_TYPES_PARSERS_HXX
-
-#include <libbuild2/types.hxx>
-
-namespace build2
-{
- namespace build
- {
- namespace script
- {
- namespace cli
- {
- class scanner;
-
- template <typename T>
- struct parser;
-
- template <>
- struct parser<path>
- {
- static void
- parse (path&, bool&, scanner&);
-
- static void
- merge (path& b, const path& a) {b = a;}
- };
-
- template <>
- struct parser<dir_path>
- {
- static void
- parse (dir_path&, bool&, scanner&);
-
- static void
- merge (dir_path& b, const dir_path& a) {b = a;}
- };
- }
- }
- }
-}
-
-#endif // LIBBUILD2_BUILD_SCRIPT_TYPES_PARSERS_HXX
diff --git a/libbuild2/buildfile b/libbuild2/buildfile
index ee320e4..3518d93 100644
--- a/libbuild2/buildfile
+++ b/libbuild2/buildfile
@@ -4,7 +4,7 @@
# NOTE: remember to update bundled_modules in libbuild2/module.cxx if adding a
# new module.
#
-bundled_modules = bash/ bin/ c/ cc/ cxx/ in/ version/
+bundled_modules = bash/ bin/ c/ cc/ cli/ cxx/ in/ version/
./: lib{build2} $bundled_modules
@@ -25,8 +25,15 @@ include $bundled_modules
#
intf_libs = $libbutl
-lib{build2}: libul{build2}: \
- {hxx ixx txx cxx}{* -utility-*installed -config -version -*.test...} \
+lib{build2}: libul{build2}: \
+ {hxx ixx txx cxx}{* -utility-*installed \
+ -common-options \
+ -b-options \
+ -config \
+ -version \
+ -*.test...} \
+ {hxx ixx cxx}{common-options} \
+ {hxx ixx cxx}{b-options} \
{hxx}{config version}
libul{build2}: script/{hxx ixx txx cxx}{** -*-options -**.test...} \
@@ -52,42 +59,120 @@ lib{build2}: cxx{utility-uninstalled}: for_install = false
libul{build2}: config/{hxx ixx txx cxx}{** -host-config -**.test...} \
config/cxx{host-config}
+# Derive ~host and ~build2 configurations from current configuration.
+#
# This will of course blow up spectacularly if we are cross-compiling. But
# let's wait and enjoy the fireworks (and get a sense of why someone would
# want to cross-compile a build system).
#
-config/cxx{host-config}: config/in{host-config}
+# For the ~host configuration we only want c/cxx/cc and bin that they load.
+# For ~build2 we want to keep everything except dist.
+#
+# We also remove comment lines which could be confused with preprocessor
+# directives by some lesser compilers and blank lines between groups of
+# options which could cause spurious rebuilds when we filter out entire
+# groups.
+#
+# For ~host also filter out config.bin.lib/config.bin.*.lib (static/shared
+# library build/link preferences). In particular, we don't want to force
+# config.bin.lib=shared since that will cause static libraries to link shared
+# versions of their prerequisites (see mysql-client for a case where this can
+# make a difference).
+#
+# For ~build2 also filter out config.install.chroot -- we definitely don't
+# want it carried through. Also filter out variables that control tests
+# execution.
+#
+# Finally, for both ~host and ~build2 we keep config.config.environment
+# but strip config.config.hermetic* (we shouldn't be forcing hermiticity
+# on the users of ~host/~build2; they can decide for themselves if they
+# want it).
+#
+# The *_no_warnings variants are the same except with C/C++ compiler warnings
+# suppressed (in particular, used for the private host configuration in bpkg).
+#
+host_config_lines = [strings]
+build2_config_lines = [strings]
+
+host_config_no_warnings_lines = [strings]
+build2_config_no_warnings_lines = [strings]
+
+for l: $regex.replace_lines( \
+ $config.save(), \
+ '^( *(#|(config\.(test[. ]|dist\.|install\.chroot|config\.hermetic))).*|)$', \
+ [null])
{
- # For the ~host configuration we only want c/cxx/cc and bin that they load.
- # For ~build2 we want to keep everything except dist.
- #
- # We also remove comment lines which could be confused with preprocessor
- # directives by some lesser compilers and blank lines between groups of
- # options which could cause spurious rebuilds when we filter out entire
- # groups.
- #
- # For ~build2 also filter out config.install.chroot -- we definitely don't
- # want it carried through. Also filter out variables that control tests
- # execution.
- #
- # Finally, for both ~host and ~build2 we keep config.config.environment
- # but strip config.config.hermetic* (we shouldn't be forcing hermiticity
- # on the users of ~host/~build2; they can decide for themselves if they
- # want it).
+ # Note: also preserve config.version.
#
- build2_config = $regex.replace_lines( \
- $config.save(), \
- '^( *(#|(config\.(test[. ]|dist\.|install\.chroot|config\.hermetic))).*|)$', \
- [null], \
- return_lines)
+ h = [null]
+ if $regex.match( \
+ $l, \
+ ' *config\.(c[. ]|cxx[. ]|cc[.]|bin[.]|config.environment |version ).*')
+ {
+ if! ($regex.match(\
+ $l, \
+ ' *config\.bin\.(lib|exe\.lib|liba\.lib|libs\.lib)[ =].*'))
+ {
+ # Filter out sanitizer options in ~host. We run the toolchain with
+ # various sanitizers on CI but sanitizers cause issues in some packages.
+ # Note that we can have both -fsanitize and -fno-sanitize forms. For
+ # example:
+ #
+ # -fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all
+ #
+ if $regex.match($l, ' *config\.(c|cxx|cc)\.(coptions|loptions)[ =].*')
+ {
+ h = $regex.replace($l, ' ?-f(no-)?sanitize[=-][^ ]+', '')
+ }
+ else
+ h = $l
+ }
+ }
+
+ if ($h != [null])
+ host_config_lines += $h
+
+ build2_config_lines += $l
- # Also preserve config.version.
+ # Append the warning suppressing option to config.{c,cxx}.coptions rather
+ # than config.cc.coptions since the former could re-enable them.
#
- host_config = $regex.replace_lines( \
- $build2_config, \
- '^ *config\.(c[. ]|cxx[. ]|cc[.]|bin[.]|config.environment |version ).*$', \
- '$&', \
- format_no_copy return_lines)
+ if ($regex.match($l, ' *config\.(c|cxx)\.coptions[ =].*'))
+ {
+ # Note that in MSVC overriding one warning option (say /W3) with another
+ # (say /w) triggers a warning. However, our compile_rule sanitizes the
+ # command line to resolve such overrides (see msvc_sanitize_cl()).
+ #
+ o = ($cxx.class == 'gcc' ? -w : $cxx.class == 'msvc' ? /w : )
+
+ if ($regex.match($l, '[^=]+= *\[null\] *'))
+ {
+ l = $regex.replace($l, '= *\[null\] *$', "= $o")
+ h = $regex.replace($h, '= *\[null\] *$', "= $o")
+ }
+ else
+ {
+ l = $regex.replace($l, '=(.*)$', "=\\1 $o")
+ h = $regex.replace($h, '=(.*)$', "=\\1 $o")
+ }
+ }
+
+ if ($h != [null])
+ host_config_no_warnings_lines += $h
+
+ build2_config_no_warnings_lines += $l
+}
+
+config/cxx{host-config}: config/in{host-config}
+{
+ host_config = $regex.merge($host_config_lines, '(.+)', '\1\n')
+ build2_config = $regex.merge($build2_config_lines, '(.+)', '\1\n')
+
+ host_config_no_warnings = $regex.merge($host_config_no_warnings_lines, \
+ '(.+)', '\1\n')
+ build2_config_no_warnings = $regex.merge($build2_config_no_warnings_lines, \
+ '(.+)', '\1\n')
}
libul{build2}: dist/{hxx ixx txx cxx}{** -**.test...}
@@ -162,14 +247,48 @@ if! $cross
{
{obja objs}{context}: cxx.poptions += \
-DBUILD2_IMPORT_PATH=\"$regex.replace($out_root, '\\', '\\\\')\"
+}
- # While this object file should only be linked when we are installing, it
- # will be compiled even in the uninstalled case.
+# Note that while the -installed object file should only be linked when we
+# are installing, it will be compiled even in the uninstalled case.
+#
+if ($install.root != [null])
+{
+ # Only if installed.
+ #
+ {obja objs}{utility-installed}: cxx.poptions += \
+ -DBUILD2_INSTALL_LIB=\"$regex.replace(\
+ $install.resolve($install.lib), '\\', '\\\\')\"
+
+ # Only if configured.
+ #
+ # Note: strip the last directory component (<project>).
#
- if ($install.root != [null])
- {obja objs}{utility-installed}: cxx.poptions += \
- -DBUILD2_INSTALL_LIB=\"$regex.replace(\
- $install.resolve($install.lib), '\\', '\\\\')\"
+ # @@ TMP drop after 0.16.0 release.
+ #
+ install_buildfile = ($install.buildfile != [null] \
+ ? $directory($install.resolve($install.buildfile)) \
+ :)
+ {obja objs}{utility-installed utility-uninstalled}: cxx.poptions += \
+ -DBUILD2_INSTALL_BUILDFILE=\"$regex.replace($install_buildfile, '\\', '\\\\')\"
+
+ #\
+ {obja objs}{utility-installed utility-uninstalled}: cxx.poptions += \
+ -DBUILD2_INSTALL_BUILDFILE=\"$regex.replace(\
+ $directory($install.resolve($install.buildfile)), '\\', '\\\\')\"
+ #\
+
+ # Data directory or src_root if not installed.
+ #
+ # Note: normalized in both cases.
+ #
+ {obja objs}{utility-installed}: cxx.poptions += \
+ -DBUILD2_INSTALL_DATA=\"$regex.replace(\
+ $install.resolve($install.data), '\\', '\\\\')\"
+
+ {obja objs}{utility-uninstalled}: cxx.poptions += \
+ -DBUILD2_INSTALL_DATA=\"$regex.replace(\
+ $src_root, '\\', '\\\\')\"
}
if ($cxx.target.class != 'windows')
@@ -220,26 +339,26 @@ else
# Generated options parser.
#
-# @@ Consider generating common cli runtime namespace if adding more option
-# files. Plus sommon types-parser.?xx (which could also potentially be
-# reused by the driver).
+# Note that the cli runtime namespace is build2::build::cli rather than
+# build2::cli. That's because the cli namespace inside build2 is reserved for
+# the cli build system module (libbuild2-cli). In fact, every namespace inside
+# build2 is reserved for a potential module and the only namespace names we
+# can use are build (this name, along with import and export, is reserved by
+# the build system core) and names that start with an underscore.
#
if $cli.configured
{
cli.options += --std c++11 -I $src_root --include-with-brackets \
---generate-vector-scanner --generate-modifier --generate-specifier \
---suppress-usage
+--cli-namespace build2::build::cli --generate-specifier
cli.cxx{*}:
{
# Include the generated cli files into the distribution and don't remove
# them when cleaning in src (so that clean results in a state identical
- # to distributed). But don't install their headers since they are only
- # used internally in the testscript implementation.
+ # to distributed).
#
dist = true
clean = ($src_root != $out_root)
- install = false
# We keep the generated code in the repository so copy it back to src in
# case of a forwarded configuration.
@@ -247,29 +366,61 @@ if $cli.configured
backlink = overwrite
}
+ cli.cxx{common-options}: cli{common}
+ {
+ cli.options += --include-prefix libbuild2 --guard-prefix LIBBUILD2 \
+--export-symbol LIBBUILD2_SYMEXPORT \
+--hxx-prologue '#include <libbuild2/export.hxx>' \
+--generate-file-scanner --generate-vector-scanner
+ }
+
+ cli.cxx{b-options}: cli{b}
+ {
+ cli.options += --include-prefix libbuild2 --guard-prefix LIBBUILD2 \
+--export-symbol LIBBUILD2_SYMEXPORT \
+--hxx-prologue '#include <libbuild2/export.hxx>' \
+--cxx-prologue "#include <libbuild2/types-parsers.hxx>" \
+--keep-separator --generate-parse --generate-merge
+
+ # Usage options.
+ #
+ cli.options += --suppress-undocumented --long-usage --ansi-color \
+--ascii-tree --page-usage 'build2::print_$name$_' --option-length 23
+ }
+
script/cli.cxx{builtin-options}: script/cli{builtin}
{
- cli.options += --cli-namespace build2::script::cli \
---include-prefix libbuild2/script --guard-prefix LIBBUILD2_SCRIPT
+ cli.options += --include-prefix libbuild2/script \
+--guard-prefix LIBBUILD2_SCRIPT --generate-modifier --suppress-usage
+
+ # Don't install the generated cli headers since they are only used
+ # internally in the script implementation.
+ #
+ install = false
}
build/script/cli.cxx{builtin-options}: build/script/cli{builtin}
{
- cli.options += --cli-namespace build2::build::script::cli \
---include-prefix libbuild2/build/script --guard-prefix LIBBUILD2_BUILD_SCRIPT \
---cxx-prologue "#include <libbuild2/build/script/types-parsers.hxx>" \
---generate-parse
+ cli.options += --include-prefix libbuild2/build/script \
+--guard-prefix LIBBUILD2_BUILD_SCRIPT \
+--cxx-prologue "#include <libbuild2/types-parsers.hxx>" \
+--generate-parse --generate-modifier --suppress-usage
+
+ # Don't install the generated cli headers since they are only used
+ # internally in the buildscript implementation.
+ #
+ install = false
}
}
else
{
# No install for the pre-generated case.
#
- script/hxx{builtin-options}@./ \
- script/ixx{builtin-options}@./: install = false
+ script/hxx{builtin-options}@script/ \
+ script/ixx{builtin-options}@script/: install = false
- build/script/hxx{builtin-options}@./ \
- build/script/ixx{builtin-options}@./: install = false
+ build/script/hxx{builtin-options}@build/script/ \
+ build/script/ixx{builtin-options}@build/script/: install = false
}
# Install into the libbuild2/ subdirectory of, say, /usr/include/
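
The coptions rewriting above has two cases: a [null] value is replaced with just the warning-suppression option, while a non-null value gets the option appended. The following is a stand-alone C++ analogue of those two regex substitutions, for illustration only (the buildfile itself uses the built-in $regex.match/$regex.replace functions).

#include <iostream>
#include <regex>
#include <string>

// Append the warning-suppression option (-w for the gcc class, /w for msvc)
// to a config.{c,cxx}.coptions line.
//
static std::string
suppress_warnings (const std::string& l, const std::string& o)
{
  if (std::regex_match (l, std::regex ("[^=]+= *\\[null\\] *")))
    return std::regex_replace (l, std::regex ("= *\\[null\\] *$"), "= " + o);

  return std::regex_replace (l, std::regex ("=(.*)$"), "=$1 " + o);
}

int main ()
{
  std::cout << suppress_warnings ("config.c.coptions = [null]", "-w") << '\n'
            << suppress_warnings ("config.cxx.coptions = -O2", "-w") << '\n';
}
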
diff --git a/libbuild2/buildspec.cxx b/libbuild2/buildspec.cxx
index bd580ca..2eeaf31 100644
--- a/libbuild2/buildspec.cxx
+++ b/libbuild2/buildspec.cxx
@@ -53,7 +53,7 @@ namespace build2
if (v)
{
names storage;
- os << reverse (v, storage);
+ os << reverse (v, storage, true /* reduce */);
}
else
os << "[null]";
@@ -86,7 +86,7 @@ namespace build2
if (v)
{
names storage;
- os << reverse (v, storage);
+ os << reverse (v, storage, true /* reduce */);
}
else
os << "[null]";
diff --git a/libbuild2/c/init.cxx b/libbuild2/c/init.cxx
index be001a8..8bc2f7d 100644
--- a/libbuild2/c/init.cxx
+++ b/libbuild2/c/init.cxx
@@ -6,9 +6,12 @@
#include <libbuild2/scope.hxx>
#include <libbuild2/diagnostics.hxx>
+#include <libbuild2/install/utility.hxx>
+
#include <libbuild2/cc/guess.hxx>
#include <libbuild2/cc/module.hxx>
+#include <libbuild2/cc/target.hxx> // pc*
#include <libbuild2/c/target.hxx>
#ifndef BUILD2_DEFAULT_C
@@ -27,6 +30,7 @@ namespace build2
namespace c
{
using cc::compiler_id;
+ using cc::compiler_type;
using cc::compiler_class;
using cc::compiler_info;
@@ -53,6 +57,26 @@ namespace build2
strings& mode,
const string* v) const
{
+ // The standard is `NN` but can also be `gnuNN`.
+
+ // This helper helps recognize both NN and [cC]NN to avoid an endless
+ // stream of user questions. It can also be used to recognize Nx in
+ // addition to NN (e.g., "23" and "2x").
+ //
+ auto stdcmp = [v] (const char* nn, const char* nx = nullptr)
+ {
+ if (v != nullptr)
+ {
+ const char* s (v->c_str ());
+ if (s[0] == 'c' || s[0] == 'C')
+ s += 1;
+
+ return strcmp (s, nn) == 0 || (nx != nullptr && strcmp (s, nx) == 0);
+ }
+
+ return false;
+ };
+
switch (ci.class_)
{
case compiler_class::msvc:
@@ -77,7 +101,12 @@ namespace build2
// C17/18 is a bug-fix version of C11 so here we assume it is the
// same as C11.
//
- // And it's still early days for C2X.
+ // And it's still early days for C2X. Specifically, there is not
+ // much about C2X in MSVC in the official places and the following
+ // page shows that it's pretty much unimplemented at the time of the
+ // MSVC 17.6 release:
+ //
+ // https://en.cppreference.com/w/c/compiler_support/23
//
// From version 16.8 VC now supports /std:c11 and /std:c17 options
// which enable C11/17 conformance. However, as of version 16.10,
@@ -86,17 +115,17 @@ namespace build2
//
if (v == nullptr)
;
- else if (*v != "90")
+ else if (!stdcmp ("90"))
{
uint64_t cver (ci.version.major);
- if ((*v == "99" && cver < 16) || // Since VS2010/10.0.
- ((*v == "11" ||
- *v == "17" ||
- *v == "18") && cver < 18) ||
- (*v == "2x" ))
+ if ((stdcmp ("99") && cver < 16) || // Since VS2010/10.0.
+ ((stdcmp ("11") ||
+ stdcmp ("17") ||
+ stdcmp ("18")) && cver < 18) || // Since VS????/11.0.
+ (stdcmp ("23", "2x") ))
{
- fail << "C" << *v << " is not supported by " << ci.signature <<
+ fail << "C " << *v << " is not supported by " << ci.signature <<
info << "required by " << project (rs) << '@' << rs;
}
}
@@ -113,12 +142,12 @@ namespace build2
{
string o ("-std=");
- if (*v == "2x") o += "c2x"; // GCC 9, Clang 9 (8?).
- else if (*v == "17" ||
- *v == "18") o += "c17"; // GCC 8, Clang 6.
- else if (*v == "11") o += "c1x";
- else if (*v == "99") o += "c9x";
- else if (*v == "90") o += "c90";
+ if (stdcmp ("23", "2x")) o += "c2x"; // GCC 9, Clang 9 (8?).
+ else if (stdcmp ("17") ||
+ stdcmp ("18")) o += "c17"; // GCC 8, Clang 6.
+ else if (stdcmp ("11")) o += "c1x";
+ else if (stdcmp ("99")) o += "c9x";
+ else if (stdcmp ("90")) o += "c90";
else o += *v; // In case the user specifies `gnuNN` or some such.
mode.insert (mode.begin (), move (o));
@@ -128,6 +157,79 @@ namespace build2
}
}
+ // See cc::data::x_{hdr,inc} for background.
+ //
+ static const target_type* const hdr[] =
+ {
+ &h::static_type,
+ nullptr
+ };
+
+ // Note that we include S{} here because .S files can include each other.
+ // (And maybe from inline assembler instructions?)
+ //
+ static const target_type* const inc[] =
+ {
+ &h::static_type,
+ &c::static_type,
+ &m::static_type,
+ &S::static_type,
+ &c_inc::static_type,
+ nullptr
+ };
+
+ bool
+ types_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("c::types_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "c.types module must be loaded in project root";
+
+ // Register target types and configure their "installability".
+ //
+ using namespace install;
+
+ bool install_loaded (cast_false<bool> (rs["install.loaded"]));
+
+ // Note: not registering m{} or S{} (they are registered separately
+ // by the respective optional .types submodules).
+ //
+ rs.insert_target_type<c> ();
+
+ auto insert_hdr = [&rs, install_loaded] (const target_type& tt)
+ {
+ rs.insert_target_type (tt);
+
+ // Install headers into install.include.
+ //
+ if (install_loaded)
+ install_path (rs, tt, dir_path ("include"));
+ };
+
+ for (const target_type* const* ht (hdr); *ht != nullptr; ++ht)
+ insert_hdr (**ht);
+
+ // @@ PERF: maybe factor this to cc.types?
+ //
+ rs.insert_target_type<cc::pc> ();
+ rs.insert_target_type<cc::pca> ();
+ rs.insert_target_type<cc::pcs> ();
+
+ if (install_loaded)
+ install_path<cc::pc> (rs, dir_path ("pkgconfig"));
+
+ return true;
+ }
+
static const char* const hinters[] = {"cxx", nullptr};
// See cc::module for details on guess_init vs config_init.
@@ -154,15 +256,20 @@ namespace build2
// Enter all the variables and initialize the module data.
//
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
cc::config_data d {
cc::lang::c,
"c",
"c",
+ "obj-c",
BUILD2_DEFAULT_C,
".i",
+ ".mi",
hinters,
@@ -225,6 +332,9 @@ namespace build2
vp["cc.export.libs"],
vp["cc.export.impl_libs"],
+ vp["cc.pkgconfig.include"],
+ vp["cc.pkgconfig.lib"],
+
vp.insert_alias (vp["cc.stdlib"], "c.stdlib"), // Same as cc.stdlib.
vp["cc.runtime"],
@@ -235,6 +345,7 @@ namespace build2
vp["cc.module_name"],
vp["cc.importable"],
vp["cc.reprocess"],
+ vp["cc.serialize"],
vp.insert<string> ("c.preprocessed"), // See cxx.preprocessed.
nullptr, // No __symexport (no modules).
@@ -276,6 +387,9 @@ namespace build2
vp.insert_alias (d.c_runtime, "c.runtime");
vp.insert_alias (d.c_importable, "c.importable");
+ vp.insert_alias (d.c_pkgconfig_include, "c.pkgconfig.include");
+ vp.insert_alias (d.c_pkgconfig_lib, "c.pkgconfig.lib");
+
auto& m (extra.set_module (new config_module (move (d))));
m.guess (rs, loc, extra.hints);
@@ -306,19 +420,6 @@ namespace build2
return true;
}
- static const target_type* const hdr[] =
- {
- &h::static_type,
- nullptr
- };
-
- static const target_type* const inc[] =
- {
- &h::static_type,
- &c::static_type,
- nullptr
- };
-
bool
init (scope& rs,
scope& bs,
@@ -346,10 +447,8 @@ namespace build2
"c.compile",
"c.link",
"c.install",
- "c.uninstall",
- cm.x_info->id.type,
- cm.x_info->id.variant,
+ cm.x_info->id,
cm.x_info->class_,
cm.x_info->version.major,
cm.x_info->version.minor,
@@ -382,25 +481,192 @@ namespace build2
c::static_type,
nullptr, // No C modules yet.
+ c_inc::static_type,
hdr,
inc
};
- auto& m (extra.set_module (new module (move (d))));
+ auto& m (extra.set_module (new module (move (d), rs)));
m.init (rs, loc, extra.hints, *cm.x_info);
return true;
}
+ bool
+ objc_types_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("c::objc_types_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "c.objc.types module must be loaded in project root";
+
+ // Register the m{} target type.
+ //
+ rs.insert_target_type<m> ();
+
+ return true;
+ }
+
+ bool
+ objc_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("c::objc_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "c.objc module must be loaded in project root";
+
+ module* mod (rs.find_module<module> ("c"));
+
+ if (mod == nullptr)
+ fail (loc) << "c.objc module must be loaded after c module";
+
+ // Register the target type and "enable" it in the module.
+ //
+ // Note that we must register the target type regardless of whether the
+ // C compiler is capable of compiling Objective-C. But we enable only
+ // if it is.
+ //
+ // Note: see similar code in the cxx module.
+ //
+ load_module (rs, rs, "c.objc.types", loc);
+
+ // Note that while Objective-C is supported by MinGW GCC, it's unlikely
+ // Clang supports it when targeting MSVC or Emscripten. But let's keep
+ // the check simple for now.
+ //
+ if (mod->ctype == compiler_type::gcc ||
+ mod->ctype == compiler_type::clang)
+ mod->x_obj = &m::static_type;
+
+ return true;
+ }
+
+ bool
+ as_cpp_types_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("c::as_cpp_types_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "c.as-cpp.types module must be loaded in project root";
+
+ // Register the S{} target type.
+ //
+ rs.insert_target_type<S> ();
+
+ return true;
+ }
+
+ bool
+ as_cpp_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("c::as_cpp_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "c.as-cpp module must be loaded in project root";
+
+ module* mod (rs.find_module<module> ("c"));
+
+ if (mod == nullptr)
+ fail (loc) << "c.as-cpp module must be loaded after c module";
+
+ // Register the target type and "enable" it in the module.
+ //
+ // Note that we must register the target type regardless of whether the
+ // C compiler is capable of compiling Assembler with C preprocessor. But
+ // we enable only if it is.
+ //
+ load_module (rs, rs, "c.as-cpp.types", loc);
+
+ if (mod->ctype == compiler_type::gcc ||
+ mod->ctype == compiler_type::clang)
+ mod->x_asp = &S::static_type;
+
+ return true;
+ }
+
+ bool
+ predefs_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("c::predefs_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "c.predefs module must be loaded in project root";
+
+ module* mod (rs.find_module<module> ("c"));
+
+ if (mod == nullptr)
+ fail (loc) << "c.predefs module must be loaded after c module";
+
+ // Register the c.predefs rule.
+ //
+ // Why invent a separate module instead of just always registering it in
+ // the c module? The reason is performance: this rule will be called for
+ // every C header.
+ //
+ cc::predefs_rule& r (*mod);
+
+ rs.insert_rule<h> (perform_update_id, r.rule_name, r);
+ rs.insert_rule<h> (perform_clean_id, r.rule_name, r);
+ rs.insert_rule<h> (configure_update_id, r.rule_name, r);
+
+ return true;
+ }
+
static const module_functions mod_functions[] =
{
// NOTE: don't forget to also update the documentation in init.hxx if
// changing anything here.
- {"c.guess", nullptr, guess_init},
- {"c.config", nullptr, config_init},
- {"c", nullptr, init},
- {nullptr, nullptr, nullptr}
+ {"c.types", nullptr, types_init},
+ {"c.guess", nullptr, guess_init},
+ {"c.config", nullptr, config_init},
+ {"c.objc.types", nullptr, objc_types_init},
+ {"c.objc", nullptr, objc_init},
+ {"c.as-cpp.types", nullptr, as_cpp_types_init},
+ {"c.as-cpp", nullptr, as_cpp_init},
+ {"c.predefs", nullptr, predefs_init},
+ {"c", nullptr, init},
+ {nullptr, nullptr, nullptr}
};
const module_functions*
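
The stdcmp helper introduced above normalizes the c.std value before comparison: a leading c/C is stripped and an alternative Nx spelling can be accepted alongside NN. The same logic extracted into a stand-alone free function (instead of the capturing lambda used in the patch):

#include <cstring>
#include <string>

// Return true if the standard value *v matches nn (e.g., "23") or, if given,
// nx (e.g., "2x"), also accepting the cNN/CNN spellings.
//
static bool
stdcmp (const std::string* v, const char* nn, const char* nx = nullptr)
{
  if (v != nullptr)
  {
    const char* s (v->c_str ());
    if (s[0] == 'c' || s[0] == 'C')
      s += 1;

    return std::strcmp (s, nn) == 0 ||
           (nx != nullptr && std::strcmp (s, nx) == 0);
  }

  return false;
}

int main ()
{
  std::string v ("C2x");
  return stdcmp (&v, "23", "2x") ? 0 : 1; // Accepts "23", "2x", "c23", "C2x".
}
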
diff --git a/libbuild2/c/init.hxx b/libbuild2/c/init.hxx
index 2662bb1..38515c1 100644
--- a/libbuild2/c/init.hxx
+++ b/libbuild2/c/init.hxx
@@ -19,9 +19,22 @@ namespace build2
//
// Submodules:
//
- // `c.guess` -- registers and sets some variables.
- // `c.config` -- loads c.guess and sets more variables.
- // `c` -- loads c.config and registers target types and rules.
+ // `c.types` -- registers target types.
+ // `c.guess` -- registers and sets some variables.
+ // `c.config` -- loads c.guess and sets more variables.
+ // `c` -- loads c.{types,config} and registers rules and
+ // functions.
+ //
+ // `c.objc.types` -- registers m{} target type.
+ // `c.objc` -- loads c.objc.types and enables Objective-C
+ // compilation. Must be loaded after c.
+ //
+ // `c.as-cpp.types` -- registers S{} target type.
+ // `c.as-cpp` -- loads c.as-cpp.types and enables Assembler with C
+ // preprocessor compilation. Must be loaded after c.
+ //
+ // `c.predefs` -- registers rule for generating a C header with
+ // predefined compiler macros. Must be loaded after c.
//
extern "C" LIBBUILD2_C_SYMEXPORT const module_functions*
build2_c_load ();
diff --git a/libbuild2/c/target.hxx b/libbuild2/c/target.hxx
index 333d39f..c9955e3 100644
--- a/libbuild2/c/target.hxx
+++ b/libbuild2/c/target.hxx
@@ -15,6 +15,9 @@ namespace build2
{
using cc::h;
using cc::c;
+ using cc::m;
+ using cc::S;
+ using cc::c_inc;
}
}
diff --git a/libbuild2/cc/buildfile b/libbuild2/cc/buildfile
index e98e3de..7dcd811 100644
--- a/libbuild2/cc/buildfile
+++ b/libbuild2/cc/buildfile
@@ -6,14 +6,33 @@
include ../
impl_libs = ../lib{build2} # Implied interface dependency.
-import impl_libs += libpkgconf%lib{pkgconf}
+libpkgconf = $config.build2.libpkgconf
+
+if $libpkgconf
+ import impl_libs += libpkgconf%lib{pkgconf}
+else
+ import impl_libs += libpkg-config%lib{pkg-config}
include ../bin/
intf_libs = ../bin/lib{build2-bin}
-./: lib{build2-cc}: libul{build2-cc}: {hxx ixx txx cxx}{** -**.test...} \
- h{msvc-setup} \
- $intf_libs $impl_libs
+./: lib{build2-cc}: libul{build2-cc}: \
+ {hxx ixx txx cxx}{** -pkgconfig-lib* -**.test...} \
+ h{msvc-setup}
+
+libul{build2-cc}: cxx{pkgconfig-libpkgconf}: include = $libpkgconf
+libul{build2-cc}: cxx{pkgconfig-libpkg-config}: include = (!$libpkgconf)
+
+libul{build2-cc}: $intf_libs $impl_libs
+
+# libc++ std module interface translation unit.
+#
+# Hopefully temporary, see llvm-project GH issues #73089.
+#
+# @@ TMP: make sure sync'ed with upstream before release (keep this note).
+#
+lib{build2-cc}: file{std.cppm}
+file{std.cppm}@./: install = data/libbuild2/cc/
# Unit tests.
#
@@ -38,6 +57,9 @@ for t: cxx{**.test...}
obja{*}: cxx.poptions += -DLIBBUILD2_CC_STATIC_BUILD
objs{*}: cxx.poptions += -DLIBBUILD2_CC_SHARED_BUILD
+if $libpkgconf
+ cxx.poptions += -DBUILD2_LIBPKGCONF
+
if ($cxx.target.class == 'windows')
cxx.libs += $regex.apply(advapi32 ole32 oleaut32, \
'(.+)', \
diff --git a/libbuild2/cc/common.cxx b/libbuild2/cc/common.cxx
index 09a1752..2a8bc50 100644
--- a/libbuild2/cc/common.cxx
+++ b/libbuild2/cc/common.cxx
@@ -39,6 +39,11 @@ namespace build2
// 3. dependency libs (prerequisite_targets, left to right, depth-first)
// 4. dependency libs (*.libs variables).
//
+ // If proc_opt_group is true, then pass to proc_opt the group rather than
+ // the member if a member was picked (according to linfo) from a group.
+ // This is useful when we only want to see the common options set on the
+ // group.
+ //
// If either proc_opt or proc_lib return false, then any further
// processing of this library or its dependencies is skipped. This can be
// used to "prune" the graph traversal in case of duplicates. Note that
@@ -49,19 +54,19 @@ namespace build2
// array that contains the current library dependency chain all the way to
// the library passed to process_libraries(). The first element of this
// array is NULL. If this argument is NULL, then this is a library without
- // a target (e.g., -lpthread) and its name is in the second argument
- // (which could be resolved to an absolute path or passed as an -l<name>
- // option). Otherwise, (the first argument is not NULL), the second
- // argument contains the target path (which can be empty in case of the
- // unknown DLL path).
+ // a target (e.g., -lm, -pthread, etc) and its name is in the second
+ // argument (which could be resolved to an absolute path or passed as an
+ // -l<name>/-pthread option). Otherwise, (the first argument is not NULL),
+ // the second argument contains the target path (which can be empty in
+ // case of the unknown DLL path).
//
- // Initially, the second argument (library name) was a string (e.g.,
- // -lpthread) but there are cases where the library is identified with
- // multiple options, such as -framework CoreServices (there are also cases
- // like -Wl,--whole-archive -lfoo -lbar -Wl,--no-whole-archive). So now it
- // is a vector_view that contains a fragment of options (from one of the
- // *.libs variables) that corresponds to the library (or several
- // libraries, as in the --whole-archive example above).
+ // Initially, the second argument (library name) was a string (e.g., -lm)
+ // but there are cases where the library is identified with multiple
+ // options, such as -framework CoreServices (there are also cases like
+ // -Wl,--whole-archive -lfoo -lbar -Wl,--no-whole-archive). So now it is a
+ // vector_view that contains a fragment of options (from one of the *.libs
+ // variables) that corresponds to the library (or several libraries, as in
+ // the --whole-archive example above).
//
// Storing a reference to elements of library name in proc_lib is legal
// (they come either from the target's path or from one of the *.libs
@@ -72,10 +77,18 @@ namespace build2
// not to pick the liba/libs{} member for installed libraries instead
// passing the lib{} group itself. This can be used to match the semantics
// of file_rule which, when matching prerequisites, does not pick the
- // liba/libs{} member (naturally) but just matches the lib{} group.
+ // liba/libs{} member (naturally) but just matches the lib{} group. Note
+ // that currently this truly only works for installed lib{} since non-
+ // installed ones don't have cc.type set. See proc_opt_group for an
+ // alternative way to (potentially) achieve the desired semantics.
//
// Note that if top_li is present, then the target passed to proc_impl,
- // proc_lib, and proc_opt is always a file.
+ // proc_lib, and proc_opt (unless proc_opt_group is true) is always a
+ // file.
+ //
+ // The dedup argument is part of the interface dependency deduplication
+ // functionality, similar to $x.deduplicate_export_libs(). Note, however,
+ // that here we do it "properly" (i.e., using group members, etc).
//
void common::
process_libraries (
@@ -83,7 +96,7 @@ namespace build2
const scope& top_bs,
optional<linfo> top_li,
const dir_paths& top_sysd,
- const mtime_target& l, // liba/libs{} or lib{}
+ const mtime_target& l, // liba/libs{}, libux{}, or lib{}
bool la,
lflags lf,
const function<bool (const target&,
@@ -92,34 +105,73 @@ namespace build2
const small_vector<reference_wrapper<
const string>, 2>&, // Library "name".
lflags, // Link flags.
- const string* type, // cc.type
+ const string* type, // whole cc.type
bool sys)>& proc_lib, // System library?
const function<bool (const target&,
- const string& type, // cc.type
+ const string& lang, // lang from cc.type
bool com, // cc. or x.
bool exp)>& proc_opt, // *.export.
- bool self /*= false*/, // Call proc_lib on l?
- library_cache* cache,
- small_vector<const target*, 24>* chain) const
+ bool self, // Call proc_lib on l?
+ bool proc_opt_group, // Call proc_opt on group instead of member?
+ library_cache* cache) const
{
library_cache cache_storage;
if (cache == nullptr)
cache = &cache_storage;
- small_vector<const target*, 24> chain_storage;
- if (chain == nullptr)
- {
- chain = &chain_storage;
+ small_vector<const target*, 32> chain;
- if (proc_lib)
- chain->push_back (nullptr);
- }
+ if (proc_lib)
+ chain.push_back (nullptr);
+
+ process_libraries_impl (a, top_bs, top_li, top_sysd,
+ nullptr, l, la, lf,
+ proc_impl, proc_lib, proc_opt,
+ self, proc_opt_group,
+ cache, &chain, nullptr);
+ }
+ void common::
+ process_libraries_impl (
+ action a,
+ const scope& top_bs,
+ optional<linfo> top_li,
+ const dir_paths& top_sysd,
+ const target* lg,
+ const mtime_target& l,
+ bool la,
+ lflags lf,
+ const function<bool (const target&,
+ bool la)>& proc_impl,
+ const function<bool (const target* const*,
+ const small_vector<reference_wrapper<
+ const string>, 2>&,
+ lflags,
+ const string* type,
+ bool sys)>& proc_lib,
+ const function<bool (const target&,
+ const string& lang,
+ bool com,
+ bool exp)>& proc_opt,
+ bool self,
+ bool proc_opt_group,
+ library_cache* cache,
+ small_vector<const target*, 32>* chain,
+ small_vector<const target*, 32>* dedup) const
+ {
// Add the library to the chain.
//
if (self && proc_lib)
+ {
+ if (find (chain->begin (), chain->end (), &l) != chain->end ())
+ fail << "dependency cycle detected involving library " << l;
+
chain->push_back (&l);
+ }
+ // We only lookup public variables so go straight for the public
+ // variable pool.
+ //
auto& vp (top_bs.ctx.var_pool);
do // Breakout loop.
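
The new cycle check above keeps the current dependency chain and refuses to push a library that is already on it. A stand-alone miniature of the same idea, with plain strings standing in for targets:

#include <algorithm>
#include <stdexcept>
#include <string>
#include <vector>

// Push a library onto the dependency chain, failing if it is already there
// (which would indicate a dependency cycle).
//
static void
push_library (std::vector<std::string>& chain, const std::string& l)
{
  if (std::find (chain.begin (), chain.end (), l) != chain.end ())
    throw std::runtime_error (
      "dependency cycle detected involving library " + l);

  chain.push_back (l);
}
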
@@ -131,25 +183,45 @@ namespace build2
// performance we use lookup_original() directly and only look in the
// target (so no target type/pattern-specific).
//
- const string* t (
+ const string* pt (
cast_null<string> (
l.state[a].lookup_original (c_type, true /* target_only */).first));
+ // cc.type value format is <lang>[,...].
+ //
+ size_t p;
+ const string& t (pt != nullptr
+ ? ((p = pt->find (',')) == string::npos
+ ? *pt
+ : string (*pt, 0, p))
+ : string ());
+
+ // Why are we bothering with impl for binless libraries since all
+ // their dependencies are by definition interface? Well, for one, it
+ // could be that it is dynamically-binless (e.g., binless on some
+ // platforms or in some configurations and binful on/in others). In
+ // this case it would be helpful to have a uniform semantics so that,
+ // for example, *.libs are used for liba{} regardless of whether it is
+ // binless or not. On the other hand, having to specify both
+ // *.export.libs=-lm and *.libs=-lm (or *.export.impl_libs) for an
+ // always-binless library is sure not very intuitive. Not sure if we
+ // can win here.
+ //
bool impl (proc_impl && proc_impl (l, la));
bool cc (false), same (false);
- if (t != nullptr)
+ if (!t.empty ())
{
- cc = (*t == "cc");
- same = (!cc && *t == x);
+ cc = (t == "cc");
+ same = (!cc && t == x);
}
- const scope& bs (t == nullptr || cc ? top_bs : l.base_scope ());
+ const scope& bs (t.empty () || cc ? top_bs : l.base_scope ());
lookup c_e_libs;
lookup x_e_libs;
- if (t != nullptr)
+ if (!t.empty ())
{
// Note that we used to treat *.export.libs set on the liba/libs{}
// members as *.libs overrides rather than as member-specific
@@ -168,8 +240,6 @@ namespace build2
//
// See also deduplicate_export_libs() if changing anything here.
//
- // @@ PERF: do target_only (helps a bit in non-installed case)?
- //
{
const variable& v (impl ? c_export_impl_libs : c_export_libs);
c_e_libs = l.lookup_original (v, false, &bs).first;
@@ -180,7 +250,7 @@ namespace build2
const variable& v (
same
? (impl ? x_export_impl_libs : x_export_libs)
- : vp[*t + (impl ? ".export.impl_libs" : ".export.libs")]);
+ : vp[t + (impl ? ".export.impl_libs" : ".export.libs")]);
x_e_libs = l.lookup_original (v, false, &bs).first;
}
@@ -188,12 +258,14 @@ namespace build2
//
if (proc_opt)
{
+ const target& ol (proc_opt_group && lg != nullptr ? *lg : l);
+
// If all we know is it's a C-common library, then in both cases
// we only look for cc.export.*.
//
if (cc)
{
- if (!proc_opt (l, *t, true, true)) break;
+ if (!proc_opt (ol, t, true, true)) break;
}
else
{
@@ -210,24 +282,24 @@ namespace build2
//
// Note: options come from *.export.* variables.
//
- if (!proc_opt (l, *t, false, true) ||
- !proc_opt (l, *t, true, true)) break;
+ if (!proc_opt (ol, t, false, true) ||
+ !proc_opt (ol, t, true, true)) break;
}
else
{
// For default export we use the same options as were used
// to build the library.
//
- if (!proc_opt (l, *t, false, false) ||
- !proc_opt (l, *t, true, false)) break;
+ if (!proc_opt (ol, t, false, false) ||
+ !proc_opt (ol, t, true, false)) break;
}
}
else
{
// Interface: only add *.export.* (interface dependencies).
//
- if (!proc_opt (l, *t, false, true) ||
- !proc_opt (l, *t, true, true)) break;
+ if (!proc_opt (ol, t, false, true) ||
+ !proc_opt (ol, t, true, true)) break;
}
}
}
@@ -268,12 +340,12 @@ namespace build2
const file* f;
const path& p ((f = l.is_a<file> ()) ? f->path () : empty_path);
- bool s (t != nullptr // If cc library (matched or imported).
+ bool s (pt != nullptr // If cc library (matched or imported).
? cast_false<bool> (l.vars[c_system])
: !p.empty () && sys (top_sysd, p.string ()));
proc_lib_name = {p.string ()};
- if (!proc_lib (&chain->back (), proc_lib_name, lf, t, s))
+ if (!proc_lib (&chain->back (), proc_lib_name, lf, pt, s))
break;
}
@@ -283,21 +355,21 @@ namespace build2
// Find system search directories corresponding to this library, i.e.,
// from its project and for its type (C, C++, etc).
//
- auto find_sysd = [&top_sysd, t, cc, same, &bs, &sysd, this] ()
+ auto find_sysd = [&top_sysd, &vp, t, cc, same, &bs, &sysd, this] ()
{
// Use the search dirs corresponding to this library scope/type.
//
- sysd = (t == nullptr || cc)
+ sysd = (t.empty () || cc)
? &top_sysd // Imported library, use importer's sysd.
: &cast<dir_paths> (
bs.root_scope ()->vars[same
? x_sys_lib_dirs
- : bs.ctx.var_pool[*t + ".sys_lib_dirs"]]);
+ : vp[t + ".sys_lib_dirs"]]);
};
auto find_linfo = [top_li, t, cc, &bs, &l, &li] ()
{
- li = (t == nullptr || cc)
+ li = (t.empty () || cc)
? top_li
: optional<linfo> (link_info (bs, link_type (l).type)); // @@ PERF
};
@@ -315,11 +387,16 @@ namespace build2
for (const prerequisite_target& pt: l.prerequisite_targets[a])
{
// Note: adhoc prerequisites are not part of the library metadata
- // protocol (and we should check for adhoc first to avoid races).
+ // protocol (and we should check for adhoc first to avoid races
+ // during execute).
//
- if (pt.adhoc || pt == nullptr)
+ if (pt.adhoc () || pt == nullptr)
continue;
+ if (marked (pt))
+ fail << "implicit dependency cycle detected involving library "
+ << l;
+
bool la;
const file* f;
@@ -327,13 +404,20 @@ namespace build2
(la = (f = pt->is_a<libux> ())) ||
( f = pt->is_a<libs> ()))
{
+ // See link_rule for details.
+ //
+ const target* g ((pt.include & include_group) != 0
+ ? f->group
+ : nullptr);
+
if (sysd == nullptr) find_sysd ();
if (!li) find_linfo ();
- process_libraries (a, bs, *li, *sysd,
- *f, la, pt.data,
- proc_impl, proc_lib, proc_opt, true,
- cache, chain);
+ process_libraries_impl (a, bs, *li, *sysd,
+ g, *f, la, pt.data /* lflags */,
+ proc_impl, proc_lib, proc_opt,
+ true /* self */, proc_opt_group,
+ cache, chain, nullptr);
}
}
}
@@ -344,7 +428,7 @@ namespace build2
// If it is not a C-common library, then it probably doesn't have any
// of the *.libs.
//
- if (t != nullptr)
+ if (!t.empty ())
{
optional<dir_paths> usrd; // Extract lazily.
@@ -366,8 +450,8 @@ namespace build2
// Determine the length of the library name fragment as well as
// whether it is a system library. Possible length values are:
//
- // 1 - just the argument itself (-lpthread)
- // 2 - argument and next element (-l pthread, -framework CoreServices)
+ // 1 - just the argument itself (-lm, -pthread)
+ // 2 - argument and next element (-l m, -framework CoreServices)
// 0 - unrecognized/until the end (-Wl,--whole-archive ...)
//
// See similar code in find_system_library().
@@ -398,9 +482,9 @@ namespace build2
{
if (l[0] == '-')
{
- // -l<name>, -l <name>
+ // -l<name>, -l <name>, -pthread
//
- if (l[1] == 'l')
+ if (l[1] == 'l' || l == "-pthread")
{
n = l.size () == 2 ? 2 : 1;
}
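
The fragment sensing shown in this hunk classifies each element of a *.libs value by how many elements make up the library "name". A stand-alone sketch of that classification follows; the -framework case is inferred from the surrounding comment rather than shown in the hunk, so treat it as an assumption.

#include <cstddef>
#include <string>

// Number of elements that make up the library name fragment:
//
//   1 -- just the argument itself (-lm, -pthread, shell32.lib)
//   2 -- argument plus the next element (-l m, -framework CoreServices)
//   0 -- unrecognized/until the end (-Wl,--whole-archive ...)
//
static std::size_t
sense_fragment (const std::string& l)
{
  if (!l.empty () && l[0] == '-')
  {
    if (l.compare (0, 2, "-l") == 0 || l == "-pthread")
      return l.size () == 2 ? 2 : 1;   // "-l <name>" vs "-l<name>"/-pthread.

    if (l == "-framework")             // Assumption based on the comment.
      return 2;

    return 0;                          // For example, -Wl,--whole-archive.
  }

  return 1;                            // shell32.lib, absolute path, etc.
}
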
@@ -427,11 +511,14 @@ namespace build2
return make_pair (n, s);
};
- auto proc_int = [&l, cache, chain,
- &proc_impl, &proc_lib, &proc_lib_name, &proc_opt,
- &sysd, &usrd,
- &find_sysd, &find_linfo, &sense_fragment,
- &bs, a, &li, impl, this] (const lookup& lu)
+ auto proc_intf = [&l, proc_opt_group, cache, chain,
+ &proc_impl, &proc_lib, &proc_lib_name, &proc_opt,
+ &sysd, &usrd,
+ &find_sysd, &find_linfo, &sense_fragment,
+ &bs, a, &li, impl, this] (
+ const lookup& lu,
+ small_vector<const target*, 32>* dedup,
+ size_t dedup_start) // Start of our deps.
{
const vector<name>* ns (cast_null<vector<name>> (lu));
if (ns == nullptr || ns->empty ())
@@ -441,12 +528,15 @@ namespace build2
{
const name& n (*i);
+ // Note: see also recursively-binless logic in link_rule if
+ // changing anything in simple library handling.
+ //
if (n.simple ())
{
- // This is something like -lpthread or shell32.lib so should
- // be a valid path. But it can also be an absolute library
- // path (e.g., something that may come from our
- // .{static/shared}.pc files).
+ // This is something like -lm or shell32.lib so should be a
+ // valid path. But it can also be an absolute library path
+ // (e.g., something that may come from our .{static/shared}.pc
+ // files).
//
if (proc_lib)
{
@@ -471,68 +561,145 @@ namespace build2
if (sysd == nullptr) find_sysd ();
if (!li) find_linfo ();
- const mtime_target& t (
- resolve_library (a,
- bs,
- n,
- (n.pair ? (++i)->dir : dir_path ()),
- *li,
- *sysd, usrd,
- cache));
+ const mtime_target* t;
+ const target* g;
- if (proc_lib)
+ const char* w (nullptr);
+ try
{
- // This can happen if the target is mentioned in
- // *.export.libs (i.e., it is an interface dependency) but
- // not in the library's prerequisites (i.e., it is not an
- // implementation dependency).
+ pair<const mtime_target&, const target*> p (
+ resolve_library (a,
+ bs,
+ n,
+ (n.pair ? (++i)->dir : dir_path ()),
+ *li,
+ *sysd, usrd,
+ cache));
+
+ t = &p.first;
+ g = p.second;
+
+ // Deduplicate.
//
- // Note that we used to just check for path being assigned
- // but on Windows import-installed DLLs may legally have
- // empty paths.
+ // Note that dedup_start makes sure we only consider our
+ // interface dependencies while maintaining the "through"
+ // list.
//
- const char* w (nullptr);
- if (t.ctx.phase == run_phase::match)
+ if (dedup != nullptr)
{
- size_t o (
- t.state[a].task_count.load (memory_order_consume) -
- t.ctx.count_base ());
+ if (find (dedup->begin () + dedup_start,
+ dedup->end (),
+ t) != dedup->end ())
+ {
+ ++i;
+ continue;
+ }
+
+ dedup->push_back (t);
+ }
+ }
+ catch (const non_existent_library& e)
+ {
+ // This is another manifestation of the "mentioned in
+ // *.export.libs but not in prerequisites" case (see below).
+ //
+ t = &e.target;
+ w = "unknown";
+ }
- if (o != target::offset_applied &&
- o != target::offset_executed)
+ // This can happen if the target is mentioned in *.export.libs
+ // (i.e., it is an interface dependency) but not in the
+ // library's prerequisites (i.e., it is not an implementation
+ // dependency).
+ //
+ // Note that we used to just check for path being assigned but
+ // on Windows import-installed DLLs may legally have empty
+ // paths.
+ //
+ if (w != nullptr)
+ ; // See above.
+ else if (l.ctx.phase == run_phase::match)
+ {
+ // We allow not matching installed libraries if all we need
+ // is their options (see compile_rule::apply()).
+ //
+ if (proc_lib || t->base_scope ().root_scope () != nullptr)
+ {
+ if (!t->matched (a))
w = "not matched";
}
- else if (t.mtime () == timestamp_unknown)
- w = "out of date";
-
- if (w != nullptr)
- fail << (impl ? "implementation" : "interface")
- << " dependency " << t << " is " << w <<
- info << "mentioned in *.export." << (impl ? "impl_" : "")
- << "libs of target " << l <<
- info << "is it a prerequisite of " << l << "?";
+ }
+ else
+ {
+ // Note that this check we only do if there is proc_lib
+ // (since it's valid to process library's options before
+ // updating it).
+ //
+ if (proc_lib)
+ {
+ if (t->mtime () == timestamp_unknown)
+ w = "out of date";
+ }
+ }
+
+ if (w != nullptr)
+ {
+ fail << (impl ? "implementation" : "interface")
+ << " dependency " << *t << " is " << w <<
+ info << "mentioned in *.export." << (impl ? "impl_" : "")
+ << "libs of target " << l <<
+ info << "is it a prerequisite of " << l << "?" << endf;
}
// Process it recursively.
//
- // @@ Where can we get the link flags? Should we try to find
- // them in the library's prerequisites? What about
- // installed stuff?
+ bool u;
+ bool la ((u = t->is_a<libux> ()) || t->is_a<liba> ());
+ lflags lf (0);
+
+ // If this is a static library, see if we need to link it
+ // whole.
//
- process_libraries (a, bs, *li, *sysd,
- t, t.is_a<liba> () || t.is_a<libux> (), 0,
- proc_impl, proc_lib, proc_opt, true,
- cache, chain);
+ if (la && proc_lib)
+ {
+ // Note: go straight for the public variable pool.
+ //
+ const variable& var (t->ctx.var_pool["bin.whole"]);
+
+ // See the link rule for the lookup semantics.
+ //
+ lookup l (
+ t->lookup_original (var, true /* target_only */).first);
+
+ if (l ? cast<bool> (*l) : u)
+ lf |= lflag_whole;
+ }
+
+ process_libraries_impl (
+ a, bs, *li, *sysd,
+ g, *t, la, lf,
+ proc_impl, proc_lib, proc_opt,
+ true /* self */, proc_opt_group,
+ cache, chain, dedup);
}
++i;
}
};
+ auto proc_intf_storage = [&proc_intf] (const lookup& lu1,
+ const lookup& lu2 = lookup ())
+ {
+ small_vector<const target*, 32> dedup_storage;
+
+ if (lu1) proc_intf (lu1, &dedup_storage, 0);
+ if (lu2) proc_intf (lu2, &dedup_storage, 0);
+ };
+
// Process libraries from *.libs (of type strings).
//
- auto proc_imp = [&proc_lib, &proc_lib_name,
- &sense_fragment] (const lookup& lu)
+ auto proc_impl = [&proc_lib, &proc_lib_name,
+ &sense_fragment] (const lookup& lu)
{
const strings* ns (cast_null<strings> (lu));
if (ns == nullptr || ns->empty ())
@@ -540,8 +707,8 @@ namespace build2
for (auto i (ns->begin ()), e (ns->end ()); i != e; )
{
- // This is something like -lpthread or shell32.lib so should be
- // a valid path.
+ // This is something like -lm or shell32.lib so should be a
+ // valid path.
//
pair<size_t, bool> r (sense_fragment (*i));
@@ -564,10 +731,26 @@ namespace build2
//
if (cc)
{
- if (c_e_libs) proc_int (c_e_libs);
+ if (impl)
+ {
+ if (c_e_libs) proc_intf (c_e_libs, nullptr, 0);
+ }
+ else
+ {
+ if (c_e_libs)
+ {
+ if (dedup != nullptr)
+ proc_intf (c_e_libs, dedup, dedup->size ());
+ else
+ proc_intf_storage (c_e_libs);
+ }
+ }
}
else
{
+ // Note: see also recursively-binless logic in link_rule if
+ // changing anything here.
+
if (impl)
{
// Interface and implementation: as discussed above, we can have
@@ -575,8 +758,12 @@ namespace build2
//
if (c_e_libs.defined () || x_e_libs.defined ())
{
- if (c_e_libs) proc_int (c_e_libs);
- if (x_e_libs) proc_int (x_e_libs);
+          // Note that we call proc_intf() rather than proc_impl() on
+          // *.impl_libs because proc_impl() expects strings, not names
+          // (and proc_intf() handles the impl case).
+ //
+ if (c_e_libs) proc_intf (c_e_libs, nullptr, 0);
+ if (x_e_libs) proc_intf (x_e_libs, nullptr, 0);
}
else
{
@@ -590,9 +777,9 @@ namespace build2
//
if (proc_lib)
{
- const variable& v (same ? x_libs : vp[*t + ".libs"]);
- proc_imp (l.lookup_original (c_libs, false, &bs).first);
- proc_imp (l.lookup_original (v, false, &bs).first);
+ const variable& v (same ? x_libs : vp[t + ".libs"]);
+ proc_impl (l.lookup_original (c_libs, false, &bs).first);
+ proc_impl (l.lookup_original (v, false, &bs).first);
}
}
}
@@ -600,8 +787,18 @@ namespace build2
{
// Interface: only add *.export.* (interface dependencies).
//
- if (c_e_libs) proc_int (c_e_libs);
- if (x_e_libs) proc_int (x_e_libs);
+ if (c_e_libs.defined () || x_e_libs.defined ())
+ {
+ if (dedup != nullptr)
+ {
+ size_t s (dedup->size ()); // Start of our interface deps.
+
+ if (c_e_libs) proc_intf (c_e_libs, dedup, s);
+ if (x_e_libs) proc_intf (x_e_libs, dedup, s);
+ }
+ else
+ proc_intf_storage (c_e_libs, x_e_libs);
+ }
}
}
}
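
The deduplication added above boils down to a single growing list of interface dependencies plus a per-library start offset: only the tail that belongs to the current library's own interface entries is searched, so a library can still legitimately appear via different "through" paths. A minimal standalone sketch of that idea, using plain std types and a hypothetical helper name rather than build2's small_vector:

#include <algorithm>
#include <cstddef>
#include <vector>

// Append a dependency unless it already appears at or past 'start' (the
// beginning of the current library's interface entries). Returns false if
// it was a duplicate and should be skipped.
//
template <typename T>
bool
append_dedup (std::vector<const T*>& dedup, std::size_t start, const T* t)
{
  if (std::find (dedup.begin () + static_cast<std::ptrdiff_t> (start),
                 dedup.end (),
                 t) != dedup.end ())
    return false;

  dedup.push_back (t);
  return true;
}
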
@@ -628,9 +825,14 @@ namespace build2
//
// If li is absent, then don't pick the liba/libs{} member, returning the
// lib{} target itself. If li is present, then the returned target is
- // always a file.
+ // always a file. The second half of the returned pair is the group, if
+ // the member was picked.
+ //
+ // Note: paths in sysd/usrd are expected to be absolute and normalized.
//
- const mtime_target& common::
+ // Note: may throw non_existent_library.
+ //
+ pair<const mtime_target&, const target*> common::
resolve_library (action a,
const scope& s,
const name& cn,
@@ -651,7 +853,8 @@ namespace build2
// large number of times (see Boost for an extreme example of this).
//
// Note also that for non-utility libraries we know that only the link
- // order from linfo is used.
+ // order from linfo is used. While not caching it and always picking an
+ // alternative could also work, we cache it to avoid the lookup.
//
if (cache != nullptr)
{
@@ -671,7 +874,7 @@ namespace build2
}));
if (i != cache->end ())
- return i->lib;
+ return pair<const mtime_target&, const target*> {i->lib, i->group};
}
else
cache = nullptr; // Do not cache.
@@ -710,29 +913,36 @@ namespace build2
fail << "unable to find library " << pk;
}
- // If this is lib{}/libu*{}, pick appropriate member unless we were
+ // If this is lib{}/libul{}, pick appropriate member unless we were
// instructed not to.
//
+ const target* g (nullptr);
if (li)
{
if (const libx* l = xt->is_a<libx> ())
+ {
+ g = xt;
xt = link_member (*l, a, *li); // Pick lib*{e,a,s}{}.
+ }
}
auto& t (xt->as<mtime_target> ());
if (cache != nullptr)
- cache->push_back (library_cache_entry {lo, cn.type, cn.value, t});
+ cache->push_back (library_cache_entry {lo, cn.type, cn.value, t, g});
- return t;
+ return pair<const mtime_target&, const target*> {t, g};
}
- // Note that pk's scope should not be NULL (even if dir is absolute).
+ // Action should be absent if called during the load phase. Note that pk's
+ // scope should not be NULL (even if dir is absolute).
+ //
+ // Note: paths in sysd/usrd are expected to be absolute and normalized.
//
// Note: see similar logic in find_system_library().
//
target* common::
- search_library (action act,
+ search_library (optional<action> act,
const dir_paths& sysd,
optional<dir_paths>& usrd,
const prerequisite_key& p,
@@ -740,7 +950,7 @@ namespace build2
{
tracer trace (x, "search_library");
- assert (p.scope != nullptr);
+ assert (p.scope != nullptr && (!exist || act));
context& ctx (p.scope->ctx);
const scope& rs (*p.scope->root_scope ());
@@ -857,6 +1067,21 @@ namespace build2
{
context& ctx (p.scope->ctx);
+ // Whether to look for a binless variant using the common .pc file
+ // (see below).
+ //
+ // Normally we look for a binless version if the binful one was not
+ // found. However, sometimes we may find what looks like a binful
+ // library but on a closer examination realize that there is something
+ // wrong with it (for example, it's not a Windows import library). In
+ // such cases we want to omit looking for a binless library using the
+ // common .pc file since it most likely corresponds to the binful
+      // library (and we may end up in an infinite loop trying to resolve
+ // itself).
+ //
+ bool ba (true);
+ bool bs (true);
+
timestamp mt;
// libs
@@ -928,6 +1153,31 @@ namespace build2
s->path_mtime (move (f), mt);
}
}
+ else if (!ext && tsys == "darwin")
+ {
+ // Besides .dylib, Mac OS now also has "text-based stub libraries"
+ // that use the .tbd extension. They appear to be similar to
+ // Windows import libraries and contain information such as the
+ // location of the .dylib library, its symbols, etc. For example,
+ // there is /Library/.../MacOSX13.3.sdk/usr/lib/libsqlite3.tbd
+ // which points to /usr/lib/libsqlite3.dylib (but which itself is
+ // invisible/inaccessible, presumably for security).
+ //
+ // Note that for now we are treating the .tbd library as the
+ // shared library but could probably do the more elaborate dance
+ // with ad hoc members like on Windows if really necessary.
+ //
+ se = string ("tbd");
+ f = f.base (); // Remove .dylib.
+ f += ".tbd";
+ mt = mtime (f);
+
+ if (mt != timestamp_nonexistent)
+ {
+ insert_library (ctx, s, name, d, ld, se, exist, trace);
+ s->path_mtime (move (f), mt);
+ }
+ }
}
// liba
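
A standalone illustration of the probing order described in the .tbd comment above, using std::filesystem in place of build2's path/mtime machinery (the helper name is hypothetical): if the .dylib is absent, the text-based stub with the same base name is tried and, if present, treated as the shared library.

#include <filesystem>
#include <optional>

static std::optional<std::filesystem::path>
probe_darwin_shared (const std::filesystem::path& dylib)
{
  namespace fs = std::filesystem;

  if (fs::exists (dylib))
    return dylib;

  fs::path tbd (dylib);
  tbd.replace_extension (".tbd"); // libsqlite3.dylib -> libsqlite3.tbd

  if (fs::exists (tbd))
    return tbd;

  return std::nullopt;
}
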
@@ -957,10 +1207,24 @@ namespace build2
if (tsys == "win32-msvc")
{
if (s == nullptr && !sn.empty ())
- s = msvc_search_shared (ld, d, p, exist);
+ {
+ pair<libs*, bool> r (msvc_search_shared (ld, d, p, exist));
+
+ if (r.first != nullptr)
+ s = r.first;
+ else if (!r.second)
+ bs = false;
+ }
if (a == nullptr && !an.empty ())
- a = msvc_search_static (ld, d, p, exist);
+ {
+ pair<liba*, bool> r (msvc_search_static (ld, d, p, exist));
+
+ if (r.first != nullptr)
+ a = r.first;
+ else if (!r.second)
+ ba = false;
+ }
}
// Look for binary-less libraries via pkg-config .pc files. Note that
@@ -977,7 +1241,10 @@ namespace build2
// is no binful variant.
//
pair<path, path> r (
- pkgconfig_search (d, p.proj, name, na && ns /* common */));
+ pkgconfig_search (d,
+ p.proj,
+ name,
+ na && ns && ba && bs /* common */));
if (na && !r.first.empty ())
{
@@ -1030,6 +1297,8 @@ namespace build2
          // making it the only one to allow things to be overridden (e.g.,
// if build2 was moved or some such).
//
+ // Note: build_install_lib is already normalized.
+ //
usrd->insert (usrd->begin (), build_install_lib);
}
}
@@ -1082,20 +1351,87 @@ namespace build2
if (exist)
return r;
- // If we cannot acquire the lock then this mean the target has already
- // been matched and we assume all of this has already been done.
+ // Try to extract library information from pkg-config. We only add the
+ // default macro if we could not extract more precise information. The
+ // idea is that in .pc files that we generate, we copy those macros (or
+ // custom ones) from *.export.poptions.
+ //
+ // @@ Should we add .pc files as ad hoc members so pkgconfig_save() can
+ // use their names when deriving -l-names (this would be especially
+ // helpful for binless libraries to get hold of prefix/suffix, etc).
//
- auto lock = [act] (const target* t) -> target_lock
+ auto load_pc = [this, &trace,
+ act, &p, &name,
+ &sysd, &usrd,
+ pd, &pc, lt, a, s] (pair<bool, bool> metaonly)
{
- auto l (t != nullptr ? build2::lock (act, *t, true) : target_lock ());
+ l5 ([&]{trace << "loading pkg-config information during "
+ << (act ? "match" : "load") << " for "
+ << (a != nullptr ? "static " : "")
+ << (s != nullptr ? "shared " : "")
+ << "member(s) of " << *lt << "; metadata only: "
+ << metaonly.first << " " << metaonly.second;});
+
+ // Add the "using static/shared library" macro (used, for example, to
+ // handle DLL export). The absence of either of these macros would
+ // mean some other build system that cannot distinguish between the
+ // two (and no pkg-config information).
+ //
+ auto add_macro = [this] (target& t, const char* suffix)
+ {
+ // If there is already a value (either in cc.export or x.export),
+ // don't add anything: we don't want to be accumulating defines nor
+ // messing with custom values. And if we are adding, then use the
+ // generic cc.export.
+ //
+ // The only way we could already have this value is if this same
+ // library was also imported as a project (as opposed to installed).
+ // Unlikely but possible. In this case the values were set by the
+ // export stub and we shouldn't touch them.
+ //
+ if (!t.vars[x_export_poptions])
+ {
+ auto p (t.vars.insert (c_export_poptions));
- if (l && l.offset == target::offset_matched)
+ if (p.second)
+ {
+ // The "standard" macro name will be LIB<NAME>_{STATIC,SHARED},
+ // where <name> is the target name. Here we want to strike a
+ // balance between being unique and not too noisy.
+ //
+ string d ("-DLIB");
+
+ d += sanitize_identifier (
+ ucase (const_cast<const string&> (t.name)));
+
+ d += '_';
+ d += suffix;
+
+ strings o;
+ o.push_back (move (d));
+ p.first = move (o);
+ }
+ }
+ };
+
+ if (pc.first.empty () && pc.second.empty ())
{
- assert ((*t)[act].rule == &file_rule::rule_match);
- l.unlock ();
+ if (!pkgconfig_load (act, *p.scope,
+ *lt, a, s,
+ p.proj, name,
+ *pd, sysd, *usrd,
+ metaonly))
+ {
+ if (a != nullptr && !metaonly.first) add_macro (*a, "STATIC");
+ if (s != nullptr && !metaonly.second) add_macro (*s, "SHARED");
+ }
}
-
- return l;
+ else
+ pkgconfig_load (act, *p.scope,
+ *lt, a, s,
+ pc,
+ *pd, sysd, *usrd,
+ metaonly);
};
// Mark as a "cc" library (unless already marked) and set the system
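
For illustration, the add_macro lambda above derives the macro from the target name via ucase() and sanitize_identifier(); a target named sqlite3, for example, ends up with -DLIBSQLITE3_STATIC or -DLIBSQLITE3_SHARED in cc.export.poptions. A rough standalone approximation of that derivation (the helper below is hypothetical, not build2's sanitize_identifier()):

#include <cctype>
#include <string>

// Approximate LIB<NAME>_{STATIC,SHARED}: uppercase the target name and
// replace anything that is not a valid identifier character with '_'.
//
static std::string
lib_macro (const std::string& name, const char* suffix)
{
  std::string d ("-DLIB");

  for (char c: name)
  {
    unsigned char u (static_cast<unsigned char> (c));
    d += std::isalnum (u) ? static_cast<char> (std::toupper (u)) : '_';
  }

  d += '_';
  d += suffix; // "STATIC" or "SHARED".

  return d;
}
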
@@ -1116,6 +1452,85 @@ namespace build2
return p.second;
};
+ // Deal with the load phase case. The rest is already hairy enough so
+ // let's not try to weave this logic into that.
+ //
+ if (!act)
+ {
+ assert (ctx.phase == run_phase::load);
+
+ // The overall idea here is to set everything up so that the default
+ // file_rule matches the returned targets, the same way as it would if
+ // multiple operations were executed for the match phase (see below).
+ //
+ // Note however, that there is no guarantee that we won't end up in
+ // the match phase code below even after loading things here. For
+ // example, the same library could be searched from pkgconfig_load()
+ // if specified as -l. And if we try to re-assign group members, then
+ // that would be a race condition. So we use the cc mark to detect
+ // this.
+ //
+ timestamp mt (timestamp_nonexistent);
+ if (a != nullptr) {lt->a = a; a->group = lt; mt = a->mtime ();}
+ if (s != nullptr) {lt->s = s; s->group = lt; mt = s->mtime ();}
+
+ // @@ TODO: we currently always reload pkgconfig for lt (and below).
+ //
+ mark_cc (*lt);
+ lt->mtime (mt); // Note: problematic, see below for details.
+
+ // We can only load metadata from here since we can only do this
+ // during the load phase. But it's also possible that a racing match
+ // phase already found and loaded this library without metadata. So
+ // looks like the only way is to load the metadata incrementally. We
+      // can base this decision on the presence/absence of cc.type and
+ // export.metadata.
+ //
+ pair<bool, bool> metaonly {false, false};
+
+ if (a != nullptr && !mark_cc (*a))
+ {
+ if (a->vars[ctx.var_export_metadata])
+ a = nullptr;
+ else
+ metaonly.first = true;
+ }
+
+ if (s != nullptr && !mark_cc (*s))
+ {
+ if (s->vars[ctx.var_export_metadata])
+ s = nullptr;
+ else
+ metaonly.second = true;
+ }
+
+ // Try to extract library information from pkg-config.
+ //
+ if (a != nullptr || s != nullptr)
+ load_pc (metaonly);
+
+ return r;
+ }
+
+ // If we cannot acquire the lock then this mean the target has already
+ // been matched and we assume all of this has already been done.
+ //
+ auto lock = [a = *act] (const target* t) -> target_lock
+ {
+ auto l (t != nullptr ? build2::lock (a, *t, true) : target_lock ());
+
+ if (l && l.offset == target::offset_matched)
+ {
+ assert ((*t)[a].rule == &file_rule::rule_match);
+ l.unlock ();
+ }
+
+ return l;
+ };
+
+ target_lock al (lock (a));
+ target_lock sl (lock (s));
+
target_lock ll (lock (lt));
// Set lib{} group members to indicate what's available. Note that we
@@ -1125,96 +1540,41 @@ namespace build2
timestamp mt (timestamp_nonexistent);
if (ll)
{
- if (s != nullptr) {lt->s = s; mt = s->mtime ();}
- if (a != nullptr) {lt->a = a; mt = a->mtime ();}
-
// Mark the group since sometimes we use it itself instead of one of
- // the liba/libs{} members (see process_libraries() for details).
+ // the liba/libs{} members (see process_libraries_impl() for details).
//
- mark_cc (*lt);
+ // If it's already marked, then it could have been imported during
+ // load (see above).
+ //
+ // @@ TODO: we currently always reload pkgconfig for lt (and above).
+ // Maybe pass NULL lt to pkgconfig_load() in this case?
+ //
+ if (mark_cc (*lt))
+ {
+ if (a != nullptr) {lt->a = a; mt = a->mtime ();}
+ if (s != nullptr) {lt->s = s; mt = s->mtime ();}
+ }
+ else
+ ll.unlock ();
}
- target_lock al (lock (a));
- target_lock sl (lock (s));
-
if (!al) a = nullptr;
if (!sl) s = nullptr;
- if (a != nullptr) a->group = lt;
- if (s != nullptr) s->group = lt;
-
- // If the library already has cc.type, then assume it was either
- // already imported or was matched by a rule.
+ // If the library already has cc.type, then assume it was either already
+ // imported (e.g., during load) or was matched by a rule.
//
if (a != nullptr && !mark_cc (*a)) a = nullptr;
if (s != nullptr && !mark_cc (*s)) s = nullptr;
- // Add the "using static/shared library" macro (used, for example, to
- // handle DLL export). The absence of either of these macros would
- // mean some other build system that cannot distinguish between the
- // two (and no pkg-config information).
- //
- auto add_macro = [this] (target& t, const char* suffix)
- {
- // If there is already a value (either in cc.export or x.export),
- // don't add anything: we don't want to be accumulating defines nor
- // messing with custom values. And if we are adding, then use the
- // generic cc.export.
- //
- // The only way we could already have this value is if this same
- // library was also imported as a project (as opposed to installed).
- // Unlikely but possible. In this case the values were set by the
- // export stub and we shouldn't touch them.
- //
- if (!t.vars[x_export_poptions])
- {
- auto p (t.vars.insert (c_export_poptions));
-
- if (p.second)
- {
- // The "standard" macro name will be LIB<NAME>_{STATIC,SHARED},
- // where <name> is the target name. Here we want to strike a
- // balance between being unique and not too noisy.
- //
- string d ("-DLIB");
-
- d += sanitize_identifier (
- ucase (const_cast<const string&> (t.name)));
-
- d += '_';
- d += suffix;
-
- strings o;
- o.push_back (move (d));
- p.first = move (o);
- }
- }
- };
+ if (a != nullptr) a->group = lt;
+ if (s != nullptr) s->group = lt;
if (ll && (a != nullptr || s != nullptr))
{
- // Try to extract library information from pkg-config. We only add the
- // default macro if we could not extract more precise information. The
- // idea is that in .pc files that we generate, we copy those macros
- // (or custom ones) from *.export.poptions.
- //
- // @@ Should we add .pc files as ad hoc members so pkconfig_save() can
- // use their names when deriving -l-names (this would be expecially
- // helpful for binless libraries to get hold of prefix/suffix, etc).
+ // Try to extract library information from pkg-config.
//
- if (pc.first.empty () && pc.second.empty ())
- {
- if (!pkgconfig_load (act, *p.scope,
- *lt, a, s,
- p.proj, name,
- *pd, sysd, *usrd))
- {
- if (a != nullptr) add_macro (*a, "STATIC");
- if (s != nullptr) add_macro (*s, "SHARED");
- }
- }
- else
- pkgconfig_load (act, *p.scope, *lt, a, s, pc, *pd, sysd, *usrd);
+ load_pc ({false, false} /* metaonly */);
}
// If we have the lock (meaning this is the first time), set the matched
@@ -1227,10 +1587,38 @@ namespace build2
//
// Note also that these calls clear target data.
//
- if (al) match_rule (al, file_rule::rule_match);
- if (sl) match_rule (sl, file_rule::rule_match);
+ if (a != nullptr) match_rule (al, file_rule::rule_match);
+ if (s != nullptr) match_rule (sl, file_rule::rule_match);
if (ll)
{
+ // @@ Turns out this has a problem: file_rule won't match/execute
+ // group members. So what happens is that if we have two installed
+ // libraries, say lib{build2} that depends on lib{butl}, then
+ // lib{build2} will have lib{butl} as a prerequisite and file_rule
+ // that matches lib{build2} will update lib{butl} (also matched by
+ // file_rule), but not its members. Later, someone (for example,
+ // the newer() call in append_libraries()) will pick one of the
+ // members assuming it is executed and things will go sideways.
+ //
+ // For now we hacked around the issue but the long term solution is
+ // probably to add to the bin module a special rule that is
+ // registered on the global scope and matches the installed lib{}
+ // targets. This rule will have to both update prerequisites like
+ // the file_rule and group members like the lib_rule (or maybe it
+      //   can skip prerequisites since one of the members will do that; in
+      //   which case we may be able to reuse lib_rule, maybe with the
+      //   "all members" flag or some such). A few additional
+ // notes/thoughts:
+ //
+ // - Will be able to stop inheriting lib{} from mtime_target.
+ //
+ // - Will need to register for perform_update/clean like in context
+ // as well as for configure as in the config module (feels like
+ // shouldn't need to register for dist).
+ //
+ // - Will need to test batches, immediate import thoroughly (this
+ // stuff is notoriously tricky to get right in all situations).
+ //
match_rule (ll, file_rule::rule_match);
// Also bless the library group with a "trust me it exists" timestamp.
@@ -1239,6 +1627,8 @@ namespace build2
// won't match.
//
lt->mtime (mt);
+
+ ll.unlock (); // Unlock group before members, for good measure.
}
return r;
@@ -1280,5 +1670,85 @@ namespace build2
return r;
}
+
+ void common::
+ append_diag_color_options (cstrings& args) const
+ {
+ switch (cclass)
+ {
+ case compiler_class::msvc:
+ {
+ // MSVC has the /diagnostics: option which has an undocumented value
+ // `color`. It's unclear from which version of MSVC this value is
+ // supported, but it works in 17.0, so let's start from there.
+ //
+ // Note that there is currently no way to disable color in the MSVC
+ // diagnostics specifically (the /diagnostics:* option values are
+ // cumulative and there doesn't seem to be a `color-` value). This
+ // is probably not a big deal since one can just disable the color
+ // globally (--no-diag-color).
+ //
+ // Note that clang-cl appears to use -fansi-escape-codes. See GH
+ // issue #312 for background.
+ //
+ if (show_diag_color ())
+ {
+ if (cvariant.empty () &&
+ (cmaj > 19 || (cmaj == 19 && cmin >= 30)))
+ {
+ // Check for the prefix in case /diagnostics:color- gets added
+ // eventually.
+ //
+ if (!find_option_prefixes ({"/diagnostics:color",
+ "-diagnostics:color"}, args))
+ {
+ args.push_back ("/diagnostics:color");
+ }
+ }
+ }
+
+ break;
+ }
+ case compiler_class::gcc:
+ {
+ // Enable/disable diagnostics color unless a custom option is
+ // specified.
+ //
+ // Supported from GCC 4.9 (8.1 on Windows) and (at least) from Clang
+        // 3.5. Clang supports -f[no]color-diagnostics in addition to
+        // GCC's spelling.
+ //
+ if (
+#ifndef _WIN32
+ ctype == compiler_type::gcc ? cmaj > 4 || (cmaj == 4 && cmin >= 9) :
+#else
+ ctype == compiler_type::gcc ? cmaj > 8 || (cmaj == 8 && cmin >= 1) :
+#endif
+ ctype == compiler_type::clang ? cmaj > 3 || (cmaj == 3 && cmin >= 5) :
+ false)
+ {
+ if (!(find_option_prefix ("-fdiagnostics-color", args) ||
+ find_option ("-fno-diagnostics-color", args) ||
+ find_option ("-fdiagnostics-plain-output", args) ||
+ (ctype == compiler_type::clang &&
+ (find_option ("-fcolor-diagnostics", args) ||
+ find_option ("-fno-color-diagnostics", args)))))
+ {
+ // Omit -fno-diagnostics-color if stderr is not a terminal (we
+ // know there will be no color in this case and the option will
+ // just add noise, for example, in build logs).
+ //
+ if (const char* o = (
+ show_diag_color () ? "-fdiagnostics-color" :
+ stderr_term ? "-fno-diagnostics-color" :
+ nullptr))
+ args.push_back (o);
+ }
+ }
+
+ break;
+ }
+ }
+ }
}
}
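
To make the GCC-class branch above easier to follow, here is a hedged standalone rendering of the decision it makes once the version check passes (Clang's alternative -f[no-]color-diagnostics spellings are omitted for brevity; the helper name and use of std::vector are illustrative only):

#include <string>
#include <vector>

// Pick the diagnostics color option: respect an explicit user option,
// enable color when requested, and only bother disabling it when stderr
// is a terminal (otherwise there would be no color anyway).
//
static const char*
pick_diag_color (const std::vector<std::string>& args,
                 bool want_color,
                 bool stderr_term)
{
  for (const std::string& a: args)
  {
    if (a.compare (0, 19, "-fdiagnostics-color") == 0 ||
        a == "-fno-diagnostics-color"                 ||
        a == "-fdiagnostics-plain-output")
      return nullptr; // The user has already decided.
  }

  return want_color  ? "-fdiagnostics-color"    :
         stderr_term ? "-fno-diagnostics-color" :
         nullptr;
}
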
diff --git a/libbuild2/cc/common.hxx b/libbuild2/cc/common.hxx
index 78442f8..cb85632 100644
--- a/libbuild2/cc/common.hxx
+++ b/libbuild2/cc/common.hxx
@@ -32,10 +32,12 @@ namespace build2
{
lang x_lang;
- const char* x; // Module name ("c", "cxx").
- const char* x_name; // Compiler name ("c", "c++").
- const char* x_default; // Compiler default ("gcc", "g++").
- const char* x_pext; // Preprocessed source extension (".i", ".ii").
+ const char* x; // Module name ("c", "cxx").
+ const char* x_name; // Compiler name ("c", "c++").
+ const char* x_obj_name; // Same for Objective-X ("obj-c", "obj-c++").
+ const char* x_default; // Compiler default ("gcc", "g++").
+ const char* x_pext; // Preprocessed source extension (".i", ".ii").
+ const char* x_obj_pext; // Same for Objective-X (".mi", ".mii").
// Array of modules that can hint us the toolchain, terminate with
// NULL.
@@ -102,6 +104,9 @@ namespace build2
const variable& c_export_libs;
const variable& c_export_impl_libs;
+ const variable& c_pkgconfig_include;
+ const variable& c_pkgconfig_lib;
+
const variable& x_stdlib; // x.stdlib
const variable& c_runtime; // cc.runtime
@@ -112,6 +117,7 @@ namespace build2
const variable& c_module_name; // cc.module_name
const variable& c_importable; // cc.importable
const variable& c_reprocess; // cc.reprocess
+ const variable& c_serialize; // cc.serialize
const variable& x_preprocessed; // x.preprocessed
const variable* x_symexport; // x.features.symexport
@@ -153,14 +159,14 @@ namespace build2
struct data: config_data
{
- const char* x_compile; // Rule names.
- const char* x_link;
- const char* x_install;
- const char* x_uninstall;
+ string x_compile; // Rule names.
+ string x_link;
+ string x_install;
// Cached values for some commonly-used variables/values.
//
+ const compiler_id& cid; // x.id
compiler_type ctype; // x.id.type
const string& cvariant; // x.id.variant
compiler_class cclass; // x.class
@@ -194,34 +200,68 @@ namespace build2
build2::cc::importable_headers* importable_headers;
// The order of sys_*_dirs is the mode entries first, followed by the
- // compiler built-in entries, and finished off with any extra entries
- // (e.g., fallback directories such as /usr/local/*).
+ // extra entries (e.g., /usr/local/*), followed by the compiler built-in
+ // entries.
+ //
+ // Note that even if we wanted to, we wouldn't be able to support extra
+ // trailing (after built-in) directories since we would need a portable
+ // equivalent of -idirafter for both headers and libraries.
//
const dir_paths& sys_lib_dirs; // x.sys_lib_dirs
const dir_paths& sys_hdr_dirs; // x.sys_hdr_dirs
const dir_paths* sys_mod_dirs; // compiler_info::sys_mod_dirs
- size_t sys_lib_dirs_mode; // Number of leading mode entries (0 if none).
+ size_t sys_lib_dirs_mode; // Number of mode entries (0 if none).
size_t sys_hdr_dirs_mode;
size_t sys_mod_dirs_mode;
- size_t sys_lib_dirs_extra; // First trailing extra entry (size if none).
+ size_t sys_lib_dirs_extra; // Number of extra entries (0 if none).
size_t sys_hdr_dirs_extra;
+ // Note that x_obj is patched in by the x.objx module. So it stays NULL
+ // if Objective-X compilation is not enabled. Similarly for x_asp except
+ // here we don't have duality and it's purely to signal (by the c.as-cpp
+ // module) that it's enabled.
+ //
const target_type& x_src; // Source target type (c{}, cxx{}).
const target_type* x_mod; // Module target type (mxx{}), if any.
+ const target_type& x_inc; // Includable base target type (e.g., c_inc{}).
+ const target_type* x_obj; // Objective-X target type (m{}, mm{}).
+ const target_type* x_asp; // Assembler with CPP target type (S{}).
+
+ // Check if an object (target, prerequisite, etc) is an Objective-X
+ // source.
+ //
+ template <typename T>
+ bool
+ x_objective (const T& t) const
+ {
+ return x_obj != nullptr && t.is_a (*x_obj);
+ }
+
+ // Check if an object (target, prerequisite, etc) is an Assembler with
+ // C preprocessor source.
+ //
+ template <typename T>
+ bool
+ x_assembler_cpp (const T& t) const
+ {
+ return x_asp != nullptr && t.is_a (*x_asp);
+ }
// Array of target types that are considered the X-language headers
// (excluding h{} except for C). Keep them in the most likely to appear
// order with the "real header" first and terminated with NULL.
//
- const target_type* const* x_hdr;
+ const target_type* const* x_hdrs;
+ // Check if an object (target, prerequisite, etc) is a header.
+ //
template <typename T>
bool
x_header (const T& t, bool c_hdr = true) const
{
- for (const target_type* const* ht (x_hdr); *ht != nullptr; ++ht)
+ for (const target_type* const* ht (x_hdrs); *ht != nullptr; ++ht)
if (t.is_a (**ht))
return true;
@@ -232,7 +272,7 @@ namespace build2
// extensions to target types. Keep them in the most likely to appear
// order and terminate with NULL.
//
- const target_type* const* x_inc;
+ const target_type* const* x_incs;
// Aggregate-like constructor with from-base support.
//
@@ -240,9 +280,7 @@ namespace build2
const char* compile,
const char* link,
const char* install,
- const char* uninstall,
- compiler_type ct,
- const string& cv,
+ const compiler_id& ci,
compiler_class cl,
uint64_t mj, uint64_t mi,
uint64_t vmj, uint64_t vmi,
@@ -261,14 +299,14 @@ namespace build2
size_t sle, size_t she,
const target_type& src,
const target_type* mod,
- const target_type* const* hdr,
- const target_type* const* inc)
+ const target_type& inc,
+ const target_type* const* hdrs,
+ const target_type* const* incs)
: config_data (cd),
x_compile (compile),
x_link (link),
x_install (install),
- x_uninstall (uninstall),
- ctype (ct), cvariant (cv), cclass (cl),
+ cid (ci), ctype (ci.type), cvariant (ci.variant), cclass (cl),
cmaj (mj), cmin (mi),
cvmaj (vmj), cvmin (vmi),
cpath (path), cmode (mode),
@@ -283,7 +321,9 @@ namespace build2
sys_lib_dirs_mode (slm), sys_hdr_dirs_mode (shm),
sys_mod_dirs_mode (smm),
sys_lib_dirs_extra (sle), sys_hdr_dirs_extra (she),
- x_src (src), x_mod (mod), x_hdr (hdr), x_inc (inc) {}
+ x_src (src), x_mod (mod), x_inc (inc),
+ x_obj (nullptr), x_asp (nullptr),
+ x_hdrs (hdrs), x_incs (incs) {}
};
class LIBBUILD2_CC_SYMEXPORT common: public data
@@ -300,10 +340,16 @@ namespace build2
string type; // name::type
string value; // name::value
reference_wrapper<const mtime_target> lib;
+ const target* group;
};
using library_cache = small_vector<library_cache_entry, 32>;
+ // The prerequisite_target::include bit that indicates a library
+ // member has been picked from the group.
+ //
+ static const uintptr_t include_group = 0x100;
+
void
process_libraries (
action,
@@ -319,8 +365,29 @@ namespace build2
lflags, const string*, bool)>&,
const function<bool (const target&, const string&, bool, bool)>&,
bool = false,
- library_cache* = nullptr,
- small_vector<const target*, 24>* = nullptr) const;
+ bool = false,
+ library_cache* = nullptr) const;
+
+ void
+ process_libraries_impl (
+ action,
+ const scope&,
+ optional<linfo>,
+ const dir_paths&,
+ const target*,
+ const mtime_target&,
+ bool,
+ lflags,
+ const function<bool (const target&, bool)>&,
+ const function<bool (const target* const*,
+ const small_vector<reference_wrapper<const string>, 2>&,
+ lflags, const string*, bool)>&,
+ const function<bool (const target&, const string&, bool, bool)>&,
+ bool,
+ bool,
+ library_cache*,
+ small_vector<const target*, 32>*,
+ small_vector<const target*, 32>*) const;
const target*
search_library (action a,
@@ -347,7 +414,7 @@ namespace build2
}
public:
- const mtime_target&
+ pair<const mtime_target&, const target*>
resolve_library (action,
const scope&,
const name&,
@@ -357,6 +424,11 @@ namespace build2
optional<dir_paths>&,
library_cache* = nullptr) const;
+ struct non_existent_library
+ {
+ const mtime_target& target;
+ };
+
template <typename T>
static ulock
insert_library (context&,
@@ -369,7 +441,7 @@ namespace build2
tracer&);
target*
- search_library (action,
+ search_library (optional<action>,
const dir_paths&,
optional<dir_paths>&,
const prerequisite_key&,
@@ -389,13 +461,16 @@ namespace build2
// Alternative search logic for VC (msvc.cxx).
//
- bin::liba*
+ // The second half is false if we should poison the binless search via
+ // the common .pc file.
+ //
+ pair<bin::liba*, bool>
msvc_search_static (const process_path&,
const dir_path&,
const prerequisite_key&,
bool existing) const;
- bin::libs*
+ pair<bin::libs*, bool>
msvc_search_shared (const process_path&,
const dir_path&,
const prerequisite_key&,
@@ -415,21 +490,28 @@ namespace build2
bool) const;
void
- pkgconfig_load (action, const scope&,
+ pkgconfig_load (optional<action>, const scope&,
bin::lib&, bin::liba*, bin::libs*,
const pair<path, path>&,
const dir_path&,
const dir_paths&,
- const dir_paths&) const;
+ const dir_paths&,
+ pair<bool, bool>) const;
bool
- pkgconfig_load (action, const scope&,
+ pkgconfig_load (optional<action>, const scope&,
bin::lib&, bin::liba*, bin::libs*,
const optional<project_name>&,
const string&,
const dir_path&,
const dir_paths&,
- const dir_paths&) const;
+ const dir_paths&,
+ pair<bool, bool>) const;
+
+ // Append compiler-specific diagnostics color options as necessary.
+ //
+ void
+ append_diag_color_options (cstrings&) const;
};
}
}
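
The include_group constant declared above reserves a spare bit in the prerequisite's include mask to record that a liba/libs{} member was picked from its lib{} group. A minimal sketch of how such a marker bit is set and queried (the helper names are hypothetical; only the 0x100 value and its meaning come from the header above):

#include <cstdint>

// Marker bit stored in the prerequisite_target include mask.
//
constexpr std::uintptr_t include_group = 0x100;

// Record that the member was picked from the lib{} group.
//
inline std::uintptr_t
mark_group (std::uintptr_t include)
{
  return include | include_group;
}

// Was the member picked from the group?
//
inline bool
picked_from_group (std::uintptr_t include)
{
  return (include & include_group) != 0;
}
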
diff --git a/libbuild2/cc/common.txx b/libbuild2/cc/common.txx
index d14f966..8c80686 100644
--- a/libbuild2/cc/common.txx
+++ b/libbuild2/cc/common.txx
@@ -19,15 +19,18 @@ namespace build2
bool exist,
tracer& trace)
{
- auto p (ctx.targets.insert_locked (T::static_type,
- move (dir),
- path_cast<dir_path> (out.effect),
- name,
- move (ext),
- target_decl::implied,
- trace));
+ auto p (ctx.targets.insert_locked (
+ T::static_type,
+ move (dir),
+ dir_path (out.effect_string ()).normalize (),
+ name,
+ move (ext),
+ target_decl::implied,
+ trace));
+
+ if (exist && p.second)
+ throw non_existent_library {p.first.template as<mtime_target> ()};
- assert (!exist || !p.second);
r = &p.first.template as<T> ();
return move (p.second);
}
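
The insert_library() change above replaces the old assert with an exception: when the caller asked for an existing target (exist) but the insertion actually created one, non_existent_library is thrown and later caught in process_libraries_impl(), which reports the dependency as "unknown". A self-contained toy of that protocol with stand-in types (only the exception idea and the carried target come from the code above):

#include <deque>
#include <string>
#include <utility>

struct target { std::string name; };

// Thrown when a target that was required to exist had to be created.
//
struct non_existent_library { const target& t; };

static const target&
insert_target (std::deque<target>& pool, std::string n, bool exist)
{
  for (const target& t: pool)
    if (t.name == n)
      return t; // Already there.

  pool.push_back (target {std::move (n)}); // deque keeps references valid.

  if (exist)
    throw non_existent_library {pool.back ()};

  return pool.back ();
}
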
diff --git a/libbuild2/cc/compile-rule.cxx b/libbuild2/cc/compile-rule.cxx
index 77d01c6..2e4775e 100644
--- a/libbuild2/cc/compile-rule.cxx
+++ b/libbuild2/cc/compile-rule.cxx
@@ -3,6 +3,7 @@
#include <libbuild2/cc/compile-rule.hxx>
+#include <cerrno>
#include <cstdlib> // exit()
#include <cstring> // strlen(), strchr(), strncmp()
@@ -175,7 +176,7 @@ namespace build2
if (s == "includes") return preprocessed::includes;
if (s == "modules") return preprocessed::modules;
if (s == "all") return preprocessed::all;
- throw invalid_argument ("invalid preprocessed value '" + s + "'");
+ throw invalid_argument ("invalid preprocessed value '" + s + '\'');
}
// Return true if the compiler supports -isystem (GCC class) or
@@ -229,10 +230,17 @@ namespace build2
return nullopt;
}
+ // Note that we don't really need this for clean (where we only need
+ // unrefined unit type) so we could make this update-only. But let's keep
+ // it simple for now. Note that now we do need the source prerequisite
+ // type in clean to deal with Objective-X.
+ //
struct compile_rule::match_data
{
- match_data (unit_type t, const prerequisite_member& s)
- : type (t), src (s) {}
+ match_data (const compile_rule& r,
+ unit_type t,
+ const prerequisite_member& s)
+ : type (t), src (s), rule (r) {}
unit_type type;
preprocessed pp = preprocessed::none;
@@ -245,39 +253,67 @@ namespace build2
path dd; // Dependency database path.
size_t header_units = 0; // Number of imported header units.
module_positions modules = {0, 0, 0}; // Positions of imported modules.
+
+ const compile_rule& rule;
+
+ target_state
+ operator() (action a, const target& t)
+ {
+ return rule.perform_update (a, t, *this);
+ }
};
compile_rule::
- compile_rule (data&& d)
+ compile_rule (data&& d, const scope& rs)
: common (move (d)),
rule_id (string (x) += ".compile 6")
{
- static_assert (sizeof (match_data) <= target::data_size,
- "insufficient space");
+ // Locate the header cache (see enter_header() for details).
+ //
+ {
+ string mn (string (x) + ".config");
+
+ header_cache_ = rs.find_module<config_module> (mn); // Must be there.
+
+ const scope* ws (rs.weak_scope ());
+ if (ws != &rs)
+ {
+ const scope* s (&rs);
+ do
+ {
+ s = s->parent_scope ()->root_scope ();
+
+ if (const auto* m = s->find_module<config_module> (mn))
+ header_cache_ = m;
+
+ } while (s != ws);
+ }
+ }
}
template <typename T>
void compile_rule::
append_sys_hdr_options (T& args) const
{
- assert (sys_hdr_dirs_extra <= sys_hdr_dirs.size ());
+ assert (sys_hdr_dirs_mode + sys_hdr_dirs_extra <= sys_hdr_dirs.size ());
// Note that the mode options are added as part of cmode.
//
auto b (sys_hdr_dirs.begin () + sys_hdr_dirs_mode);
- auto m (sys_hdr_dirs.begin () + sys_hdr_dirs_extra);
- auto e (sys_hdr_dirs.end ());
+ auto x (b + sys_hdr_dirs_extra);
+ // Add extras.
+ //
// Note: starting from 16.10, MSVC gained /external:I option though it
// doesn't seem to affect the order, only "system-ness".
//
append_option_values (
args,
- cclass == compiler_class::gcc ? "-idirafter" :
+ cclass == compiler_class::gcc ? "-isystem" :
cclass == compiler_class::msvc ? (isystem (*this)
? "/external:I"
: "/I") : "-I",
- m, e,
+ b, x,
[] (const dir_path& d) {return d.string ().c_str ();});
// For MSVC if we have no INCLUDE environment variable set, then we
@@ -293,7 +329,7 @@ namespace build2
{
append_option_values (
args, "/I",
- b, m,
+ x, sys_hdr_dirs.end (),
[] (const dir_path& d) {return d.string ().c_str ();});
}
}
@@ -318,6 +354,35 @@ namespace build2
case lang::c: o1 = "/TC"; break;
case lang::cxx: o1 = "/TP"; break;
}
+
+ // Note: /interface and /internalPartition are in addition to /TP.
+ //
+ switch (md.type)
+ {
+ case unit_type::non_modular:
+ case unit_type::module_impl:
+ {
+ break;
+ }
+ case unit_type::module_intf:
+ case unit_type::module_intf_part:
+ {
+ o2 = "/interface";
+ break;
+ }
+ case unit_type::module_impl_part:
+ {
+ o2 = "/internalPartition";
+ break;
+ }
+ case unit_type::module_header:
+ {
+ //@@ MODHDR TODO: /exportHeader
+ assert (false);
+ break;
+ }
+ }
+
break;
}
case compiler_class::gcc:
@@ -336,11 +401,20 @@ namespace build2
case unit_type::module_impl:
{
o1 = "-x";
- switch (x_lang)
+
+ if (x_assembler_cpp (md.src))
+ o2 = "assembler-with-cpp";
+ else
{
- case lang::c: o2 = "c"; break;
- case lang::cxx: o2 = "c++"; break;
+ bool obj (x_objective (md.src));
+
+ switch (x_lang)
+ {
+ case lang::c: o2 = obj ? "objective-c" : "c"; break;
+ case lang::cxx: o2 = obj ? "objective-c++" : "c++"; break;
+ }
}
+
break;
}
case unit_type::module_intf:
@@ -380,9 +454,11 @@ namespace build2
default:
assert (false);
}
+
break;
}
}
+
break;
}
}
@@ -406,7 +482,7 @@ namespace build2
}
bool compile_rule::
- match (action a, target& t, const string&) const
+ match (action a, target& t) const
{
tracer trace (x, "compile_rule::match");
@@ -439,13 +515,15 @@ namespace build2
// For a header unit we check the "real header" plus the C header.
//
- if (ut == unit_type::module_header ? p.is_a (**x_hdr) || p.is_a<h> () :
- ut == unit_type::module_intf ? p.is_a (*x_mod) :
- p.is_a (x_src))
+ if (ut == unit_type::module_header ? p.is_a (**x_hdrs) || p.is_a<h> () :
+ ut == unit_type::module_intf ? p.is_a (*x_mod) :
+ p.is_a (x_src) ||
+ (x_asp != nullptr && p.is_a (*x_asp)) ||
+ (x_obj != nullptr && p.is_a (*x_obj)))
{
// Save in the target's auxiliary storage.
//
- t.data (match_data (ut, p));
+ t.data (a, match_data (*this, ut, p));
return true;
}
}
@@ -456,13 +534,16 @@ namespace build2
// Append or hash library options from a pair of *.export.* variables
// (first is x.* then cc.*) recursively, prerequisite libraries first.
+ // If common is true, then only append common options from the lib{}
+ // groups.
//
template <typename T>
void compile_rule::
append_library_options (appended_libraries& ls, T& args,
const scope& bs,
const scope* is, // Internal scope.
- action a, const file& l, bool la, linfo li,
+ action a, const file& l, bool la,
+ linfo li, bool common,
library_cache* lib_cache) const
{
struct data
@@ -476,7 +557,7 @@ namespace build2
//
auto imp = [] (const target& l, bool la) {return la && l.is_a<libux> ();};
- auto opt = [&d, this] (const target& lt,
+ auto opt = [&d, this] (const target& l, // Note: could be lib{}
const string& t, bool com, bool exp)
{
// Note that in our model *.export.poptions are always "interface",
@@ -485,8 +566,6 @@ namespace build2
if (!exp) // Ignore libux.
return true;
- const file& l (lt.as<file> ());
-
// Suppress duplicates.
//
// Compilation is the simple case: we can add the options on the first
@@ -496,6 +575,8 @@ namespace build2
if (find (d.ls.begin (), d.ls.end (), &l) != d.ls.end ())
return false;
+ // Note: go straight for the public variable pool.
+ //
const variable& var (
com
? c_export_poptions
@@ -645,16 +726,24 @@ namespace build2
process_libraries (a, bs, li, sys_lib_dirs,
l, la, 0, // lflags unused.
- imp, nullptr, opt, false /* self */, lib_cache);
+ imp, nullptr, opt,
+ false /* self */,
+ common /* proc_opt_group */,
+ lib_cache);
}
void compile_rule::
append_library_options (appended_libraries& ls, strings& args,
const scope& bs,
- action a, const file& l, bool la, linfo li) const
+ action a, const file& l, bool la,
+ linfo li,
+ bool common,
+ bool original) const
{
- const scope* is (isystem (*this) ? effective_iscope (bs) : nullptr);
- append_library_options (ls, args, bs, is, a, l, la, li, nullptr);
+ const scope* is (!original && isystem (*this)
+ ? effective_iscope (bs)
+ : nullptr);
+ append_library_options (ls, args, bs, is, a, l, la, li, common, nullptr);
}
template <typename T>
@@ -695,7 +784,9 @@ namespace build2
append_library_options (ls,
args,
bs, iscope (),
- a, *f, la, li,
+ a, *f, la,
+ li,
+ false /* common */,
&lc);
}
}
@@ -737,6 +828,8 @@ namespace build2
//
if (const scope* rs = l.base_scope ().root_scope ())
{
+ // Note: go straight for the public variable pool.
+ //
const variable& var (
com
? c_export_poptions
@@ -777,7 +870,9 @@ namespace build2
process_libraries (a, bs, li, sys_lib_dirs,
pt->as<file> (), la, 0, // lflags unused.
- impf, nullptr, optf, false /* self */,
+ impf, nullptr, optf,
+ false /* self */,
+ false /* proc_opt_group */,
&lib_cache);
}
}
@@ -790,7 +885,7 @@ namespace build2
file& t (xt.as<file> ()); // Either obj*{} or bmi*{}.
- match_data& md (t.data<match_data> ());
+ match_data& md (t.data<match_data> (a));
context& ctx (t.ctx);
@@ -883,7 +978,9 @@ namespace build2
//
// Note: ut is still unrefined.
//
- if (ut == unit_type::module_intf && cast_true<bool> (t[b_binless]))
+ if ((ut == unit_type::module_intf ||
+ ut == unit_type::module_intf_part ||
+ ut == unit_type::module_impl_part) && cast_true<bool> (t[b_binless]))
{
// The module interface unit can be the same as an implementation
// (e.g., foo.mxx and foo.cxx) which means obj*{} targets could
@@ -944,6 +1041,12 @@ namespace build2
// to match it if we may need its modules or importable headers
// (see search_modules(), make_header_sidebuild() for details).
//
+ // Well, that was the case until we've added support for immediate
+ // importation of libraries, which happens during the load phase
+ // and natually leaves the library unmatched. While we could have
+      // and naturally leaves the library unmatched. While we could have
+ // library has been matched, this doesn't seem worth the trouble.
+ //
if (p.proj ())
{
pt = search_library (a,
@@ -951,8 +1054,10 @@ namespace build2
usr_lib_dirs,
p.prerequisite);
+#if 0
if (pt != nullptr && !modules)
continue;
+#endif
}
if (pt == nullptr)
@@ -980,7 +1085,8 @@ namespace build2
{
pt = &p.search (t);
- if (a.operation () == clean_id && !pt->dir.sub (rs.out_path ()))
+ if (pt == dir ||
+ (a.operation () == clean_id && !pt->dir.sub (rs.out_path ())))
continue;
}
@@ -1010,10 +1116,10 @@ namespace build2
// @@ If for some reason unmatch fails, this messes up the for_install
// logic because we will update this library during match. Perhaps
// we should postpone updating them until execute if we failed to
- // unmatch.
+ // unmatch. See how we do this in ad hoc rule.
//
pair<bool, target_state> mr (
- build2::match (
+ match_complete (
a,
*pt,
pt->is_a<liba> () || pt->is_a<libs> () || pt->is_a<libux> ()
@@ -1072,12 +1178,14 @@ namespace build2
// this can very well be happening in parallel. But that's not a
// problem since fsdir{}'s update is idempotent.
//
- fsdir_rule::perform_update_direct (a, t);
+ fsdir_rule::perform_update_direct (a, *dir);
}
// Note: the leading '@' is reserved for the module map prefix (see
// extract_modules()) and no other line must start with it.
//
+ // NOTE: see also the predefs rule if changing anything here.
+ //
depdb dd (tp + ".d");
// First should come the rule name/version.
@@ -1242,7 +1350,7 @@ namespace build2
//
l5 ([&]{trace << "extracting headers from " << src;});
auto& is (tu.module_info.imports);
- psrc = extract_headers (a, bs, t, li, src, md, dd, u, mt, is);
+ extract_headers (a, bs, t, li, src, md, dd, u, mt, is, psrc);
is.clear (); // No longer needed.
}
@@ -1297,6 +1405,10 @@ namespace build2
//
if (mt != timestamp_nonexistent)
{
+ // Appended to by to_module_info() below.
+ //
+ tu.module_info.imports.clear ();
+
u = false;
md.touch = true;
}
@@ -1381,24 +1493,6 @@ namespace build2
extract_modules (a, bs, t, li,
tts, src,
md, move (tu.module_info), dd, u);
-
- // Currently in VC module interface units must be compiled from
- // the original source (something to do with having to detect and
- // store header boundaries in the .ifc files).
- //
- // @@ MODHDR MSVC: should we do the same for header units? I guess
- // we will figure it out when MSVC supports header units.
- //
- // @@ TMP: probably outdated. Probably the same for partitions.
- //
- // @@ See also similar check in extract_headers(), existing entry
- // case.
- //
- if (ctype == compiler_type::msvc)
- {
- if (ut == unit_type::module_intf)
- psrc.second = false;
- }
}
}
@@ -1417,7 +1511,7 @@ namespace build2
// to keep re-validating the file on every subsequent dry-run as well
// on the real run).
//
- if (u && dd.reading () && !ctx.dry_run)
+ if (u && dd.reading () && !ctx.dry_run_option)
dd.touch = timestamp_unknown;
dd.close (false /* mtime_check */);
@@ -1478,14 +1572,14 @@ namespace build2
switch (a)
{
- case perform_update_id: return [this] (action a, const target& t)
- {
- return perform_update (a, t);
- };
- case perform_clean_id: return [this] (action a, const target& t)
+ case perform_update_id: return move (md);
+ case perform_clean_id:
{
- return perform_clean (a, t);
- };
+ return [this, srct = &md.src.type ()] (action a, const target& t)
+ {
+ return perform_clean (a, t, *srct);
+ };
+ }
default: return noop_recipe; // Configure update.
}
}
@@ -1582,72 +1676,6 @@ namespace build2
return pm;
}
- // @@ TMP
- //
-#if 0
- // Return the next make prerequisite starting from the specified
- // position and update position to point to the start of the
- // following prerequisite or l.size() if there are none left.
- //
- static string
- next_make (const string& l, size_t& p)
- {
- size_t n (l.size ());
-
- // Skip leading spaces.
- //
- for (; p != n && l[p] == ' '; p++) ;
-
- // Lines containing multiple prerequisites are 80 characters max.
- //
- string r;
- r.reserve (n);
-
- // Scan the next prerequisite while watching out for escape sequences.
- //
- for (; p != n && l[p] != ' '; p++)
- {
- char c (l[p]);
-
- if (p + 1 != n)
- {
- if (c == '$')
- {
- // Got to be another (escaped) '$'.
- //
- if (l[p + 1] == '$')
- ++p;
- }
- else if (c == '\\')
- {
- // This may or may not be an escape sequence depending on whether
- // what follows is "escapable".
- //
- switch (c = l[++p])
- {
- case '\\': break;
- case ' ': break;
- default: c = '\\'; --p; // Restore.
- }
- }
- }
-
- r += c;
- }
-
- // Skip trailing spaces.
- //
- for (; p != n && l[p] == ' '; p++) ;
-
- // Skip final '\'.
- //
- if (p == n - 1 && l[p] == '\\')
- p++;
-
- return r;
- }
-#endif
-
// VC /showIncludes output. The first line is the file being compiled
// (unless clang-cl; handled by our caller). Then we have the list of
// headers, one per line, in this form (text can presumably be
@@ -1843,7 +1871,7 @@ namespace build2
// Any unhandled io_error is handled by the caller as a generic module
// mapper io error. Returning false terminates the communication.
//
- struct compile_rule::module_mapper_state //@@ gcc_module_mapper_state
+ struct compile_rule::gcc_module_mapper_state
{
size_t skip; // Number of depdb entries to skip.
size_t header_units = 0; // Number of header units imported.
@@ -1854,15 +1882,20 @@ namespace build2
optional<const build2::cc::translatable_headers*> translatable_headers;
small_vector<string, 2> batch; // Reuse buffers.
+ size_t batch_n = 0;
- module_mapper_state (size_t s, module_imports& i)
+ gcc_module_mapper_state (size_t s, module_imports& i)
: skip (s), imports (i) {}
};
- bool compile_rule::
- gcc_module_mapper (module_mapper_state& st,
+ // The module mapper is called on one line of input at a time. It should
+ // return nullopt if another line is expected (batch), false if the mapper
+ // interaction should be terminated, and true if it should be continued.
+ //
+ optional<bool> compile_rule::
+ gcc_module_mapper (gcc_module_mapper_state& st,
action a, const scope& bs, file& t, linfo li,
- ifdstream& is,
+ const string& l,
ofdstream& os,
depdb& dd, bool& update, bool& bad_error,
optional<prefix_map>& pfx_map, srcout_map& so_map) const
@@ -1878,35 +1911,40 @@ namespace build2
// Read in the entire batch trying hard to reuse the buffers.
//
- auto& batch (st.batch);
- size_t batch_n (0);
+ small_vector<string, 2>& batch (st.batch);
+ size_t& batch_n (st.batch_n);
- for (;;)
+ // Add the next line.
+ //
{
if (batch.size () == batch_n)
- batch.push_back (string ());
-
- string& r (batch[batch_n]);
-
- if (eof (getline (is, r)))
- break;
+ batch.push_back (l);
+ else
+ batch[batch_n] = l;
batch_n++;
+ }
- if (r.back () != ';')
- break;
+ // Check if more is expected in this batch.
+ //
+ {
+ string& r (batch[batch_n - 1]);
- // Strip the trailing `;` word.
- //
- r.pop_back ();
- r.pop_back ();
- }
+ if (r.back () == ';')
+ {
+ // Strip the trailing `;` word.
+ //
+ r.pop_back ();
+ r.pop_back ();
- if (batch_n == 0) // EOF
- return false;
+ return nullopt;
+ }
+ }
if (verb >= 3)
{
+ // It doesn't feel like buffering this would be useful.
+ //
// Note that we show `;` in requests/responses so that the result
// could be replayed.
//
@@ -1928,23 +1966,211 @@ namespace build2
for (size_t i (0); i != batch_n; ++i)
{
string& r (batch[i]);
+ size_t rn (r.size ());
+
+ // The protocol uses a peculiar quoting/escaping scheme that can be
+ // summarized as follows (see the libcody documentation for details):
+ //
+      //   - Words are separated with spaces and/or tabs.
+ //
+      //   - Words need not be quoted if they only contain characters from
+ // the [-+_/%.A-Za-z0-9] set.
+ //
+ // - Otherwise words need to be single-quoted.
+ //
+ // - Inside single-quoted words, the \n \t \' and \\ escape sequences
+ // are recognized.
+ //
+ // Note that we currently don't treat abutted quotes (as in a' 'b) as
+ // a single word (it doesn't seem plausible that we will ever receive
+ // something like this).
+ //
+ size_t b (0), e (0), n; bool q; // Next word.
+
+ auto next = [&r, rn, &b, &e, &n, &q] () -> size_t
+ {
+ if (b != e)
+ b = e;
+
+ // Skip leading whitespaces.
+ //
+ for (; b != rn && (r[b] == ' ' || r[b] == '\t'); ++b) ;
+
+ if (b != rn)
+ {
+ q = (r[b] == '\'');
+
+ // Find first trailing whitespace or closing quote.
+ //
+ for (e = b + 1; e != rn; ++e)
+ {
+ // Note that we deal with invalid quoting/escaping in unquote().
+ //
+ switch (r[e])
+ {
+ case ' ':
+ case '\t':
+ if (q)
+ continue;
+ else
+ break;
+ case '\'':
+ if (q)
+ {
+ ++e; // Include closing quote (hopefully).
+ break;
+ }
+ else
+ {
+ assert (false); // Abutted quote.
+ break;
+ }
+ case '\\':
+ if (++e != rn) // Skip next character (hopefully).
+ continue;
+ else
+ break;
+ default:
+ continue;
+ }
+
+ break;
+ }
- // @@ TODO: quoting and escaping.
+ n = e - b;
+ }
+ else
+ {
+ q = false;
+ e = rn;
+ n = 0;
+ }
+
+ return n;
+ };
+
+ // Unquote into tmp the current word returning false if malformed.
//
- size_t b (0), e (0), n; // Next word.
+ auto unquote = [&r, &b, &n, &q, &tmp] (bool clear = true) -> bool
+ {
+ if (q && n > 1)
+ {
+ size_t e (b + n - 1);
+
+ if (r[b] == '\'' && r[e] == '\'')
+ {
+ if (clear)
+ tmp.clear ();
+
+ size_t i (b + 1);
+ for (; i != e; ++i)
+ {
+ char c (r[i]);
+ if (c == '\\')
+ {
+ if (++i == e)
+ {
+ i = 0;
+ break;
+ }
+
+ c = r[i];
+ if (c == 'n') c = '\n';
+ else if (c == 't') c = '\t';
+ }
+ tmp += c;
+ }
+
+ if (i == e)
+ return true;
+ }
+ }
+
+ return false;
+ };
+
+#if 0
+#define UNQUOTE(x, y) \
+ r = x; rn = r.size (); b = e = 0; \
+ assert (next () && unquote () && tmp == y)
+
+ UNQUOTE ("'foo bar'", "foo bar");
+ UNQUOTE (" 'foo bar' ", "foo bar");
+ UNQUOTE ("'foo\\\\bar'", "foo\\bar");
+ UNQUOTE ("'\\'foo bar'", "'foo bar");
+ UNQUOTE ("'foo bar\\''", "foo bar'");
+ UNQUOTE ("'\\'foo\\\\bar\\''", "'foo\\bar'");
+
+ fail << "all good";
+#endif
+
+ // Escape if necessary the specified string and append to r.
+ //
+ auto escape = [&r] (const string& s)
+ {
+ size_t b (0), e, n (s.size ());
+ while (b != n && (e = s.find_first_of ("\\'\n\t", b)) != string::npos)
+ {
+ r.append (s, b, e - b); // Preceding chunk.
+
+ char c (s[e]);
+ r += '\\';
+ r += (c == '\n' ? 'n' : c == '\t' ? 't' : c);
+ b = e + 1;
+ }
+
+ if (b != n)
+ r.append (s, b, e); // Final chunk.
+ };
- auto next = [&r, &b, &e, &n] () -> size_t
+ // Quote and escape if necessary the specified string and append to r.
+ //
+ auto quote = [&r, &escape] (const string& s)
{
- return (n = next_word (r, b, e, ' ', '\t'));
+ if (find_if (s.begin (), s.end (),
+ [] (char c)
+ {
+ return !((c >= 'a' && c <= 'z') ||
+ (c >= '0' && c <= '9') ||
+ (c >= 'A' && c <= 'Z') ||
+ c == '-' || c == '_' || c == '/' ||
+ c == '.' || c == '+' || c == '%');
+ }) == s.end ())
+ {
+ r += s;
+ }
+ else
+ {
+ r += '\'';
+ escape (s);
+ r += '\'';
+ }
};
+#if 0
+#define QUOTE(x, y) \
+ r.clear (); quote (x); \
+ assert (r == y)
+
+ QUOTE ("foo/Bar-7.h", "foo/Bar-7.h");
+
+ QUOTE ("foo bar", "'foo bar'");
+ QUOTE ("foo\\bar", "'foo\\\\bar'");
+ QUOTE ("'foo bar", "'\\'foo bar'");
+ QUOTE ("foo bar'", "'foo bar\\''");
+ QUOTE ("'foo\\bar'", "'\\'foo\\\\bar\\''");
+
+ fail << "all good";
+#endif
+
next (); // Request name.
- auto name = [&r, b, n] (const char* c) -> bool
+ auto name = [&r, b, n, q] (const char* c) -> bool
{
// We can reasonably assume a command will never be quoted.
//
- return (r.compare (b, n, c) == 0 &&
+ return (!q &&
+ r.compare (b, n, c) == 0 &&
(r[n] == ' ' || r[n] == '\t' || r[n] == '\0'));
};
@@ -1993,7 +2219,17 @@ namespace build2
if (next ())
{
- path f (r, b, n);
+ path f;
+ if (!q)
+ f = path (r, b, n);
+ else if (unquote ())
+ f = path (tmp);
+ else
+ {
+ r = "ERROR 'malformed quoting/escaping in request'";
+ continue;
+ }
+
bool exists (true);
// The TU path we pass to the compiler is always absolute so any
@@ -2004,8 +2240,9 @@ namespace build2
//
if (exists && f.relative ())
{
- tmp.assign (r, b, n);
- r = "ERROR relative header path '"; r += tmp; r += '\'';
+ r = "ERROR 'relative header path ";
+ escape (f.string ());
+ r += '\'';
continue;
}
@@ -2044,7 +2281,7 @@ namespace build2
pair<const file*, bool> er (
enter_header (
a, bs, t, li,
- f, false /* cache */, false /* normalized */,
+ move (f), false /* cache */, false /* normalized */,
pfx_map, so_map));
ht = er.first;
@@ -2052,7 +2289,7 @@ namespace build2
if (remapped)
{
- r = "ERROR remapping of headers not supported";
+ r = "ERROR 'remapping of headers not supported'";
continue;
}
@@ -2062,7 +2299,7 @@ namespace build2
// diagnostics won't really add anything to the compiler's. So
// let's only print it at -V or higher.
//
- if (ht == nullptr)
+ if (ht == nullptr) // f is still valid.
{
assert (!exists); // Sanity check.
@@ -2110,8 +2347,10 @@ namespace build2
// messy, let's keep both (it would have been nicer to print
// ours after the compiler's but that isn't easy).
//
- r = "ERROR unable to update header '";
- r += (ht != nullptr ? ht->path () : f).string ();
+ // Note: if ht is NULL, f is still valid.
+ //
+ r = "ERROR 'unable to update header ";
+ escape ((ht != nullptr ? ht->path () : f).string ());
r += '\'';
continue;
}
@@ -2246,17 +2485,27 @@ namespace build2
// original (which we may need to normalize when we read
// this mapping in extract_headers()).
//
- tmp = "@ "; tmp.append (r, b, n); tmp += ' '; tmp += bp;
+ // @@ This still breaks if the header path contains spaces.
+ // GCC bug 110153.
+ //
+ tmp = "@ ";
+ if (!q) tmp.append (r, b, n);
+ else unquote (false /* clear */); // Can't fail.
+ tmp += ' ';
+ tmp += bp;
+
dd.expect (tmp);
st.header_units++;
}
- r = "PATHNAME "; r += bp;
+ r = "PATHNAME ";
+ quote (bp);
}
catch (const failed&)
{
r = "ERROR 'unable to update header unit for ";
- r += hs; r += '\'';
+ escape (hs);
+ r += '\'';
continue;
}
}
@@ -2282,7 +2531,7 @@ namespace build2
// Truncate the response batch and terminate the communication (see
// also libcody issue #22).
//
- tmp.assign (r, b, n);
+ tmp.assign (r, b, n); // Request name (unquoted).
r = "ERROR '"; r += w; r += ' '; r += tmp; r += '\'';
batch_n = i + 1;
term = true;
@@ -2298,6 +2547,9 @@ namespace build2
// Write the response batch.
//
+ // @@ It's theoretically possible that we get blocked writing the
+ // response while the compiler gets blocked writing the diagnostics.
+ //
for (size_t i (0);; )
{
string& r (batch[i]);
@@ -2318,6 +2570,8 @@ namespace build2
os.flush ();
+ batch_n = 0; // Start a new batch.
+
return !term;
}
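
The reworked mapper above is now fed one request line at a time and communicates back through its optional<bool> result: nullopt means the batch continues (the line ended with the ` ;` continuation word), true means keep going, false means terminate. A self-contained toy of that batching contract with hypothetical names (the real driver loop lives in extract_headers() and also multiplexes compiler diagnostics):

#include <optional>
#include <string>
#include <vector>

// Accumulate request lines into a batch; a trailing " ;" word means more
// lines follow. Return nullopt to ask for the next line, true to continue
// after handling a complete batch, false to terminate the communication.
//
static std::optional<bool>
on_mapper_line (std::vector<std::string>& batch, const std::string& l)
{
  batch.push_back (l);

  std::string& r (batch.back ());

  if (r.size () >= 2 && r.compare (r.size () - 2, 2, " ;") == 0)
  {
    r.resize (r.size () - 2); // Strip the trailing `;` word.
    return std::nullopt;      // Another line of this batch is expected.
  }

  // ... process the complete batch and write the responses ...

  batch.clear ();
  return true;
}
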
@@ -2573,7 +2827,7 @@ namespace build2
pair<const file*, bool> r (
enter_header (
a, bs, t, li,
- f, false /* cache */, false /* normalized */,
+ move (f), false /* cache */, false /* normalized */,
pfx_map, so_map));
if (!r.second) // Shouldn't be remapped.
@@ -2582,7 +2836,7 @@ namespace build2
if (ht != pts.back ())
{
- ht = static_cast<const file*> (pts.back ().target);
+ ht = &pts.back ().target->as<file> ();
rs = "ERROR expected header '" + ht->path ().string () +
"' to be found instead";
bad_error = true; // We expect an error from the compiler.
@@ -2601,7 +2855,7 @@ namespace build2
pair<const file*, bool> er (
enter_header (
a, bs, t, li,
- f, false /* cache */, false /* normalized */,
+ move (f), false /* cache */, false /* normalized */,
pfx_map, so_map));
ht = er.first;
@@ -2620,7 +2874,7 @@ namespace build2
// diagnostics won't really add anything to the compiler's. So
// let's only print it at -V or higher.
//
- if (ht == nullptr)
+ if (ht == nullptr) // f is still valid.
{
assert (!exists); // Sanity check.
@@ -2667,10 +2921,12 @@ namespace build2
// messy, let's keep both (it would have been nicer to print
// ours after the compiler's but that isn't easy).
//
+ // Note: if ht is NULL, f is still valid.
+ //
rs = !exists
? string ("INCLUDE")
: ("ERROR unable to update header '" +
- (ht != nullptr ? ht->path () : f).string () + "'");
+ (ht != nullptr ? ht->path () : f).string () + '\'');
bad_error = true;
break;
@@ -2748,7 +3004,7 @@ namespace build2
}
catch (const failed&)
{
- rs = "ERROR unable to update header unit '" + hp + "'";
+ rs = "ERROR unable to update header unit '" + hp + '\'';
bad_error = true;
break;
}
@@ -2790,17 +3046,123 @@ namespace build2
}
#endif
+ //atomic_count cache_hit {0};
+ //atomic_count cache_mis {0};
+ //atomic_count cache_cls {0};
+
+ // The fp path is only moved from on success.
+ //
// Note: this used to be a lambda inside extract_headers() so refer to the
// body of that function for the overall picture.
//
pair<const file*, bool> compile_rule::
enter_header (action a, const scope& bs, file& t, linfo li,
- path& fp, bool cache, bool norm,
+ path&& fp, bool cache, bool norm,
optional<prefix_map>& pfx_map,
const srcout_map& so_map) const
{
tracer trace (x, "compile_rule::enter_header");
+ // It's reasonable to expect the same header to be included by multiple
+ // translation units, which means we will be re-doing this work over and
+ // over again. And it's not exactly cheap, taking up to 50% of an
+ // up-to-date check time on some projects. So we are going to cache the
+ // header path to target mapping.
+ //
+ // While we pass quite a bit of specific "context" (target, base scope)
+ // to enter_file(), here is the analysis why the result will not depend
+ // on this context for the non-absent header (fp is absolute):
+ //
+ // 1. Let's start with the base scope (bs). Firstly, the base scope
+ // passed to map_extension() is the scope of the header (i.e., it is
+ // the scope of fp.directory()). Other than that, the target base
+ // scope is only passed to build_prefix_map() which is only called
+ // for the absent header (linfo is also only used here).
+ //
+ // 2. Next is the target (t). It is passed to build_prefix_map() but
+ // that doesn't matter for the same reason as in (1). Other than
+ // that, it is only passed to build2::search() which in turn passes
+ // it to target type-specific prerequisite search callback (see
+ // target_type::search) if one is not NULL. The target type in
+ // question here is one of the headers and we know all of them use
+ // the standard file_search() which ignores the passed target.
+ //
+ // 3. Finally, so_map could be used for an absolute fp. While we could
+ // simply not cache the result if it was used (second half of the
+ // result pair is true), there doesn't seem to be any harm in caching
+ // the remapped path->target mapping. In fact, if you think about it,
+ // there is no harm in caching the generated file mapping since it
+ // will be immediately generated and any subsequent inclusions we
+ // will "see" with an absolute path, which we can resolve from the
+ // cache.
+ //
+ // To put it another way, all we need to do is make sure that if we were
+ // to not return an existing cache entry, the call to enter_file() would
+ // have returned exactly the same path/target.
+ //
+ // @@ Could it be that the header is re-mapped in one config but not the
+ // other (e.g., when we do both in src and in out builds and we pick
+ // the generated header in src)? If so, that would lead to a
+ // divergence. I.e., we would cache the no-remap case first and then
+ // return it even though the re-map is necessary? Why can't we just
+ // check for re-mapping ourselves? A: the remapping logic in
+ // enter_file() is not exactly trivial.
+ //
+ // But on the other hand, I think we can assume that different
+ // configurations will end up with different caches. In other words,
+ // we can assume that for the same "cc amalgamation" we use only a
+ // single "version" of a header. Seems reasonable.
+ //
+ // Note also that while it would have been nice to have a unified cc
+ // cache, the map_extension() call is passed x_incs which is module-
+ // specific. In other words, we may end up mapping the same header to
+ // two different targets depending on whether it is included from, say,
+ // C or C++ translation unit. We could have used a unified cache for
+ // headers that were mapped using the fallback target type, which would
+ // cover the installed headers. Maybe, one day (it's also possible that
+ // separate caches reduce contention).
+ //
+ // Another related question is where we want to keep the cache: project,
+ // strong amalgamation, or weak amalgamation (like module sidebuilds).
+ // Some experimentation showed that weak has the best performance (which
+ // suggests that a unified cache will probably be a win).
+ //
+ // Note also that we don't need to clear this cache since we never clear
+ // the targets set. In other words, the only time targets are
+ // invalidated is when we destroy the build context, which also destroys
+ // the cache.
+ //
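// As a self-contained illustration of the lookup/insert pattern described
// above (this is not the actual build2 code: config_module::header_key,
// header_map, and the file* value type live elsewhere in the cc module; a
// void* stands in for the target pointer and the hash is assumed to be
// precomputed by the caller):
//
#include <cstddef>
#include <mutex>         // unique_lock
#include <shared_mutex>  // shared_mutex, shared_lock
#include <string>
#include <unordered_map>
#include <utility>       // move

struct header_key
{
  std::string file; // Normalized absolute path.
  std::size_t hash; // Hash of file, computed once by the caller.

  bool operator== (const header_key& k) const {return file == k.file;}
};

struct header_key_hasher
{
  std::size_t operator() (const header_key& k) const {return k.hash;}
};

struct header_cache
{
  std::shared_mutex mutex;
  std::unordered_map<header_key, const void*, header_key_hasher> map;

  // Return the cached entry, if any, or insert t and return it. If another
  // thread won the race to insert the same key, its (identical) entry is
  // returned instead.
  //
  const void*
  find_or_insert (header_key k, const void* t)
  {
    {
      std::shared_lock<std::shared_mutex> l (mutex);
      auto i (map.find (k));
      if (i != map.end ())
        return i->second;
    }

    std::unique_lock<std::shared_mutex> l (mutex);
    return map.emplace (std::move (k), t).first->second;
  }
};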
+ const config_module& hc (*header_cache_);
+
+ // First check the cache.
+ //
+ config_module::header_key hk;
+
+ bool e (fp.absolute ());
+ if (e)
+ {
+ if (!norm)
+ {
+ normalize_external (fp, "header");
+ norm = true;
+ }
+
+ hk.file = move (fp);
+ hk.hash = hash<path> () (hk.file);
+
+ slock l (hc.header_map_mutex);
+ auto i (hc.header_map.find (hk));
+ if (i != hc.header_map.end ())
+ {
+ //cache_hit.fetch_add (1, memory_order_relaxed);
+ return make_pair (i->second, false);
+ }
+
+ fp = move (hk.file);
+
+ //cache_mis.fetch_add (1, memory_order_relaxed);
+ }
+
struct data
{
linfo li;
@@ -2810,24 +3172,52 @@ namespace build2
// If it is outside any project, or the project doesn't have such an
// extension, assume it is a plain old C header.
//
- return enter_file (
- trace, "header",
- a, bs, t,
- fp, cache, norm,
- [this] (const scope& bs, const string& n, const string& e)
+ auto r (enter_file (
+ trace, "header",
+ a, bs, t,
+ fp, cache, norm,
+ [this] (const scope& bs, const string& n, const string& e)
+ {
+ return map_extension (bs, n, e, x_incs);
+ },
+ h::static_type,
+ [this, &d] (action a, const scope& bs, const target& t)
+ -> const prefix_map&
+ {
+ if (!d.pfx_map)
+ d.pfx_map = build_prefix_map (bs, a, t, d.li);
+
+ return *d.pfx_map;
+ },
+ so_map));
+
+ // Cache.
+ //
+ if (r.first != nullptr)
+ {
+ hk.file = move (fp);
+
+ // Calculate the hash if we haven't yet and re-calculate it if the
+ // path has changed (header has been remapped).
+ //
+ if (!e || r.second)
+ hk.hash = hash<path> () (hk.file);
+
+ const file* f;
{
- return map_extension (bs, n, e, x_inc);
- },
- h::static_type,
- [this, &d] (action a, const scope& bs, const target& t)
- -> const prefix_map&
+ ulock l (hc.header_map_mutex);
+ auto p (hc.header_map.emplace (move (hk), r.first));
+ f = p.second ? nullptr : p.first->second;
+ }
+
+ if (f != nullptr)
{
- if (!d.pfx_map)
- d.pfx_map = build_prefix_map (bs, a, t, d.li);
+ //cache_cls.fetch_add (1, memory_order_relaxed);
+ assert (r.first == f);
+ }
+ }
- return *d.pfx_map;
- },
- so_map);
+ return r;
}
// Note: this used to be a lambda inside extract_headers() so refer to the
@@ -2842,16 +3232,18 @@ namespace build2
return inject_file (trace, "header", a, t, pt, mt, fail);
}
- // Extract and inject header dependencies. Return the preprocessed source
- // file as well as an indication if it is usable for compilation (see
- // below for details).
+ // Extract and inject header dependencies. Return (in result) the
+ // preprocessed source file as well as an indication if it is usable for
+ // compilation (see below for details). Note that result is expected to
+ // be initialized to {entry (), false}. Not using return type due to
+ // GCC bug #107555.
//
// This is also the place where we handle header units which are a lot
// more like auto-generated headers than modules. In particular, if a
// header unit BMI is out-of-date, then we have to re-preprocess this
// translation unit.
//
- pair<file_cache::entry, bool> compile_rule::
+ void compile_rule::
extract_headers (action a,
const scope& bs,
file& t,
@@ -2861,7 +3253,8 @@ namespace build2
depdb& dd,
bool& update,
timestamp mt,
- module_imports& imports) const
+ module_imports& imports,
+ pair<file_cache::entry, bool>& result) const
{
tracer trace (x, "compile_rule::extract_headers");
@@ -2874,9 +3267,16 @@ namespace build2
file_cache::entry psrc;
bool puse (true);
+ // Preprocessed file extension.
+ //
+ const char* pext (x_assembler_cpp (src) ? ".Si" :
+ x_objective (src) ? x_obj_pext :
+ x_pext);
+
// Preprocessor mode that preserves as much information as possible while
// still performing inclusions. Also serves as a flag indicating whether
- // this compiler uses the separate preprocess and compile setup.
+ // this (non-MSVC) compiler uses the separate preprocess and compile
+ // setup.
//
const char* pp (nullptr);
@@ -2887,7 +3287,16 @@ namespace build2
// -fdirectives-only is available since GCC 4.3.0.
//
if (cmaj > 4 || (cmaj == 4 && cmin >= 3))
- pp = "-fdirectives-only";
+ {
+ // Note that for assembler-with-cpp GCC currently forces full
+ // preprocessing in (what appears to be) an attempt to paper over
+ // a deeper issue (see GCC bug 109534). If/when that bug gets
+ // fixed, we can enable this on our side. Note that Clang's
+ // -frewrite-includes also has issues (see below).
+ //
+ if (!x_assembler_cpp (src))
+ pp = "-fdirectives-only";
+ }
break;
}
@@ -2896,7 +3305,16 @@ namespace build2
// -frewrite-includes is available since Clang 3.2.0.
//
if (cmaj > 3 || (cmaj == 3 && cmin >= 2))
- pp = "-frewrite-includes";
+ {
+ // While Clang's -frewrite-includes appears to work, there are
+ // some issues with correctly tracking location information
+ // (which manifest as wrong line numbers in debug info, for
+ // example). The result also appears to reference the .Si file
+ // instead of the original source file for some reason.
+ //
+ if (!x_assembler_cpp (src))
+ pp = "-frewrite-includes";
+ }
break;
}
@@ -2977,7 +3395,7 @@ namespace build2
//
// GCC's -fdirective-only, on the other hand, processes all the
// directives so they are gone from the preprocessed source. Here is
- // what we are going to do to work around this: we will detect if any
+ // what we are going to do to work around this: we will sense if any
// diagnostics has been written to stderr on the -E run. If that's the
// case (but the compiler indicated success) then we assume they are
// warnings and disable the use of the preprocessed output for
@@ -3015,7 +3433,9 @@ namespace build2
// not found, and there is no problem with outdated generated headers
// since we update/remap them before the compiler has a chance to read
// them. Overall, this "dependency mapper" approach is how it should
- // have been done from the beginning.
+ // have been done from the beginning. Note: that's the ideal world;
+ // in reality, the required mapper extensions are not (yet) available
+ // in libcody/GCC.
// Note: diagnostics sensing is currently only supported if dependency
// info is written to a file (see above).
@@ -3025,7 +3445,7 @@ namespace build2
// And here is another problem: if we have an already generated header
// in src and the one in out does not yet exist, then the compiler will
// pick the one in src and we won't even notice. Note that this is not
- // only an issue with mixing in- and out-of-tree builds (which does feel
+ // only an issue with mixing in- and out-of-source builds (which does feel
// wrong but is oh so convenient): this is also a problem with
// pre-generated headers, a technique we use to make installing the
// generator by end-users optional by shipping pre-generated headers.
@@ -3064,13 +3484,13 @@ namespace build2
// The gen argument to init_args() is in/out. The caller signals whether
// to force the generated header support and on return it signals
- // whether this support is enabled. The first call to init_args is
- // expected to have gen false.
+ // whether this support is enabled. If gen is false, then stderr is
+ // expected to be either discarded or merged with stdout.
//
// Return NULL if the dependency information goes to stdout and a
// pointer to the temporary file path otherwise.
//
- auto init_args = [a, &t, ot, li, reprocess,
+ auto init_args = [a, &t, ot, li, reprocess, pext,
&src, &md, &psrc, &sense_diag, &mod_mapper, &bs,
pp, &env, &args, &args_gen, &args_i, &out, &drm,
&so_map, this]
@@ -3216,16 +3636,6 @@ namespace build2
// Some compile options (e.g., -std, -m) affect the preprocessor.
//
- // Currently Clang supports importing "header modules" even when in
- // the TS mode. And "header modules" support macros which means
- // imports have to be resolved during preprocessing. Which poses a
- // bit of a chicken and egg problem for us. For now, the workaround
- // is to remove the -fmodules-ts option when preprocessing. Hopefully
- // there will be a "pure modules" mode at some point.
- //
- // @@ MODHDR Clang: should be solved with the dynamic module mapper
- // if/when Clang supports it?
- //
// Don't treat warnings as errors.
//
@@ -3254,11 +3664,18 @@ namespace build2
append_options (args, cmode);
append_sys_hdr_options (args); // Extra system header dirs (last).
+ // Note that for MSVC stderr is merged with stdout and is then
+ // parsed, so no append_diag_color_options() call.
+
// See perform_update() for details on the choice of options.
//
+ // NOTE: see also the predefs rule if adding anything here.
+ //
{
- bool sc (find_option_prefix ("/source-charset:", args));
- bool ec (find_option_prefix ("/execution-charset:", args));
+ bool sc (find_option_prefixes (
+ {"/source-charset:", "-source-charset:"}, args));
+ bool ec (find_option_prefixes (
+ {"/execution-charset:", "-execution-charset:"}, args));
if (!sc && !ec)
args.push_back ("/utf-8");
@@ -3274,15 +3691,18 @@ namespace build2
if (cvariant != "clang" && isystem (*this))
{
- if (find_option_prefix ("/external:I", args) &&
- !find_option_prefix ("/external:W", args))
+ if (find_option_prefixes ({"/external:I", "-external:I"}, args) &&
+ !find_option_prefixes ({"/external:W", "-external:W"}, args))
args.push_back ("/external:W0");
}
- if (x_lang == lang::cxx && !find_option_prefix ("/EH", args))
+ if (x_lang == lang::cxx &&
+ !find_option_prefixes ({"/EH", "-EH"}, args))
args.push_back ("/EHsc");
- if (!find_option_prefixes ({"/MD", "/MT"}, args))
+ // NOTE: see similar code in search_modules().
+ //
+ if (!find_option_prefixes ({"/MD", "/MT", "-MD", "-MT"}, args))
args.push_back ("/MD");
args.push_back ("/P"); // Preprocess to file.
@@ -3293,7 +3713,7 @@ namespace build2
msvc_sanitize_cl (args);
- psrc = ctx.fcache.create (t.path () + x_pext, !modules);
+ psrc = ctx.fcache->create (t.path () + pext, !modules);
if (fc)
{
@@ -3312,12 +3732,18 @@ namespace build2
}
case compiler_class::gcc:
{
- append_options (args, cmode,
- cmode.size () - (modules && clang ? 1 : 0));
+ append_options (args, cmode);
append_sys_hdr_options (args); // Extra system header dirs (last).
+ // If not gen, then stderr is discarded.
+ //
+ if (gen)
+ append_diag_color_options (args);
+
// See perform_update() for details on the choice of options.
//
+ // NOTE: see also the predefs rule if adding anything here.
+ //
if (!find_option_prefix ("-finput-charset=", args))
args.push_back ("-finput-charset=UTF-8");
@@ -3329,8 +3755,7 @@ namespace build2
if (ctype == compiler_type::clang && tsys == "win32-msvc")
{
- initializer_list<const char*> os {"-nostdlib", "-nostartfiles"};
- if (!find_options (os, cmode) && !find_options (os, args))
+ if (!find_options ({"-nostdlib", "-nostartfiles"}, args))
{
args.push_back ("-D_MT");
args.push_back ("-D_DLL");
@@ -3440,7 +3865,7 @@ namespace build2
// Preprocessor output.
//
- psrc = ctx.fcache.create (t.path () + x_pext, !modules);
+ psrc = ctx.fcache->create (t.path () + pext, !modules);
args.push_back ("-o");
args.push_back (psrc.path ().string ().c_str ());
}
@@ -3588,7 +4013,7 @@ namespace build2
if (const file* ht = enter_header (
a, bs, t, li,
- hp, cache, cache /* normalized */,
+ move (hp), cache, cache /* normalized */,
pfx_map, so_map).first)
{
// If we are reading the cache, then it is possible the file has
@@ -3603,7 +4028,7 @@ namespace build2
// Verify/add it to the dependency database.
//
if (!cache)
- dd.expect (ht->path ()); // @@ Use hp (or verify match)?
+ dd.expect (ht->path ());
skip_count++;
return *u;
@@ -3617,7 +4042,7 @@ namespace build2
return fail (*ht);
}
else
- return fail (hp);
+ return fail (hp); // hp is still valid.
};
// As above but for a header unit. Note that currently it is only used
@@ -3634,10 +4059,10 @@ namespace build2
const file* ht (
enter_header (a, bs, t, li,
- hp, true /* cache */, false /* normalized */,
+ move (hp), true /* cache */, false /* normalized */,
pfx_map, so_map).first);
- if (ht == nullptr)
+ if (ht == nullptr) // hp is still valid.
{
diag_record dr;
dr << error << "header " << hp << " not found and no rule to "
@@ -3730,13 +4155,13 @@ namespace build2
// If modules are enabled, then we keep the preprocessed output
// around (see apply() for details).
//
- // See apply() for details on the extra MSVC check.
- //
- return modules && (ctype != compiler_type::msvc ||
- md.type != unit_type::module_intf)
- ? make_pair (ctx.fcache.create_existing (t.path () + x_pext),
- true)
- : make_pair (file_cache::entry (), false);
+ if (modules)
+ {
+ result.first = ctx.fcache->create_existing (t.path () + pext);
+ result.second = true;
+ }
+
+ return;
}
// This can be a header or a header unit (mapping).
@@ -3789,7 +4214,7 @@ namespace build2
// Bail out early if we have deferred a failure.
//
- return make_pair (file_cache::entry (), false);
+ return;
}
}
}
@@ -3815,6 +4240,12 @@ namespace build2
process pr;
+ // We use the fdstream_mode::skip mode on stdout (cannot be used
+ // on both) and so dbuf must be destroyed (closed) first.
+ //
+ ifdstream is (ifdstream::badbit);
+ diag_buffer dbuf (ctx);
+
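// The comment above relies on C++ destroying automatic objects in reverse
// declaration order: dbuf, being declared after is, is destroyed (and
// therefore closed) first. A tiny self-contained illustration (the names
// are arbitrary):
//
#include <iostream>

struct probe
{
  const char* name;
  ~probe () {std::cout << name << " destroyed" << std::endl;}
};

int main ()
{
  probe out {"stdout reader (is)"};
  probe err {"diagnostics buffer (dbuf)"};

  // Prints "diagnostics buffer (dbuf) destroyed" followed by
  // "stdout reader (is) destroyed".
}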
try
{
// Assume the preprocessed output (if produced) is usable
@@ -3835,217 +4266,229 @@ namespace build2
//
bool good_error (false), bad_error (false);
- // If we have no generated header support, then suppress all
- // diagnostics (if things go badly we will restart with this
- // support).
- //
- if (drmp == nullptr) // Dependency info goes to stdout.
+ if (mod_mapper) // Dependency info is implied by mapper requests.
{
- assert (!sense_diag); // Note: could support with fdselect().
+ assert (gen && !sense_diag); // Not used in this mode.
- // For VC with /P the dependency info and diagnostics all go
- // to stderr so redirect it to stdout.
+ // Note that here we use the skip mode on the diagnostics
+ // stream, which means we have to use our own instance of the stdout
+ // stream for the correct destruction order (see below).
//
- pr = process (
- cpath,
- args.data (),
- 0,
- -1,
- cclass == compiler_class::msvc ? 1 : gen ? 2 : -2,
- nullptr, // CWD
- env.empty () ? nullptr : env.data ());
- }
- else // Dependency info goes to a temporary file.
- {
pr = process (cpath,
- args.data (),
- mod_mapper ? -1 : 0,
- mod_mapper ? -1 : 2, // Send stdout to stderr.
- gen ? 2 : sense_diag ? -1 : -2,
+ args,
+ -1,
+ -1,
+ diag_buffer::pipe (ctx),
nullptr, // CWD
env.empty () ? nullptr : env.data ());
- // Monitor for module mapper requests and/or diagnostics. If
- // diagnostics is detected, mark the preprocessed output as
- // unusable for compilation.
- //
- if (mod_mapper || sense_diag)
+ dbuf.open (args[0],
+ move (pr.in_efd),
+ fdstream_mode::non_blocking |
+ fdstream_mode::skip);
+ try
{
- module_mapper_state mm_state (skip_count, imports);
+ gcc_module_mapper_state mm_state (skip_count, imports);
+
+ // Note that while we read both streams until eof in normal
+ // circumstances, we cannot use fdstream_mode::skip for the
+ // exception case on both of them: we may end up being
+ // blocked trying to read one stream while the process may
+ // be blocked writing to the other. So in case of an
+ // exception we only skip the diagnostics and close the
+ // mapper stream hard. The latter (together with closing of
+ // the stdin stream) should happen first so the order of
+ // the following variables is important.
+ //
+ // Note also that we open the stdin stream in the blocking
+ // mode.
+ //
+ ifdstream is (move (pr.in_ofd),
+ fdstream_mode::non_blocking,
+ ifdstream::badbit); // stdout
+ ofdstream os (move (pr.out_fd)); // stdin (badbit|failbit)
- const char* w (nullptr);
- try
+ // Read until we reach EOF on all streams.
+ //
+ // Note that if dbuf is not opened, then we automatically
+ // get an inactive nullfd entry.
+ //
+ fdselect_set fds {is.fd (), dbuf.is.fd ()};
+ fdselect_state& ist (fds[0]);
+ fdselect_state& dst (fds[1]);
+
+ bool more (false);
+ for (string l; ist.fd != nullfd || dst.fd != nullfd; )
{
- // For now we don't need to do both so let's use a simpler
- // blocking implementation. Note that the module mapper
- // also needs to be adjusted when switching to the
- // non-blocking version.
+ // @@ Currently we will accept a (potentially truncated)
+ // line that ends with EOF rather than newline.
//
-#if 1
- assert (mod_mapper != sense_diag);
-
- if (mod_mapper)
+ if (ist.fd != nullfd && getline_non_blocking (is, l))
{
- w = "module mapper request";
-
- // Note: the order is important (see the non-blocking
- // verison for details).
- //
- ifdstream is (move (pr.in_ofd),
- fdstream_mode::skip,
- ifdstream::badbit);
- ofdstream os (move (pr.out_fd));
-
- do
+ if (eof (is))
{
- if (!gcc_module_mapper (mm_state,
- a, bs, t, li,
- is, os,
- dd, update, bad_error,
- pfx_map, so_map))
- break;
-
- } while (!is.eof ());
+ os.close ();
+ is.close ();
- os.close ();
- is.close ();
- }
-
- if (sense_diag)
- {
- w = "diagnostics";
- ifdstream is (move (pr.in_efd), fdstream_mode::skip);
- puse = puse && (is.peek () == ifdstream::traits_type::eof ());
- is.close ();
- }
-#else
- fdselect_set fds;
- auto add = [&fds] (const auto_fd& afd) -> fdselect_state*
- {
- int fd (afd.get ());
- fdmode (fd, fdstream_mode::non_blocking);
- fds.push_back (fd);
- return &fds.back ();
- };
-
- // Note that while we read both streams until eof in
- // normal circumstances, we cannot use fdstream_mode::skip
- // for the exception case on both of them: we may end up
- // being blocked trying to read one stream while the
- // process may be blocked writing to the other. So in case
- // of an exception we only skip the diagnostics and close
- // the mapper stream hard. The latter should happen first
- // so the order of the following variable is important.
- //
- ifdstream es;
- ofdstream os;
- ifdstream is;
+ if (more)
+ throw_generic_ios_failure (EIO, "unexpected EOF");
- fdselect_state* ds (nullptr);
- if (sense_diag)
- {
- w = "diagnostics";
- ds = add (pr.in_efd);
- es.open (move (pr.in_efd), fdstream_mode::skip);
- }
-
- fdselect_state* ms (nullptr);
- if (mod_mapper)
- {
- w = "module mapper request";
- ms = add (pr.in_ofd);
- is.open (move (pr.in_ofd));
- os.open (move (pr.out_fd)); // Note: blocking.
- }
-
- // Set each state pointer to NULL when the respective
- // stream reaches eof.
- //
- while (ds != nullptr || ms != nullptr)
- {
- w = "output";
- ifdselect (fds);
-
- // First read out the diagnostics in case the mapper
- // interaction produces more. To make sure we don't get
- // blocked by full stderr, the mapper should only handle
- // one request at a time.
- //
- if (ds != nullptr && ds->ready)
+ ist.fd = nullfd;
+ }
+ else
{
- w = "diagnostics";
-
- for (char buf[4096];;)
- {
- streamsize c (sizeof (buf));
- streamsize n (es.readsome (buf, c));
+ optional<bool> r (
+ gcc_module_mapper (mm_state,
+ a, bs, t, li,
+ l, os,
+ dd, update, bad_error,
+ pfx_map, so_map));
- if (puse && n > 0)
- puse = false;
+ more = !r.has_value ();
- if (n < c)
- break;
- }
-
- if (es.eof ())
- {
- es.close ();
- ds->fd = nullfd;
- ds = nullptr;
- }
- }
-
- if (ms != nullptr && ms->ready)
- {
- w = "module mapper request";
-
- gcc_module_mapper (mm_state,
- a, bs, t, li,
- is, os,
- dd, update, bad_error,
- pfx_map, so_map);
- if (is.eof ())
+ if (more || *r)
+ l.clear ();
+ else
{
os.close ();
is.close ();
- ms->fd = nullfd;
- ms = nullptr;
+ ist.fd = nullfd;
}
}
+
+ continue;
}
-#endif
- }
- catch (const io_error& e)
- {
- if (pr.wait ())
- fail << "io error handling " << x_lang << " compiler "
- << w << ": " << e;
- // Fall through.
+ ifdselect (fds);
+
+ if (dst.ready)
+ {
+ if (!dbuf.read ())
+ dst.fd = nullfd;
+ }
}
- if (mod_mapper)
- md.header_units += mm_state.header_units;
+ md.header_units += mm_state.header_units;
+ }
+ catch (const io_error& e)
+ {
+ // Note that diag_buffer handles its own io errors so this
+ // is about mapper stdin/stdout.
+ //
+ if (pr.wait ())
+ fail << "io error handling " << x_lang << " compiler "
+ << "module mapper request: " << e;
+
+ // Fall through.
}
// The idea is to reduce this to the stdout case.
//
- pr.wait ();
-
- // With -MG we want to read dependency info even if there is
- // an error (in case an outdated header file caused it). But
- // with the GCC module mapper an error is non-negotiable, so
- // to speak, and so we want to skip all of that. In fact, we
- // now write directly to depdb without generating and then
+ // We now write directly to depdb without generating and then
// parsing an intermediate dependency makefile.
//
- pr.in_ofd = (ctype == compiler_type::gcc && mod_mapper)
- ? auto_fd (nullfd)
- : fdopen (*drmp, fdopen_mode::in);
+ pr.wait ();
+ pr.in_ofd = nullfd;
+ }
+ else
+ {
+ // If we have no generated header support, then suppress all
+ // diagnostics (if things go badly we will restart with this
+ // support).
+ //
+ if (drmp == nullptr) // Dependency info goes to stdout.
+ {
+ assert (!sense_diag); // Note: could support if necessary.
+
+ // For VC with /P the dependency info and diagnostics all go
+ // to stderr so redirect it to stdout.
+ //
+ int err (
+ cclass == compiler_class::msvc ? 1 : // stdout
+ !gen ? -2 : // /dev/null
+ diag_buffer::pipe (ctx, sense_diag /* force */));
+
+ pr = process (
+ cpath,
+ args,
+ 0,
+ -1,
+ err,
+ nullptr, // CWD
+ env.empty () ? nullptr : env.data ());
+
+ if (cclass != compiler_class::msvc && gen)
+ {
+ dbuf.open (args[0],
+ move (pr.in_efd),
+ fdstream_mode::non_blocking); // Skip on stdout.
+ }
+ }
+ else // Dependency info goes to temporary file.
+ {
+ // Since we only need to read from one stream (dbuf) let's
+ // use the simpler blocking setup.
+ //
+ int err (
+ !gen && !sense_diag ? -2 : // /dev/null
+ diag_buffer::pipe (ctx, sense_diag /* force */));
+
+ pr = process (cpath,
+ args,
+ 0,
+ 2, // Send stdout to stderr.
+ err,
+ nullptr, // CWD
+ env.empty () ? nullptr : env.data ());
+
+ if (gen || sense_diag)
+ {
+ dbuf.open (args[0], move (pr.in_efd));
+ dbuf.read (sense_diag /* force */);
+ }
+
+ if (sense_diag)
+ {
+ if (!dbuf.buf.empty ())
+ {
+ puse = false;
+ dbuf.buf.clear (); // Discard.
+ }
+ }
+
+ // The idea is to reduce this to the stdout case.
+ //
+ // Note that with -MG we want to read dependency info even
+ // if there is an error (in case an outdated header file
+ // caused it).
+ //
+ pr.wait ();
+ pr.in_ofd = fdopen (*drmp, fdopen_mode::in);
+ }
}
+ // Read and process dependency information, if any.
+ //
if (pr.in_ofd != nullfd)
{
+ // We have two cases here: reading from stdout and potentially
+ // stderr (dbuf) or reading from file (see the process startup
+ // code above for details). If we have to read from two
+ // streams, then we have to use the non-blocking setup. But we
+ // cannot use the non-blocking setup uniformly because on
+ // Windows it's only supported for pipes. So things are going
+ // to get a bit hairy.
+ //
+ // And there is another twist to this: for MSVC we redirect
+ // stderr to stdout since the header dependency information is
+ // part of the diagnostics. If, however, there is some real
+ // diagnostics, we need to pass it through, potentially with
+ // buffering. The way we achieve this is by later opening dbuf
+ // in the EOF state and using it to buffer or stream the
+ // diagnostics.
+ //
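// For reference, here is a stripped-down, POSIX-only analogue of the
// two-stream drain implemented below. The real code uses build2's
// fdselect/getline_non_blocking wrappers (which also handle Windows pipes)
// and dbuf for the diagnostics; this sketch just buffers stderr into a
// string and hands complete stdout lines to a callback.
//
#include <poll.h>
#include <unistd.h>

#include <cstddef>
#include <string>

template <typename F>
void
drain (int out_fd, int err_fd, std::string& diag, F&& on_line)
{
  std::string buf;
  char c[4096];

  while (out_fd != -1 || err_fd != -1)
  {
    // Note: a negative fd entry is ignored by poll().
    //
    pollfd fds[2] {{out_fd, POLLIN, 0}, {err_fd, POLLIN, 0}};

    if (poll (fds, 2, -1 /* no timeout */) < 0)
      break;

    if (out_fd != -1 && (fds[0].revents & (POLLIN | POLLHUP)) != 0)
    {
      ssize_t n (read (out_fd, c, sizeof (c)));

      if (n <= 0)
        out_fd = -1; // EOF (or error).
      else
      {
        buf.append (c, static_cast<std::size_t> (n));

        for (std::size_t p; (p = buf.find ('\n')) != std::string::npos; )
        {
          on_line (buf.substr (0, p));
          buf.erase (0, p + 1);
        }
      }
    }

    if (err_fd != -1 && (fds[1].revents & (POLLIN | POLLHUP)) != 0)
    {
      ssize_t n (read (err_fd, c, sizeof (c)));

      if (n <= 0)
        err_fd = -1; // EOF (or error).
      else
        diag.append (c, static_cast<std::size_t> (n));
    }
  }
}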
+ bool nb (dbuf.is.is_open ());
+
// We may not read all the output (e.g., due to a restart).
// Before we used to just close the file descriptor to signal
// to the other end that we are not interested in the rest.
@@ -4053,20 +4496,69 @@ namespace build2
// impolite and complains, loudly (broken pipe). So now we are
// going to skip until the end.
//
- ifdstream is (move (pr.in_ofd),
- fdstream_mode::text | fdstream_mode::skip,
- ifdstream::badbit);
+ // Note that this means we are not using skip on dbuf (see
+ // above for the destruction order details).
+ //
+ {
+ fdstream_mode m (fdstream_mode::text |
+ fdstream_mode::skip);
+
+ if (nb)
+ m |= fdstream_mode::non_blocking;
+
+ is.open (move (pr.in_ofd), m);
+ }
+
+ fdselect_set fds;
+ if (nb)
+ fds = {is.fd (), dbuf.is.fd ()};
size_t skip (skip_count);
string l, l2; // Reuse.
for (bool first (true), second (false); !restart; )
{
- if (eof (getline (is, l)))
+ if (nb)
{
- if (bad_error && !l2.empty ())
- text << l2;
+ fdselect_state& ist (fds[0]);
+ fdselect_state& dst (fds[1]);
- break;
+ // We read until we reach EOF on both streams.
+ //
+ if (ist.fd == nullfd && dst.fd == nullfd)
+ break;
+
+ if (ist.fd != nullfd && getline_non_blocking (is, l))
+ {
+ if (eof (is))
+ {
+ ist.fd = nullfd;
+ continue;
+ }
+
+ // Fall through to parse (and clear) the line.
+ }
+ else
+ {
+ ifdselect (fds);
+
+ if (dst.ready)
+ {
+ if (!dbuf.read ())
+ dst.fd = nullfd;
+ }
+
+ continue;
+ }
+ }
+ else
+ {
+ if (eof (getline (is, l)))
+ {
+ if (bad_error && !l2.empty ()) // MSVC only (see below).
+ dbuf.write (l2, true /* newline */);
+
+ break;
+ }
}
l6 ([&]{trace << "header dependency line '" << l << "'";});
@@ -4117,9 +4609,15 @@ namespace build2
else
{
l2 = l;
- bad_error = true;
+
+ if (!bad_error)
+ {
+ dbuf.open_eof (args[0]);
+ bad_error = true;
+ }
}
+ l.clear ();
continue;
}
@@ -4129,6 +4627,7 @@ namespace build2
}
first = false;
+ l.clear ();
continue;
}
@@ -4136,8 +4635,13 @@ namespace build2
if (f.empty ()) // Some other diagnostics.
{
- text << l;
- bad_error = true;
+ if (!bad_error)
+ {
+ dbuf.open_eof (args[0]);
+ bad_error = true;
+ }
+
+ dbuf.write (l, true /* newline */);
break;
}
@@ -4231,12 +4735,9 @@ namespace build2
if (l.empty () ||
l[0] != '^' || l[1] != ':' || l[2] != ' ')
{
- // @@ Hm, we don't seem to redirect stderr to stdout
- // for this class of compilers so I wonder why
- // we are doing this?
- //
if (!l.empty ())
- text << l;
+ l5 ([&]{trace << "invalid header dependency line '"
+ << l << "'";});
bad_error = true;
break;
@@ -4251,7 +4752,10 @@ namespace build2
// "^: \".
//
if (l.size () == 4 && l[3] == '\\')
+ {
+ l.clear ();
continue;
+ }
else
pos = 3; // Skip "^: ".
@@ -4266,10 +4770,8 @@ namespace build2
if (pos != l.size () && l[pos] == ':')
{
- // @@ Hm, the same as above.
- //
- text << l;
-
+ l5 ([&]{trace << "invalid header dependency line '"
+ << l << "'";});
bad_error = true;
break;
}
@@ -4324,19 +4826,56 @@ namespace build2
}
if (bad_error || md.deferred_failure)
+ {
+ // Note that it may be tempting to finish reading out the
+ // diagnostics before bailing out. But that may end up in
+ // a deadlock if the process gets blocked trying to write
+ // to stdout.
+ //
break;
+ }
+
+ l.clear ();
+ }
+
+ // We may bail out early from the above loop in case of a
+ // restart or error. Which means the stderr stream (dbuf) may
+ // still be open and we need to close it before closing the
+ // stdout stream (which may try to skip).
+ //
+ // In this case we may also end up with incomplete diagnostics
+ // so discard it.
+ //
+ // Generally, it may be tempting to start wondering whether we
+ // should discard buffered diagnostics in other cases, such as
+ // restart. But remember that during serial execution it will
+ // go straight to stderr so for consistency (and simplicity)
+ // we should just print it unless there are good reasons not
+ // to (also remember that in the restartable modes we normally
+ // redirect stderr to /dev/null; see the process startup code
+ // for details).
+ //
+ if (dbuf.is.is_open ())
+ {
+ dbuf.is.close ();
+ dbuf.buf.clear ();
}
// Bail out early if we have deferred a failure.
//
+ // Let's ignore any buffered diagnostics in this case since
+ // it would appear after the deferred failure note.
+ //
if (md.deferred_failure)
{
is.close ();
- return make_pair (file_cache::entry (), false);
+ return;
}
- // In case of VC, we are parsing stderr and if things go
- // south, we need to copy the diagnostics for the user to see.
+ // In case of VC, we are parsing redirected stderr and if
+ // things go south, we need to copy the diagnostics for the
+ // user to see. Note that we should have already opened dbuf
+ // at EOF above.
//
if (bad_error && cclass == compiler_class::msvc)
{
@@ -4351,7 +4890,7 @@ namespace build2
l.compare (p.first, 4, "1083") != 0 &&
msvc_header_c1083 (l, p))
{
- diag_stream_lock () << l << endl;
+ dbuf.write (l, true /* newline */);
}
}
}
@@ -4374,27 +4913,42 @@ namespace build2
if (pr.wait ())
{
- if (!bad_error) // Ignore expected successes (we are done).
{
- if (!restart && psrc)
- psrcw.close ();
+ diag_record dr;
- continue;
+ if (bad_error)
+ dr << fail << "expected error exit status from "
+ << x_lang << " compiler";
+
+ if (dbuf.is_open ())
+ dbuf.close (move (dr)); // Throws if error.
}
- fail << "expected error exit status from " << x_lang
- << " compiler";
+ // Ignore expected successes (we are done).
+ //
+ if (!restart && psrc)
+ psrcw.close ();
+
+ continue;
}
else if (pr.exit->normal ())
{
if (good_error) // Ignore expected errors (restart).
+ {
+ if (dbuf.is_open ())
+ dbuf.close ();
+
continue;
+ }
}
// Fall through.
}
catch (const io_error& e)
{
+ // Ignore buffered diagnostics (since reading it could be the
+ // cause of this failure).
+ //
if (pr.wait ())
fail << "unable to read " << x_lang << " compiler header "
<< "dependency output: " << e;
@@ -4403,18 +4957,23 @@ namespace build2
}
assert (pr.exit && !*pr.exit);
- const process_exit& e (*pr.exit);
+ const process_exit& pe (*pr.exit);
// For normal exit we assume the child process issued some
// diagnostics.
//
- if (e.normal ())
+ if (pe.normal ())
{
- // If this run was with the generated header support then we
- // have issued diagnostics and it's time to give up.
+ // If this run was with the generated header support then it's
+ // time to give up.
//
if (gen)
+ {
+ if (dbuf.is_open ())
+ dbuf.close (args, pe, 2 /* verbosity */);
+
throw failed ();
+ }
// Just to recap, being here means something is wrong with the
// source: it can be a missing generated header, it can be an
@@ -4432,7 +4991,12 @@ namespace build2
// or will issue diagnostics.
//
if (restart)
+ {
+ if (dbuf.is_open ())
+ dbuf.close ();
+
l6 ([&]{trace << "trying again without generated headers";});
+ }
else
{
// In some pathological situations we may end up switching
@@ -4457,19 +5021,24 @@ namespace build2
// example, because we have removed all the partially
// preprocessed source files).
//
- if (force_gen_skip && *force_gen_skip == skip_count)
{
- diag_record dr (fail);
+ diag_record dr;
+ if (force_gen_skip && *force_gen_skip == skip_count)
+ {
+ dr <<
+ fail << "inconsistent " << x_lang << " compiler behavior" <<
+ info << "run the following two commands to investigate";
- dr << "inconsistent " << x_lang << " compiler behavior" <<
- info << "run the following two commands to investigate";
+ dr << info;
+ print_process (dr, args.data ()); // No pipes.
- dr << info;
- print_process (dr, args.data ()); // No pipes.
+ init_args ((gen = true));
+ dr << info << "";
+ print_process (dr, args.data ()); // No pipes.
+ }
- init_args ((gen = true));
- dr << info << "";
- print_process (dr, args.data ()); // No pipes.
+ if (dbuf.is_open ())
+ dbuf.close (move (dr)); // Throws if error.
}
restart = true;
@@ -4480,7 +5049,15 @@ namespace build2
continue;
}
else
- run_finish (args, pr); // Throws.
+ {
+ if (dbuf.is_open ())
+ {
+ dbuf.close (args, pe, 2 /* verbosity */);
+ throw failed ();
+ }
+ else
+ run_finish (args, pr, 2 /* verbosity */);
+ }
}
catch (const process_error& e)
{
@@ -4506,7 +5083,9 @@ namespace build2
dd.expect ("");
puse = puse && !reprocess && psrc;
- return make_pair (move (psrc), puse);
+
+ result.first = move (psrc);
+ result.second = puse;
}
// Return the translation unit information (last argument) and its
@@ -4525,6 +5104,18 @@ namespace build2
{
tracer trace (x, "compile_rule::parse_unit");
+ // Scanning .S files with our parser is hazardous since such files
+ // sometimes use `#`-style comments. Presumably real compilers just
+ // ignore them in some way, but it doesn't seem worth it to bother in
+ // our case. Also, the checksum calculation over assembler tokens feels
+ // iffy.
+ //
+ if (x_assembler_cpp (src))
+ {
+ tu.type = unit_type::non_modular;
+ return "";
+ }
+
otype ot (li.type);
// If things go wrong, give the user a bit of extra context. Let's call it
@@ -4603,8 +5194,6 @@ namespace build2
case compiler_class::msvc: werror = "/WX"; break;
}
- bool clang (ctype == compiler_type::clang);
-
append_options (args, t, c_coptions, werror);
append_options (args, t, x_coptions, werror);
@@ -4619,11 +5208,16 @@ namespace build2
append_options (args, cmode);
append_sys_hdr_options (args);
+ // Note: no append_diag_color_options() call since the
+ // diagnostics is discarded.
+
// See perform_update() for details on the choice of options.
//
{
- bool sc (find_option_prefix ("/source-charset:", args));
- bool ec (find_option_prefix ("/execution-charset:", args));
+ bool sc (find_option_prefixes (
+ {"/source-charset:", "-source-charset:"}, args));
+ bool ec (find_option_prefixes (
+ {"/execution-charset:", "-execution-charset:"}, args));
if (!sc && !ec)
args.push_back ("/utf-8");
@@ -4639,15 +5233,16 @@ namespace build2
if (cvariant != "clang" && isystem (*this))
{
- if (find_option_prefix ("/external:I", args) &&
- !find_option_prefix ("/external:W", args))
+ if (find_option_prefixes ({"/external:I", "-external:I"}, args) &&
+ !find_option_prefixes ({"/external:W", "-external:W"}, args))
args.push_back ("/external:W0");
}
- if (x_lang == lang::cxx && !find_option_prefix ("/EH", args))
+ if (x_lang == lang::cxx &&
+ !find_option_prefixes ({"/EH", "-EH"}, args))
args.push_back ("/EHsc");
- if (!find_option_prefixes ({"/MD", "/MT"}, args))
+ if (!find_option_prefixes ({"/MD", "/MT", "-MD", "-MT"}, args))
args.push_back ("/MD");
args.push_back ("/E");
@@ -4661,10 +5256,12 @@ namespace build2
}
case compiler_class::gcc:
{
- append_options (args, cmode,
- cmode.size () - (modules && clang ? 1 : 0));
+ append_options (args, cmode);
append_sys_hdr_options (args);
+ // Note: no append_diag_color_options() call since the
+ // diagnostics is discarded.
+
// See perform_update() for details on the choice of options.
//
if (!find_option_prefix ("-finput-charset=", args))
@@ -4678,8 +5275,7 @@ namespace build2
if (ctype == compiler_type::clang && tsys == "win32-msvc")
{
- initializer_list<const char*> os {"-nostdlib", "-nostartfiles"};
- if (!find_options (os, cmode) && !find_options (os, args))
+ if (!find_options ({"-nostdlib", "-nostartfiles"}, args))
{
args.push_back ("-D_MT");
args.push_back ("-D_DLL");
@@ -4706,12 +5302,36 @@ namespace build2
//
if (ps)
{
- if (ctype == compiler_type::gcc)
+ switch (ctype)
{
- // Note that only these two *plus* -x do the trick.
- //
- args.push_back ("-fpreprocessed");
- args.push_back ("-fdirectives-only");
+ case compiler_type::gcc:
+ {
+ // Note that only these two *plus* -x do the trick.
+ //
+ args.push_back ("-fpreprocessed");
+ args.push_back ("-fdirectives-only");
+ break;
+ }
+ case compiler_type::clang:
+ {
+ // See below for details.
+ //
+ if (ctype == compiler_type::clang &&
+ cmaj >= (cvariant != "apple" ? 15 : 16))
+ {
+ if (find_options ({"-pedantic", "-pedantic-errors",
+ "-Wpedantic", "-Werror=pedantic"},
+ args))
+ {
+ args.push_back ("-Wno-gnu-line-marker");
+ }
+ }
+
+ break;
+ }
+ case compiler_type::msvc:
+ case compiler_type::icc:
+ assert (false);
}
}
@@ -4765,10 +5385,10 @@ namespace build2
print_process (args);
// We don't want to see warnings multiple times so ignore all
- // diagnostics.
+ // diagnostics (thus no need for diag_buffer).
//
pr = process (cpath,
- args.data (),
+ args,
0, -1, -2,
nullptr, // CWD
env.empty () ? nullptr : env.data ());
@@ -4780,7 +5400,7 @@ namespace build2
fdstream_mode::binary | fdstream_mode::skip);
parser p;
- p.parse (is, path_name (*sp), tu);
+ p.parse (is, path_name (*sp), tu, cid);
is.close ();
@@ -4795,7 +5415,9 @@ namespace build2
if (!modules)
{
if (ut != unit_type::non_modular || !mi.imports.empty ())
- fail << "modules support required by " << src;
+ fail << "modules support required by " << src <<
+ info << "consider enabling modules with "
+ << x << ".features.modules=true in root.build";
}
else
{
@@ -4820,25 +5442,21 @@ namespace build2
ut = md.type;
mi.name = src.path ().string ();
}
-
- // Prior to 15.5 (19.12) VC was not using the 'export module M;'
- // syntax so we use the preprequisite type to distinguish
- // between interface and implementation units.
- //
- // @@ TMP: probably outdated.
- //
- if (ctype == compiler_type::msvc && cmaj == 19 && cmin <= 11)
- {
- if (ut == unit_type::module_impl && src.is_a (*x_mod))
- ut = unit_type::module_intf;
- }
}
// If we were forced to reprocess, assume the checksum is not
// accurate (parts of the translation unit could have been
// #ifdef'ed out; see __build2_preprocess).
//
- return reprocess ? string () : move (p.checksum);
+ // Also, don't use the checksum for header units since it ignores
+ // preprocessor directives and may therefore cause us to ignore a
+ // change to an exported macro. @@ TODO: maybe we should add a
+ // flag to the parser not to waste time calculating the checksum
+ // in these cases.
+ //
+ return reprocess || ut == unit_type::module_header
+ ? string ()
+ : move (p.checksum);
}
// Fall through.
@@ -4869,7 +5487,7 @@ namespace build2
info << "then run failing command to display compiler diagnostics";
}
else
- run_finish (args, pr); // Throws.
+ run_finish (args, pr, 2 /* verbosity */); // Throws.
}
catch (const process_error& e)
{
@@ -5038,6 +5656,9 @@ namespace build2
{
tracer trace (x, "compile_rule::search_modules");
+ context& ctx (bs.ctx);
+ const scope& rs (*bs.root_scope ());
+
// NOTE: currently we don't see header unit imports (they are handled by
// extract_headers() and are not in imports).
@@ -5073,7 +5694,7 @@ namespace build2
// So, the fuzzy match: the idea is that each match gets a score, the
// number of characters in the module name that got matched. A match
// with the highest score is used. And we use the (length + 1) for a
- // match against an actual module name.
+ // match against an actual (extracted) module name.
//
// Actually, the scoring system is a bit more elaborate than that.
// Consider module name core.window and two files, window.mxx and
@@ -5101,10 +5722,10 @@ namespace build2
// module (or partition) component. Failing that, we will match `format`
// to `print` because the last character (`t`) is the same.
//
- // For std.* modules we only accept non-fuzzy matches (think std.core vs
- // some core.mxx). And if such a module is unresolved, then we assume it
- // is pre-built and will be found by some other means (e.g., VC's
- // IFCPATH).
+ // For std.* modules we only accept non-fuzzy matches (think std.compat
+ // vs some compat.mxx). And if such a module is unresolved, then we
+ // assume it is pre-built and will be found by some other means (e.g.,
+ // VC's IFCPATH).
//
// Note also that we handle module partitions the same as submodules. In
// other words, for matching, `.` and `:` are treated the same.
@@ -5117,7 +5738,7 @@ namespace build2
// PPPPABBBB
//
// Where PPPP is the primary score, A is the A) score, and BBBB is
- // the B) scope described above. Zero signifies no match.
+ // the B) score described above. Zero signifies no match.
//
// We use decimal instead of binary packing to make it easier for the
// human to separate fields in the trace messages, during debugging,
@@ -5223,6 +5844,31 @@ namespace build2
if (!match)
return 0;
+ // Here is another corner case, the module is async_simple:IOExecutor
+ // and the file names are:
+ //
+ // IOExecutor.mxx
+ // SimpleIOExecutor.mxx
+ //
+ // The above implementation treats the latter as better because
+ // `Simple` in SimpleIOExecutor matches `simple` in async_simple. It's
+ // unclear what we can do about it without potentially breaking other
+ // legitimate cases (think Boost_Simple:IOExecutor). Maybe we could
+ // boost the exact partition name match score, similar to the exact
+ // module match, as some sort of a heuristics? Let's try.
+ // module match, as some sort of heuristic? Let's try.
+ if (fi == 0 && mi != 0 && m[mi - 1] == ':')
+ {
+ // Pretend we matched one short of the next module component. This
+ // way AsyncSimpleIOExecutor.mxx would still be a better match.
+ //
+ while (--mi != 0 && m[mi - 1] != '.')
+ ;
+
+ msep = (mi != 0); // For uncount logic below.
+ mi++; // One short.
+ }
+
// "Uncount" real separators.
//
if (fsep) fi++;
@@ -5251,6 +5897,20 @@ namespace build2
return ps * 100000 + as * 10000 + bs;
};
+#if 0
+ assert (match ("IOExecutor", "async_simple:IOExecutor") >
+ match ("SimpleIOExecutor", "async_simple:IOExecutor"));
+
+ assert (match ("IOExecutor", "async_simple:IOExecutor") <
+ match ("AsyncSimpleIOExecutor", "async_simple:IOExecutor"));
+
+ assert (match ("IOExecutor", "x.async_simple:IOExecutor") >
+ match ("SimpleIOExecutor", "x.async_simple:IOExecutor"));
+
+ assert (match ("IOExecutor", "x.async_simple:IOExecutor") <
+ match ("AsyncSimpleIOExecutor", "x.async_simple:IOExecutor"));
+#endif
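// To make the decimal packing concrete, with a hypothetical primary score
// of 9, an exact-module bonus of 1, and a B) score of 11 (values chosen
// purely to illustrate the PPPPABBBB field layout):
//
static_assert (9 * 100000 + 1 * 10000 + 11 == 910011,
               "PPPP=0009, A=1, BBBB=0011 packs as 910011");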
+
auto& pts (t.prerequisite_targets[a]);
size_t start (pts.size ()); // Index of the first to be added.
@@ -5265,7 +5925,7 @@ namespace build2
// promise. It has to do with module re-exporting (export import M;).
// In this case (currently) all implementations simply treat it as a
// shallow (from the BMI's point of view) reference to the module (or an
- // implicit import, if you will). Do you see where it's going? Nowever
+ // implicit import, if you will). Do you see where it's going? Nowhere
// good, that's right. This shallow reference means that the compiler
// should be able to find BMIs for all the re-exported modules,
// recursively. The good news is we are actually in a pretty good shape
@@ -5282,10 +5942,11 @@ namespace build2
// 1. There is no good place in prerequisite_targets to store the
// exported flag (no, using the marking facility across match/execute
// is a bad idea). So what we are going to do is put re-exported
- // bmi{}s at the back and store (in the target's data pad) the start
- // position. One bad aspect about this part is that we assume those
- // bmi{}s have been matched by the same rule. But let's not kid
- // ourselves, there will be no other rule that matches bmi{}s.
+ // bmi{}s at the back and store (in the target's auxiliary data
+ // storage) the start position. One bad aspect about this part is
+ // that we assume those bmi{}s have been matched by the same
+ // rule. But let's not kid ourselves, there will be no other rule
+ // that matches bmi{}s.
//
// @@ I think now we could use prerequisite_targets::data for this?
//
@@ -5310,6 +5971,7 @@ namespace build2
// so we actually don't need to pass any extra options (unless things
// get moved) but they still need access to the BMIs (and things will
// most likely have to be done differently for distributed compilation).
+ // @@ Note: no longer the case for Clang either.
//
// So the revised plan: on the off chance that some implementation will
// do it differently we will continue maintaining the imported/re-exported
@@ -5403,6 +6065,8 @@ namespace build2
continue; // Scan the rest to detect if all done.
}
}
+ else
+ assert (name != m.name); // No duplicates.
done = false;
}
@@ -5430,10 +6094,18 @@ namespace build2
//
if (pt->is_a<bmix> ())
{
- const string& n (cast<string> (pt->state[a].vars[c_module_name]));
-
- if (const target** p = check_exact (n))
- *p = pt;
+ // If the extraction of the module information for this BMI failed
+ // and we have deferred failure to compiler diagnostics, then
+ // there will be no module name assigned. It would have been
+ // better to make sure that's the cause, but that won't be easy.
+ //
+ const string* n (cast_null<string> (
+ pt->state[a].vars[c_module_name]));
+ if (n != nullptr)
+ {
+ if (const target** p = check_exact (*n))
+ *p = pt;
+ }
}
else if (pt->is_a (*x_mod))
{
@@ -5442,7 +6114,8 @@ namespace build2
// rule puts them into prerequisite_targets for us).
//
// The module names should be specified but if not assume
- // something else is going on and ignore.
+ // something else is going on (like a deferred failure) and
+ // ignore.
//
// Note also that besides modules, prerequisite_targets may
// contain libraries which are interface dependencies of this
@@ -5455,7 +6128,15 @@ namespace build2
continue;
if (const target** p = check_exact (*n))
- *p = &this->make_module_sidebuild (a, bs, l, *pt, *n); // GCC 4.9
+ {
+ // It seems natural to build a BMI type that corresponds to the
+ // library type. After all, this is where the object file part
+ // of the BMI is going to come from (unless it's a module
+ // interface-only library).
+ //
+ *p = &this->make_module_sidebuild (
+ a, bs, &l, link_type (l).type, *pt, *n).first; // GCC 4.9
+ }
}
// Note that in prerequisite targets we will have the libux{}
// members, not the group.
@@ -5470,112 +6151,295 @@ namespace build2
}
};
- for (prerequisite_member p: group_prerequisite_members (a, t))
+ // Pre-resolve std modules in an ad hoc way for certain compilers.
+ //
+ // @@ TODO: cache x_stdlib value.
+ //
+ if ((ctype == compiler_type::msvc) ||
+ (ctype == compiler_type::clang &&
+ cmaj >= 17 &&
+ cast<string> (rs[x_stdlib]) == "libc++"))
{
- if (include (a, t, p) != include_type::normal) // Excluded/ad hoc.
- continue;
-
- const target* pt (p.load ()); // Should be cached for libraries.
+ // Similar logic to check_exact() above.
+ //
+ done = true;
- if (pt != nullptr)
+ for (size_t i (0); i != n; ++i)
{
- const file* lt (nullptr);
-
- if (const libx* l = pt->is_a<libx> ())
- lt = link_member (*l, a, li);
- else if (pt->is_a<liba> () || pt->is_a<libs> () || pt->is_a<libux> ())
- lt = &pt->as<file> ();
+ module_import& m (imports[i]);
- // If this is a library, check its bmi{}s and mxx{}s.
- //
- if (lt != nullptr)
+ if (m.name == "std" || m.name == "std.compat")
{
- find (*lt, find);
+ otype ot (otype::e);
+ const target* mt (nullptr);
- if (done)
- break;
+ switch (ctype)
+ {
+ case compiler_type::clang:
+ {
+ if (m.name != "std")
+ fail << "module " << m.name << " not yet provided by libc++";
- continue;
- }
+ // Find or insert std.cppm (similar code to pkgconfig.cxx).
+ //
+ // Note: build_install_data is absolute and normalized.
+ //
+ mt = &ctx.targets.insert_locked (
+ *x_mod,
+ (dir_path (build_install_data) /= "libbuild2") /= "cc",
+ dir_path (),
+ "std",
+ string ("cppm"), // For C++14 during bootstrap.
+ target_decl::implied,
+ trace).first;
+
+ // Which output type should we use, static or shared? The
+ // correct way would be to detect whether static or shared
+ // version of libc++ is to be linked and use the corresponding
+ // type. And we could do that by looking for -static-libstdc++
+ // in loption (and no, it's not -static-libc++).
+ //
+ // But, looking at the object file produced from std.cppm, it
+ // only contains one symbol, the static object initializer.
+ // And this is unlikely to change since all other non-inline
+ // or template symbols should be in libc++. So feels like it's
+ // not worth the trouble and one variant should be good enough
+ // for both cases. Let's use the shared one for less
+ // surprising diagnostics (as in, "why are you linking obje{}
+ // to a shared library?")
+ //
+ // (Of course, theoretically, std.cppm could detect via a
+ // macro whether it's being compiled with -fPIC or not and do
+ // things differently, but this seems far-fetched).
+ //
+ ot = otype::s;
- // Fall through.
- }
+ break;
+ }
+ case compiler_type::msvc:
+ {
+ // For MSVC, the source files std.ixx and std.compat.ixx are
+ // found in the modules/ subdirectory which is a sibling of
+ // include/ in the MSVC toolset (and "that is a contract with
+ // customers" to quote one of the developers).
+ //
+ // The problem of course is that there are multiple system
+ // header search directories (for example, as specified in the
+ // INCLUDE environment variable) and which one of them is for
+ // the MSVC toolset is not specified. So what we are going to
+ // do is search for one of the well-known standard C++ headers
+ // and assume that the directory where we found it is the one
+ // we are looking for. Or we could look for something
+ // MSVC-specific like vcruntime.h.
+ //
+ dir_path modules;
+ if (optional<path> p = find_system_header (path ("vcruntime.h")))
+ {
+ p->make_directory (); // Strip vcruntime.h.
+ if (p->leaf () == path ("include")) // Sanity check.
+ {
+ modules = path_cast<dir_path> (move (p->make_directory ()));
+ modules /= "modules";
+ }
+ }
- // While it would have been even better not to search for a target, we
- // need to get hold of the corresponding mxx{} (unlikely but possible
- // for bmi{} to have a different name).
- //
- // While we want to use group_prerequisite_members() below, we cannot
- // call resolve_group() since we will be doing it "speculatively" for
- // modules that we may use but also for modules that may use us. This
- // quickly leads to deadlocks. So instead we are going to perform an
- // ad hoc group resolution.
- //
- const target* pg;
- if (p.is_a<bmi> ())
- {
- pg = pt != nullptr ? pt : &p.search (t);
- pt = &search (t, btt, p.key ()); // Same logic as in picking obj*{}.
- }
- else if (p.is_a (btt))
- {
- pg = &search (t, bmi::static_type, p.key ());
- if (pt == nullptr) pt = &p.search (t);
+ if (modules.empty ())
+ fail << "unable to locate MSVC standard modules directory";
+
+ mt = &ctx.targets.insert_locked (
+ *x_mod,
+ move (modules),
+ dir_path (),
+ m.name,
+ string ("ixx"), // For C++14 during bootstrap.
+ target_decl::implied,
+ trace).first;
+
+ // For MSVC it's easier to detect the runtime being used since
+ // it's specified with the compile options (/MT[d], /MD[d]).
+ //
+ // Similar semantics as in extract_headers() except here we
+ // use options visible from the root scope. Note that
+ // find_option_prefixes() looks in reverse, so look in the
+ // cmode, x_coptions, c_coptions order.
+ //
+ initializer_list<const char*> os {"/MD", "/MT", "-MD", "-MT"};
+
+ const string* o;
+ if ((o = find_option_prefixes (os, cmode)) != nullptr ||
+ (o = find_option_prefixes (os, rs, x_coptions)) != nullptr ||
+ (o = find_option_prefixes (os, rs, c_coptions)) != nullptr)
+ {
+ ot = (*o)[2] == 'D' ? otype::s : otype::a;
+ }
+ else
+ ot = otype::s; // The default is /MD.
+
+ break;
+ }
+ case compiler_type::gcc:
+ case compiler_type::icc:
+ assert (false);
+ };
+
+ pair<target&, ulock> tl (
+ this->make_module_sidebuild ( // GCC 4.9
+ a, bs, nullptr, ot, *mt, m.name));
+
+ if (tl.second.owns_lock ())
+ {
+ // Special compile options for the std modules.
+ //
+ if (ctype == compiler_type::clang)
+ {
+ value& v (tl.first.append_locked (x_coptions));
+
+ if (v.null)
+ v = strings {};
+
+ strings& cops (v.as<strings> ());
+
+ switch (ctype)
+ {
+ case compiler_type::clang:
+ {
+ cops.push_back ("-Wno-reserved-module-identifier");
+ break;
+ }
+ case compiler_type::msvc:
+ // It appears nothing special is needed to compile MSVC
+ // standard modules.
+ case compiler_type::gcc:
+ case compiler_type::icc:
+ assert (false);
+ };
+ }
+
+ tl.second.unlock ();
+ }
+
+ pts[start + i].target = &tl.first;
+ m.score = match_max (m.name) + 1;
+ continue; // Scan the rest to detect if all done.
+ }
+
+ done = false;
}
- else
- continue;
+ }
- // Find the mxx{} prerequisite and extract its "file name" for the
- // fuzzy match unless the user specified the module name explicitly.
- //
- for (prerequisite_member p:
- prerequisite_members (a, t, group_prerequisites (*pt, pg)))
+ // Go over prerequisites and try to resolve imported modules with them.
+ //
+ if (!done)
+ {
+ for (prerequisite_member p: group_prerequisite_members (a, t))
{
if (include (a, t, p) != include_type::normal) // Excluded/ad hoc.
continue;
- if (p.is_a (*x_mod))
+ const target* pt (p.load ()); // Should be cached for libraries.
+
+ if (pt != nullptr)
{
- // Check for an explicit module name. Only look for an existing
- // target (which means the name can only be specified on the
- // target itself, not target type/pattern-spec).
+ const file* lt (nullptr);
+
+ if (const libx* l = pt->is_a<libx> ())
+ lt = link_member (*l, a, li);
+ else if (pt->is_a<liba> () ||
+ pt->is_a<libs> () ||
+ pt->is_a<libux> ())
+ lt = &pt->as<file> ();
+
+ // If this is a library, check its bmi{}s and mxx{}s.
//
- const target* t (p.search_existing ());
- const string* n (t != nullptr
- ? cast_null<string> (t->vars[c_module_name])
- : nullptr);
- if (n != nullptr)
+ if (lt != nullptr)
{
- if (const target** p = check_exact (*n))
- *p = pt;
+ find (*lt, find);
+
+ if (done)
+ break;
+
+ continue;
}
- else
+
+ // Fall through.
+ }
+
+ // While it would have been even better not to search for a target,
+ // we need to get hold of the corresponding mxx{} (unlikely but
+ // possible for bmi{} to have a different name).
+ //
+ // While we want to use group_prerequisite_members() below, we
+ // cannot call resolve_group() since we will be doing it
+ // "speculatively" for modules that we may use but also for modules
+ // that may use us. This quickly leads to deadlocks. So instead we
+ // are going to perform an ad hoc group resolution.
+ //
+ const target* pg;
+ if (p.is_a<bmi> ())
+ {
+ pg = pt != nullptr ? pt : &p.search (t);
+ pt = &search (t, btt, p.key ()); // Same logic as in picking obj*{}.
+ }
+ else if (p.is_a (btt))
+ {
+ pg = &search (t, bmi::static_type, p.key ());
+ if (pt == nullptr) pt = &p.search (t);
+ }
+ else
+ continue;
+
+ // Find the mxx{} prerequisite and extract its "file name" for the
+ // fuzzy match unless the user specified the module name explicitly.
+ //
+ for (prerequisite_member p:
+ prerequisite_members (a, t, group_prerequisites (*pt, pg)))
+ {
+ if (include (a, t, p) != include_type::normal) // Excluded/ad hoc.
+ continue;
+
+ if (p.is_a (*x_mod))
{
- // Fuzzy match.
+ // Check for an explicit module name. Only look for an existing
+ // target (which means the name can only be specified on the
+ // target itself, not target type/pattern-spec).
//
- string f;
+ const target* mt (p.search_existing ());
+ const string* n (mt != nullptr
+ ? cast_null<string> (mt->vars[c_module_name])
+ : nullptr);
+ if (n != nullptr)
+ {
+ if (const target** p = check_exact (*n))
+ *p = pt;
+ }
+ else
+ {
+ // Fuzzy match.
+ //
+ string f;
- // Add the directory part if it is relative. The idea is to
- // include it into the module match, say hello.core vs
- // hello/mxx{core}.
- //
- // @@ MOD: Why not for absolute? Good question. What if it
- // contains special components, say, ../mxx{core}?
- //
- const dir_path& d (p.dir ());
+ // Add the directory part if it is relative. The idea is to
+ // include it into the module match, say hello.core vs
+ // hello/mxx{core}.
+ //
+ // @@ MOD: Why not for absolute? Good question. What if it
+ // contains special components, say, ../mxx{core}?
+ //
+ const dir_path& d (p.dir ());
- if (!d.empty () && d.relative ())
- f = d.representation (); // Includes trailing slash.
+ if (!d.empty () && d.relative ())
+ f = d.representation (); // Includes trailing slash.
- f += p.name ();
- check_fuzzy (pt, f);
+ f += p.name ();
+ check_fuzzy (pt, f);
+ }
+ break;
}
- break;
}
- }
- if (done)
- break;
+ if (done)
+ break;
+ }
}
// Diagnose unresolved modules.
@@ -5645,9 +6509,12 @@ namespace build2
if (m.score <= match_max (in))
{
- const string& mn (cast<string> (bt->state[a].vars[c_module_name]));
+ // As above (deferred failure).
+ //
+ const string* mn (
+ cast_null<string> (bt->state[a].vars[c_module_name]));
- if (in != mn)
+ if (mn != nullptr && in != *mn)
{
// Note: matched, so the group should be resolved.
//
@@ -5661,7 +6528,7 @@ namespace build2
fail (relative (src))
<< "failed to correctly guess module name from " << p <<
info << "guessed: " << in <<
- info << "actual: " << mn <<
+ info << "actual: " << *mn <<
info << "consider adjusting module interface file names or" <<
info << "consider specifying module name with " << x
<< ".module_name";
@@ -5672,11 +6539,11 @@ namespace build2
// Hash (we know it's a file).
//
- cs.append (static_cast<const file&> (*bt).path ().string ());
+ cs.append (bt->as<file> ().path ().string ());
// Copy over bmi{}s from our prerequisites weeding out duplicates.
//
- if (size_t j = bt->data<match_data> ().modules.start)
+ if (size_t j = bt->data<match_data> (a).modules.start)
{
// Hard to say whether we should reserve or not. We will probably
// get quite a bit of duplications.
@@ -5689,26 +6556,29 @@ namespace build2
if (et == nullptr)
continue; // Unresolved (std.*).
- const string& mn (cast<string> (et->state[a].vars[c_module_name]));
+ // As above (deferred failure).
+ //
+ const string* mn (cast_null<string> (et->state[a].vars[c_module_name]));
- if (find_if (imports.begin (), imports.end (),
- [&mn] (const module_import& i)
+ if (mn != nullptr &&
+ find_if (imports.begin (), imports.end (),
+ [mn] (const module_import& i)
{
- return i.name == mn;
+ return i.name == *mn;
}) == imports.end ())
{
pts.push_back (et);
- cs.append (static_cast<const file&> (*et).path ().string ());
+ cs.append (et->as<file> ().path ().string ());
// Add to the list of imports for further duplicate suppression.
// We could have stored reference to the name (e.g., in score)
// but it's probably not worth it if we have a small string
// optimization.
//
- import_type t (mn.find (':') != string::npos
+ import_type t (mn->find (':') != string::npos
? import_type::module_part
: import_type::module_intf);
- imports.push_back (module_import {t, mn, true, 0});
+ imports.push_back (module_import {t, *mn, true, 0});
}
}
}
@@ -5728,6 +6598,10 @@ namespace build2
// Find or create a modules sidebuild subproject returning its root
// directory.
//
+ // @@ Could we omit creating a subproject if the sidebuild scope is the
+ // project scope itself? This would speed up simple examples (and
+ // potentially direct compilation that we may support).
+ //
pair<dir_path, const scope&> compile_rule::
find_modules_sidebuild (const scope& rs) const
{
@@ -5738,6 +6612,9 @@ namespace build2
// cc.config module and that is within our amalgamation seems like a
// good place.
//
+ // @@ TODO: maybe we should cache this in compile_rule ctor like we
+ // do for the header cache?
+ //
const scope* as (&rs);
{
const scope* ws (as->weak_scope ());
@@ -5753,7 +6630,7 @@ namespace build2
// This is also the module that registers the scope operation
// callback that cleans up the subproject.
//
- if (cast_false<bool> ((*s)["cc.core.vars.loaded"]))
+ if (cast_false<bool> (s->vars["cc.core.vars.loaded"]))
as = s;
} while (s != ws);
@@ -5829,13 +6706,18 @@ namespace build2
return pair<dir_path, const scope&> (move (pd), *as);
}
- // Synthesize a dependency for building a module binary interface on
- // the side.
+ // Synthesize a dependency for building a module binary interface of a
+ // library on the side. If library is missing, then assume it's some
+ // ad hoc/system library case (in which case we assume it's binless,
+ // for now).
//
- const file& compile_rule::
+ // The return value semantics is as in target_set::insert_locked().
+ //
+ pair<target&, ulock> compile_rule::
make_module_sidebuild (action a,
const scope& bs,
- const file& lt,
+ const file* lt,
+ otype ot,
const target& mt,
const string& mn) const
{
@@ -5856,24 +6738,20 @@ namespace build2
back_inserter (mf),
[] (char c) {return c == '.' ? '-' : c == ':' ? '+' : c;});
- // It seems natural to build a BMI type that corresponds to the library
- // type. After all, this is where the object file part of the BMI is
- // going to come from (unless it's a module interface-only library).
- //
- const target_type& tt (compile_types (link_type (lt).type).bmi);
+ const target_type& tt (compile_types (ot).bmi);
// Store the BMI target in the subproject root. If the target already
// exists then we assume all this is already done (otherwise why would
// someone have created such a target).
//
- if (const file* bt = bs.ctx.targets.find<file> (
+ if (const target* bt = bs.ctx.targets.find (
tt,
pd,
dir_path (), // Always in the out tree.
mf,
nullopt, // Use default extension.
trace))
- return *bt;
+ return pair<target&, ulock> (const_cast<target&> (*bt), ulock ());
prerequisites ps;
ps.push_back (prerequisite (mt));
@@ -5886,16 +6764,22 @@ namespace build2
//
// Note: lt is matched and so the group is resolved.
//
- ps.push_back (prerequisite (lt));
- for (prerequisite_member p: group_prerequisite_members (a, lt))
+ if (lt != nullptr)
{
- if (include (a, lt, p) != include_type::normal) // Excluded/ad hoc.
- continue;
-
- if (p.is_a<libx> () ||
- p.is_a<liba> () || p.is_a<libs> () || p.is_a<libux> ())
+ ps.push_back (prerequisite (*lt));
+ for (prerequisite_member p: group_prerequisite_members (a, *lt))
{
- ps.push_back (p.as_prerequisite ());
+ // Ignore update=match.
+ //
+ lookup l;
+ if (include (a, *lt, p, &l) != include_type::normal) // Excluded/ad hoc.
+ continue;
+
+ if (p.is_a<libx> () ||
+ p.is_a<liba> () || p.is_a<libs> () || p.is_a<libux> ())
+ {
+ ps.push_back (p.as_prerequisite ());
+ }
}
}
@@ -5906,23 +6790,24 @@ namespace build2
move (mf),
nullopt, // Use default extension.
target_decl::implied,
- trace));
- file& bt (static_cast<file&> (p.first));
+ trace,
+ true /* skip_find */));
// Note that this is racy and someone might have created this target
// while we were preparing the prerequisite list.
//
if (p.second)
{
- bt.prerequisites (move (ps));
+ p.first.prerequisites (move (ps));
// Unless this is a binless library, we don't need the object file
// (see config_data::b_binless for details).
//
- bt.vars.assign (b_binless) = (lt.mtime () == timestamp_unreal);
+ p.first.vars.assign (b_binless) = (lt == nullptr ||
+ lt->mtime () == timestamp_unreal);
}
- return bt;
+ return p;
}
// Synthesize a dependency for building a header unit binary interface on
@@ -6038,7 +6923,9 @@ namespace build2
//
process_libraries (a, bs, nullopt, sys_lib_dirs,
*f, la, 0, // lflags unused.
- imp, lib, nullptr, true /* self */,
+ imp, lib, nullptr,
+ true /* self */,
+ false /* proc_opt_group */,
&lib_cache);
if (lt != nullptr)
@@ -6123,7 +7010,10 @@ namespace build2
//
for (prerequisite_member p: group_prerequisite_members (a, t))
{
- if (include (a, t, p) != include_type::normal) // Excluded/ad hoc.
+ // Ignore update=match.
+ //
+ lookup l;
+ if (include (a, t, p, &l) != include_type::normal) // Excluded/ad hoc.
continue;
if (p.is_a<libx> () ||
@@ -6141,8 +7031,9 @@ namespace build2
move (mf),
nullopt, // Use default extension.
target_decl::implied,
- trace));
- file& bt (static_cast<file&> (p.first));
+ trace,
+ true /* skip_find */));
+ file& bt (p.first.as<file> ());
// Note that this is racy and someone might have created this target
// while we were preparing the prerequisite list.
@@ -6180,7 +7071,7 @@ namespace build2
// Filter cl.exe noise (msvc.cxx).
//
void
- msvc_filter_cl (ifdstream&, const path& src);
+ msvc_filter_cl (diag_buffer&, const path& src);
// Append header unit-related options.
//
@@ -6231,7 +7122,7 @@ namespace build2
// options).
//
void compile_rule::
- append_module_options (environment& env,
+ append_module_options (environment&,
cstrings& args,
small_vector<string, 2>& stor,
action a,
@@ -6242,8 +7133,6 @@ namespace build2
unit_type ut (md.type);
const module_positions& ms (md.modules);
- dir_path stdifc; // See the VC case below.
-
switch (ctype)
{
case compiler_type::gcc:
@@ -6272,15 +7161,12 @@ namespace build2
if (ms.start == 0)
return;
- // Clang embeds module file references so we only need to specify
- // our direct imports.
- //
- // If/when we get the ability to specify the mapping in a file, we
- // will pass the whole list.
+ // If/when we get the ability to specify the mapping in a file.
//
#if 0
// In Clang the module implementation's unit .pcm is special and
- // must be "loaded".
+ // must be "loaded". Note: this is no longer the case from Clang 16
+ // and is deprecated in 17.
//
if (ut == unit_type::module_impl)
{
@@ -6297,10 +7183,7 @@ namespace build2
stor.push_back (move (s));
#else
auto& pts (t.prerequisite_targets[a]);
- for (size_t i (ms.start),
- n (ms.copied != 0 ? ms.copied : pts.size ());
- i != n;
- ++i)
+ for (size_t i (ms.start), n (pts.size ()); i != n; ++i)
{
const target* pt (pts[i]);
@@ -6313,17 +7196,9 @@ namespace build2
const file& f (pt->as<file> ());
string s (relative (f.path ()).string ());
- // In Clang the module implementation's unit .pcm is special and
- // must be "loaded".
- //
- if (ut == unit_type::module_impl && i == ms.start)
- s.insert (0, "-fmodule-file=");
- else
- {
- s.insert (0, 1, '=');
- s.insert (0, cast<string> (f.state[a].vars[c_module_name]));
- s.insert (0, "-fmodule-file=");
- }
+ s.insert (0, 1, '=');
+ s.insert (0, cast<string> (f.state[a].vars[c_module_name]));
+ s.insert (0, "-fmodule-file=");
stor.push_back (move (s));
}
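A sketch of the resulting option for one prerequisite (module name and path are hypothetical): the three insert() calls above turn a relative BMI path into

  -fmodule-file=hello.core=cxx/core.pcm

that is, Clang's -fmodule-file=<name>=<path> form, which maps a named module to its BMI file.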
@@ -6335,10 +7210,11 @@ namespace build2
if (ms.start == 0)
return;
+ // MSVC requires a transitive set of interfaces, including
+ // implementation partitions.
+ //
auto& pts (t.prerequisite_targets[a]);
- for (size_t i (ms.start), n (pts.size ());
- i != n;
- ++i)
+ for (size_t i (ms.start), n (pts.size ()); i != n; ++i)
{
const target* pt (pts[i]);
@@ -6349,34 +7225,14 @@ namespace build2
// of these are bmi's.
//
const file& f (pt->as<file> ());
+ string s (relative (f.path ()).string ());
- // In VC std.* modules can only come from a single directory
- // specified with the IFCPATH environment variable or the
- // /module:stdIfcDir option.
- //
- if (std_module (cast<string> (f.state[a].vars[c_module_name])))
- {
- dir_path d (f.path ().directory ());
+ s.insert (0, 1, '=');
+ s.insert (0, cast<string> (f.state[a].vars[c_module_name]));
- if (stdifc.empty ())
- {
- // Go one directory up since /module:stdIfcDir will look in
- // either Release or Debug subdirectories. Keeping the result
- // absolute feels right.
- //
- stor.push_back ("/module:stdIfcDir");
- stor.push_back (d.directory ().string ());
- stdifc = move (d);
- }
- else if (d != stdifc) // Absolute and normalized.
- fail << "multiple std.* modules in different directories";
- }
- else
- {
- stor.push_back ("/module:reference");
- stor.push_back (relative (f.path ()).string ());
- }
+ stor.push_back (move (s));
}
+
break;
}
case compiler_type::icc:
@@ -6387,35 +7243,20 @@ namespace build2
// into storage? Because of potential reallocations.
//
for (const string& a: stor)
- args.push_back (a.c_str ());
-
- if (getenv ("IFCPATH"))
- {
- // VC's IFCPATH takes precedence over /module:stdIfcDir so unset it if
- // we are using our own std modules. Note: IFCPATH saved in guess.cxx.
- //
- if (!stdifc.empty ())
- env.push_back ("IFCPATH");
- }
- else if (stdifc.empty ())
{
- // Add the VC's default directory (should be only one).
- //
- if (sys_mod_dirs != nullptr && !sys_mod_dirs->empty ())
- {
- args.push_back ("/module:stdIfcDir");
- args.push_back (sys_mod_dirs->front ().string ().c_str ());
- }
+ if (ctype == compiler_type::msvc)
+ args.push_back ("/reference");
+
+ args.push_back (a.c_str ());
}
}
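For MSVC, each stored <name>=<path> entry gets a preceding /reference in the loop above, so the final command line contains pairs such as (name and path hypothetical)

  /reference hello.core=core.ifc

which is how cl.exe is told where to find the IFC for a named module.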
target_state compile_rule::
- perform_update (action a, const target& xt) const
+ perform_update (action a, const target& xt, match_data& md) const
{
const file& t (xt.as<file> ());
const path& tp (t.path ());
- match_data md (move (t.data<match_data> ()));
unit_type ut (md.type);
context& ctx (t.ctx);
@@ -6479,7 +7320,8 @@ namespace build2
// If we are building a module interface or partition, then the target
// is bmi*{} and it may have an ad hoc obj*{} member. For header units
// there is no obj*{} (see the corresponding add_adhoc_member() call in
- // apply()).
+ // apply()). For named modules there may be no obj*{} if this is a
+ // sidebuild (obj*{} is already in the library binary).
//
path relm;
path relo;
@@ -6527,9 +7369,6 @@ namespace build2
small_vector<string, 2> header_args; // Header unit options storage.
small_vector<string, 2> module_args; // Module options storage.
- size_t out_i (0); // Index of the -o option.
- size_t lang_n (0); // Number of lang options.
-
switch (cclass)
{
case compiler_class::msvc:
@@ -6549,14 +7388,20 @@ namespace build2
if (md.pp != preprocessed::all)
append_sys_hdr_options (args); // Extra system header dirs (last).
+ // Note: could be overridden in mode.
+ //
+ append_diag_color_options (args);
+
// Set source/execution charsets to UTF-8 unless a custom charset
// is specified.
//
// Note that clang-cl supports /utf-8 and /*-charset.
//
{
- bool sc (find_option_prefix ("/source-charset:", args));
- bool ec (find_option_prefix ("/execution-charset:", args));
+ bool sc (find_option_prefixes (
+ {"/source-charset:", "-source-charset:"}, args));
+ bool ec (find_option_prefixes (
+ {"/execution-charset:", "-execution-charset:"}, args));
if (!sc && !ec)
args.push_back ("/utf-8");
@@ -6575,8 +7420,8 @@ namespace build2
//
if (cvariant != "clang" && isystem (*this))
{
- if (find_option_prefix ("/external:I", args) &&
- !find_option_prefix ("/external:W", args))
+ if (find_option_prefixes ({"/external:I", "-external:I"}, args) &&
+ !find_option_prefixes ({"/external:W", "-external:W"}, args))
args.push_back ("/external:W0");
}
@@ -6590,7 +7435,9 @@ namespace build2
// For C looks like no /EH* (exceptions supported but no C++ objects
// destroyed) is a reasonable default.
//
- if (x_lang == lang::cxx && !find_option_prefix ("/EH", args))
+
+ if (x_lang == lang::cxx &&
+ !find_option_prefixes ({"/EH", "-EH"}, args))
args.push_back ("/EHsc");
// The runtime is a bit more interesting. At first it may seem like
@@ -6612,7 +7459,7 @@ namespace build2
// unreasonable thing to do). So by default we will always use the
// release runtime.
//
- if (!find_option_prefixes ({"/MD", "/MT"}, args))
+ if (!find_option_prefixes ({"/MD", "/MT", "-MD", "-MT"}, args))
args.push_back ("/MD");
msvc_sanitize_cl (args);
@@ -6635,9 +7482,8 @@ namespace build2
// Note also that what we are doing here appears to be incompatible
// with PCH (/Y* options) and /Gm (minimal rebuild).
//
- // @@ MOD: TODO deal with absent relo.
- //
- if (find_options ({"/Zi", "/ZI"}, args))
+ if (!relo.empty () &&
+ find_options ({"/Zi", "/ZI", "-Zi", "-ZI"}, args))
{
if (fc)
args.push_back ("/Fd:");
@@ -6650,27 +7496,38 @@ namespace build2
args.push_back (out1.c_str ());
}
- if (fc)
- {
- args.push_back ("/Fo:");
- args.push_back (relo.string ().c_str ());
- }
- else
+ if (ut == unit_type::module_intf ||
+ ut == unit_type::module_intf_part ||
+ ut == unit_type::module_impl_part ||
+ ut == unit_type::module_header)
{
- out = "/Fo" + relo.string ();
- args.push_back (out.c_str ());
- }
+ assert (ut != unit_type::module_header); // @@ MODHDR
- // @@ MODHDR MSVC
- // @@ MODPART MSVC
- //
- if (ut == unit_type::module_intf)
- {
relm = relative (tp);
- args.push_back ("/module:interface");
- args.push_back ("/module:output");
+ args.push_back ("/ifcOutput");
args.push_back (relm.string ().c_str ());
+
+ if (relo.empty ())
+ args.push_back ("/ifcOnly");
+ else
+ {
+ args.push_back ("/Fo:");
+ args.push_back (relo.string ().c_str ());
+ }
+ }
+ else
+ {
+ if (fc)
+ {
+ args.push_back ("/Fo:");
+ args.push_back (relo.string ().c_str ());
+ }
+ else
+ {
+ out = "/Fo" + relo.string ();
+ args.push_back (out.c_str ());
+ }
}
// Note: no way to indicate that the source is already preprocessed.
@@ -6685,9 +7542,53 @@ namespace build2
{
append_options (args, cmode);
+ // Clang 15 introduced the unqualified-std-cast-call warning which
+ // warns about unqualified calls to std::move() and std::forward()
+ // (because they can be "hijacked" via ADL). Surprisingly, this
+ // warning is enabled by default, as opposed to with -Wextra or at
+ // least -Wall. It has also proven to be quite disruptive, causing a
+ // large number of warnings in a large number of packages. So we are
+ // going to "remap" it to -Wextra for now and in the future may
+ // "relax" it to -Wall and potentially to being enabled by default.
+ // See GitHub issue #259 for background and details.
+ //
+ if (x_lang == lang::cxx &&
+ ctype == compiler_type::clang &&
+ cmaj >= 15)
+ {
+ bool w (false); // Seen -W[no-]unqualified-std-cast-call
+ optional<bool> extra; // Seen -W[no-]extra
+
+ for (const char* s: reverse_iterate (args))
+ {
+ if (s != nullptr)
+ {
+ if (strcmp (s, "-Wunqualified-std-cast-call") == 0 ||
+ strcmp (s, "-Wno-unqualified-std-cast-call") == 0)
+ {
+ w = true;
+ break;
+ }
+
+ if (!extra) // Last seen option wins.
+ {
+ if (strcmp (s, "-Wextra") == 0) extra = true;
+ else if (strcmp (s, "-Wno-extra") == 0) extra = false;
+ }
+ }
+ }
+
+ if (!w && (!extra || !*extra))
+ args.push_back ("-Wno-unqualified-std-cast-call");
+ }
+
if (md.pp != preprocessed::all)
append_sys_hdr_options (args); // Extra system header dirs (last).
+ // Note: could be overridden in mode.
+ //
+ append_diag_color_options (args);
+
// Set the input charset to UTF-8 unless a custom one is specified.
//
// Note that the execution charset (-fexec-charset) is UTF-8 by
@@ -6741,8 +7642,7 @@ namespace build2
// either -nostdlib or -nostartfiles is specified. Let's do
// the same.
//
- initializer_list<const char*> os {"-nostdlib", "-nostartfiles"};
- if (!find_options (os, cmode) && !find_options (os, args))
+ if (!find_options ({"-nostdlib", "-nostartfiles"}, args))
{
args.push_back ("-D_MT");
args.push_back ("-D_DLL");
@@ -6804,10 +7704,6 @@ namespace build2
append_header_options (env, args, header_args, a, t, md, md.dd);
append_module_options (env, args, module_args, a, t, md, md.dd);
- // Note: the order of the following options is relied upon below.
- //
- out_i = args.size (); // Index of the -o option.
-
if (ut == unit_type::module_intf ||
ut == unit_type::module_intf_part ||
ut == unit_type::module_impl_part ||
@@ -6846,21 +7742,35 @@ namespace build2
}
case compiler_type::clang:
{
- // @@ MOD TODO: deal with absent relo.
+ assert (ut != unit_type::module_header); // @@ MODHDR
relm = relative (tp);
- args.push_back ("-o");
- args.push_back (relm.string ().c_str ());
- args.push_back ("--precompile");
-
// Without this option Clang's .pcm will reference source
- // files. In our case this file may be transient (.ii). Plus,
+ // files. In our case this file may be transient (.ii). Plus,
// it won't play nice with distributed compilation.
//
+ // Note that this sort of appears to be the default from Clang
+ // 17, but not quite, see llvm-project issue #72383.
+ //
args.push_back ("-Xclang");
args.push_back ("-fmodules-embed-all-files");
+ if (relo.empty ())
+ {
+ args.push_back ("-o");
+ args.push_back (relm.string ().c_str ());
+ args.push_back ("--precompile");
+ }
+ else
+ {
+ out1 = "-fmodule-output=" + relm.string ();
+ args.push_back (out1.c_str ());
+ args.push_back ("-o");
+ args.push_back (relo.string ().c_str ());
+ args.push_back ("-c");
+ }
+
break;
}
case compiler_type::msvc:
@@ -6875,7 +7785,7 @@ namespace build2
args.push_back ("-c");
}
- lang_n = append_lang_options (args, md);
+ append_lang_options (args, md);
if (md.pp == preprocessed::all)
{
@@ -6920,23 +7830,44 @@ namespace build2
if (!env.empty ())
env.push_back (nullptr);
+ // We have no choice but to serialize early if we want the command line
+ // printed shortly before actually executing the compiler. Failed that,
+ // it may look like we are still executing in parallel.
+ //
+ scheduler::alloc_guard jobs_ag;
+ if (!ctx.dry_run && cast_false<bool> (t[c_serialize]))
+ jobs_ag = scheduler::alloc_guard (*ctx.sched, phase_unlock (nullptr));
+
// With verbosity level 2 print the command line as if we are compiling
// the source file, not its preprocessed version (so that it's easy to
// copy and re-run, etc). Only at level 3 and above print the real deal.
//
+ // @@ TODO: why don't we print env (here and/or below)? Also link rule.
+ //
if (verb == 1)
- text << x_name << ' ' << s;
+ {
+ const char* name (x_assembler_cpp (s) ? "as-cpp" :
+ x_objective (s) ? x_obj_name :
+ x_name);
+
+ print_diag (name, s, t);
+ }
else if (verb == 2)
print_process (args);
// If we have the (partially) preprocessed output, switch to that.
//
- bool psrc (md.psrc);
+ // But we remember the original source/position to restore later.
+ //
+ bool psrc (md.psrc); // Note: false if cc.reprocess.
bool ptmp (psrc && md.psrc.temporary);
+ pair<size_t, const char*> osrc;
if (psrc)
{
args.pop_back (); // nullptr
+ osrc.second = args.back ();
args.pop_back (); // sp
+ osrc.first = args.size ();
sp = &md.psrc.path ();
@@ -6946,25 +7877,40 @@ namespace build2
{
case compiler_type::gcc:
{
- // The -fpreprocessed is implied by .i/.ii. But not when compiling
- // a header unit (there is no .hi/.hii).
+ // -fpreprocessed is implied by .i/.ii unless compiling a header
+ // unit (there is no .hi/.hii). Also, we would need to pop -x
+ // since it takes precedence over the extension, which would mess
+ // up our osrc logic. So in the end it feels like always passing
+ // explicit -fpreprocessed is the way to go.
//
- if (ut == unit_type::module_header)
- args.push_back ("-fpreprocessed");
- else
- // Pop -x since it takes precedence over the extension.
- //
- // @@ I wonder why bother and not just add -fpreprocessed? Are
- // we trying to save an option or does something break?
- //
- for (; lang_n != 0; --lang_n)
- args.pop_back ();
-
+ // Also note that similarly there is no .Si for .S files.
+ //
+ args.push_back ("-fpreprocessed");
args.push_back ("-fdirectives-only");
break;
}
case compiler_type::clang:
{
+ // Clang 15 and later with -pedantic warns about GNU-style line
+ // markers that it wrote itself in the -frewrite-includes output
+ // (llvm-project issue 63284). So we suppress this warning unless
+ // compiling from source.
+ //
+ // In Apple Clang this warning/option is absent in 14.0.3 (which
+ // is said to be based on vanilla Clang 15.0.5) for some reason
+ // (let's hope it's because they patched it out rather than due to
+ // a misleading _LIBCPP_VERSION value).
+ //
+ if (ctype == compiler_type::clang &&
+ cmaj >= (cvariant != "apple" ? 15 : 16))
+ {
+ if (find_options ({"-pedantic", "-pedantic-errors",
+ "-Wpedantic", "-Werror=pedantic"}, args))
+ {
+ args.push_back ("-Wno-gnu-line-marker");
+ }
+ }
+
// Note that without -x Clang will treat .i/.ii as fully
// preprocessed.
//
@@ -7013,45 +7959,38 @@ namespace build2
file_cache::read psrcr (psrc ? md.psrc.open () : file_cache::read ());
// VC cl.exe sends diagnostics to stdout. It also prints the file
- // name being compiled as the first line. So for cl.exe we redirect
- // stdout to a pipe, filter that noise out, and send the rest to
- // stderr.
+ // name being compiled as the first line. So for cl.exe we filter
+ // that noise out.
//
- // For other compilers redirect stdout to stderr, in case any of
- // them tries to pull off something similar. For sane compilers this
- // should be harmless.
+ // For other compilers also redirect stdout to stderr, in case any
+ // of them tries to pull off something similar. For sane compilers
+ // this should be harmless.
//
bool filter (ctype == compiler_type::msvc);
process pr (cpath,
- args.data (),
- 0, (filter ? -1 : 2), 2,
+ args,
+ 0, 2, diag_buffer::pipe (ctx, filter /* force */),
nullptr, // CWD
env.empty () ? nullptr : env.data ());
- if (filter)
- {
- try
- {
- ifdstream is (
- move (pr.in_ofd), fdstream_mode::text, ifdstream::badbit);
+ diag_buffer dbuf (ctx, args[0], pr);
- msvc_filter_cl (is, *sp);
+ if (filter)
+ msvc_filter_cl (dbuf, *sp);
- // If anything remains in the stream, send it all to stderr.
- // Note that the eof check is important: if the stream is at
- // eof, this and all subsequent writes to the diagnostics stream
- // will fail (and you won't see a thing).
- //
- if (is.peek () != ifdstream::traits_type::eof ())
- diag_stream_lock () << is.rdbuf ();
+ dbuf.read ();
- is.close ();
- }
- catch (const io_error&) {} // Assume exits with error.
+ // Restore the original source if we switched to preprocessed.
+ //
+ if (psrc)
+ {
+ args.resize (osrc.first);
+ args.push_back (osrc.second);
+ args.push_back (nullptr);
}
- run_finish (args, pr);
+ run_finish (dbuf, args, pr, 1 /* verbosity */);
}
catch (const process_error& e)
{
@@ -7063,6 +8002,8 @@ namespace build2
throw failed ();
}
+ jobs_ag.deallocate ();
+
if (md.deferred_failure)
fail << "expected error exit status from " << x_lang << " compiler";
}
@@ -7072,57 +8013,6 @@ namespace build2
if (ptmp && verb >= 3)
md.psrc.temporary = true;
- // Clang's module compilation requires two separate compiler
- // invocations.
- //
- // @@ MODPART: Clang (all of this is probably outdated).
- //
- if (ctype == compiler_type::clang && ut == unit_type::module_intf)
- {
- // Adjust the command line. First discard everything after -o then
- // build the new "tail".
- //
- args.resize (out_i + 1);
- args.push_back (relo.string ().c_str ()); // Produce .o.
- args.push_back ("-c"); // By compiling .pcm.
- args.push_back ("-Wno-unused-command-line-argument");
- args.push_back (relm.string ().c_str ());
- args.push_back (nullptr);
-
- if (verb >= 2)
- print_process (args);
-
- if (!ctx.dry_run)
- {
- // Remove the target file if this fails. If we don't do that, we
- // will end up with a broken build that is up-to-date.
- //
- auto_rmfile rm (relm);
-
- try
- {
- process pr (cpath,
- args.data (),
- 0, 2, 2,
- nullptr, // CWD
- env.empty () ? nullptr : env.data ());
-
- run_finish (args, pr);
- }
- catch (const process_error& e)
- {
- error << "unable to execute " << args[0] << ": " << e;
-
- if (e.child)
- exit (1);
-
- throw failed ();
- }
-
- rm.cancel ();
- }
- }
-
timestamp now (system_clock::now ());
if (!ctx.dry_run)
@@ -7138,25 +8028,27 @@ namespace build2
}
target_state compile_rule::
- perform_clean (action a, const target& xt) const
+ perform_clean (action a, const target& xt, const target_type& srct) const
{
const file& t (xt.as<file> ());
+ // Preprocessed file extension.
+ //
+ const char* pext (x_assembler_cpp (srct) ? ".Si" :
+ x_objective (srct) ? x_obj_pext :
+ x_pext);
+
// Compressed preprocessed file extension.
//
- auto cpext = [this, &t, s = string ()] () mutable -> const char*
- {
- return (s = t.ctx.fcache.compressed_extension (x_pext)).c_str ();
- };
+ string cpext (t.ctx.fcache->compressed_extension (pext));
clean_extras extras;
-
switch (ctype)
{
- case compiler_type::gcc: extras = {".d", x_pext, cpext (), ".t"}; break;
- case compiler_type::clang: extras = {".d", x_pext, cpext ()}; break;
- case compiler_type::msvc: extras = {".d", x_pext, cpext (), ".idb", ".pdb"};break;
- case compiler_type::icc: extras = {".d"}; break;
+ case compiler_type::gcc: extras = {".d", pext, cpext.c_str (), ".t"}; break;
+ case compiler_type::clang: extras = {".d", pext, cpext.c_str ()}; break;
+ case compiler_type::msvc: extras = {".d", pext, cpext.c_str (), ".idb", ".pdb"}; break;
+ case compiler_type::icc: extras = {".d"}; break;
}
return perform_clean_extra (a, t, extras);
diff --git a/libbuild2/cc/compile-rule.hxx b/libbuild2/cc/compile-rule.hxx
index dbb2dd5..0886b4b 100644
--- a/libbuild2/cc/compile-rule.hxx
+++ b/libbuild2/cc/compile-rule.hxx
@@ -22,6 +22,8 @@ namespace build2
namespace cc
{
+ class config_module;
+
// The order is arranged so that their integral values indicate whether
// one is a "stronger" than another.
//
@@ -42,19 +44,21 @@ namespace build2
dyndep_rule
{
public:
- compile_rule (data&&);
+ struct match_data;
+
+ compile_rule (data&&, const scope&);
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
virtual recipe
apply (action, target&) const override;
target_state
- perform_update (action, const target&) const;
+ perform_update (action, const target&, match_data&) const;
target_state
- perform_clean (action, const target&) const;
+ perform_clean (action, const target&, const target_type&) const;
public:
using appended_libraries = small_vector<const target*, 256>;
@@ -62,7 +66,8 @@ namespace build2
void
append_library_options (appended_libraries&, strings&,
const scope&,
- action, const file&, bool, linfo) const;
+ action, const file&, bool, linfo,
+ bool, bool) const;
optional<path>
find_system_header (const path&) const;
@@ -72,7 +77,6 @@ namespace build2
functions (function_family&, const char*); // functions.cxx
private:
- struct match_data;
using environment = small_vector<const char*, 2>;
template <typename T>
@@ -84,7 +88,7 @@ namespace build2
append_library_options (appended_libraries&, T&,
const scope&,
const scope*,
- action, const file&, bool, linfo,
+ action, const file&, bool, linfo, bool,
library_cache*) const;
template <typename T>
@@ -109,27 +113,28 @@ namespace build2
prefix_map
build_prefix_map (const scope&, action, const target&, linfo) const;
- struct module_mapper_state;
+ struct gcc_module_mapper_state;
- bool
- gcc_module_mapper (module_mapper_state&,
+ optional<bool>
+ gcc_module_mapper (gcc_module_mapper_state&,
action, const scope&, file&, linfo,
- ifdstream&, ofdstream&,
+ const string&, ofdstream&,
depdb&, bool&, bool&,
optional<prefix_map>&, srcout_map&) const;
pair<const file*, bool>
enter_header (action, const scope&, file&, linfo,
- path&, bool, bool,
+ path&&, bool, bool,
optional<prefix_map>&, const srcout_map&) const;
optional<bool>
inject_header (action, file&, const file&, timestamp, bool) const;
- pair<file_cache::entry, bool>
+ void
extract_headers (action, const scope&, file&, linfo,
const file&, match_data&,
- depdb&, bool&, timestamp, module_imports&) const;
+ depdb&, bool&, timestamp, module_imports&,
+ pair<file_cache::entry, bool>&) const;
string
parse_unit (action, file&, linfo,
@@ -151,8 +156,9 @@ namespace build2
pair<dir_path, const scope&>
find_modules_sidebuild (const scope&) const;
- const file&
- make_module_sidebuild (action, const scope&, const file&,
+ pair<target&, ulock>
+ make_module_sidebuild (action, const scope&,
+ const file*, otype,
const target&, const string&) const;
const file&
@@ -180,6 +186,7 @@ namespace build2
private:
const string rule_id;
+ const config_module* header_cache_;
};
}
}
diff --git a/libbuild2/cc/functions.cxx b/libbuild2/cc/functions.cxx
index abfd32f..9d408af 100644
--- a/libbuild2/cc/functions.cxx
+++ b/libbuild2/cc/functions.cxx
@@ -13,11 +13,10 @@
#include <libbuild2/cc/module.hxx>
#include <libbuild2/cc/utility.hxx>
+#include <libbuild2/functions-name.hxx> // to_target()
+
namespace build2
{
- const target&
- to_target (const scope&, name&&, name&&); // libbuild2/functions-name.cxx
-
namespace cc
{
using namespace bin;
@@ -53,7 +52,7 @@ namespace build2
//
if (bs->ctx.phase != run_phase::match &&
bs->ctx.phase != run_phase::execute)
- fail << f.name << " can only be called during execution";
+ fail << f.name << " can only be called from recipe";
const module* m (rs->find_module<module> (d.x));
@@ -62,6 +61,9 @@ namespace build2
// We can assume these are present due to function's types signature.
//
+ if (vs[0].null)
+ throw invalid_argument ("null value");
+
names& ts_ns (vs[0].as<names> ()); // <targets>
// In a somewhat hackish way strip the outer operation to match how we
@@ -75,20 +77,40 @@ namespace build2
{
name& n (*i), o;
const target& t (to_target (*bs, move (n), move (n.pair ? *++i : o)));
+
+ if (!t.matched (a))
+ fail << t << " is not matched" <<
+ info << "make sure this target is listed as prerequisite";
+
d.f (r, vs, *m, *bs, a, t);
}
return value (move (r));
}
- // Common thunk for $x.lib_*(<targets>, <otype> [, ...]) functions.
+ // Common thunk for $x.lib_*(...) functions.
+ //
+ // The two supported function signatures are:
+ //
+ // $x.lib_*(<targets>, <otype> [, ...])
+ //
+ // $x.lib_*(<targets>)
+ //
+ // For the first signature, the passed targets cannot be library groups
+ // (so they are always file-based) and linfo is always present.
+ //
+ // For the second signature, targets can only be utility libraries
+ // (including the libul{} group).
+ //
+ // If <otype> in the first signature is NULL, then it is treated as
+ // the second signature.
//
struct lib_thunk_data
{
const char* x;
void (*f) (void*, strings&,
const vector_view<value>&, const module&, const scope&,
- action, const file&, bool, linfo);
+ action, const target&, bool, optional<linfo>);
};
static value
@@ -109,20 +131,25 @@ namespace build2
if (bs->ctx.phase != run_phase::match && // See above.
bs->ctx.phase != run_phase::execute)
- fail << f.name << " can only be called during execution";
+ fail << f.name << " can only be called from recipe";
const module* m (rs->find_module<module> (d.x));
if (m == nullptr)
fail << f.name << " called without " << d.x << " module loaded";
- // We can assume these are present due to function's types signature.
+ // We can assume this is present due to function's types signature.
//
+ if (vs[0].null)
+ throw invalid_argument ("null value");
+
names& ts_ns (vs[0].as<names> ()); // <targets>
- names& ot_ns (vs[1].as<names> ()); // <otype>
- linfo li;
+ optional<linfo> li;
+ if (vs.size () > 1 && !vs[1].null)
{
+ names& ot_ns (vs[1].as<names> ()); // <otype>
+
string t (convert<string> (move (ot_ns)));
const target_type* tt (bs->find_target_type (t));
@@ -168,17 +195,22 @@ namespace build2
name& n (*i), o;
const target& t (to_target (*bs, move (n), move (n.pair ? *++i : o)));
- const file* f;
bool la (false);
-
- if ((la = (f = t.is_a<libux> ())) ||
- (la = (f = t.is_a<liba> ())) ||
- ( (f = t.is_a<libs> ())))
+ if (li
+ ? ((la = t.is_a<libux> ()) ||
+ (la = t.is_a<liba> ()) ||
+ ( t.is_a<libs> ()))
+ : ((la = t.is_a<libux> ()) ||
+ ( t.is_a<libul> ())))
{
- d.f (ls, r, vs, *m, *bs, a, *f, la, li);
+ if (!t.matched (a))
+ fail << t << " is not matched" <<
+ info << "make sure this target is listed as prerequisite";
+
+ d.f (ls, r, vs, *m, *bs, a, t, la, li);
}
else
- fail << t << " is not a library target";
+ fail << t << " is not a library of expected type";
}
return value (move (r));
@@ -205,33 +237,61 @@ namespace build2
void compile_rule::
functions (function_family& f, const char* x)
{
- // $<module>.lib_poptions(<lib-targets>, <otype>)
+ // $<module>.lib_poptions(<lib-targets>[, <otype>[, <original>]])
//
// Return the preprocessor options that should be passed when compiling
// sources that depend on the specified libraries. The second argument
// is the output target type (obje, objs, etc).
//
+ // The output target type may be omitted for utility libraries (libul{}
+ // or libu[eas]{}). In this case, only "common interface" options will
+ // be returned for lib{} dependencies. This is primarily useful for
+ // obtaining poptions to be passed to tools other than C/C++ compilers
+ // (for example, Qt moc).
+ //
+ // If <original> is true, then return the original -I options without
+ // performing any translation (for example, to -isystem or /external:I).
+ // This is the default if <otype> is omitted. To get the translation for
+ // the common interface options, pass [null] for <otype> and true for
+ // <original>.
+ //
// Note that passing multiple targets at once is not a mere convenience:
// this also allows for more effective duplicate suppression.
//
- // Note also that this function can only be called during execution
- // after all the specified library targets have been matched. Normally
- // it is used in ad hoc recipes to implement custom compilation.
+ // Note also that this function can only be called during execution (or,
+ // carefully, during match) after all the specified library targets have
+ // been matched. Normally it is used in ad hoc recipes to implement
+ // custom compilation.
//
// Note that this function is not pure.
//
f.insert (".lib_poptions", false).
- insert<lib_thunk_data, names, names> (
+ insert<lib_thunk_data, names, optional<names*>, optional<names>> (
&lib_thunk<appended_libraries>,
lib_thunk_data {
x,
[] (void* ls, strings& r,
- const vector_view<value>&, const module& m, const scope& bs,
- action a, const file& l, bool la, linfo li)
+ const vector_view<value>& vs, const module& m, const scope& bs,
+ action a, const target& l, bool la, optional<linfo> li)
{
+ // If this is libul{}, get the matched member (see bin::libul_rule
+ // for details).
+ //
+ const file& f (
+ la || li
+ ? l.as<file> ()
+ : (la = true,
+ l.prerequisite_targets[a].back ().target->as<file> ()));
+
+ bool common (!li);
+ bool original (vs.size () > 2 ? convert<bool> (vs[2]) : !li);
+
+ if (!li)
+ li = link_info (bs, link_type (f).type);
+
m.append_library_options (
*static_cast<appended_libraries*> (ls), r,
- bs, a, l, la, li);
+ bs, a, f, la, *li, common, original);
}});
// $<module>.find_system_header(<name>)
@@ -295,9 +355,10 @@ namespace build2
// Note that passing multiple targets at once is not a mere convenience:
// this also allows for more effective duplicate suppression.
//
- // Note also that this function can only be called during execution
- // after all the specified library targets have been matched. Normally
- // it is used in ad hoc recipes to implement custom linking.
+ // Note also that this function can only be called during execution (or,
+ // carefully, during match) after all the specified library targets have
+ // been matched. Normally it is used in ad hoc recipes to implement
+ // custom linking.
//
// Note that this function is not pure.
//
@@ -308,12 +369,15 @@ namespace build2
x,
[] (void* ls, strings& r,
const vector_view<value>& vs, const module& m, const scope& bs,
- action a, const file& l, bool la, linfo li)
+ action a, const target& l, bool la, optional<linfo> li)
{
lflags lf (0);
bool rel (true);
if (vs.size () > 2)
{
+ if (vs[2].null)
+ throw invalid_argument ("null value");
+
for (const name& f: vs[2].as<names> ())
{
string s (convert<string> (name (f)));
@@ -332,7 +396,8 @@ namespace build2
m.append_libraries (
*static_cast<appended_libraries*> (ls), r,
nullptr /* sha256 */, nullptr /* update */, timestamp_unknown,
- bs, a, l, la, lf, li, nullopt /* for_install */, self, rel);
+ bs, a, l.as<file> (), la, lf, *li,
+ nullopt /* for_install */, self, rel);
}});
// $<module>.lib_rpaths(<lib-targets>, <otype> [, <link> [, <self>]])
@@ -364,13 +429,12 @@ namespace build2
x,
[] (void* ls, strings& r,
const vector_view<value>& vs, const module& m, const scope& bs,
- action a, const file& l, bool la, linfo li)
+ action a, const target& l, bool la, optional<linfo> li)
{
bool link (vs.size () > 2 ? convert<bool> (vs[2]) : false);
bool self (vs.size () > 3 ? convert<bool> (vs[3]) : true);
m.rpath_libraries (*static_cast<rpathed_libraries*> (ls), r,
- bs,
- a, l, la, li, link, self);
+ bs, a, l.as<file> (), la, *li, link, self);
}});
// $cxx.obj_modules(<obj-targets>)
@@ -428,7 +492,16 @@ namespace build2
// look for cc.export.libs and <module>.export.libs.
//
// 3. No member/group selection/linkup: we resolve *.export.libs on
- // whatever is listed.
+ // whatever is listed (so no liba{}/libs{} overrides will be
+ // considered).
+ //
+ // Because of (2) and (3), this functionality should only be used on a
+ // controlled list of libraries (usually libraries that belong to the
+ // same family as this library).
+ //
+ // Note that a similar deduplication is also performed when processing
+ // the libraries. However, it may still make sense to do it once at the
+ // source for really severe cases (like Boost).
//
// Note that this function is not pure.
//
@@ -456,6 +529,9 @@ namespace build2
// We can assume the argument is present due to function's types
// signature.
//
+ if (vs[0].null)
+ throw invalid_argument ("null value");
+
names& r (vs[0].as<names> ());
m->deduplicate_export_libs (*bs,
vector<name> (r.begin (), r.end ()),
diff --git a/libbuild2/cc/gcc.cxx b/libbuild2/cc/gcc.cxx
index 30f2092..286ba10 100644
--- a/libbuild2/cc/gcc.cxx
+++ b/libbuild2/cc/gcc.cxx
@@ -45,6 +45,13 @@ namespace build2
d = dir_path (o, 2, string::npos);
else
continue;
+
+ // Ignore relative paths. Or maybe we should warn?
+ //
+ if (d.relative ())
+ continue;
+
+ d.normalize ();
}
catch (const invalid_path& e)
{
@@ -52,10 +59,91 @@ namespace build2
<< o << "'";
}
- // Ignore relative paths. Or maybe we should warn?
- //
- if (!d.relative ())
+ r.push_back (move (d));
+ }
+ }
+
+#ifdef _WIN32
+ // Some misconfigured MinGW GCC builds add absolute POSIX directories to
+ // their built-in search paths (e.g., /mingw/{include,lib}) which GCC then
+ // interprets as absolute paths relative to the current drive (so the set
+ // of built-in search paths starts depending on where we run things from).
+ //
+ // While that's definitely misguided, life is short and we don't want to
+ // waste it explaining this in long mailing list threads and telling
+ // people to complain to whomever built their GCC. So we will just
+ // recreate the behavior in a way that's consistent with GCC and let
+ // people discover this on their own.
+ //
+ static inline void
+ add_current_drive (string& s)
+ {
+ s.insert (0, work.string (), 0, 2); // Add e.g., `c:`.
+ }
+#endif
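A minimal illustration of the workaround (paths hypothetical): if a misconfigured toolchain reports /mingw/include and the current working directory is on drive C:, add_current_drive() produces c:/mingw/include, matching how GCC itself would resolve the path.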
+
+ // Parse color/semicolon-separated list of search directories (from
+ // -print-search-dirs output, environment variables).
+ //
+ static void
+ parse_search_dirs (const string& v, dir_paths& r,
+ const char* what, const char* what2 = "")
+ {
+ // Now the fun part: figuring out which delimiter is used. Normally it
+ // is ':' but on Windows it is ';' (or can be; who knows for sure). Also
+ // note that these paths are absolute (or should be). So here is what we
+ // are going to do: first look for ';'. If found, then that's the
+ // delimiter. If not found, then there are two cases: it is either a
+ // single Windows path or the delimiter is ':'. To distinguish these two
+ // cases we check if the path starts with a Windows drive.
+ //
+ char d (';');
+ string::size_type e (v.find (d));
+
+ if (e == string::npos &&
+ (v.size () < 2 || v[0] == '/' || v[1] != ':'))
+ {
+ d = ':';
+ e = v.find (d);
+ }
+
+ // Now chop it up. We already have the position of the first delimiter
+ // (if any).
+ //
+ for (string::size_type b (0);; e = v.find (d, (b = e + 1)))
+ {
+ dir_path d;
+ try
+ {
+ string ds (v, b, (e != string::npos ? e - b : e));
+
+ // Skip empty entries (sometimes found in random MinGW toolchains).
+ //
+ if (!ds.empty ())
+ {
+#ifdef _WIN32
+ if (path_traits::is_separator (ds[0]))
+ add_current_drive (ds);
+#endif
+ d = dir_path (move (ds));
+
+ if (d.relative ())
+ throw invalid_path (move (d).string ());
+
+ d.normalize ();
+ }
+ }
+ catch (const invalid_path& e)
+ {
+ fail << "invalid directory '" << e.path << "'" << " in "
+ << what << what2;
+ }
+
+ if (!d.empty () && find (r.begin (), r.end (), d) == r.end ())
r.push_back (move (d));
+
+ if (e == string::npos)
+ break;
}
}
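A few worked examples of the delimiter heuristic above (values hypothetical): "/usr/lib:/usr/local/lib" contains no ';' and starts with '/', so ':' is used and two directories are extracted; "C:\one;D:\two" contains ';' and splits into two; a lone "C:\one" has no ';' but v[1] == ':' marks it as a single Windows path rather than a ':'-separated list.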
@@ -69,14 +157,15 @@ namespace build2
// do this is to run the compiler twice.
//
pair<dir_paths, size_t> config_module::
- gcc_header_search_dirs (const process_path& xc, scope& rs) const
+ gcc_header_search_dirs (const compiler_info& xi, scope& rs) const
{
dir_paths r;
// Note also that any -I and similar that we may specify on the command
- // line are factored into the output.
+ // line are factored into the output. As well as the CPATH, etc.,
+ // environment variable values.
//
- cstrings args {xc.recall_string ()};
+ cstrings args {xi.path.recall_string ()};
append_options (args, rs, x_mode);
// Compile as.
@@ -100,7 +189,7 @@ namespace build2
args.push_back ("-");
args.push_back (nullptr);
- process_env env (xc);
+ process_env env (xi.path);
// For now let's assume that all the platforms other than Windows
// recognize LC_ALL.
@@ -113,107 +202,109 @@ namespace build2
if (verb >= 3)
print_process (env, args);
+ bool found_q (false); // Found `#include "..." ...` marker.
+ bool found_b (false); // Found `#include <...> ...` marker.
+
+ // Open pipe to stderr, redirect stdin and stdout to /dev/null.
+ //
+ process pr (run_start (
+ env,
+ args,
+ -2, /* stdin */
+ -2, /* stdout */
+ -1 /* stderr */));
try
{
- //@@ TODO: why don't we use run_start() here? Because it's unable to
- // open pipe for stderr and we need to change it first, for example,
- // making the err parameter a file descriptor rather than a flag.
- //
+ ifdstream is (
+ move (pr.in_efd), fdstream_mode::skip, ifdstream::badbit);
- // Open pipe to stderr, redirect stdin and stdout to /dev/null.
+ // Normally the system header paths appear between the following
+ // lines:
//
- process pr (xc,
- args.data (),
- -2, /* stdin */
- -2, /* stdout */
- -1, /* stderr */
- nullptr /* cwd */,
- env.vars);
-
- try
+ // #include <...> search starts here:
+ // End of search list.
+ //
+ // The exact text depends on the current locale. What we can rely on
+ // is the presence of the "#include <...>" marker in the "opening"
+ // line and the fact that the paths are indented with a single space
+ // character, unlike the "closing" line.
+ //
+ // Note that on Mac OS we will also see some framework paths among
+ // system header paths, followed with a comment. For example:
+ //
+ // /Library/Frameworks (framework directory)
+ //
+ // For now we ignore framework paths and to filter them out we will
+ // only consider valid paths to existing directories, skipping those
+ // which we fail to normalize or stat. @@ Maybe this is a bit too
+ // loose, especially compared to gcc_library_search_dirs()?
+ //
+ // Note that when there are no paths (e.g., because of -nostdinc),
+ // then GCC prints both #include markers while Clang -- only "...".
+ //
+ for (string s; getline (is, s); )
{
- ifdstream is (
- move (pr.in_efd), fdstream_mode::skip, ifdstream::badbit);
-
- // Normally the system header paths appear between the following
- // lines:
- //
- // #include <...> search starts here:
- // End of search list.
- //
- // The exact text depends on the current locale. What we can rely on
- // is the presence of the "#include <...>" substring in the
- // "opening" line and the fact that the paths are indented with a
- // single space character, unlike the "closing" line.
- //
- // Note that on Mac OS we will also see some framework paths among
- // system header paths, followed with a comment. For example:
- //
- // /Library/Frameworks (framework directory)
- //
- // For now we ignore framework paths and to filter them out we will
- // only consider valid paths to existing directories, skipping those
- // which we fail to normalize or stat.
- //
- string s;
- for (bool found (false); getline (is, s); )
+ if (!found_q)
+ found_q = s.find ("#include \"...\"") != string::npos;
+ else if (!found_b)
+ found_b = s.find ("#include <...>") != string::npos;
+ else
{
- if (!found)
- found = s.find ("#include <...>") != string::npos;
- else
+ if (s[0] != ' ')
+ break;
+
+ dir_path d;
+ try
{
- if (s[0] != ' ')
- break;
-
- try
- {
- dir_path d (s, 1, s.size () - 1);
-
- if (d.absolute () && exists (d, true) &&
- find (r.begin (), r.end (), d.normalize ()) == r.end ())
- r.emplace_back (move (d));
- }
- catch (const invalid_path&)
- {
- // Skip this path.
- }
- }
- }
+ string ds (s, 1, s.size () - 1);
- is.close (); // Don't block.
+#ifdef _WIN32
+ if (path_traits::is_separator (ds[0]))
+ add_current_drive (ds);
+#endif
+ d = dir_path (move (ds));
- if (!pr.wait ())
- {
- // We have read stderr so better print some diagnostics.
- //
- diag_record dr (fail);
+ if (d.relative () || !exists (d, true))
+ continue;
- dr << "failed to extract " << x_lang << " header search paths" <<
- info << "command line: ";
+ d.normalize ();
+ }
+ catch (const invalid_path&)
+ {
+ continue;
+ }
- print_process (dr, args);
+ if (find (r.begin (), r.end (), d) == r.end ())
+ r.emplace_back (move (d));
}
}
- catch (const io_error&)
+
+ is.close (); // Don't block.
+
+ if (!run_wait (args, pr))
{
- pr.wait ();
- fail << "error reading " << x_lang << " compiler -v -E output";
+ // We have read stderr so better print some diagnostics.
+ //
+ diag_record dr (fail);
+
+ dr << "failed to extract " << x_lang << " header search paths" <<
+ info << "command line: ";
+
+ print_process (dr, args);
}
}
- catch (const process_error& e)
+ catch (const io_error&)
{
- error << "unable to execute " << args[0] << ": " << e;
-
- if (e.child)
- exit (1);
-
- throw failed ();
+ run_wait (args, pr);
+ fail << "error reading " << x_lang << " compiler -v -E output";
}
- // It's highly unlikely not to have any system directories. More likely
- // we misinterpreted the compiler output.
+ // Note that it's possible that we will have no system directories, for
+ // example, if the user specified -nostdinc. But we must still have seen
+ // at least one marker. Failing that, we assume we misinterpreted the
+ // compiler output.
//
- if (r.empty ())
+ if (!found_b && !found_q)
fail << "unable to extract " << x_lang << " compiler system header "
<< "search paths";
@@ -224,7 +315,7 @@ namespace build2
// (Clang, Intel) using the -print-search-dirs option.
//
pair<dir_paths, size_t> config_module::
- gcc_library_search_dirs (const process_path& xc, scope& rs) const
+ gcc_library_search_dirs (const compiler_info& xi, scope& rs) const
{
// The output of -print-search-dirs are a bunch of lines that start with
// "<name>: =" where name can be "install", "programs", or "libraries".
@@ -251,12 +342,12 @@ namespace build2
gcc_extract_library_search_dirs (cast<strings> (rs[x_mode]), r);
size_t rn (r.size ());
- cstrings args {xc.recall_string ()};
+ cstrings args {xi.path.recall_string ()};
append_options (args, rs, x_mode);
args.push_back ("-print-search-dirs");
args.push_back (nullptr);
- process_env env (xc);
+ process_env env (xi.path);
// For now let's assume that all the platforms other than Windows
// recognize LC_ALL.
@@ -271,6 +362,9 @@ namespace build2
// Open pipe to stdout.
//
+ // Note: this function is called in the serial load phase and so no
+ // diagnostics buffering is needed.
+ //
process pr (run_start (env,
args,
0, /* stdin */
@@ -305,42 +399,22 @@ namespace build2
// by that and let run_finish() deal with it.
}
- run_finish (args, pr);
+ run_finish (args, pr, 2 /* verbosity */);
if (l.empty ())
fail << "unable to extract " << x_lang << " compiler system library "
<< "search paths";
- // Now the fun part: figuring out which delimiter is used. Normally it
- // is ':' but on Windows it is ';' (or can be; who knows for sure). Also
- // note that these paths are absolute (or should be). So here is what we
- // are going to do: first look for ';'. If found, then that's the
- // delimiter. If not found, then there are two cases: it is either a
- // single Windows path or the delimiter is ':'. To distinguish these two
- // cases we check if the path starts with a Windows drive.
- //
- char d (';');
- string::size_type e (l.find (d));
-
- if (e == string::npos &&
- (l.size () < 2 || l[0] == '/' || l[1] != ':'))
- {
- d = ':';
- e = l.find (d);
- }
+ parse_search_dirs (l, r, args[0], " -print-search-dirs output");
- // Now chop it up. We already have the position of the first delimiter
- // (if any).
+ // While GCC incorporates the LIBRARY_PATH environment variable value
+ // into the -print-search-dirs output, Clang does not. Also, unlike GCC,
+ // it appears to consider such paths last.
//
- for (string::size_type b (0);; e = l.find (d, (b = e + 1)))
+ if (xi.id.type == compiler_type::clang)
{
- dir_path d (l, b, (e != string::npos ? e - b : e));
-
- if (find (r.begin (), r.end (), d.normalize ()) == r.end ())
- r.emplace_back (move (d));
-
- if (e == string::npos)
- break;
+ if (optional<string> v = getenv ("LIBRARY_PATH"))
+ parse_search_dirs (*v, r, "LIBRARY_PATH environment variable");
}
return make_pair (move (r), rn);
diff --git a/libbuild2/cc/guess.cxx b/libbuild2/cc/guess.cxx
index ff06c5f..d7e9c63 100644
--- a/libbuild2/cc/guess.cxx
+++ b/libbuild2/cc/guess.cxx
@@ -106,7 +106,7 @@ namespace build2
else if (id.compare (0, p, "icc" ) == 0) type = compiler_type::icc;
else
throw invalid_argument (
- "invalid compiler type '" + string (id, 0, p) + "'");
+ "invalid compiler type '" + string (id, 0, p) + '\'');
if (p != string::npos)
{
@@ -181,12 +181,12 @@ namespace build2
// could also be because there is something wrong with the compiler or
// options but that we simply leave to blow up later).
//
- process pr (run_start (3 /* verbosity */,
+ process pr (run_start (3 /* verbosity */,
xp,
args,
- -1 /* stdin */,
- -1 /* stdout */,
- false /* error */));
+ -1 /* stdin */,
+ -1 /* stdout */,
+ 1 /* stderr (to stdout) */));
string l, r;
try
{
@@ -222,7 +222,7 @@ namespace build2
// that.
}
- if (!run_finish_code (args.data (), pr, l))
+ if (!run_finish_code (args.data (), pr, l, 2 /* verbosity */))
r = "none";
if (r.empty ())
@@ -262,6 +262,8 @@ namespace build2
" stdlib:=\"freebsd\" \n"
"# elif defined(__NetBSD__) \n"
" stdlib:=\"netbsd\" \n"
+"# elif defined(__OpenBSD__) \n"
+" stdlib:=\"openbsd\" \n"
"# elif defined(__APPLE__) \n"
" stdlib:=\"apple\" \n"
"# elif defined(__EMSCRIPTEN__) \n"
@@ -410,11 +412,13 @@ namespace build2
//
// Note that Visual Studio versions prior to 15.0 are not supported.
//
+ // Note also the directories are absolute and normalized.
+ //
struct msvc_info
{
- dir_path msvc_dir; // VC directory (...\Tools\MSVC\<ver>\).
- dir_path psdk_dir; // Platfor SDK version (under Include/, Lib/, etc).
- string psdk_ver; // Platfor SDK directory (...\Windows Kits\<ver>\).
+ dir_path msvc_dir; // VC tools directory (...\Tools\MSVC\<ver>\).
+ dir_path psdk_dir; // Platform SDK directory (...\Windows Kits\<ver>\).
+ string psdk_ver; // Platform SDK version (under Include/, Lib/, etc).
};
#if defined(_WIN32) && !defined(BUILD2_BOOTSTRAP)
@@ -456,13 +460,16 @@ namespace build2
{0x87, 0xBF, 0xD5, 0x77, 0x83, 0x8F, 0x1D, 0x5C}};
// If cl is not empty, then find an installation that contains this cl.exe
- // path.
+ // path. In this case the path must be absolute and normalized.
//
static optional<msvc_info>
- find_msvc (const path& cl = path ())
+ find_msvc (const path& cl = path ())
{
using namespace butl;
+ assert (cl.empty () ||
+ (cl.absolute () && cl.normalized (false /* sep */)));
+
msvc_info r;
// Try to obtain the MSVC directory.
@@ -528,7 +535,7 @@ namespace build2
// Note: we cannot use bstr_t due to the Clang 9.0 bug #42842.
//
BSTR p;
- if (vs->ResolvePath (L"VC", &p) != S_OK)
+ if (vs->ResolvePath (L"VC", &p) != S_OK)
return dir_path ();
unique_ptr<wchar_t, bstr_deleter> deleter (p);
@@ -634,36 +641,73 @@ namespace build2
return nullopt;
}
- // Read the VC version from the file and bail out on error.
+ // If cl.exe path is not specified, then deduce the default VC tools
+ // directory for this Visual Studio instance. Otherwise, extract the
+ // tools directory from this path.
//
- string vc_ver; // For example, 14.23.28105.
+ // Note that in the latter case we could potentially avoid the above
+    // iteration over the VS instances, but let's make sure that the
+ // specified cl.exe path actually belongs to one of them as a sanity
+ // check.
+ //
+ if (cl.empty ())
+ {
+ // Read the VC version from the file and bail out on error.
+ //
+ string vc_ver; // For example, 14.23.28105.
- path vp (
- r.msvc_dir /
- path ("Auxiliary\\Build\\Microsoft.VCToolsVersion.default.txt"));
+ path vp (
+ r.msvc_dir /
+ path ("Auxiliary\\Build\\Microsoft.VCToolsVersion.default.txt"));
- try
- {
- ifdstream is (vp);
- vc_ver = trim (is.read_text ());
- }
- catch (const io_error&) {}
+ try
+ {
+ ifdstream is (vp);
+ vc_ver = trim (is.read_text ());
+ }
+ catch (const io_error&) {}
- // Make sure that the VC version directory exists.
- //
- if (!vc_ver.empty ())
- try
- {
- ((r.msvc_dir /= "Tools") /= "MSVC") /= vc_ver;
+ if (vc_ver.empty ())
+ return nullopt;
+
+ // Make sure that the VC version directory exists.
+ //
+ try
+ {
+ ((r.msvc_dir /= "Tools") /= "MSVC") /= vc_ver;
- if (!dir_exists (r.msvc_dir))
- r.msvc_dir.clear ();
+ if (!dir_exists (r.msvc_dir))
+ return nullopt;
+ }
+ catch (const invalid_path&) {return nullopt;}
+ catch (const system_error&) {return nullopt;}
}
- catch (const invalid_path&) {}
- catch (const system_error&) {}
+ else
+ {
+ (r.msvc_dir /= "Tools") /= "MSVC";
- if (r.msvc_dir.empty ())
- return nullopt;
+ // Extract the VC tools version from the cl.exe path and append it
+ // to r.msvc_dir.
+ //
+ if (!cl.sub (r.msvc_dir))
+ return nullopt;
+
+ // For example, 14.23.28105\bin\Hostx64\x64\cl.exe.
+ //
+ path p (cl.leaf (r.msvc_dir)); // Can't throw.
+
+ auto i (p.begin ()); // Tools version.
+ if (i == p.end ())
+ return nullopt;
+
+ r.msvc_dir /= *i; // Can't throw.
+
+ // For good measure, make sure that the tools version is not the
+ // last component in the cl.exe path.
+ //
+ if (++i == p.end ())
+ return nullopt;
+ }
}
// Try to obtain the latest Platform SDK directory and version.
@@ -717,7 +761,7 @@ namespace build2
//
for (const dir_entry& de:
dir_iterator (r.psdk_dir / dir_path ("Include"),
- false /* ignore_dangling */))
+ dir_iterator::no_follow))
{
if (de.type () == entry_type::directory)
{
@@ -735,6 +779,16 @@ namespace build2
return nullopt;
}
+ try
+ {
+ r.msvc_dir.normalize ();
+ r.psdk_dir.normalize ();
+ }
+ catch (const invalid_path&)
+ {
+ return nullopt;
+ }
+
return r;
}
#endif
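The cl.exe branch added to find_msvc() above essentially takes the path component that immediately follows ...\Tools\MSVC and insists that it is not the last one. A minimal sketch of that deduction using std::filesystem (the real code uses butl::path and different error handling; vc_tools_version() is a hypothetical name):

```cpp
#include <string>
#include <optional>
#include <iostream>
#include <filesystem>

namespace fs = std::filesystem;

// Extract the VC tools version from a cl.exe path, given the Tools\MSVC
// directory: the version is the component right after it and must not be
// the last component of the path.
//
static std::optional<std::string>
vc_tools_version (const fs::path& cl, const fs::path& msvc_dir)
{
  auto mi (msvc_dir.begin ()), me (msvc_dir.end ());
  auto ci (cl.begin ()), ce (cl.end ());

  // Check that msvc_dir is a prefix of cl.
  //
  for (; mi != me && ci != ce && *mi == *ci; ++mi, ++ci) ;

  if (mi != me || ci == ce)
    return std::nullopt;

  std::string v (ci->string ()); // For example, "14.23.28105".

  if (++ci == ce) // The version must not be the last component.
    return std::nullopt;

  return v;
}

int main ()
{
  auto v (vc_tools_version (
    "C:/VS/VC/Tools/MSVC/14.23.28105/bin/Hostx64/x64/cl.exe",
    "C:/VS/VC/Tools/MSVC"));

  std::cout << (v ? *v : "<none>") << '\n'; // 14.23.28105
}
```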
@@ -775,7 +829,8 @@ namespace build2
// Note: allowed to change pre if succeeds.
//
static guess_result
- guess (const char* xm,
+ guess (context& ctx,
+ const char* xm,
lang xl,
const path& xc,
const strings& x_mo,
@@ -926,10 +981,12 @@ namespace build2
// We try to find the matching installation only for MSVC (for Clang
// we extract this information from the compiler).
//
- if (xc.absolute () &&
- (pt == type::msvc && !pv))
+ if (xc.absolute () && (pt == type::msvc && !pv))
{
- if (optional<msvc_info> mi = find_msvc (xc))
+ path cl (xc); // Absolute but may not be normalized.
+ cl.normalize (); // Can't throw since this is an existing path.
+
+ if (optional<msvc_info> mi = find_msvc (cl))
{
search_info = info_ptr (
new msvc_info (move (*mi)), msvc_info_deleter);
@@ -965,7 +1022,7 @@ namespace build2
#endif
string cache;
- auto run = [&cs, &env, &args, &cache] (
+ auto run = [&ctx, &cs, &env, &args, &cache] (
const char* o,
auto&& f,
bool checksum = false) -> guess_result
@@ -973,9 +1030,10 @@ namespace build2
args[args.size () - 2] = o;
cache.clear ();
return build2::run<guess_result> (
+ ctx,
3 /* verbosity */,
env,
- args.data (),
+ args,
forward<decltype (f)> (f),
false /* error */,
false /* ignore_exit */,
@@ -1022,7 +1080,7 @@ namespace build2
// The gcc -v output will have a last line in the form:
//
- // "gcc version X.Y[.Z][...] ..."
+ // "gcc version X[.Y[.Z]][...] ..."
//
// The "version" word can probably be translated. For example:
//
@@ -1034,6 +1092,7 @@ namespace build2
// gcc version 5.1.0 (Ubuntu 5.1.0-0ubuntu11~14.04.1)
// gcc version 6.0.0 20160131 (experimental) (GCC)
// gcc version 9.3-win32 20200320 (GCC)
+ // gcc version 10-win32 20220324 (GCC)
//
if (cache.empty ())
{
@@ -1273,7 +1332,11 @@ namespace build2
//
const char* evars[] = {"CL=", "_CL_=", nullptr};
- r = build2::run<guess_result> (3, process_env (xp, evars), f, false);
+ r = build2::run<guess_result> (ctx,
+ 3,
+ process_env (xp, evars),
+ f,
+ false);
if (r.empty ())
{
@@ -1424,10 +1487,12 @@ namespace build2
// And VC 16 seems to have the runtime version 14.1 (and not 14.2, as
// one might expect; DLLs are still *140.dll but there are now _1 and _2
// variants for, say, msvcp140.dll). We will, however, call it 14.2
- // (which is the version of the "toolset") in our target triplet.
+ // (which is the version of the "toolset") in our target triplet. And we
+ // will call VC 17 14.3 (which is also the version of the "toolset").
//
// year ver cl crt/dll toolset
//
+ // 2022 17.X 19.3X 14.?/140 14.3X
// 2019 16.X 19.2X 14.2/140 14.2X
// 2017 15.9 19.16 14.1/140 14.16
// 2017 15.8 19.15 14.1/140
@@ -1446,7 +1511,8 @@ namespace build2
//
// _MSC_VER is the numeric cl version, e.g., 1921 for 19.21.
//
- /**/ if (v.major == 19 && v.minor >= 20) return "14.2";
+ /**/ if (v.major == 19 && v.minor >= 30) return "14.3";
+ else if (v.major == 19 && v.minor >= 20) return "14.2";
else if (v.major == 19 && v.minor >= 10) return "14.1";
else if (v.major == 19 && v.minor == 0) return "14.0";
else if (v.major == 18 && v.minor == 0) return "12.0";
@@ -1470,8 +1536,8 @@ namespace build2
// Studio command prompt puts into INCLUDE) including any paths from the
// compiler mode and their count.
//
- // Note that currently we don't add any ATL/MFC or WinRT paths (but could
- // do that probably first checking if they exist/empty).
+ // Note that currently we don't add any ATL/MFC paths (but could do that
+ // probably first checking if they exist/empty).
//
static pair<dir_paths, size_t>
msvc_hdr (const msvc_info& mi, const strings& mo)
@@ -1483,6 +1549,8 @@ namespace build2
msvc_extract_header_search_dirs (mo, r);
size_t rn (r.size ());
+ // Note: the resulting directories are normalized by construction.
+ //
r.push_back (dir_path (mi.msvc_dir) /= "include");
// This path structure only appeared in Platform SDK 10 (if anyone wants
@@ -1496,6 +1564,7 @@ namespace build2
r.push_back (dir_path (d) /= "ucrt" );
r.push_back (dir_path (d) /= "shared");
r.push_back (dir_path (d) /= "um" );
+ r.push_back (dir_path (d) /= "winrt" );
}
return make_pair (move (r), rn);
@@ -1531,6 +1600,8 @@ namespace build2
msvc_extract_library_search_dirs (mo, r);
size_t rn (r.size ());
+ // Note: the resulting directories are normalized by construction.
+ //
r.push_back ((dir_path (mi.msvc_dir) /= "lib") /= cpu);
// This path structure only appeared in Platform SDK 10 (if anyone wants
@@ -1585,7 +1656,8 @@ namespace build2
"LIB", "LINK", "_LINK_", nullptr};
static compiler_info
- guess_msvc (const char* xm,
+ guess_msvc (context&,
+ const char* xm,
lang xl,
const path& xc,
const string* xv,
@@ -1608,6 +1680,7 @@ namespace build2
// "x86"
// "x64"
// "ARM"
+ // "ARM64"
//
compiler_version ver;
{
@@ -1671,9 +1744,10 @@ namespace build2
for (size_t b (0), e (0), n;
(n = next_word (s, b, e, ' ', ',')) != 0; )
{
- if (s.compare (b, n, "x64", 3) == 0 ||
- s.compare (b, n, "x86", 3) == 0 ||
- s.compare (b, n, "ARM", 3) == 0 ||
+ if (s.compare (b, n, "x64", 3) == 0 ||
+ s.compare (b, n, "x86", 3) == 0 ||
+ s.compare (b, n, "ARM64", 5) == 0 ||
+ s.compare (b, n, "ARM", 3) == 0 ||
s.compare (b, n, "80x86", 5) == 0)
{
cpu.assign (s, b, n);
@@ -1684,15 +1758,15 @@ namespace build2
if (cpu.empty ())
fail << "unable to extract MSVC target CPU from " << "'" << s << "'";
- // Now we need to map x86, x64, and ARM to the target triplets. The
- // problem is, there aren't any established ones so we got to invent
- // them ourselves. Based on the discussion in
+ // Now we need to map x86, x64, ARM, and ARM64 to the target
+ // triplets. The problem is, there aren't any established ones so we
+ // got to invent them ourselves. Based on the discussion in
// <libbutl/target-triplet.hxx>, we need something in the
// CPU-VENDOR-OS-ABI form.
//
// The CPU part is fairly straightforward with x86 mapped to 'i386'
- // (or maybe 'i686'), x64 to 'x86_64', and ARM to 'arm' (it could also
- // include the version, e.g., 'amrv8').
+ // (or maybe 'i686'), x64 to 'x86_64', ARM to 'arm' (it could also
+    // include the version, e.g., 'armv8'), and ARM64 to 'aarch64'.
//
// The (toolchain) VENDOR is also straightforward: 'microsoft'. Why
// not omit it? Two reasons: firstly, there are other compilers with
@@ -1702,7 +1776,7 @@ namespace build2
//
// OS-ABI is where things are not as clear cut. The OS part shouldn't
// probably be just 'windows' since we have Win32 and WinCE. And
- // WinRT. And Universal Windows Platform (UWP). So perhaps the
+ // WinRT. And Universal Windows Platform (UWP). So perhaps the
// following values for OS: 'win32', 'wince', 'winrt', 'winup'.
//
// For 'win32' the ABI part could signal the Microsoft C/C++ runtime
@@ -1727,9 +1801,10 @@ namespace build2
// Putting it all together, Visual Studio 2015 will then have the
// following target triplets:
//
- // x86 i386-microsoft-win32-msvc14.0
- // x64 x86_64-microsoft-win32-msvc14.0
- // ARM arm-microsoft-winup-???
+ // x86 i386-microsoft-win32-msvc14.0
+ // x64 x86_64-microsoft-win32-msvc14.0
+ // ARM arm-microsoft-winup-???
+ // ARM64 aarch64-microsoft-win32-msvc14.0
//
if (cpu == "ARM")
fail << "cl.exe ARM/WinRT/UWP target is not yet supported";
@@ -1739,6 +1814,8 @@ namespace build2
t = "x86_64-microsoft-win32-msvc";
else if (cpu == "x86" || cpu == "80x86")
t = "i386-microsoft-win32-msvc";
+ else if (cpu == "ARM64")
+ t = "aarch64-microsoft-win32-msvc";
else
assert (false);
@@ -1750,6 +1827,8 @@ namespace build2
else
ot = t = *xt;
+ target_triplet tt (t); // Shouldn't fail.
+
// If we have the MSVC installation information, then this means we are
// running out of the Visual Studio command prompt and will have to
// supply PATH/INCLUDE/LIB/IFCPATH equivalents ourselves.
@@ -1761,7 +1840,7 @@ namespace build2
if (const msvc_info* mi = static_cast<msvc_info*> (gr.info.get ()))
{
- const char* cpu (msvc_cpu (target_triplet (t).cpu));
+ const char* cpu (msvc_cpu (tt.cpu));
lib_dirs = msvc_lib (*mi, x_mo, cpu);
hdr_dirs = msvc_hdr (*mi, x_mo);
@@ -1849,7 +1928,8 @@ namespace build2
"SDKROOT", "MACOSX_DEPLOYMENT_TARGET", nullptr};
static compiler_info
- guess_gcc (const char* xm,
+ guess_gcc (context& ctx,
+ const char* xm,
lang xl,
const path& xc,
const string* xv,
@@ -1868,7 +1948,7 @@ namespace build2
// though language words can be translated and even rearranged (see
// examples above).
//
- // "gcc version X.Y[.Z][...]"
+ // "gcc version X[.Y[.Z]][...]"
//
compiler_version ver;
{
@@ -1907,7 +1987,10 @@ namespace build2
//
try
{
- semantic_version v (string (s, b, e - b), ".-+");
+ semantic_version v (string (s, b, e - b),
+ semantic_version::allow_omit_minor |
+ semantic_version::allow_build,
+ ".-+");
ver.major = v.major;
ver.minor = v.minor;
ver.patch = v.patch;
@@ -1959,7 +2042,7 @@ namespace build2
//
auto f = [] (string& l, bool) {return move (l);};
- t = run<string> (3, xp, args.data (), f, false);
+ t = run<string> (ctx, 3, xp, args, f, false);
if (t.empty ())
{
@@ -1967,7 +2050,7 @@ namespace build2
<< "falling back to -dumpmachine";});
args[args.size () - 2] = "-dumpmachine";
- t = run<string> (3, xp, args.data (), f, false);
+ t = run<string> (ctx, 3, xp, args, f, false);
}
if (t.empty ())
@@ -2110,9 +2193,9 @@ namespace build2
process pr (run_start (3 /* verbosity */,
xp,
args,
- -2 /* stdin (/dev/null) */,
- -1 /* stdout */,
- false /* error (2>&1) */));
+ -2 /* stdin (to /dev/null) */,
+ -1 /* stdout */,
+ 1 /* stderr (to stdout) */));
clang_msvc_info r;
@@ -2264,7 +2347,7 @@ namespace build2
// that.
}
- if (!run_finish_code (args.data (), pr, l))
+ if (!run_finish_code (args.data (), pr, l, 2 /* verbosity */))
fail << "unable to extract MSVC information from " << xp;
if (const char* w = (
@@ -2282,23 +2365,27 @@ namespace build2
// These are derived from gcc_* plus the sparse documentation (clang(1))
// and source code.
//
+ // Note that for now for Clang targeting MSVC we use msvc_env but should
+ // probably use a combined list.
+ //
// See also the note on environment and caching below if adding any new
// variables.
//
static const char* clang_c_env[] = {
- "CPATH", "C_INCLUDE_PATH",
+ "CPATH", "C_INCLUDE_PATH", "CCC_OVERRIDE_OPTIONS",
"LIBRARY_PATH", "LD_RUN_PATH",
"COMPILER_PATH",
nullptr};
static const char* clang_cxx_env[] = {
- "CPATH", "CPLUS_INCLUDE_PATH",
+ "CPATH", "CPLUS_INCLUDE_PATH", "CCC_OVERRIDE_OPTIONS",
"LIBRARY_PATH", "LD_RUN_PATH",
"COMPILER_PATH",
nullptr};
static compiler_info
- guess_clang (const char* xm,
+ guess_clang (context& ctx,
+ const char* xm,
lang xl,
const path& xc,
const string* xv,
@@ -2337,6 +2424,12 @@ namespace build2
//
// emcc (...) 2.0.8
//
+ // Pre-releases of the vanilla Clang append `rc` or `git` to the
+ // version, unfortunately without a separator. So we will handle these
+ // ad hoc. For example:
+ //
+ // FreeBSD clang version 18.1.0rc (https://github.com/llvm/llvm-project.git llvmorg-18-init-18361-g22683463740e)
+ //
auto extract_version = [] (const string& s, bool patch, const char* what)
-> compiler_version
{
@@ -2351,8 +2444,28 @@ namespace build2
// end of the word position (first space). In fact, we can just
// check if it is >= e.
//
- if (s.find_first_not_of ("1234567890.", b, 11) >= e)
+ size_t p (s.find_first_not_of ("1234567890.", b, 11));
+ if (p >= e)
break;
+
+ // Handle the unseparated `rc` and `git` suffixes.
+ //
+ if (p != string::npos)
+ {
+ if (p + 2 == e && (e - b) > 2 &&
+ s[p] == 'r' && s[p + 1] == 'c')
+ {
+ e -= 2;
+ break;
+ }
+
+ if (p + 3 == e && (e - b) > 3 &&
+ s[p] == 'g' && s[p + 1] == 'i' && s[p + 2] == 't')
+ {
+ e -= 3;
+ break;
+ }
+ }
}
if (b == e)
@@ -2388,7 +2501,14 @@ namespace build2
ver.patch = next ("patch", patch);
if (e != s.size ())
- ver.build.assign (s, e + 1, string::npos);
+ {
+ // Skip the separator (it could also be unseparated `rc` or `git`).
+ //
+ if (s[e] == ' ' || s[e] == '-')
+ e++;
+
+ ver.build.assign (s, e, string::npos);
+ }
return ver;
};
@@ -2412,7 +2532,10 @@ namespace build2
// Some overrides for testing.
//
+ //string s (xv != nullptr ? *xv : "");
+ //
//s = "clang version 3.7.0 (tags/RELEASE_370/final)";
+ //s = "FreeBSD clang version 18.1.0rc (https://github.com/llvm/llvm-project.git llvmorg-18-init-18361-g22683463740e)";
//
//gr.id.variant = "apple";
//s = "Apple LLVM version 7.3.0 (clang-703.0.16.1)";
@@ -2440,10 +2563,21 @@ namespace build2
//
// Specifically, we now look in the libc++'s __config file for the
// _LIBCPP_VERSION and use the previous version as a conservative
      // estimate (note that there could be multiple __config files with
// potentially different versions so compile with -v to see which one
// gets picked up).
//
+ // Also, lately, we started seeing _LIBCPP_VERSION values like 15.0.6
+ // or 16.0.2 which would suggest the base is 15.0.5 or 16.0.1. But
+ // that assumption did not check out with the actual usage. For
+ // example, vanilla Clang 16 should no longer require -fmodules-ts but
+      // Apple's version (that is presumably based on it) still does. So
+ // the theory here is that Apple upgrades to newer libc++ while
+ // keeping the old compiler. Which means we must be more conservative
+ // and assume something like 15.0.6 is still 14-based. But then you
+ // get -Wunqualified-std-cast-call in 14, which was supposedly only
+ // introduced in Clang 15. So maybe not.
+ //
// Note that this is Apple Clang version and not XCode version.
//
// 4.2 -> 3.2svn
@@ -2463,34 +2597,41 @@ namespace build2
// 12.0.0 -> 9.0
// 12.0.5 -> 10.0 (yes, seriously!)
// 13.0.0 -> 11.0
+ // 13.1.6 -> 12.0
+ // 14.0.0 -> 12.0 (_LIBCPP_VERSION=130000)
+ // 14.0.3 -> 15.0 (_LIBCPP_VERSION=150006)
+ // 15.0.0 -> 16.0 (_LIBCPP_VERSION=160002)
//
uint64_t mj (var_ver->major);
uint64_t mi (var_ver->minor);
uint64_t pa (var_ver->patch);
- if (mj >= 13) {mj = 11; mi = 0;}
- else if (mj == 12 && (mi > 0 || pa >= 5)) {mj = 10; mi = 0;}
- else if (mj == 12) {mj = 9; mi = 0;}
- else if (mj == 11 && (mi > 0 || pa >= 3)) {mj = 8; mi = 0;}
- else if (mj == 11) {mj = 7; mi = 0;}
- else if (mj == 10) {mj = 6; mi = 0;}
- else if (mj == 9 && mi >= 1) {mj = 5; mi = 0;}
- else if (mj == 9) {mj = 4; mi = 0;}
- else if (mj == 8) {mj = 3; mi = 9;}
- else if (mj == 7 && mi >= 3) {mj = 3; mi = 8;}
- else if (mj == 7) {mj = 3; mi = 7;}
- else if (mj == 6 && mi >= 1) {mj = 3; mi = 5;}
- else if (mj == 6) {mj = 3; mi = 4;}
- else if (mj == 5 && mi >= 1) {mj = 3; mi = 3;}
- else if (mj == 5) {mj = 3; mi = 2;}
- else if (mj == 4 && mi >= 2) {mj = 3; mi = 1;}
- else {mj = 3; mi = 0;}
+ if (mj >= 15) {mj = 16; mi = 0; pa = 0;}
+ else if (mj == 14 && (mi > 0 || pa >= 3)) {mj = 15; mi = 0; pa = 0;}
+ else if (mj == 14 || (mj == 13 && mi >= 1)) {mj = 12; mi = 0; pa = 0;}
+ else if (mj == 13) {mj = 11; mi = 0; pa = 0;}
+ else if (mj == 12 && (mi > 0 || pa >= 5)) {mj = 10; mi = 0; pa = 0;}
+ else if (mj == 12) {mj = 9; mi = 0; pa = 0;}
+ else if (mj == 11 && (mi > 0 || pa >= 3)) {mj = 8; mi = 0; pa = 0;}
+ else if (mj == 11) {mj = 7; mi = 0; pa = 0;}
+ else if (mj == 10) {mj = 6; mi = 0; pa = 0;}
+ else if (mj == 9 && mi >= 1) {mj = 5; mi = 0; pa = 0;}
+ else if (mj == 9) {mj = 4; mi = 0; pa = 0;}
+ else if (mj == 8) {mj = 3; mi = 9; pa = 0;}
+ else if (mj == 7 && mi >= 3) {mj = 3; mi = 8; pa = 0;}
+ else if (mj == 7) {mj = 3; mi = 7; pa = 0;}
+ else if (mj == 6 && mi >= 1) {mj = 3; mi = 5; pa = 0;}
+ else if (mj == 6) {mj = 3; mi = 4; pa = 0;}
+ else if (mj == 5 && mi >= 1) {mj = 3; mi = 3; pa = 0;}
+ else if (mj == 5) {mj = 3; mi = 2; pa = 0;}
+ else if (mj == 4 && mi >= 2) {mj = 3; mi = 1; pa = 0;}
+ else {mj = 3; mi = 0; pa = 0;}
ver = compiler_version {
- to_string (mj) + '.' + to_string (mi) + ".0",
+ to_string (mj) + '.' + to_string (mi) + '.' + to_string (pa),
mj,
mi,
- 0,
+ pa,
""};
}
else if (emscr)
@@ -2543,7 +2684,7 @@ namespace build2
// for LC_ALL.
//
auto f = [] (string& l, bool) {return move (l);};
- t = run<string> (3, xp, args.data (), f, false);
+ t = run<string> (ctx, 3, xp, args, f, false);
if (t.empty ())
fail << "unable to extract target architecture from " << xc
@@ -2603,7 +2744,7 @@ namespace build2
const char* cpu (msvc_cpu (tt.cpu));
// Come up with the system library search paths. Ideally we would want
- // to extract this from Clang and -print-search-paths would have been
+ // to extract this from Clang and -print-search-dirs would have been
// the natural way for Clang to report it. But no luck.
//
lib_dirs = msvc_lib (mi, x_mo, cpu);
@@ -2771,7 +2912,8 @@ namespace build2
}
static compiler_info
- guess_icc (const char* xm,
+ guess_icc (context& ctx,
+ const char* xm,
lang xl,
const path& xc,
const string* xv,
@@ -2835,7 +2977,7 @@ namespace build2
//
// @@ TODO: running without the mode options.
//
- s = run<string> (3, env, "-V", f, false);
+ s = run<string> (ctx, 3, env, "-V", f, false);
if (s.empty ())
fail << "unable to extract signature from " << xc << " -V output";
@@ -2961,7 +3103,7 @@ namespace build2
// The -V output is sent to STDERR.
//
- t = run<string> (3, env, args.data (), f, false);
+ t = run<string> (ctx, 3, env, args, f, false);
if (t.empty ())
fail << "unable to extract target architecture from " << xc
@@ -3012,7 +3154,7 @@ namespace build2
//
{
auto f = [] (string& l, bool) {return move (l);};
- t = run<string> (3, xp, "-dumpmachine", f);
+ t = run<string> (ctx, 3, xp, "-dumpmachine", f);
}
if (t.empty ())
@@ -3093,7 +3235,8 @@ namespace build2
static global_cache<compiler_info> cache;
const compiler_info&
- guess (const char* xm,
+ guess (context& ctx,
+ const char* xm,
lang xl,
const string& ec,
const path& xc,
@@ -3167,7 +3310,7 @@ namespace build2
if (pre.type != invalid_compiler_type)
{
- gr = guess (xm, xl, xc, x_mo, xi, pre, cs);
+ gr = guess (ctx, xm, xl, xc, x_mo, xi, pre, cs);
if (gr.empty ())
{
@@ -3183,13 +3326,14 @@ namespace build2
}
if (gr.empty ())
- gr = guess (xm, xl, xc, x_mo, xi, pre, cs);
+ gr = guess (ctx, xm, xl, xc, x_mo, xi, pre, cs);
if (gr.empty ())
fail << "unable to guess " << xl << " compiler type of " << xc <<
info << "use config." << xm << ".id to specify explicitly";
compiler_info (*gf) (
+ context&,
const char*, lang, const path&, const string*, const string*,
const strings&,
const strings*, const strings*,
@@ -3209,7 +3353,8 @@ namespace build2
case compiler_type::icc: gf = &guess_icc; break;
}
- compiler_info r (gf (xm, xl, xc, xv, xt,
+ compiler_info r (gf (ctx,
+ xm, xl, xc, xv, xt,
x_mo, c_po, x_po, c_co, x_co, c_lo, x_lo,
move (gr), cs));
@@ -3367,6 +3512,7 @@ namespace build2
// In the future we will probably have to maintain per-standard additions.
//
static const char* std_importable[] = {
+ "<initializer_list>", // Note: keep first (present in freestanding).
"<algorithm>",
"<any>",
"<array>",
@@ -3391,7 +3537,6 @@ namespace build2
"<fstream>",
"<functional>",
"<future>",
- "<initializer_list>",
"<iomanip>",
"<ios>",
"<iosfwd>",
@@ -3490,6 +3635,9 @@ namespace build2
// is currently not provided by GCC. Though entering missing headers
// should be harmless.
//
+ // Plus, a freestanding implementation may only have a subset of such
+ // headers (see [compliance]).
+ //
pair<const path, importable_headers::groups>* p;
auto add_groups = [&p] (bool imp)
{
@@ -3511,29 +3659,39 @@ namespace build2
}
else
{
+ // While according to [compliance] a freestanding implementation
+ // should provide a subset of headers, including <initializer_list>,
+ // there seem to be cases where no headers are provided at all (see GH
+ // issue #219). So if we cannot find <initializer_list>, we just skip
+ // the whole thing.
+ //
p = hs.insert_angle (sys_hdr_dirs, std_importable[0]);
- assert (p != nullptr);
- add_groups (true);
+ if (p != nullptr)
+ {
- dir_path d (p->first.directory ());
+ add_groups (true);
- auto add_header = [&hs, &d, &p, add_groups] (const char* f, bool imp)
- {
- path fp (d);
- fp.combine (f + 1, strlen (f) - 2, '\0'); // Assuming simple.
+ dir_path d (p->first.directory ());
- p = &hs.insert_angle (move (fp), f);
- add_groups (imp);
- };
+ auto add_header = [&hs, &d, &p, add_groups] (const char* f, bool imp)
+ {
+ path fp (d);
+ fp.combine (f + 1, strlen (f) - 2, '\0'); // Assuming simple.
- for (size_t i (1);
- i != sizeof (std_importable) / sizeof (std_importable[0]);
- ++i)
- add_header (std_importable[i], true);
+ p = &hs.insert_angle (move (fp), f);
+ add_groups (imp);
+ };
- for (const char* f: std_non_importable)
- add_header (f, false);
+ for (size_t i (1);
+ i != sizeof (std_importable) / sizeof (std_importable[0]);
+ ++i)
+ add_header (std_importable[i], true);
+
+ for (const char* f: std_non_importable)
+ add_header (f, false);
+ }
}
}
}
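The unseparated `rc`/`git` pre-release suffix handling added to extract_version() above amounts to trimming those two suffixes off an otherwise numeric version word. A standalone illustration (strip_prerelease_suffix() is a hypothetical helper and only mirrors the suffix trimming, not the full Clang version parsing):

```cpp
#include <string>
#include <cassert>

// Trim an unseparated `rc` or `git` suffix from a version word, e.g.,
// "18.1.0rc" -> "18.1.0", leaving already-clean versions untouched.
//
static std::string
strip_prerelease_suffix (std::string v)
{
  std::string::size_type e (v.size ());
  std::string::size_type p (v.find_first_not_of ("1234567890."));

  if (p != std::string::npos)
  {
    if      (p + 2 == e && e > 2 && v.compare (p, 2, "rc")  == 0) v.resize (p);
    else if (p + 3 == e && e > 3 && v.compare (p, 3, "git") == 0) v.resize (p);
  }

  return v;
}

int main ()
{
  assert (strip_prerelease_suffix ("18.1.0rc")  == "18.1.0");
  assert (strip_prerelease_suffix ("18.1.0git") == "18.1.0");
  assert (strip_prerelease_suffix ("16.0.6")    == "16.0.6");
}
```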
diff --git a/libbuild2/cc/guess.hxx b/libbuild2/cc/guess.hxx
index 53acc15..7cbbd87 100644
--- a/libbuild2/cc/guess.hxx
+++ b/libbuild2/cc/guess.hxx
@@ -253,7 +253,8 @@ namespace build2
// that most of it will be the same, at least for C and C++.
//
const compiler_info&
- guess (const char* xm, // Module (for var names in diagnostics).
+ guess (context&,
+ const char* xm, // Module (for var names in diagnostics).
lang xl, // Language.
const string& ec, // Environment checksum.
const path& xc, // Compiler path.
diff --git a/libbuild2/cc/init.cxx b/libbuild2/cc/init.cxx
index affc4ab..e124450 100644
--- a/libbuild2/cc/init.cxx
+++ b/libbuild2/cc/init.cxx
@@ -86,7 +86,10 @@ namespace build2
// Enter variables.
//
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
auto v_t (variable_visibility::target);
@@ -97,13 +100,19 @@ namespace build2
vp.insert<strings> ("config.cc.loptions");
vp.insert<strings> ("config.cc.aoptions");
vp.insert<strings> ("config.cc.libs");
- vp.insert<string> ("config.cc.internal.scope");
+
+ vp.insert<string> ("config.cc.internal.scope");
+
+ vp.insert<bool> ("config.cc.reprocess"); // See cc.preprocess below.
+
+ vp.insert<abs_dir_path> ("config.cc.pkgconfig.sysroot");
vp.insert<strings> ("cc.poptions");
vp.insert<strings> ("cc.coptions");
vp.insert<strings> ("cc.loptions");
vp.insert<strings> ("cc.aoptions");
vp.insert<strings> ("cc.libs");
+
vp.insert<string> ("cc.internal.scope");
vp.insert<strings> ("cc.internal.libs");
@@ -113,6 +122,13 @@ namespace build2
vp.insert<vector<name>> ("cc.export.libs");
vp.insert<vector<name>> ("cc.export.impl_libs");
+ // Header (-I) and library (-L) search paths to use in the generated .pc
+ // files instead of the default install.{include,lib}. Relative paths
+ // are resolved as install paths.
+ //
+ vp.insert<dir_paths> ("cc.pkgconfig.include");
+ vp.insert<dir_paths> ("cc.pkgconfig.lib");
+
// Hint variables (not overridable).
//
vp.insert<string> ("config.cc.id", false);
@@ -126,15 +142,20 @@ namespace build2
vp.insert<string> ("cc.runtime");
vp.insert<string> ("cc.stdlib");
- // Target type, for example, "C library" or "C++ library". Should be set
- // on the target as a rule-specific variable by the matching rule to the
- // name of the module (e.g., "c", "cxx"). Currenly only set for
- // libraries and is used to decide which *.libs to use during static
- // linking.
- //
- // It can also be the special "cc" value which means a C-common library
- // but specific language is not known. Used in the import installed
- // logic.
+ // Library target type in the <lang>[,<type>...] form where <lang> is
+ // "c" (C library), "cxx" (C++ library), or "cc" (C-common library but
+ // the specific language is not known). Currently recognized <type>
+ // values are "binless" (library is binless) and "recursively-binless"
+ // (library and all its prerequisite libraries are binless). Note that
+ // another indication of a binless library is an empty path, which could
+ // be easier/faster to check. Note also that there should be no
+ // whitespaces of any kind and <lang> is always first.
+ //
+ // This value should be set on the library target as a rule-specific
+ // variable by the matching rule. It is also saved in the generated
+ // pkg-config files. Currently <lang> is used to decide which *.libs to
+ // use during static linking. The "cc" language is used in the import
+ // installed logic.
//
// Note that this variable cannot be set via the target type/pattern-
// specific mechanism (see process_libraries()).
@@ -162,9 +183,15 @@ namespace build2
// Ability to disable using preprocessed output for compilation.
//
- vp.insert<bool> ("config.cc.reprocess");
vp.insert<bool> ("cc.reprocess");
+ // Execute serially with regards to any other recipe. This is primarily
+ // useful when compiling large translation units or linking large
+ // binaries that require so much memory that doing that in parallel with
+ // other compilation/linking jobs is likely to summon the OOM killer.
+ //
+ vp.insert<bool> ("cc.serialize");
+
// Register scope operation callback.
//
// It feels natural to clean up sidebuilds as a post operation but that
@@ -322,14 +349,24 @@ namespace build2
if (lookup l = lookup_config (rs, "config.cc.reprocess"))
rs.assign ("cc.reprocess") = *l;
+ // config.cc.pkgconfig.sysroot
+ //
+ // Let's look it up instead of just marking for saving to make sure the
+ // path is valid.
+ //
+ // Note: save omitted.
+ //
+ lookup_config (rs, "config.cc.pkgconfig.sysroot");
+
// Load the bin.config module.
//
if (!cast_false<bool> (rs["bin.config.loaded"]))
{
- // Prepare configuration hints. They are only used on the first load
- // of bin.config so we only populate them on our first load.
+ // Prepare configuration hints (pretend it belongs to root scope).
+ // They are only used on the first load of bin.config so we only
+ // populate them on our first load.
//
- variable_map h (rs.ctx);
+ variable_map h (rs);
if (first)
{
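Since the library type value documented above is plain `<lang>[,<type>...]` text with no whitespace, a consumer can split it with ordinary string handling. A minimal sketch (the struct and function names are illustrative, not part of the cc module):

```cpp
#include <string>
#include <vector>
#include <sstream>
#include <iostream>

struct lib_type
{
  std::string lang;               // "c", "cxx", or "cc".
  std::vector<std::string> types; // E.g., "binless", "recursively-binless".
};

// Parse a <lang>[,<type>...] value, e.g., "cxx,binless,recursively-binless".
//
static lib_type
parse_lib_type (const std::string& v)
{
  lib_type r;
  std::istringstream is (v);

  for (std::string t; std::getline (is, t, ','); )
  {
    if (r.lang.empty ())
      r.lang = t;
    else
      r.types.push_back (t);
  }

  return r;
}

int main ()
{
  lib_type t (parse_lib_type ("cxx,binless,recursively-binless"));
  std::cout << t.lang << " (" << t.types.size () << " type(s))\n"; // cxx (2 type(s))
}
```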
diff --git a/libbuild2/cc/install-rule.cxx b/libbuild2/cc/install-rule.cxx
index 560b8a7..6758e03 100644
--- a/libbuild2/cc/install-rule.cxx
+++ b/libbuild2/cc/install-rule.cxx
@@ -18,20 +18,67 @@ namespace build2
{
using namespace bin;
+ using posthoc_prerequisite_target =
+ context::posthoc_target::prerequisite_target;
+
// install_rule
//
install_rule::
install_rule (data&& d, const link_rule& l)
: common (move (d)), link_ (l) {}
- const target* install_rule::
+ // Wrap the file_rule's recipe into a data-carrying recipe.
+ //
+ struct install_match_data
+ {
+ build2::recipe recipe;
+ uint64_t options; // Match options.
+ link_rule::libs_paths libs_paths;
+
+ target_state
+ operator() (action a, const target& t)
+ {
+ return recipe (a, t);
+ }
+ };
+
+ bool install_rule::
+ filter (action a, const target& t, const target& m) const
+ {
+ if (!t.is_a<exe> ())
+ {
+ // If runtime-only, filter out all known buildtime target types.
+ //
+ const auto& md (t.data<install_match_data> (a));
+
+ if ((md.options & lib::option_install_buildtime) == 0)
+ {
+        if (m.is_a<liba> () ||   // Static library.
+ m.is_a<pc> () || // pkg-config file.
+ m.is_a<libi> ()) // Import library.
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ pair<const target*, uint64_t> install_rule::
filter (const scope* is,
- action a, const target& t, prerequisite_iterator& i) const
+ action a, const target& t, prerequisite_iterator& i,
+ match_extra& me) const
{
// NOTE: see libux_install_rule::filter() if changing anything here.
const prerequisite& p (i->prerequisite);
+ uint64_t options (match_extra::all_options);
+
+ otype ot (link_type (t).type);
+
+ // @@ TMP: drop eventually.
+ //
+#if 0
// If this is a shared library prerequisite, install it as long as it is
// in the installation scope.
//
@@ -43,10 +90,14 @@ namespace build2
//
// Note: we install ad hoc prerequisites by default.
//
- otype ot (link_type (t).type);
+ // Note: at least one must be true since we only register this rule for
+    // exe{} and lib[as]{} (this makes sure the following if-condition will
+ // always be true for libx{}).
+ //
bool st (t.is_a<exe> () || t.is_a<libs> ()); // Target needs shared.
bool at (t.is_a<liba> () || t.is_a<libs> ()); // Target needs static.
+ assert (st || at);
if ((st && (p.is_a<libx> () || p.is_a<libs> ())) ||
(at && (p.is_a<libx> () || p.is_a<liba> ())))
@@ -59,26 +110,115 @@ namespace build2
if (const libx* l = pt->is_a<libx> ())
pt = link_member (*l, a, link_info (t.base_scope (), ot));
- // Note: not redundant since we are returning a member.
+ // Note: not redundant since we could be returning a member.
//
if ((st && pt->is_a<libs> ()) || (at && pt->is_a<liba> ()))
- return is == nullptr || pt->in (*is) ? pt : nullptr;
+ {
+ // Adjust match options.
+ //
+ if (a.operation () != update_id)
+ {
+ if (t.is_a<exe> ())
+ options = lib::option_install_runtime;
+ else
+ {
+ // This is a library prerequisite of a library target and
+ // runtime-only begets runtime-only.
+ //
+ if (me.cur_options == lib::option_install_runtime)
+ options = lib::option_install_runtime;
+ }
+ }
+
+ return make_pair (is == nullptr || pt->in (*is) ? pt : nullptr,
+ options);
+ }
// See through to libu*{} members. Note that we are always in the same
// project (and thus amalgamation).
//
if (pt->is_a<libux> ())
- return pt;
+ {
+ // Adjust match options (similar to above).
+ //
+ if (a.operation () != update_id && !pt->is_a<libue> ())
+ {
+ if (t.is_a<exe> ())
+ options = lib::option_install_runtime;
+ else
+ {
+ if (me.cur_options == lib::option_install_runtime)
+ options = lib::option_install_runtime;
+ }
+ }
+
+ return make_pair (pt, options);
+ }
}
+#else
+ // Note that at first it may seem like we don't need to install static
+ // library prerequisites of executables. But such libraries may still
+ // have prerequisites that are needed at runtime (say, some data files).
+ // So we install all libraries as long as they are in the installation
+    // scope and deal with runtime vs buildtime distinction using match
+ // options.
+ //
+ // Note: for now we assume these prerequisites never come from see-
+ // through groups.
+ //
+ // Note: we install ad hoc prerequisites by default.
+ //
+ if (p.is_a<libx> () || p.is_a<libs> () || p.is_a<liba> ())
+ {
+ const target* pt (&search (t, p));
+
+ // If this is the lib{}/libu*{} group, pick a member which we would
+ // link. For libu*{} we want the "see through" logic.
+ //
+ if (const libx* l = pt->is_a<libx> ())
+ pt = link_member (*l, a, link_info (t.base_scope (), ot));
+
+ // Adjust match options.
+ //
+ if (a.operation () != update_id)
+ {
+ if (t.is_a<exe> ())
+ options = lib::option_install_runtime;
+ else
+ {
+ // This is a library prerequisite of a library target and
+ // runtime-only begets runtime-only.
+ //
+ if (me.cur_options == lib::option_install_runtime)
+ options = lib::option_install_runtime;
+ }
+ }
+
+ // Note: not redundant since we could be returning a member.
+ //
+ if (pt->is_a<libs> () || pt->is_a<liba> ())
+ {
+ return make_pair (is == nullptr || pt->in (*is) ? pt : nullptr,
+ options);
+ }
+ else // libua{} or libus{}
+ {
+ // See through to libu*{} members. Note that we are always in the
+ // same project (and thus amalgamation).
+ //
+ return make_pair (pt, options);
+ }
+ }
+#endif
// The rest of the tests only succeed if the base filter() succeeds.
//
- const target* pt (file_rule::filter (is, a, t, p));
+ const target* pt (file_rule::filter (is, a, t, p, me).first);
if (pt == nullptr)
- return pt;
+ return make_pair (pt, options);
- // Don't install executable's prerequisite headers and module
- // interfaces.
+ // Don't install executable's or runtime-only library's prerequisite
+ // headers and module interfaces.
//
// Note that if they come from a group, then we assume the entire
// group is not to be installed.
@@ -88,16 +228,22 @@ namespace build2
//
auto header_source = [this] (const auto& p)
{
- return (x_header (p) ||
- p.is_a (x_src) ||
- (x_mod != nullptr && p.is_a (*x_mod)));
+ return (x_header (p) ||
+ p.is_a (x_src) ||
+ p.is_a (c::static_type) ||
+ p.is_a (S::static_type) ||
+ (x_mod != nullptr && p.is_a (*x_mod)) ||
+ (x_obj != nullptr && (p.is_a (*x_obj) ||
+ p.is_a (m::static_type))));
};
- if (t.is_a<exe> ())
+ if (t.is_a<exe> () ||
+ (a.operation () != update_id &&
+ me.cur_options == lib::option_install_runtime))
{
if (header_source (p))
pt = nullptr;
- else if (p.type.see_through)
+ else if (p.type.see_through ())
{
for (i.enter_group (); i.group (); )
{
@@ -108,7 +254,7 @@ namespace build2
}
if (pt == nullptr)
- return pt;
+ return make_pair (pt, options);
}
// Here is a problem: if the user spells the obj*/bmi*{} targets
@@ -138,44 +284,63 @@ namespace build2
{
pt = t.is_a<exe> ()
? nullptr
- : file_rule::filter (is, a, *pt, pm.prerequisite);
+ : file_rule::filter (is, a, *pt, pm.prerequisite, me).first;
break;
}
}
if (pt == nullptr)
- return pt;
+ return make_pair (pt, options);
}
- return pt;
+ return make_pair (pt, options);
}
bool install_rule::
- match (action a, target& t, const string& hint) const
+ match (action a, target& t, const string&, match_extra& me) const
{
- // @@ How do we split the hint between the two?
- //
-
// We only want to handle installation if we are also the ones building
// this target. So first run link's match().
//
- return link_.match (a, t, hint) && file_rule::match (a, t, "");
+ return link_.sub_match (x_link, update_id, a, t, me) &&
+ file_rule::match (a, t);
}
recipe install_rule::
- apply (action a, target& t) const
+ apply (action a, target& t, match_extra& me) const
{
- recipe r (file_rule::apply_impl (a, t));
+ // Handle match options.
+ //
+ // Do it before calling apply_impl() since we need this information
+ // in the filter() callbacks.
+ //
+ if (a.operation () != update_id)
+ {
+ if (!t.is_a<exe> ())
+ {
+ if (me.new_options == 0)
+ me.new_options = lib::option_install_runtime; // Minimum we can do.
+
+ me.cur_options = me.new_options;
+ }
+ }
+
+ recipe r (file_rule::apply_impl (
+ a, t, me,
+ me.cur_options != match_extra::all_options /* reapply */));
if (r == nullptr)
+ {
+ me.cur_options = match_extra::all_options; // Noop for all options.
return noop_recipe;
+ }
if (a.operation () == update_id)
{
// Signal to the link rule that this is update for install. And if the
// update has already been executed, verify it was done for install.
//
- auto& md (t.data<link_rule::match_data> ());
+ auto& md (t.data<link_rule::match_data> (a.inner_action ()));
if (md.for_install)
{
@@ -190,30 +355,109 @@ namespace build2
}
else // install or uninstall
{
- // Derive shared library paths and cache them in the target's aux
- // storage if we are un/installing (used in the *_extra() functions
- // below).
- //
- static_assert (sizeof (link_rule::libs_paths) <= target::data_size,
- "insufficient space");
-
- if (file* f = t.is_a<libs> ())
+ file* ls;
+ if ((ls = t.is_a<libs> ()) || t.is_a<liba> ())
{
- if (!f->path ().empty ()) // Not binless.
+ // Derive shared library paths and cache them in the target's aux
+ // storage if we are un/installing (used in the *_extra() functions
+ // below).
+ //
+ link_rule::libs_paths lsp;
+ if (ls != nullptr && !ls->path ().empty ()) // Not binless.
{
const string* p (cast_null<string> (t["bin.lib.prefix"]));
const string* s (cast_null<string> (t["bin.lib.suffix"]));
- t.data (
- link_.derive_libs_paths (*f,
- p != nullptr ? p->c_str (): nullptr,
- s != nullptr ? s->c_str (): nullptr));
+
+ lsp = link_.derive_libs_paths (*ls,
+ p != nullptr ? p->c_str (): nullptr,
+ s != nullptr ? s->c_str (): nullptr);
}
+
+ return install_match_data {move (r), me.cur_options, move (lsp)};
}
}
return r;
}
+ void install_rule::
+ apply_posthoc (action a, target& t, match_extra& me) const
+ {
+ // Similar semantics to filter() above for shared libraries specified as
+ // post hoc prerequisites (e.g., plugins).
+ //
+ if (a.operation () != update_id)
+ {
+ for (posthoc_prerequisite_target& p: *me.posthoc_prerequisite_targets)
+ {
+ if (p.target != nullptr && p.target->is_a<libs> ())
+ {
+ if (t.is_a<exe> ())
+ p.match_options = lib::option_install_runtime;
+ else
+ {
+ if (me.cur_options == lib::option_install_runtime)
+ p.match_options = lib::option_install_runtime;
+ }
+ }
+ }
+ }
+ }
+
+ void install_rule::
+ reapply (action a, target& t, match_extra& me) const
+ {
+ tracer trace ("cc::install_rule::reapply");
+
+ assert (a.operation () != update_id && !t.is_a<exe> ());
+
+ l6 ([&]{trace << "rematching " << t
+ << ", current options " << me.cur_options
+ << ", new options " << me.new_options;});
+
+ me.cur_options |= me.new_options;
+
+ // We also need to update options in install_match_data.
+ //
+ t.data<install_match_data> (a).options = me.cur_options;
+
+ if ((me.new_options & lib::option_install_buildtime) != 0)
+ {
+ // If we are rematched with the buildtime option, propagate it to our
+ // prerequisite libraries.
+ //
+ for (const target* pt: t.prerequisite_targets[a])
+ {
+ if (pt != nullptr && (pt->is_a<liba> () || pt->is_a<libs> () ||
+ pt->is_a<libua> () || pt->is_a<libus> ()))
+ {
+ // Go for all options instead of just install_buildtime to avoid
+ // any further relocking/reapply (we only support runtime-only or
+ // everything).
+ //
+ rematch_sync (a, *pt, match_extra::all_options);
+ }
+ }
+
+ // Also to post hoc.
+ //
+ if (me.posthoc_prerequisite_targets != nullptr)
+ {
+ for (posthoc_prerequisite_target& p: *me.posthoc_prerequisite_targets)
+ {
+ if (p.target != nullptr && p.target->is_a<libs> ())
+ {
+ p.match_options = match_extra::all_options;
+ }
+ }
+ }
+
+ // Also match any additional prerequisites (e.g., headers).
+ //
+ file_rule::reapply_impl (a, t, me);
+ }
+ }
+
bool install_rule::
install_extra (const file& t, const install_dir& id) const
{
@@ -221,14 +465,19 @@ namespace build2
if (t.is_a<libs> ())
{
+ const auto& md (t.data<install_match_data> (perform_install_id));
+
// Here we may have a bunch of symlinks that we need to install.
//
+ // Note that for runtime-only install we only omit the name that is
+ // used for linking (e.g., libfoo.so).
+ //
const scope& rs (t.root_scope ());
- auto& lp (t.data<link_rule::libs_paths> ());
+ const link_rule::libs_paths& lp (md.libs_paths);
- auto ln = [&rs, &id] (const path& f, const path& l)
+ auto ln = [&t, &rs, &id] (const path& f, const path& l)
{
- install_l (rs, id, f.leaf (), l.leaf (), 2 /* verbosity */);
+ install_l (rs, id, l.leaf (), t, f.leaf (), 2 /* verbosity */);
return true;
};
@@ -242,7 +491,10 @@ namespace build2
if (!in.empty ()) {r = ln (*f, in) || r; f = &in;}
if (!so.empty ()) {r = ln (*f, so) || r; f = &so;}
if (!ld.empty ()) {r = ln (*f, ld) || r; f = &ld;}
- if (!lk.empty ()) {r = ln (*f, lk) || r; }
+ if ((md.options & lib::option_install_buildtime) != 0)
+ {
+ if (!lk.empty ()) {r = ln (*f, lk) || r;}
+ }
}
return r;
@@ -255,14 +507,16 @@ namespace build2
if (t.is_a<libs> ())
{
+ const auto& md (t.data<install_match_data> (perform_uninstall_id));
+
// Here we may have a bunch of symlinks that we need to uninstall.
//
const scope& rs (t.root_scope ());
- auto& lp (t.data<link_rule::libs_paths> ());
+ const link_rule::libs_paths& lp (md.libs_paths);
- auto rm = [&rs, &id] (const path& l)
+ auto rm = [&rs, &id] (const path& f, const path& l)
{
- return uninstall_f (rs, id, nullptr, l.leaf (), 2 /* verbosity */);
+ return uninstall_l (rs, id, l.leaf (), f.leaf (), 2 /* verbosity */);
};
const path& lk (lp.link);
@@ -270,10 +524,15 @@ namespace build2
const path& so (lp.soname);
const path& in (lp.interm);
- if (!lk.empty ()) r = rm (lk) || r;
- if (!ld.empty ()) r = rm (ld) || r;
- if (!so.empty ()) r = rm (so) || r;
- if (!in.empty ()) r = rm (in) || r;
+ const path* f (lp.real);
+
+ if (!in.empty ()) {r = rm (*f, in) || r; f = &in;}
+ if (!so.empty ()) {r = rm (*f, so) || r; f = &so;}
+ if (!ld.empty ()) {r = rm (*f, ld) || r; f = &ld;}
+ if ((md.options & lib::option_install_buildtime) != 0)
+ {
+ if (!lk.empty ()) {r = rm (*f, lk) || r;}
+ }
}
return r;
@@ -285,22 +544,30 @@ namespace build2
libux_install_rule (data&& d, const link_rule& l)
: common (move (d)), link_ (l) {}
- const target* libux_install_rule::
+ pair<const target*, uint64_t> libux_install_rule::
filter (const scope* is,
- action a, const target& t, prerequisite_iterator& i) const
+ action a, const target& t, prerequisite_iterator& i,
+ match_extra& me) const
{
using file_rule = install::file_rule;
const prerequisite& p (i->prerequisite);
+ uint64_t options (match_extra::all_options);
+
+ otype ot (link_type (t).type);
+
// The "see through" semantics that should be parallel to install_rule
// above. In particular, here we use libue/libua/libus{} as proxies for
// exe/liba/libs{} there.
//
- otype ot (link_type (t).type);
+ // @@ TMP: drop eventually.
+ //
+#if 0
bool st (t.is_a<libue> () || t.is_a<libus> ()); // Target needs shared.
bool at (t.is_a<libua> () || t.is_a<libus> ()); // Target needs static.
+ assert (st || at);
if ((st && (p.is_a<libx> () || p.is_a<libs> ())) ||
(at && (p.is_a<libx> () || p.is_a<liba> ())))
@@ -311,28 +578,89 @@ namespace build2
pt = link_member (*l, a, link_info (t.base_scope (), ot));
if ((st && pt->is_a<libs> ()) || (at && pt->is_a<liba> ()))
- return is == nullptr || pt->in (*is) ? pt : nullptr;
+ {
+ if (a.operation () != update_id)
+ {
+ if (t.is_a<libue> ())
+ options = lib::option_install_runtime;
+ else
+ {
+ if (me.cur_options == lib::option_install_runtime)
+ options = lib::option_install_runtime;
+ }
+ }
+
+ return make_pair (is == nullptr || pt->in (*is) ? pt : nullptr,
+ options);
+ }
if (pt->is_a<libux> ())
- return pt;
+ {
+ if (a.operation () != update_id && !pt->is_a<libue> ())
+ {
+ if (t.is_a<libue> ())
+ options = lib::option_install_runtime;
+ else
+ {
+ if (me.cur_options == lib::option_install_runtime)
+ options = lib::option_install_runtime;
+ }
+ }
+
+ return make_pair (pt, options);
+ }
+ }
+#else
+ if (p.is_a<libx> () || p.is_a<libs> () || p.is_a<liba> ())
+ {
+ const target* pt (&search (t, p));
+
+ if (const libx* l = pt->is_a<libx> ())
+ pt = link_member (*l, a, link_info (t.base_scope (), ot));
+
+ if (a.operation () != update_id)
+ {
+ if (t.is_a<libue> ())
+ options = lib::option_install_runtime;
+ else
+ {
+ if (me.cur_options == lib::option_install_runtime)
+ options = lib::option_install_runtime;
+ }
+ }
+
+ if (pt->is_a<libs> () || pt->is_a<liba> ())
+ {
+ return make_pair (is == nullptr || pt->in (*is) ? pt : nullptr,
+ options);
+ }
+ else
+ return make_pair (pt, options);
}
+#endif
- const target* pt (file_rule::instance.filter (is, a, t, p));
+ const target* pt (file_rule::instance.filter (is, a, t, p, me).first);
if (pt == nullptr)
- return pt;
+ return make_pair (pt, options);
auto header_source = [this] (const auto& p)
{
- return (x_header (p) ||
- p.is_a (x_src) ||
- (x_mod != nullptr && p.is_a (*x_mod)));
+ return (x_header (p) ||
+ p.is_a (x_src) ||
+ p.is_a (c::static_type) ||
+ p.is_a (S::static_type) ||
+ (x_mod != nullptr && p.is_a (*x_mod)) ||
+ (x_obj != nullptr && (p.is_a (*x_obj) ||
+ p.is_a (m::static_type))));
};
- if (t.is_a<libue> ())
+ if (t.is_a<libue> () ||
+ (a.operation () != update_id &&
+ me.cur_options == lib::option_install_runtime))
{
if (header_source (p))
pt = nullptr;
- else if (p.type.see_through)
+ else if (p.type.see_through ())
{
for (i.enter_group (); i.group (); )
{
@@ -343,7 +671,7 @@ namespace build2
}
if (pt == nullptr)
- return pt;
+ return make_pair (pt, options);
}
bool g (false);
@@ -359,25 +687,103 @@ namespace build2
{
pt = t.is_a<libue> ()
? nullptr
- : file_rule::instance.filter (is, a, *pt, pm.prerequisite);
+ : file_rule::instance.filter (
+ is, a, *pt, pm.prerequisite, me).first;
break;
}
}
if (pt == nullptr)
- return pt;
+ return make_pair (pt, options);
}
- return pt;
+ return make_pair (pt, options);
}
bool libux_install_rule::
- match (action a, target& t, const string& hint) const
+ match (action a, target& t, const string&, match_extra& me) const
{
// We only want to handle installation if we are also the ones building
// this target. So first run link's match().
//
- return link_.match (a, t, hint) && alias_rule::match (a, t, "");
+ return link_.sub_match (x_link, update_id, a, t, me) &&
+ alias_rule::match (a, t);
+ }
+
+ recipe libux_install_rule::
+ apply (action a, target& t, match_extra& me) const
+ {
+ if (a.operation () != update_id)
+ {
+ if (!t.is_a<libue> ())
+ {
+ if (me.new_options == 0)
+ me.new_options = lib::option_install_runtime;
+
+ me.cur_options = me.new_options;
+ }
+ }
+
+ return alias_rule::apply_impl (
+ a, t, me, me.cur_options != match_extra::all_options /* reapply */);
+ }
+
+ void libux_install_rule::
+ apply_posthoc (action a, target& t, match_extra& me) const
+ {
+ if (a.operation () != update_id)
+ {
+ for (posthoc_prerequisite_target& p: *me.posthoc_prerequisite_targets)
+ {
+ if (p.target != nullptr && p.target->is_a<libs> ())
+ {
+ if (t.is_a<libue> ())
+ p.match_options = lib::option_install_runtime;
+ else
+ {
+ if (me.cur_options == lib::option_install_runtime)
+ p.match_options = lib::option_install_runtime;
+ }
+ }
+ }
+ }
+ }
+
+ void libux_install_rule::
+ reapply (action a, target& t, match_extra& me) const
+ {
+      tracer trace ("cc::libux_install_rule::reapply");
+
+ assert (a.operation () != update_id && !t.is_a<libue> ());
+
+ l6 ([&]{trace << "rematching " << t
+ << ", current options " << me.cur_options
+ << ", new options " << me.new_options;});
+
+ me.cur_options |= me.new_options;
+
+ if ((me.new_options & lib::option_install_buildtime) != 0)
+ {
+ for (const target* pt: t.prerequisite_targets[a])
+ {
+ if (pt != nullptr && (pt->is_a<liba> () || pt->is_a<libs> () ||
+ pt->is_a<libua> () || pt->is_a<libus> ()))
+ rematch_sync (a, *pt, match_extra::all_options);
+ }
+
+ if (me.posthoc_prerequisite_targets != nullptr)
+ {
+ for (posthoc_prerequisite_target& p: *me.posthoc_prerequisite_targets)
+ {
+ if (p.target != nullptr && p.target->is_a<libs> ())
+ {
+ p.match_options = match_extra::all_options;
+ }
+ }
+ }
+
+ alias_rule::reapply_impl (a, t, me);
+ }
}
}
}
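The runtime/buildtime match-option protocol implemented above ("runtime-only begets runtime-only" in filter()/apply_posthoc(), and reapply() OR-ing in the new options and upgrading prerequisites to all options once buildtime is requested) can be condensed into a small standalone illustration. The bit values below are made up for the example; the real constants are lib::option_install_runtime, lib::option_install_buildtime, and match_extra::all_options.

```cpp
#include <cstdint>
#include <cassert>

int main ()
{
  // Hypothetical bit values for illustration only.
  //
  const std::uint64_t runtime   = 0x1;
  const std::uint64_t buildtime = 0x2;
  const std::uint64_t all       = ~std::uint64_t (0);

  // A library matched runtime-only propagates runtime-only to its own
  // library prerequisites.
  //
  std::uint64_t cur (runtime);
  std::uint64_t prereq (cur == runtime ? runtime : all);
  assert (prereq == runtime);

  // On reapply the new options are OR-ed in; once buildtime is requested,
  // prerequisites are rematched with all options to avoid further reapplies
  // (only runtime-only or everything is supported).
  //
  std::uint64_t new_options (buildtime);
  cur |= new_options;

  if ((new_options & buildtime) != 0)
    prereq = all;

  assert ((cur & buildtime) != 0 && prereq == all);
}
```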
diff --git a/libbuild2/cc/install-rule.hxx b/libbuild2/cc/install-rule.hxx
index acd1bd8..771c33b 100644
--- a/libbuild2/cc/install-rule.hxx
+++ b/libbuild2/cc/install-rule.hxx
@@ -20,7 +20,7 @@ namespace build2
{
class link_rule;
- // Installation rule for exe{} and lib*{}. Here we do:
+ // Installation rule for exe{} and lib[as]{}. Here we do:
//
// 1. Signal to the link rule that this is update for install.
//
@@ -28,21 +28,37 @@ namespace build2
//
// 3. Extra un/installation (e.g., libs{} symlinks).
//
+ // 4. Handling runtime/buildtime match options for lib[as]{}.
+ //
class LIBBUILD2_CC_SYMEXPORT install_rule: public install::file_rule,
virtual common
{
public:
install_rule (data&&, const link_rule&);
- virtual const target*
+ virtual bool
+ filter (action, const target&, const target&) const override;
+
+ virtual pair<const target*, uint64_t>
filter (const scope*,
- action, const target&, prerequisite_iterator&) const override;
+ action, const target&, prerequisite_iterator&,
+ match_extra&) const override;
+ // Note: rule::match() override (with hint and match_extra).
+ //
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&, const string&, match_extra&) const override;
+
+ using file_rule::match; // Make Clang happy.
virtual recipe
- apply (action, target&) const override;
+ apply (action, target&, match_extra&) const override;
+
+ virtual void
+ apply_posthoc (action, target&, match_extra&) const override;
+
+ virtual void
+ reapply (action, target&, match_extra&) const override;
virtual bool
install_extra (const file&, const install_dir&) const override;
@@ -54,25 +70,40 @@ namespace build2
const link_rule& link_;
};
- // Installation rule for libu*{}.
+ // Installation rule for libu[eas]{}.
//
// While libu*{} members themselves are not installable, we need to see
// through them in case they depend on stuff that we need to install
// (e.g., headers). Note that we use the alias_rule as a base.
//
- class LIBBUILD2_CC_SYMEXPORT libux_install_rule:
- public install::alias_rule,
- virtual common
+ class LIBBUILD2_CC_SYMEXPORT libux_install_rule: public install::alias_rule,
+ virtual common
{
public:
libux_install_rule (data&&, const link_rule&);
- virtual const target*
+ // Note: utility libraries currently have no ad hoc members.
+
+ virtual pair<const target*, uint64_t>
filter (const scope*,
- action, const target&, prerequisite_iterator&) const override;
+ action, const target&, prerequisite_iterator&,
+ match_extra&) const override;
+ // Note: rule::match() override.
+ //
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&, const string&, match_extra&) const override;
+
+ using alias_rule::match; // Make Clang happy.
+
+ virtual recipe
+ apply (action, target&, match_extra&) const override;
+
+ virtual void
+ apply_posthoc (action, target&, match_extra&) const override;
+
+ virtual void
+ reapply (action, target&, match_extra&) const override;
private:
const link_rule& link_;
diff --git a/libbuild2/cc/lexer+comment.test.testscript b/libbuild2/cc/lexer+comment.test.testscript
index 358865c..381e479 100644
--- a/libbuild2/cc/lexer+comment.test.testscript
+++ b/libbuild2/cc/lexer+comment.test.testscript
@@ -16,6 +16,11 @@ four
/**
six /*
*/
+/* */
+/*
+
+*/
+/**/
EOI
: cxx-comment
diff --git a/libbuild2/cc/lexer+raw-string-literal.test.testscript b/libbuild2/cc/lexer+raw-string-literal.test.testscript
index bca489a..a6455eb 100644
--- a/libbuild2/cc/lexer+raw-string-literal.test.testscript
+++ b/libbuild2/cc/lexer+raw-string-literal.test.testscript
@@ -16,6 +16,7 @@ R"X(a
b)X"
R"X(a\
b)X"
+R""(a)""
EOI
<string literal>
<string literal>
@@ -24,6 +25,7 @@ EOI
<string literal>
<string literal>
<string literal>
+<string literal>
EOO
: prefix
diff --git a/libbuild2/cc/lexer.cxx b/libbuild2/cc/lexer.cxx
index beeb970..d20e0dc 100644
--- a/libbuild2/cc/lexer.cxx
+++ b/libbuild2/cc/lexer.cxx
@@ -214,7 +214,7 @@ namespace build2
// #line <integer> [<string literal>] ...
// # <integer> [<string literal>] ...
//
- // Also diagnose #include while at it.
+ // Also diagnose #include while at it if preprocessed.
//
if (!(c >= '0' && c <= '9'))
{
@@ -222,10 +222,13 @@ namespace build2
if (t.type == type::identifier)
{
- if (t.value == "include")
- fail (l) << "unexpected #include directive";
- else if (t.value != "line")
+ if (t.value != "line")
+ {
+ if (preprocessed_ && t.value == "include")
+ fail (l) << "unexpected #include directive";
+
continue;
+ }
}
else
continue;
@@ -734,8 +737,8 @@ namespace build2
// R"<delimiter>(<raw_characters>)<delimiter>"
//
// Where <delimiter> is a potentially-empty character sequence made of
- // any source character but parentheses, backslash and spaces. It can be
- // at most 16 characters long.
+ // any source character but parentheses, backslash, and spaces (in
+ // particular, it can be `"`). It can be at most 16 characters long.
//
// Note that the <raw_characters> are not processed in any way, not even
// for line continuations.
@@ -750,7 +753,7 @@ namespace build2
{
c = geth ();
- if (eos (c) || c == '\"' || c == ')' || c == '\\' || c == ' ')
+ if (eos (c) || c == ')' || c == '\\' || c == ' ')
fail (l) << "invalid raw string literal";
if (c == '(')
@@ -1108,21 +1111,18 @@ namespace build2
if (eos (c))
fail (p) << "unterminated comment";
- if (c == '*' && (c = peek ()) == '/')
+ if (c == '*')
{
- get (c);
- break;
+ if ((c = peek ()) == '/')
+ {
+ get (c);
+ break;
+ }
}
-
- if (c != '*' && c != '\\')
+ else
{
// Direct buffer scan.
//
- // Note that we should call get() prior to the direct buffer
- // scan (see butl::char_scanner for details).
- //
- get (c);
-
const char* b (gptr_);
const char* e (egptr_);
const char* p (b);
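The raw string literal delimiter rule relaxed above (any source character except parentheses, backslash, and spaces, at most 16 characters, with `"` now explicitly allowed) is what makes the new R""(a)"" test case a single string literal. A tiny standalone check of that rule (valid_raw_delimiter() is a hypothetical helper, not the lexer's actual code):

```cpp
#include <string>
#include <cassert>

// Check a raw string literal delimiter: at most 16 characters, none of
// which may be a parenthesis, backslash, or space. A double quote is
// allowed, which is why R""(a)"" lexes as one string literal.
//
static bool
valid_raw_delimiter (const std::string& d)
{
  if (d.size () > 16)
    return false;

  for (char c: d)
    if (c == '(' || c == ')' || c == '\\' || c == ' ')
      return false;

  return true;
}

int main ()
{
  assert (valid_raw_delimiter (""));
  assert (valid_raw_delimiter ("X"));
  assert (valid_raw_delimiter ("\""));
  assert (!valid_raw_delimiter ("a b"));
  assert (!valid_raw_delimiter ("12345678901234567")); // 17 characters.
}
```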
diff --git a/libbuild2/cc/lexer.hxx b/libbuild2/cc/lexer.hxx
index 81e0d97..17d706b 100644
--- a/libbuild2/cc/lexer.hxx
+++ b/libbuild2/cc/lexer.hxx
@@ -12,6 +12,8 @@
#include <libbuild2/diagnostics.hxx>
+#include <libbuild2/cc/export.hxx>
+
namespace build2
{
namespace cc
@@ -20,13 +22,15 @@ namespace build2
// sequence of tokens returned is similar to what a real C/C++ compiler
// would see from its preprocessor.
//
- // The input is a (partially-)preprocessed translation unit that may still
- // contain comments, line continuations, and preprocessor directives such
- // as #line, #pragma, but not #include (which is diagnosed). Currently,
- // all preprocessor directives except #line are ignored and no values are
- // saved from literals. The #line directive (and its shorthand notation)
- // is recognized to provide the logical token location. Note that the
- // modules-related pseudo-directives are not recognized or handled.
+ // The input is a potentially (partially-)preprocessed translation unit
+ // that may still contain comments, line continuations, and preprocessor
+ // directives such as #line and #pragma. If the input is said to be
+ // (partially-)preprocessed then #include directives are diagnosed.
+ // Currently, all preprocessor directives except #line are ignored and no
+ // values are saved from literals. The #line directive (and its shorthand
+ // notation) is recognized to provide the logical token location. Note
+ // that the modules-related pseudo-directives are not recognized or
+ // handled.
//
// While at it we also calculate the checksum of the input ignoring
// comments, whitespaces, etc. This is used to detect changes that do not
@@ -80,15 +84,19 @@ namespace build2
// Output the token value in a format suitable for diagnostics.
//
- ostream&
+ LIBBUILD2_CC_SYMEXPORT ostream&
operator<< (ostream&, const token&);
- class lexer: protected butl::char_scanner<>
+ class LIBBUILD2_CC_SYMEXPORT lexer: protected butl::char_scanner<>
{
public:
- lexer (ifdstream& is, const path_name& name)
+ // If preprocessed is true, then assume the input is at least partially
+ // preprocessed and therefore should not contain #include directives.
+ //
+ lexer (ifdstream& is, const path_name& name, bool preprocessed)
: char_scanner (is, false /* crlf */),
name_ (name),
+ preprocessed_ (preprocessed),
fail ("error", &name_),
log_file_ (name)
{
@@ -173,6 +181,8 @@ namespace build2
private:
const path_name& name_;
+ bool preprocessed_;
+
const fail_mark fail;
// Logical file and line as set by the #line directives. Note that the
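To illustrate the new constructor signature, here is a minimal sketch of setting up the lexer (the stream and path_name setup mirrors the lexer.test.cxx change below; treat it as a sketch rather than a complete driver):

ifdstream is (fddup (stdin_fd ()));
path_name in ("<stdin>");

// Passing true marks the input as (at least partially) preprocessed, so
// any #include directive is diagnosed as an error; passing false leaves
// #include to be ignored like the other directives.
//
lexer l (is, in, true /* preprocessed */);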
diff --git a/libbuild2/cc/lexer.test.cxx b/libbuild2/cc/lexer.test.cxx
index 0d7d12f..82163fe 100644
--- a/libbuild2/cc/lexer.test.cxx
+++ b/libbuild2/cc/lexer.test.cxx
@@ -6,6 +6,7 @@
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
+#include <libbuild2/cc/types.hxx>
#include <libbuild2/cc/lexer.hxx>
#undef NDEBUG
@@ -64,7 +65,7 @@ namespace build2
is.open (fddup (stdin_fd ()));
}
- lexer l (is, in);
+ lexer l (is, in, true /* preprocessed */);
// No use printing eos since we will either get it or loop forever.
//
diff --git a/libbuild2/cc/link-rule.cxx b/libbuild2/cc/link-rule.cxx
index 79de01c..08a60b9 100644
--- a/libbuild2/cc/link-rule.cxx
+++ b/libbuild2/cc/link-rule.cxx
@@ -20,6 +20,8 @@
#include <libbuild2/bin/target.hxx>
#include <libbuild2/bin/utility.hxx>
+#include <libbuild2/install/utility.hxx>
+
#include <libbuild2/cc/target.hxx> // c, pc*
#include <libbuild2/cc/utility.hxx>
@@ -94,7 +96,7 @@ namespace build2
return false;
}
- if (const target* t = search_existing (n, bs, dir_path () /* out */))
+ if (const target* t = search_existing (n, bs))
{
// The same logic as in process_libraries().
//
@@ -156,7 +158,7 @@ namespace build2
{
if (s[0] == '-')
{
- // -l<name>, -l <name>
+ // -l<name>, -l <name> (Note: not -pthread, which is system)
//
if (s[1] == 'l')
{
@@ -256,8 +258,6 @@ namespace build2
: common (move (d)),
rule_id (string (x) += ".link 3")
{
- static_assert (sizeof (match_data) <= target::data_size,
- "insufficient space");
}
link_rule::match_result link_rule::
@@ -282,17 +282,25 @@ namespace build2
{
// If excluded or ad hoc, then don't factor it into our tests.
//
- if (include (a, t, p) != include_type::normal)
+ // Note that here we don't validate the update operation override
+ // value (since we may not match). Instead we do this in apply().
+ //
+ lookup l;
+ if (include (a, t, p, a.operation () == update_id ? &l : nullptr) !=
+ include_type::normal)
continue;
if (p.is_a (x_src) ||
(x_mod != nullptr && p.is_a (*x_mod)) ||
+ (x_asp != nullptr && p.is_a (*x_asp)) ||
+ (x_obj != nullptr && p.is_a (*x_obj)) ||
// Header-only X library (or library with C source and X header).
(library && x_header (p, false /* c_hdr */)))
{
r.seen_x = true;
}
- else if (p.is_a<c> () ||
+ else if (p.is_a<c> () || p.is_a<S> () ||
+ (x_obj != nullptr && p.is_a<m> ()) ||
// Header-only C library.
(library && p.is_a<h> ()))
{
@@ -347,6 +355,11 @@ namespace build2
const target* pg (nullptr);
const target* pt (p.search_existing ());
+ auto search = [&t, &p] (const target_type& tt)
+ {
+ return search_existing (t.ctx, p.prerequisite.key (tt));
+ };
+
if (p.is_a<libul> ())
{
if (pt != nullptr)
@@ -369,23 +382,33 @@ namespace build2
{
// It's possible we have no group but have a member so try that.
//
- const target_type& tt (ot == otype::a ? libua::static_type :
- ot == otype::s ? libus::static_type :
- libue::static_type);
+ if (ot != otype::e)
+ {
+ // We know this prerequisite member is a prerequisite since
+ // otherwise the above search would have returned the member
+ // target.
+ //
+ pt = search (ot == otype::a
+ ? libua::static_type
+ : libus::static_type);
+ }
+ else
+ {
+ // Similar semantics to bin::link_member(): prefer static over
+ // shared.
+ //
+ pt = search (libua::static_type);
- // We know this prerequisite member is a prerequisite since
- // otherwise the above search would have returned the member
- // target.
- //
- pt = search_existing (t.ctx, p.prerequisite.key (tt));
+ if (pt == nullptr)
+ pt = search (libus::static_type);
+ }
}
}
else if (!p.is_a<libue> ())
{
// See if we also/instead have a group.
//
- pg = search_existing (t.ctx,
- p.prerequisite.key (libul::static_type));
+ pg = search (libul::static_type);
if (pt == nullptr)
swap (pt, pg);
@@ -413,9 +436,12 @@ namespace build2
r.seen_lib = true;
}
// Some other c-common header/source (say C++ in a C rule) other than
- // a C header (we assume everyone can hanle that).
+ // a C header (we assume everyone can handle that) or some other
+ // #include'able target.
//
- else if (p.is_a<cc> () && !(x_header (p, true /* c_hdr */)))
+ else if (p.is_a<cc> () &&
+ !(x_header (p, true /* c_hdr */)) &&
+ !p.is_a (x_inc) && !p.is_a<c_inc> ())
{
r.seen_cc = true;
break;
@@ -426,7 +452,7 @@ namespace build2
}
bool link_rule::
- match (action a, target& t, const string& hint) const
+ match (action a, target& t, const string& hint, match_extra&) const
{
// NOTE: may be called multiple times and for both inner and outer
// operations (see the install rules).
@@ -465,17 +491,22 @@ namespace build2
return false;
}
- if (!(r.seen_x || r.seen_c || r.seen_obj || r.seen_lib))
+ // Sometimes we may need to have a binless library whose only purpose is
+ // to export dependencies on other libraries (potentially in a platform-
+ // specific manner; think the whole -pthread mess). So allow a library
+ // without any sources with a hint.
+ //
+ if (!(r.seen_x || r.seen_c || r.seen_obj || r.seen_lib || !hint.empty ()))
{
- l4 ([&]{trace << "no " << x_lang << ", C, or obj/lib prerequisite "
- << "for target " << t;});
+ l4 ([&]{trace << "no " << x_lang << ", C, obj/lib prerequisite or "
+ << "hint for target " << t;});
return false;
}
// We will only chain a C source if there is also an X source or we were
// explicitly told to.
//
- if (r.seen_c && !r.seen_x && hint < x)
+ if (r.seen_c && !r.seen_x && hint.empty ())
{
l4 ([&]{trace << "C prerequisite without " << x_lang << " or hint "
<< "for target " << t;});
@@ -813,6 +844,15 @@ namespace build2
//
if (const libul* ul = pt->is_a<libul> ())
{
+ // @@ Isn't libul{} member already picked or am I missing something?
+ // If not, then we may need the same in recursive-binless logic.
+ //
+#if 0
+ // @@ TMP hm, this hasn't actually been enabled. So may actually
+ // enable and see if it trips up (do git-blame for good measure).
+ //
+ assert (false); // @@ TMP (remove before 0.16.0 release)
+#endif
ux = &link_member (*ul, a, li)->as<libux> ();
}
else if ((ux = pt->is_a<libue> ()) ||
@@ -829,8 +869,20 @@ namespace build2
return nullptr;
};
+ // Given the cc.type value return true if the library is recursively
+ // binless.
+ //
+ static inline bool
+ recursively_binless (const string& type)
+ {
+ size_t p (type.find ("recursively-binless"));
+ return (p != string::npos &&
+ type[p - 1] == ',' && // <lang> is first.
+ (type[p += 19] == '\0' || type[p] == ','));
+ }
+
recipe link_rule::
- apply (action a, target& xt) const
+ apply (action a, target& xt, match_extra&) const
{
tracer trace (x, "link_rule::apply");
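As a usage note for recursively_binless() above: the cc.type values it expects are of the <lang>[,...] form that apply() composes further down (for example, "cxx", "cxx,binless", "cxx,recursively-binless"). A hypothetical self-check, assuming the function above is visible and <cassert> is included:

assert (!recursively_binless ("cxx"));
assert (!recursively_binless ("cxx,binless"));
assert ( recursively_binless ("cxx,recursively-binless"));
assert ( recursively_binless ("c,recursively-binless"));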
@@ -840,7 +892,11 @@ namespace build2
// Note that for_install is signalled by install_rule and therefore
// can only be relied upon during execute.
//
- match_data& md (t.data (match_data ()));
+ // Note that we don't really need to set it as target data: while there
+ // are calls to get it, they should only happen after the target has
+ // been matched.
+ //
+ match_data md (*this);
const scope& bs (t.base_scope ());
const scope& rs (*bs.root_scope ());
@@ -849,11 +905,6 @@ namespace build2
otype ot (lt.type);
linfo li (link_info (bs, ot));
- // Set the library type (C, C++, etc) as rule-specific variable.
- //
- if (lt.library ())
- t.state[a].assign (c_type) = string (x);
-
bool binless (lt.library ()); // Binary-less until proven otherwise.
bool user_binless (lt.library () && cast_false<bool> (t[b_binless]));
@@ -861,7 +912,7 @@ namespace build2
// for binless libraries since there could be other output (e.g., .pc
// files).
//
- inject_fsdir (a, t);
+ const fsdir* dir (inject_fsdir (a, t));
// Process prerequisites, pass 1: search and match prerequisite
// libraries, search obj/bmi{} targets, and search targets we do rule
@@ -875,7 +926,7 @@ namespace build2
// We do libraries first in order to indicate that we will execute these
// targets before matching any of the obj/bmi{}. This makes it safe for
// compile::apply() to unmatch them and therefore not to hinder
- // parallelism.
+ // parallelism (or mess up for-install'ness).
//
// We also create obj/bmi{} chain targets because we need to add
// (similar to lib{}) all the bmi{} as prerequisites to all the other
@@ -899,33 +950,98 @@ namespace build2
return a.operation () == clean_id && !pt.dir.sub (rs.out_path ());
};
+ bool update_match (false); // Have update during match.
+
auto& pts (t.prerequisite_targets[a]);
size_t start (pts.size ());
for (prerequisite_member p: group_prerequisite_members (a, t))
{
- include_type pi (include (a, t, p));
+ // Note that we have to recognize update=match for *(update), not just
+ // perform(update). But only actually update for perform(update).
+ //
+ lookup l; // The `update` variable value, if any.
+ include_type pi (
+ include (a, t, p, a.operation () == update_id ? &l : nullptr));
// We pre-allocate a NULL slot for each (potential; see clean)
// prerequisite target.
//
pts.push_back (prerequisite_target (nullptr, pi));
- const target*& pt (pts.back ());
+ auto& pto (pts.back ());
+
+ // Use bit 2 of prerequisite_target::include to signal update during
+ // match.
+ //
+ // Note that for now we only allow updating during match ad hoc and
+ // mark 3 (headers, etc; see below) prerequisites.
+ //
+ // By default we update during match headers and ad hoc sources (which
+ // are commonly marked as such because they are #include'ed).
+ //
+ optional<bool> um;
+
+ if (l)
+ {
+ const string& v (cast<string> (l));
+
+ if (v == "match")
+ um = true;
+ else if (v == "execute")
+ um = false;
+ else if (v != "false" && v != "true")
+ {
+ fail << "unrecognized update variable value '" << v
+ << "' specified for prerequisite " << p.prerequisite;
+ }
+ }
+
+ // Skip excluded and ad hoc (unless updated during match) on this
+ // pass.
+ //
+ if (pi != include_type::normal)
+ {
+ if (a == perform_update_id && pi == include_type::adhoc)
+ {
+ // By default update ad hoc headers/sources during match (see
+ // above).
+ //
+#if 1
+ if (!um)
+ um = (p.is_a (x_src) || p.is_a<c> () || p.is_a<S> () ||
+ (x_mod != nullptr && p.is_a (*x_mod)) ||
+ (x_obj != nullptr && (p.is_a (*x_obj) || p.is_a<m> ())) ||
+ x_header (p, true));
+#endif
+
+ if (*um)
+ {
+ pto.target = &p.search (t); // mark 0
+ pto.include |= prerequisite_target::include_udm;
+ update_match = true;
+ }
+ }
- if (pi != include_type::normal) // Skip excluded and ad hoc.
continue;
+ }
+
+ const target*& pt (pto);
- // Mark:
- // 0 - lib
+ // Mark (2 bits):
+ //
+ // 0 - lib or update during match
// 1 - src
// 2 - mod
- // 3 - obj/bmi and also lib not to be cleaned
+ // 3 - obj/bmi and also lib not to be cleaned (and other stuff)
//
- uint8_t m (0);
+ uint8_t mk (0);
bool mod (x_mod != nullptr && p.is_a (*x_mod));
+ bool hdr (false);
- if (mod || p.is_a (x_src) || p.is_a<c> ())
+ if (mod ||
+ p.is_a (x_src) || p.is_a<c> () || p.is_a<S> () ||
+ (x_obj != nullptr && (p.is_a (*x_obj) || p.is_a<m> ())))
{
binless = binless && (mod ? user_binless : false);
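To summarize the update variable handling introduced in this hunk: match requests updating the prerequisite during match, execute during execution, true/false are the usual operation override values, and anything else is an error. A hedged standalone sketch of that mapping (hypothetical helper using std types, not part of the rule):

#include <optional>
#include <stdexcept>
#include <string>

static std::optional<bool>                 // true: during match;
update_during_match (const std::string& v) // false: during execute.
{
  if (v == "match")   return true;
  if (v == "execute") return false;

  if (v != "true" && v != "false")
    throw std::invalid_argument (
      "unrecognized update variable value '" + v + "'");

  return std::nullopt; // Default (headers and ad hoc sources during match).
}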
@@ -976,8 +1092,8 @@ namespace build2
// be the group -- we will pick a member in part 2 below.
//
pair<target&, ulock> r (
- search_locked (
- t, rtt, d, dir_path (), *cp.tk.name, nullptr, cp.scope));
+ search_new_locked (
+ ctx, rtt, d, dir_path (), *cp.tk.name, nullptr, cp.scope));
// If we shouldn't clean obj{}, then it is fair to assume we
// shouldn't clean the source either (generated source will be in
@@ -1013,7 +1129,7 @@ namespace build2
}
pt = &r.first;
- m = mod ? 2 : 1;
+ mk = mod ? 2 : 1;
}
else if (p.is_a<libx> () ||
p.is_a<liba> () ||
@@ -1022,12 +1138,8 @@ namespace build2
{
// Handle imported libraries.
//
- // Note that since the search is rule-specific, we don't cache the
- // target in the prerequisite.
- //
if (p.proj ())
- pt = search_library (
- a, sys_lib_dirs, usr_lib_dirs, p.prerequisite);
+ pt = search_library (a, sys_lib_dirs, usr_lib_dirs, p.prerequisite);
// The rest is the same basic logic as in search_and_match().
//
@@ -1035,13 +1147,17 @@ namespace build2
pt = &p.search (t);
if (skip (*pt))
- m = 3; // Mark so it is not matched.
+ mk = 3; // Mark so it is not matched.
// If this is the lib{}/libul{} group, then pick the appropriate
- // member.
+ // member. Also note this in prerequisite_target::include (used
+ // by process_libraries()).
//
if (const libx* l = pt->is_a<libx> ())
+ {
pt = link_member (*l, a, li);
+ pto.include |= include_group;
+ }
}
else
{
@@ -1054,8 +1170,11 @@ namespace build2
// Windows module definition (.def). For other platforms (and for
// static libraries) treat it as an ordinary prerequisite.
//
- else if (p.is_a<def> () && tclass == "windows" && ot != otype::a)
+ else if (p.is_a<def> ())
{
+ if (tclass != "windows" || ot == otype::a)
+ continue;
+
pt = &p.search (t);
}
//
@@ -1065,11 +1184,14 @@ namespace build2
//
else
{
- if (!p.is_a<objx> () && !p.is_a<bmix> () && !x_header (p, true))
+ if (!p.is_a<objx> () &&
+ !p.is_a<bmix> () &&
+ !(hdr = x_header (p, true)))
{
// @@ Temporary hack until we get the default outer operation
// for update. This allows operations like test and install to
- // skip such tacked on stuff.
+ // skip such tacked on stuff. @@ This doesn't feel temporary
+ // anymore...
//
// Note that ad hoc inputs have to be explicitly marked with the
// include=adhoc prerequisite-specific variable.
@@ -1079,6 +1201,12 @@ namespace build2
}
pt = &p.search (t);
+
+ if (pt == dir)
+ {
+ pt = nullptr;
+ continue;
+ }
}
if (skip (*pt))
@@ -1097,21 +1225,58 @@ namespace build2
!pt->is_a<hbmix> () &&
cast_false<bool> ((*pt)[b_binless])));
- m = 3;
+ mk = 3;
}
if (user_binless && !binless)
fail << t << " cannot be binless due to " << p << " prerequisite";
- mark (pt, m);
+ // Upgrade update during match prerequisites to mark 0 (see above for
+ // details).
+ //
+ if (a == perform_update_id)
+ {
+ // By default update headers during match (see above).
+ //
+#if 1
+ if (!um)
+ um = hdr;
+#endif
+
+ if (*um)
+ {
+ if (mk != 3)
+ fail << "unable to update during match prerequisite " << p <<
+ info << "updating this type of prerequisites during match is "
+ << "not supported by this rule";
+
+ mk = 0;
+ pto.include |= prerequisite_target::include_udm;
+ update_match = true;
+ }
+ }
+
+ mark (pt, mk);
}
- // Match lib{} (the only unmarked) in parallel and wait for completion.
+ // Match lib{} first and then update during match (the only unmarked) in
+ // parallel and wait for completion. We need to match libraries first
+ // because matching generated headers/sources may lead to matching some
+ // of the libraries (for example, if generation requires some of the
+ // metadata; think poptions needed by Qt moc).
//
- match_members (a, t, pts, start);
+ {
+ auto mask (prerequisite_target::include_udm);
+
+ match_members (a, t, pts, start, {mask, 0});
+
+ if (update_match)
+ match_members (a, t, pts, start, {mask, mask});
+ }
// Check if we have any binful utility libraries.
//
+ bool rec_binless (false); // Recursively-binless.
if (binless)
{
if (const libux* l = find_binful (a, t, li))
@@ -1122,8 +1287,128 @@ namespace build2
fail << t << " cannot be binless due to binful " << *l
<< " prerequisite";
}
+
+ // See if we are recursively-binless.
+ //
+ if (binless)
+ {
+ rec_binless = true;
+
+ for (const target* pt: t.prerequisite_targets[a])
+ {
+ if (pt == nullptr || unmark (pt) != 0) // See above.
+ continue;
+
+ const file* ft;
+ if ((ft = pt->is_a<libs> ()) ||
+ (ft = pt->is_a<liba> ()) ||
+ (ft = pt->is_a<libux> ()))
+ {
+ if (ft->path ().empty ()) // Binless.
+ {
+ // The same lookup as in process_libraries().
+ //
+ if (const string* t = cast_null<string> (
+ ft->state[a].lookup_original (
+ c_type, true /* target_only */).first))
+ {
+ if (recursively_binless (*t))
+ continue;
+ }
+ }
+
+ rec_binless = false;
+ break;
+ }
+ }
+
+ // Another thing we must check is for the presence of any simple
+ // libraries (-lm, shell32.lib, etc) in *.export.libs. See
+ // process_libraries() for details.
+ //
+ if (rec_binless)
+ {
+ auto find = [&t, &bs] (const variable& v) -> lookup
+ {
+ return t.lookup_original (v, false, &bs).first;
+ };
+
+ auto has_simple = [] (lookup l)
+ {
+ if (const auto* ns = cast_null<vector<name>> (l))
+ {
+ for (auto i (ns->begin ()), e (ns->end ()); i != e; ++i)
+ {
+ if (i->pair)
+ ++i;
+ else if (i->simple ()) // -l<name>, etc.
+ return true;
+ }
+ }
+
+ return false;
+ };
+
+ if (lt.shared_library ()) // process_libraries()::impl == false
+ {
+ if (has_simple (find (x_export_libs)) ||
+ has_simple (find (c_export_libs)))
+ rec_binless = false;
+ }
+ else // process_libraries()::impl == true
+ {
+ lookup x (find (x_export_impl_libs));
+ lookup c (find (c_export_impl_libs));
+
+ if (x.defined () || c.defined ())
+ {
+ if (has_simple (x) || has_simple (c))
+ rec_binless = false;
+ }
+ else
+ {
+ // These are strings and we assume if either is defined and
+ // not empty, then we have simple libraries.
+ //
+ if (((x = find (x_libs)) && !x->empty ()) ||
+ ((c = find (c_libs)) && !c->empty ()))
+ rec_binless = false;
+ }
+ }
+ }
+ }
}
+ // Set the library type (C, C++, binless) as rule-specific variable.
+ //
+ if (lt.library ())
+ {
+ string v (x);
+
+ if (rec_binless)
+ v += ",recursively-binless";
+ else if (binless)
+ v += ",binless";
+
+ t.state[a].assign (c_type) = move (v);
+ }
+
+ // If we have any update during match prerequisites, now is the time to
+ // update them. Note that we have to do it before any further matches
+ // since they may rely on these prerequisites already being updated (for
+ // example, object file matches may need the headers to be already
+ // updated). We also must do it after matching all our prerequisite
+ // libraries since they may generate headers that we depend upon.
+ //
+ // Note that we ignore the result and whether it renders us out of date,
+ // leaving it to the common execute logic in perform_update().
+ //
+ // Note also that update_during_match_prerequisites() spoils
+ // prerequisite_target::data.
+ //
+ if (update_match)
+ update_during_match_prerequisites (trace, a, t);
+
// Now that we know for sure whether we are binless, derive file name(s)
// and add ad hoc group members. Note that for binless we still need the
// .pc member (whose name depends on the library prefix) so we take care
@@ -1267,11 +1552,26 @@ namespace build2
if (wasm.path ().empty ())
wasm.derive_path ();
+ // We don't want to print this member at level 1 diagnostics.
+ //
+ wasm.state[a].assign (ctx.var_backlink) = names {
+ name ("group"), name ("false")};
+
// If we have -pthread then we get additional .worker.js file
// which is used for thread startup. In a somewhat hackish way we
// represent it as an exe{} member to make sure it gets installed
// next to the main .js file.
//
+ // @@ Note that our recommendation is to pass -pthread in *.libs
+ // but checking that is not straightforward (it could come from
+ // one of the libraries that we are linking). We could have called
+ // append_libraries() (similar to $x.lib_libs()) and then looked
+ // there. But this is quite heavy handed and it's not clear this
+ // is worth the trouble since the -pthread support in Emscripten
+ // is quite high-touch (i.e., it's not like we can write a library
+ // that starts some threads and then run its test as on any other
+ // POSIX platform).
+ //
if (find_option ("-pthread", cmode) ||
find_option ("-pthread", t, c_loptions) ||
find_option ("-pthread", t, x_loptions))
@@ -1280,6 +1580,11 @@ namespace build2
if (worker.path ().empty ())
worker.derive_path ();
+
+ // We don't want to print this member at level 1 diagnostics.
+ //
+ worker.state[a].assign (ctx.var_backlink) = names {
+ name ("group"), name ("false")};
}
}
@@ -1288,22 +1593,31 @@ namespace build2
//
if (!binless && ot != otype::a && tsys == "win32-msvc")
{
- if (find_option ("/DEBUG", t, c_loptions, true) ||
- find_option ("/DEBUG", t, x_loptions, true))
+ const string* o;
+ if ((o = find_option_prefix ("/DEBUG", t, c_loptions, true)) != nullptr ||
+ (o = find_option_prefix ("/DEBUG", t, x_loptions, true)) != nullptr)
{
- const target_type& tt (*bs.find_target_type ("pdb"));
+ if (icasecmp (*o, "/DEBUG:NONE") != 0)
+ {
+ const target_type& tt (*bs.find_target_type ("pdb"));
- // We call the target foo.{exe,dll}.pdb rather than just foo.pdb
- // because we can have both foo.exe and foo.dll in the same
- // directory.
- //
- file& pdb (add_adhoc_member<file> (t, tt, e));
+ // We call the target foo.{exe,dll}.pdb rather than just
+ // foo.pdb because we can have both foo.exe and foo.dll in the
+ // same directory.
+ //
+ file& pdb (add_adhoc_member<file> (t, tt, e));
- // Note that the path is derived from the exe/dll path (so it
- // will include the version in case of a dll).
- //
- if (pdb.path ().empty ())
- pdb.derive_path (t.path ());
+ // Note that the path is derived from the exe/dll path (so it
+ // will include the version in case of a dll).
+ //
+ if (pdb.path ().empty ())
+ pdb.derive_path (t.path ());
+
+ // We don't want to print this member at level 1 diagnostics.
+ //
+ pdb.state[a].assign (ctx.var_backlink) = names {
+ name ("group"), name ("false")};
+ }
}
}
@@ -1325,6 +1639,13 @@ namespace build2
// we will use its bin.lib to decide what will be installed and in
// perform_update() we will confirm that it is actually installed.
//
+ // This, of course, works only if we actually have explicit lib{}.
+ // But the user could only have liba{} (common in testing frameworks
+ // that provide main()) or only libs{} (e.g., plugin that can also
+ // be linked). It's also theoretically possible to have both liba{}
+ // and libs{} but no lib{}, in which case it feels correct not to
+ // generate the common file at all.
+ //
if (ot != otype::e)
{
// Note that here we always use the lib name prefix, even on
@@ -1336,7 +1657,13 @@ namespace build2
// Note also that the order in which we are adding these members
// is important (see add_addhoc_member() for details).
//
- if (ot == otype::a || !link_members (rs).a)
+ if (operator>= (t.group->decl, target_decl::implied) // @@ VC14
+ ? ot == (link_members (rs).a ? otype::a : otype::s)
+ : search_existing (ctx,
+ ot == otype::a
+ ? libs::static_type
+ : liba::static_type,
+ t.dir, t.out, t.name) == nullptr)
{
auto& pc (add_adhoc_member<pc> (t));
@@ -1369,14 +1696,13 @@ namespace build2
// exists (windows_rpath_assembly() does take care to clean it up
// if not used).
//
-#ifdef _WIN32
- target& dir =
-#endif
+ target& dir (
add_adhoc_member (t,
fsdir::static_type,
path_cast<dir_path> (t.path () + ".dlls"),
t.out,
- string () /* name */);
+ string () /* name */,
+ nullopt /* ext */));
// By default our backlinking logic will try to symlink the
// directory and it can even be done on Windows using junctions.
@@ -1390,9 +1716,15 @@ namespace build2
// Wine. So we only resort to copy-link'ing if we are running on
// Windows.
//
+ // We also don't want to print this member at level 1 diagnostics.
+ //
+ dir.state[a].assign (ctx.var_backlink) = names {
#ifdef _WIN32
- dir.state[a].assign (ctx.var_backlink) = "copy";
+ name ("copy"), name ("false")
+#else
+ name ("group"), name ("false")
#endif
+ };
}
}
}
@@ -1414,23 +1746,24 @@ namespace build2
continue;
// New mark:
+ // 0 - already matched
// 1 - completion
// 2 - verification
//
- uint8_t m (unmark (pt));
+ uint8_t mk (unmark (pt));
- if (m == 3) // obj/bmi or lib not to be cleaned
+ if (mk == 3) // obj/bmi or lib not to be cleaned
{
- m = 1; // Just completion.
+ mk = 1; // Just completion.
// Note that if this is a library not to be cleaned, we keep it
// marked for completion (see the next phase).
}
- else if (m == 1 || m == 2) // Source/module chain.
+ else if (mk == 1 || mk == 2) // Source/module chain.
{
- bool mod (m == 2);
+ bool mod (mk == 2); // p is_a x_mod
- m = 1;
+ mk = 1;
const target& rt (*pt);
bool group (!p.prerequisite.belongs (t)); // Group's prerequisite.
@@ -1462,7 +1795,21 @@ namespace build2
if (!pt->has_prerequisites () &&
(!group || !rt.has_prerequisites ()))
{
- prerequisites ps {p.as_prerequisite ()}; // Source.
+ prerequisites ps;
+
+ // Add source.
+ //
+ // Remove the update variable (we may have stray update=execute
+ // that was specified together with the header).
+ //
+ {
+ prerequisite pc (p.as_prerequisite ());
+
+ if (!pc.vars.empty ())
+ pc.vars.erase (*ctx.var_update);
+
+ ps.push_back (move (pc));
+ }
// Add our lib*{} (see the export.* machinery for details) and
// bmi*{} (both original and chained; see module search logic)
@@ -1481,7 +1828,7 @@ namespace build2
// might depend on the imported one(s) which we will never "see"
// unless we start with this library.
//
- // Note: have similar logic in make_module_sidebuild().
+ // Note: have similar logic in make_{module,header}_sidebuild().
//
size_t j (start);
for (prerequisite_member p: group_prerequisite_members (a, t))
@@ -1567,7 +1914,10 @@ namespace build2
// Most of the time we will have just a single source so fast-
// path that case.
//
- if (p1.is_a (mod ? *x_mod : x_src) || p1.is_a<c> ())
+ if (mod
+ ? p1.is_a (*x_mod)
+ : (p1.is_a (x_src) || p1.is_a<c> () || p1.is_a<S> () ||
+ (x_obj != nullptr && (p1.is_a (*x_obj) || p1.is_a<m> ()))))
{
src = true;
continue; // Check the rest of the prerequisites.
@@ -1580,8 +1930,12 @@ namespace build2
p1.is_a<libx> () ||
p1.is_a<liba> () || p1.is_a<libs> () || p1.is_a<libux> () ||
p1.is_a<bmi> () || p1.is_a<bmix> () ||
- (p.is_a (mod ? *x_mod : x_src) && x_header (p1)) ||
- (p.is_a<c> () && p1.is_a<h> ()))
+ ((mod ||
+ p.is_a (x_src) ||
+ (x_asp != nullptr && p.is_a (*x_asp)) ||
+ (x_obj != nullptr && p.is_a (*x_obj))) && x_header (p1)) ||
+ ((p.is_a<c> () || p.is_a<S> () ||
+ (x_obj != nullptr && p.is_a<m> ())) && p1.is_a<h> ()))
continue;
fail << "synthesized dependency for prerequisite " << p
@@ -1594,14 +1948,14 @@ namespace build2
if (!src)
fail << "synthesized dependency for prerequisite " << p
<< " would be incompatible with existing target " << *pt <<
- info << "no existing c/" << x_name << " source prerequisite" <<
+ info << "no existing C/" << x_lang << " source prerequisite" <<
info << "specify corresponding " << rtt.name << "{} "
<< "dependency explicitly";
- m = 2; // Needs verification.
+ mk = 2; // Needs verification.
}
}
- else // lib*{}
+ else // lib*{} or update during match
{
// If this is a static library, see if we need to link it whole.
// Note that we have to do it after match since we rely on the
@@ -1610,6 +1964,8 @@ namespace build2
bool u;
if ((u = pt->is_a<libux> ()) || pt->is_a<liba> ())
{
+ // Note: go straight for the public variable pool.
+ //
const variable& var (ctx.var_pool["bin.whole"]); // @@ Cache.
// See the bin module for the lookup semantics discussion. Note
@@ -1619,7 +1975,7 @@ namespace build2
lookup l (p.prerequisite.vars[var]);
if (!l.defined ())
- l = pt->lookup_original (var, true).first;
+ l = pt->lookup_original (var, true /* target_only */).first;
if (!l.defined ())
{
@@ -1638,7 +1994,7 @@ namespace build2
}
}
- mark (pt, m);
+ mark (pt, mk);
}
// Process prerequisites, pass 3: match everything and verify chains.
@@ -1651,10 +2007,10 @@ namespace build2
i = start;
for (prerequisite_member p: group_prerequisite_members (a, t))
{
- bool adhoc (pts[i].adhoc);
+ bool adhoc (pts[i].adhoc ());
const target*& pt (pts[i++]);
- uint8_t m;
+ uint8_t mk;
if (pt == nullptr)
{
@@ -1664,10 +2020,15 @@ namespace build2
continue;
pt = &p.search (t);
- m = 1; // Mark for completion.
+ mk = 1; // Mark for completion.
}
- else if ((m = unmark (pt)) != 0)
+ else
{
+ mk = unmark (pt);
+
+ if (mk == 0)
+ continue; // Already matched.
+
// If this is a library not to be cleaned, we can finally blank it
// out.
//
@@ -1679,7 +2040,7 @@ namespace build2
}
match_async (a, *pt, ctx.count_busy (), t[a].task_count);
- mark (pt, m);
+ mark (pt, mk);
}
wg.wait ();
@@ -1694,15 +2055,15 @@ namespace build2
// Skipped or not marked for completion.
//
- uint8_t m;
- if (pt == nullptr || (m = unmark (pt)) == 0)
+ uint8_t mk;
+ if (pt == nullptr || (mk = unmark (pt)) == 0)
continue;
- build2::match (a, *pt);
+ match_complete (a, *pt);
// Nothing else to do if not marked for verification.
//
- if (m == 1)
+ if (mk == 1)
continue;
// Finish verifying the existing dependency (which is now matched)
@@ -1714,7 +2075,10 @@ namespace build2
for (prerequisite_member p1: group_prerequisite_members (a, *pt))
{
- if (p1.is_a (mod ? *x_mod : x_src) || p1.is_a<c> ())
+ if (mod
+ ? p1.is_a (*x_mod)
+ : (p1.is_a (x_src) || p1.is_a<c> () || p1.is_a<S> () ||
+ (x_obj != nullptr && (p1.is_a (*x_obj) || p1.is_a<m> ()))))
{
// Searching our own prerequisite is ok, p1 must already be
// resolved.
@@ -1750,14 +2114,11 @@ namespace build2
switch (a)
{
- case perform_update_id: return [this] (action a, const target& t)
- {
- return perform_update (a, t);
- };
- case perform_clean_id: return [this] (action a, const target& t)
- {
- return perform_clean (a, t);
- };
+ // Keep the recipe (which is match_data) after execution to allow the
+ // install rule to examine it.
+ //
+ case perform_update_id: t.keep_data (a); // Fall through.
+ case perform_clean_id: return md;
default: return noop_recipe; // Configure update.
}
}
@@ -1804,7 +2165,7 @@ namespace build2
const target* const* lc,
const small_vector<reference_wrapper<const string>, 2>& ns,
lflags f,
- const string* type, // cc.type
+ const string* type, // Whole cc.type in the <lang>[,...] form.
bool)
{
// Note: see also make_header_sidebuild().
@@ -1825,6 +2186,13 @@ namespace build2
// that range of elements to the end of args. See GitHub issue #114
// for details.
//
+ // One case where we can prune the graph is if the library is
+ // recursively-binless. It's tempting to wish that we can do the same
+ // just for binless, but alas that's not the case: we have to hoist
+ // its binful interface dependency because, for example, it must
+ // appear after the preceding static library of which this binless
+ // library is a dependency.
+ //
// From the process_libraries() semantics we know that this callback
// is always called and always after the options callbacks.
//
@@ -1836,8 +2204,13 @@ namespace build2
{
// Hoist the elements corresponding to this library to the end.
// Note that we cannot prune the traversal since we need to see the
- // last occurrence of each library.
+ // last occurrence of each library, unless the library is
+ // recursively-binless (in which case there will be no need to
+ // hoist since there can be no libraries among the elements).
//
+ if (type != nullptr && recursively_binless (*type))
+ return false;
+
d.ls.hoist (d.args, *al);
return true;
}
@@ -1883,19 +2256,52 @@ namespace build2
// install or both not. We can only do this if the library is build
// by our link_rule.
//
- else if (d.for_install && type != nullptr && *type != "cc")
+ else if (d.for_install &&
+ type != nullptr &&
+ *type != "cc" &&
+ type->compare (0, 3, "cc,") != 0)
{
- auto& md (l->data<link_rule::match_data> ());
- assert (md.for_install); // Must have been executed.
+ auto* md (l->try_data<link_rule::match_data> (d.a));
+
+ if (md == nullptr)
+ fail << "library " << *l << " is not built with cc module-based "
+ << "link rule" <<
+ info << "mark it as generic with cc.type=cc target-specific "
+ << "variable";
+
+ assert (md->for_install); // Must have been executed.
// The user will get the target name from the context info.
//
- if (*md.for_install != *d.for_install)
+ if (*md->for_install != *d.for_install)
fail << "incompatible " << *l << " build" <<
- info << "library is built " << (*md.for_install ? "" : "not ")
+ info << "library is built " << (*md->for_install ? "" : "not ")
<< "for install";
}
+ auto newer = [&d, l] ()
+ {
+ // @@ Work around the unexecuted member for installed libraries
+ // issue (see search_library() for details).
+ //
+ // Note that the member may not even be matched, let alone
+ // executed, so we have to go through the group to detect this
+ // case (if the group is not matched, then the member got to be).
+ //
+#if 0
+ return l->newer (d.mt);
+#else
+ const target* g (l->group);
+ target_state s (g != nullptr &&
+ g->matched (d.a, memory_order_acquire) &&
+ g->state[d.a].rule == &file_rule::rule_match
+ ? target_state::unchanged
+ : l->executed_state (d.a));
+
+ return l->newer (d.mt, s);
+#endif
+ };
+
if (d.li.type == otype::a)
{
// Linking a utility library to a static library.
@@ -1923,7 +2329,7 @@ namespace build2
// Check if this library renders us out of date.
//
if (d.update != nullptr)
- *d.update = *d.update || l->newer (d.mt);
+ *d.update = *d.update || newer ();
for (const target* pt: l->prerequisite_targets[d.a])
{
@@ -1962,7 +2368,7 @@ namespace build2
// Check if this library renders us out of date.
//
if (d.update != nullptr)
- *d.update = *d.update || l->newer (d.mt);
+ *d.update = *d.update || newer ();
// On Windows a shared library is a DLL with the import library as
// an ad hoc group member. MinGW though can link directly to DLLs
@@ -2043,6 +2449,8 @@ namespace build2
//
if (const target* g = exp && l.is_a<libs> () ? l.group : &l)
{
+ // Note: go straight for the public variable pool.
+ //
const variable& var (
com
? (exp ? c_export_loptions : c_loptions)
@@ -2061,7 +2469,9 @@ namespace build2
process_libraries (a, bs, li, sys_lib_dirs,
l, la,
- lf, imp, lib, opt, self,
+ lf, imp, lib, opt,
+ self,
+ false /* proc_opt_group */,
lib_cache);
}
@@ -2075,9 +2485,14 @@ namespace build2
// Use -rpath-link only on targets that support it (Linux, *BSD). Note
// that we don't really need it for top-level libraries.
//
+ // Note that more recent versions of FreeBSD are using LLVM lld without
+ // any mention of -rpath-link in the man pages.
+ //
+ auto have_link = [this] () {return tclass == "linux" || tclass == "bsd";};
+
if (link)
{
- if (tclass != "linux" && tclass != "bsd")
+ if (!have_link ())
return;
}
@@ -2107,8 +2522,56 @@ namespace build2
{
rpathed_libraries& ls;
strings& args;
- bool link;
- } d {ls, args, link};
+ bool rpath;
+ bool rpath_link;
+ } d {ls, args, false, false};
+
+ if (link)
+ d.rpath_link = true;
+ else
+ {
+ // While one would naturally expect -rpath to be a superset of
+ // -rpath-link, according to GNU ld:
+ //
+ // "The -rpath option is also used when locating shared objects which
+ // are needed by shared objects explicitly included in the link; see
+ // the description of the -rpath-link option. Searching -rpath in
+ // this way is only supported by native linkers and cross linkers
+ // which have been configured with the --with-sysroot option."
+ //
+ // So we check if this is cross-compilation and request both options
+ // if that's the case (we have no easy way of detecting whether the
+ // linker has been configured with the --with-sysroot option, whatever
+ // that means, so we will just assume the worst case).
+ //
+ d.rpath = true;
+
+ if (have_link ())
+ {
+ // Detecting cross-compilation is not as easy as it seems. Comparing
+ // complete target triplets proved too strict. For example, we may be
+ // running on x86_64-apple-darwin17.7.0 while the compiler is
+ // targeting x86_64-apple-darwin17.3.0. Also, there is the whole i?86
+ // family of CPUs which, at least for linking, should probably be
+ // considered the same.
+ //
+ const target_triplet& h (*bs.ctx.build_host);
+ const target_triplet& t (ctgt);
+
+ auto x86 = [] (const string& c)
+ {
+ return (c.size () == 4 &&
+ c[0] == 'i' &&
+ (c[1] >= '3' && c[1] <= '6') &&
+ c[2] == '8' &&
+ c[3] == '6');
+ };
+
+ if (t.system != h.system ||
+ (t.cpu != h.cpu && !(x86 (t.cpu) && x86 (h.cpu))))
+ d.rpath_link = true;
+ }
+ }
auto lib = [&d, this] (
const target* const* lc,
@@ -2130,13 +2593,22 @@ namespace build2
auto append = [&d] (const string& f)
{
- string o (d.link ? "-Wl,-rpath-link," : "-Wl,-rpath,");
-
size_t p (path::traits_type::rfind_separator (f));
assert (p != string::npos);
- o.append (f, 0, (p != 0 ? p : 1)); // Don't include trailing slash.
- d.args.push_back (move (o));
+ if (d.rpath)
+ {
+ string o ("-Wl,-rpath,");
+ o.append (f, 0, (p != 0 ? p : 1)); // Don't include trailing slash.
+ d.args.push_back (move (o));
+ }
+
+ if (d.rpath_link)
+ {
+ string o ("-Wl,-rpath-link,");
+ o.append (f, 0, (p != 0 ? p : 1));
+ d.args.push_back (move (o));
+ }
};
if (l != nullptr)
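The append lambda above reduces to stripping the file name from the library path and emitting the requested -Wl,-rpath, and/or -Wl,-rpath-link, options. A standalone sketch of that transformation using plain std::string instead of the build2 path machinery (assumes both flags were requested):

#include <cassert>
#include <string>
#include <vector>

static void
append_rpaths (std::vector<std::string>& args, const std::string& f)
{
  std::size_t p (f.rfind ('/'));        // Last directory separator.
  std::string d (f, 0, p != 0 ? p : 1); // Don't include trailing slash.

  args.push_back ("-Wl,-rpath," + d);
  args.push_back ("-Wl,-rpath-link," + d);
}

int main ()
{
  std::vector<std::string> a;
  append_rpaths (a, "/usr/local/lib/libfoo.so");

  assert (a[0] == "-Wl,-rpath,/usr/local/lib");
  assert (a[1] == "-Wl,-rpath-link,/usr/local/lib");
}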
@@ -2213,7 +2685,10 @@ namespace build2
process_libraries (a, bs, li, sys_lib_dirs,
l, la, 0 /* lflags */,
- imp, lib, nullptr, false /* self */, lib_cache);
+ imp, lib, nullptr,
+ false /* self */,
+ false /* proc_opt_group */,
+ lib_cache);
}
void link_rule::
@@ -2275,7 +2750,7 @@ namespace build2
// Filter link.exe noise (msvc.cxx).
//
void
- msvc_filter_link (ifdstream&, const file&, otype);
+ msvc_filter_link (diag_buffer&, const file&, otype);
// Translate target CPU to the link.exe/lib.exe /MACHINE option.
//
@@ -2283,7 +2758,7 @@ namespace build2
msvc_machine (const string& cpu); // msvc.cxx
target_state link_rule::
- perform_update (action a, const target& xt) const
+ perform_update (action a, const target& xt, match_data& md) const
{
tracer trace (x, "link_rule::perform_update");
@@ -2295,8 +2770,6 @@ namespace build2
const scope& bs (t.base_scope ());
const scope& rs (*bs.root_scope ());
- match_data& md (t.data<match_data> ());
-
// Unless the outer install rule signalled that this is update for
// install, signal back that we've performed plain update.
//
@@ -2325,14 +2798,33 @@ namespace build2
// Note that execute_prerequisites() blanks out all the ad hoc
// prerequisites so we don't need to worry about them from now on.
//
+ // There is an interesting trade-off between the straight and reverse
+ // execution. With straight we may end up with inaccurate progress if
+ // most of our library prerequisites (typically specified last) are
+ // already up to date. In this case, the progress will first increase
+ // slowly as we compile this target's source files and then jump
+ // straight to 100% as we "realize" that all the libraries (and all
+ // their prerequisites) are already up to date.
+ //
+ // Switching to reverse fixes this but messes up incremental building:
+ // now instead of starting to compile source files right away, we will
+ // first spend some time making sure all the libraries are up to date
+ // (which, in case of an error in the source code, will be a complete
+ // waste).
+ //
+ // There doesn't seem to be an easy way to distinguish between
+ // incremental and from-scratch builds and on balance fast incremental
+ // builds feel more important.
+ //
target_state ts;
- if (optional<target_state> s =
- execute_prerequisites (a,
- t,
- mt,
- [] (const target&, size_t) {return false;}))
+ if (optional<target_state> s = execute_prerequisites (
+ a, t,
+ mt,
+ [] (const target&, size_t) {return false;}))
+ {
ts = *s;
+ }
else
{
// An ad hoc prerequisite renders us out-of-date. Let's update from
@@ -2346,7 +2838,7 @@ namespace build2
// those that don't match. Note that we have to do it after updating
// prerequisites to keep the dependency counts straight.
//
- if (const variable* var_fi = ctx.var_pool.find ("for_install"))
+ if (const variable* var_fi = rs.var_pool ().find ("for_install"))
{
// Parallel prerequisites/prerequisite_targets loop.
//
@@ -2372,7 +2864,7 @@ namespace build2
// (Re)generate pkg-config's .pc file. While the target itself might be
// up-to-date from a previous run, there is no guarantee that .pc exists
// or also up-to-date. So to keep things simple we just regenerate it
- // unconditionally (and avoid doing so on uninstall; see pkconfig_save()
+ // unconditionally (and avoid doing so on uninstall; see pkgconfig_save()
// for details).
//
// Also, if you are wondering why don't we just always produce this .pc,
@@ -2382,7 +2874,7 @@ namespace build2
// There is a further complication: we may have no intention of
// installing the library but still need to update it for install (see
// install_scope() for background). In which case we may still not have
- // the installation directories. We handle this in pkconfig_save() by
+ // the installation directories. We handle this in pkgconfig_save() by
// skipping the generation of .pc files (and letting the install rule
// complain if we do end up trying to install them).
//
@@ -2399,8 +2891,12 @@ namespace build2
if (!m->is_a (la ? pca::static_type : pcs::static_type))
{
- if (t.group->matched (a))
+ if (operator>= (t.group->decl, target_decl::implied) // @@ VC14
+ ? t.group->matched (a)
+ : true)
+ {
pkgconfig_save (a, t, la, true /* common */, binless);
+ }
else
// Mark as non-existent not to confuse the install rule.
//
@@ -2512,14 +3008,19 @@ namespace build2
try
{
+ // We assume that what we write to stdin is small enough to
+ // fit into the pipe's buffer without blocking.
+ //
process pr (rc,
args,
- -1 /* stdin */,
- 1 /* stdout */,
- 2 /* stderr */,
- nullptr /* cwd */,
+ -1 /* stdin */,
+ 1 /* stdout */,
+ diag_buffer::pipe (ctx) /* stderr */,
+ nullptr /* cwd */,
env_ptrs.empty () ? nullptr : env_ptrs.data ());
+ diag_buffer dbuf (ctx, args[0], pr);
+
try
{
ofdstream os (move (pr.out_fd));
@@ -2543,7 +3044,8 @@ namespace build2
// was caused by that and let run_finish() deal with it.
}
- run_finish (args, pr);
+ dbuf.read ();
+ run_finish (dbuf, args, pr, 2 /* verbosity */);
}
catch (const process_error& e)
{
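The same diag_buffer pattern recurs in the linker invocation further down: the child's stderr goes through a buffering pipe so that diagnostics from commands running in parallel do not interleave. A condensed sketch of the pattern as used in these hunks (pp, args, and ctx stand for the process path, argument vector, and context prepared earlier; error handling omitted):

process pr (pp, args,
            0                       /* stdin  */,
            1                       /* stdout */,
            diag_buffer::pipe (ctx) /* stderr */);

diag_buffer dbuf (ctx, args[0], pr);
dbuf.read ();                                   // Buffer child stderr.
run_finish (dbuf, args, pr, 2 /* verbosity */); // Print it if warranted.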
@@ -2598,6 +3100,8 @@ namespace build2
{
// For VC we use link.exe directly.
//
+ // Note: go straight for the public variable pool.
+ //
const string& cs (
cast<string> (
rs[tsys == "win32-msvc"
@@ -2691,6 +3195,9 @@ namespace build2
// probably safe to assume that the two came from the same version
// of binutils/LLVM.
//
+ // @@ Note also that GNU ar deprecated -T in favor of --thin in
+ // version 2.38.
+ //
if (lt.utility)
{
const string& id (cast<string> (rs["bin.ar.id"]));
@@ -2804,10 +3311,72 @@ namespace build2
rpath_libraries (sargs, bs, a, t, li, for_install /* link */);
lookup l;
-
if ((l = t["bin.rpath"]) && !l->empty ())
+ {
+ // See if we need to make the specified paths relative using the
+ // $ORIGIN (Linux, BSD) or @loader_path (Mac OS) mechanisms.
+ //
+ optional<dir_path> origin;
+ if (for_install && cast_false<bool> (rs["install.relocatable"]))
+ {
+ // Note that both $ORIGIN and @loader_path will be expanded to
+ // the path of the binary that we are building (executable or
+ // shared library) as opposed to the top-level executable.
+ //
+ path p (install::resolve_file (t));
+
+ // If the file is not installable then the install.relocatable
+ // semantics does not apply, naturally.
+ //
+ if (!p.empty ())
+ origin = p.directory ();
+ }
+
+ bool origin_used (false);
for (const dir_path& p: cast<dir_paths> (l))
- sargs.push_back ("-Wl,-rpath," + p.string ());
+ {
+ string o ("-Wl,-rpath,");
+
+ // Note that we only rewrite absolute paths so if the user
+ // specified $ORIGIN or @loader_path manually, we will pass it
+ // through as is.
+ //
+ if (origin && p.absolute ())
+ {
+ dir_path l;
+ try
+ {
+ l = p.relative (*origin);
+ }
+ catch (const invalid_path&)
+ {
+ fail << "unable to make rpath " << p << " relative to "
+ << *origin <<
+ info << "required for relocatable installation";
+ }
+
+ o += (tclass == "macos" ? "@loader_path" : "$ORIGIN");
+
+ if (!l.empty ())
+ {
+ o += path_traits::directory_separator;
+ o += l.string ();
+ }
+
+ origin_used = true;
+ }
+ else
+ o += p.string ();
+
+ sargs.push_back (move (o));
+ }
+
+ // According to the Internet, `-Wl,-z,origin` is not needed except
+ // potentially for older BSDs.
+ //
+ if (origin_used && tclass == "bsd")
+ sargs.push_back ("-Wl,-z,origin");
+ }
if ((l = t["bin.rpath_link"]) && !l->empty ())
{
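For illustration of the relocatable rpath rewrite above, suppose (hypothetically) the binary is installed into /opt/app/bin and bin.rpath contains /opt/app/lib: the emitted option becomes -Wl,-rpath,$ORIGIN/../lib on Linux/BSD and -Wl,-rpath,@loader_path/../lib on Mac OS. A condensed sketch of the string assembly:

#include <cassert>
#include <string>

int main ()
{
  bool macos (false);          // tclass == "macos"
  std::string rel ("../lib");  // p.relative (*origin) for the paths above.

  std::string o ("-Wl,-rpath,");
  o += (macos ? "@loader_path" : "$ORIGIN");
  o += '/';
  o += rel;

  assert (o == "-Wl,-rpath,$ORIGIN/../lib");
}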
@@ -2841,25 +3410,24 @@ namespace build2
// Extra system library dirs (last).
//
- assert (sys_lib_dirs_extra <= sys_lib_dirs.size ());
+ assert (sys_lib_dirs_mode + sys_lib_dirs_extra <= sys_lib_dirs.size ());
+
+ // Note that the mode options are added as part of cmode.
+ //
+ auto b (sys_lib_dirs.begin () + sys_lib_dirs_mode);
+ auto x (b + sys_lib_dirs_extra);
if (tsys == "win32-msvc")
{
// If we have no LIB environment variable set, then we add all of
// them. But we want extras to come first.
//
- // Note that the mode options are added as part of cmode.
- //
- auto b (sys_lib_dirs.begin () + sys_lib_dirs_mode);
- auto m (sys_lib_dirs.begin () + sys_lib_dirs_extra);
- auto e (sys_lib_dirs.end ());
-
- for (auto i (m); i != e; ++i)
+ for (auto i (b); i != x; ++i)
sargs1.push_back ("/LIBPATH:" + i->string ());
if (!getenv ("LIB"))
{
- for (auto i (b); i != m; ++i)
+ for (auto i (x), e (sys_lib_dirs.end ()); i != e; ++i)
sargs1.push_back ("/LIBPATH:" + i->string ());
}
@@ -2870,7 +3438,7 @@ namespace build2
append_option_values (
args,
"-L",
- sys_lib_dirs.begin () + sys_lib_dirs_extra, sys_lib_dirs.end (),
+ b, x,
[] (const dir_path& d) {return d.string ().c_str ();});
}
}
@@ -2966,7 +3534,7 @@ namespace build2
&cs, &update, mt,
bs, a, *f, la, p.data, li,
for_install, true, true, &lc);
- f = nullptr; // Timestamp checked by hash_libraries().
+ f = nullptr; // Timestamp checked by append_libraries().
}
else
{
@@ -3055,6 +3623,10 @@ namespace build2
//
path relt (relative (tp));
+ path reli; // Import library.
+ if (lt.shared_library () && (tsys == "win32-msvc" || tsys == "mingw32"))
+ reli = relative (find_adhoc_member<libi> (t)->path ());
+
const process_path* ld (nullptr);
if (lt.static_library ())
{
@@ -3186,7 +3758,7 @@ namespace build2
// derived from the import library by changing the extension.
// Lucky for us -- there is no option to name it.
//
- out2 += relative (find_adhoc_member<libi> (t)->path ()).string ();
+ out2 += reli.string ();
}
else
{
@@ -3199,14 +3771,17 @@ namespace build2
// If we have /DEBUG then name the .pdb file. It is an ad hoc group
// member.
//
- if (find_option ("/DEBUG", args, true))
+ if (const char* o = find_option_prefix ("/DEBUG", args, true))
{
- const file& pdb (
- *find_adhoc_member<file> (t, *bs.find_target_type ("pdb")));
+ if (icasecmp (o, "/DEBUG:NONE") != 0)
+ {
+ const file& pdb (
+ *find_adhoc_member<file> (t, *bs.find_target_type ("pdb")));
- out1 = "/PDB:";
- out1 += relative (pdb.path ()).string ();
- args.push_back (out1.c_str ());
+ out1 = "/PDB:";
+ out1 += relative (pdb.path ()).string ();
+ args.push_back (out1.c_str ());
+ }
}
out = "/OUT:" + relt.string ();
@@ -3220,6 +3795,8 @@ namespace build2
{
ld = &cpath;
+ append_diag_color_options (args);
+
// Add the option that triggers building a shared library and
// take care of any extras (e.g., import library).
//
@@ -3235,8 +3812,7 @@ namespace build2
// On Windows libs{} is the DLL and an ad hoc group member
// is the import library.
//
- const file& imp (*find_adhoc_member<libi> (t));
- out = "-Wl,--out-implib=" + relative (imp.path ()).string ();
+ out = "-Wl,--out-implib=" + reli.string ();
args.push_back (out.c_str ());
}
}
@@ -3393,17 +3969,43 @@ namespace build2
try_rmfile (relt, true);
}
+ // We have no choice but to serialize early if we want the command line
+ // printed shortly before actually executing the linker. Failing that, it
+ // may look like we are still executing in parallel.
+ //
+ scheduler::alloc_guard jobs_ag;
+ if (!ctx.dry_run && cast_false<bool> (t[c_serialize]))
+ jobs_ag = scheduler::alloc_guard (*ctx.sched, phase_unlock (nullptr));
+
if (verb == 1)
- text << (lt.static_library () ? "ar " : "ld ") << t;
+ print_diag (lt.static_library () ? "ar" : "ld", t);
else if (verb == 2)
print_process (args);
+ // Do any necessary fixups to the command line to make it runnable.
+ //
+ // Notice the split in the diagnostics: at verbosity level 1 we print
+ // the "logical" command line while at level 2 and above -- what we are
+ // actually executing.
+ //
+ // We also need to save the original for the diag_buffer::close() call
+ // below if at verbosity level 1.
+ //
+ cstrings oargs;
+
// Adjust linker parallelism.
//
+ // Note that we are not going to bother with oargs for this.
+ //
+ // Note also that we now have scheduler::serialize() which allows us to
+ // block until full parallelism is available (this mode can currently
+ // be forced with cc.serialize=true; maybe we should invent something
+ // like config.cc.link_serialize or some such which can be used when
+ // LTO is enabled).
+ //
string jobs_arg;
- scheduler::alloc_guard jobs_extra;
- if (!lt.static_library ())
+ if (!ctx.dry_run && !lt.static_library ())
{
switch (ctype)
{
@@ -3419,8 +4021,10 @@ namespace build2
auto i (find_option_prefix ("-flto", args.rbegin (), args.rend ()));
if (i != args.rend () && strcmp (*i, "-flto=auto") == 0)
{
- jobs_extra = scheduler::alloc_guard (ctx.sched, 0);
- jobs_arg = "-flto=" + to_string (1 + jobs_extra.n);
+ if (jobs_ag.n == 0) // Might already have (see above).
+ jobs_ag = scheduler::alloc_guard (*ctx.sched, 0);
+
+ jobs_arg = "-flto=" + to_string (1 + jobs_ag.n);
*i = jobs_arg.c_str ();
}
break;
@@ -3438,8 +4042,10 @@ namespace build2
strcmp (*i, "-flto=thin") == 0 &&
!find_option_prefix ("-flto-jobs=", args))
{
- jobs_extra = scheduler::alloc_guard (ctx.sched, 0);
- jobs_arg = "-flto-jobs=" + to_string (1 + jobs_extra.n);
+ if (jobs_ag.n == 0) // Might already have (see above).
+ jobs_ag = scheduler::alloc_guard (*ctx.sched, 0);
+
+ jobs_arg = "-flto-jobs=" + to_string (1 + jobs_ag.n);
args.insert (i.base (), jobs_arg.c_str ()); // After -flto=thin.
}
break;
@@ -3450,12 +4056,6 @@ namespace build2
}
}
- // Do any necessary fixups to the command line to make it runnable.
- //
- // Notice the split in the diagnostics: at verbosity level 1 we print
- // the "logical" command line while at level 2 and above -- what we are
- // actually executing.
- //
// On Windows we need to deal with the command line length limit. The
// best workaround seems to be passing (part of) the command line in an
// "options file" ("response file" in Microsoft's terminology). Both
@@ -3541,19 +4141,20 @@ namespace build2
fail << "unable to write to " << f << ": " << e;
}
+ if (verb == 1)
+ oargs = args;
+
// Replace input arguments with @file.
//
targ = '@' + f.string ();
args.resize (args_input);
args.push_back (targ.c_str());
args.push_back (nullptr);
-
- //@@ TODO: leave .t file if linker failed and verb > 2?
}
}
#endif
- if (verb > 2)
+ if (verb >= 3)
print_process (args);
// Remove the target file if any of the subsequent (after the linker)
@@ -3571,52 +4172,51 @@ namespace build2
{
// VC tools (both lib.exe and link.exe) send diagnostics to stdout.
// Also, link.exe likes to print various gratuitous messages. So for
- // link.exe we redirect stdout to a pipe, filter that noise out, and
- // send the rest to stderr.
+ // link.exe we filter that noise out.
//
// For lib.exe (and any other insane linker that may try to pull off
// something like this) we are going to redirect stdout to stderr.
// For sane compilers this should be harmless.
//
// Note that we don't need this for LLD's link.exe replacement which
- // is quiet.
+ // is thankfully quiet.
//
bool filter (tsys == "win32-msvc" &&
!lt.static_library () &&
cast<string> (rs["bin.ld.id"]) != "msvc-lld");
process pr (*ld,
- args.data (),
- 0 /* stdin */,
- (filter ? -1 : 2) /* stdout */,
- 2 /* stderr */,
- nullptr /* cwd */,
+ args,
+ 0 /* stdin */,
+ 2 /* stdout */,
+ diag_buffer::pipe (ctx, filter /* force */) /* stderr */,
+ nullptr /* cwd */,
env_ptrs.empty () ? nullptr : env_ptrs.data ());
+ diag_buffer dbuf (ctx, args[0], pr);
+
if (filter)
+ msvc_filter_link (dbuf, t, ot);
+
+ dbuf.read ();
+
{
- try
- {
- ifdstream is (
- move (pr.in_ofd), fdstream_mode::text, ifdstream::badbit);
+ bool e (pr.wait ());
- msvc_filter_link (is, t, ot);
+#ifdef _WIN32
+ // Keep the options file if we have shown it.
+ //
+ if (!e && verb >= 3)
+ trm.cancel ();
+#endif
- // If anything remains in the stream, send it all to stderr.
- // Note that the eof check is important: if the stream is at
- // eof, this and all subsequent writes to the diagnostics stream
- // will fail (and you won't see a thing).
- //
- if (is.peek () != ifdstream::traits_type::eof ())
- diag_stream_lock () << is.rdbuf ();
+ dbuf.close (oargs.empty () ? args : oargs,
+ *pr.exit,
+ 1 /* verbosity */);
- is.close ();
- }
- catch (const io_error&) {} // Assume exits with error.
+ if (!e)
+ throw failed ();
}
-
- run_finish (args, pr);
- jobs_extra.deallocate ();
}
catch (const process_error& e)
{
@@ -3638,12 +4238,24 @@ namespace build2
throw failed ();
}
- // Clean up executable's import library (see above for details).
+ // Clean up executable's import library (see above for details). And
+ // make sure we have an import library for a shared library.
//
- if (lt.executable () && tsys == "win32-msvc")
+ if (tsys == "win32-msvc")
{
- try_rmfile (relt + ".lib", true /* ignore_errors */);
- try_rmfile (relt + ".exp", true /* ignore_errors */);
+ if (lt.executable ())
+ {
+ try_rmfile (relt + ".lib", true /* ignore_errors */);
+ try_rmfile (relt + ".exp", true /* ignore_errors */);
+ }
+ else if (lt.shared_library ())
+ {
+ if (!file_exists (reli,
+ false /* follow_symlinks */,
+ true /* ignore_error */))
+ fail << "linker did not produce import library " << reli <<
+ info << "perhaps this library does not export any symbols?";
+ }
}
// Set executable bit on the .js file so that it can be run with a
@@ -3675,12 +4287,17 @@ namespace build2
print_process (args);
if (!ctx.dry_run)
- run (rl,
+ {
+ run (ctx,
+ rl,
args,
- dir_path () /* cwd */,
+ 1 /* finish_verbosity */,
env_ptrs.empty () ? nullptr : env_ptrs.data ());
+ }
}
+ jobs_ag.deallocate ();
+
// For Windows generate (or clean up) rpath-emulating assembly.
//
if (tclass == "windows")
@@ -3783,12 +4400,11 @@ namespace build2
}
target_state link_rule::
- perform_clean (action a, const target& xt) const
+ perform_clean (action a, const target& xt, match_data& md) const
{
const file& t (xt.as<file> ());
ltype lt (link_type (t));
- const match_data& md (t.data<match_data> ());
clean_extras extras;
clean_adhoc_extras adhoc_extras;
@@ -3861,5 +4477,25 @@ namespace build2
return perform_clean_extra (a, t, extras, adhoc_extras);
}
+
+ const target* link_rule::
+ import (const prerequisite_key& pk,
+ const optional<string>&,
+ const location&) const
+ {
+ tracer trace (x, "link_rule::import");
+
+ // @@ TODO: do we want to make metadata loading optional?
+ //
+ optional<dir_paths> usr_lib_dirs;
+ const target* r (search_library (nullopt /* action */,
+ sys_lib_dirs, usr_lib_dirs,
+ pk));
+
+ if (r == nullptr)
+ l4 ([&]{trace << "unable to find installed library " << pk;});
+
+ return r;
+ }
}
}
diff --git a/libbuild2/cc/link-rule.hxx b/libbuild2/cc/link-rule.hxx
index c6d06d2..9b491c2 100644
--- a/libbuild2/cc/link-rule.hxx
+++ b/libbuild2/cc/link-rule.hxx
@@ -18,11 +18,13 @@ namespace build2
{
namespace cc
{
- class LIBBUILD2_CC_SYMEXPORT link_rule: public simple_rule, virtual common
+ class LIBBUILD2_CC_SYMEXPORT link_rule: public rule, virtual common
{
public:
link_rule (data&&);
+ struct match_data;
+
struct match_result
{
bool seen_x = false;
@@ -46,18 +48,21 @@ namespace build2
match (action, const target&, const target*, otype, bool) const;
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&, const string&, match_extra&) const override;
virtual recipe
- apply (action, target&) const override;
+ apply (action, target&, match_extra&) const override;
target_state
- perform_update (action, const target&) const;
+ perform_update (action, const target&, match_data&) const;
target_state
- perform_clean (action, const target&) const;
+ perform_clean (action, const target&, match_data&) const;
- using simple_rule::match; // To make Clang happy.
+ virtual const target*
+ import (const prerequisite_key&,
+ const optional<string>&,
+ const location&) const override;
public:
// Library handling.
@@ -228,9 +233,9 @@ namespace build2
static void
functions (function_family&, const char*); // functions.cxx
- private:
- friend class install_rule;
- friend class libux_install_rule;
+ // Implementation details.
+ //
+ public:
// Shared library paths.
//
@@ -273,6 +278,9 @@ namespace build2
struct match_data
{
+ explicit
+ match_data (const link_rule& r): rule (r) {}
+
// The "for install" condition is signalled to us by install_rule when
// it is matched for the update operation. It also verifies that if we
// have already been executed, then it was for install.
@@ -307,10 +315,21 @@ namespace build2
size_t start; // Parallel prerequisites/prerequisite_targets start.
link_rule::libs_paths libs_paths;
+
+ const link_rule& rule;
+
+ target_state
+ operator() (action a, const target& t)
+ {
+ return a == perform_update_id
+ ? rule.perform_update (a, t, *this)
+ : rule.perform_clean (a, t, *this);
+ }
};
// Windows rpath emulation (windows-rpath.cxx).
//
+ private:
struct windows_dll
{
reference_wrapper<const string> dll;
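
The match_data defined above doubles as the recipe: it captures a reference to the rule and its call operator dispatches to perform_update() or perform_clean(), which is how those functions receive the data by reference. A minimal sketch of how apply() can hand it back (the real apply() in link-rule.cxx does considerably more work and is not shown here; the body below is an illustrative assumption):

    recipe link_rule::
    apply (action a, target& t, match_extra&) const
    {
      match_data md (*this); // Capture the rule for later dispatch.

      // ... populate md (type, start, libs_paths, etc) using a and t ...

      return md; // match_data becomes the recipe; its operator() runs it.
    }
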
diff --git a/libbuild2/cc/module.cxx b/libbuild2/cc/module.cxx
index 871cfb6..cf6c6e4 100644
--- a/libbuild2/cc/module.cxx
+++ b/libbuild2/cc/module.cxx
@@ -11,10 +11,7 @@
#include <libbuild2/bin/target.hxx>
-#include <libbuild2/cc/target.hxx> // pc*
-
#include <libbuild2/config/utility.hxx>
-#include <libbuild2/install/utility.hxx>
#include <libbuild2/cc/guess.hxx>
@@ -30,6 +27,8 @@ namespace build2
{
tracer trace (x, "guess_init");
+ context& ctx (rs.ctx);
+
bool cc_loaded (cast_false<bool> (rs["cc.core.guess.loaded"]));
// Adjust module priority (compiler). Also order cc module before us
@@ -41,7 +40,10 @@ namespace build2
config::save_module (rs, x, 250);
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
// Must already exist.
//
@@ -55,7 +57,7 @@ namespace build2
// config.x
//
- strings mode;
+ strings omode; // Original mode.
{
// Normally we will have a persistent configuration and computing the
// default value every time will be a waste. So try without a default
@@ -139,21 +141,34 @@ namespace build2
fail << "invalid path '" << s << "' in " << config_x;
}
- mode.assign (++v.begin (), v.end ());
+ omode.assign (++v.begin (), v.end ());
// Save original path/mode in *.config.path/mode.
//
rs.assign (x_c_path) = xc;
- rs.assign (x_c_mode) = mode;
+ rs.assign (x_c_mode) = omode;
+
+ // Merge the configured mode options into user-specified (which must
+ // be done before loading the *.guess module).
+ //
+ // In particular, this ability to specify the compiler mode in a
+ // buildfile is useful in embedded development where the project may
+ // need to hardcode things like -target, -nostdinc, etc.
+ //
+ const strings& mode (cast<strings> (rs.assign (x_mode) += omode));
// Figure out which compiler we are dealing with, its target, etc.
//
// Note that we could allow guess() to modify mode to support
// imaginary options (such as /MACHINE for cl.exe). Though it's not
// clear what cc.mode would contain (original or modified). Note that
- // we are now folding *.std options into mode options.
+ // we are now adding *.std options into mode options.
+ //
+ // @@ But can't the language standard options alter things like search
+ // directories?
//
x_info = &build2::cc::guess (
+ ctx,
x, x_lang,
rs.root_extra->environment_checksum,
move (xc),
@@ -180,7 +195,8 @@ namespace build2
if (config_sub)
{
- ct = run<string> (3,
+ ct = run<string> (ctx,
+ 3,
*config_sub,
xi.target.c_str (),
[] (string& l, bool) {return move (l);});
@@ -218,9 +234,10 @@ namespace build2
// Assign values to variables that describe the compiler.
//
+ // Note: x_mode is dealt with above.
+ //
rs.assign (x_path) = process_path_ex (
xi.path, x_name, xi.checksum, env_checksum);
- const strings& xm (cast<strings> (rs.assign (x_mode) = move (mode)));
rs.assign (x_id) = xi.id.string ();
rs.assign (x_id_type) = to_string (xi.id.type);
@@ -265,9 +282,9 @@ namespace build2
//
if (!cc_loaded)
{
- // Prepare configuration hints.
+ // Prepare configuration hints (pretend it belongs to root scope).
//
- variable_map h (rs.ctx);
+ variable_map h (rs);
// Note that all these variables have already been registered.
//
@@ -278,8 +295,8 @@ namespace build2
if (!xi.pattern.empty ())
h.assign ("config.cc.pattern") = xi.pattern;
- if (!xm.empty ())
- h.assign ("config.cc.mode") = xm;
+ if (!omode.empty ())
+ h.assign ("config.cc.mode") = move (omode);
h.assign (c_runtime) = xi.runtime;
h.assign (c_stdlib) = xi.c_stdlib;
@@ -350,6 +367,8 @@ namespace build2
# ifdef __APPLE__
static const dir_path a_usr_inc (
"/Library/Developer/CommandLineTools/SDKs/MacOSX*.sdk/usr/include");
+ static const dir_path a_usr_lib (
+ "/Library/Developer/CommandLineTools/SDKs/MacOSX*.sdk/usr/lib");
# endif
#endif
@@ -376,7 +395,9 @@ namespace build2
//
if (!cast_false<bool> (rs["cc.core.config.loaded"]))
{
- variable_map h (rs.ctx);
+ // Prepare configuration hints (pretend it belongs to root scope).
+ //
+ variable_map h (rs);
if (!xi.bin_pattern.empty ())
h.assign ("config.bin.pattern") = xi.bin_pattern;
@@ -602,10 +623,10 @@ namespace build2
switch (xi.class_)
{
case compiler_class::gcc:
- lib_dirs = gcc_library_search_dirs (xi.path, rs);
+ lib_dirs = gcc_library_search_dirs (xi, rs);
break;
case compiler_class::msvc:
- lib_dirs = msvc_library_search_dirs (xi.path, rs);
+ lib_dirs = msvc_library_search_dirs (xi, rs);
break;
}
}
@@ -619,10 +640,10 @@ namespace build2
switch (xi.class_)
{
case compiler_class::gcc:
- hdr_dirs = gcc_header_search_dirs (xi.path, rs);
+ hdr_dirs = gcc_header_search_dirs (xi, rs);
break;
case compiler_class::msvc:
- hdr_dirs = msvc_header_search_dirs (xi.path, rs);
+ hdr_dirs = msvc_header_search_dirs (xi, rs);
break;
}
}
@@ -640,8 +661,8 @@ namespace build2
sys_hdr_dirs_mode = hdr_dirs.second;
sys_mod_dirs_mode = mod_dirs ? mod_dirs->second : 0;
- sys_lib_dirs_extra = lib_dirs.first.size ();
- sys_hdr_dirs_extra = hdr_dirs.first.size ();
+ sys_lib_dirs_extra = 0;
+ sys_hdr_dirs_extra = 0;
#ifndef _WIN32
// Add /usr/local/{include,lib}. We definitely shouldn't do this if we
@@ -657,11 +678,11 @@ namespace build2
// on the next invocation.
//
{
- auto& is (hdr_dirs.first);
+ auto& hs (hdr_dirs.first);
auto& ls (lib_dirs.first);
- bool ui (find (is.begin (), is.end (), usr_inc) != is.end ());
- bool uli (find (is.begin (), is.end (), usr_loc_inc) != is.end ());
+ bool ui (find (hs.begin (), hs.end (), usr_inc) != hs.end ());
+ bool uli (find (hs.begin (), hs.end (), usr_loc_inc) != hs.end ());
#ifdef __APPLE__
// On Mac OS starting from 10.14 there is no longer /usr/include.
@@ -684,15 +705,28 @@ namespace build2
//
// Is Apple's /usr/include.
//
- if (!ui && !uli)
+          // Also, it appears neither Clang nor GCC reports MacOSX*.sdk/usr/lib
+          // with -print-search-dirs, but both do search in there. So we add it
+          // to our list if we see MacOSX*.sdk/usr/include.
+ //
+ auto aui (find_if (hs.begin (), hs.end (),
+ [] (const dir_path& d)
+ {
+ return path_match (d, a_usr_inc);
+ }));
+
+ if (aui != hs.end ())
{
- for (const dir_path& d: is)
+ if (!ui)
+ ui = true;
+
+ if (find_if (ls.begin (), ls.end (),
+ [] (const dir_path& d)
+ {
+ return path_match (d, a_usr_lib);
+ }) == ls.end ())
{
- if (path_match (d, a_usr_inc))
- {
- ui = true;
- break;
- }
+ ls.push_back (aui->directory () /= "lib");
}
}
#endif
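
To make the derivation above concrete (the SDK version number is made up for illustration): given the matched header directory, stripping the last path component and appending lib yields the library directory that gets pushed onto ls.

    // aui -> .../SDKs/MacOSX13.sdk/usr/include   (hypothetical)
    //
    dir_path d (aui->directory ()); // .../SDKs/MacOSX13.sdk/usr/
    d /= "lib";                     // .../SDKs/MacOSX13.sdk/usr/lib
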
@@ -700,18 +734,29 @@ namespace build2
{
bool ull (find (ls.begin (), ls.end (), usr_loc_lib) != ls.end ());
- // Many platforms don't search in /usr/local/lib by default (but do
- // for headers in /usr/local/include). So add it as the last option.
+ // Many platforms don't search in /usr/local/lib by default but do
+ // for headers in /usr/local/include.
+ //
+ // Note that customarily /usr/local/include is searched before
+ // /usr/include so we add /usr/local/lib before built-in entries
+ // (there isn't really a way to add it after since all we can do is
+ // specify it with -L).
//
if (!ull && exists (usr_loc_lib, true /* ignore_error */))
- ls.push_back (usr_loc_lib);
+ {
+ ls.insert (ls.begin () + sys_lib_dirs_mode, usr_loc_lib);
+ ++sys_lib_dirs_extra;
+ }
// FreeBSD is at least consistent: it searches in neither. Quoting
// its wiki: "FreeBSD can't even find libraries that it installed."
// So let's help it a bit.
//
if (!uli && exists (usr_loc_inc, true /* ignore_error */))
- is.push_back (usr_loc_inc);
+ {
+ hs.insert (hs.begin () + sys_hdr_dirs_mode, usr_loc_inc);
+ ++sys_hdr_dirs_extra;
+ }
}
}
#endif
@@ -815,8 +860,11 @@ namespace build2
dr << "\n hdr dirs";
for (size_t i (0); i != incs.size (); ++i)
{
- if (i == sys_hdr_dirs_extra)
+ if ((sys_hdr_dirs_mode != 0 && i == sys_hdr_dirs_mode) ||
+ (sys_hdr_dirs_extra != 0 &&
+ i == sys_hdr_dirs_extra + sys_hdr_dirs_mode))
dr << "\n --";
+
dr << "\n " << incs[i];
}
}
@@ -826,8 +874,11 @@ namespace build2
dr << "\n lib dirs";
for (size_t i (0); i != libs.size (); ++i)
{
- if (i == sys_lib_dirs_extra)
+ if ((sys_lib_dirs_mode != 0 && i == sys_lib_dirs_mode) ||
+ (sys_lib_dirs_extra != 0 &&
+ i == sys_lib_dirs_extra + sys_lib_dirs_mode))
dr << "\n --";
+
dr << "\n " << libs[i];
}
}
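
As a worked example of the new separator placement (counts and directory names are hypothetical): with sys_lib_dirs_mode == 2 and sys_lib_dirs_extra == 1 the loop prints a -- line before index 2 (after the mode directories) and another before index 3 (after the extra /usr/local/lib entry), so the built-in directories always come last:

    lib dirs
      /opt/cross/lib       (mode)
      /opt/cross/usr/lib   (mode)
      --
      /usr/local/lib       (extra)
      --
      /usr/lib             (built-in)
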
@@ -948,40 +999,7 @@ namespace build2
// Register target types and configure their "installability".
//
- bool install_loaded (cast_false<bool> (rs["install.loaded"]));
-
- {
- using namespace install;
-
- rs.insert_target_type (x_src);
-
- auto insert_hdr = [&rs, install_loaded] (const target_type& tt)
- {
- rs.insert_target_type (tt);
-
- // Install headers into install.include.
- //
- if (install_loaded)
- install_path (rs, tt, dir_path ("include"));
- };
-
- // Note: module (x_mod) is in x_hdr.
- //
- for (const target_type* const* ht (x_hdr); *ht != nullptr; ++ht)
- insert_hdr (**ht);
-
- // Also register the C header for C-derived languages.
- //
- if (*x_hdr != &h::static_type)
- insert_hdr (h::static_type);
-
- rs.insert_target_type<pc> ();
- rs.insert_target_type<pca> ();
- rs.insert_target_type<pcs> ();
-
- if (install_loaded)
- install_path<pc> (rs, dir_path ("pkgconfig"));
- }
+ load_module (rs, rs, (string (x) += ".types"), loc);
// Register rules.
//
@@ -1079,34 +1097,37 @@ namespace build2
// them in case they depend on stuff that we need to install (see the
// install rule implementations for details).
//
- if (install_loaded)
+ if (cast_false<bool> (rs["install.loaded"]))
{
+        // Note: these rule implementations rely quite heavily on the fact
+        // that these are the only target types they are registered for.
+
const install_rule& ir (*this);
- r.insert<exe> (perform_install_id, x_install, ir);
- r.insert<exe> (perform_uninstall_id, x_uninstall, ir);
+ r.insert<exe> (perform_install_id, x_install, ir);
+ r.insert<exe> (perform_uninstall_id, x_install, ir);
- r.insert<liba> (perform_install_id, x_install, ir);
- r.insert<liba> (perform_uninstall_id, x_uninstall, ir);
+ r.insert<liba> (perform_install_id, x_install, ir);
+ r.insert<liba> (perform_uninstall_id, x_install, ir);
if (s)
{
- r.insert<libs> (perform_install_id, x_install, ir);
- r.insert<libs> (perform_uninstall_id, x_uninstall, ir);
+ r.insert<libs> (perform_install_id, x_install, ir);
+ r.insert<libs> (perform_uninstall_id, x_install, ir);
}
const libux_install_rule& lr (*this);
- r.insert<libue> (perform_install_id, x_install, lr);
- r.insert<libue> (perform_uninstall_id, x_uninstall, lr);
+ r.insert<libue> (perform_install_id, x_install, lr);
+ r.insert<libue> (perform_uninstall_id, x_install, lr);
- r.insert<libua> (perform_install_id, x_install, lr);
- r.insert<libua> (perform_uninstall_id, x_uninstall, lr);
+ r.insert<libua> (perform_install_id, x_install, lr);
+ r.insert<libua> (perform_uninstall_id, x_install, lr);
if (s)
{
- r.insert<libus> (perform_install_id, x_install, lr);
- r.insert<libus> (perform_uninstall_id, x_uninstall, lr);
+ r.insert<libus> (perform_install_id, x_install, lr);
+ r.insert<libus> (perform_uninstall_id, x_install, lr);
}
}
}
diff --git a/libbuild2/cc/module.hxx b/libbuild2/cc/module.hxx
index a91d723..4213516 100644
--- a/libbuild2/cc/module.hxx
+++ b/libbuild2/cc/module.hxx
@@ -4,6 +4,8 @@
#ifndef LIBBUILD2_CC_MODULE_HXX
#define LIBBUILD2_CC_MODULE_HXX
+#include <unordered_map>
+
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
@@ -15,6 +17,7 @@
#include <libbuild2/cc/compile-rule.hxx>
#include <libbuild2/cc/link-rule.hxx>
#include <libbuild2/cc/install-rule.hxx>
+#include <libbuild2/cc/predefs-rule.hxx>
#include <libbuild2/cc/export.hxx>
@@ -78,22 +81,53 @@ namespace build2
bool new_config = false; // See guess() and init() for details.
+ // Header cache (see compile_rule::enter_header()).
+ //
+ // We place it into the config module so that we have an option of
+ // sharing it for the entire weak amalgamation.
+ //
+ public:
+ // Keep the hash in the key. This way we can compute it outside of the
+ // lock.
+ //
+ struct header_key
+ {
+ path file;
+ size_t hash;
+
+ friend bool
+ operator== (const header_key& x, const header_key& y)
+ {
+ return x.file == y.file; // Note: hash was already compared.
+ }
+ };
+
+ struct header_key_hasher
+ {
+ size_t operator() (const header_key& k) const {return k.hash;}
+ };
+
+ mutable shared_mutex header_map_mutex;
+ mutable std::unordered_map<header_key,
+ const file*,
+ header_key_hasher> header_map;
+
private:
// Defined in gcc.cxx.
//
pair<dir_paths, size_t>
- gcc_header_search_dirs (const process_path&, scope&) const;
+ gcc_header_search_dirs (const compiler_info&, scope&) const;
pair<dir_paths, size_t>
- gcc_library_search_dirs (const process_path&, scope&) const;
+ gcc_library_search_dirs (const compiler_info&, scope&) const;
// Defined in msvc.cxx.
//
pair<dir_paths, size_t>
- msvc_header_search_dirs (const process_path&, scope&) const;
+ msvc_header_search_dirs (const compiler_info&, scope&) const;
pair<dir_paths, size_t>
- msvc_library_search_dirs (const process_path&, scope&) const;
+ msvc_library_search_dirs (const compiler_info&, scope&) const;
};
class LIBBUILD2_CC_SYMEXPORT module: public build2::module,
@@ -101,16 +135,18 @@ namespace build2
public link_rule,
public compile_rule,
public install_rule,
- public libux_install_rule
+ public libux_install_rule,
+ public predefs_rule
{
public:
explicit
- module (data&& d)
+ module (data&& d, const scope& rs)
: common (move (d)),
link_rule (move (d)),
- compile_rule (move (d)),
+ compile_rule (move (d), rs),
install_rule (move (d), *this),
- libux_install_rule (move (d), *this) {}
+ libux_install_rule (move (d), *this),
+ predefs_rule (move (d)) {}
void
init (scope&,
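
The header cache declared in config_module above keeps the hash in the key so that it can be computed outside of any lock. A minimal sketch of the intended lookup pattern (the real user is compile_rule::enter_header() in compile-rule.cxx, which is not shown here; the helper name, the use of std::hash over the path string, and the explicitly spelled-out lock types are illustrative assumptions):

    static const file*
    header_cache_find_or_insert (const config_module& m, path f, const file* ft)
    {
      // Hash outside of the lock.
      //
      size_t h (std::hash<string> () (f.string ()));
      config_module::header_key hk {move (f), h};

      {
        std::shared_lock<shared_mutex> l (m.header_map_mutex);
        auto i (m.header_map.find (hk));
        if (i != m.header_map.end ())
          return i->second; // Fast path: shared lock only.
      }

      // Both members are mutable, which is what makes this possible on a
      // const module reference.
      //
      std::unique_lock<shared_mutex> l (m.header_map_mutex);
      return m.header_map.emplace (move (hk), ft).first->second;
    }
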
diff --git a/libbuild2/cc/msvc.cxx b/libbuild2/cc/msvc.cxx
index f95cab0..d21969c 100644
--- a/libbuild2/cc/msvc.cxx
+++ b/libbuild2/cc/msvc.cxx
@@ -164,18 +164,21 @@ namespace build2
// Filter cl.exe and link.exe noise.
//
+ // Note: must be followed with the dbuf.read() call.
+ //
void
- msvc_filter_cl (ifdstream& is, const path& src)
+ msvc_filter_cl (diag_buffer& dbuf, const path& src)
+ try
{
       // While it appears VC always prints the source name (even if the
// file does not exist), let's do a sanity check. Also handle the
// command line errors/warnings which come before the file name.
//
- for (string l; !eof (getline (is, l)); )
+ for (string l; !eof (getline (dbuf.is, l)); )
{
if (l != src.leaf ().string ())
{
- diag_stream_lock () << l << endl;
+ dbuf.write (l, true /* newline */);
if (msvc_sense_diag (l, 'D').first != string::npos)
continue;
@@ -184,14 +187,19 @@ namespace build2
break;
}
}
+ catch (const io_error& e)
+ {
+ fail << "unable to read from " << dbuf.args0 << " stderr: " << e;
+ }
void
- msvc_filter_link (ifdstream& is, const file& t, otype lt)
+ msvc_filter_link (diag_buffer& dbuf, const file& t, otype lt)
+ try
{
// Filter lines until we encounter something we don't recognize. We also
// have to assume the messages can be translated.
//
- for (string l; getline (is, l); )
+ for (string l; getline (dbuf.is, l); )
{
// " Creating library foo\foo.dll.lib and object foo\foo.dll.exp"
//
@@ -216,12 +224,15 @@ namespace build2
// /INCREMENTAL causes linker to sometimes issue messages but now I
// can't quite reproduce it.
- //
- diag_stream_lock () << l << endl;
+ dbuf.write (l, true /* newline */);
break;
}
}
+ catch (const io_error& e)
+ {
+ fail << "unable to read from " << dbuf.args0 << " stderr: " << e;
+ }
void
msvc_extract_header_search_dirs (const strings& v, dir_paths& r)
@@ -253,6 +264,13 @@ namespace build2
}
else
continue;
+
+ // Ignore relative paths. Or maybe we should warn?
+ //
+ if (d.relative ())
+ continue;
+
+ d.normalize ();
}
catch (const invalid_path& e)
{
@@ -260,10 +278,7 @@ namespace build2
<< o << "'";
}
- // Ignore relative paths. Or maybe we should warn?
- //
- if (!d.relative ())
- r.push_back (move (d));
+ r.push_back (move (d));
}
}
@@ -284,6 +299,13 @@ namespace build2
d = dir_path (o, 9, string::npos);
else
continue;
+
+ // Ignore relative paths. Or maybe we should warn?
+ //
+ if (d.relative ())
+ continue;
+
+ d.normalize ();
}
catch (const invalid_path& e)
{
@@ -291,10 +313,7 @@ namespace build2
<< o << "'";
}
- // Ignore relative paths. Or maybe we should warn?
- //
- if (!d.relative ())
- r.push_back (move (d));
+ r.push_back (move (d));
}
}
@@ -313,7 +332,7 @@ namespace build2
{
try
{
- r.push_back (dir_path (move (d)));
+ r.push_back (dir_path (move (d)).normalize ());
}
catch (const invalid_path&)
{
@@ -326,7 +345,7 @@ namespace build2
// Extract system header search paths from MSVC.
//
pair<dir_paths, size_t> config_module::
- msvc_header_search_dirs (const process_path&, scope& rs) const
+ msvc_header_search_dirs (const compiler_info&, scope& rs) const
{
// MSVC doesn't have any built-in paths and all of them either come from
// the INCLUDE environment variable or are specified explicitly on the
@@ -354,7 +373,7 @@ namespace build2
// Extract system library search paths from MSVC.
//
pair<dir_paths, size_t> config_module::
- msvc_library_search_dirs (const process_path&, scope& rs) const
+ msvc_library_search_dirs (const compiler_info&, scope& rs) const
{
// MSVC doesn't seem to have any built-in paths and all of them either
// come from the LIB environment variable or are specified explicitly on
@@ -379,9 +398,22 @@ namespace build2
// Inspect the file and determine if it is static or import library.
// Return otype::e if it is neither (which we quietly ignore).
//
+ static global_cache<otype> library_type_cache;
+
static otype
library_type (const process_path& ld, const path& l)
{
+ string key;
+ {
+ sha256 cs;
+ cs.append (ld.effect_string ());
+ cs.append (l.string ());
+ key = cs.string ();
+
+ if (const otype* r = library_type_cache.find (key))
+ return *r;
+ }
+
       // There are several reasonably reliable methods to tell whether it is a
// static or import library. One is lib.exe /LIST -- if there aren't any
// .obj members, then it is most likely an import library (it can also
@@ -422,9 +454,9 @@ namespace build2
//
process pr (run_start (ld,
args,
- 0 /* stdin */,
- -1 /* stdout */,
- false /* error */));
+ 0 /* stdin */,
+ -1 /* stdout */,
+ 1 /* stderr (to stdout) */));
bool obj (false), dll (false);
string s;
@@ -447,14 +479,11 @@ namespace build2
// libhello\hello.lib.obj
// hello-0.1.0-a.0.19700101000000.dll
//
- // Archive member name at 746: [...]hello.dll[/][ ]*
- // Archive member name at 8C70: [...]hello.lib.obj[/][ ]*
- //
size_t n (s.size ());
for (; n != 0 && s[n - 1] == ' '; --n) ; // Skip trailing spaces.
- if (n >= 7) // At least ": X.obj" or ": X.dll".
+ if (n >= 5) // At least "X.obj" or "X.dll".
{
n -= 4; // Beginning of extension.
@@ -480,7 +509,7 @@ namespace build2
io = true;
}
- if (!run_finish_code (args, pr, s) || io)
+ if (!run_finish_code (args, pr, s, 2 /* verbosity */) || io)
{
diag_record dr;
dr << warn << "unable to detect " << l << " library type, ignoring" <<
@@ -489,23 +518,25 @@ namespace build2
return otype::e;
}
- if (obj && dll)
+ otype r;
+ if (obj != dll)
+ r = obj ? otype::a : otype::s;
+ else
{
- warn << l << " looks like hybrid static/import library, ignoring";
- return otype::e;
- }
+ if (obj && dll)
+ warn << l << " looks like hybrid static/import library, ignoring";
- if (!obj && !dll)
- {
- warn << l << " looks like empty static or import library, ignoring";
- return otype::e;
+ if (!obj && !dll)
+ warn << l << " looks like empty static or import library, ignoring";
+
+ r = otype::e;
}
- return obj ? otype::a : otype::s;
+ return library_type_cache.insert (move (key), r);
}
template <typename T>
- static T*
+ static pair<T*, bool>
msvc_search_library (const process_path& ld,
const dir_path& d,
const prerequisite_key& p,
@@ -551,20 +582,26 @@ namespace build2
//
timestamp mt (mtime (f));
- if (mt != timestamp_nonexistent && library_type (ld, f) == lt)
+ pair<T*, bool> r (nullptr, true);
+
+ if (mt != timestamp_nonexistent)
{
- // Enter the target.
- //
- T* t;
- common::insert_library (p.scope->ctx, t, name, d, ld, e, exist, trace);
- t->path_mtime (move (f), mt);
- return t;
+ if (library_type (ld, f) == lt)
+ {
+ // Enter the target.
+ //
+ common::insert_library (
+ p.scope->ctx, r.first, name, d, ld, e, exist, trace);
+ r.first->path_mtime (move (f), mt);
+ }
+ else
+ r.second = false; // Don't search for binless.
}
- return nullptr;
+ return r;
}
- liba* common::
+ pair<bin::liba*, bool> common::
msvc_search_static (const process_path& ld,
const dir_path& d,
const prerequisite_key& p,
@@ -572,14 +609,21 @@ namespace build2
{
tracer trace (x, "msvc_search_static");
- liba* r (nullptr);
+ liba* a (nullptr);
+ bool b (true);
- auto search = [&r, &ld, &d, &p, exist, &trace] (
+ auto search = [&a, &b, &ld, &d, &p, exist, &trace] (
const char* pf, const char* sf) -> bool
{
- r = msvc_search_library<liba> (
- ld, d, p, otype::a, pf, sf, exist, trace);
- return r != nullptr;
+ pair<liba*, bool> r (msvc_search_library<liba> (
+ ld, d, p, otype::a, pf, sf, exist, trace));
+
+ if (r.first != nullptr)
+ a = r.first;
+ else if (!r.second)
+ b = false;
+
+ return a != nullptr;
};
// Try:
@@ -592,10 +636,10 @@ namespace build2
search ("", "") ||
search ("lib", "") ||
search ("", "lib") ||
- search ("", "_static") ? r : nullptr;
+ search ("", "_static") ? make_pair (a, true) : make_pair (nullptr, b);
}
- libs* common::
+ pair<bin::libs*, bool> common::
msvc_search_shared (const process_path& ld,
const dir_path& d,
const prerequisite_key& pk,
@@ -606,12 +650,14 @@ namespace build2
assert (pk.scope != nullptr);
libs* s (nullptr);
+ bool b (true);
- auto search = [&s, &ld, &d, &pk, exist, &trace] (
+ auto search = [&s, &b, &ld, &d, &pk, exist, &trace] (
const char* pf, const char* sf) -> bool
{
- if (libi* i = msvc_search_library<libi> (
- ld, d, pk, otype::s, pf, sf, exist, trace))
+ pair<libi*, bool> r (msvc_search_library<libi> (
+ ld, d, pk, otype::s, pf, sf, exist, trace));
+ if (r.first != nullptr)
{
ulock l (
insert_library (
@@ -619,6 +665,8 @@ namespace build2
if (!exist)
{
+ libi* i (r.first);
+
if (l.owns_lock ())
{
s->adhoc_member = i; // We are first.
@@ -632,6 +680,8 @@ namespace build2
s->path_mtime (path (), i->mtime ());
}
}
+ else if (!r.second)
+ b = false;
return s != nullptr;
};
@@ -644,7 +694,7 @@ namespace build2
return
search ("", "") ||
search ("lib", "") ||
- search ("", "dll") ? s : nullptr;
+ search ("", "dll") ? make_pair (s, true) : make_pair (nullptr, b);
}
}
}
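
The library_type_cache above relies on a global_cache with a find()/insert() interface keyed by a string. A toy stand-in consistent with the way it is used in library_type() (the real global_cache in libbuild2 also handles process-wide cleanup and is not reproduced here; this only illustrates the shape of the calls):

    template <typename T>
    class global_cache_sketch
    {
    public:
      const T*
      find (const string& k) const
      {
        std::lock_guard<std::mutex> l (mutex_);
        auto i (map_.find (k));
        return i != map_.end () ? &i->second : nullptr;
      }

      const T&
      insert (string k, T v)
      {
        std::lock_guard<std::mutex> l (mutex_);
        return map_.emplace (std::move (k), std::move (v)).first->second;
      }

    private:
      mutable std::mutex mutex_;
      std::map<string, T> map_;
    };
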
diff --git a/libbuild2/cc/parser.cxx b/libbuild2/cc/parser.cxx
index dc5093f..f62847e 100644
--- a/libbuild2/cc/parser.cxx
+++ b/libbuild2/cc/parser.cxx
@@ -15,9 +15,11 @@ namespace build2
using type = token_type;
void parser::
- parse (ifdstream& is, const path_name& in, unit& u)
+ parse (ifdstream& is, const path_name& in, unit& u, const compiler_id& cid)
{
- lexer l (is, in);
+ cid_ = &cid;
+
+ lexer l (is, in, true /* preprocessed */);
l_ = &l;
u_ = &u;
@@ -82,6 +84,12 @@ namespace build2
// to call it __import) or it can have a special attribute (GCC
// currently marks it with [[__translated]]).
//
+ // Similarly, MSVC drops the `module;` marker and replaces all
+ // other `module` keywords with `__preprocessed_module`.
+ //
+ // Clang doesn't appear to rewrite anything, at least as of
+ // version 18.
+ //
if (bb == 0 && t.first)
{
const string& id (t.value); // Note: tracks t.
@@ -102,7 +110,9 @@ namespace build2
// Fall through.
}
- if (id == "module")
+ if (id == "module" ||
+ (cid_->type == compiler_type::msvc &&
+ id == "__preprocessed_module"))
{
location_value l (get_location (t));
l_->next (t);
@@ -113,7 +123,9 @@ namespace build2
else
n = false;
}
- else if (id == "import" /*|| id == "__import"*/)
+ else if (id == "import" /* ||
+ (cid_->type == compiler_type::gcc &&
+ id == "__import")*/)
{
l_->next (t);
@@ -181,7 +193,7 @@ namespace build2
//
pair<string, bool> np (parse_module_name (t, true /* partition */));
- // Should be {}-balanced.
+ // Skip attributes (should be {}-balanced).
//
for (;
t.type != type::eos && t.type != type::semi && !t.first;
@@ -262,7 +274,7 @@ namespace build2
return;
}
- // Should be {}-balanced.
+ // Skip attributes (should be {}-balanced).
//
for (;
t.type != type::eos && t.type != type::semi && !t.first;
diff --git a/libbuild2/cc/parser.hxx b/libbuild2/cc/parser.hxx
index 1fbf1a3..0c2eb2d 100644
--- a/libbuild2/cc/parser.hxx
+++ b/libbuild2/cc/parser.hxx
@@ -10,6 +10,7 @@
#include <libbuild2/diagnostics.hxx>
#include <libbuild2/cc/types.hxx>
+#include <libbuild2/cc/guess.hxx> // compiler_id
namespace build2
{
@@ -23,16 +24,19 @@ namespace build2
class parser
{
public:
+ // The compiler_id argument should identify the compiler that has done
+ // the preprocessing.
+ //
unit
- parse (ifdstream& is, const path_name& n)
+ parse (ifdstream& is, const path_name& n, const compiler_id& cid)
{
unit r;
- parse (is, n, r);
+ parse (is, n, r, cid);
return r;
}
void
- parse (ifdstream&, const path_name&, unit&);
+ parse (ifdstream&, const path_name&, unit&, const compiler_id&);
private:
void
@@ -54,6 +58,7 @@ namespace build2
string checksum; // Translation unit checksum.
private:
+ const compiler_id* cid_;
lexer* l_;
unit* u_;
diff --git a/libbuild2/cc/parser.test.cxx b/libbuild2/cc/parser.test.cxx
index 1d5930a..2270d32 100644
--- a/libbuild2/cc/parser.test.cxx
+++ b/libbuild2/cc/parser.test.cxx
@@ -44,7 +44,7 @@ namespace build2
}
parser p;
- unit u (p.parse (is, in));
+ unit u (p.parse (is, in, compiler_id (compiler_type::gcc, "")));
switch (u.type)
{
diff --git a/libbuild2/cc/pkgconfig-libpkg-config.cxx b/libbuild2/cc/pkgconfig-libpkg-config.cxx
new file mode 100644
index 0000000..ecbc019
--- /dev/null
+++ b/libbuild2/cc/pkgconfig-libpkg-config.cxx
@@ -0,0 +1,271 @@
+// file : libbuild2/cc/pkgconfig-libpkg-config.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BUILD2_BOOTSTRAP
+
+#include <libbuild2/cc/pkgconfig.hxx>
+
+#include <new> // std::bad_alloc
+
+#include <libbuild2/diagnostics.hxx>
+
+namespace build2
+{
+ namespace cc
+ {
+ // The package dependency traversal depth limit.
+ //
+ static const int max_depth = 100;
+
+ static void
+ error_handler (unsigned int,
+ const char* file,
+ size_t line,
+ const char* msg,
+ const pkg_config_client_t*,
+ const void*)
+ {
+ if (file != nullptr)
+ {
+ path_name n (file);
+ const location l (n, static_cast<uint64_t> (line));
+ error (l) << msg;
+ }
+ else
+ error << msg;
+ }
+
+ // Deleters.
+ //
+ struct fragments_deleter
+ {
+ void operator() (pkg_config_list_t* f) const
+ {
+ pkg_config_fragment_free (f);
+ }
+ };
+
+ // Convert fragments to strings. Skip the -I/-L options that refer to
+ // system directories.
+ //
+ static strings
+ to_strings (const pkg_config_list_t& frags,
+ char type,
+ const pkg_config_list_t& sysdirs)
+ {
+ assert (type == 'I' || type == 'L');
+
+ strings r;
+ auto add = [&r] (const pkg_config_fragment_t* frag)
+ {
+ string s;
+ if (frag->type != '\0')
+ {
+ s += '-';
+ s += frag->type;
+ }
+
+ s += frag->data;
+ r.push_back (move (s));
+ };
+
+ // Option that is separated from its value, for example:
+ //
+ // -I /usr/lib
+ //
+ const pkg_config_fragment_t* opt (nullptr);
+
+ pkg_config_node_t *node;
+ LIBPKG_CONFIG_FOREACH_LIST_ENTRY(frags.head, node)
+ {
+ auto frag (static_cast<const pkg_config_fragment_t*> (node->data));
+
+        // Add the separated option and directory, unless the latter is a
+        // system one.
+ //
+ if (opt != nullptr)
+ {
+ assert (frag->type == '\0'); // See pkg_config_fragment_add().
+
+ if (!pkg_config_path_match_list (frag->data, &sysdirs))
+ {
+ add (opt);
+ add (frag);
+ }
+
+ opt = nullptr;
+ continue;
+ }
+
+ // Skip the -I/-L option if it refers to a system directory.
+ //
+ if (frag->type == type)
+ {
+        // The option is separated from its value, which will (presumably)
+        // follow.
+ //
+ if (*frag->data == '\0')
+ {
+ opt = frag;
+ continue;
+ }
+
+ if (pkg_config_path_match_list (frag->data, &sysdirs))
+ continue;
+ }
+
+ add (frag);
+ }
+
+ if (opt != nullptr) // Add the dangling option.
+ add (opt);
+
+ return r;
+ }
+
+    // Note that some libpkg-config functions can return NULL if they fail to
+    // allocate the required memory. However, we do not check the returned
+    // values for NULL since the library itself doesn't do so before filling
+    // the allocated structures, so such a complication on our side would be
+    // pointless. Also, for some functions the NULL result has special
+    // semantics, for example "not found". @@ TODO: can we fix this?
+ // This is now somewhat addressed, see the eflags argument in
+ // pkg_config_pkg_find().
+ //
+ pkgconfig::
+ pkgconfig (path_type p,
+ const dir_paths& pc_dirs,
+ const dir_paths& sys_lib_dirs,
+ const dir_paths& sys_hdr_dirs)
+ : path (move (p))
+ {
+ auto add_dirs = [] (pkg_config_list_t& dir_list,
+ const dir_paths& dirs,
+ bool suppress_dups)
+ {
+ for (const auto& d: dirs)
+ pkg_config_path_add (d.string ().c_str (), &dir_list, suppress_dups);
+ };
+
+ // Initialize the client handle.
+ //
+ // Note: omit initializing the filters from environment/defaults.
+ //
+ unique_ptr<pkg_config_client_t, void (*) (pkg_config_client_t*)> c (
+ pkg_config_client_new (&error_handler,
+ nullptr /* handler_data */,
+ false /* init_filters */),
+ [] (pkg_config_client_t* c) {pkg_config_client_free (c);});
+
+ if (c == nullptr)
+ throw std::bad_alloc ();
+
+ add_dirs (c->filter_libdirs, sys_lib_dirs, false /* suppress_dups */);
+ add_dirs (c->filter_includedirs, sys_hdr_dirs, false /* suppress_dups */);
+
+ // Note that the loaded file directory is added to the (for now empty)
+ // .pc file search list. Also note that loading of the dependency
+ // packages is delayed until the flags retrieval, and their file
+ // directories are not added to the search list.
+ //
+ // @@ Hm, is there a way to force this resolution? But we may not
+ // need this (e.g., only loading from variables).
+ //
+ unsigned int e;
+ pkg_ = pkg_config_pkg_find (c.get (), path.string ().c_str (), &e);
+
+ if (pkg_ == nullptr)
+ {
+ if (e == LIBPKG_CONFIG_ERRF_OK)
+ fail << "package '" << path << "' not found";
+ else
+ // Diagnostics should have already been issued except for allocation
+ // errors.
+ //
+ fail << "unable to load package '" << path << "'";
+ }
+
+ // Add the .pc file search directories.
+ //
+ assert (c->dir_list.length == 1); // Package file directory (see above).
+ add_dirs (c->dir_list, pc_dirs, true /* suppress_dups */);
+
+ client_ = c.release ();
+ }
+
+ void pkgconfig::
+ free ()
+ {
+ assert (client_ != nullptr && pkg_ != nullptr);
+
+ pkg_config_pkg_unref (client_, pkg_);
+ pkg_config_client_free (client_);
+ }
+
+ strings pkgconfig::
+ cflags (bool stat) const
+ {
+ assert (client_ != nullptr); // Must not be empty.
+
+ pkg_config_client_set_flags (
+ client_,
+ // Walk through the private package dependencies (Requires.private)
+ // besides the public ones while collecting the flags. Note that we do
+ // this for both static and shared linking. @@ Hm, I wonder why...?
+ //
+ LIBPKG_CONFIG_PKG_PKGF_SEARCH_PRIVATE |
+
+ // Collect flags from Cflags.private besides those from Cflags for the
+ // static linking.
+ //
+ (stat
+ ? LIBPKG_CONFIG_PKG_PKGF_ADD_PRIVATE_FRAGMENTS
+ : 0));
+
+ pkg_config_list_t f = LIBPKG_CONFIG_LIST_INITIALIZER; // Empty list.
+ int e (pkg_config_pkg_cflags (client_, pkg_, &f, max_depth));
+
+ if (e != LIBPKG_CONFIG_ERRF_OK)
+ throw failed (); // Assume the diagnostics is issued.
+
+ unique_ptr<pkg_config_list_t, fragments_deleter> fd (&f);
+ return to_strings (f, 'I', client_->filter_includedirs);
+ }
+
+ strings pkgconfig::
+ libs (bool stat) const
+ {
+ assert (client_ != nullptr); // Must not be empty.
+
+ pkg_config_client_set_flags (
+ client_,
+ // Additionally collect flags from the private dependency packages
+ // (see above) and from the Libs.private value for the static linking.
+ //
+ (stat
+ ? LIBPKG_CONFIG_PKG_PKGF_SEARCH_PRIVATE |
+ LIBPKG_CONFIG_PKG_PKGF_ADD_PRIVATE_FRAGMENTS
+ : 0));
+
+ pkg_config_list_t f = LIBPKG_CONFIG_LIST_INITIALIZER; // Empty list.
+ int e (pkg_config_pkg_libs (client_, pkg_, &f, max_depth));
+
+ if (e != LIBPKG_CONFIG_ERRF_OK)
+ throw failed (); // Assume the diagnostics is issued.
+
+ unique_ptr<pkg_config_list_t, fragments_deleter> fd (&f);
+ return to_strings (f, 'L', client_->filter_libdirs);
+ }
+
+ optional<string> pkgconfig::
+ variable (const char* name) const
+ {
+ assert (client_ != nullptr); // Must not be empty.
+
+ const char* r (pkg_config_tuple_find (client_, &pkg_->vars, name));
+ return r != nullptr ? optional<string> (r) : nullopt;
+ }
+ }
+}
+
+#endif // BUILD2_BOOTSTRAP
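
A minimal usage sketch for the wrapper defined above (the .pc path and the empty directory lists are made up for illustration; the real caller is pkgconfig_load() in pkgconfig.cxx, and whether free() is called explicitly or from a destructor is defined in pkgconfig.hxx, which is not shown here):

    pkgconfig pc (path ("/usr/lib/pkgconfig/libfoo.pc"),
                  dir_paths () /* pc_dirs */,
                  dir_paths () /* sys_lib_dirs */,
                  dir_paths () /* sys_hdr_dirs */);

    strings cf (pc.cflags (false /* stat */)); // Shared variant.
    strings lf (pc.libs   (true  /* stat */)); // Static variant.

    if (optional<string> v = pc.variable ("pcfiledir"))
      text << "pcfiledir: " << *v;

    pc.free (); // Release the libpkg-config client/package handles.
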
diff --git a/libbuild2/cc/pkgconfig-libpkgconf.cxx b/libbuild2/cc/pkgconfig-libpkgconf.cxx
new file mode 100644
index 0000000..f3754d3
--- /dev/null
+++ b/libbuild2/cc/pkgconfig-libpkgconf.cxx
@@ -0,0 +1,355 @@
+// file : libbuild2/cc/pkgconfig-libpkgconf.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BUILD2_BOOTSTRAP
+
+#include <libbuild2/cc/pkgconfig.hxx>
+
+#include <libbuild2/diagnostics.hxx>
+
+// Note that the libpkgconf library did not always provide a version macro
+// that we could use to compile the code conditionally against different API
+// versions. Thus, we need to sense the pkgconf_client_new() function
+// signature ourselves in order to call it properly.
+//
+namespace details
+{
+ void*
+ pkgconf_cross_personality_default (); // Never called.
+}
+
+using namespace details;
+
+template <typename H>
+static inline pkgconf_client_t*
+call_pkgconf_client_new (pkgconf_client_t* (*f) (H, void*),
+ H error_handler,
+ void* error_handler_data)
+{
+ return f (error_handler, error_handler_data);
+}
+
+template <typename H, typename P>
+static inline pkgconf_client_t*
+call_pkgconf_client_new (pkgconf_client_t* (*f) (H, void*, P),
+ H error_handler,
+ void* error_handler_data)
+{
+ return f (error_handler,
+ error_handler_data,
+ ::pkgconf_cross_personality_default ());
+}
+
+namespace build2
+{
+ namespace cc
+ {
+ // The libpkgconf library is not thread-safe, even on the pkgconf_client_t
+    // level (see issue #128 for details). While it seems that the obvious
+    // thread-safety issues have been fixed, the default personality
+    // initialization is still not thread-safe. So let's keep the mutex for
+    // now in order not to introduce potential issues.
+ //
+ static mutex pkgconf_mutex;
+
+ // The package dependency traversal depth limit.
+ //
+ static const int pkgconf_max_depth = 100;
+
+ // Normally the error_handler() callback can be called multiple times to
+ // report a single error (once per message line), to produce a multi-line
+ // message like this:
+ //
+ // Package foo was not found in the pkg-config search path.\n
+ // Perhaps you should add the directory containing `foo.pc'\n
+ // to the PKG_CONFIG_PATH environment variable\n
+ // Package 'foo', required by 'bar', not found\n
+ //
+ // For the above example callback will be called 4 times. To suppress all
+ // the junk we will use PKGCONF_PKG_PKGF_SIMPLIFY_ERRORS to get just:
+ //
+ // Package 'foo', required by 'bar', not found\n
+ //
+ // Also disable merging options like -framework into a single fragment, if
+ // possible.
+ //
+ static const int pkgconf_flags =
+ PKGCONF_PKG_PKGF_SIMPLIFY_ERRORS
+ | PKGCONF_PKG_PKGF_SKIP_PROVIDES
+#ifdef PKGCONF_PKG_PKGF_DONT_MERGE_SPECIAL_FRAGMENTS
+ | PKGCONF_PKG_PKGF_DONT_MERGE_SPECIAL_FRAGMENTS
+#endif
+ ;
+
+#if defined(LIBPKGCONF_VERSION) && LIBPKGCONF_VERSION >= 10900
+ static bool
+ pkgconf_error_handler (const char* msg,
+ const pkgconf_client_t*,
+ void*)
+#else
+ static bool
+ pkgconf_error_handler (const char* msg,
+ const pkgconf_client_t*,
+ const void*)
+#endif
+ {
+ error << runtime_error (msg); // Sanitize the message (trailing dot).
+ return true;
+ }
+
+ // Deleters. Note that they are thread-safe.
+ //
+ struct fragments_deleter
+ {
+ void operator() (pkgconf_list_t* f) const {pkgconf_fragment_free (f);}
+ };
+
+ // Convert fragments to strings. Skip the -I/-L options that refer to system
+ // directories.
+ //
+ static strings
+ to_strings (const pkgconf_list_t& frags,
+ char type,
+ const pkgconf_list_t& sysdirs)
+ {
+ assert (type == 'I' || type == 'L');
+
+ strings r;
+
+ auto add = [&r] (const pkgconf_fragment_t* frag)
+ {
+ string s;
+ if (frag->type != '\0')
+ {
+ s += '-';
+ s += frag->type;
+ }
+
+ s += frag->data;
+ r.push_back (move (s));
+ };
+
+ // Option that is separated from its value, for example:
+ //
+ // -I /usr/lib
+ //
+ const pkgconf_fragment_t* opt (nullptr);
+
+ pkgconf_node_t *node;
+ PKGCONF_FOREACH_LIST_ENTRY(frags.head, node)
+ {
+ auto frag (static_cast<const pkgconf_fragment_t*> (node->data));
+
+        // Add the separated option and directory, unless the latter is a
+        // system one.
+ //
+ if (opt != nullptr)
+ {
+ // Note that we should restore the directory path that was
+ // (mis)interpreted as an option, for example:
+ //
+ // -I -Ifoo
+ //
+ // In the above example option '-I' is followed by directory
+ // '-Ifoo', which is represented by libpkgconf library as fragment
+ // 'foo' with type 'I'.
+ //
+ if (!pkgconf_path_match_list (
+ frag->type == '\0'
+ ? frag->data
+ : (string ({'-', frag->type}) + frag->data).c_str (),
+ &sysdirs))
+ {
+ add (opt);
+ add (frag);
+ }
+
+ opt = nullptr;
+ continue;
+ }
+
+ // Skip the -I/-L option if it refers to a system directory.
+ //
+ if (frag->type == type)
+ {
+        // The option is separated from its value, which will (presumably)
+        // follow.
+ //
+ if (*frag->data == '\0')
+ {
+ opt = frag;
+ continue;
+ }
+
+ if (pkgconf_path_match_list (frag->data, &sysdirs))
+ continue;
+ }
+
+ add (frag);
+ }
+
+ if (opt != nullptr) // Add the dangling option.
+ add (opt);
+
+ return r;
+ }
+
+ // Note that some libpkgconf functions can potentially return NULL,
+ // failing to allocate the required memory block. However, we will not
+ // check the returned value for NULL as the library doesn't do so, prior
+ // to filling the allocated structures. So such a code complication on our
+ // side would be useless. Also, for some functions the NULL result has a
+ // special semantics, for example "not found".
+ //
+ pkgconfig::
+ pkgconfig (path_type p,
+ const dir_paths& pc_dirs,
+ const dir_paths& sys_lib_dirs,
+ const dir_paths& sys_hdr_dirs)
+ : path (move (p))
+ {
+ auto add_dirs = [] (pkgconf_list_t& dir_list,
+ const dir_paths& dirs,
+ bool suppress_dups,
+ bool cleanup = false)
+ {
+ if (cleanup)
+ {
+ pkgconf_path_free (&dir_list);
+ dir_list = PKGCONF_LIST_INITIALIZER;
+ }
+
+ for (const auto& d: dirs)
+ pkgconf_path_add (d.string ().c_str (), &dir_list, suppress_dups);
+ };
+
+ mlock l (pkgconf_mutex);
+
+ // Initialize the client handle.
+ //
+ unique_ptr<pkgconf_client_t, void (*) (pkgconf_client_t*)> c (
+ call_pkgconf_client_new (&pkgconf_client_new,
+ pkgconf_error_handler,
+ nullptr /* handler_data */),
+ [] (pkgconf_client_t* c) {pkgconf_client_free (c);});
+
+ pkgconf_client_set_flags (c.get (), pkgconf_flags);
+
+ // Note that the system header and library directory lists are
+ // automatically pre-filled by the pkgconf_client_new() call (see
+ // above). We will re-create these lists from scratch.
+ //
+ add_dirs (c->filter_libdirs,
+ sys_lib_dirs,
+ false /* suppress_dups */,
+ true /* cleanup */);
+
+ add_dirs (c->filter_includedirs,
+ sys_hdr_dirs,
+ false /* suppress_dups */,
+ true /* cleanup */);
+
+ // Note that the loaded file directory is added to the (yet empty)
+ // search list. Also note that loading of the prerequisite packages is
+ // delayed until flags retrieval, and their file directories are not
+ // added to the search list.
+ //
+ pkg_ = pkgconf_pkg_find (c.get (), path.string ().c_str ());
+
+ if (pkg_ == nullptr)
+ fail << "package '" << path << "' not found or invalid";
+
+ // Add the .pc file search directories.
+ //
+ assert (c->dir_list.length == 1); // Package file directory (see above).
+ add_dirs (c->dir_list, pc_dirs, true /* suppress_dups */);
+
+ client_ = c.release ();
+ }
+
+ void pkgconfig::
+ free ()
+ {
+ assert (pkg_ != nullptr);
+
+ mlock l (pkgconf_mutex);
+ pkgconf_pkg_unref (client_, pkg_);
+ pkgconf_client_free (client_);
+ }
+
+ strings pkgconfig::
+ cflags (bool stat) const
+ {
+ assert (client_ != nullptr); // Must not be empty.
+
+ mlock l (pkgconf_mutex);
+
+ pkgconf_client_set_flags (
+ client_,
+ pkgconf_flags |
+
+ // Walk through the private package dependencies (Requires.private)
+ // besides the public ones while collecting the flags. Note that we do
+ // this for both static and shared linking.
+ //
+ PKGCONF_PKG_PKGF_SEARCH_PRIVATE |
+
+ // Collect flags from Cflags.private besides those from Cflags for the
+ // static linking.
+ //
+ (stat
+ ? PKGCONF_PKG_PKGF_MERGE_PRIVATE_FRAGMENTS
+ : 0));
+
+ pkgconf_list_t f = PKGCONF_LIST_INITIALIZER; // Aggregate initialization.
+ int e (pkgconf_pkg_cflags (client_, pkg_, &f, pkgconf_max_depth));
+
+ if (e != PKGCONF_PKG_ERRF_OK)
+ throw failed (); // Assume the diagnostics is issued.
+
+ unique_ptr<pkgconf_list_t, fragments_deleter> fd (&f); // Auto-deleter.
+ return to_strings (f, 'I', client_->filter_includedirs);
+ }
+
+ strings pkgconfig::
+ libs (bool stat) const
+ {
+ assert (client_ != nullptr); // Must not be empty.
+
+ mlock l (pkgconf_mutex);
+
+ pkgconf_client_set_flags (
+ client_,
+ pkgconf_flags |
+
+ // Additionally collect flags from the private dependency packages
+ // (see above) and from the Libs.private value for the static linking.
+ //
+ (stat
+ ? PKGCONF_PKG_PKGF_SEARCH_PRIVATE |
+ PKGCONF_PKG_PKGF_MERGE_PRIVATE_FRAGMENTS
+ : 0));
+
+ pkgconf_list_t f = PKGCONF_LIST_INITIALIZER; // Aggregate initialization.
+ int e (pkgconf_pkg_libs (client_, pkg_, &f, pkgconf_max_depth));
+
+ if (e != PKGCONF_PKG_ERRF_OK)
+ throw failed (); // Assume the diagnostics is issued.
+
+ unique_ptr<pkgconf_list_t, fragments_deleter> fd (&f); // Auto-deleter.
+ return to_strings (f, 'L', client_->filter_libdirs);
+ }
+
+ optional<string> pkgconfig::
+ variable (const char* name) const
+ {
+ assert (client_ != nullptr); // Must not be empty.
+
+ mlock l (pkgconf_mutex);
+ const char* r (pkgconf_tuple_find (client_, &pkg_->vars, name));
+ return r != nullptr ? optional<string> (r) : nullopt;
+ }
+ }
+}
+
+#endif // BUILD2_BOOTSTRAP
diff --git a/libbuild2/cc/pkgconfig.cxx b/libbuild2/cc/pkgconfig.cxx
index 617834e..046fbc8 100644
--- a/libbuild2/cc/pkgconfig.cxx
+++ b/libbuild2/cc/pkgconfig.cxx
@@ -1,13 +1,6 @@
// file : libbuild2/cc/pkgconfig.cxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-// In order not to complicate the bootstrap procedure with libpkgconf building
-// exclude functionality that involves reading of .pc files.
-//
-#ifndef BUILD2_BOOTSTRAP
-# include <libpkgconf/libpkgconf.h>
-#endif
-
#include <libbuild2/scope.hxx>
#include <libbuild2/target.hxx>
#include <libbuild2/context.hxx>
@@ -25,436 +18,25 @@
#include <libbuild2/cc/utility.hxx>
#include <libbuild2/cc/common.hxx>
+#include <libbuild2/cc/pkgconfig.hxx>
#include <libbuild2/cc/compile-rule.hxx>
#include <libbuild2/cc/link-rule.hxx>
-#ifndef BUILD2_BOOTSTRAP
-
-// Note that the libpkgconf library doesn't provide the version macro that we
-// could use to compile the code conditionally against different API versions.
-// Thus, we need to sense the pkgconf_client_new() function signature
-// ourselves to call it properly.
-//
-namespace details
-{
- void*
- pkgconf_cross_personality_default (); // Never called.
-}
-
-using namespace details;
-
-template <typename H>
-static inline pkgconf_client_t*
-call_pkgconf_client_new (pkgconf_client_t* (*f) (H, void*),
- H error_handler,
- void* error_handler_data)
-{
- return f (error_handler, error_handler_data);
-}
-
-template <typename H, typename P>
-static inline pkgconf_client_t*
-call_pkgconf_client_new (pkgconf_client_t* (*f) (H, void*, P),
- H error_handler,
- void* error_handler_data)
-{
- return f (error_handler,
- error_handler_data,
- ::pkgconf_cross_personality_default ());
-}
-
-#endif
-
-using namespace std;
-using namespace butl;
+using namespace std; // VC16
namespace build2
{
-#ifndef BUILD2_BOOTSTRAP
-
- // Load package information from a .pc file. Filter out the -I/-L options
- // that refer to system directories. This makes sure all the system search
- // directories are "pushed" to the back which minimizes the chances of
- // picking up wrong (e.g., old installed version) header/library.
- //
- // Note that the prerequisite package .pc files search order is as follows:
- //
- // - in directory of the specified file
- // - in pc_dirs directories (in the natural order)
- //
- class pkgconf
- {
- public:
- using path_type = build2::path;
-
- path_type path;
-
- public:
- explicit
- pkgconf (path_type,
- const dir_paths& pc_dirs,
- const dir_paths& sys_hdr_dirs,
- const dir_paths& sys_lib_dirs);
-
- // Create a special empty object. Querying package information on such
- // an object is illegal.
- //
- pkgconf () = default;
-
- ~pkgconf ();
-
- // Movable-only type.
- //
- pkgconf (pkgconf&& p)
- : path (move (p.path)),
- client_ (p.client_),
- pkg_ (p.pkg_)
- {
- p.client_ = nullptr;
- p.pkg_ = nullptr;
- }
-
- pkgconf&
- operator= (pkgconf&& p)
- {
- if (this != &p)
- {
- this->~pkgconf ();
- new (this) pkgconf (move (p)); // Assume noexcept move-construction.
- }
- return *this;
- }
-
- pkgconf (const pkgconf&) = delete;
- pkgconf& operator= (const pkgconf&) = delete;
-
- strings
- cflags (bool stat) const;
-
- strings
- libs (bool stat) const;
-
- string
- variable (const char*) const;
-
- string
- variable (const string& s) const {return variable (s.c_str ());}
-
- private:
- // Keep them as raw pointers not to deal with API thread-unsafety in
- // deleters and introducing additional mutex locks.
- //
- pkgconf_client_t* client_ = nullptr;
- pkgconf_pkg_t* pkg_ = nullptr;
- };
-
- // Currently the library is not thread-safe, even on the pkgconf_client_t
- // level (see issue #128 for details).
- //
- // @@ An update: seems that the obvious thread-safety issues are fixed.
- // However, let's keep mutex locking for now not to introduce potential
- // issues before we make sure that there are no other ones.
- //
- static mutex pkgconf_mutex;
-
- // The package dependency traversal depth limit.
- //
- static const int pkgconf_max_depth = 100;
-
- // Normally the error_handler() callback can be called multiple times to
- // report a single error (once per message line), to produce a multi-line
- // message like this:
- //
- // Package foo was not found in the pkg-config search path.\n
- // Perhaps you should add the directory containing `foo.pc'\n
- // to the PKG_CONFIG_PATH environment variable\n
- // Package 'foo', required by 'bar', not found\n
- //
- // For the above example callback will be called 4 times. To suppress all the
- // junk we will use PKGCONF_PKG_PKGF_SIMPLIFY_ERRORS to get just:
- //
- // Package 'foo', required by 'bar', not found\n
- //
- // Also disable merging options like -framework into a single fragment, if
- // possible.
- //
- static const int pkgconf_flags =
- PKGCONF_PKG_PKGF_SIMPLIFY_ERRORS
-#ifdef PKGCONF_PKG_PKGF_DONT_MERGE_SPECIAL_FRAGMENTS
- | PKGCONF_PKG_PKGF_DONT_MERGE_SPECIAL_FRAGMENTS
-#endif
- ;
-
- static bool
- pkgconf_error_handler (const char* msg, const pkgconf_client_t*, const void*)
- {
- error << runtime_error (msg); // Sanitize the message.
- return true;
- }
-
- // Deleters. Note that they are thread-safe.
- //
- struct fragments_deleter
- {
- void operator() (pkgconf_list_t* f) const {pkgconf_fragment_free (f);}
- };
-
- // Convert fragments to strings. Skip the -I/-L options that refer to system
- // directories.
- //
- static strings
- to_strings (const pkgconf_list_t& frags,
- char type,
- const pkgconf_list_t& sysdirs)
- {
- assert (type == 'I' || type == 'L');
-
- strings r;
-
- auto add = [&r] (const pkgconf_fragment_t* frag)
- {
- string s;
- if (frag->type != '\0')
- {
- s += '-';
- s += frag->type;
- }
-
- s += frag->data;
- r.push_back (move (s));
- };
-
- // Option that is separated from its value, for example:
- //
- // -I /usr/lib
- //
- const pkgconf_fragment_t* opt (nullptr);
-
- pkgconf_node_t *node;
- PKGCONF_FOREACH_LIST_ENTRY(frags.head, node)
- {
- auto frag (static_cast<const pkgconf_fragment_t*> (node->data));
-
- // Add the separated option and directory, unless the latest is a system
- // one.
- //
- if (opt != nullptr)
- {
- // Note that we should restore the directory path that was
- // (mis)interpreted as an option, for example:
- //
- // -I -Ifoo
- //
- // In the above example option '-I' is followed by directory '-Ifoo',
- // which is represented by libpkgconf library as fragment 'foo' with
- // type 'I'.
- //
- if (!pkgconf_path_match_list (
- frag->type == '\0'
- ? frag->data
- : (string ({'-', frag->type}) + frag->data).c_str (),
- &sysdirs))
- {
- add (opt);
- add (frag);
- }
-
- opt = nullptr;
- continue;
- }
-
- // Skip the -I/-L option if it refers to a system directory.
- //
- if (frag->type == type)
- {
- // The option is separated from a value, that will (presumably) follow.
- //
- if (*frag->data == '\0')
- {
- opt = frag;
- continue;
- }
-
- if (pkgconf_path_match_list (frag->data, &sysdirs))
- continue;
- }
-
- add (frag);
- }
-
- if (opt != nullptr) // Add the dangling option.
- add (opt);
-
- return r;
- }
-
- // Note that some libpkgconf functions can potentially return NULL, failing
- // to allocate the required memory block. However, we will not check the
- // returned value for NULL as the library doesn't do so, prior to filling the
- // allocated structures. So such a code complication on our side would be
- // useless. Also, for some functions the NULL result has a special semantics,
- // for example "not found".
- //
- pkgconf::
- pkgconf (path_type p,
- const dir_paths& pc_dirs,
- const dir_paths& sys_lib_dirs,
- const dir_paths& sys_hdr_dirs)
- : path (move (p))
- {
- auto add_dirs = [] (pkgconf_list_t& dir_list,
- const dir_paths& dirs,
- bool suppress_dups,
- bool cleanup = false)
- {
- if (cleanup)
- {
- pkgconf_path_free (&dir_list);
- dir_list = PKGCONF_LIST_INITIALIZER;
- }
-
- for (const auto& d: dirs)
- pkgconf_path_add (d.string ().c_str (), &dir_list, suppress_dups);
- };
-
- mlock l (pkgconf_mutex);
-
- // Initialize the client handle.
- //
- unique_ptr<pkgconf_client_t, void (*) (pkgconf_client_t*)> c (
- call_pkgconf_client_new (&pkgconf_client_new,
- pkgconf_error_handler,
- nullptr /* handler_data */),
- [] (pkgconf_client_t* c) {pkgconf_client_free (c);});
-
- pkgconf_client_set_flags (c.get (), pkgconf_flags);
-
- // Note that the system header and library directory lists are
- // automatically pre-filled by the pkgconf_client_new() call (see above).
- // We will re-create these lists from scratch.
- //
- add_dirs (c->filter_libdirs,
- sys_lib_dirs,
- false /* suppress_dups */,
- true /* cleanup */);
-
- add_dirs (c->filter_includedirs,
- sys_hdr_dirs,
- false /* suppress_dups */,
- true /* cleanup */);
-
- // Note that the loaded file directory is added to the (yet empty) search
- // list. Also note that loading of the prerequisite packages is delayed
- // until flags retrieval, and their file directories are not added to the
- // search list.
- //
- pkg_ = pkgconf_pkg_find (c.get (), path.string ().c_str ());
-
- if (pkg_ == nullptr)
- fail << "package '" << path << "' not found or invalid";
-
- // Add the .pc file search directories.
- //
- assert (c->dir_list.length == 1); // Package file directory (see above).
- add_dirs (c->dir_list, pc_dirs, true /* suppress_dups */);
-
- client_ = c.release ();
- }
-
- pkgconf::
- ~pkgconf ()
- {
- if (client_ != nullptr) // Not empty.
- {
- assert (pkg_ != nullptr);
-
- mlock l (pkgconf_mutex);
- pkgconf_pkg_unref (client_, pkg_);
- pkgconf_client_free (client_);
- }
- }
-
- strings pkgconf::
- cflags (bool stat) const
- {
- assert (client_ != nullptr); // Must not be empty.
-
- mlock l (pkgconf_mutex);
-
- pkgconf_client_set_flags (
- client_,
- pkgconf_flags |
-
- // Walk through the private package dependencies (Requires.private)
- // besides the public ones while collecting the flags. Note that we do
- // this for both static and shared linking.
- //
- PKGCONF_PKG_PKGF_SEARCH_PRIVATE |
-
- // Collect flags from Cflags.private besides those from Cflags for the
- // static linking.
- //
- (stat
- ? PKGCONF_PKG_PKGF_MERGE_PRIVATE_FRAGMENTS
- : 0));
-
- pkgconf_list_t f = PKGCONF_LIST_INITIALIZER; // Aggregate initialization.
- int e (pkgconf_pkg_cflags (client_, pkg_, &f, pkgconf_max_depth));
-
- if (e != PKGCONF_PKG_ERRF_OK)
- throw failed (); // Assume the diagnostics is issued.
-
- unique_ptr<pkgconf_list_t, fragments_deleter> fd (&f); // Auto-deleter.
- return to_strings (f, 'I', client_->filter_includedirs);
- }
-
- strings pkgconf::
- libs (bool stat) const
- {
- assert (client_ != nullptr); // Must not be empty.
-
- mlock l (pkgconf_mutex);
-
- pkgconf_client_set_flags (
- client_,
- pkgconf_flags |
-
- // Additionally collect flags from the private dependency packages
- // (see above) and from the Libs.private value for the static linking.
- //
- (stat
- ? PKGCONF_PKG_PKGF_SEARCH_PRIVATE |
- PKGCONF_PKG_PKGF_MERGE_PRIVATE_FRAGMENTS
- : 0));
-
- pkgconf_list_t f = PKGCONF_LIST_INITIALIZER; // Aggregate initialization.
- int e (pkgconf_pkg_libs (client_, pkg_, &f, pkgconf_max_depth));
-
- if (e != PKGCONF_PKG_ERRF_OK)
- throw failed (); // Assume the diagnostics is issued.
-
- unique_ptr<pkgconf_list_t, fragments_deleter> fd (&f); // Auto-deleter.
- return to_strings (f, 'L', client_->filter_libdirs);
- }
-
- string pkgconf::
- variable (const char* name) const
- {
- assert (client_ != nullptr); // Must not be empty.
-
- mlock l (pkgconf_mutex);
- const char* r (pkgconf_tuple_find (client_, &pkg_->vars, name));
- return r != nullptr ? string (r) : string ();
- }
-
-#endif
-
namespace cc
{
using namespace bin;
// In pkg-config backslashes, spaces, etc are escaped with a backslash.
//
+ // @@ TODO: handle empty values (save as ''?)
+ //
+    // Note: may contain variable expansions (e.g., ${pcfiledir}) so it is
+    // unclear whether we can use quoting.
+ //
static string
escape (const string& s)
{
@@ -481,6 +63,35 @@ namespace build2
return r;
}
+ // Resolve metadata value type from type name. Return in the second half
+ // of the pair whether this is a dir_path-based type.
+ //
+ static pair<const value_type*, bool>
+ metadata_type (const string& tn)
+ {
+ bool d (false);
+ const value_type* r (nullptr);
+
+ if (tn == "bool") r = &value_traits<bool>::value_type;
+ else if (tn == "int64") r = &value_traits<int64_t>::value_type;
+ else if (tn == "uint64") r = &value_traits<uint64_t>::value_type;
+ else if (tn == "string") r = &value_traits<string>::value_type;
+ else if (tn == "path") r = &value_traits<path>::value_type;
+ else if (tn == "dir_path") {r = &value_traits<dir_path>::value_type; d = true;}
+ else if (tn == "int64s") r = &value_traits<int64s>::value_type;
+ else if (tn == "uint64s") r = &value_traits<uint64s>::value_type;
+ else if (tn == "strings") r = &value_traits<strings>::value_type;
+ else if (tn == "paths") r = &value_traits<paths>::value_type;
+ else if (tn == "dir_paths") {r = &value_traits<dir_paths>::value_type; d = true;}
+
+ return make_pair (r, d);
+ }
+
+ // In order not to complicate the bootstrap procedure with libpkg-config
+ // building, exclude functionality that involves reading of .pc files.
+ //
+#ifndef BUILD2_BOOTSTRAP
+
// Try to find a .pc file in the pkgconfig/ subdirectory of libd, trying
// several names derived from stem. If not found, return false. If found,
// load poptions, loptions, libs, and modules, set the corresponding
@@ -497,9 +108,8 @@ namespace build2
// Also note that the bootstrapped version of build2 will not search for
// .pc files, always returning false (see above for the reasoning).
//
-#ifndef BUILD2_BOOTSTRAP
- // Derive pkgconf search directories from the specified library search
+ // Derive pkg-config search directories from the specified library search
// directory passing them to the callback function for as long as it
// returns false (e.g., not found). Return true if the callback returned
// true.
@@ -543,8 +153,8 @@ namespace build2
return false;
}
- // Search for the .pc files in the pkgconf directories that correspond to
- // the specified library directory. If found, return static (first) and
+ // Search for the .pc files in the pkg-config directories that correspond
+ // to the specified library directory. If found, return static (first) and
// shared (second) library .pc files. If common is false, then only
// consider our .static/.shared files.
//
@@ -554,6 +164,8 @@ namespace build2
const string& stem,
bool common) const
{
+ tracer trace (x, "pkgconfig_search");
+
// When it comes to looking for .pc files we have to decide where to
// search (which directory(ies)) as well as what to search for (which
// names). Suffix is our ".shared" or ".static" extension.
@@ -575,28 +187,36 @@ namespace build2
// then you get something like zlib which calls it zlib.pc. So let's
// just do it.
//
- f = dir;
- f /= "lib";
- f += stem;
- f += sfx;
- f += ".pc";
- if (exists (f))
- return f;
+ // And just when you think you've covered all the bases, someone decides
+ // play with the case (libXau.* vs xau.pc). So let's also try the
+ // lower-case versions of the stem unless we are on a case-insensitive
+ // filesystem.
+ //
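+ // For example, for stem `Xau` (and an empty suffix) this ends up
+ // trying libXau.pc, Xau.pc and then, on case-sensitive filesystems,
+ // libxau.pc and xau.pc, in that order.
+ //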
+ auto check = [&dir, &sfx, &f] (const string& n)
+ {
+ f = dir;
+ f /= n;
+ f += sfx;
+ f += ".pc";
+ return exists (f);
+ };
- f = dir;
- f /= stem;
- f += sfx;
- f += ".pc";
- if (exists (f))
+ if (check ("lib" + stem) || check (stem))
return f;
+#ifndef _WIN32
+ string lstem (lcase (stem));
+
+ if (lstem != stem)
+ {
+ if (check ("lib" + lstem) || check (lstem))
+ return f;
+ }
+#endif
+
if (proj)
{
- f = dir;
- f /= proj->string ();
- f += sfx;
- f += ".pc";
- if (exists (f))
+ if (check (proj->string ()))
return f;
}
@@ -636,15 +256,18 @@ namespace build2
if (pkgconfig_derive (libd, check))
{
+ l6 ([&]{trace << "found " << libd << stem << " in "
+ << (d.a.empty () ? d.a : d.s).directory ();});
+
r.first = move (d.a);
r.second = move (d.s);
}
return r;
- };
+ }
bool common::
- pkgconfig_load (action a,
+ pkgconfig_load (optional<action> act,
const scope& s,
lib& lt,
liba* at,
@@ -653,7 +276,8 @@ namespace build2
const string& stem,
const dir_path& libd,
const dir_paths& top_sysd,
- const dir_paths& top_usrd) const
+ const dir_paths& top_usrd,
+ pair<bool, bool> metaonly) const
{
assert (at != nullptr || st != nullptr);
@@ -663,12 +287,16 @@ namespace build2
if (p.first.empty () && p.second.empty ())
return false;
- pkgconfig_load (a, s, lt, at, st, p, libd, top_sysd, top_usrd);
+ pkgconfig_load (
+ act, s, lt, at, st, p, libd, top_sysd, top_usrd, metaonly);
return true;
}
+ // Action should be absent if called during the load phase. If metaonly is
+ // true then only load the metadata.
+ //
void common::
- pkgconfig_load (action a,
+ pkgconfig_load (optional<action> act,
const scope& s,
lib& lt,
liba* at,
@@ -676,7 +304,8 @@ namespace build2
const pair<path, path>& paths,
const dir_path& libd,
const dir_paths& top_sysd,
- const dir_paths& top_usrd) const
+ const dir_paths& top_usrd,
+ pair<bool /* a */, bool /* s */> metaonly) const
{
tracer trace (x, "pkgconfig_load");
@@ -687,24 +316,120 @@ namespace build2
assert (!ap.empty () || !sp.empty ());
- // Extract --cflags and set them as lib?{}:export.poptions. Note that we
- // still pass --static in case this is pkgconf which has Cflags.private.
+ const scope& rs (*s.root_scope ());
+
+ const dir_path* sysroot (
+ cast_null<abs_dir_path> (rs["config.cc.pkgconfig.sysroot"]));
+
+ // Append -I<dir> or -L<dir> option suppressing duplicates. Also handle
+ // the sysroot rewrite.
+ //
+ auto append_dir = [sysroot] (strings& ops, string&& o)
+ {
+ char c (o[1]);
+
+ // @@ Should we normalize the path for good measure? But on the other
+ // hand, most of the time when it's not normalized, it will likely
+ // be "consistently-relative", e.g., something like
+ // ${prefix}/lib/../include. I guess let's wait and see for some
+ // real-world examples.
+ //
+ // Well, we now support generating relocatable .pc files that have
+ // a bunch of -I${pcfiledir}/../../include and -L${pcfiledir}/.. .
+ //
+ // On the other hand, there could be symlinks involved and just
+ // normalize() may not be correct.
+ //
+ // Note that we do normalize -L paths in the usrd logic later
+ // (but not when setting as *.export.loptions).
+
+ if (sysroot != nullptr)
+ {
+ // Notes:
+ //
+ // - The path might not be absolute (we only rewrite absolute ones).
+ //
+ // - Do this before duplicate suppression since options in ops
+ // already have the sysroot rewritten.
+ //
+ // - Check if the path already starts with sysroot since some .pc
+ // files might already be in a good shape (e.g., because they use
+ // ${pcfiledir} to support relocation properly).
+ //
+ const char* op (o.c_str () + 2);
+ size_t on (o.size () - 2);
+
+ if (path_traits::absolute (op, on))
+ {
+ const string& s (sysroot->string ());
+
+ const char* sp (s.c_str ());
+ size_t sn (s.size ());
+
+ if (!path_traits::sub (op, on, sp, sn)) // Already in sysroot.
+ {
+ // Find the first directory separator that separates the root
+ // component from the rest of the path (think /usr/include,
+ // c:\install\include). We need to replace the root component
+ // with sysroot. If there is no separator (say, -Ic:) or the
+ // path after the separator is empty (say, -I/), then we replace
+ // the entire path.
+ //
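+ // For example (a hypothetical illustration), with sysroot /opt/sdk:
+ //
+ //   -I/usr/include    ->  -I/opt/sdk/usr/include
+ //   -Lc:\install\lib  ->  -L/opt/sdk\install\lib
+ //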
+ size_t p (path_traits::find_separator (o, 2));
+ if (p == string::npos || p + 1 == o.size ())
+ p = o.size ();
+
+ o.replace (2, p - 2, s);
+ }
+ }
+ }
+
+ for (const string& x: ops)
+ {
+ if (x.size () > 2 && x[0] == '-' && x[1] == c)
+ {
+ if (path_traits::compare (x.c_str () + 2, x.size () - 2,
+ o.c_str () + 2, o.size () - 2) == 0)
+ return; // Duplicate.
+ }
+ }
+
+ ops.push_back (move (o));
+ };
+
+ // Extract --cflags and set them as lib?{}:export.poptions, returning the
+ // pointer to the set value. If [as]pops are not NULL, then only keep
+ // options that are present in both.
//
- auto parse_cflags = [&trace, this] (target& t,
- const pkgconf& pc,
- bool la)
+ auto parse_cflags = [&trace,
+ this,
+ &append_dir] (target& t,
+ const pkgconfig& pc,
+ bool la,
+ const strings* apops = nullptr,
+ const strings* spops = nullptr)
+ -> const strings*
{
+ // Note that we normalize `-[IDU] <arg>` to `-[IDU]<arg>`.
+ //
strings pops;
- bool arg (false);
- for (auto& o: pc.cflags (la))
+ char arg ('\0'); // Option with pending argument.
+ for (string& o: pc.cflags (la))
{
if (arg)
{
// Can only be an argument for -I, -D, -U options.
//
- pops.push_back (move (o));
- arg = false;
+ o.insert (0, 1, arg);
+ o.insert (0, 1, '-');
+
+ if (arg == 'I')
+ append_dir (pops, move (o));
+ else
+ pops.push_back (move (o));
+
+ arg = '\0';
continue;
}
@@ -713,11 +438,17 @@ namespace build2
// We only keep -I, -D and -U.
//
if (n >= 2 &&
- o[0] == '-' &&
- (o[1] == 'I' || o[1] == 'D' || o[1] == 'U'))
+ o[0] == '-' && (o[1] == 'I' || o[1] == 'D' || o[1] == 'U'))
{
- pops.push_back (move (o));
- arg = (n == 2);
+ if (n > 2)
+ {
+ if (o[1] == 'I')
+ append_dir (pops, move (o));
+ else
+ pops.push_back (move (o));
+ }
+ else
+ arg = o[1];
continue;
}
@@ -726,7 +457,7 @@ namespace build2
}
if (arg)
- fail << "argument expected after " << pops.back () <<
+ fail << "argument expected after -" << arg <<
info << "while parsing pkg-config --cflags " << pc.path;
if (!pops.empty ())
@@ -739,19 +470,45 @@ namespace build2
// export stub and we shouldn't touch them.
//
if (p.second)
+ {
+ // If required, only keep common stuff. While removing the entries
+ // is not the most efficient way, it is simple.
+ //
+ if (apops != nullptr || spops != nullptr)
+ {
+ for (auto i (pops.begin ()); i != pops.end (); )
+ {
+ if ((apops != nullptr && find (
+ apops->begin (), apops->end (), *i) == apops->end ()) ||
+ (spops != nullptr && find (
+ spops->begin (), spops->end (), *i) == spops->end ()))
+ i = pops.erase (i);
+ else
+ ++i;
+ }
+ }
+
p.first = move (pops);
+ return &p.first.as<strings> ();
+ }
}
+
+ return nullptr;
};
// Parse --libs into loptions/libs (interface and implementation). If
// ps is not NULL, add each resolved library target as a prerequisite.
//
- auto parse_libs = [a, &s, top_sysd, this] (target& t,
- bool binless,
- const pkgconf& pc,
- bool la,
- prerequisites* ps)
+ auto parse_libs = [this,
+ &append_dir,
+ act, &s, top_sysd] (target& t,
+ bool binless,
+ const pkgconfig& pc,
+ bool la,
+ prerequisites* ps)
{
+ // Note that we normalize `-L <arg>` to `-L<arg>`.
+ //
strings lops;
vector<name> libs;
@@ -760,22 +517,29 @@ namespace build2
// library is binless. But sometimes we may have other linker options,
// for example, -Wl,... or -pthread. It's probably a bad idea to
// ignore them. Also, theoretically, we could have just the library
- // name/path.
+ // name/path. Note that (after some meditation) we consider -pthread
+ // a special form of -l.
//
// The tricky part, of course, is to know whether what follows after
// an option we don't recognize is its argument or another option or
// library. What we do at the moment is stop recognizing just library
// names (without -l) after seeing an unknown option.
//
- bool arg (false), first (true), known (true), have_L;
- for (auto& o: pc.libs (la))
+ bool first (true), known (true), have_L (false);
+
+ string self; // The library itself (-l or just name/path).
+
+ char arg ('\0'); // Option with pending argument.
+ for (string& o: pc.libs (la))
{
if (arg)
{
- // Can only be an argument for an loption.
+ // Can only be an argument for an -L option.
//
- lops.push_back (move (o));
- arg = false;
+ o.insert (0, 1, arg);
+ o.insert (0, 1, '-');
+ append_dir (lops, move (o));
+ arg = '\0';
continue;
}
@@ -785,44 +549,54 @@ namespace build2
//
if (n >= 2 && o[0] == '-' && o[1] == 'L')
{
+ if (n > 2)
+ append_dir (lops, move (o));
+ else
+ arg = o[1];
have_L = true;
- lops.push_back (move (o));
- arg = (n == 2);
continue;
}
- // See if that's -l or just the library name/path.
+ // See if that's -l, -pthread, or just the library name/path.
//
- if ((known && o[0] != '-') ||
- (n > 2 && o[0] == '-' && o[1] == 'l'))
+ if ((known && n != 0 && o[0] != '-') ||
+ (n > 2 && o[0] == '-' && (o[1] == 'l' || o == "-pthread")))
{
// Unless binless, the first one is the library itself, which we
// skip. Note that we don't verify this and theoretically it could
// be some other library, but we haven't encountered such a beast
// yet.
//
+ // What we have encountered (e.g., in the Magick++ library) is the
+ // library itself repeated in Libs.private. So now we save it and
+ // filter all its subsequent occurrences.
+ //
+ // @@ To be safe we probably shouldn't rely on the position and
+ // filter out all occurrences of the library itself (by name?)
+ // and complain if none were encountered.
+ //
+ // Note also that the same situation can occur if we have a
+ // binful library for which we could not find the library
+ // binary and are treating it as binless. We now have a diag
+ // frame around the call to search_library() to help diagnose
+ // such situations.
+ //
if (first)
{
first = false;
if (!binless)
+ {
+ self = move (o);
+ continue;
+ }
+ }
+ else
+ {
+ if (!binless && o == self)
continue;
}
- // @@ If by some reason this is the library itself (doesn't go
- // first or libpkgconf parsed libs in some bizarre way) we will
- // have a dependency cycle by trying to lock its target inside
- // search_library() as by now it is already locked. To be safe
- // we probably shouldn't rely on the position and filter out
- // all occurrences of the library itself (by name?) and
- // complain if none were encountered.
- //
- // Note also that the same situation can occur if we have a
- // binful library for which we could not find the library
- // binary and are treating it as binless. We now have a diag
- // frame around the call to search_library() to help diagnose
- // such situations.
- //
libs.push_back (name (move (o)));
continue;
}
@@ -834,7 +608,7 @@ namespace build2
}
if (arg)
- fail << "argument expected after " << lops.back () <<
+ fail << "argument expected after -" << arg <<
info << "while parsing pkg-config --libs " << pc.path;
// Space-separated list of escaped library flags.
@@ -842,7 +616,7 @@ namespace build2
auto lflags = [&pc, la] () -> string
{
string r;
- for (const auto& o: pc.libs (la))
+ for (const string& o: pc.libs (la))
{
if (!r.empty ())
r += ' ';
@@ -851,7 +625,7 @@ namespace build2
return r;
};
- if (first && !binless)
+ if (!binless && self.empty ())
fail << "library expected in '" << lflags () << "'" <<
info << "while parsing pkg-config --libs " << pc.path;
@@ -864,8 +638,8 @@ namespace build2
// import installed, or via a .pc file (which we could have generated
// from the export stub). The exception is "runtime libraries" (which
// are really the extension of libc or the operating system in case of
- // Windows) such as -lm, -ldl, -lpthread, etc. Those we will detect
- // and leave as -l*.
+ // Windows) such as -lm, -ldl, -lpthread (or its -pthread variant),
+ // etc. Those we will detect and leave as -l*.
//
// If we managed to resolve all the -l's (sans runtime), then we can
// omit -L's for a nice and tidy command line.
@@ -892,11 +666,15 @@ namespace build2
if (l[0] != '-') // e.g., just shell32.lib
continue;
else if (cmp ("advapi32") ||
+ cmp ("authz") ||
cmp ("bcrypt") ||
+ cmp ("comdlg32") ||
cmp ("crypt32") ||
- cmp ("dbgeng") ||
cmp ("d2d1") ||
cmp ("d3d", 3) || // d3d*
+ cmp ("dbgeng") ||
+ cmp ("dbghelp") ||
+ cmp ("dnsapi") ||
cmp ("dwmapi") ||
cmp ("dwrite") ||
cmp ("dxgi") ||
@@ -907,7 +685,9 @@ namespace build2
cmp ("imm32") ||
cmp ("iphlpapi") ||
cmp ("kernel32") ||
+ cmp ("mincore") ||
cmp ("mpr") ||
+ cmp ("msimg32") ||
cmp ("mswsock") ||
cmp ("msxml", 5) || // msxml*
cmp ("netapi32") ||
@@ -916,9 +696,11 @@ namespace build2
cmp ("ole32") ||
cmp ("oleaut32") ||
cmp ("opengl32") ||
+ cmp ("powrprof") ||
cmp ("psapi") ||
cmp ("rpcrt4") ||
cmp ("secur32") ||
+ cmp ("setupapi") ||
cmp ("shell32") ||
cmp ("shlwapi") ||
cmp ("synchronization") ||
@@ -926,6 +708,8 @@ namespace build2
cmp ("userenv") ||
cmp ("uuid") ||
cmp ("version") ||
+ cmp ("windowscodecs") ||
+ cmp ("winhttp") ||
cmp ("winmm") ||
cmp ("winspool") ||
cmp ("ws2") ||
@@ -942,6 +726,11 @@ namespace build2
}
continue;
}
+ else if (tsys == "mingw32")
+ {
+ if (l == "-pthread")
+ continue;
+ }
}
else
{
@@ -951,6 +740,7 @@ namespace build2
l == "-lm" ||
l == "-ldl" ||
l == "-lrt" ||
+ l == "-pthread" ||
l == "-lpthread")
continue;
@@ -969,7 +759,11 @@ namespace build2
}
else if (tclass == "macos")
{
- if (l == "-lSystem")
+ // Note that Mac OS has libiconv in /usr/lib/ which only comes
+ // in the shared variant. So we treat it as system.
+ //
+ if (l == "-lSystem" ||
+ l == "-liconv")
continue;
}
else if (tclass == "bsd")
@@ -986,18 +780,13 @@ namespace build2
{
usrd = dir_paths ();
- for (auto i (lops.begin ()); i != lops.end (); ++i)
+ for (const string& o: lops)
{
- const string& o (*i);
-
- if (o.size () >= 2 && o[0] == '-' && o[1] == 'L')
+ // Note: always in the -L<dir> form (see above).
+ //
+ if (o.size () > 2 && o[0] == '-' && o[1] == 'L')
{
- string p;
-
- if (o.size () == 2)
- p = *++i; // We've verified it's there.
- else
- p = string (o, 2);
+ string p (o, 2);
try
{
@@ -1008,6 +797,7 @@ namespace build2
<< lflags () << "'" <<
info << "while parsing pkg-config --libs " << pc.path;
+ d.normalize ();
usrd->push_back (move (d));
}
catch (const invalid_path& e)
@@ -1038,7 +828,7 @@ namespace build2
dr << info (f) << "while resolving pkg-config dependency " << l;
});
- lt = search_library (a, top_sysd, usrd, pk);
+ lt = search_library (act, top_sysd, usrd, pk);
}
if (lt != nullptr)
@@ -1087,24 +877,16 @@ namespace build2
{
// Translate -L to /LIBPATH.
//
- for (auto i (lops.begin ()); i != lops.end (); )
+ for (string& o: lops)
{
- string& o (*i);
size_t n (o.size ());
- if (n >= 2 && o[0] == '-' && o[1] == 'L')
+ // Note: always in the -L<dir> form (see above).
+ //
+ if (n > 2 && o[0] == '-' && o[1] == 'L')
{
o.replace (0, 2, "/LIBPATH:");
-
- if (n == 2)
- {
- o += *++i; // We've verified it's there.
- i = lops.erase (i);
- continue;
- }
}
-
- ++i;
}
}
@@ -1128,6 +910,10 @@ namespace build2
// may escape things even on non-Windows platforms, for example,
// spaces. So we use a slightly modified version of next_word().
//
+ // @@ TODO: handle quotes (e.g., empty values; see parse_metadata()).
+ // I wonder what we get here if something is quoted in the
+ // .pc file.
+ //
auto next = [] (const string& s, size_t& b, size_t& e) -> string
{
string r;
@@ -1163,17 +949,123 @@ namespace build2
return r;
};
+ // Parse the build2.metadata variable value and, if user is true,
+ // extract the user metadata, if any, and set extracted variables on the
+ // specified target.
+ //
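+ // For example, a .pc file could contain something along these lines
+ // (hypothetical variable names; see pkgconfig_save() below for the
+ // exact format that gets written):
+ //
+ //   build2.metadata = 1 libfoo \
+ //   libfoo.license/string \
+ //   libfoo.plugin_dirs/dir_paths
+ //
+ //   libfoo.license = MIT
+ //   libfoo.plugin_dirs = /usr/lib/libfoo/plugins/
+ //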
+ auto parse_metadata = [&next] (target& t,
+ pkgconfig& pc,
+ const string& md,
+ bool user)
+ {
+ const location loc (pc.path);
+
+ context& ctx (t.ctx);
+
+ optional<uint64_t> ver;
+ optional<string> pfx;
+
+ variable_pool* vp (nullptr); // Resolve lazily.
+
+ string s;
+ for (size_t b (0), e (0); !(s = next (md, b, e)).empty (); )
+ {
+ if (!ver)
+ {
+ try
+ {
+ ver = value_traits<uint64_t>::convert (name (s), nullptr);
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (loc) << "invalid version in build2.metadata variable: "
+ << e;
+ }
+
+ if (*ver != 1)
+ fail (loc) << "unexpected metadata version " << *ver;
+
+ if (!user)
+ return;
+
+ continue;
+ }
+
+ if (!pfx)
+ {
+ if (s.empty ())
+ fail (loc) << "empty variable prefix in build2.metadata varible";
+
+ pfx = s;
+ continue;
+ }
+
+ // The rest is variable name/type pairs.
+ //
+ size_t p (s.find ('/'));
+
+ if (p == string::npos)
+ fail (loc) << "expected name/type pair instead of '" << s << "'";
+
+ string vn (s, 0, p);
+ string tn (s, p + 1);
+
+ optional<string> val (pc.variable (vn));
+
+ if (!val)
+ fail (loc) << "metadata variable " << vn << " not set";
+
+ pair<const value_type*, bool> vt (metadata_type (tn));
+ if (vt.first == nullptr)
+ fail (loc) << "unknown metadata type " << tn;
+
+ names ns;
+ for (size_t b (0), e (0); !(s = next (*val, b, e)).empty (); )
+ {
+ ns.push_back (vt.second
+ ? name (dir_path (move (s)))
+ : name (move (s)));
+ }
+
+ // These should be public (qualified) variables so go straight for
+ // the public variable pool.
+ //
+ if (vp == nullptr)
+ vp = &ctx.var_pool.rw (); // Load phase if user==true.
+
+ const variable& var (vp->insert (move (vn)));
+
+ value& v (t.assign (var));
+ v.assign (move (ns), &var);
+ typify (v, *vt.first, &var);
+ }
+
+ if (!ver)
+ fail (loc) << "version expected in build2.metadata variable";
+
+ if (!pfx)
+ return; // No user metadata.
+
+ // Set export.metadata to indicate the presence of user metadata.
+ //
+ t.assign (ctx.var_export_metadata) = names {
+ name (std::to_string (*ver)), name (move (*pfx))};
+ };
+
// Parse modules, enter them as targets, and add them to the
// prerequisites.
//
auto parse_modules = [&trace, this,
- &next, &s, &lt] (const pkgconf& pc,
+ &next, &s, &lt] (const pkgconfig& pc,
prerequisites& ps)
{
- string val (pc.variable ("cxx_modules"));
+ optional<string> val (pc.variable ("cxx.modules"));
+
+ if (!val)
+ return;
string m;
- for (size_t b (0), e (0); !(m = next (val, b, e)).empty (); )
+ for (size_t b (0), e (0); !(m = next (*val, b, e)).empty (); )
{
// The format is <name>=<path> with `..` used as a partition
// separator (see pkgconfig_save() for details).
@@ -1182,18 +1074,26 @@ namespace build2
if (p == string::npos ||
p == 0 || // Empty name.
p == m.size () - 1) // Empty path.
- fail << "invalid module information in '" << val << "'" <<
- info << "while parsing pkg-config --variable=cxx_modules "
+ fail << "invalid module information in '" << *val << "'" <<
+ info << "while parsing pkg-config --variable=cxx.modules "
<< pc.path;
string mn (m, 0, p);
path mp (m, p + 1, string::npos);
+
+ // Must be absolute but may not be normalized due to a relocatable
+ // .pc file. We assume there are no symlink shenanigans that would
+ // require realize().
+ //
+ if (!mp.normalized ())
+ mp.normalize ();
+
path mf (mp.leaf ());
// Extract module properties, if any.
//
- string pp (pc.variable ("cxx_module_preprocessed." + mn));
- string se (pc.variable ("cxx_module_symexport." + mn));
+ optional<string> pp (pc.variable ("cxx.module_preprocessed." + mn));
+ optional<string> se (pc.variable ("cxx.module_symexport." + mn));
// Replace the partition separator.
//
@@ -1212,7 +1112,7 @@ namespace build2
target_decl::implied,
trace));
- target& mt (tl.first);
+ file& mt (tl.first.as<file> ());
// If the target already exists, then setting its variables is not
// MT-safe. So currently we only do it if we have the lock (and thus
@@ -1230,6 +1130,7 @@ namespace build2
//
if (tl.second.owns_lock ())
{
+ mt.path (move (mp));
mt.vars.assign (c_module_name) = move (mn);
// Set module properties. Note that if unspecified we should still
@@ -1238,11 +1139,12 @@ namespace build2
//
{
value& v (mt.vars.assign (x_preprocessed)); // NULL
- if (!pp.empty ()) v = move (pp);
+ if (pp)
+ v = move (*pp);
}
{
- mt.vars.assign (x_symexport) = (se == "true");
+ mt.vars.assign (x_symexport) = (se && *se == "true");
}
tl.second.unlock ();
@@ -1264,18 +1166,29 @@ namespace build2
// the prerequisites.
//
auto parse_headers = [&trace, this,
- &next, &s, &lt] (const pkgconf& pc,
+ &next, &s, &lt] (const pkgconfig& pc,
const target_type& tt,
const char* lang,
prerequisites& ps)
{
- string var (string (lang) + "_importable_headers");
- string val (pc.variable (var));
+ string var (string (lang) + ".importable_headers");
+ optional<string> val (pc.variable (var));
+
+ if (!val)
+ return;
string h;
- for (size_t b (0), e (0); !(h = next (val, b, e)).empty (); )
+ for (size_t b (0), e (0); !(h = next (*val, b, e)).empty (); )
{
path hp (move (h));
+
+ // Must be absolute but may not be normalized due to a relocatable
+ // .pc file. We assume there are no symlink shenanigans that would
+ // require realize().
+ //
+ if (!hp.normalized ())
+ hp.normalize ();
+
path hf (hp.leaf ());
auto tl (
@@ -1288,7 +1201,7 @@ namespace build2
target_decl::implied,
trace));
- target& ht (tl.first);
+ file& ht (tl.first.as<file> ());
// If the target already exists, then setting its variables is not
// MT-safe. So currently we only do it if we have the lock (and thus
@@ -1297,6 +1210,7 @@ namespace build2
//
if (tl.second.owns_lock ())
{
+ ht.path (move (hp));
ht.vars.assign (c_importable) = true;
tl.second.unlock ();
}
@@ -1313,19 +1227,10 @@ namespace build2
}
};
- // For now we only populate prerequisites for lib{}. To do it for
- // liba{} would require weeding out duplicates that are already in
- // lib{}.
+ // Load the information from the pkg-config files.
//
- // Currently, this information is only used by the modules machinery to
- // resolve module names to module files (but we cannot only do this if
- // modules are enabled since the same installed library can be used by
- // multiple builds).
- //
- prerequisites prs;
-
- pkgconf apc;
- pkgconf spc;
+ pkgconfig apc;
+ pkgconfig spc;
// Create the .pc files search directory list.
//
@@ -1333,9 +1238,16 @@ namespace build2
// Note that we rely on the "small function object" optimization here.
//
- auto add_pc_dir = [&pc_dirs] (dir_path&& d) -> bool
+ auto add_pc_dir = [&trace, &pc_dirs] (dir_path&& d) -> bool
{
- pc_dirs.emplace_back (move (d));
+ // Suppress duplicates.
+ //
+ if (find (pc_dirs.begin (), pc_dirs.end (), d) == pc_dirs.end ())
+ {
+ l6 ([&]{trace << "search path " << d;});
+ pc_dirs.emplace_back (move (d));
+ }
+
return false;
};
@@ -1345,18 +1257,115 @@ namespace build2
bool pa (at != nullptr && !ap.empty ());
if (pa || sp.empty ())
- apc = pkgconf (ap, pc_dirs, sys_lib_dirs, sys_hdr_dirs);
+ apc = pkgconfig (ap, pc_dirs, sys_lib_dirs, sys_hdr_dirs);
bool ps (st != nullptr && !sp.empty ());
if (ps || ap.empty ())
- spc = pkgconf (sp, pc_dirs, sys_lib_dirs, sys_hdr_dirs);
+ spc = pkgconfig (sp, pc_dirs, sys_lib_dirs, sys_hdr_dirs);
+
+ // Load the user metadata if we are in the load phase. Otherwise just
+ // determine if we have metadata.
+ //
+ // Note also that we are not failing here if the metadata was requested
+ // but not present (potentially only partially), letting the caller
+ // (i.e., the import machinery) verify that the export.metadata was set
+ // on the target being imported. This would also allow supporting
+ // optional metadata.
+ //
+ bool apc_meta (false);
+ bool spc_meta (false);
+ if (!act)
+ {
+ // We can only do it during the load phase.
+ //
+ assert (lt.ctx.phase == run_phase::load);
+
+ pkgconfig& ipc (ps ? spc : apc); // As below.
+
+ // Since it's not easy to say if things are the same, we load a copy
+ // into the group and each member, if any.
+ //
+ // @@ TODO: check if already loaded? Don't we have the same problem
+ // below with reloading the rest for lt? What if we passed NULL
+ // in this case (and I suppose another bool in metaonly)?
+ //
+ if (optional<string> md = ipc.variable ("build2.metadata"))
+ parse_metadata (lt, ipc, *md, true);
+
+ if (pa)
+ {
+ if (optional<string> md = apc.variable ("build2.metadata"))
+ {
+ parse_metadata (*at, apc, *md, true);
+ apc_meta = true;
+ }
+ }
+
+ if (ps)
+ {
+ if (optional<string> md = spc.variable ("build2.metadata"))
+ {
+ parse_metadata (*st, spc, *md, true);
+ spc_meta = true;
+ }
+ }
+
+ // If we only need metadata, then we are done.
+ //
+ if (at != nullptr && metaonly.first)
+ {
+ pa = false;
+ at = nullptr;
+ }
+
+ if (st != nullptr && metaonly.second)
+ {
+ ps = false;
+ st = nullptr;
+ }
+
+ if (at == nullptr && st == nullptr)
+ return;
+ }
+ else
+ {
+ if (pa)
+ {
+ if (optional<string> md = apc.variable ("build2.metadata"))
+ {
+ parse_metadata (*at, apc, *md, false);
+ apc_meta = true;
+ }
+ }
+
+ if (ps)
+ {
+ if (optional<string> md = spc.variable ("build2.metadata"))
+ {
+ parse_metadata (*st, spc, *md, false);
+ spc_meta = true;
+ }
+ }
+ }
// Sort out the interface dependencies (which we are setting on lib{}).
// If we have the shared .pc variant, then we use that. Otherwise --
// static but extract without the --static option (see also the saving
// logic).
//
- pkgconf& ipc (ps ? spc : apc); // Interface package info.
+ pkgconfig& ipc (ps ? spc : apc); // Interface package info.
+ bool ipc_meta (ps ? spc_meta : apc_meta);
+
+ // For now we only populate prerequisites for lib{}. To do it for
+ // liba{} would require weeding out duplicates that are already in
+ // lib{}.
+ //
+ // Currently, this information is only used by the modules machinery to
+ // resolve module names to module files (but we cannot only do this if
+ // modules are enabled since the same installed library can be used by
+ // multiple builds).
+ //
+ prerequisites prs;
parse_libs (
lt,
@@ -1365,28 +1374,58 @@ namespace build2
false,
&prs);
+ const strings* apops (nullptr);
if (pa)
{
- parse_cflags (*at, apc, true);
+ apops = parse_cflags (*at, apc, true);
parse_libs (*at, at->path ().empty (), apc, true, nullptr);
}
+ const strings* spops (nullptr);
if (ps)
- parse_cflags (*st, spc, false);
+ spops = parse_cflags (*st, spc, false);
+
+ // Also set common poptions for the group. In particular, this makes
+ // sure $lib_poptions() in the "common interface" mode works for the
+ // installed libraries.
+ //
+ // Note that if there are no poptions set for either, then we cannot
+ // possibly have a common subset.
+ //
+ if (apops != nullptr || spops != nullptr)
+ parse_cflags (lt, ipc, false, apops, spops);
+
+ // @@ TODO: we can now load cc.type if there is metadata (but need to
+ // return this rather than set, see search_library() for
+ // details).
+
+ // Load the bin.whole flag (whole archive).
+ //
+ if (at != nullptr && (pa ? apc_meta : spc_meta))
+ {
+ // Note that if unspecified we leave it unset letting the consumer
+ // override it, if necessary (see the bin.lib lookup semantics for
+ // details).
+ //
+ if (optional<string> v = (pa ? apc : spc).variable ("bin.whole"))
+ {
+ at->vars.assign ("bin.whole") = (*v == "true");
+ }
+ }
// For now we assume static and shared variants export the same set of
// modules/importable headers. While technically possible, having
// different sets will most likely lead to all sorts of complications
// (at least for installed libraries) and life is short.
//
- if (modules)
+ if (modules && ipc_meta)
{
parse_modules (ipc, prs);
// We treat headers outside of any project as C headers (see
// enter_header() for details).
//
- parse_headers (ipc, h::static_type /* **x_hdr */, x, prs);
+ parse_headers (ipc, h::static_type /* **x_hdrs */, x, prs);
parse_headers (ipc, h::static_type, "c", prs);
}
@@ -1407,7 +1446,7 @@ namespace build2
}
bool common::
- pkgconfig_load (action,
+ pkgconfig_load (optional<action>,
const scope&,
lib&,
liba*,
@@ -1416,13 +1455,14 @@ namespace build2
const string&,
const dir_path&,
const dir_paths&,
- const dir_paths&) const
+ const dir_paths&,
+ pair<bool, bool>) const
{
return false;
}
void common::
- pkgconfig_load (action,
+ pkgconfig_load (optional<action>,
const scope&,
lib&,
liba*,
@@ -1430,7 +1470,8 @@ namespace build2
const pair<path, path>&,
const dir_path&,
const dir_paths&,
- const dir_paths&) const
+ const dir_paths&,
+ pair<bool, bool>) const
{
assert (false); // Should never be called.
}
@@ -1444,6 +1485,11 @@ namespace build2
// file must be generated based on the static library to get accurate
// Libs.private.
//
+ // The other things that we omit from the common variant are -l options
+ // for binless libraries (so that it's usable from other build systems) as
+ // well as metadata (which could become incomplete due the previous
+ // omissions; for example, importable headers metadata).
+ //
void link_rule::
pkgconfig_save (action a,
const file& l,
@@ -1478,7 +1524,7 @@ namespace build2
// This is the lib{} group if we are generating the common file and the
// target itself otherwise.
//
- const file& g (common ? l.group->as<file> () : l);
+ const target& g (common ? *l.group : l);
// By default we assume things go into install.{include, lib}.
//
@@ -1486,32 +1532,124 @@ namespace build2
// install without actual install and remove the file if it exists.
//
// @@ Shouldn't we use target's install value rather than install.lib
- // in case it gets installed into a custom location?
+ // in case it gets installed into a custom location? I suppose one
+ // can now use cc.pkgconfig.lib to customize this.
//
using install::resolve_dir;
- dir_path ldir (resolve_dir (g,
- cast<dir_path> (g["install.lib"]),
- false /* fail_unknown */));
- if (ldir.empty ())
+ small_vector<dir_path, 1> ldirs;
+
+ if (const dir_paths* ds = cast_null<dir_paths> (g[c_pkgconfig_lib]))
+ {
+ for (const dir_path& d: *ds)
+ {
+ bool f (ldirs.empty ());
+
+ ldirs.push_back (resolve_dir (g, d, {}, !f /* fail_unknown */));
+
+ if (f && ldirs.back ().empty ())
+ break;
+ }
+ }
+ else
+ ldirs.push_back (resolve_dir (g,
+ cast<dir_path> (g["install.lib"]),
+ {},
+ false /* fail_unknown */));
+
+ if (!ldirs.empty () && ldirs.front ().empty ())
{
rmfile (ctx, p, 3 /* verbosity */);
return;
}
- dir_path idir (resolve_dir (g, cast<dir_path> (g["install.include"])));
+ small_vector<dir_path, 1> idirs;
+
+ if (const dir_paths* ds = cast_null<dir_paths> (g[c_pkgconfig_include]))
+ {
+ for (const dir_path& d: *ds)
+ idirs.push_back (resolve_dir (g, d));
+ }
+ else
+ idirs.push_back (resolve_dir (g,
+ cast<dir_path> (g["install.include"])));
// Note that generation can take some time if we have a large number of
// prerequisite libraries.
//
- if (verb)
- text << "pc " << *t;
- else if (verb >= 2)
+ if (verb >= 2)
text << "cat >" << p;
+ else if (verb)
+ print_diag ("pc", g, *t);
if (ctx.dry_run)
return;
+ // See if we should be generating a relocatable .pc file and if so get
+ // its installation location. The plan is to make all absolute paths
+ // that we write relative to this location and prefix them with the
+ // built-in ${pcfiledir} variable (which is supported by everybody: the
+ // original pkg-config, pkgconf, and our libpkg-config library).
+ //
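+ // For example (a hypothetical layout), for a .pc file installed into
+ // /usr/lib/pkgconfig/ we would end up writing something like:
+ //
+ //   /usr/include  ->  ${pcfiledir}/../../include
+ //   /usr/lib      ->  ${pcfiledir}/..
+ //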
+ dir_path rel_base;
+ if (cast_false<bool> (rs["install.relocatable"]))
+ {
+ path f (install::resolve_file (*t));
+ if (!f.empty ()) // Shouldn't happen but who knows.
+ rel_base = f.directory ();
+ }
+
+ // Note: reloc_*path() expect absolute and normalized paths.
+ //
+ // Note also that reloc_path() can be used on dir_path to get the path
+ // without the trailing slash.
+ //
+ auto reloc_path = [&rel_base,
+ s = string ()] (const path& p,
+ const char* what) mutable
+ -> const string&
+ {
+ if (rel_base.empty ())
+ return p.string ();
+
+ try
+ {
+ s = p.relative (rel_base).string ();
+ }
+ catch (const invalid_path&)
+ {
+ fail << "unable to make " << what << " path " << p << " relative to "
+ << rel_base;
+ }
+
+ if (!s.empty ()) s.insert (0, 1, path_traits::directory_separator);
+ s.insert (0, "${pcfiledir}");
+ return s;
+ };
+
+ auto reloc_dir_path = [&rel_base,
+ s = string ()] (const dir_path& p,
+ const char* what) mutable
+ -> const string&
+ {
+ if (rel_base.empty ())
+ return (s = p.representation ());
+
+ try
+ {
+ s = p.relative (rel_base).representation ();
+ }
+ catch (const invalid_path&)
+ {
+ fail << "unable to make " << what << " path " << p << " relative to "
+ << rel_base;
+ }
+
+ if (!s.empty ()) s.insert (0, 1, path_traits::directory_separator);
+ s.insert (0, "${pcfiledir}");
+ return s;
+ };
+
auto_rmfile arm (p);
try
@@ -1529,6 +1667,20 @@ namespace build2
fail << "no version variable in project " << n <<
info << "while generating " << p;
+ // When comparing versions, pkg-config uses RPM semantics, which is
+ // basically comparing each all-digit/alpha fragment in order.
+ // This means, for example, a semver with a pre-release will be
+ // compared incorrectly (pre-release will be greater than the final
+ // version). We could detect if this project uses stdver and chop
+ // off any pre-release information (so, essentially only saving the
+ // major.minor.patch part). But that means such .pc files will
+ // contain inaccurate version information. And seeing that we don't
+ // recommend using pkg-config (rather primitive) package dependency
+ // support, having complete version information for documentation
+ // seems more important.
+ //
+ // @@ Maybe still makes sense to only save version.project_id?
+ //
const string& v (cast<string> (vl));
os << "Name: " << n << endl;
@@ -1645,13 +1797,11 @@ namespace build2
return n;
};
- // @@ TODO: support whole archive?
- //
-
// Cflags.
//
os << "Cflags:";
- os << " -I" << escape (idir.string ());
+ for (const dir_path& d: idirs)
+ os << " -I" << escape (reloc_path (d, "header search"));
save_poptions (x_export_poptions);
save_poptions (c_export_poptions);
os << endl;
@@ -1670,7 +1820,8 @@ namespace build2
// While we don't need it for a binless library itself, it may be
// necessary to resolve its binful dependencies.
//
- os << " -L" << escape (ldir.string ());
+ for (const dir_path& d: ldirs)
+ os << " -L" << escape (reloc_path (d, "library search"));
// Now process ourselves as if we were being linked to something (so
// pretty similar to link_rule::append_libraries()). We also reuse
@@ -1686,7 +1837,8 @@ namespace build2
appended_libraries* pls; // Previous.
appended_libraries* ls; // Current.
strings& args;
- } d {os, nullptr, &ls, args};
+ bool common;
+ } d {os, nullptr, &ls, args, common};
auto imp = [&priv] (const target&, bool la) {return priv && la;};
@@ -1730,7 +1882,17 @@ namespace build2
if (l != nullptr)
{
if (l->is_a<libs> () || l->is_a<liba> ()) // See through libux.
- d.args.push_back (save_library_target (*l));
+ {
+ // Omit binless libraries from the common .pc file (see
+ // above).
+ //
+ // Note that in this case we still want to recursively
+ // traverse such libraries since they may still link to some
+ // non-binless system libraries (-lm, etc).
+ //
+ if (!d.common || !l->path ().empty ())
+ d.args.push_back (save_library_target (*l));
+ }
}
else
{
@@ -1752,7 +1914,7 @@ namespace build2
//@@ TODO: should we filter -L similar to -I?
//@@ TODO: how will the Libs/Libs.private work?
- //@@ TODO: remember to use escape()
+ //@@ TODO: remember to use reloc_*() and escape().
if (d.pls != nullptr && d.pls->find (l) != nullptr)
return true;
@@ -1773,7 +1935,10 @@ namespace build2
library_cache lib_cache;
process_libraries (a, bs, li, sys_lib_dirs,
l, la, 0, // Link flags.
- imp, lib, opt, !binless /* self */, &lib_cache);
+ imp, lib, opt,
+ !binless /* self */,
+ false /* proc_opt_group */, // @@ !priv?
+ &lib_cache);
for (const string& a: args)
os << ' ' << a;
@@ -1795,11 +1960,326 @@ namespace build2
process_libraries (a, bs, li, sys_lib_dirs,
l, la, 0, // Link flags.
- imp, lib, opt, false /* self */, &lib_cache);
+ imp, lib, opt,
+ false /* self */,
+ false /* proc_opt_group */, // @@ !priv?
+ &lib_cache);
for (const string& a: args)
os << ' ' << a;
os << endl;
+
+ // See also bin.whole below.
+ }
+ }
+
+ // Save metadata unless this is the common .pc file (see above).
+ //
+ if (common)
+ {
+ os.close ();
+ arm.cancel ();
+ return;
+ }
+
+ // The build2.metadata variable is a general indication of the
+ // metadata being present. Its value is the metadata version
+ // optionally followed by the user metadata variable prefix and
+ // variable list (see below for details). Having only the version
+ // indicates the absence of user metadata.
+ //
+ // See if we have the user metadata.
+ //
+ lookup um (g[ctx.var_export_metadata]); // Target visibility.
+
+ if (um && !um->empty ())
+ {
+ const names& ns (cast<names> (um));
+
+ // First verify the version.
+ //
+ uint64_t ver;
+ try
+ {
+ // Note: does not change the passed name.
+ //
+ ver = value_traits<uint64_t>::convert (
+ ns[0], ns[0].pair ? &ns[1] : nullptr);
+ }
+ catch (const invalid_argument& e)
+ {
+ fail << "invalid metadata version in library " << g << ": " << e
+ << endf;
+ }
+
+ if (ver != 1)
+ fail << "unexpected metadata version " << ver << " in library "
+ << g;
+
+ // Next verify the metadata variable prefix.
+ //
+ if (ns.size () != 2 || !ns[1].simple ())
+ fail << "invalid metadata variable prefix in library " << g;
+
+ const string& pfx (ns[1].value);
+
+ // Now find all the target-specific variables with this prefix.
+ //
+ // If this is the common .pc file, then we only look in the group.
+ // Otherwise, in the member and the group.
+ //
+ // To allow setting different values for the for-install and
+ // development build cases (required when a library comes with
+ // additional "assets"), we recognize the special .for_install
+ // variable name suffix: if there are both <prefix>.<name> and
+ // <prefix>.<name>.for_install variables, then here we take the
+ // value from the latter. Note that we don't consider just
+ // <prefix>.for_install as special (so it's available to the user).
+ //
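+ // For example (hypothetical names), if both libfoo.assets and
+ // libfoo.assets.for_install are set, then it is the value of the
+ // latter that gets saved under the libfoo.assets name.
+ //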
+ // We only expect a handful of variables so let's use a vector and
+ // linear search instead of a map.
+ //
+ struct binding
+ {
+ const string* name; // Name to be saved (without .for_install).
+ const variable* var; // Actual variable (potentially .for_install).
+ const value* val; // Actual value.
+ };
+ vector<binding> vars;
+
+ auto append = [&l, &pfx, &vars,
+ tmp = string ()] (const target& t, bool dup) mutable
+ {
+ for (auto p (t.vars.lookup_namespace (pfx));
+ p.first != p.second;
+ ++p.first)
+ {
+ const variable* var (&p.first->first.get ());
+
+ // Handle .for_install.
+ //
+ // The plan is as follows: if this is .for_install, then just
+ // verify we also have the value without the suffix and skip
+ // it. Otherwise, check if there is also the .for_install variant
+ // and if so, use that instead. While we could probably do this
+ // more efficiently by remembering what we saw in vars, this is
+ // not performance-sensitive and so we keep it simple for now.
+ //
+ const string* name;
+ {
+ const string& v (var->name);
+ size_t n (v.size ());
+
+ if (n > pfx.size () + 1 + 12 && // <prefix>..for_install
+ v.compare (n - 12, 12, ".for_install") == 0)
+ {
+ tmp.assign (v, 0, n - 12);
+
+ if (t.vars.find (tmp) == t.vars.end ())
+ fail << v << " variant without " << tmp << " in library "
+ << l;
+
+ continue;
+ }
+ else
+ {
+ name = &v;
+
+ tmp = v; tmp += ".for_install";
+
+ auto i (t.vars.find (tmp));
+ if (i != t.vars.end ())
+ var = &i->first.get ();
+ }
+ }
+
+ if (dup)
+ {
+ if (find_if (vars.begin (), vars.end (),
+ [name] (const binding& p)
+ {
+ return *p.name == *name;
+ }) != vars.end ())
+ continue;
+ }
+
+ // Re-lookup the value in order to apply target type/pattern
+ // specific prepends/appends.
+ //
+ lookup l (t[*var]);
+ assert (l.defined ());
+
+ vars.push_back (binding {name, var, l.value});
+ }
+ };
+
+ append (g, false);
+
+ if (!common)
+ {
+ if (l.group != nullptr)
+ append (*l.group, true);
+ }
+
+ // First write the build2.metadata variable with the version,
+ // prefix, and all the variable names/types (which should not
+ // require any escaping).
+ //
+ os << endl
+ << "build2.metadata = " << ver << ' ' << pfx;
+
+ for (const binding& b: vars)
+ {
+ const variable& var (*b.var);
+ const value& val (*b.val);
+
+ // There is no notion of NULL in pkg-config variables and it's
+ // probably best not to conflate them with empty.
+ //
+ if (val.null)
+ fail << "null value in exported variable " << var
+ << " of library " << l;
+
+ if (val.type == nullptr)
+ fail << "untyped value in exported variable " << var
+ << " of library " << l;
+
+ // Tighten this to only a sensible subset of types (see
+ // parsing/serialization code for some of the potential problems).
+ //
+ if (!metadata_type (val.type->name).first)
+ fail << "unsupported value type " << val.type->name
+ << " in exported variable " << var << " of library " << l;
+
+ os << " \\" << endl
+ << *b.name << '/' << val.type->name;
+ }
+
+ os << endl
+ << endl;
+
+ // Now the variables themselves.
+ //
+ string s; // Reuse the buffer.
+ for (const binding& b: vars)
+ {
+ const variable& var (*b.var);
+ const value& val (*b.val);
+
+ names ns;
+ names_view nv (reverse (val, ns, true /* reduce */));
+
+ os << *b.name << " =";
+
+ auto append = [&rel_base,
+ &reloc_path,
+ &reloc_dir_path,
+ &l, &var, &val, &s] (const name& v)
+ {
+ // If this is an absolute path or dir_path, then attempt to
+ // relocate. Without that the result will not be relocatable.
+ //
+ if (v.simple ())
+ {
+ path p;
+ if (!rel_base.empty () &&
+ val.type != nullptr &&
+ (val.type->is_a<path> () || val.type->is_a<paths> ()) &&
+ (p = path (v.value)).absolute ())
+ {
+ p.normalize ();
+ s += reloc_path (p, var.name.c_str ());
+ }
+ else
+ s += v.value;
+ }
+ else if (v.directory ())
+ {
+ if (!rel_base.empty () && v.dir.absolute ())
+ {
+ dir_path p (v.dir);
+ p.normalize ();
+ s += reloc_dir_path (p, var.name.c_str ());
+ }
+ else
+ s += v.dir.representation ();
+ }
+ else
+ // It seems like we shouldn't end up here due to the type
+ // check but let's keep it for good measure.
+ //
+ fail << "simple or directory value expected instead of '"
+ << v << "' in exported variable " << var << " of library "
+ << l;
+ };
+
+ for (auto i (nv.begin ()); i != nv.end (); ++i)
+ {
+ s.clear ();
+ append (*i);
+
+ if (i->pair)
+ {
+ // @@ What if the value contains the pair character? Maybe
+ // quote the halves in this case? Note: need to handle in
+ // parse_metadata() above if enable here. Note: none of the
+ // types currently allowed use pairs.
+#if 0
+ s += i->pair;
+ append (*++i);
+#else
+ fail << "pair in exported variable " << var << " of library "
+ << l;
+#endif
+ }
+
+ os << ' ' << escape (s);
+ }
+
+ os << endl;
+ }
+ }
+ else
+ {
+ // No user metadata.
+ //
+ os << endl
+ << "build2.metadata = 1" << endl;
+ }
+
+ // Save cc.type (see init() for the format documentation).
+ //
+ // Note that this value is set by link_rule and therefore should
+ // be there.
+ //
+ {
+ const string& t (
+ cast<string> (
+ l.state[a].lookup_original (
+ c_type, true /* target_only */).first));
+
+ // If common, then only save the language (the rest could be
+ // static/shared-specific; strictly speaking even the language could
+ // be, but that seems far fetched).
+ //
+ os << endl
+ << "cc.type = " << (common ? string (t, 0, t.find (',')) : t)
+ << endl;
+ }
+
+ // Save the bin.whole (whole archive) flag (see the link rule for
+ // details on the lookup semantics).
+ //
+ if (la)
+ {
+ // Note: go straight for the public variable pool.
+ //
+ if (cast_false<bool> (l.lookup_original (
+ ctx.var_pool["bin.whole"],
+ true /* target_only */).first))
+ {
+ os << endl
+ << "bin.whole = true" << endl;
}
}
@@ -1881,7 +2361,7 @@ namespace build2
move (pp),
symexport});
}
- else if (pt->is_a (**x_hdr) || pt->is_a<h> ())
+ else if (pt->is_a (**this->x_hdrs) || pt->is_a<h> ())
{
if (cast_false<bool> ((*pt)[c_importable]))
{
@@ -1906,7 +2386,7 @@ namespace build2
if (size_t n = mods.size ())
{
os << endl
- << "cxx_modules =";
+ << "cxx.modules =";
// The partition separator (`:`) is not a valid character in the
// variable name. In fact, from the pkg-config source we can see
@@ -1924,33 +2404,35 @@ namespace build2
// Module names shouldn't require escaping.
//
os << (n != 1 ? " \\\n" : " ")
- << m.name << '=' << escape (m.file.string ());
+ << m.name << '='
+ << escape (reloc_path (m.file, "module interface"));
}
os << endl;
// Module-specific properties. The format is:
//
- // <lang>_module_<property>.<module> = <value>
+ // <lang>.module_<property>.<module> = <value>
//
for (const module& m: mods)
{
if (!m.preprocessed.empty ())
- os << "cxx_module_preprocessed." << m.name << " = "
+ os << "cxx.module_preprocessed." << m.name << " = "
<< m.preprocessed << endl;
if (m.symexport)
- os << "cxx_module_symexport." << m.name << " = true" << endl;
+ os << "cxx.module_symexport." << m.name << " = true" << endl;
}
}
if (size_t n = c_hdrs.size ())
{
os << endl
- << "c_importable_headers =";
+ << "c.importable_headers =";
for (const path& h: c_hdrs)
- os << (n != 1 ? " \\\n" : " ") << escape (h.string ());
+ os << (n != 1 ? " \\\n" : " ")
+ << escape (reloc_path (h, "header unit"));
os << endl;
}
@@ -1958,10 +2440,11 @@ namespace build2
if (size_t n = x_hdrs.size ())
{
os << endl
- << x << "_importable_headers =";
+ << x << ".importable_headers =";
for (const path& h: x_hdrs)
- os << (n != 1 ? " \\\n" : " ") << escape (h.string ());
+ os << (n != 1 ? " \\\n" : " ")
+ << escape (reloc_path (h, "header unit"));
os << endl;
}
diff --git a/libbuild2/cc/pkgconfig.hxx b/libbuild2/cc/pkgconfig.hxx
new file mode 100644
index 0000000..a1bcdee
--- /dev/null
+++ b/libbuild2/cc/pkgconfig.hxx
@@ -0,0 +1,129 @@
+// file : libbuild2/cc/pkgconfig.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef LIBBUILD2_CC_PKGCONFIG_HXX
+#define LIBBUILD2_CC_PKGCONFIG_HXX
+
+// In order not to complicate the bootstrap procedure with libpkg-config
+// building, exclude functionality that involves reading of .pc files.
+//
+#ifndef BUILD2_BOOTSTRAP
+
+#ifndef BUILD2_LIBPKGCONF
+# include <libpkg-config/pkg-config.h>
+#else
+# include <libpkgconf/libpkgconf.h>
+#endif
+
+#include <libbuild2/types.hxx>
+#include <libbuild2/utility.hxx>
+
+namespace build2
+{
+ namespace cc
+ {
+ // Load package information from a .pc file. Filter out the -I/-L options
+ // that refer to system directories. This makes sure all the system search
+ // directories are "pushed" to the back which minimizes the chances of
+ // picking up the wrong (e.g., old installed version) header/library.
+ //
+ // Note that the prerequisite package .pc files search order is as
+ // follows:
+ //
+ // - in the directory of the specified file
+ // - in pc_dirs directories (in the specified order)
+ //
+ // Issue diagnostics and throw failed on any errors.
+ //
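+ // A minimal usage sketch (hypothetical path and directory lists):
+ //
+ //   pkgconfig pc (path ("/usr/lib/pkgconfig/libfoo.pc"),
+ //                 pc_dirs, sys_hdr_dirs, sys_lib_dirs);
+ //
+ //   strings cf (pc.cflags (false /* static */));
+ //   strings ls (pc.libs (true /* static */));
+ //   optional<string> md (pc.variable ("build2.metadata"));
+ //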
+ class pkgconfig
+ {
+ public:
+ using path_type = build2::path;
+
+ path_type path;
+
+ public:
+ pkgconfig (path_type,
+ const dir_paths& pc_dirs,
+ const dir_paths& sys_hdr_dirs,
+ const dir_paths& sys_lib_dirs);
+
+ // Create an unloaded/empty object. Querying package information on such
+ // an object is illegal.
+ //
+ pkgconfig () = default;
+ ~pkgconfig ();
+
+ // Movable-only type.
+ //
+ pkgconfig (pkgconfig&&) noexcept;
+ pkgconfig& operator= (pkgconfig&&) noexcept;
+
+ pkgconfig (const pkgconfig&) = delete;
+ pkgconfig& operator= (const pkgconfig&) = delete;
+
+ strings
+ cflags (bool static_) const;
+
+ strings
+ libs (bool static_) const;
+
+ optional<string>
+ variable (const char*) const;
+
+ optional<string>
+ variable (const string& s) const {return variable (s.c_str ());}
+
+ private:
+ void
+ free ();
+
+#ifndef BUILD2_LIBPKGCONF
+ pkg_config_client_t* client_ = nullptr;
+ pkg_config_pkg_t* pkg_ = nullptr;
+#else
+ pkgconf_client_t* client_ = nullptr;
+ pkgconf_pkg_t* pkg_ = nullptr;
+#endif
+ };
+
+ inline pkgconfig::
+ ~pkgconfig ()
+ {
+ if (client_ != nullptr) // Not empty.
+ free ();
+ }
+
+ inline pkgconfig::
+ pkgconfig (pkgconfig&& p) noexcept
+ : path (move (p.path)),
+ client_ (p.client_),
+ pkg_ (p.pkg_)
+ {
+ p.client_ = nullptr;
+ p.pkg_ = nullptr;
+ }
+
+ inline pkgconfig& pkgconfig::
+ operator= (pkgconfig&& p) noexcept
+ {
+ if (this != &p)
+ {
+ if (client_ != nullptr) // Not empty.
+ free ();
+
+ path = move (p.path);
+ client_ = p.client_;
+ pkg_ = p.pkg_;
+
+ p.client_ = nullptr;
+ p.pkg_ = nullptr;
+ }
+ return *this;
+ }
+ }
+}
+
+#endif // BUILD2_BOOTSTRAP
+
+#endif // LIBBUILD2_CC_PKGCONFIG_HXX
diff --git a/libbuild2/cc/predefs-rule.cxx b/libbuild2/cc/predefs-rule.cxx
new file mode 100644
index 0000000..e74192d
--- /dev/null
+++ b/libbuild2/cc/predefs-rule.cxx
@@ -0,0 +1,379 @@
+// file : libbuild2/cc/predefs-rule.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <libbuild2/cc/predefs-rule.hxx>
+
+#include <libbuild2/depdb.hxx>
+#include <libbuild2/target.hxx>
+#include <libbuild2/context.hxx>
+#include <libbuild2/algorithm.hxx>
+#include <libbuild2/filesystem.hxx>
+#include <libbuild2/diagnostics.hxx>
+
+namespace build2
+{
+ namespace cc
+ {
+ predefs_rule::
+ predefs_rule (data&& d)
+ : common (move (d)),
+ rule_name (string (x) += ".predefs"),
+ rule_id (rule_name + " 1")
+ {
+ }
+
+ bool predefs_rule::
+ match (action, target&, const string& hint, match_extra&) const
+ {
+ tracer trace (x, "predefs_rule::match");
+
+ // We only match with an explicit hint (failed that, we will turn every
+ // header into predefs).
+ //
+ if (hint == rule_name)
+ {
+ // Don't match if unsupported compiler. In particular, this allows the
+ // user to provide a fallback rule.
+ //
+ switch (cclass)
+ {
+ case compiler_class::gcc: return true;
+ case compiler_class::msvc:
+ {
+ // Only MSVC 19.20 or later. Not tested with clang-cl.
+ //
+ if (cvariant.empty () && (cmaj > 19 || (cmaj == 19 && cmin >= 20)))
+ return true;
+
+ l4 ([&]{trace << "unsupported compiler/version";});
+ break;
+ }
+ }
+ }
+
+ return false;
+ }
+
+ recipe predefs_rule::
+ apply (action a, target& xt, match_extra&) const
+ {
+ file& t (xt.as<file> ());
+ t.derive_path ();
+
+ // Inject dependency on the output directory.
+ //
+ inject_fsdir (a, t);
+
+ if (a == perform_update_id)
+ {
+ return [this] (action a, const target& xt)
+ {
+ return perform_update (a, xt);
+ };
+ }
+ else if (a == perform_clean_id)
+ {
+ return [] (action a, const target& t)
+ {
+ // Also remove the temporary input source file in case it wasn't
+ // removed at the end of the update.
+ //
+ return perform_clean_extra (a, t.as<file> (), {".d", ".t"});
+ };
+ }
+ else
+ return noop_recipe; // Configure update.
+ }
+
+ // Filter noise, sanitize options (msvc.cxx).
+ //
+ void
+ msvc_filter_cl (diag_buffer&, const path& src);
+
+ void
+ msvc_sanitize_cl (cstrings&);
+
+ target_state predefs_rule::
+ perform_update (action a, const target& xt) const
+ {
+ tracer trace (x, "predefs_rule::perform_update");
+
+ const file& t (xt.as<file> ());
+ const path& tp (t.path ());
+
+ context& ctx (t.ctx);
+
+ const scope& rs (t.root_scope ());
+
+ // Execute prerequisites (the output directory being the only one, thus
+ // no mtime checking).
+ //
+ execute_prerequisites (a, t);
+
+ // Use depdb to track changes to options, compiler, etc (similar to
+ // the compile_rule).
+ //
+ depdb dd (tp + ".d");
+ {
+ // First should come the rule name/version.
+ //
+ if (dd.expect (rule_id) != nullptr)
+ l4 ([&]{trace << "rule mismatch forcing update of " << t;});
+
+ // Then the compiler checksum.
+ //
+ if (dd.expect (cast<string> (rs[x_checksum])) != nullptr)
+ l4 ([&]{trace << "compiler mismatch forcing update of " << t;});
+
+ // Then the compiler environment checksum.
+ //
+ if (dd.expect (env_checksum) != nullptr)
+ l4 ([&]{trace << "environment mismatch forcing update of " << t;});
+
+ // Finally the options checksum (as below).
+ //
+ {
+ sha256 cs;
+ append_options (cs, t, c_coptions);
+ append_options (cs, t, x_coptions);
+ append_options (cs, cmode);
+
+ if (dd.expect (cs.string ()) != nullptr)
+ l4 ([&]{trace << "options mismatch forcing update of " << t;});
+ }
+ }
+
+ // Update if depdb mismatch.
+ //
+ bool update (dd.writing () || dd.mtime > t.load_mtime ());
+
+ dd.close ();
+
+ if (!update)
+ return target_state::unchanged; // No mtime-based prerequisites.
+
+ // Prepare the compiler command-line.
+ //
+ cstrings args {cpath.recall_string ()};
+
+ // Append compile options.
+ //
+ // Note that any command line macros that we specify with -D will end up
+ // in the predefs, which is something we don't want. So no poptions.
+ //
+ append_options (args, t, c_coptions);
+ append_options (args, t, x_coptions);
+ append_options (args, cmode);
+
+ // The output and input paths, relative to the working directory for
+ // easier to read diagnostics.
+ //
+ path relo (relative (tp));
+ path reli;
+
+ // Add compiler-specific command-line arguments.
+ //
+ switch (cclass)
+ {
+ case compiler_class::gcc:
+ {
+ // Add implied options which may affect predefs, similar to the
+ // compile rule.
+ //
+ if (!find_option_prefix ("-finput-charset=", args))
+ args.push_back ("-finput-charset=UTF-8");
+
+ if (ctype == compiler_type::clang && tsys == "win32-msvc")
+ {
+ if (!find_options ({"-nostdlib", "-nostartfiles"}, args))
+ {
+ args.push_back ("-D_MT");
+ args.push_back ("-D_DLL");
+ }
+ }
+
+ if (ctype == compiler_type::clang && cvariant == "emscripten")
+ {
+ if (x_lang == lang::cxx)
+ {
+ if (!find_option_prefix ("DISABLE_EXCEPTION_CATCHING=", args))
+ {
+ args.push_back ("-s");
+ args.push_back ("DISABLE_EXCEPTION_CATCHING=0");
+ }
+ }
+ }
+
+ args.push_back ("-E"); // Stop after the preprocessing stage.
+ args.push_back ("-dM"); // Generate #define directives.
+
+ // Output.
+ //
+ args.push_back ("-o");
+ args.push_back (relo.string ().c_str ());
+
+ // Input.
+ //
+ args.push_back ("-x");
+ switch (x_lang)
+ {
+ case lang::c: args.push_back ("c"); break;
+ case lang::cxx: args.push_back ("c++"); break;
+ }
+
+ // With GCC and Clang we can compile /dev/null as stdin by
+ // specifying `-` and thus omitting the temporary file.
+ //
+ args.push_back ("-");
+
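+ // The assembled command line is thus along these lines (an
+ // illustration rather than the exact invocation):
+ //
+ //   g++ <coptions> -finput-charset=UTF-8 -E -dM -o <output> -x c++ -
+ //
+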
+ break;
+ }
+ case compiler_class::msvc:
+ {
+ // Add implied options which may affect predefs, similar to the
+ // compile rule.
+ //
+ {
+ // Note: these affect the _MSVC_EXECUTION_CHARACTER_SET, _UTF8
+ // macros.
+ //
+ bool sc (find_option_prefixes (
+ {"/source-charset:", "-source-charset:"}, args));
+ bool ec (find_option_prefixes (
+ {"/execution-charset:", "-execution-charset:"}, args));
+
+ if (!sc && !ec)
+ args.push_back ("/utf-8");
+ else
+ {
+ if (!sc)
+ args.push_back ("/source-charset:UTF-8");
+
+ if (!ec)
+ args.push_back ("/execution-charset:UTF-8");
+ }
+ }
+
+ if (x_lang == lang::cxx)
+ {
+ if (!find_option_prefixes ({"/EH", "-EH"}, args))
+ args.push_back ("/EHsc");
+ }
+
+ if (!find_option_prefixes ({"/MD", "/MT", "-MD", "-MT"}, args))
+ args.push_back ("/MD");
+
+ msvc_sanitize_cl (args);
+
+ args.push_back ("/nologo");
+
+ // /EP may seem like it contradicts /P but it's the recommended
+ // way to suppress `#line`s from the output of the /P option (see
+ // /P in the "MSVC Compiler Options" documentation).
+ //
+ args.push_back ("/P"); // Write preprocessor output to a file.
+ args.push_back ("/EP"); // Preprocess to stdout without `#line`s.
+
+ args.push_back ("/PD"); // Print all macro definitions.
+ args.push_back ("/Zc:preprocessor"); // Preproc. conformance mode.
+
+ // Output (note that while the /Fi: variant is only available
+ // starting with VS2013, /Zc:preprocessor is only available
+ // starting from VS2019).
+ //
+ args.push_back ("/Fi:");
+ args.push_back (relo.string ().c_str ());
+
+ // Input.
+ //
+ switch (x_lang)
+ {
+ case lang::c: args.push_back ("/TC"); break;
+ case lang::cxx: args.push_back ("/TP"); break;
+ }
+
+ // Input path.
+ //
+ // Note that with MSVC we have to use a temporary file. In
+ // particular compiling `nul` does not work.
+ //
+ reli = relo + ".t";
+ args.push_back (reli.string ().c_str ());
+
+ break;
+ }
+ }
+
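+    // Terminate the argument array (the process expects a NULL-terminated
+    // argv).
+    //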
+ args.push_back (nullptr);
+
+ // Run the compiler.
+ //
+ if (verb >= 2)
+ print_process (args);
+ else if (verb)
+ print_diag ((string (x_name) + "-predefs").c_str (), t);
+
+ if (!ctx.dry_run)
+ {
+ // Create an empty temporary input source file, if necessary.
+ //
+ auto_rmfile rmi;
+ if (!reli.empty ())
+ {
+ rmi = auto_rmfile (reli);
+
+ if (exists (reli, false /* follow_symlinks */))
+ rmfile (ctx, reli, 3 /* verbosity */);
+
+ touch (ctx, reli, true /* create */, 3 /* verbosity */);
+ }
+
+ try
+ {
+ // VC cl.exe sends diagnostics to stdout. It also prints the file
+ // name being compiled as the first line. So for cl.exe we filter
+ // that noise out.
+ //
+ // For other compilers also redirect stdout to stderr, in case any
+ // of them tries to pull off something similar. For sane compilers
+ // this should be harmless.
+ //
+ // We also redirect stdin to /dev/null in case that's used instead
+ // of the temporary file.
+ //
+      // Note: similar logic to that in compile_rule.
+ //
+ bool filter (ctype == compiler_type::msvc);
+
+ process pr (cpath,
+ args,
+ -2, /* stdin */
+ 2, /* stdout */
+ diag_buffer::pipe (ctx, filter /* force */) /* stderr */);
+
+ diag_buffer dbuf (ctx, args[0], pr);
+
+ if (filter)
+ msvc_filter_cl (dbuf, reli);
+
+ dbuf.read ();
+
+ run_finish (dbuf, args, pr, 1 /* verbosity */);
+ dd.check_mtime (tp);
+ }
+ catch (const process_error& e)
+ {
+ error << "unable to execute " << args[0] << ": " << e;
+
+ if (e.child)
+ exit (1);
+
+ throw failed ();
+ }
+ }
+
+ t.mtime (system_clock::now ());
+ return target_state::changed;
+ }
+ }
+}
diff --git a/libbuild2/cc/predefs-rule.hxx b/libbuild2/cc/predefs-rule.hxx
new file mode 100644
index 0000000..60aa063
--- /dev/null
+++ b/libbuild2/cc/predefs-rule.hxx
@@ -0,0 +1,45 @@
+// file : libbuild2/cc/predefs-rule.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef LIBBUILD2_CC_PREDEFS_RULE_HXX
+#define LIBBUILD2_CC_PREDEFS_RULE_HXX
+
+#include <libbuild2/types.hxx>
+#include <libbuild2/utility.hxx>
+
+#include <libbuild2/rule.hxx>
+
+#include <libbuild2/cc/types.hxx>
+#include <libbuild2/cc/common.hxx>
+
+#include <libbuild2/cc/export.hxx>
+
+namespace build2
+{
+ namespace cc
+ {
+ class LIBBUILD2_CC_SYMEXPORT predefs_rule: public rule,
+ virtual common
+ {
+ public:
+ const string rule_name;
+
+ explicit
+ predefs_rule (data&&);
+
+ virtual bool
+ match (action, target&, const string&, match_extra&) const override;
+
+ virtual recipe
+ apply (action, target&, match_extra&) const override;
+
+ target_state
+ perform_update (action, const target&) const;
+
+ private:
+ const string rule_id;
+ };
+ }
+}
+
+#endif // LIBBUILD2_CC_PREDEFS_RULE_HXX
diff --git a/libbuild2/cc/std.cppm b/libbuild2/cc/std.cppm
new file mode 100644
index 0000000..5368d1c
--- /dev/null
+++ b/libbuild2/cc/std.cppm
@@ -0,0 +1,6781 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// WARNING, this entire header is generated by
+// utils/generate_std_cppm_in.py
+// DO NOT MODIFY!
+
+module;
+
+#include <__config>
+
+#if _LIBCPP_VERSION < 170000
+#error libc++ version 17.0.0 or later required
+#endif
+
+// The headers of Table 24: C++ library headers [tab:headers.cpp]
+// and the headers of Table 25: C++ headers for C library facilities [tab:headers.cpp.c]
+#include <algorithm>
+#include <any>
+#include <array>
+#if !defined(_LIBCPP_HAS_NO_ATOMIC_HEADER)
+# include <atomic>
+#endif
+#if !defined(_LIBCPP_HAS_NO_THREADS)
+# include <barrier>
+#endif
+#include <bit>
+#include <bitset>
+#include <cassert>
+#include <cctype>
+#include <cerrno>
+#include <cfenv>
+#include <cfloat>
+#include <charconv>
+#include <chrono>
+#include <cinttypes>
+#include <climits>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <clocale>
+#endif
+#include <cmath>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <codecvt>
+#endif
+#include <compare>
+#include <complex>
+#include <concepts>
+#include <condition_variable>
+#include <coroutine>
+#include <csetjmp>
+#include <csignal>
+#include <cstdarg>
+#include <cstddef>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <ctime>
+#include <cuchar>
+#if !defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS)
+# include <cwchar>
+#endif
+#if !defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS)
+# include <cwctype>
+#endif
+#include <deque>
+#include <exception>
+#include <execution>
+#include <expected>
+#include <filesystem>
+#include <format>
+#include <forward_list>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <fstream>
+#endif
+#include <functional>
+#if !defined(_LIBCPP_HAS_NO_THREADS)
+# include <future>
+#endif
+#include <initializer_list>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <iomanip>
+#endif
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <ios>
+#endif
+#include <iosfwd>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <iostream>
+#endif
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <istream>
+#endif
+#include <iterator>
+#if !defined(_LIBCPP_HAS_NO_THREADS)
+# include <latch>
+#endif
+#include <limits>
+#include <list>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <locale>
+#endif
+#include <map>
+#include <mdspan>
+#include <memory>
+#include <memory_resource>
+#include <mutex>
+#include <new>
+#include <numbers>
+#include <numeric>
+#include <optional>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <ostream>
+#endif
+#include <print>
+#include <queue>
+#include <random>
+#include <ranges>
+#include <ratio>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <regex>
+#endif
+#include <scoped_allocator>
+#if !defined(_LIBCPP_HAS_NO_THREADS)
+# include <semaphore>
+#endif
+#include <set>
+#if !defined(_LIBCPP_HAS_NO_THREADS)
+# include <shared_mutex>
+#endif
+#include <source_location>
+#include <span>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <sstream>
+#endif
+#include <stack>
+#include <stdexcept>
+#if !defined(_LIBCPP_HAS_NO_THREADS)
+# include <stop_token>
+#endif
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <streambuf>
+#endif
+#include <string>
+#include <string_view>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <strstream>
+#endif
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+#if __has_include(<syncstream>)
+# define _LIPCPP_HAS_YES_SYNCSTREAM
+# include <syncstream>
+#endif
+#endif
+#include <system_error>
+#if !defined(_LIBCPP_HAS_NO_THREADS)
+# include <thread>
+#endif
+#include <tuple>
+#include <type_traits>
+#include <typeindex>
+#include <typeinfo>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+#include <valarray>
+#include <variant>
+#include <vector>
+#include <version>
+
+#if 0
+// *** Headers not yet available ***
+#if __has_include(<debugging>)
+# error "update the header information for <debugging> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<debugging>)
+#if __has_include(<flat_map>)
+# error "update the header information for <flat_map> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<flat_map>)
+#if __has_include(<flat_set>)
+# error "update the header information for <flat_set> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<flat_set>)
+#if __has_include(<generator>)
+# error "update the header information for <generator> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<generator>)
+#if __has_include(<hazard_pointer>)
+# error "update the header information for <hazard_pointer> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<hazard_pointer>)
+#if __has_include(<linalg>)
+# error "update the header information for <linalg> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<linalg>)
+#if __has_include(<rcu>)
+# error "update the header information for <rcu> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<rcu>)
+#if __has_include(<spanstream>)
+# error "update the header information for <spanstream> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<spanstream>)
+#if __has_include(<stacktrace>)
+# error "update the header information for <stacktrace> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<stacktrace>)
+#if __has_include(<stdfloat>)
+# error "update the header information for <stdfloat> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<stdfloat>)
+#if __has_include(<text_encoding>)
+# error "update the header information for <text_encoding> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<text_encoding>)
+#endif
+
+export module std;
+
+// algorithm.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ namespace ranges {
+ // [algorithms.results], algorithm result types
+ using std::ranges::in_found_result;
+ using std::ranges::in_fun_result;
+ using std::ranges::in_in_out_result;
+ using std::ranges::in_in_result;
+ using std::ranges::in_out_out_result;
+ using std::ranges::in_out_result;
+ // using std::ranges::in_value_result;
+ using std::ranges::min_max_result;
+ // using std::ranges::out_value_result;
+ } // namespace ranges
+
+ // [alg.nonmodifying], non-modifying sequence operations
+ // [alg.all.of], all of
+ using std::all_of;
+ namespace ranges {
+ using std::ranges::all_of;
+ }
+
+ // [alg.any.of], any of
+ using std::any_of;
+ namespace ranges {
+ using std::ranges::any_of;
+ }
+
+ // [alg.none.of], none of
+ using std::none_of;
+ namespace ranges {
+ using std::ranges::none_of;
+ }
+
+ // [alg.contains], contains
+#if 0
+ namespace ranges {
+ using std::ranges::contains;
+ using std::ranges::contains_subrange;
+ } // namespace ranges
+#endif
+
+ // [alg.foreach], for each
+ using std::for_each;
+
+ namespace ranges {
+ using std::ranges::for_each;
+ using std::ranges::for_each_result;
+ } // namespace ranges
+
+ using std::for_each_n;
+
+ namespace ranges {
+ using std::ranges::for_each_n_result;
+
+ using std::ranges::for_each_n;
+ } // namespace ranges
+
+ // [alg.find], find
+ using std::find;
+ using std::find_if;
+ using std::find_if_not;
+
+ namespace ranges {
+ using std::ranges::find;
+ using std::ranges::find_if;
+ using std::ranges::find_if_not;
+ } // namespace ranges
+
+ namespace ranges {
+#if 0
+ using std::ranges::find_last;
+ using std::ranges::find_last_if;
+ using std::ranges::find_last_if_not;
+#endif
+ } // namespace ranges
+
+ // [alg.find.end], find end
+ using std::find_end;
+
+ namespace ranges {
+ using std::ranges::find_end;
+ }
+
+ // [alg.find.first.of], find first
+ using std::find_first_of;
+
+ namespace ranges {
+ using std::ranges::find_first_of;
+ }
+
+ // [alg.adjacent.find], adjacent find
+ using std::adjacent_find;
+
+ namespace ranges {
+ using std::ranges::adjacent_find;
+ }
+
+ // [alg.count], count
+ using std::count;
+ using std::count_if;
+
+ namespace ranges {
+ using std::ranges::count;
+ using std::ranges::count_if;
+ } // namespace ranges
+
+ // [mismatch], mismatch
+ using std::mismatch;
+
+ namespace ranges {
+ using std::ranges::mismatch_result;
+
+ using std::ranges::mismatch;
+ } // namespace ranges
+
+ // [alg.equal], equal
+ using std::equal;
+
+ namespace ranges {
+ using std::ranges::equal;
+ }
+
+ // [alg.is.permutation], is permutation
+ using std::is_permutation;
+
+ namespace ranges {
+ using std::ranges::is_permutation;
+ }
+
+ // [alg.search], search
+ using std::search;
+
+ namespace ranges {
+ using std::ranges::search;
+ }
+
+ using std::search_n;
+
+ namespace ranges {
+ using std::ranges::search_n;
+ }
+
+ namespace ranges {
+#if _LIBCPP_STD_VER >= 23
+ // [alg.starts.with], starts with
+ using std::ranges::starts_with;
+
+#if _LIBCPP_VERSION >= 180000
+ // [alg.ends.with], ends with
+ using std::ranges::ends_with;
+#endif
+
+# if 0
+ // [alg.fold], fold
+ using std::ranges::fold_left;
+ using std::ranges::fold_left_first;
+ using std::ranges::fold_right;
+ using std::ranges::fold_right_last;
+ using std::ranges::fold_left_with_iter;
+ using std::ranges::fold_left_with_iter_result;
+ using std::ranges::fold_left_with_iter;
+ using std::ranges::fold_left_first_with_iter;
+ using std::ranges::fold_left_first_with_iter;
+# endif
+#endif // _LIBCPP_STD_VER >= 23
+ } // namespace ranges
+
+ // [alg.modifying.operations], mutating sequence operations
+ // [alg.copy], copy
+ using std::copy;
+
+ namespace ranges {
+ using std::ranges::copy;
+ using std::ranges::copy_result;
+ } // namespace ranges
+
+ using std::copy_n;
+
+ namespace ranges {
+ using std::ranges::copy_n;
+ using std::ranges::copy_n_result;
+ } // namespace ranges
+
+ using std::copy_if;
+
+ namespace ranges {
+ using std::ranges::copy_if;
+ using std::ranges::copy_if_result;
+ } // namespace ranges
+
+ using std::copy_backward;
+
+ namespace ranges {
+ using std::ranges::copy_backward;
+ using std::ranges::copy_backward_result;
+ } // namespace ranges
+
+ // [alg.move], move
+ using std::move;
+
+ namespace ranges {
+ using std::ranges::move;
+ using std::ranges::move_result;
+ } // namespace ranges
+
+ using std::move_backward;
+
+ namespace ranges {
+ using std::ranges::move_backward;
+ using std::ranges::move_backward_result;
+ } // namespace ranges
+
+ // [alg.swap], swap
+ using std::swap_ranges;
+
+ namespace ranges {
+ using std::ranges::swap_ranges;
+ using std::ranges::swap_ranges_result;
+ } // namespace ranges
+
+ using std::iter_swap;
+
+ // [alg.transform], transform
+ using std::transform;
+
+ namespace ranges {
+ using std::ranges::binary_transform_result;
+ using std::ranges::unary_transform_result;
+
+ using std::ranges::transform;
+
+ } // namespace ranges
+
+ using std::replace;
+ using std::replace_if;
+
+ namespace ranges {
+ using std::ranges::replace;
+ using std::ranges::replace_if;
+ } // namespace ranges
+
+ using std::replace_copy;
+ using std::replace_copy_if;
+
+ namespace ranges {
+ using std::ranges::replace_copy;
+ using std::ranges::replace_copy_if;
+ using std::ranges::replace_copy_if_result;
+ using std::ranges::replace_copy_result;
+ } // namespace ranges
+
+ // [alg.fill], fill
+ using std::fill;
+ using std::fill_n;
+
+ namespace ranges {
+ using std::ranges::fill;
+ using std::ranges::fill_n;
+ } // namespace ranges
+
+ // [alg.generate], generate
+ using std::generate;
+ using std::generate_n;
+
+ namespace ranges {
+ using std::ranges::generate;
+ using std::ranges::generate_n;
+ } // namespace ranges
+
+ // [alg.remove], remove
+ using std::remove;
+ using std::remove_if;
+
+ namespace ranges {
+ using std::ranges::remove;
+ using std::ranges::remove_if;
+ } // namespace ranges
+
+ using std::remove_copy;
+ using std::remove_copy_if;
+ namespace ranges {
+ using std::ranges::remove_copy;
+ using std::ranges::remove_copy_if;
+ using std::ranges::remove_copy_if_result;
+ using std::ranges::remove_copy_result;
+ } // namespace ranges
+
+ // [alg.unique], unique
+ using std::unique;
+
+ namespace ranges {
+ using std::ranges::unique;
+ }
+
+ using std::unique_copy;
+
+ namespace ranges {
+ using std::ranges::unique_copy;
+ using std::ranges::unique_copy_result;
+ } // namespace ranges
+
+ // [alg.reverse], reverse
+ using std::reverse;
+
+ namespace ranges {
+ using std::ranges::reverse;
+ }
+
+ using std::reverse_copy;
+
+ namespace ranges {
+ using std::ranges::reverse_copy;
+ using std::ranges::reverse_copy_result;
+ } // namespace ranges
+
+ // [alg.rotate], rotate
+ using std::rotate;
+
+ namespace ranges {
+ using std::ranges::rotate;
+ }
+
+ using std::rotate_copy;
+
+ namespace ranges {
+ using std::ranges::rotate_copy;
+ using std::ranges::rotate_copy_result;
+ } // namespace ranges
+
+ // [alg.random.sample], sample
+ using std::sample;
+
+ namespace ranges {
+ using std::ranges::sample;
+ }
+
+ // [alg.random.shuffle], shuffle
+ using std::shuffle;
+
+ namespace ranges {
+ using std::ranges::shuffle;
+ }
+
+ // [alg.shift], shift
+ using std::shift_left;
+
+ namespace ranges {
+ // using std::ranges::shift_left;
+ }
+
+ using std::shift_right;
+
+ namespace ranges {
+ // using std::ranges::shift_right;
+ }
+
+ // [alg.sorting], sorting and related operations
+ // [alg.sort], sorting
+ using std::sort;
+
+ namespace ranges {
+ using std::ranges::sort;
+ }
+
+ using std::stable_sort;
+
+ namespace ranges {
+ using std::ranges::stable_sort;
+ }
+
+ using std::partial_sort;
+
+ namespace ranges {
+ using std::ranges::partial_sort;
+ }
+ using std::partial_sort_copy;
+
+ namespace ranges {
+ using std::ranges::partial_sort_copy;
+ using std::ranges::partial_sort_copy_result;
+ } // namespace ranges
+
+ using std::is_sorted;
+ using std::is_sorted_until;
+
+ namespace ranges {
+ using std::ranges::is_sorted;
+ using std::ranges::is_sorted_until;
+ } // namespace ranges
+
+ // [alg.nth.element], Nth element
+ using std::nth_element;
+
+ namespace ranges {
+ using std::ranges::nth_element;
+ }
+
+ // [alg.binary.search], binary search
+ using std::lower_bound;
+
+ namespace ranges {
+ using std::ranges::lower_bound;
+ }
+
+ using std::upper_bound;
+
+ namespace ranges {
+ using std::ranges::upper_bound;
+ }
+
+ using std::equal_range;
+
+ namespace ranges {
+ using std::ranges::equal_range;
+ }
+
+ using std::binary_search;
+
+ namespace ranges {
+ using std::ranges::binary_search;
+ }
+
+ // [alg.partitions], partitions
+ using std::is_partitioned;
+
+ namespace ranges {
+ using std::ranges::is_partitioned;
+ }
+
+ using std::partition;
+
+ namespace ranges {
+ using std::ranges::partition;
+ }
+
+ using std::stable_partition;
+
+ namespace ranges {
+ using std::ranges::stable_partition;
+ }
+
+ using std::partition_copy;
+
+ namespace ranges {
+ using std::ranges::partition_copy;
+ using std::ranges::partition_copy_result;
+ } // namespace ranges
+
+ using std::partition_point;
+
+ namespace ranges {
+ using std::ranges::partition_point;
+ }
+ // [alg.merge], merge
+ using std::merge;
+ namespace ranges {
+ using std::ranges::merge;
+ using std::ranges::merge_result;
+ } // namespace ranges
+
+ using std::inplace_merge;
+
+ namespace ranges {
+ using std::ranges::inplace_merge;
+ }
+
+ // [alg.set.operations], set operations
+ using std::includes;
+ namespace ranges {
+ using std::ranges::includes;
+ }
+
+ using std::set_union;
+
+ namespace ranges {
+ using std::ranges::set_union;
+ using std::ranges::set_union_result;
+ } // namespace ranges
+
+ using std::set_intersection;
+ namespace ranges {
+ using std::ranges::set_intersection;
+ using std::ranges::set_intersection_result;
+ } // namespace ranges
+
+ using std::set_difference;
+
+ namespace ranges {
+ using std::ranges::set_difference;
+ using std::ranges::set_difference_result;
+ } // namespace ranges
+
+ using std::set_symmetric_difference;
+
+ namespace ranges {
+ using std::ranges::set_symmetric_difference_result;
+
+ using std::ranges::set_symmetric_difference;
+ } // namespace ranges
+
+ // [alg.heap.operations], heap operations
+ using std::push_heap;
+
+ namespace ranges {
+ using std::ranges::push_heap;
+ }
+
+ using std::pop_heap;
+
+ namespace ranges {
+ using std::ranges::pop_heap;
+ }
+
+ using std::make_heap;
+
+ namespace ranges {
+ using std::ranges::make_heap;
+ }
+
+ using std::sort_heap;
+
+ namespace ranges {
+ using std::ranges::sort_heap;
+ }
+
+ using std::is_heap;
+
+ namespace ranges {
+ using std::ranges::is_heap;
+ }
+
+ using std::is_heap_until;
+
+ namespace ranges {
+ using std::ranges::is_heap_until;
+ }
+
+ // [alg.min.max], minimum and maximum
+ using std::min;
+
+ namespace ranges {
+ using std::ranges::min;
+ }
+
+ using std::max;
+
+ namespace ranges {
+ using std::ranges::max;
+ }
+
+ using std::minmax;
+
+ namespace ranges {
+ using std::ranges::minmax_result;
+
+ using std::ranges::minmax;
+ } // namespace ranges
+
+ using std::min_element;
+
+ namespace ranges {
+ using std::ranges::min_element;
+ }
+
+ using std::max_element;
+
+ namespace ranges {
+ using std::ranges::max_element;
+ }
+
+ using std::minmax_element;
+
+ namespace ranges {
+ using std::ranges::minmax_element_result;
+
+ using std::ranges::minmax_element;
+ } // namespace ranges
+ // [alg.clamp], bounded value
+ using std::clamp;
+
+ namespace ranges {
+ using std::ranges::clamp;
+ }
+
+ // [alg.lex.comparison], lexicographical comparison
+ using std::lexicographical_compare;
+
+ namespace ranges {
+ using std::ranges::lexicographical_compare;
+ }
+
+ // [alg.three.way], three-way comparison algorithms
+ using std::lexicographical_compare_three_way;
+
+ // [alg.permutation.generators], permutations
+ using std::next_permutation;
+
+ namespace ranges {
+ using std::ranges::next_permutation_result;
+
+ using std::ranges::next_permutation;
+ } // namespace ranges
+
+ using std::prev_permutation;
+
+ namespace ranges {
+ using std::ranges::prev_permutation_result;
+
+ using std::ranges::prev_permutation;
+ } // namespace ranges
+
+} // namespace std
+
+// any.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+
+ // [any.bad.any.cast], class bad_any_cast
+ using std::bad_any_cast;
+
+ // [any.class], class any
+ using std::any;
+
+ // [any.nonmembers], non-member functions
+ using std::any_cast;
+ using std::make_any;
+ using std::swap;
+
+} // namespace std
+
+// array.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+
+ // [array], class template array
+ using std::array;
+
+ using std::operator==;
+ using std::operator<=>;
+
+ // [array.special], specialized algorithms
+ using std::swap;
+
+ // [array.creation], array creation functions
+ using std::to_array;
+
+ // [array.tuple], tuple interface
+ using std::get;
+ using std::tuple_element;
+ using std::tuple_size;
+
+} // namespace std
+
+// atomic.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+
+ // [atomics.order], order and consistency
+ using std::memory_order;
+ using std::memory_order_acq_rel;
+ using std::memory_order_acquire;
+ using std::memory_order_consume;
+ using std::memory_order_relaxed;
+ using std::memory_order_release;
+ using std::memory_order_seq_cst;
+
+ using std::kill_dependency;
+
+ // [atomics.ref.generic], class template atomic_ref
+ // [atomics.ref.pointer], partial specialization for pointers
+ // using std::atomic_ref;
+
+ // [atomics.types.generic], class template atomic
+ using std::atomic;
+
+ // [atomics.nonmembers], non-member functions
+ using std::atomic_compare_exchange_strong;
+ using std::atomic_compare_exchange_strong_explicit;
+ using std::atomic_compare_exchange_weak;
+ using std::atomic_compare_exchange_weak_explicit;
+ using std::atomic_exchange;
+ using std::atomic_exchange_explicit;
+ using std::atomic_is_lock_free;
+ using std::atomic_load;
+ using std::atomic_load_explicit;
+ using std::atomic_store;
+ using std::atomic_store_explicit;
+
+ using std::atomic_fetch_add;
+ using std::atomic_fetch_add_explicit;
+ using std::atomic_fetch_and;
+ using std::atomic_fetch_and_explicit;
+ using std::atomic_fetch_or;
+ using std::atomic_fetch_or_explicit;
+ using std::atomic_fetch_sub;
+ using std::atomic_fetch_sub_explicit;
+ using std::atomic_fetch_xor;
+ using std::atomic_fetch_xor_explicit;
+ using std::atomic_notify_all;
+ using std::atomic_notify_one;
+ using std::atomic_wait;
+ using std::atomic_wait_explicit;
+
+ // [atomics.alias], type aliases
+ using std::atomic_bool;
+ using std::atomic_char;
+ using std::atomic_char16_t;
+ using std::atomic_char32_t;
+ using std::atomic_char8_t;
+ using std::atomic_int;
+ using std::atomic_llong;
+ using std::atomic_long;
+ using std::atomic_schar;
+ using std::atomic_short;
+ using std::atomic_uchar;
+ using std::atomic_uint;
+ using std::atomic_ullong;
+ using std::atomic_ulong;
+ using std::atomic_ushort;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::atomic_wchar_t;
+#endif
+
+ using std::atomic_int16_t;
+ using std::atomic_int32_t;
+ using std::atomic_int64_t;
+ using std::atomic_int8_t;
+ using std::atomic_uint16_t;
+ using std::atomic_uint32_t;
+ using std::atomic_uint64_t;
+ using std::atomic_uint8_t;
+
+ using std::atomic_int_least16_t;
+ using std::atomic_int_least32_t;
+ using std::atomic_int_least64_t;
+ using std::atomic_int_least8_t;
+ using std::atomic_uint_least16_t;
+ using std::atomic_uint_least32_t;
+ using std::atomic_uint_least64_t;
+ using std::atomic_uint_least8_t;
+
+ using std::atomic_int_fast16_t;
+ using std::atomic_int_fast32_t;
+ using std::atomic_int_fast64_t;
+ using std::atomic_int_fast8_t;
+ using std::atomic_uint_fast16_t;
+ using std::atomic_uint_fast32_t;
+ using std::atomic_uint_fast64_t;
+ using std::atomic_uint_fast8_t;
+
+ using std::atomic_intmax_t;
+ using std::atomic_intptr_t;
+ using std::atomic_ptrdiff_t;
+ using std::atomic_size_t;
+ using std::atomic_uintmax_t;
+ using std::atomic_uintptr_t;
+
+ using std::atomic_signed_lock_free;
+ using std::atomic_unsigned_lock_free;
+
+ // [atomics.flag], flag type and operations
+ using std::atomic_flag;
+
+ using std::atomic_flag_clear;
+ using std::atomic_flag_clear_explicit;
+ using std::atomic_flag_test;
+ using std::atomic_flag_test_and_set;
+ using std::atomic_flag_test_and_set_explicit;
+ using std::atomic_flag_test_explicit;
+
+ using std::atomic_flag_notify_all;
+ using std::atomic_flag_notify_one;
+ using std::atomic_flag_wait;
+ using std::atomic_flag_wait_explicit;
+
+ // [atomics.fences], fences
+ using std::atomic_signal_fence;
+ using std::atomic_thread_fence;
+
+ // [depr.atomics.nonmembers]
+ using std::atomic_init;
+
+} // namespace std
+
+// barrier.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_THREADS
+ using std::barrier;
+#endif // _LIBCPP_HAS_NO_THREADS
+} // namespace std
+
+// bit.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [bit.cast], bit_cast
+ using std::bit_cast;
+
+#if _LIBCPP_STD_VER >= 23
+ // [bit.byteswap], byteswap
+ using std::byteswap;
+#endif
+
+ // [bit.pow.two], integral powers of 2
+ using std::bit_ceil;
+ using std::bit_floor;
+ using std::bit_width;
+ using std::has_single_bit;
+
+ // [bit.rotate], rotating
+ using std::rotl;
+ using std::rotr;
+
+ // [bit.count], counting
+ using std::countl_one;
+ using std::countl_zero;
+ using std::countr_one;
+ using std::countr_zero;
+ using std::popcount;
+
+ // [bit.endian], endian
+ using std::endian;
+} // namespace std
+
+// bitset.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::bitset;
+
+ // [bitset.operators], bitset operators
+ using std::operator&;
+ using std::operator|;
+ using std::operator^;
+ using std::operator>>;
+ using std::operator<<;
+
+ // [bitset.hash], hash support
+ using std::hash;
+
+} // namespace std
+
+// cassert.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // This module exports nothing.
+} // namespace std
+
+// cctype.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::isalnum;
+ using std::isalpha;
+ using std::isblank;
+ using std::iscntrl;
+ using std::isdigit;
+ using std::isgraph;
+ using std::islower;
+ using std::isprint;
+ using std::ispunct;
+ using std::isspace;
+ using std::isupper;
+ using std::isxdigit;
+ using std::tolower;
+ using std::toupper;
+} // namespace std
+
+// cerrno.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // This module exports nothing.
+} // namespace std
+
+// cfenv.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // types
+ using std::fenv_t;
+ using std::fexcept_t;
+
+ // functions
+ using std::feclearexcept;
+ using std::fegetexceptflag;
+ using std::feraiseexcept;
+ using std::fesetexceptflag;
+ using std::fetestexcept;
+
+ using std::fegetround;
+ using std::fesetround;
+
+ using std::fegetenv;
+ using std::feholdexcept;
+ using std::fesetenv;
+ using std::feupdateenv;
+
+} // namespace std
+
+// cfloat.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // This module exports nothing.
+} // namespace std
+
+// charconv.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+
+ // floating-point format for primitive numerical conversion
+ using std::chars_format;
+
+ // chars_format is a bitmask type.
+ // [bitmask.types] specified operators
+ using std::operator&;
+ using std::operator&=;
+ using std::operator^;
+ using std::operator^=;
+ using std::operator|;
+ using std::operator|=;
+ using std::operator~;
+
+ // [charconv.to.chars], primitive numerical output conversion
+ using std::to_chars_result;
+
+ using std::to_chars;
+
+ // [charconv.from.chars], primitive numerical input conversion
+ using std::from_chars_result;
+
+ using std::from_chars;
+} // namespace std
+
+// chrono.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+
+ namespace chrono {
+ using std::chrono::duration;
+ using std::chrono::time_point;
+
+ } // namespace chrono
+
+ using std::common_type;
+
+ namespace chrono {
+
+ // [time.traits], customization traits
+ using std::chrono::treat_as_floating_point;
+ using std::chrono::treat_as_floating_point_v;
+
+ using std::chrono::duration_values;
+
+ // using std::chrono::is_clock;
+ // using std::chrono::is_clock_v;
+
+ // [time.duration.nonmember], duration arithmetic
+ using std::chrono::operator+;
+ using std::chrono::operator-;
+ using std::chrono::operator*;
+ using std::chrono::operator/;
+ using std::chrono::operator%;
+
+ // [time.duration.comparisons], duration comparisons
+ using std::chrono::operator==;
+ using std::chrono::operator!=;
+ using std::chrono::operator<;
+ using std::chrono::operator>;
+ using std::chrono::operator<=;
+ using std::chrono::operator>=;
+ using std::chrono::operator<=>;
+
+ // [time.duration.cast], conversions
+ using std::chrono::ceil;
+ using std::chrono::duration_cast;
+ using std::chrono::floor;
+ using std::chrono::round;
+
+ // [time.duration.io], duration I/O
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::chrono::operator<<;
+#endif
+ // using std::chrono::from_stream;
+
+ // convenience typedefs
+ using std::chrono::days;
+ using std::chrono::hours;
+ using std::chrono::microseconds;
+ using std::chrono::milliseconds;
+ using std::chrono::minutes;
+ using std::chrono::months;
+ using std::chrono::nanoseconds;
+ using std::chrono::seconds;
+ using std::chrono::weeks;
+ using std::chrono::years;
+
+ // [time.point.nonmember], time_point arithmetic
+
+ // [time.point.comparisons], time_point comparisons
+
+ // [time.point.cast], conversions
+ using std::chrono::time_point_cast;
+
+ // [time.duration.alg], specialized algorithms
+ using std::chrono::abs;
+
+ // [time.clock.system], class system_clock
+ using std::chrono::system_clock;
+
+ using std::chrono::sys_days;
+ using std::chrono::sys_seconds;
+ using std::chrono::sys_time;
+
+#if 0
+ // [time.clock.utc], class utc_clock
+ using std::chrono::utc_clock;
+
+ using std::chrono::utc_seconds;
+ using std::chrono::utc_time;
+
+ using std::chrono::leap_second_info;
+
+ using std::chrono::get_leap_second_info;
+ // [time.clock.tai], class tai_clock
+ using std::chrono::tai_clock;
+
+ using std::chrono::tai_seconds;
+ using std::chrono::tai_time;
+
+ // [time.clock.gps], class gps_clock
+ using std::chrono::gps_clock;
+
+ using std::chrono::gps_seconds;
+ using std::chrono::gps_time;
+#endif
+ // [time.clock.file], type file_clock
+ using std::chrono::file_clock;
+
+ using std::chrono::file_time;
+
+#ifndef _LIBCPP_HAS_NO_MONOTONIC_CLOCK
+ // [time.clock.steady], class steady_clock
+ using std::chrono::steady_clock;
+#endif
+
+ // [time.clock.hires], class high_resolution_clock
+ using std::chrono::high_resolution_clock;
+
+ // [time.clock.local], local time
+ using std::chrono::local_days;
+ using std::chrono::local_seconds;
+ using std::chrono::local_t;
+ using std::chrono::local_time;
+
+ // [time.clock.cast], time_point conversions
+ // using std::chrono::clock_time_conversion;
+
+ // using std::chrono::clock_cast;
+
+ // [time.cal.last], class last_spec
+ using std::chrono::last_spec;
+
+ // [time.cal.day], class day
+ using std::chrono::day;
+
+ // [time.cal.month], class month
+ using std::chrono::month;
+
+ // [time.cal.year], class year
+ using std::chrono::year;
+
+ // [time.cal.wd], class weekday
+ using std::chrono::weekday;
+
+ // [time.cal.wdidx], class weekday_indexed
+ using std::chrono::weekday_indexed;
+
+ // [time.cal.wdlast], class weekday_last
+ using std::chrono::weekday_last;
+
+ // [time.cal.md], class month_day
+ using std::chrono::month_day;
+
+ // [time.cal.mdlast], class month_day_last
+ using std::chrono::month_day_last;
+
+ // [time.cal.mwd], class month_weekday
+ using std::chrono::month_weekday;
+
+ // [time.cal.mwdlast], class month_weekday_last
+ using std::chrono::month_weekday_last;
+
+ // [time.cal.ym], class year_month
+ using std::chrono::year_month;
+
+ // [time.cal.ymd], class year_month_day
+ using std::chrono::year_month_day;
+
+ // [time.cal.ymdlast], class year_month_day_last
+ using std::chrono::year_month_day_last;
+
+ // [time.cal.ymwd], class year_month_weekday
+ using std::chrono::year_month_weekday;
+
+ // [time.cal.ymwdlast], class year_month_weekday_last
+ using std::chrono::year_month_weekday_last;
+
+ // [time.cal.operators], civil calendar conventional syntax operators
+
+ // [time.hms], class template hh_mm_ss
+ using std::chrono::hh_mm_ss;
+
+ // [time.12], 12/24 hour functions
+ using std::chrono::is_am;
+ using std::chrono::is_pm;
+ using std::chrono::make12;
+ using std::chrono::make24;
+
+#if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \
+ !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+
+# ifdef _LIBCPP_ENABLE_EXPERIMENTAL
+ // [time.zone.db], time zone database
+ using std::chrono::tzdb;
+ using std::chrono::tzdb_list;
+
+ // [time.zone.db.access], time zone database access
+ // using std::chrono::current_zone;
+ using std::chrono::get_tzdb;
+ using std::chrono::get_tzdb_list;
+ // using std::chrono::locate_zone;
+
+ // [time.zone.db.remote], remote time zone database support
+ using std::chrono::reload_tzdb;
+ using std::chrono::remote_version;
+
+#  endif // _LIBCPP_ENABLE_EXPERIMENTAL
+
+# if 0
+ // [time.zone.exception], exception classes
+ using std::chrono::ambiguous_local_time;
+ using std::chrono::nonexistent_local_time;
+
+ // [time.zone.info], information classes
+ using std::chrono::sys_info;
+
+ // [time.zone.timezone], class time_zone
+ using std::chrono::choose;
+ using std::chrono::time_zone;
+
+ // [time.zone.zonedtraits], class template zoned_traits
+ using std::chrono::zoned_traits;
+
+ // [time.zone.zonedtime], class template zoned_time
+ using std::chrono::zoned_time;
+
+ using std::chrono::zoned_seconds;
+
+ // [time.zone.leap], leap second support
+ using std::chrono::leap_second;
+
+ // [time.zone.link], class time_zone_link
+ using std::chrono::time_zone_link;
+
+ // [time.format], formatting
+ using std::chrono::local_time_format;
+# endif
+#endif // !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) &&
+       // !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+ } // namespace chrono
+
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::formatter;
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+
+ namespace chrono {
+ // using std::chrono::parse;
+
+ // calendrical constants
+ using std::chrono::last;
+
+ using std::chrono::Friday;
+ using std::chrono::Monday;
+ using std::chrono::Saturday;
+ using std::chrono::Sunday;
+ using std::chrono::Thursday;
+ using std::chrono::Tuesday;
+ using std::chrono::Wednesday;
+
+ using std::chrono::April;
+ using std::chrono::August;
+ using std::chrono::December;
+ using std::chrono::February;
+ using std::chrono::January;
+ using std::chrono::July;
+ using std::chrono::June;
+ using std::chrono::March;
+ using std::chrono::May;
+ using std::chrono::November;
+ using std::chrono::October;
+ using std::chrono::September;
+
+ } // namespace chrono
+
+} // namespace std
+export namespace std::inline literals::inline chrono_literals {
+ // [time.duration.literals], suffixes for duration literals
+ using std::literals::chrono_literals::operator""h;
+ using std::literals::chrono_literals::operator""min;
+ using std::literals::chrono_literals::operator""s;
+ using std::literals::chrono_literals::operator""ms;
+ using std::literals::chrono_literals::operator""us;
+ using std::literals::chrono_literals::operator""ns;
+
+  // [time.cal.day.nonmembers], non-member functions
+ using std::literals::chrono_literals::operator""d;
+
+  // [time.cal.year.nonmembers], non-member functions
+ using std::literals::chrono_literals::operator""y;
+} // namespace std::inline literals::inline chrono_literals
+
+// cinttypes.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::imaxdiv_t;
+
+ using std::imaxabs;
+ using std::imaxdiv;
+ using std::strtoimax;
+ using std::strtoumax;
+ using std::wcstoimax;
+ using std::wcstoumax;
+
+  // abs is conditionally here, but always present in cmath.cppm. To avoid
+  // conflicting declarations, omit the using here.
+
+  // div is conditionally here, but always present in cstdlib.cppm. To avoid
+  // conflicting declarations, omit the using here.
+} // namespace std
+
+// climits.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // This module exports nothing.
+} // namespace std
+
+// clocale.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::lconv;
+
+ using std::localeconv;
+ using std::setlocale;
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// cmath.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+
+ using std::double_t;
+ using std::float_t;
+
+ using std::acos;
+ using std::acosf;
+ using std::acosl;
+
+ using std::asin;
+ using std::asinf;
+ using std::asinl;
+
+ using std::atan;
+ using std::atanf;
+ using std::atanl;
+
+ using std::atan2;
+ using std::atan2f;
+ using std::atan2l;
+
+ using std::cos;
+ using std::cosf;
+ using std::cosl;
+
+ using std::sin;
+ using std::sinf;
+ using std::sinl;
+
+ using std::tan;
+ using std::tanf;
+ using std::tanl;
+
+ using std::acosh;
+ using std::acoshf;
+ using std::acoshl;
+
+ using std::asinh;
+ using std::asinhf;
+ using std::asinhl;
+
+ using std::atanh;
+ using std::atanhf;
+ using std::atanhl;
+
+ using std::cosh;
+ using std::coshf;
+ using std::coshl;
+
+ using std::sinh;
+ using std::sinhf;
+ using std::sinhl;
+
+ using std::tanh;
+ using std::tanhf;
+ using std::tanhl;
+
+ using std::exp;
+ using std::expf;
+ using std::expl;
+
+ using std::exp2;
+ using std::exp2f;
+ using std::exp2l;
+
+ using std::expm1;
+ using std::expm1f;
+ using std::expm1l;
+
+ using std::frexp;
+ using std::frexpf;
+ using std::frexpl;
+
+ using std::ilogb;
+ using std::ilogbf;
+ using std::ilogbl;
+
+ using std::ldexp;
+ using std::ldexpf;
+ using std::ldexpl;
+
+ using std::log;
+ using std::logf;
+ using std::logl;
+
+ using std::log10;
+ using std::log10f;
+ using std::log10l;
+
+ using std::log1p;
+ using std::log1pf;
+ using std::log1pl;
+
+ using std::log2;
+ using std::log2f;
+ using std::log2l;
+
+ using std::logb;
+ using std::logbf;
+ using std::logbl;
+
+ using std::modf;
+ using std::modff;
+ using std::modfl;
+
+ using std::scalbn;
+ using std::scalbnf;
+ using std::scalbnl;
+
+ using std::scalbln;
+ using std::scalblnf;
+ using std::scalblnl;
+
+ using std::cbrt;
+ using std::cbrtf;
+ using std::cbrtl;
+
+ // [c.math.abs], absolute values
+ using std::abs;
+
+ using std::fabs;
+ using std::fabsf;
+ using std::fabsl;
+
+ using std::hypot;
+ using std::hypotf;
+ using std::hypotl;
+
+ // [c.math.hypot3], three-dimensional hypotenuse
+
+ using std::pow;
+ using std::powf;
+ using std::powl;
+
+ using std::sqrt;
+ using std::sqrtf;
+ using std::sqrtl;
+
+ using std::erf;
+ using std::erff;
+ using std::erfl;
+
+ using std::erfc;
+ using std::erfcf;
+ using std::erfcl;
+
+ using std::lgamma;
+ using std::lgammaf;
+ using std::lgammal;
+
+ using std::tgamma;
+ using std::tgammaf;
+ using std::tgammal;
+
+ using std::ceil;
+ using std::ceilf;
+ using std::ceill;
+
+ using std::floor;
+ using std::floorf;
+ using std::floorl;
+
+ using std::nearbyint;
+ using std::nearbyintf;
+ using std::nearbyintl;
+
+ using std::rint;
+ using std::rintf;
+ using std::rintl;
+
+ using std::lrint;
+ using std::lrintf;
+ using std::lrintl;
+
+ using std::llrint;
+ using std::llrintf;
+ using std::llrintl;
+
+ using std::round;
+ using std::roundf;
+ using std::roundl;
+
+ using std::lround;
+ using std::lroundf;
+ using std::lroundl;
+
+ using std::llround;
+ using std::llroundf;
+ using std::llroundl;
+
+ using std::trunc;
+ using std::truncf;
+ using std::truncl;
+
+ using std::fmod;
+ using std::fmodf;
+ using std::fmodl;
+
+ using std::remainder;
+ using std::remainderf;
+ using std::remainderl;
+
+ using std::remquo;
+ using std::remquof;
+ using std::remquol;
+
+ using std::copysign;
+ using std::copysignf;
+ using std::copysignl;
+
+ using std::nan;
+ using std::nanf;
+ using std::nanl;
+
+ using std::nextafter;
+ using std::nextafterf;
+ using std::nextafterl;
+
+ using std::nexttoward;
+ using std::nexttowardf;
+ using std::nexttowardl;
+
+ using std::fdim;
+ using std::fdimf;
+ using std::fdiml;
+
+ using std::fmax;
+ using std::fmaxf;
+ using std::fmaxl;
+
+ using std::fmin;
+ using std::fminf;
+ using std::fminl;
+
+ using std::fma;
+ using std::fmaf;
+ using std::fmal;
+
+ // [c.math.lerp], linear interpolation
+ using std::lerp;
+
+ // [c.math.fpclass], classification / comparison functions
+ using std::fpclassify;
+ using std::isfinite;
+ using std::isgreater;
+ using std::isgreaterequal;
+ using std::isinf;
+ using std::isless;
+ using std::islessequal;
+ using std::islessgreater;
+ using std::isnan;
+ using std::isnormal;
+ using std::isunordered;
+ using std::signbit;
+
+ // [sf.cmath], mathematical special functions
+#if 0
+ // [sf.cmath.assoc.laguerre], associated Laguerre polynomials
+ using std::assoc_laguerre;
+ using std::assoc_laguerref;
+ using std::assoc_laguerrel;
+
+ // [sf.cmath.assoc.legendre], associated Legendre functions
+ using std::assoc_legendre;
+ using std::assoc_legendref;
+ using std::assoc_legendrel;
+
+ // [sf.cmath.beta], beta function
+ using std::beta;
+ using std::betaf;
+ using std::betal;
+
+ // [sf.cmath.comp.ellint.1], complete elliptic integral of the first kind
+ using std::comp_ellint_1;
+ using std::comp_ellint_1f;
+ using std::comp_ellint_1l;
+
+ // [sf.cmath.comp.ellint.2], complete elliptic integral of the second kind
+ using std::comp_ellint_2;
+ using std::comp_ellint_2f;
+ using std::comp_ellint_2l;
+
+ // [sf.cmath.comp.ellint.3], complete elliptic integral of the third kind
+ using std::comp_ellint_3;
+ using std::comp_ellint_3f;
+ using std::comp_ellint_3l;
+
+ // [sf.cmath.cyl.bessel.i], regular modified cylindrical Bessel functions
+ using std::cyl_bessel_i;
+ using std::cyl_bessel_if;
+ using std::cyl_bessel_il;
+
+ // [sf.cmath.cyl.bessel.j], cylindrical Bessel functions of the first kind
+ using std::cyl_bessel_j;
+ using std::cyl_bessel_jf;
+ using std::cyl_bessel_jl;
+
+ // [sf.cmath.cyl.bessel.k], irregular modified cylindrical Bessel functions
+ using std::cyl_bessel_k;
+ using std::cyl_bessel_kf;
+ using std::cyl_bessel_kl;
+
+ // [sf.cmath.cyl.neumann], cylindrical Neumann functions
+ // cylindrical Bessel functions of the second kind
+ using std::cyl_neumann;
+ using std::cyl_neumannf;
+ using std::cyl_neumannl;
+
+ // [sf.cmath.ellint.1], incomplete elliptic integral of the first kind
+ using std::ellint_1;
+ using std::ellint_1f;
+ using std::ellint_1l;
+
+ // [sf.cmath.ellint.2], incomplete elliptic integral of the second kind
+ using std::ellint_2;
+ using std::ellint_2f;
+ using std::ellint_2l;
+
+ // [sf.cmath.ellint.3], incomplete elliptic integral of the third kind
+ using std::ellint_3;
+ using std::ellint_3f;
+ using std::ellint_3l;
+
+ // [sf.cmath.expint], exponential integral
+ using std::expint;
+ using std::expintf;
+ using std::expintl;
+
+ // [sf.cmath.hermite], Hermite polynomials
+ using std::hermite;
+ using std::hermitef;
+ using std::hermitel;
+
+ // [sf.cmath.laguerre], Laguerre polynomials
+ using std::laguerre;
+ using std::laguerref;
+ using std::laguerrel;
+
+ // [sf.cmath.legendre], Legendre polynomials
+ using std::legendre;
+ using std::legendref;
+ using std::legendrel;
+
+ // [sf.cmath.riemann.zeta], Riemann zeta function
+ using std::riemann_zeta;
+ using std::riemann_zetaf;
+ using std::riemann_zetal;
+
+ // [sf.cmath.sph.bessel], spherical Bessel functions of the first kind
+ using std::sph_bessel;
+ using std::sph_besself;
+ using std::sph_bessell;
+
+ // [sf.cmath.sph.legendre], spherical associated Legendre functions
+ using std::sph_legendre;
+ using std::sph_legendref;
+ using std::sph_legendrel;
+
+ // [sf.cmath.sph.neumann], spherical Neumann functions;
+ // spherical Bessel functions of the second kind
+ using std::sph_neumann;
+ using std::sph_neumannf;
+ using std::sph_neumannl;
+#endif
+} // namespace std
+
+// codecvt.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::codecvt_mode;
+
+ using std::codecvt_utf16;
+ using std::codecvt_utf8;
+ using std::codecvt_utf8_utf16;
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// compare.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+
+ // [cmp.categories], comparison category types
+ using std::partial_ordering;
+ using std::strong_ordering;
+ using std::weak_ordering;
+
+ // named comparison functions
+ using std::is_eq;
+ using std::is_gt;
+ using std::is_gteq;
+ using std::is_lt;
+ using std::is_lteq;
+ using std::is_neq;
+
+ // [cmp.common], common comparison category type
+ using std::common_comparison_category;
+ using std::common_comparison_category_t;
+
+ // [cmp.concept], concept three_way_comparable
+ using std::three_way_comparable;
+ using std::three_way_comparable_with;
+
+ // [cmp.result], result of three-way comparison
+ using std::compare_three_way_result;
+
+ using std::compare_three_way_result_t;
+
+ // [comparisons.three.way], class compare_three_way
+ using std::compare_three_way;
+
+ // [cmp.alg], comparison algorithms
+ inline namespace __cpo {
+ using std::__cpo::compare_partial_order_fallback;
+ using std::__cpo::compare_strong_order_fallback;
+ using std::__cpo::compare_weak_order_fallback;
+ using std::__cpo::partial_order;
+ using std::__cpo::strong_order;
+ using std::__cpo::weak_order;
+ } // namespace __cpo
+
+} // namespace std
+
+// complex.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+
+ // [complex], class template complex
+ using std::complex;
+
+ // [complex.ops], operators
+ using std::operator+;
+ using std::operator-;
+ using std::operator*;
+ using std::operator/;
+
+ using std::operator==;
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::operator>>;
+ using std::operator<<;
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+
+ // [complex.value.ops], values
+ using std::imag;
+ using std::real;
+
+ using std::abs;
+ using std::arg;
+ using std::norm;
+
+ using std::conj;
+ using std::polar;
+ using std::proj;
+
+ // [complex.transcendentals], transcendentals
+ using std::acos;
+ using std::asin;
+ using std::atan;
+
+ using std::acosh;
+ using std::asinh;
+ using std::atanh;
+
+ using std::cos;
+ using std::cosh;
+ using std::exp;
+ using std::log;
+ using std::log10;
+
+ using std::pow;
+
+ using std::sin;
+ using std::sinh;
+ using std::sqrt;
+ using std::tan;
+ using std::tanh;
+
+ // [complex.literals], complex literals
+ inline namespace literals {
+ inline namespace complex_literals {
+ using std::operator""il;
+ using std::operator""i;
+ using std::operator""if;
+ } // namespace complex_literals
+ } // namespace literals
+
+} // namespace std
+
+// concepts.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+
+ // [concepts.lang], language-related concepts
+ // [concept.same], concept same_as
+ using std::same_as;
+
+ // [concept.derived], concept derived_from
+ using std::derived_from;
+
+ // [concept.convertible], concept convertible_to
+ using std::convertible_to;
+
+ // [concept.commonref], concept common_reference_with
+ using std::common_reference_with;
+
+ // [concept.common], concept common_with
+ using std::common_with;
+
+ // [concepts.arithmetic], arithmetic concepts
+ using std::floating_point;
+ using std::integral;
+ using std::signed_integral;
+ using std::unsigned_integral;
+
+ // [concept.assignable], concept assignable_from
+ using std::assignable_from;
+
+ // [concept.swappable], concept swappable
+ namespace ranges {
+ inline namespace __cpo {
+ using std::ranges::__cpo::swap;
+ }
+ } // namespace ranges
+
+ using std::swappable;
+ using std::swappable_with;
+
+ // [concept.destructible], concept destructible
+ using std::destructible;
+
+ // [concept.constructible], concept constructible_from
+ using std::constructible_from;
+
+ // [concept.default.init], concept default_initializable
+ using std::default_initializable;
+
+ // [concept.moveconstructible], concept move_constructible
+ using std::move_constructible;
+
+ // [concept.copyconstructible], concept copy_constructible
+ using std::copy_constructible;
+
+ // [concepts.compare], comparison concepts
+ // [concept.equalitycomparable], concept equality_comparable
+ using std::equality_comparable;
+ using std::equality_comparable_with;
+
+ // [concept.totallyordered], concept totally_ordered
+ using std::totally_ordered;
+ using std::totally_ordered_with;
+
+ // [concepts.object], object concepts
+ using std::copyable;
+ using std::movable;
+ using std::regular;
+ using std::semiregular;
+
+ // [concepts.callable], callable concepts
+ // [concept.invocable], concept invocable
+ using std::invocable;
+
+ // [concept.regularinvocable], concept regular_invocable
+ using std::regular_invocable;
+
+ // [concept.predicate], concept predicate
+ using std::predicate;
+
+ // [concept.relation], concept relation
+ using std::relation;
+
+ // [concept.equiv], concept equivalence_relation
+ using std::equivalence_relation;
+
+ // [concept.strictweakorder], concept strict_weak_order
+ using std::strict_weak_order;
+
+} // namespace std
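
A small sketch of constraining a template with the arithmetic concepts listed here; the function name twice is invented and C++20 is assumed:

  // Illustrative example; twice is a hypothetical name.
  #include <concepts>

  template <std::integral T>
  constexpr T twice(T v) { return v + v; }          // rejects non-integral types at compile time

  static_assert(twice(21) == 42);
  // twice(1.5);                                    // would not compile: double is not std::integral

  int main() {}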
+
+// condition_variable.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_THREADS
+ // [thread.condition.condvar], class condition_variable
+ using std::condition_variable;
+ // [thread.condition.condvarany], class condition_variable_any
+ using std::condition_variable_any;
+
+ // [thread.condition.nonmember], non-member functions
+ using std::notify_all_at_thread_exit;
+
+ using std::cv_status;
+#endif // _LIBCPP_HAS_NO_THREADS
+} // namespace std
+
+// coroutine.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+
+ // [coroutine.traits], coroutine traits
+ using std::coroutine_traits;
+
+ // [coroutine.handle], coroutine handle
+ using std::coroutine_handle;
+
+ // [coroutine.handle.compare], comparison operators
+ using std::operator==;
+ using std::operator<=>;
+
+ // [coroutine.handle.hash], hash support
+ using std::hash;
+
+ // [coroutine.noop], no-op coroutines
+ using std::noop_coroutine;
+ using std::noop_coroutine_handle;
+ using std::noop_coroutine_promise;
+
+ // [coroutine.trivial.awaitables], trivial awaitables
+ using std::suspend_always;
+ using std::suspend_never;
+} // namespace std
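
The trivial awaitables above are already enough for the smallest possible coroutine; a hedged C++20 sketch with invented type and function names:

  // Illustrative example; fire_and_forget and hello are made-up names.
  #include <coroutine>

  struct fire_and_forget {
    struct promise_type {
      fire_and_forget get_return_object() { return {}; }
      std::suspend_never initial_suspend() { return {}; }       // start eagerly
      std::suspend_never final_suspend() noexcept { return {}; }
      void return_void() {}
      void unhandled_exception() {}
    };
  };

  fire_and_forget hello() { co_return; }

  int main() { hello(); }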
+
+// csetjmp.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::jmp_buf;
+ using std::longjmp;
+} // namespace std
+
+// csignal.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::sig_atomic_t;
+
+ // [support.signal], signal handlers
+ using std::signal;
+
+ using std::raise;
+
+} // namespace std
+
+// cstdarg.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::va_list;
+} // namespace std
+
+// cstddef.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::max_align_t;
+ using std::nullptr_t;
+ using std::ptrdiff_t;
+ using std::size_t;
+
+ using std::byte;
+
+ // [support.types.byteops], byte type operations
+ using std::operator<<=;
+ using std::operator<<;
+ using std::operator>>=;
+ using std::operator>>;
+ using std::operator|=;
+ using std::operator|;
+ using std::operator&=;
+ using std::operator&;
+ using std::operator^=;
+ using std::operator^;
+ using std::operator~;
+ using std::to_integer;
+} // namespace std
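
A quick illustration of the [support.types.byteops] operators on std::byte (C++17 or later assumed; not part of the patch):

  // Illustrative example.
  #include <cstddef>

  int main() {
    std::byte b{0b1010};
    b <<= 1;                                        // operator<<=
    b |= std::byte{0b0001};                         // operator|=
    return std::to_integer<int>(b) == 0b10101 ? 0 : 1;
  }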
+
+// cstdint.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // signed
+ using std::int8_t _LIBCPP_USING_IF_EXISTS;
+ using std::int16_t _LIBCPP_USING_IF_EXISTS;
+ using std::int32_t _LIBCPP_USING_IF_EXISTS;
+ using std::int64_t _LIBCPP_USING_IF_EXISTS;
+
+ using std::int_fast16_t;
+ using std::int_fast32_t;
+ using std::int_fast64_t;
+ using std::int_fast8_t;
+
+ using std::int_least16_t;
+ using std::int_least32_t;
+ using std::int_least64_t;
+ using std::int_least8_t;
+
+ using std::intmax_t;
+
+ using std::intptr_t _LIBCPP_USING_IF_EXISTS;
+
+ // unsigned
+ using std::uint8_t _LIBCPP_USING_IF_EXISTS;
+ using std::uint16_t _LIBCPP_USING_IF_EXISTS;
+ using std::uint32_t _LIBCPP_USING_IF_EXISTS;
+ using std::uint64_t _LIBCPP_USING_IF_EXISTS;
+
+ using std::uint_fast16_t;
+ using std::uint_fast32_t;
+ using std::uint_fast64_t;
+ using std::uint_fast8_t;
+
+ using std::uint_least16_t;
+ using std::uint_least32_t;
+ using std::uint_least64_t;
+ using std::uint_least8_t;
+
+ using std::uintmax_t;
+
+ using std::uintptr_t _LIBCPP_USING_IF_EXISTS;
+} // namespace std
+
+// cstdio.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::FILE;
+ using std::fpos_t;
+ using std::size_t;
+
+ using std::clearerr;
+ using std::fclose;
+ using std::feof;
+ using std::ferror;
+ using std::fflush;
+ using std::fgetc;
+ using std::fgetpos;
+ using std::fgets;
+ using std::fopen;
+ using std::fprintf;
+ using std::fputc;
+ using std::fputs;
+ using std::fread;
+ using std::freopen;
+ using std::fscanf;
+ using std::fseek;
+ using std::fsetpos;
+ using std::ftell;
+ using std::fwrite;
+ using std::getc;
+ using std::getchar;
+ using std::perror;
+ using std::printf;
+ using std::putc;
+ using std::putchar;
+ using std::puts;
+ using std::remove;
+ using std::rename;
+ using std::rewind;
+ using std::scanf;
+ using std::setbuf;
+ using std::setvbuf;
+ using std::snprintf;
+ using std::sprintf;
+ using std::sscanf;
+ using std::tmpfile;
+ using std::tmpnam;
+ using std::ungetc;
+ using std::vfprintf;
+ using std::vfscanf;
+ using std::vprintf;
+ using std::vscanf;
+ using std::vsnprintf;
+ using std::vsprintf;
+ using std::vsscanf;
+} // namespace std
+
+// cstdlib.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::div_t;
+ using std::ldiv_t;
+ using std::lldiv_t;
+ using std::size_t;
+
+ // [support.start.term], start and termination
+ using std::_Exit;
+ using std::abort;
+ using std::at_quick_exit;
+ using std::atexit;
+ using std::exit;
+ using std::quick_exit;
+
+ using std::getenv;
+ using std::system;
+
+ // [c.malloc], C library memory allocation
+ using std::aligned_alloc;
+ using std::calloc;
+ using std::free;
+ using std::malloc;
+ using std::realloc;
+
+ using std::atof;
+ using std::atoi;
+ using std::atol;
+ using std::atoll;
+ using std::strtod;
+ using std::strtof;
+ using std::strtol;
+ using std::strtold;
+ using std::strtoll;
+ using std::strtoul;
+ using std::strtoull;
+
+ // [c.mb.wcs], multibyte / wide string and character conversion functions
+ using std::mblen;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::mbstowcs;
+ using std::mbtowc;
+ using std::wcstombs;
+ using std::wctomb;
+#endif
+ // [alg.c.library], C standard library algorithms
+ using std::bsearch;
+ using std::qsort;
+
+ // [c.math.rand], low-quality random number generation
+ using std::rand;
+ using std::srand;
+
+ // [c.math.abs], absolute values
+ using std::abs;
+
+ using std::labs;
+ using std::llabs;
+
+ using std::div;
+ using std::ldiv;
+ using std::lldiv;
+} // namespace std
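
The conversion and division helpers re-exported here are the familiar C facilities; a brief sketch using headers rather than import std:

  // Illustrative example.
  #include <cstdio>
  #include <cstdlib>

  int main() {
    long n = std::strtol("1234", nullptr, 10);      // string-to-integer conversion
    std::div_t qr = std::div(7, 2);                 // quotient and remainder in one call
    std::printf("%ld %d %d\n", n, qr.quot, qr.rem); // prints: 1234 3 1
  }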
+
+// cstring.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::size_t;
+
+ using std::memchr;
+ using std::memcmp;
+ using std::memcpy;
+ using std::memmove;
+ using std::memset;
+ using std::strcat;
+ using std::strchr;
+ using std::strcmp;
+ using std::strcoll;
+ using std::strcpy;
+ using std::strcspn;
+ using std::strerror;
+ using std::strlen;
+ using std::strncat;
+ using std::strncmp;
+ using std::strncpy;
+ using std::strpbrk;
+ using std::strrchr;
+ using std::strspn;
+ using std::strstr;
+ using std::strtok;
+ using std::strxfrm;
+} // namespace std
+
+// ctime.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::clock_t;
+ using std::size_t;
+ using std::time_t;
+
+ using std::timespec;
+ using std::tm;
+
+ using std::asctime;
+ using std::clock;
+ using std::ctime;
+ using std::difftime;
+ using std::gmtime;
+ using std::localtime;
+ using std::mktime;
+ using std::strftime;
+ using std::time;
+ using std::timespec_get;
+} // namespace std
+
+// cuchar.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+  // Note the Standard does not mark these symbols optional, but libc++'s header
+  // does, so strictly speaking this does not appear to be conforming.
+
+  // mbstate_t is conditionally here, but always present in cwchar.cppm. To avoid
+  // conflicting declarations we omit the using here.
+
+  // size_t is conditionally here, but always present in cstddef.cppm. To avoid
+  // conflicting declarations we omit the using here.
+
+#if !defined(_LIBCPP_HAS_NO_C8RTOMB_MBRTOC8)
+ using std::mbrtoc8 _LIBCPP_USING_IF_EXISTS;
+ using std::c8rtomb _LIBCPP_USING_IF_EXISTS;
+#endif
+ using std::mbrtoc16 _LIBCPP_USING_IF_EXISTS;
+ using std::c16rtomb _LIBCPP_USING_IF_EXISTS;
+ using std::mbrtoc32 _LIBCPP_USING_IF_EXISTS;
+ using std::c32rtomb _LIBCPP_USING_IF_EXISTS;
+} // namespace std
+
+// cwchar.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::mbstate_t;
+ using std::size_t;
+ using std::wint_t;
+
+ using std::tm;
+
+ using std::btowc;
+ using std::fgetwc;
+ using std::fgetws;
+ using std::fputwc;
+ using std::fputws;
+ using std::fwide;
+ using std::fwprintf;
+ using std::fwscanf;
+ using std::getwc;
+ using std::getwchar;
+ using std::putwc;
+ using std::putwchar;
+ using std::swprintf;
+ using std::swscanf;
+ using std::ungetwc;
+ using std::vfwprintf;
+ using std::vfwscanf;
+ using std::vswprintf;
+ using std::vswscanf;
+ using std::vwprintf;
+ using std::vwscanf;
+ using std::wcscat;
+ using std::wcschr;
+ using std::wcscmp;
+ using std::wcscoll;
+ using std::wcscpy;
+ using std::wcscspn;
+ using std::wcsftime;
+ using std::wcslen;
+ using std::wcsncat;
+ using std::wcsncmp;
+ using std::wcsncpy;
+ using std::wcspbrk;
+ using std::wcsrchr;
+ using std::wcsspn;
+ using std::wcsstr;
+ using std::wcstod;
+ using std::wcstof;
+ using std::wcstok;
+ using std::wcstol;
+ using std::wcstold;
+ using std::wcstoll;
+ using std::wcstoul;
+ using std::wcstoull;
+ using std::wcsxfrm;
+ using std::wctob;
+ using std::wmemchr;
+ using std::wmemcmp;
+ using std::wmemcpy;
+ using std::wmemmove;
+ using std::wmemset;
+ using std::wprintf;
+ using std::wscanf;
+
+ // [c.mb.wcs], multibyte / wide string and character conversion functions
+ using std::mbrlen;
+ using std::mbrtowc;
+ using std::mbsinit;
+ using std::mbsrtowcs;
+ using std::wcrtomb;
+ using std::wcsrtombs;
+#endif // _LIBCPP_HAS_NO_WIDE_CHARACTERS
+} // namespace std
+
+// cwctype.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wctrans_t;
+ using std::wctype_t;
+ using std::wint_t;
+
+ using std::iswalnum;
+ using std::iswalpha;
+ using std::iswblank;
+ using std::iswcntrl;
+ using std::iswctype;
+ using std::iswdigit;
+ using std::iswgraph;
+ using std::iswlower;
+ using std::iswprint;
+ using std::iswpunct;
+ using std::iswspace;
+ using std::iswupper;
+ using std::iswxdigit;
+ using std::towctrans;
+ using std::towlower;
+ using std::towupper;
+ using std::wctrans;
+ using std::wctype;
+#endif // _LIBCPP_HAS_NO_WIDE_CHARACTERS
+} // namespace std
+
+// deque.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [deque], class template deque
+ using std::deque;
+
+ using std::operator==;
+ using std::operator<=>;
+
+ using std::swap;
+
+ // [deque.erasure], erasure
+ using std::erase;
+ using std::erase_if;
+
+ namespace pmr {
+ using std::pmr::deque;
+ }
+} // namespace std
+
+// exception.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::bad_exception;
+ using std::current_exception;
+ using std::exception;
+ using std::exception_ptr;
+ using std::get_terminate;
+ using std::make_exception_ptr;
+ using std::nested_exception;
+ using std::rethrow_exception;
+ using std::rethrow_if_nested;
+ using std::set_terminate;
+ using std::terminate;
+ using std::terminate_handler;
+ using std::throw_with_nested;
+ using std::uncaught_exception;
+ using std::uncaught_exceptions;
+} // namespace std
+
+// execution.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifdef _LIBCPP_ENABLE_EXPERIMENTAL
+export namespace std {
+ // [execpol.type], execution policy type trait
+ using std::is_execution_policy;
+ using std::is_execution_policy_v;
+} // namespace std
+
+export namespace std::execution {
+ // [execpol.seq], sequenced execution policy
+ using std::execution::sequenced_policy;
+
+ // [execpol.par], parallel execution policy
+ using std::execution::parallel_policy;
+
+ // [execpol.parunseq], parallel and unsequenced execution policy
+ using std::execution::parallel_unsequenced_policy;
+
+ // [execpol.unseq], unsequenced execution policy
+ using std::execution::unsequenced_policy;
+
+ // [execpol.objects], execution policy objects
+ using std::execution::par;
+ using std::execution::par_unseq;
+ using std::execution::seq;
+ using std::execution::unseq;
+} // namespace std::execution
+#endif // _LIBCPP_ENABLE_EXPERIMENTAL
+
+// expected.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if _LIBCPP_STD_VER >= 23
+ // [expected.unexpected], class template unexpected
+ using std::unexpected;
+
+ // [expected.bad], class template bad_expected_access
+ using std::bad_expected_access;
+
+ // in-place construction of unexpected values
+ using std::unexpect;
+ using std::unexpect_t;
+
+ // [expected.expected], class template expected
+ using std::expected;
+#endif // _LIBCPP_STD_VER >= 23
+} // namespace std
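
Assuming a C++23 toolchain, a sketch of how the expected/unexpected pair listed above is typically used; parse_digit is an invented name:

  // Illustrative example; parse_digit is hypothetical.
  #include <cstdio>
  #include <expected>
  #include <string>

  std::expected<int, std::string> parse_digit(char c) {
    if (c >= '0' && c <= '9')
      return c - '0';
    return std::unexpected(std::string("not a digit"));
  }

  int main() {
    auto r = parse_digit('7');
    if (r)
      std::printf("%d\n", *r);
    else
      std::printf("error: %s\n", r.error().c_str());
  }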
+
+// filesystem.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std::filesystem {
+ // [fs.class.path], paths
+ using std::filesystem::path;
+
+ // [fs.path.nonmember], path non-member functions
+ using std::filesystem::hash_value;
+ using std::filesystem::swap;
+
+ // [fs.class.filesystem.error], filesystem errors
+ using std::filesystem::filesystem_error;
+
+#ifndef _LIBCPP_HAS_NO_FILESYSTEM
+ // [fs.class.directory.entry], directory entries
+ using std::filesystem::directory_entry;
+
+ // [fs.class.directory.iterator], directory iterators
+ using std::filesystem::directory_iterator;
+
+ // [fs.dir.itr.nonmembers], range access for directory iterators
+ using std::filesystem::begin;
+ using std::filesystem::end;
+
+ // [fs.class.rec.dir.itr], recursive directory iterators
+ using std::filesystem::recursive_directory_iterator;
+#endif // _LIBCPP_HAS_NO_FILESYSTEM
+
+ // [fs.rec.dir.itr.nonmembers], range access for recursive directory iterators
+
+ // [fs.class.file.status], file status
+ using std::filesystem::file_status;
+ using std::filesystem::space_info;
+
+ // [fs.enum], enumerations
+ using std::filesystem::copy_options;
+ using std::filesystem::directory_options;
+ using std::filesystem::file_type;
+ using std::filesystem::perm_options;
+ using std::filesystem::perms;
+
+ using std::filesystem::file_time_type;
+
+  // Several of these enums are bitmask types.
+ // [bitmask.types] specified operators
+ using std::filesystem::operator&;
+ using std::filesystem::operator&=;
+ using std::filesystem::operator^;
+ using std::filesystem::operator^=;
+ using std::filesystem::operator|;
+ using std::filesystem::operator|=;
+ using std::filesystem::operator~;
+
+#ifndef _LIBCPP_HAS_NO_FILESYSTEM
+ // [fs.op.funcs], filesystem operations
+ using std::filesystem::absolute;
+ using std::filesystem::canonical;
+ using std::filesystem::copy;
+ using std::filesystem::copy_file;
+ using std::filesystem::copy_symlink;
+ using std::filesystem::create_directories;
+ using std::filesystem::create_directory;
+ using std::filesystem::create_directory_symlink;
+ using std::filesystem::create_hard_link;
+ using std::filesystem::create_symlink;
+ using std::filesystem::current_path;
+ using std::filesystem::equivalent;
+ using std::filesystem::exists;
+ using std::filesystem::file_size;
+ using std::filesystem::hard_link_count;
+
+ using std::filesystem::is_block_file;
+ using std::filesystem::is_character_file;
+ using std::filesystem::is_directory;
+ using std::filesystem::is_empty;
+ using std::filesystem::is_fifo;
+ using std::filesystem::is_other;
+ using std::filesystem::is_regular_file;
+ using std::filesystem::is_socket;
+ using std::filesystem::is_symlink;
+
+ using std::filesystem::last_write_time;
+ using std::filesystem::permissions;
+ using std::filesystem::proximate;
+ using std::filesystem::read_symlink;
+ using std::filesystem::relative;
+ using std::filesystem::remove;
+
+ using std::filesystem::remove_all;
+ using std::filesystem::rename;
+ using std::filesystem::resize_file;
+ using std::filesystem::space;
+ using std::filesystem::status;
+ using std::filesystem::status_known;
+ using std::filesystem::symlink_status;
+ using std::filesystem::temp_directory_path;
+ using std::filesystem::weakly_canonical;
+#endif // _LIBCPP_HAS_NO_FILESYSTEM
+
+ // [depr.fs.path.factory]
+ using std::filesystem::u8path;
+} // namespace std::filesystem
+
+// [fs.path.hash], hash support
+export namespace std {
+ using std::hash;
+}
+
+export namespace std::ranges {
+#ifndef _LIBCPP_HAS_NO_FILESYSTEM
+ using std::ranges::enable_borrowed_range;
+ using std::ranges::enable_view;
+#endif // _LIBCPP_HAS_NO_FILESYSTEM
+} // namespace std::ranges
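
A short, hedged example of the path composition and query operations exported in this partition (requires a platform where the filesystem library is available):

  // Illustrative example.
  #include <filesystem>
  #include <iostream>

  int main() {
    namespace fs = std::filesystem;
    fs::path p = fs::temp_directory_path() / "example.txt";  // operator/ composes paths
    std::cout << p << ' ' << fs::exists(p) << '\n';
  }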
+
+// flat_map.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if 0
+  // [flat.map], class template flat_map
+ using std::flat_map;
+
+ using std::sorted_unique;
+ using std::sorted_unique_t;
+
+ using std::uses_allocator;
+
+  // [flat.map.erasure], erasure for flat_map
+ using std::erase_if;
+
+  // [flat.multimap], class template flat_multimap
+ using std::flat_multimap;
+
+ using std::sorted_equivalent;
+ using std::sorted_equivalent_t;
+
+  // [flat.multimap.erasure], erasure for flat_multimap
+#endif
+} // namespace std
+
+// flat_set.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if 0
+  // [flat.set], class template flat_set
+ using std::flat_set;
+
+ using std::sorted_unique;
+ using std::sorted_unique_t;
+
+ using std::uses_allocator;
+
+  // [flat.set.erasure], erasure for flat_set
+ using std::erase_if;
+
+  // [flat.multiset], class template flat_multiset
+ using std::flat_multiset;
+
+ using std::sorted_equivalent;
+ using std::sorted_equivalent_t;
+#endif
+} // namespace std
+
+// format.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [format.context], class template basic_format_context
+ using std::basic_format_context;
+ using std::format_context;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wformat_context;
+#endif
+
+ // [format.args], class template basic_format_args
+ using std::basic_format_args;
+ using std::format_args;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wformat_args;
+#endif
+
+ // [format.fmt.string], class template basic_format_string
+ using std::basic_format_string;
+ using std::format_string;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wformat_string;
+#endif
+
+ // [format.functions], formatting functions
+ using std::format;
+ using std::format_to;
+ using std::vformat;
+ using std::vformat_to;
+
+ using std::format_to_n;
+ using std::format_to_n_result;
+ using std::formatted_size;
+
+ // [format.formatter], formatter
+ using std::formatter;
+
+#if _LIBCPP_STD_VER >= 23
+ // [format.formattable], concept formattable
+ using std::formattable;
+#endif
+
+ // [format.parse.ctx], class template basic_format_parse_context
+ using std::basic_format_parse_context;
+ using std::format_parse_context;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wformat_parse_context;
+#endif
+
+#if _LIBCPP_STD_VER >= 23
+ // [format.range], formatting of ranges
+ // [format.range.fmtkind], variable template format_kind
+ using std::format_kind;
+ using std::range_format;
+
+ // [format.range.formatter], class template range_formatter
+ using std::range_formatter;
+#endif // _LIBCPP_STD_VER >= 23
+
+ // [format.arg], class template basic_format_arg
+ using std::basic_format_arg;
+ using std::visit_format_arg;
+
+ // [format.arg.store], class template format-arg-store
+ using std::make_format_args;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::make_wformat_args;
+#endif
+
+ // [format.error], class format_error
+ using std::format_error;
+} // namespace std
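
A compact sketch of the formatting functions above, assuming a standard library that ships the C++20 formatting facilities:

  // Illustrative example.
  #include <cstdio>
  #include <format>
  #include <string>

  int main() {
    std::string s = std::format("{:>8} = {:.2f}", "pi", 3.14159);  // width and precision
    auto n = std::formatted_size("{}", 42);                        // length of the would-be output
    std::printf("%s (%zu)\n", s.c_str(), n);
  }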
+
+// forward_list.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [forward.list], class template forward_list
+ using std::forward_list;
+
+ using std::operator==;
+ using std::operator<=>;
+
+ using std::swap;
+
+ // [forward.list.erasure], erasure
+ using std::erase;
+ using std::erase_if;
+
+ namespace pmr {
+ using std::pmr::forward_list;
+ }
+} // namespace std
+
+// fstream.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::basic_filebuf;
+
+# ifndef _LIBCPP_HAS_NO_FILESYSTEM
+ using std::swap;
+# endif
+
+ using std::filebuf;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wfilebuf;
+# endif
+
+ using std::basic_ifstream;
+
+ using std::ifstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wifstream;
+# endif
+
+ using std::basic_ofstream;
+
+ using std::ofstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wofstream;
+# endif
+
+ using std::basic_fstream;
+
+ using std::fstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wfstream;
+# endif
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// functional.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [func.invoke], invoke
+ using std::invoke;
+#if _LIBCPP_STD_VER >= 23
+ using std::invoke_r;
+#endif
+
+ // [refwrap], reference_wrapper
+ using std::reference_wrapper;
+
+ using std::cref;
+ using std::ref;
+
+ // [arithmetic.operations], arithmetic operations
+ using std::divides;
+ using std::minus;
+ using std::modulus;
+ using std::multiplies;
+ using std::negate;
+ using std::plus;
+ // [comparisons], comparisons
+ using std::equal_to;
+ using std::greater;
+ using std::greater_equal;
+ using std::less;
+ using std::less_equal;
+ using std::not_equal_to;
+
+ // [comparisons.three.way], class compare_three_way
+ using std::compare_three_way;
+
+ // [logical.operations], logical operations
+ using std::logical_and;
+ using std::logical_not;
+ using std::logical_or;
+
+ // [bitwise.operations], bitwise operations
+ using std::bit_and;
+ using std::bit_not;
+ using std::bit_or;
+ using std::bit_xor;
+
+ // [func.identity], identity
+ using std::identity;
+
+ // [func.not.fn], function template not_fn
+ using std::not_fn;
+
+ // [func.bind.partial], function templates bind_front and bind_back
+ // using std::bind_back;
+ using std::bind_front;
+
+ // [func.bind], bind
+ using std::is_bind_expression;
+ using std::is_bind_expression_v;
+ using std::is_placeholder;
+ using std::is_placeholder_v;
+
+ using std::bind;
+
+ namespace placeholders {
+ // M is the implementation-defined number of placeholders
+ using std::placeholders::_1;
+ using std::placeholders::_10;
+ using std::placeholders::_2;
+ using std::placeholders::_3;
+ using std::placeholders::_4;
+ using std::placeholders::_5;
+ using std::placeholders::_6;
+ using std::placeholders::_7;
+ using std::placeholders::_8;
+ using std::placeholders::_9;
+ } // namespace placeholders
+
+ // [func.memfn], member function adaptors
+ using std::mem_fn;
+
+ // [func.wrap], polymorphic function wrappers
+ using std::bad_function_call;
+
+ using std::function;
+
+ using std::swap;
+
+ using std::operator==;
+
+ // [func.wrap.move], move only wrapper
+ // using std::move_only_function;
+
+ // [func.search], searchers
+ using std::default_searcher;
+
+ using std::boyer_moore_searcher;
+
+ using std::boyer_moore_horspool_searcher;
+
+ // [unord.hash], class template hash
+ using std::hash;
+
+ namespace ranges {
+ // [range.cmp], concept-constrained comparisons
+ using std::ranges::equal_to;
+ using std::ranges::greater;
+ using std::ranges::greater_equal;
+ using std::ranges::less;
+ using std::ranges::less_equal;
+ using std::ranges::not_equal_to;
+ } // namespace ranges
+} // namespace std
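
To illustrate the partial-application and negation adaptors listed here (bind_front, not_fn), a small C++20 sketch with invented names:

  // Illustrative example; minus_fn and from_ten are made-up names.
  #include <cassert>
  #include <functional>

  int minus_fn(int a, int b) { return a - b; }

  int main() {
    auto from_ten = std::bind_front(minus_fn, 10);               // binds the first argument
    assert(from_ten(3) == 7);
    auto not_negative = std::not_fn([](int v) { return v < 0; });
    assert(not_negative(5));
  }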
+
+// future.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_THREADS
+ using std::future_errc;
+ using std::future_status;
+ using std::launch;
+
+ // launch is a bitmask type.
+ // [bitmask.types] specified operators
+ using std::operator&;
+ using std::operator&=;
+ using std::operator^;
+ using std::operator^=;
+ using std::operator|;
+ using std::operator|=;
+ using std::operator~;
+
+ // [futures.errors], error handling
+ using std::is_error_code_enum;
+ using std::make_error_code;
+ using std::make_error_condition;
+
+ using std::future_category;
+
+ // [futures.future.error], class future_error
+ using std::future_error;
+
+ // [futures.promise], class template promise
+ using std::promise;
+
+ using std::swap;
+
+ using std::uses_allocator;
+
+ // [futures.unique.future], class template future
+ using std::future;
+
+ // [futures.shared.future], class template shared_future
+ using std::shared_future;
+
+ // [futures.task], class template packaged_task
+ using std::packaged_task;
+
+ // [futures.async], function template async
+ using std::async;
+#endif // _LIBCPP_HAS_NO_THREADS
+} // namespace std
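
A minimal async/future round trip built from the names above (assumes threads are enabled, as guarded by _LIBCPP_HAS_NO_THREADS):

  // Illustrative example.
  #include <cassert>
  #include <future>

  int main() {
    std::future<int> f = std::async(std::launch::async, [] { return 6 * 7; });
    assert(f.get() == 42);                          // blocks until the task finishes
  }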
+
+// generator.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if 0
+ using std::generator;
+#endif
+} // namespace std
+
+// hazard_pointer.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if 0
+# if _LIBCPP_STD_VER >= 23
+ // 4.1.3, class template hazard_pointer_obj_base
+ using std::hazard_pointer_obj_base;
+ // 4.1.4, class hazard_pointer
+ using std::hazard_pointer;
+ // 4.1.5, Construct non-empty hazard_pointer
+ using std::make_hazard_pointer;
+ // 4.1.6, Hazard pointer swap
+ using std::swap;
+# endif // _LIBCPP_STD_VER >= 23
+#endif
+} // namespace std
+
+// initializer_list.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::initializer_list;
+
+ // [support.initlist.range], initializer list range access
+ using std::begin;
+ using std::end;
+} // namespace std
+
+// iomanip.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::get_money;
+ using std::get_time;
+ using std::put_money;
+ using std::put_time;
+ using std::resetiosflags;
+ using std::setbase;
+ using std::setfill;
+ using std::setiosflags;
+ using std::setprecision;
+ using std::setw;
+
+ using std::quoted;
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
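
The stream manipulators above compose left to right, as in this brief sketch:

  // Illustrative example.
  #include <iomanip>
  #include <iostream>

  int main() {
    std::cout << std::setw(10) << std::setfill('.') << 42 << '\n'   // "........42"
              << std::setprecision(3) << 3.14159 << '\n';           // "3.14"
  }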
+
+// ios.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::fpos;
+ // based on [tab:fpos.operations]
+ using std::operator!=; // Note not affected by P1614, seems like a bug.
+ using std::operator-;
+ using std::operator==;
+
+ using std::streamoff;
+ using std::streamsize;
+
+ using std::basic_ios;
+ using std::ios_base;
+
+ // [std.ios.manip], manipulators
+ using std::boolalpha;
+ using std::noboolalpha;
+
+ using std::noshowbase;
+ using std::showbase;
+
+ using std::noshowpoint;
+ using std::showpoint;
+
+ using std::noshowpos;
+ using std::showpos;
+
+ using std::noskipws;
+ using std::skipws;
+
+ using std::nouppercase;
+ using std::uppercase;
+
+ using std::nounitbuf;
+ using std::unitbuf;
+
+ // [adjustfield.manip], adjustfield
+ using std::internal;
+ using std::left;
+ using std::right;
+
+ // [basefield.manip], basefield
+ using std::dec;
+ using std::hex;
+ using std::oct;
+
+ // [floatfield.manip], floatfield
+ using std::defaultfloat;
+ using std::fixed;
+ using std::hexfloat;
+ using std::scientific;
+
+ // [error.reporting], error reporting
+ using std::io_errc;
+
+ using std::iostream_category;
+ using std::is_error_code_enum;
+ using std::make_error_code;
+ using std::make_error_condition;
+
+ // [iosfwd.syn]
+ using std::ios;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wios;
+# endif
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// iosfwd.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::streampos;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wstreampos;
+#endif
+ using std::u16streampos;
+ using std::u32streampos;
+ using std::u8streampos;
+
+#ifdef _LIBCPP_HAS_YES_SYNCSTREAM
+ using std::basic_osyncstream;
+ using std::basic_syncbuf;
+#endif
+
+ using std::istreambuf_iterator;
+ using std::ostreambuf_iterator;
+
+#ifdef _LIBCPP_HAS_YES_SYNCSTREAM
+ using std::osyncstream;
+ using std::syncbuf;
+#endif
+
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+#ifdef _LIBCPP_HAS_YES_SYNCSTREAM
+ using std::wosyncstream;
+ using std::wsyncbuf;
+#endif
+#endif
+
+ using std::fpos;
+} // namespace std
+
+// iostream.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::cerr;
+ using std::cin;
+ using std::clog;
+ using std::cout;
+
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wcerr;
+ using std::wcin;
+ using std::wclog;
+ using std::wcout;
+# endif
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// istream.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::basic_istream;
+
+ using std::istream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wistream;
+# endif
+
+ using std::basic_iostream;
+
+ using std::iostream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wiostream;
+# endif
+
+ using std::ws;
+
+ using std::operator>>;
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// iterator.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [iterator.assoc.types], associated types
+ // [incrementable.traits], incrementable traits
+ using std::incrementable_traits;
+ using std::iter_difference_t;
+
+ using std::indirectly_readable_traits;
+ using std::iter_value_t;
+
+ // [iterator.traits], iterator traits
+ using std::iterator_traits;
+
+ using std::iter_reference_t;
+
+ namespace ranges {
+ // [iterator.cust], customization point objects
+ inline namespace __cpo {
+ // [iterator.cust.move], ranges::iter_move
+ using std::ranges::__cpo::iter_move;
+
+ // [iterator.cust.swap], ranges::iter_swap
+ using std::ranges::__cpo::iter_swap;
+ } // namespace __cpo
+ } // namespace ranges
+
+ using std::iter_rvalue_reference_t;
+
+ // [iterator.concepts], iterator concepts
+ // [iterator.concept.readable], concept indirectly_readable
+ using std::indirectly_readable;
+
+ using std::iter_common_reference_t;
+
+ // [iterator.concept.writable], concept indirectly_writable
+ using std::indirectly_writable;
+
+ // [iterator.concept.winc], concept weakly_incrementable
+ using std::weakly_incrementable;
+
+ // [iterator.concept.inc], concept incrementable
+ using std::incrementable;
+
+ // [iterator.concept.iterator], concept input_or_output_iterator
+ using std::input_or_output_iterator;
+
+ // [iterator.concept.sentinel], concept sentinel_for
+ using std::sentinel_for;
+
+ // [iterator.concept.sizedsentinel], concept sized_sentinel_for
+ using std::disable_sized_sentinel_for;
+
+ using std::sized_sentinel_for;
+
+ // [iterator.concept.input], concept input_iterator
+ using std::input_iterator;
+
+ // [iterator.concept.output], concept output_iterator
+ using std::output_iterator;
+
+ // [iterator.concept.forward], concept forward_iterator
+ using std::forward_iterator;
+
+ // [iterator.concept.bidir], concept bidirectional_iterator
+ using std::bidirectional_iterator;
+
+ // [iterator.concept.random.access], concept random_access_iterator
+ using std::random_access_iterator;
+
+ // [iterator.concept.contiguous], concept contiguous_iterator
+ using std::contiguous_iterator;
+
+ // [indirectcallable], indirect callable requirements
+ // [indirectcallable.indirectinvocable], indirect callables
+ using std::indirectly_unary_invocable;
+
+ using std::indirectly_regular_unary_invocable;
+
+ using std::indirect_unary_predicate;
+
+ using std::indirect_binary_predicate;
+
+ using std::indirect_equivalence_relation;
+
+ using std::indirect_strict_weak_order;
+
+ using std::indirect_result_t;
+
+ // [projected], projected
+ using std::projected;
+
+ // [alg.req], common algorithm requirements
+ // [alg.req.ind.move], concept indirectly_movable
+ using std::indirectly_movable;
+
+ using std::indirectly_movable_storable;
+
+ // [alg.req.ind.copy], concept indirectly_copyable
+ using std::indirectly_copyable;
+
+ using std::indirectly_copyable_storable;
+
+ // [alg.req.ind.swap], concept indirectly_swappable
+ using std::indirectly_swappable;
+
+ // [alg.req.ind.cmp], concept indirectly_comparable
+ using std::indirectly_comparable;
+
+ // [alg.req.permutable], concept permutable
+ using std::permutable;
+
+ // [alg.req.mergeable], concept mergeable
+ using std::mergeable;
+
+ // [alg.req.sortable], concept sortable
+ using std::sortable;
+
+ // [iterator.primitives], primitives
+ // [std.iterator.tags], iterator tags
+ using std::bidirectional_iterator_tag;
+ using std::contiguous_iterator_tag;
+ using std::forward_iterator_tag;
+ using std::input_iterator_tag;
+ using std::output_iterator_tag;
+ using std::random_access_iterator_tag;
+
+ // [iterator.operations], iterator operations
+ using std::advance;
+ using std::distance;
+ using std::next;
+ using std::prev;
+
+ // [range.iter.ops], range iterator operations
+ namespace ranges {
+    // [range.iter.op.advance], ranges::advance
+ using std::ranges::advance;
+
+    // [range.iter.op.distance], ranges::distance
+ using std::ranges::distance;
+
+    // [range.iter.op.next], ranges::next
+ using std::ranges::next;
+
+    // [range.iter.op.prev], ranges::prev
+ using std::ranges::prev;
+ } // namespace ranges
+
+ // [predef.iterators], predefined iterators and sentinels
+ // [reverse.iterators], reverse iterators
+ using std::reverse_iterator;
+
+ using std::operator==;
+ using std::operator!=;
+ using std::operator<;
+ using std::operator>;
+ using std::operator<=;
+ using std::operator>=;
+ using std::operator<=>;
+
+ using std::operator-;
+ using std::operator+;
+
+ using std::make_reverse_iterator;
+
+ // using std::disable_sized_sentinel_for;
+
+ // [insert.iterators], insert iterators
+ using std::back_insert_iterator;
+ using std::back_inserter;
+
+ using std::front_insert_iterator;
+ using std::front_inserter;
+
+ using std::insert_iterator;
+ using std::inserter;
+
+ // [const.iterators], constant iterators and sentinels
+ // [const.iterators.alias], alias templates
+ // using std::const_iterator;
+ // using std::const_sentinel;
+ // using std::iter_const_reference_t;
+
+ // [const.iterators.iterator], class template basic_const_iterator
+ // using std::basic_const_iterator;
+
+ // using std::common_type;
+
+ // using std::make_const_iterator;
+
+ // [move.iterators], move iterators and sentinels
+ using std::move_iterator;
+
+ using std::make_move_iterator;
+
+ using std::move_sentinel;
+
+ using std::common_iterator;
+
+ // [default.sentinel], default sentinel
+ using std::default_sentinel;
+ using std::default_sentinel_t;
+
+ // [iterators.counted], counted iterators
+ using std::counted_iterator;
+
+ // [unreachable.sentinel], unreachable sentinel
+ using std::unreachable_sentinel;
+ using std::unreachable_sentinel_t;
+
+ // [stream.iterators], stream iterators
+ using std::istream_iterator;
+
+ using std::ostream_iterator;
+
+ using std::istreambuf_iterator;
+ using std::ostreambuf_iterator;
+
+ // [iterator.range], range access
+ using std::begin;
+ using std::cbegin;
+ using std::cend;
+ using std::crbegin;
+ using std::crend;
+ using std::end;
+ using std::rbegin;
+ using std::rend;
+
+ using std::empty;
+ using std::size;
+ using std::ssize;
+
+ using std::data;
+
+ // [depr.iterator]
+ using std::iterator;
+} // namespace std
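
A few of the iterator operations and range-access helpers exported here, shown on a vector (illustrative sketch only):

  // Illustrative example.
  #include <cassert>
  #include <iterator>
  #include <vector>

  int main() {
    std::vector<int> v{1, 2, 3, 4};
    assert(*std::next(v.begin(), 2) == 3);                    // [iterator.operations]
    assert(*std::make_reverse_iterator(v.end()) == 4);        // [reverse.iterators]
    assert(std::ssize(v) == 4);                               // [iterator.range]
  }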
+
+// latch.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_THREADS
+ using std::latch;
+#endif // _LIBCPP_HAS_NO_THREADS
+} // namespace std
+
+// limits.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [fp.style], floating-point type properties
+ using std::float_denorm_style;
+ using std::float_round_style;
+
+  // [numeric.limits], class template numeric_limits
+ using std::numeric_limits;
+} // namespace std
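
A one-line-per-query sketch of the numeric_limits traits re-exported above:

  // Illustrative example.
  #include <cstdio>
  #include <limits>

  int main() {
    std::printf("%d %d %g\n",
                std::numeric_limits<int>::max(),
                std::numeric_limits<int>::is_signed,          // bool, promoted to int for printf
                std::numeric_limits<double>::epsilon());
  }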
+
+// list.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [list], class template list
+ using std::list;
+
+ using std::operator==;
+ using std::operator<=>;
+
+ using std::swap;
+
+ // [list.erasure], erasure
+ using std::erase;
+ using std::erase_if;
+
+ namespace pmr {
+ using std::pmr::list;
+ }
+} // namespace std
+
+// locale.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ // [locale], locale
+ using std::has_facet;
+ using std::locale;
+ using std::use_facet;
+
+ // [locale.convenience], convenience interfaces
+ using std::isalnum;
+ using std::isalpha;
+ using std::isblank;
+ using std::iscntrl;
+ using std::isdigit;
+ using std::isgraph;
+ using std::islower;
+ using std::isprint;
+ using std::ispunct;
+ using std::isspace;
+ using std::isupper;
+ using std::isxdigit;
+ using std::tolower;
+ using std::toupper;
+
+ // [category.ctype], ctype
+ using std::codecvt;
+ using std::codecvt_base;
+ using std::codecvt_byname;
+ using std::ctype;
+ using std::ctype_base;
+ using std::ctype_byname;
+
+ // [category.numeric], numeric
+ using std::num_get;
+ using std::num_put;
+ using std::numpunct;
+ using std::numpunct_byname;
+
+ // [category.collate], collation
+ using std::collate;
+ using std::collate_byname;
+
+ // [category.time], date and time
+ using std::time_base;
+ using std::time_get;
+ using std::time_get_byname;
+ using std::time_put;
+ using std::time_put_byname;
+
+ // [category.monetary], money
+ using std::money_base;
+ using std::money_get;
+ using std::money_put;
+ using std::moneypunct;
+ using std::moneypunct_byname;
+
+ // [category.messages], message retrieval
+ using std::messages;
+ using std::messages_base;
+ using std::messages_byname;
+
+ // [depr.conversions.buffer]
+ using std::wbuffer_convert;
+
+ // [depr.conversions.string]
+ using std::wstring_convert;
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// map.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [map], class template map
+ using std::map;
+
+ using std::operator==;
+ using std::operator<=>;
+
+ using std::swap;
+
+ // [map.erasure], erasure for map
+ using std::erase_if;
+
+ // [multimap], class template multimap
+ using std::multimap;
+
+ namespace pmr {
+ using std::pmr::map;
+ using std::pmr::multimap;
+ } // namespace pmr
+} // namespace std
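
Uniform container erasure on map, as exported above, looks like this in practice (hedged C++20 sketch):

  // Illustrative example.
  #include <cassert>
  #include <map>

  int main() {
    std::map<int, char> m{{1, 'a'}, {2, 'b'}, {3, 'c'}};
    auto removed = std::erase_if(m, [](const auto& kv) { return kv.first % 2 == 1; });
    assert(removed == 2 && m.size() == 1);          // only the even key remains
  }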
+
+// mdspan.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if _LIBCPP_STD_VER >= 23
+ // [mdspan.extents], class template extents
+ using std::extents;
+
+ // [mdspan.extents.dextents], alias template dextents
+ using std::dextents;
+
+ // [mdspan.layout], layout mapping
+ using std::layout_left;
+ using std::layout_right;
+#if _LIBCPP_VERSION >= 180000
+ using std::layout_stride;
+#endif
+
+ // [mdspan.accessor.default], class template default_accessor
+ using std::default_accessor;
+
+ // [mdspan.mdspan], class template mdspan
+ using std::mdspan;
+#endif // _LIBCPP_STD_VER >= 23
+} // namespace std
+
+// memory.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [pointer.traits], pointer traits
+ using std::pointer_traits;
+
+ // [pointer.conversion], pointer conversion
+ using std::to_address;
+
+ // [ptr.align], pointer alignment
+ using std::align;
+ using std::assume_aligned;
+
+ // [obj.lifetime], explicit lifetime management
+ // using std::start_lifetime_as;
+ // using std::start_lifetime_as_array;
+
+ // [allocator.tag], allocator argument tag
+ using std::allocator_arg;
+ using std::allocator_arg_t;
+
+ // [allocator.uses], uses_allocator
+ using std::uses_allocator;
+
+ // [allocator.uses.trait], uses_allocator
+ using std::uses_allocator_v;
+
+ // [allocator.uses.construction], uses-allocator construction
+ using std::uses_allocator_construction_args;
+
+ using std::make_obj_using_allocator;
+ using std::uninitialized_construct_using_allocator;
+
+ // [allocator.traits], allocator traits
+ using std::allocator_traits;
+
+#if _LIBCPP_STD_VER >= 23
+ using std::allocation_result;
+
+ using std::allocate_at_least;
+#endif
+
+ // [default.allocator], the default allocator
+ using std::allocator;
+ using std::operator==;
+
+ // [specialized.addressof], addressof
+ using std::addressof;
+
+ // [specialized.algorithms], specialized algorithms
+ // [special.mem.concepts], special memory concepts
+
+ using std::uninitialized_default_construct;
+ using std::uninitialized_default_construct_n;
+
+ namespace ranges {
+ using std::ranges::uninitialized_default_construct;
+ using std::ranges::uninitialized_default_construct_n;
+ } // namespace ranges
+
+ using std::uninitialized_value_construct;
+ using std::uninitialized_value_construct_n;
+
+ namespace ranges {
+ using std::ranges::uninitialized_value_construct;
+ using std::ranges::uninitialized_value_construct_n;
+ } // namespace ranges
+
+ using std::uninitialized_copy;
+ using std::uninitialized_copy_n;
+
+ namespace ranges {
+ using std::ranges::uninitialized_copy;
+ using std::ranges::uninitialized_copy_result;
+
+ using std::ranges::uninitialized_copy_n;
+ using std::ranges::uninitialized_copy_n_result;
+ } // namespace ranges
+
+ using std::uninitialized_move;
+ using std::uninitialized_move_n;
+
+ namespace ranges {
+ using std::ranges::uninitialized_move;
+ using std::ranges::uninitialized_move_result;
+
+ using std::ranges::uninitialized_move_n;
+ using std::ranges::uninitialized_move_n_result;
+ } // namespace ranges
+
+ using std::uninitialized_fill;
+ using std::uninitialized_fill_n;
+
+ namespace ranges {
+ using std::ranges::uninitialized_fill;
+ using std::ranges::uninitialized_fill_n;
+ } // namespace ranges
+
+ // [specialized.construct], construct_at
+ using std::construct_at;
+
+ namespace ranges {
+ using std::ranges::construct_at;
+ }
+ // [specialized.destroy], destroy
+ using std::destroy;
+ using std::destroy_at;
+ using std::destroy_n;
+
+ namespace ranges {
+ using std::ranges::destroy;
+ using std::ranges::destroy_at;
+ using std::ranges::destroy_n;
+ } // namespace ranges
+
+ // [unique.ptr], class template unique_ptr
+ using std::default_delete;
+ using std::unique_ptr;
+
+ using std::make_unique;
+ using std::make_unique_for_overwrite;
+
+ using std::operator<;
+ using std::operator>;
+ using std::operator<=;
+ using std::operator>=;
+ using std::operator<=>;
+
+ using std::operator<<;
+
+ // [util.smartptr.weak.bad], class bad_weak_ptr
+ using std::bad_weak_ptr;
+
+ // [util.smartptr.shared], class template shared_ptr
+ using std::shared_ptr;
+
+ // [util.smartptr.shared.create], shared_ptr creation
+ using std::allocate_shared;
+ using std::allocate_shared_for_overwrite;
+ using std::make_shared;
+ using std::make_shared_for_overwrite;
+
+ // [util.smartptr.shared.spec], shared_ptr specialized algorithms
+ using std::swap;
+
+ // [util.smartptr.shared.cast], shared_ptr casts
+ using std::const_pointer_cast;
+ using std::dynamic_pointer_cast;
+ using std::reinterpret_pointer_cast;
+ using std::static_pointer_cast;
+
+ using std::get_deleter;
+
+ // [util.smartptr.shared.io], shared_ptr I/O
+
+ // [util.smartptr.weak], class template weak_ptr
+ using std::weak_ptr;
+
+ // [util.smartptr.weak.spec], weak_ptr specialized algorithms
+
+ // [util.smartptr.ownerless], class template owner_less
+ using std::owner_less;
+
+ // [util.smartptr.enab], class template enable_shared_from_this
+ using std::enable_shared_from_this;
+
+ // [util.smartptr.hash], hash support
+ using std::hash;
+
+ // [util.smartptr.atomic], atomic smart pointers
+ // using std::atomic;
+
+ // [out.ptr.t], class template out_ptr_t
+ // using std::out_ptr_t;
+
+ // [out.ptr], function template out_ptr
+ // using std::out_ptr;
+
+ // [inout.ptr.t], class template inout_ptr_t
+ // using std::inout_ptr_t;
+
+ // [inout.ptr], function template inout_ptr
+ // using std::inout_ptr;
+
+#ifndef _LIBCPP_HAS_NO_THREADS
+ // [depr.util.smartptr.shared.atomic]
+ using std::atomic_is_lock_free;
+
+ using std::atomic_load;
+ using std::atomic_load_explicit;
+
+ using std::atomic_store;
+ using std::atomic_store_explicit;
+
+ using std::atomic_exchange;
+ using std::atomic_exchange_explicit;
+
+ using std::atomic_compare_exchange_strong;
+ using std::atomic_compare_exchange_strong_explicit;
+ using std::atomic_compare_exchange_weak;
+ using std::atomic_compare_exchange_weak_explicit;
+#endif // _LIBCPP_HAS_NO_THREADS
+} // namespace std
+
+// memory_resource.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std::pmr {
+ // [mem.res.class], class memory_resource
+ using std::pmr::memory_resource;
+
+ using std::pmr::operator==;
+
+ // [mem.poly.allocator.class], class template polymorphic_allocator
+ using std::pmr::polymorphic_allocator;
+
+ // [mem.res.global], global memory resources
+ using std::pmr::get_default_resource;
+ using std::pmr::new_delete_resource;
+ using std::pmr::null_memory_resource;
+ using std::pmr::set_default_resource;
+
+ // [mem.res.pool], pool resource classes
+ using std::pmr::monotonic_buffer_resource;
+ using std::pmr::pool_options;
+ using std::pmr::synchronized_pool_resource;
+ using std::pmr::unsynchronized_pool_resource;
+} // namespace std::pmr
+
+// mutex.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_THREADS
+ // [thread.mutex.class], class mutex
+ using std::mutex;
+ // [thread.mutex.recursive], class recursive_mutex
+ using std::recursive_mutex;
+ // [thread.timedmutex.class] class timed_mutex
+ using std::timed_mutex;
+ // [thread.timedmutex.recursive], class recursive_timed_mutex
+ using std::recursive_timed_mutex;
+
+ using std::adopt_lock_t;
+ using std::defer_lock_t;
+ using std::try_to_lock_t;
+
+ using std::adopt_lock;
+ using std::defer_lock;
+ using std::try_to_lock;
+
+ // [thread.lock], locks
+ using std::lock_guard;
+ using std::scoped_lock;
+ using std::unique_lock;
+
+ using std::swap;
+
+ // [thread.lock.algorithm], generic locking algorithms
+ using std::lock;
+ using std::try_lock;
+#endif // _LIBCPP_HAS_NO_THREADS
+
+ using std::once_flag;
+
+ using std::call_once;
+} // namespace std
+
+// new.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [alloc.errors], storage allocation errors
+ using std::bad_alloc;
+ using std::bad_array_new_length;
+
+ using std::destroying_delete;
+ using std::destroying_delete_t;
+
+ // global operator new control
+ using std::align_val_t;
+
+ using std::nothrow;
+ using std::nothrow_t;
+
+ using std::get_new_handler;
+ using std::new_handler;
+ using std::set_new_handler;
+
+ // [ptr.launder], pointer optimization barrier
+ using std::launder;
+#if 0
+ // [hardware.interference], hardware interference size
+ using std::hardware_constructive_interference_size;
+ using std::hardware_destructive_interference_size;
+#endif
+} // namespace std
+
+export {
+ using ::operator new;
+ using ::operator delete;
+ using ::operator new[];
+ using ::operator delete[];
+} // export
+
+// numbers.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std::numbers {
+ using std::numbers::e_v;
+ using std::numbers::egamma_v;
+ using std::numbers::inv_pi_v;
+ using std::numbers::inv_sqrt3_v;
+ using std::numbers::inv_sqrtpi_v;
+ using std::numbers::ln10_v;
+ using std::numbers::ln2_v;
+ using std::numbers::log10e_v;
+ using std::numbers::log2e_v;
+ using std::numbers::phi_v;
+ using std::numbers::pi_v;
+ using std::numbers::sqrt2_v;
+ using std::numbers::sqrt3_v;
+
+ using std::numbers::e;
+ using std::numbers::egamma;
+ using std::numbers::inv_pi;
+ using std::numbers::inv_sqrt3;
+ using std::numbers::inv_sqrtpi;
+ using std::numbers::ln10;
+ using std::numbers::ln2;
+ using std::numbers::log10e;
+ using std::numbers::log2e;
+ using std::numbers::phi;
+ using std::numbers::pi;
+ using std::numbers::sqrt2;
+ using std::numbers::sqrt3;
+} // namespace std::numbers
+
+// numeric.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [accumulate], accumulate
+ using std::accumulate;
+
+ // [reduce], reduce
+ using std::reduce;
+
+ // [inner.product], inner product
+ using std::inner_product;
+
+ // [transform.reduce], transform reduce
+ using std::transform_reduce;
+
+ // [partial.sum], partial sum
+ using std::partial_sum;
+
+ // [exclusive.scan], exclusive scan
+ using std::exclusive_scan;
+
+ // [inclusive.scan], inclusive scan
+ using std::inclusive_scan;
+
+ // [transform.exclusive.scan], transform exclusive scan
+ using std::transform_exclusive_scan;
+
+ // [transform.inclusive.scan], transform inclusive scan
+ using std::transform_inclusive_scan;
+
+ // [adjacent.difference], adjacent difference
+ using std::adjacent_difference;
+
+ // [numeric.iota], iota
+ using std::iota;
+
+ namespace ranges {
+ // using std::ranges::iota_result;
+ // using std::ranges::iota;
+ } // namespace ranges
+
+ // [numeric.ops.gcd], greatest common divisor
+ using std::gcd;
+
+ // [numeric.ops.lcm], least common multiple
+ using std::lcm;
+
+ // [numeric.ops.midpoint], midpoint
+ using std::midpoint;
+} // namespace std
+
+// optional.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [optional.optional], class template optional
+ using std::optional;
+
+ // [optional.nullopt], no-value state indicator
+ using std::nullopt;
+ using std::nullopt_t;
+
+ // [optional.bad.access], class bad_optional_access
+ using std::bad_optional_access;
+
+ // [optional.relops], relational operators
+ using std::operator==;
+ using std::operator!=;
+ using std::operator<;
+ using std::operator>;
+ using std::operator<=;
+ using std::operator>=;
+ using std::operator<=>;
+
+ // [optional.specalg], specialized algorithms
+ using std::swap;
+
+ using std::make_optional;
+
+ // [optional.hash], hash support
+ using std::hash;
+} // namespace std
+
+// ostream.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::basic_ostream;
+
+ using std::ostream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wostream;
+# endif
+
+ using std::endl;
+ using std::ends;
+ using std::flush;
+
+# if 0
+ using std::emit_on_flush;
+ using std::flush_emit;
+ using std::noemit_on_flush;
+# endif
+ using std::operator<<;
+
+# if 0
+ // [ostream.formatted.print], print functions
+ using std::print;
+ using std::println;
+
+ using std::vprint_nonunicode;
+ using std::vprint_unicode;
+# endif
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// print.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if _LIBCPP_STD_VER >= 23
+ // [print.fun], print functions
+ using std::print;
+ using std::println;
+
+ using std::vprint_nonunicode;
+# ifndef _LIBCPP_HAS_NO_UNICODE
+ using std::vprint_unicode;
+# endif // _LIBCPP_HAS_NO_UNICODE
+#endif // _LIBCPP_STD_VER >= 23
+} // namespace std
+
+// queue.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [queue], class template queue
+ using std::queue;
+
+ using std::operator==;
+ using std::operator!=;
+ using std::operator<;
+ using std::operator>;
+ using std::operator<=;
+ using std::operator>=;
+ using std::operator<=>;
+
+ using std::swap;
+ using std::uses_allocator;
+
+ // [priority.queue], class template priority_queue
+ using std::priority_queue;
+} // namespace std
+
+// random.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [rand.req.urng], uniform random bit generator requirements
+ using std::uniform_random_bit_generator;
+
+ // [rand.eng.lcong], class template linear_congruential_engine
+ using std::linear_congruential_engine;
+
+ // [rand.eng.mers], class template mersenne_twister_engine
+ using std::mersenne_twister_engine;
+
+ // [rand.eng.sub], class template subtract_with_carry_engine
+ using std::subtract_with_carry_engine;
+
+ // [rand.adapt.disc], class template discard_block_engine
+ using std::discard_block_engine;
+
+ // [rand.adapt.ibits], class template independent_bits_engine
+ using std::independent_bits_engine;
+
+ // [rand.adapt.shuf], class template shuffle_order_engine
+ using std::shuffle_order_engine;
+
+ // [rand.predef], engines and engine adaptors with predefined parameters
+ using std::knuth_b;
+ using std::minstd_rand;
+ using std::minstd_rand0;
+ using std::mt19937;
+ using std::mt19937_64;
+ using std::ranlux24;
+ using std::ranlux24_base;
+ using std::ranlux48;
+ using std::ranlux48_base;
+
+ using std::default_random_engine;
+
+#ifndef _LIBCPP_HAS_NO_RANDOM_DEVICE
+ // [rand.device], class random_device
+ using std::random_device;
+#endif
+
+ // [rand.util.seedseq], class seed_seq
+ using std::seed_seq;
+
+ // [rand.util.canonical], function template generate_canonical
+ using std::generate_canonical;
+
+ // [rand.dist.uni.int], class template uniform_int_distribution
+ using std::uniform_int_distribution;
+
+ // [rand.dist.uni.real], class template uniform_real_distribution
+ using std::uniform_real_distribution;
+
+ // [rand.dist.bern.bernoulli], class bernoulli_distribution
+ using std::bernoulli_distribution;
+
+ // [rand.dist.bern.bin], class template binomial_distribution
+ using std::binomial_distribution;
+
+ // [rand.dist.bern.geo], class template geometric_distribution
+ using std::geometric_distribution;
+
+ // [rand.dist.bern.negbin], class template negative_binomial_distribution
+ using std::negative_binomial_distribution;
+
+ // [rand.dist.pois.poisson], class template poisson_distribution
+ using std::poisson_distribution;
+
+ // [rand.dist.pois.exp], class template exponential_distribution
+ using std::exponential_distribution;
+
+ // [rand.dist.pois.gamma], class template gamma_distribution
+ using std::gamma_distribution;
+
+ // [rand.dist.pois.weibull], class template weibull_distribution
+ using std::weibull_distribution;
+
+ // [rand.dist.pois.extreme], class template extreme_value_distribution
+ using std::extreme_value_distribution;
+
+ // [rand.dist.norm.normal], class template normal_distribution
+ using std::normal_distribution;
+
+ // [rand.dist.norm.lognormal], class template lognormal_distribution
+ using std::lognormal_distribution;
+
+ // [rand.dist.norm.chisq], class template chi_squared_distribution
+ using std::chi_squared_distribution;
+
+ // [rand.dist.norm.cauchy], class template cauchy_distribution
+ using std::cauchy_distribution;
+
+ // [rand.dist.norm.f], class template fisher_f_distribution
+ using std::fisher_f_distribution;
+
+ // [rand.dist.norm.t], class template student_t_distribution
+ using std::student_t_distribution;
+
+ // [rand.dist.samp.discrete], class template discrete_distribution
+ using std::discrete_distribution;
+
+ // [rand.dist.samp.pconst], class template piecewise_constant_distribution
+ using std::piecewise_constant_distribution;
+
+ // [rand.dist.samp.plinear], class template piecewise_linear_distribution
+ using std::piecewise_linear_distribution;
+} // namespace std
+
+// ranges.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ namespace ranges {
+ inline namespace __cpo {
+ // [range.access], range access
+ using std::ranges::__cpo::begin;
+ using std::ranges::__cpo::cbegin;
+ using std::ranges::__cpo::cend;
+ using std::ranges::__cpo::crbegin;
+ using std::ranges::__cpo::crend;
+ using std::ranges::__cpo::end;
+ using std::ranges::__cpo::rbegin;
+ using std::ranges::__cpo::rend;
+
+ using std::ranges::__cpo::cdata;
+ using std::ranges::__cpo::data;
+ using std::ranges::__cpo::empty;
+ using std::ranges::__cpo::size;
+ using std::ranges::__cpo::ssize;
+ } // namespace __cpo
+
+ // [range.range], ranges
+ using std::ranges::range;
+
+ using std::ranges::enable_borrowed_range;
+
+ using std::ranges::borrowed_range;
+
+ // using std::ranges::const_iterator_t;
+ // using std::ranges::const_sentinel_t;
+ using std::ranges::iterator_t;
+ // using std::ranges::range_const_reference_t;
+ using std::ranges::range_common_reference_t;
+ using std::ranges::range_difference_t;
+ using std::ranges::range_reference_t;
+ using std::ranges::range_rvalue_reference_t;
+ using std::ranges::range_size_t;
+ using std::ranges::range_value_t;
+ using std::ranges::sentinel_t;
+
+ // [range.sized], sized ranges
+ using std::ranges::disable_sized_range;
+ using std::ranges::sized_range;
+
+ // [range.view], views
+ using std::ranges::enable_view;
+ using std::ranges::view;
+ using std::ranges::view_base;
+
+ // [range.refinements], other range refinements
+ using std::ranges::bidirectional_range;
+ using std::ranges::common_range;
+ // using std::ranges::constant_range;
+ using std::ranges::contiguous_range;
+ using std::ranges::forward_range;
+ using std::ranges::input_range;
+ using std::ranges::output_range;
+ using std::ranges::random_access_range;
+ using std::ranges::viewable_range;
+
+    // [view.interface], class template view_interface
+ using std::ranges::view_interface;
+
+ // [range.subrange], sub-ranges
+ using std::ranges::subrange;
+ using std::ranges::subrange_kind;
+
+ using std::ranges::get;
+ } // namespace ranges
+
+ using std::ranges::get;
+
+ namespace ranges {
+
+ // [range.dangling], dangling iterator handling
+ using std::ranges::dangling;
+
+    // [range.elementsof], class template elements_of
+ // using std::ranges::elements_of;
+
+ using std::ranges::borrowed_iterator_t;
+
+ using std::ranges::borrowed_subrange_t;
+
+#if _LIBCPP_STD_VER >= 23
+ // [range.utility.conv], range conversions
+ using std::ranges::to;
+#endif
+
+ // [range.empty], empty view
+ using std::ranges::empty_view;
+
+ namespace views {
+ using std::ranges::views::empty;
+ }
+
+ // [range.single], single view
+ using std::ranges::single_view;
+
+ namespace views {
+ using std::ranges::views::single;
+ } // namespace views
+
+ // [range.iota], iota view
+ using std::ranges::iota_view;
+
+ namespace views {
+ using std::ranges::views::iota;
+ } // namespace views
+
+#if _LIBCPP_STD_VER >= 23
+ // [range.repeat], repeat view
+ using std::ranges::repeat_view;
+
+ namespace views {
+ using std::ranges::views::repeat;
+ } // namespace views
+#endif // _LIBCPP_STD_VER >= 23
+
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ // [range.istream], istream view
+ using std::ranges::basic_istream_view;
+ using std::ranges::istream_view;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::ranges::wistream_view;
+# endif
+
+ namespace views {
+ using std::ranges::views::istream;
+ }
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+
+ // [range.adaptor.object], range adaptor objects
+ // using std::ranges::range_adaptor_closure;
+
+ // [range.all], all view
+ namespace views {
+ using std::ranges::views::all;
+ using std::ranges::views::all_t;
+ } // namespace views
+
+ // [range.ref.view], ref view
+ using std::ranges::ref_view;
+
+ // [range.owning.view], owning view
+ using std::ranges::owning_view;
+
+#if _LIBCPP_STD_VER >= 23
+ // [range.as.rvalue], as rvalue view
+ using std::ranges::as_rvalue_view;
+
+ namespace views {
+ using std::ranges::views::as_rvalue;
+ } // namespace views
+#endif // _LIBCPP_STD_VER >= 23
+
+ // [range.filter], filter view
+ using std::ranges::filter_view;
+
+ namespace views {
+ using std::ranges::views::filter;
+ } // namespace views
+
+ // [range.transform], transform view
+ using std::ranges::transform_view;
+
+ namespace views {
+ using std::ranges::views::transform;
+ } // namespace views
+
+ // [range.take], take view
+ using std::ranges::take_view;
+
+ namespace views {
+ using std::ranges::views::take;
+ } // namespace views
+
+ // [range.take.while], take while view
+ using std::ranges::take_while_view;
+
+ namespace views {
+ using std::ranges::views::take_while;
+ } // namespace views
+
+ // [range.drop], drop view
+ using std::ranges::drop_view;
+
+ namespace views {
+ using std::ranges::views::drop;
+ } // namespace views
+
+ // [range.drop.while], drop while view
+ using std::ranges::drop_while_view;
+
+ namespace views {
+ using std::ranges::views::drop_while;
+ } // namespace views
+
+#ifdef _LIBCPP_ENABLE_EXPERIMENTAL
+ using std::ranges::join_view;
+
+ namespace views {
+ using std::ranges::views::join;
+ } // namespace views
+#endif // _LIBCPP_ENABLE_EXPERIMENTAL
+#if 0
+ using std::ranges::join_with_view;
+
+ namespace views {
+ using std::ranges::views::join_with;
+ } // namespace views
+#endif
+ using std::ranges::lazy_split_view;
+
+ // [range.split], split view
+ using std::ranges::split_view;
+
+ namespace views {
+ using std::ranges::views::lazy_split;
+ using std::ranges::views::split;
+ } // namespace views
+
+ // [range.counted], counted view
+ namespace views {
+ using std::ranges::views::counted;
+ } // namespace views
+
+ // [range.common], common view
+ using std::ranges::common_view;
+
+ namespace views {
+ using std::ranges::views::common;
+ } // namespace views
+
+ // [range.reverse], reverse view
+ using std::ranges::reverse_view;
+
+ namespace views {
+ using std::ranges::views::reverse;
+ } // namespace views
+
+ // [range.as.const], as const view
+#if 0
+ using std::ranges::as_const_view;
+
+ namespace views {
+ using std::ranges::views::as_const;
+ } // namespace views
+#endif
+ // [range.elements], elements view
+ using std::ranges::elements_view;
+
+ using std::ranges::keys_view;
+ using std::ranges::values_view;
+
+ namespace views {
+ using std::ranges::views::elements;
+ using std::ranges::views::keys;
+ using std::ranges::views::values;
+ } // namespace views
+
+#if _LIBCPP_STD_VER >= 23
+ // [range.zip], zip view
+ using std::ranges::zip_view;
+
+ namespace views {
+ using std::ranges::views::zip;
+ } // namespace views
+#endif // _LIBCPP_STD_VER >= 23
+
+#if 0
+ // [range.zip.transform], zip transform view
+ using std::ranges::zip_transform_view;
+
+ namespace views {
+ using std::ranges::views::zip_transform;
+ }
+
+ using std::ranges::adjacent_view;
+
+ namespace views {
+ using std::ranges::views::adjacent;
+ using std::ranges::views::pairwise;
+ } // namespace views
+
+ using std::ranges::adjacent_transform_view;
+
+ namespace views {
+ using std::ranges::views::adjacent_transform;
+ using std::ranges::views::pairwise_transform;
+ } // namespace views
+
+ using std::ranges::chunk_view;
+
+ namespace views {
+ using std::ranges::views::chunk;
+ }
+
+ using std::ranges::slide_view;
+
+ namespace views {
+ using std::ranges::views::slide;
+ }
+#endif
+
+#if _LIBCPP_STD_VER >= 23
+#if _LIBCPP_VERSION >= 180000
+ // [range.chunk.by], chunk by view
+ using std::ranges::chunk_by_view;
+
+ namespace views {
+ using std::ranges::views::chunk_by;
+ }
+#endif
+#endif // _LIBCPP_STD_VER >= 23
+
+#if 0
+ // [range.stride], stride view
+ using std::ranges::stride_view;
+
+ namespace views {
+ using std::ranges::views::stride;
+ }
+
+ using std::ranges::cartesian_product_view;
+
+ namespace views {
+ using std::ranges::views::cartesian_product;
+ }
+#endif
+ } // namespace ranges
+
+ namespace views = ranges::views;
+
+ using std::tuple_element;
+ using std::tuple_size;
+
+#if _LIBCPP_STD_VER >= 23
+ using std::from_range;
+ using std::from_range_t;
+#endif // _LIBCPP_STD_VER >= 23
+} // namespace std
+
+// ratio.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [ratio.ratio], class template ratio
+ using std::ratio;
+
+ // [ratio.arithmetic], ratio arithmetic
+ using std::ratio_add;
+ using std::ratio_divide;
+ using std::ratio_multiply;
+ using std::ratio_subtract;
+
+ // [ratio.comparison], ratio comparison
+ using std::ratio_equal;
+ using std::ratio_greater;
+ using std::ratio_greater_equal;
+ using std::ratio_less;
+ using std::ratio_less_equal;
+ using std::ratio_not_equal;
+
+ using std::ratio_equal_v;
+ using std::ratio_greater_equal_v;
+ using std::ratio_greater_v;
+ using std::ratio_less_equal_v;
+ using std::ratio_less_v;
+ using std::ratio_not_equal_v;
+
+ // [ratio.si], convenience SI typedefs
+ using std::atto;
+ using std::centi;
+ using std::deca;
+ using std::deci;
+ using std::exa;
+ using std::femto;
+ using std::giga;
+ using std::hecto;
+ using std::kilo;
+ using std::mega;
+ using std::micro;
+ using std::milli;
+ using std::nano;
+ using std::peta;
+ using std::pico;
+ using std::tera;
+
+ // These are not supported by libc++, due to the range of intmax_t
+ // using std::yocto;
+ // using std::yotta;
+ // using std::zepto;
+  // using std::zetta;
+} // namespace std
+
+// rcu.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if 0
+# if _LIBCPP_STD_VER >= 23
+  // 2.2.3, class template rcu_obj_base
+  using std::rcu_obj_base;
+  // 2.2.4, class rcu_domain
+  using std::rcu_domain;
+  using std::rcu_default_domain;
+ using std::rcu_barrier;
+ using std::rcu_retire;
+ using std::rcu_synchronize;
+# endif // _LIBCPP_STD_VER >= 23
+#endif
+} // namespace std
+
+// regex.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ // [re.const], regex constants
+ namespace regex_constants {
+ using std::regex_constants::error_type;
+ using std::regex_constants::match_flag_type;
+ using std::regex_constants::syntax_option_type;
+
+ // regex_constants is a bitmask type.
+ // [bitmask.types] specified operators
+ using std::regex_constants::operator&;
+ using std::regex_constants::operator&=;
+ using std::regex_constants::operator^;
+ using std::regex_constants::operator^=;
+ using std::regex_constants::operator|;
+ using std::regex_constants::operator|=;
+ using std::regex_constants::operator~;
+
+ } // namespace regex_constants
+
+ // [re.badexp], class regex_error
+ using std::regex_error;
+
+ // [re.traits], class template regex_traits
+ using std::regex_traits;
+
+ // [re.regex], class template basic_regex
+ using std::basic_regex;
+
+ using std::regex;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wregex;
+# endif
+
+ // [re.regex.swap], basic_regex swap
+ using std::swap;
+
+ // [re.submatch], class template sub_match
+ using std::sub_match;
+
+ using std::csub_match;
+ using std::ssub_match;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wcsub_match;
+ using std::wssub_match;
+# endif
+
+ // [re.submatch.op], sub_match non-member operators
+ using std::operator==;
+ using std::operator<=>;
+
+ using std::operator<<;
+
+ // [re.results], class template match_results
+ using std::match_results;
+
+ using std::cmatch;
+ using std::smatch;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wcmatch;
+ using std::wsmatch;
+# endif
+
+ // match_results comparisons
+
+ // [re.results.swap], match_results swap
+
+ // [re.alg.match], function template regex_match
+ using std::regex_match;
+
+ // [re.alg.search], function template regex_search
+ using std::regex_search;
+
+ // [re.alg.replace], function template regex_replace
+ using std::regex_replace;
+
+ // [re.regiter], class template regex_iterator
+ using std::regex_iterator;
+
+ using std::cregex_iterator;
+ using std::sregex_iterator;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wcregex_iterator;
+ using std::wsregex_iterator;
+# endif
+
+ // [re.tokiter], class template regex_token_iterator
+ using std::regex_token_iterator;
+
+ using std::cregex_token_iterator;
+ using std::sregex_token_iterator;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wcregex_token_iterator;
+ using std::wsregex_token_iterator;
+# endif
+
+ namespace pmr {
+ using std::pmr::match_results;
+
+ using std::pmr::cmatch;
+ using std::pmr::smatch;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::pmr::wcmatch;
+ using std::pmr::wsmatch;
+# endif
+ } // namespace pmr
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// scoped_allocator.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // class template scoped_allocator_adaptor
+ using std::scoped_allocator_adaptor;
+
+ // [scoped.adaptor.operators], scoped allocator operators
+ using std::operator==;
+
+} // namespace std
+
+// semaphore.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_THREADS
+ // [thread.sema.cnt], class template counting_semaphore
+ using std::counting_semaphore;
+
+ using std::binary_semaphore;
+#endif // _LIBCPP_HAS_NO_THREADS
+} // namespace std
+
+// set.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [set], class template set
+ using std::set;
+
+ using std::operator==;
+ using std::operator<=>;
+
+ using std::swap;
+
+ // [set.erasure], erasure for set
+ using std::erase_if;
+
+ // [multiset], class template multiset
+ using std::multiset;
+
+ namespace pmr {
+ using std::pmr::multiset;
+ using std::pmr::set;
+ } // namespace pmr
+} // namespace std
+
+// shared_mutex.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_THREADS
+  // [thread.sharedmutex.class], class shared_mutex
+ using std::shared_mutex;
+  // [thread.sharedtimedmutex.class], class shared_timed_mutex
+ using std::shared_timed_mutex;
+  // [thread.lock.shared], class template shared_lock
+ using std::shared_lock;
+ using std::swap;
+#endif // _LIBCPP_HAS_NO_THREADS
+} // namespace std
+
+// source_location.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::source_location;
+} // namespace std
+
+// span.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // constants
+ using std::dynamic_extent;
+
+ // [views.span], class template span
+ using std::span;
+
+ namespace ranges {
+ using std::ranges::enable_borrowed_range;
+ using std::ranges::enable_view;
+ } // namespace ranges
+
+ // [span.objectrep], views of object representation
+ using std::as_bytes;
+
+ using std::as_writable_bytes;
+} // namespace std
+
+// spanstream.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if 0
+ using std::basic_spanbuf;
+
+ using std::swap;
+
+ using std::spanbuf;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wspanbuf;
+# endif
+
+ using std::basic_ispanstream;
+
+ using std::ispanstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wispanstream;
+# endif
+
+ using std::basic_ospanstream;
+
+ using std::ospanstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wospanstream;
+# endif
+
+ using std::basic_spanstream;
+
+ using std::spanstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wspanstream;
+# endif
+#endif
+} // namespace std
+
+// sstream.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::basic_stringbuf;
+
+ using std::swap;
+
+ using std::stringbuf;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wstringbuf;
+# endif
+
+ using std::basic_istringstream;
+
+ using std::istringstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wistringstream;
+# endif
+
+ using std::basic_ostringstream;
+
+ using std::ostringstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wostringstream;
+# endif
+
+ using std::basic_stringstream;
+
+ using std::stringstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wstringstream;
+# endif
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// stack.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [stack], class template stack
+ using std::stack;
+
+ using std::operator==;
+ using std::operator!=;
+ using std::operator<;
+ using std::operator>;
+ using std::operator<=;
+ using std::operator>=;
+ using std::operator<=>;
+
+ using std::swap;
+ using std::uses_allocator;
+} // namespace std
+
+// stacktrace.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if 0
+  // [stacktrace.entry], class stacktrace_entry
+ using std::stacktrace_entry;
+
+  // [stacktrace.basic], class template basic_stacktrace
+ using std::basic_stacktrace;
+
+  // basic_stacktrace typedef-names
+ using std::stacktrace;
+
+ // [stacktrace.basic.nonmem], non-member functions
+ using std::swap;
+
+ using std::to_string;
+
+ using std::operator<<;
+
+ namespace pmr {
+ using std::pmr::stacktrace;
+ }
+
+ // [stacktrace.basic.hash], hash support
+ using std::hash;
+#endif
+} // namespace std
+
+// stdexcept.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::domain_error;
+ using std::invalid_argument;
+ using std::length_error;
+ using std::logic_error;
+ using std::out_of_range;
+ using std::overflow_error;
+ using std::range_error;
+ using std::runtime_error;
+ using std::underflow_error;
+} // namespace std
+
+// stdfloat.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if defined(__STDCPP_FLOAT16_T__)
+ using std::float16_t;
+#endif
+#if defined(__STDCPP_FLOAT32_T__)
+ using std::float32_t;
+#endif
+#if defined(__STDCPP_FLOAT64_T__)
+ using std::float64_t;
+#endif
+#if defined(__STDCPP_FLOAT128_T__)
+ using std::float128_t;
+#endif
+#if defined(__STDCPP_BFLOAT16_T__)
+ using std::bfloat16_t;
+#endif
+} // namespace std
+
+// stop_token.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_THREADS
+# ifdef _LIBCPP_ENABLE_EXPERIMENTAL
+  // [stoptoken], class stop_token
+ using std::stop_token;
+
+  // [stopsource], class stop_source
+ using std::stop_source;
+
+ // no-shared-stop-state indicator
+ using std::nostopstate;
+ using std::nostopstate_t;
+
+  // [stopcallback], class template stop_callback
+ using std::stop_callback;
+# endif // _LIBCPP_ENABLE_EXPERIMENTAL
+#endif // _LIBCPP_HAS_NO_THREADS
+} // namespace std
+
+// streambuf.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::basic_streambuf;
+ using std::streambuf;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wstreambuf;
+# endif
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// string.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [char.traits], character traits
+ using std::char_traits;
+
+ // [basic.string], basic_string
+ using std::basic_string;
+
+ using std::operator+;
+ using std::operator==;
+ using std::operator<=>;
+
+ // [string.special], swap
+ using std::swap;
+
+ // [string.io], inserters and extractors
+ using std::operator>>;
+ using std::operator<<;
+ using std::getline;
+
+ // [string.erasure], erasure
+ using std::erase;
+ using std::erase_if;
+
+ // basic_string typedef-names
+ using std::string;
+ using std::u16string;
+ using std::u32string;
+ using std::u8string;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wstring;
+#endif
+
+ // [string.conversions], numeric conversions
+ using std::stod;
+ using std::stof;
+ using std::stoi;
+ using std::stol;
+ using std::stold;
+ using std::stoll;
+ using std::stoul;
+ using std::stoull;
+ using std::to_string;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::to_wstring;
+#endif
+
+ namespace pmr {
+ using std::pmr::basic_string;
+ using std::pmr::string;
+ using std::pmr::u16string;
+ using std::pmr::u32string;
+ using std::pmr::u8string;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::pmr::wstring;
+#endif
+ } // namespace pmr
+
+ // [basic.string.hash], hash support
+ using std::hash;
+
+ // TODO MODULES is this a bug?
+#if _LIBCPP_STD_VER >= 23
+ using std::operator""s;
+#else
+ inline namespace literals {
+ inline namespace string_literals {
+ // [basic.string.literals], suffix for basic_string literals
+ using std::literals::string_literals::operator""s;
+ } // namespace string_literals
+ } // namespace literals
+#endif
+} // namespace std
+
+// string_view.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [string.view.template], class template basic_string_view
+ using std::basic_string_view;
+
+ namespace ranges {
+ using std::ranges::enable_borrowed_range;
+ using std::ranges::enable_view;
+ } // namespace ranges
+
+ // [string.view.comparison], non-member comparison functions
+ using std::operator==;
+ using std::operator<=>;
+
+ // [string.view.io], inserters and extractors
+ using std::operator<<;
+
+ // basic_string_view typedef-names
+ using std::string_view;
+ using std::u16string_view;
+ using std::u32string_view;
+ using std::u8string_view;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wstring_view;
+#endif
+
+ // [string.view.hash], hash support
+ using std::hash;
+
+ inline namespace literals {
+ inline namespace string_view_literals {
+ // [string.view.literals], suffix for basic_string_view literals
+ using std::literals::string_view_literals::operator""sv;
+ } // namespace string_view_literals
+ } // namespace literals
+} // namespace std
+
+// strstream.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::istrstream;
+ using std::ostrstream;
+ using std::strstream;
+ using std::strstreambuf;
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// syncstream.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifdef _LIBCPP_HAS_YES_SYNCSTREAM
+
+export namespace std {
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION) && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_SYNCSTREAM)
+ using std::basic_syncbuf;
+
+ // [syncstream.syncbuf.special], specialized algorithms
+ using std::swap;
+
+ using std::syncbuf;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wsyncbuf;
+# endif
+ using std::basic_osyncstream;
+
+ using std::osyncstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wosyncstream;
+# endif
+#endif // !defined(_LIBCPP_HAS_NO_LOCALIZATION) && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_SYNCSTREAM)
+} // namespace std
+
+#endif
+
+// system_error.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::error_category;
+ using std::generic_category;
+ using std::system_category;
+
+ using std::error_code;
+ using std::error_condition;
+ using std::system_error;
+
+ using std::is_error_code_enum;
+ using std::is_error_condition_enum;
+
+ using std::errc;
+
+ // [syserr.errcode.nonmembers], non-member functions
+ using std::make_error_code;
+
+ using std::operator<<;
+
+ // [syserr.errcondition.nonmembers], non-member functions
+ using std::make_error_condition;
+
+ // [syserr.compare], comparison operator functions
+ using std::operator==;
+ using std::operator<=>;
+
+ // [syserr.hash], hash support
+ using std::hash;
+
+ // [syserr], system error support
+ using std::is_error_code_enum_v;
+ using std::is_error_condition_enum_v;
+} // namespace std
+
+// text_encoding.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if 0
+# if _LIBCPP_STD_VER >= 23
+ using std::text_encoding;
+
+ // hash support
+ using std::hash;
+# endif // _LIBCPP_STD_VER >= 23
+#endif
+} // namespace std
+
+// thread.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_THREADS
+ // [thread.thread.class], class thread
+ using std::thread;
+
+ using std::swap;
+
+ // [thread.jthread.class], class jthread
+# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN)
+ using std::jthread;
+# endif
+
+ // [thread.thread.this], namespace this_thread
+ namespace this_thread {
+ using std::this_thread::get_id;
+
+ using std::this_thread::sleep_for;
+ using std::this_thread::sleep_until;
+ using std::this_thread::yield;
+ } // namespace this_thread
+
+ // [thread.thread.id]
+ using std::operator==;
+ using std::operator<=>;
+# ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::operator<<;
+# endif // _LIBCPP_HAS_NO_LOCALIZATION
+
+# if _LIBCPP_STD_VER >= 23
+ using std::formatter;
+# endif
+
+ using std::hash;
+#endif // _LIBCPP_HAS_NO_THREADS
+} // namespace std
+
+// tuple.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [tuple.tuple], class template tuple
+ using std::tuple;
+
+ // [tuple.like], concept tuple-like
+
+#if _LIBCPP_STD_VER >= 23
+ // [tuple.common.ref], common_reference related specializations
+ using std::basic_common_reference;
+ using std::common_type;
+#endif
+
+ // [tuple.creation], tuple creation functions
+ using std::ignore;
+
+ using std::forward_as_tuple;
+ using std::make_tuple;
+ using std::tie;
+ using std::tuple_cat;
+
+ // [tuple.apply], calling a function with a tuple of arguments
+ using std::apply;
+
+ using std::make_from_tuple;
+
+ // [tuple.helper], tuple helper classes
+ using std::tuple_element;
+ using std::tuple_size;
+
+ // [tuple.elem], element access
+ using std::get;
+ using std::tuple_element_t;
+
+ // [tuple.rel], relational operators
+ using std::operator==;
+ using std::operator<=>;
+
+ // [tuple.traits], allocator-related traits
+ using std::uses_allocator;
+
+ // [tuple.special], specialized algorithms
+ using std::swap;
+
+ // [tuple.helper], tuple helper classes
+ using std::tuple_size_v;
+} // namespace std
+
+// type_traits.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [meta.help], helper class
+ using std::integral_constant;
+
+ using std::bool_constant;
+ using std::false_type;
+ using std::true_type;
+
+ // [meta.unary.cat], primary type categories
+ using std::is_array;
+ using std::is_class;
+ using std::is_enum;
+ using std::is_floating_point;
+ using std::is_function;
+ using std::is_integral;
+ using std::is_lvalue_reference;
+ using std::is_member_function_pointer;
+ using std::is_member_object_pointer;
+ using std::is_null_pointer;
+ using std::is_pointer;
+ using std::is_rvalue_reference;
+ using std::is_union;
+ using std::is_void;
+
+ // [meta.unary.comp], composite type categories
+ using std::is_arithmetic;
+ using std::is_compound;
+ using std::is_fundamental;
+ using std::is_member_pointer;
+ using std::is_object;
+ using std::is_reference;
+ using std::is_scalar;
+
+ // [meta.unary.prop], type properties
+ using std::is_abstract;
+ using std::is_aggregate;
+ using std::is_const;
+ using std::is_empty;
+ using std::is_final;
+ using std::is_polymorphic;
+ using std::is_standard_layout;
+ using std::is_trivial;
+ using std::is_trivially_copyable;
+ using std::is_volatile;
+
+ using std::is_bounded_array;
+#if _LIBCPP_STD_VER >= 23
+ using std::is_scoped_enum;
+#endif
+ using std::is_signed;
+ using std::is_unbounded_array;
+ using std::is_unsigned;
+
+ using std::is_constructible;
+ using std::is_copy_constructible;
+ using std::is_default_constructible;
+ using std::is_move_constructible;
+
+ using std::is_assignable;
+ using std::is_copy_assignable;
+ using std::is_move_assignable;
+
+ using std::is_swappable;
+ using std::is_swappable_with;
+
+ using std::is_destructible;
+
+ using std::is_trivially_constructible;
+ using std::is_trivially_copy_constructible;
+ using std::is_trivially_default_constructible;
+ using std::is_trivially_move_constructible;
+
+ using std::is_trivially_assignable;
+ using std::is_trivially_copy_assignable;
+ using std::is_trivially_destructible;
+ using std::is_trivially_move_assignable;
+
+ using std::is_nothrow_constructible;
+ using std::is_nothrow_copy_constructible;
+ using std::is_nothrow_default_constructible;
+ using std::is_nothrow_move_constructible;
+
+ using std::is_nothrow_assignable;
+ using std::is_nothrow_copy_assignable;
+ using std::is_nothrow_move_assignable;
+
+ using std::is_nothrow_swappable;
+ using std::is_nothrow_swappable_with;
+
+ using std::is_nothrow_destructible;
+
+ // using std::is_implicit_lifetime;
+
+ using std::has_virtual_destructor;
+
+ using std::has_unique_object_representations;
+
+ // using std::reference_constructs_from_temporary;
+ // using std::reference_converts_from_temporary;
+
+ // [meta.unary.prop.query], type property queries
+ using std::alignment_of;
+ using std::extent;
+ using std::rank;
+
+ // [meta.rel], type relations
+ using std::is_base_of;
+ using std::is_convertible;
+ // using std::is_layout_compatible;
+ using std::is_nothrow_convertible;
+ // using std::is_pointer_interconvertible_base_of;
+ using std::is_same;
+
+ using std::is_invocable;
+ using std::is_invocable_r;
+
+ using std::is_nothrow_invocable;
+ using std::is_nothrow_invocable_r;
+
+ // [meta.trans.cv], const-volatile modifications
+ using std::add_const;
+ using std::add_cv;
+ using std::add_volatile;
+ using std::remove_const;
+ using std::remove_cv;
+ using std::remove_volatile;
+
+ using std::add_const_t;
+ using std::add_cv_t;
+ using std::add_volatile_t;
+ using std::remove_const_t;
+ using std::remove_cv_t;
+ using std::remove_volatile_t;
+
+ // [meta.trans.ref], reference modifications
+ using std::add_lvalue_reference;
+ using std::add_rvalue_reference;
+ using std::remove_reference;
+
+ using std::add_lvalue_reference_t;
+ using std::add_rvalue_reference_t;
+ using std::remove_reference_t;
+
+ // [meta.trans.sign], sign modifications
+ using std::make_signed;
+ using std::make_unsigned;
+
+ using std::make_signed_t;
+ using std::make_unsigned_t;
+
+ // [meta.trans.arr], array modifications
+ using std::remove_all_extents;
+ using std::remove_extent;
+
+ using std::remove_all_extents_t;
+ using std::remove_extent_t;
+
+ // [meta.trans.ptr], pointer modifications
+ using std::add_pointer;
+ using std::remove_pointer;
+
+ using std::add_pointer_t;
+ using std::remove_pointer_t;
+
+ // [meta.trans.other], other transformations
+ using std::basic_common_reference;
+ using std::common_reference;
+ using std::common_type;
+ using std::conditional;
+ using std::decay;
+ using std::enable_if;
+ using std::invoke_result;
+ using std::remove_cvref;
+ using std::type_identity;
+ using std::underlying_type;
+ using std::unwrap_ref_decay;
+ using std::unwrap_reference;
+
+ using std::common_reference_t;
+ using std::common_type_t;
+ using std::conditional_t;
+ using std::decay_t;
+ using std::enable_if_t;
+ using std::invoke_result_t;
+ using std::remove_cvref_t;
+ using std::type_identity_t;
+ using std::underlying_type_t;
+ using std::unwrap_ref_decay_t;
+ using std::unwrap_reference_t;
+ using std::void_t;
+
+ // [meta.logical], logical operator traits
+ using std::conjunction;
+ using std::disjunction;
+ using std::negation;
+
+ // [meta.unary.cat], primary type categories
+ using std::is_array_v;
+ using std::is_class_v;
+ using std::is_enum_v;
+ using std::is_floating_point_v;
+ using std::is_function_v;
+ using std::is_integral_v;
+ using std::is_lvalue_reference_v;
+ using std::is_member_function_pointer_v;
+ using std::is_member_object_pointer_v;
+ using std::is_null_pointer_v;
+ using std::is_pointer_v;
+ using std::is_rvalue_reference_v;
+ using std::is_union_v;
+ using std::is_void_v;
+
+ // [meta.unary.comp], composite type categories
+ using std::is_arithmetic_v;
+ using std::is_compound_v;
+ using std::is_fundamental_v;
+ using std::is_member_pointer_v;
+ using std::is_object_v;
+ using std::is_reference_v;
+ using std::is_scalar_v;
+
+ // [meta.unary.prop], type properties
+ using std::has_unique_object_representations_v;
+ using std::has_virtual_destructor_v;
+ using std::is_abstract_v;
+ using std::is_aggregate_v;
+ using std::is_assignable_v;
+ using std::is_bounded_array_v;
+ using std::is_const_v;
+ using std::is_constructible_v;
+ using std::is_copy_assignable_v;
+ using std::is_copy_constructible_v;
+ using std::is_default_constructible_v;
+ using std::is_destructible_v;
+ using std::is_empty_v;
+ using std::is_final_v;
+ // using std::is_implicit_lifetime_v;
+ using std::is_move_assignable_v;
+ using std::is_move_constructible_v;
+ using std::is_nothrow_assignable_v;
+ using std::is_nothrow_constructible_v;
+ using std::is_nothrow_copy_assignable_v;
+ using std::is_nothrow_copy_constructible_v;
+ using std::is_nothrow_default_constructible_v;
+ using std::is_nothrow_destructible_v;
+ using std::is_nothrow_move_assignable_v;
+ using std::is_nothrow_move_constructible_v;
+ using std::is_nothrow_swappable_v;
+ using std::is_nothrow_swappable_with_v;
+ using std::is_polymorphic_v;
+#if _LIBCPP_STD_VER >= 23
+ using std::is_scoped_enum_v;
+#endif
+ using std::is_signed_v;
+ using std::is_standard_layout_v;
+ using std::is_swappable_v;
+ using std::is_swappable_with_v;
+ using std::is_trivial_v;
+ using std::is_trivially_assignable_v;
+ using std::is_trivially_constructible_v;
+ using std::is_trivially_copy_assignable_v;
+ using std::is_trivially_copy_constructible_v;
+ using std::is_trivially_copyable_v;
+ using std::is_trivially_default_constructible_v;
+ using std::is_trivially_destructible_v;
+ using std::is_trivially_move_assignable_v;
+ using std::is_trivially_move_constructible_v;
+ using std::is_unbounded_array_v;
+ using std::is_unsigned_v;
+ using std::is_volatile_v;
+ // using std::reference_constructs_from_temporary_v;
+ // using std::reference_converts_from_temporary_v;
+
+ // [meta.unary.prop.query], type property queries
+ using std::alignment_of_v;
+ using std::extent_v;
+ using std::rank_v;
+
+ // [meta.rel], type relations
+ using std::is_base_of_v;
+ using std::is_convertible_v;
+ using std::is_invocable_r_v;
+ using std::is_invocable_v;
+ // using std::is_layout_compatible_v;
+ using std::is_nothrow_convertible_v;
+ using std::is_nothrow_invocable_r_v;
+ using std::is_nothrow_invocable_v;
+ // using std::is_pointer_interconvertible_base_of_v;
+ using std::is_same_v;
+
+ // [meta.logical], logical operator traits
+ using std::conjunction_v;
+ using std::disjunction_v;
+ using std::negation_v;
+
+ // [meta.member], member relationships
+ // using std::is_corresponding_member;
+ // using std::is_pointer_interconvertible_with_class;
+
+ // [meta.const.eval], constant evaluation context
+ using std::is_constant_evaluated;
+
+ // [depr.meta.types]
+ using std::aligned_storage;
+ using std::aligned_storage_t;
+ using std::aligned_union;
+ using std::aligned_union_t;
+ using std::is_pod;
+ using std::is_pod_v;
+} // namespace std
+
+// typeindex.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::hash;
+ using std::type_index;
+} // namespace std
+
+// typeinfo.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::bad_cast;
+ using std::bad_typeid;
+ using std::type_info;
+} // namespace std
+
+// unordered_map.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+  // [unord.map], class template unordered_map
+ using std::unordered_map;
+
+  // [unord.multimap], class template unordered_multimap
+ using std::unordered_multimap;
+
+ using std::operator==;
+
+ using std::swap;
+
+  // [unord.map.erasure], erasure for unordered_map
+ using std::erase_if;
+
+ namespace pmr {
+ using std::pmr::unordered_map;
+ using std::pmr::unordered_multimap;
+ } // namespace pmr
+} // namespace std
+
+// unordered_set.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+  // [unord.set], class template unordered_set
+ using std::unordered_set;
+
+  // [unord.multiset], class template unordered_multiset
+ using std::unordered_multiset;
+
+ using std::operator==;
+
+ using std::swap;
+
+  // [unord.set.erasure], erasure for unordered_set
+ using std::erase_if;
+
+ namespace pmr {
+ using std::pmr::unordered_multiset;
+ using std::pmr::unordered_set;
+ } // namespace pmr
+} // namespace std
+
+// utility.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [utility.swap], swap
+ using std::swap;
+
+ // [utility.exchange], exchange
+ using std::exchange;
+
+ // [forward], forward/move
+ using std::forward;
+#if _LIBCPP_STD_VER >= 23
+ using std::forward_like;
+#endif
+ using std::move;
+ using std::move_if_noexcept;
+
+ // [utility.as.const], as_const
+ using std::as_const;
+
+ // [declval], declval
+ using std::declval;
+
+ // [utility.intcmp], integer comparison functions
+ using std::cmp_equal;
+ using std::cmp_not_equal;
+
+ using std::cmp_greater;
+ using std::cmp_greater_equal;
+ using std::cmp_less;
+ using std::cmp_less_equal;
+
+ using std::in_range;
+
+#if _LIBCPP_STD_VER >= 23
+ // [utility.underlying], to_underlying
+ using std::to_underlying;
+
+ // [utility.unreachable], unreachable
+ using std::unreachable;
+#endif // _LIBCPP_STD_VER >= 23
+
+ // [intseq], compile-time integer sequences
+ using std::index_sequence;
+ using std::integer_sequence;
+
+ using std::make_index_sequence;
+ using std::make_integer_sequence;
+
+ using std::index_sequence_for;
+
+ // [pairs], class template pair
+ using std::pair;
+
+#if _LIBCPP_STD_VER >= 23
+ using std::basic_common_reference;
+ using std::common_type;
+#endif
+ // [pairs.spec], pair specialized algorithms
+ using std::operator==;
+ using std::operator<=>;
+
+ using std::make_pair;
+
+ // [pair.astuple], tuple-like access to pair
+ using std::tuple_element;
+ using std::tuple_size;
+
+ using std::get;
+
+ // [pair.piecewise], pair piecewise construction
+ using std::piecewise_construct;
+ using std::piecewise_construct_t;
+
+ // in-place construction
+ using std::in_place;
+ using std::in_place_t;
+
+ using std::in_place_type;
+ using std::in_place_type_t;
+
+ using std::in_place_index;
+ using std::in_place_index_t;
+
+ // [depr.relops]
+ namespace rel_ops {
+ using rel_ops::operator!=;
+ using rel_ops::operator>;
+ using rel_ops::operator<=;
+ using rel_ops::operator>=;
+ } // namespace rel_ops
+} // namespace std
+
+// valarray.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::gslice;
+ using std::gslice_array;
+ using std::indirect_array;
+ using std::mask_array;
+ using std::slice;
+ using std::slice_array;
+ using std::valarray;
+
+ using std::swap;
+
+ using std::operator*;
+ using std::operator/;
+ using std::operator%;
+ using std::operator+;
+ using std::operator-;
+
+ using std::operator^;
+ using std::operator&;
+ using std::operator|;
+
+ using std::operator<<;
+ using std::operator>>;
+
+ using std::operator&&;
+ using std::operator||;
+
+ using std::operator==;
+ using std::operator!=;
+
+ using std::operator<;
+ using std::operator>;
+ using std::operator<=;
+ using std::operator>=;
+
+ using std::abs;
+ using std::acos;
+ using std::asin;
+ using std::atan;
+
+ using std::atan2;
+
+ using std::cos;
+ using std::cosh;
+ using std::exp;
+ using std::log;
+ using std::log10;
+
+ using std::pow;
+
+ using std::sin;
+ using std::sinh;
+ using std::sqrt;
+ using std::tan;
+ using std::tanh;
+
+ using std::begin;
+ using std::end;
+} // namespace std
+
+// variant.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [variant.variant], class template variant
+ using std::variant;
+
+ // [variant.helper], variant helper classes
+ using std::variant_alternative;
+ using std::variant_npos;
+ using std::variant_size;
+ using std::variant_size_v;
+
+ // [variant.get], value access
+ using std::get;
+ using std::get_if;
+ using std::holds_alternative;
+ using std::variant_alternative_t;
+
+ // [variant.relops], relational operators
+ using std::operator==;
+ using std::operator!=;
+ using std::operator<;
+ using std::operator>;
+ using std::operator<=;
+ using std::operator>=;
+ using std::operator<=>;
+
+ // [variant.visit], visitation
+ using std::visit;
+
+ // [variant.monostate], class monostate
+ using std::monostate;
+
+ // [variant.specalg], specialized algorithms
+ using std::swap;
+
+ // [variant.bad.access], class bad_variant_access
+ using std::bad_variant_access;
+
+ // [variant.hash], hash support
+ using std::hash;
+} // namespace std
+
+// vector.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [vector], class template vector
+ using std::vector;
+
+ using std::operator==;
+ using std::operator<=>;
+
+ using std::swap;
+
+ // [vector.erasure], erasure
+ using std::erase;
+ using std::erase_if;
+
+ namespace pmr {
+ using std::pmr::vector;
+ }
+
+ // hash support
+ using std::hash;
+
+#if _LIBCPP_STD_VER >= 23
+ // [vector.bool.fmt], formatter specialization for vector<bool>
+ using std::formatter;
+#endif
+} // namespace std
+
+// version.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // This module exports nothing.
+} // namespace std
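For reference, the .inc fragments above form the exported surface of libc++'s `std' module; a consumer translation unit only needs to import the module. A minimal sketch (assumes a toolchain and standard library build with std module support; not part of the patch):

  import std;

  int main ()
  {
    std::vector<int> v {1, 2, 3};
    return static_cast<int> (v.size ()) - 3; // 0 on success.
  }
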
diff --git a/libbuild2/cc/target.cxx b/libbuild2/cc/target.cxx
index b17e1ef..6a518dd 100644
--- a/libbuild2/cc/target.cxx
+++ b/libbuild2/cc/target.cxx
@@ -21,11 +21,10 @@ namespace build2
nullptr,
nullptr,
&target_search,
- false
+ target_type::flag::none
};
extern const char h_ext_def[] = "h";
-
const target_type h::static_type
{
"h",
@@ -36,11 +35,10 @@ namespace build2
&target_pattern_var<h_ext_def>,
nullptr,
&file_search,
- false
+ target_type::flag::none
};
extern const char c_ext_def[] = "c";
-
const target_type c::static_type
{
"c",
@@ -51,11 +49,51 @@ namespace build2
&target_pattern_var<c_ext_def>,
nullptr,
&file_search,
- false
+ target_type::flag::none
};
- extern const char pc_ext[] = "pc"; // VC14 rejects constexpr.
+ extern const char m_ext_def[] = "m";
+ const target_type m::static_type
+ {
+ "m",
+ &cc::static_type,
+ &target_factory<m>,
+ nullptr, /* fixed_extension */
+ &target_extension_var<m_ext_def>,
+ &target_pattern_var<m_ext_def>,
+ nullptr,
+ &file_search,
+ target_type::flag::none
+ };
+ extern const char S_ext_def[] = "S";
+ const target_type S::static_type
+ {
+ "S",
+ &cc::static_type,
+ &target_factory<S>,
+ nullptr, /* fixed_extension */
+ &target_extension_var<S_ext_def>,
+ &target_pattern_var<S_ext_def>,
+ nullptr,
+ &file_search,
+ target_type::flag::none
+ };
+
+ const target_type c_inc::static_type
+ {
+ "c_inc",
+ &cc::static_type,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ &target_search,
+ target_type::flag::none
+ };
+
+ extern const char pc_ext[] = "pc"; // VC14 rejects constexpr.
const target_type pc::static_type
{
"pc",
@@ -66,11 +104,10 @@ namespace build2
&target_pattern_fix<pc_ext>,
&target_print_0_ext_verb, // Fixed extension, no use printing.
&file_search,
- false
+ target_type::flag::none
};
extern const char pca_ext[] = "static.pc"; // VC14 rejects constexpr.
-
const target_type pca::static_type
{
"pca",
@@ -81,11 +118,10 @@ namespace build2
&target_pattern_fix<pca_ext>,
&target_print_0_ext_verb, // Fixed extension, no use printing.
&file_search,
- false
+ target_type::flag::none
};
extern const char pcs_ext[] = "shared.pc"; // VC14 rejects constexpr.
-
const target_type pcs::static_type
{
"pcs",
@@ -96,7 +132,7 @@ namespace build2
&target_pattern_fix<pcs_ext>,
&target_print_0_ext_verb, // Fixed extension, no use printing.
&file_search,
- false
+ target_type::flag::none
};
}
}
diff --git a/libbuild2/cc/target.hxx b/libbuild2/cc/target.hxx
index 7067421..01f2d6e 100644
--- a/libbuild2/cc/target.hxx
+++ b/libbuild2/cc/target.hxx
@@ -23,11 +23,14 @@ namespace build2
class LIBBUILD2_CC_SYMEXPORT cc: public file
{
public:
- using file::file;
+ cc (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const = 0;
};
// There is hardly a c-family compilation without a C header inclusion.
@@ -36,11 +39,14 @@ namespace build2
class LIBBUILD2_CC_SYMEXPORT h: public cc
{
public:
- using cc::cc;
+ h (context& c, dir_path d, dir_path o, string n)
+ : cc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
// This one we define in cc but the target type is only registered by the
@@ -52,11 +58,65 @@ namespace build2
class LIBBUILD2_CC_SYMEXPORT c: public cc
{
public:
- using cc::cc;
+ c (context& ctx, dir_path d, dir_path o, string n)
+ : cc (ctx, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
+
+ public:
+ static const target_type static_type;
+ };
+
+ // Objective-C source file (the same rationale for having it here as for
+ // c{} above).
+ //
+ class LIBBUILD2_CC_SYMEXPORT m: public cc
+ {
+ public:
+ m (context& c, dir_path d, dir_path o, string n)
+ : cc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
+
+ public:
+ static const target_type static_type;
+ };
+
+ // Assembler with C preprocessor source file (the same rationale for
+ // having it here as for c{} above).
+ //
+ class LIBBUILD2_CC_SYMEXPORT S: public cc
+ {
+ public:
+ S (context& c, dir_path d, dir_path o, string n)
+ : cc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
+
+ public:
+ static const target_type static_type;
+ };
+
+ // This is an abstract base target for deriving additional targets that
+ // can be #include'd in C translation units (the same rationale for having
+ // it here as for c{} above). In particular, only such targets will be
+ // considered to reverse-lookup extensions to target types (see
+ // dyndep_rule::map_extension() for background).
+ //
+ class LIBBUILD2_CC_SYMEXPORT c_inc: public cc
+ {
+ public:
+ c_inc (context& c, dir_path d, dir_path o, string n)
+ : cc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
// pkg-config file targets.
@@ -64,31 +124,40 @@ namespace build2
class LIBBUILD2_CC_SYMEXPORT pc: public file // .pc (common)
{
public:
- using file::file;
+ pc (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_CC_SYMEXPORT pca: public pc // .static.pc
{
public:
- using pc::pc;
+ pca (context& c, dir_path d, dir_path o, string n)
+ : pc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_CC_SYMEXPORT pcs: public pc // .shared.pc
{
public:
- using pc::pc;
+ pcs (context& c, dir_path d, dir_path o, string n)
+ : pc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
}
}
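For illustration, any further target type added under this scheme would follow the same pattern as the classes above: set dynamic_type in the constructor instead of overriding a virtual function. A sketch with a hypothetical x{} target type (not part of the patch):

  class LIBBUILD2_CC_SYMEXPORT x: public cc
  {
  public:
    x (context& c, dir_path d, dir_path o, string n)
      : cc (c, move (d), move (o), move (n))
    {
      dynamic_type = &static_type;
    }

  public:
    static const target_type static_type; // Would be defined in target.cxx.
  };
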
diff --git a/libbuild2/cc/types.cxx b/libbuild2/cc/types.cxx
index 8ee4fa9..c6cfae9 100644
--- a/libbuild2/cc/types.cxx
+++ b/libbuild2/cc/types.cxx
@@ -6,6 +6,7 @@
#include <libbuild2/cc/utility.hxx>
using namespace std;
+using namespace butl;
namespace build2
{
@@ -123,6 +124,8 @@ namespace build2
size_t importable_headers::
insert_angle_pattern (const dir_paths& sys_hdr_dirs, const string& pat)
{
+ tracer trace ("importable_headers::insert_angle_pattern");
+
assert (pat.front () == '<' && pat.back () == '>' && path_pattern (pat));
// First see if it has already been inserted.
@@ -172,7 +175,17 @@ namespace build2
try
{
- path_search (f, process, dir);
+ path_search (
+ f,
+ process,
+ dir,
+ path_match_flags::follow_symlinks,
+ [&trace] (const dir_entry& de)
+ {
+ l5 ([&]{trace << "skipping inaccessible/dangling entry "
+ << de.base () / de.path ();});
+ return true;
+ });
}
catch (const system_error& e)
{
diff --git a/libbuild2/cc/types.hxx b/libbuild2/cc/types.hxx
index c5b35f5..93f31bc 100644
--- a/libbuild2/cc/types.hxx
+++ b/libbuild2/cc/types.hxx
@@ -175,6 +175,10 @@ namespace build2
const target_type& bmi;
const target_type& hbmi;
};
+
+ // "Unhide" operator<< from the build2 namespace.
+ //
+ using build2::operator<<;
}
}
diff --git a/libbuild2/cc/windows-rpath.cxx b/libbuild2/cc/windows-rpath.cxx
index 2d90ace..eb62ad1 100644
--- a/libbuild2/cc/windows-rpath.cxx
+++ b/libbuild2/cc/windows-rpath.cxx
@@ -45,6 +45,8 @@ namespace build2
// Return the greatest (newest) timestamp of all the DLLs that we will be
// adding to the assembly or timestamp_nonexistent if there aren't any.
//
+ // Note: called during the execute phase.
+ //
timestamp link_rule::
windows_rpath_timestamp (const file& t,
const scope& bs,
@@ -88,7 +90,18 @@ namespace build2
//
if (l->is_a<libs> () && !l->path ().empty ()) // Also covers binless.
{
- timestamp t (l->load_mtime ());
+ // Handle the case where the library is a member of a group (for
+ // example, people are trying to hack something up with pre-built
+ // libraries; see GH issue #366).
+ //
+ timestamp t;
+ if (l->group_state (action () /* inner */))
+ {
+ t = l->group->is_a<mtime_target> ()->mtime ();
+ assert (t != timestamp_unknown);
+ }
+ else
+ t = l->load_mtime ();
if (t > r)
r = t;
@@ -128,7 +141,9 @@ namespace build2
library_cache lib_cache;
for (const prerequisite_target& pt: t.prerequisite_targets[a])
{
- if (pt.adhoc || pt == nullptr)
+ // Note: during execute so check for ad hoc first to avoid data races.
+ //
+ if (pt.adhoc () || pt == nullptr)
continue;
bool la;
@@ -139,7 +154,9 @@ namespace build2
( f = pt->is_a<libs> ()))
process_libraries (a, bs, li, sys_lib_dirs,
*f, la, pt.data,
- imp, lib, nullptr, true /* self */,
+ imp, lib, nullptr,
+ true /* self */,
+ false /* proc_opt_group */,
&lib_cache);
}
@@ -253,7 +270,9 @@ namespace build2
library_cache lib_cache;
for (const prerequisite_target& pt: t.prerequisite_targets[a])
{
- if (pt.adhoc || pt == nullptr)
+ // Note: during execute so check for ad hoc first to avoid data races.
+ //
+ if (pt.adhoc () || pt == nullptr)
continue;
bool la;
@@ -264,7 +283,9 @@ namespace build2
( f = pt->is_a<libs> ()))
process_libraries (a, bs, li, sys_lib_dirs,
*f, la, pt.data,
- imp, lib, nullptr, true /* self */,
+ imp, lib, nullptr,
+ true /* self */,
+ false /* proc_opt_group */,
&lib_cache);
}
@@ -361,11 +382,16 @@ namespace build2
// of the same amalgamation. This way if the amalgamation is moved
// as a whole, the links will remain valid.
//
+ // Note: mkanylink() is from libbutl and thus doesn't handle the
+ // dry-run mode.
+ //
try
{
- switch (mkanylink (f, l,
- true /* copy */,
- f.sub (as.out_path ()) /* relative */))
+ switch (as.ctx.dry_run
+ ? entry_type::symlink
+ : mkanylink (f, l,
+ true /* copy */,
+ f.sub (as.out_path ()) /* relative */))
{
case entry_type::regular: print ("cp"); break;
case entry_type::symlink: print ("ln -s"); break;
diff --git a/libbuild2/cli/buildfile b/libbuild2/cli/buildfile
new file mode 100644
index 0000000..9b6e4eb
--- /dev/null
+++ b/libbuild2/cli/buildfile
@@ -0,0 +1,71 @@
+# file : libbuild2/cli/buildfile
+# license : MIT; see accompanying LICENSE file
+
+# NOTE: shared imports should go into root.build.
+#
+include ../
+impl_libs = ../lib{build2} # Implied interface dependency.
+
+include ../cxx/
+intf_libs = ../cxx/lib{build2-cxx}
+
+./: lib{build2-cli}: libul{build2-cli}: {hxx ixx txx cxx}{** -**.test...} \
+ $intf_libs $impl_libs
+
+# Unit tests.
+#
+exe{*.test}:
+{
+ test = true
+ install = false
+}
+
+for t: cxx{**.test...}
+{
+ d = $directory($t)
+ n = $name($t)...
+
+ ./: $d/exe{$n}: $t $d/{hxx ixx txx}{+$n} $d/testscript{+$n}
+ $d/exe{$n}: libul{build2-cli}: bin.whole = false
+}
+
+# Build options.
+#
+obja{*}: cxx.poptions += -DLIBBUILD2_CLI_STATIC_BUILD
+objs{*}: cxx.poptions += -DLIBBUILD2_CLI_SHARED_BUILD
+
+# Export options.
+#
+lib{build2-cli}:
+{
+ cxx.export.poptions = "-I$out_root" "-I$src_root"
+ cxx.export.libs = $intf_libs
+}
+
+liba{build2-cli}: cxx.export.poptions += -DLIBBUILD2_CLI_STATIC
+libs{build2-cli}: cxx.export.poptions += -DLIBBUILD2_CLI_SHARED
+
+# For pre-releases use the complete version to make sure they cannot be used
+# in place of another pre-release or the final version. See the version module
+# for details on the version.* variable values.
+#
+# And because this is a build system module, we also embed the same value as
+# the interface version (note that we cannot use build.version.interface for
+# bundled modules because we could be built with a different version of the
+# build system).
+#
+ver = ($version.pre_release \
+ ? "$version.project_id" \
+ : "$version.major.$version.minor")
+
+lib{build2-cli}: bin.lib.version = @"-$ver"
+libs{build2-cli}: bin.lib.load_suffix = "-$ver"
+
+# Install into the libbuild2/cli/ subdirectory of, say, /usr/include/
+# recreating subdirectories.
+#
+{hxx ixx txx}{*}:
+{
+ install = include/libbuild2/cli/
+ install.subdirs = true
+}
diff --git a/libbuild2/cli/export.hxx b/libbuild2/cli/export.hxx
new file mode 100644
index 0000000..67c1eb9
--- /dev/null
+++ b/libbuild2/cli/export.hxx
@@ -0,0 +1,37 @@
+// file : libbuild2/cli/export.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#pragma once
+
+// Normally we don't export class templates (but do complete specializations),
+// inline functions, and classes with only inline member functions. Exporting
+// classes that inherit from non-exported/imported bases (e.g., std::string)
+// will end up badly. The only known workarounds are to not inherit or to not
+// export. Also, MinGW GCC doesn't like seeing non-exported functions being
+// used before their inline definition. The workaround is to reorder code. In
+// the end it's all trial and error.
+
+#if defined(LIBBUILD2_CLI_STATIC) // Using static.
+# define LIBBUILD2_CLI_SYMEXPORT
+#elif defined(LIBBUILD2_CLI_STATIC_BUILD) // Building static.
+# define LIBBUILD2_CLI_SYMEXPORT
+#elif defined(LIBBUILD2_CLI_SHARED) // Using shared.
+# ifdef _WIN32
+# define LIBBUILD2_CLI_SYMEXPORT __declspec(dllimport)
+# else
+# define LIBBUILD2_CLI_SYMEXPORT
+# endif
+#elif defined(LIBBUILD2_CLI_SHARED_BUILD) // Building shared.
+# ifdef _WIN32
+# define LIBBUILD2_CLI_SYMEXPORT __declspec(dllexport)
+# else
+# define LIBBUILD2_CLI_SYMEXPORT
+# endif
+#else
+// If none of the above macros are defined, then we assume we are being used
+// by some third-party build system that cannot/doesn't signal the library
+// type. Note that this fallback works for both static and shared but in case
+// of shared will be sub-optimal compared to having dllimport.
+//
+# define LIBBUILD2_CLI_SYMEXPORT // Using static or shared.
+#endif
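This macro is then attached to anything that must remain visible across the shared library boundary; in this patch that is the exported rule class and the module entry point (see rule.hxx and init.hxx below). A minimal sketch of such a declaration:

  #include <libbuild2/module.hxx>

  #include <libbuild2/cli/export.hxx>

  namespace build2
  {
    namespace cli
    {
      extern "C" LIBBUILD2_CLI_SYMEXPORT const module_functions*
      build2_cli_load (); // Build system module entry point.
    }
  }
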
diff --git a/libbuild2/cli/init.cxx b/libbuild2/cli/init.cxx
new file mode 100644
index 0000000..581fdaf
--- /dev/null
+++ b/libbuild2/cli/init.cxx
@@ -0,0 +1,287 @@
+// file : libbuild2/cli/init.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <libbuild2/cli/init.hxx>
+
+#include <libbuild2/file.hxx>
+#include <libbuild2/scope.hxx>
+#include <libbuild2/target.hxx>
+#include <libbuild2/variable.hxx>
+#include <libbuild2/diagnostics.hxx>
+
+#include <libbuild2/config/utility.hxx>
+
+#include <libbuild2/cxx/target.hxx>
+
+#include <libbuild2/cli/rule.hxx>
+#include <libbuild2/cli/module.hxx>
+#include <libbuild2/cli/target.hxx>
+
+namespace build2
+{
+ namespace cli
+ {
+ // Remaining issues/semantics change:
+ //
+ // @@ Unconfigured caching.
+ //
+ // @@ Default-found cli used to result in config.cli=cli and now it's just
+ // omitted (and default-not-found -- in config.cli.configured=false).
+ //
+ // - Writing any default will take precedence over config.import.cli.
+ // In fact, this duality is a bigger problem: if we have a config
+ // that uses config.cli there is no way to reconfigure it to use
+ // config.import.cli.
+ //
+ // - We could have saved it commented.
+ //
+ // - We could do this at the module level only since we also have
+ // config.cli.options?
+ //
+ // - Note that in the CLI compiler itself we now rely on default cli
+    //      being NULL/undefined. So if saving, it should probably be commented
+ // out. BUT: it will still be defined, so will need to be defined
+ // NULL. Note also that long term the CLI compiler will not use the
+ // module relying on an ad hoc recipe instead.
+ //
+ // ! Maybe reserving NULL (instead of making it the same as NULL) for
+ // this "configured to default" state and saving commented is not a
+ // bad idea. Feels right to have some marker in config.build that
+ // things are in effect. And I believe if config.import.cli is
+ // specified, it will just be dropped.
+
+ bool
+ guess_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool opt,
+ module_init_extra& extra)
+ {
+ tracer trace ("cli::guess_init");
+ l5 ([&]{trace << "for " << rs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "cli.guess module must be loaded in project root";
+
+ // Adjust module config.build save priority (code generator).
+ //
+ config::save_module (rs, "cli", 150);
+
+ // Enter metadata variables.
+ //
+ // They are all qualified so go straight for the public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
+
+ auto& v_ver (vp.insert<string> ("cli.version"));
+ auto& v_sum (vp.insert<string> ("cli.checksum"));
+
+ // Import the CLI compiler target.
+ //
+ // Note that the special config.cli=false value (recognized by the
+ // import machinery) is treated as an explicit request to leave the
+ // module unconfigured.
+ //
+ bool new_cfg (false);
+ import_result<exe> ir (
+ import_direct<exe> (
+ new_cfg,
+ rs,
+ name ("cli", dir_path (), "exe", "cli"), // cli%exe{cli}
+ true /* phase2 */,
+ opt,
+ true /* metadata */,
+ loc,
+ "module load"));
+
+ const exe* tgt (ir.target);
+
+ // Extract metadata.
+ //
+ auto* ver (tgt != nullptr ? &cast<string> (tgt->vars[v_ver]) : nullptr);
+ auto* sum (tgt != nullptr ? &cast<string> (tgt->vars[v_sum]) : nullptr);
+
+ // Print the report.
+ //
+ // If this is a configuration with new values, then print the report
+ // at verbosity level 2 and up (-v).
+ //
+ if (verb >= (new_cfg ? 2 : 3))
+ {
+ diag_record dr (text);
+ dr << "cli " << project (rs) << '@' << rs << '\n';
+
+ if (tgt != nullptr)
+ dr << " cli " << ir << '\n'
+ << " version " << *ver << '\n'
+ << " checksum " << *sum;
+ else
+ dr << " cli " << "not found, leaving unconfigured";
+ }
+
+ if (tgt == nullptr)
+ return false;
+
+ // The cli variable (untyped) is an imported compiler target name.
+ //
+ rs.assign ("cli") = move (ir.name);
+ rs.assign (v_sum) = *sum;
+ rs.assign (v_ver) = *ver;
+
+ {
+ standard_version v (*ver);
+
+ rs.assign<uint64_t> ("cli.version.number") = v.version;
+ rs.assign<uint64_t> ("cli.version.major") = v.major ();
+ rs.assign<uint64_t> ("cli.version.minor") = v.minor ();
+ rs.assign<uint64_t> ("cli.version.patch") = v.patch ();
+ }
+
+ // Cache some values in the module for easier access in the rule.
+ //
+ extra.set_module (new module (data {*tgt, *sum}));
+
+ return true;
+ }
+
+ bool
+ config_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool opt,
+ module_init_extra& extra)
+ {
+ tracer trace ("cli::config_init");
+ l5 ([&]{trace << "for " << rs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "cli.config module must be loaded in project root";
+
+ // Load cli.guess and share its module instance as ours.
+ //
+ if (optional<shared_ptr<build2::module>> r = load_module (
+ rs, rs, "cli.guess", loc, opt, extra.hints))
+ {
+ extra.module = *r;
+ }
+ else
+ {
+ // This can happen if someone already optionally loaded cli.guess
+ // and it has failed to configure.
+ //
+ if (!opt)
+ fail (loc) << "cli could not be configured" <<
+ info << "re-run with -V for more information";
+
+ return false;
+ }
+
+ // Configuration.
+ //
+ using config::append_config;
+
+ // config.cli.options
+ //
+ // Note that we merge it into the corresponding cli.* variable.
+ //
+ append_config<strings> (rs, rs, "cli.options", nullptr);
+
+ return true;
+ }
+
+ bool
+ init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool opt,
+ module_init_extra& extra)
+ {
+ tracer trace ("cli::init");
+ l5 ([&]{trace << "for " << rs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "cli module must be loaded in project root";
+
+ // Make sure the cxx module has been loaded since we need its targets
+ // types (?xx{}). Note that we don't try to load it ourselves because of
+ // the non-trivial variable merging semantics. So it is better to let
+ // the user load cxx explicitly. @@ Not sure the reason still holds
+ // though it might still make sense to expect the user to load cxx.
+ //
+ if (!cast_false<bool> (rs["cxx.loaded"]))
+ fail (loc) << "cxx module must be loaded before cli";
+
+ // Load cli.config and get its module instance.
+ //
+ if (optional<shared_ptr<build2::module>> r = load_module (
+ rs, rs, "cli.config", loc, opt, extra.hints))
+ {
+ extra.module = *r;
+ }
+ else
+ {
+ // This can happen if someone already optionally loaded cli.config
+ // and it has failed to configure.
+ //
+ if (!opt)
+ fail (loc) << "cli could not be configured" <<
+ info << "re-run with -V for more information";
+
+ return false;
+ }
+
+ auto& m (extra.module_as<module> ());
+
+ // Register target types.
+ //
+ rs.insert_target_type<cli> ();
+ rs.insert_target_type<cli_cxx> ();
+
+ // Register our rules.
+ //
+ // Other rules (e.g., cc::compile) may need to have the group members
+ // resolved/linked up. Looks like a general pattern: groups should
+ // resolve on *(update).
+ {
+ auto reg = [&rs, &m] (meta_operation_id mid, operation_id oid)
+ {
+ rs.insert_rule<cli_cxx> (mid, oid, "cli.compile", m);
+ rs.insert_rule<cxx::hxx> (mid, oid, "cli.compile", m);
+ rs.insert_rule<cxx::cxx> (mid, oid, "cli.compile", m);
+ rs.insert_rule<cxx::ixx> (mid, oid, "cli.compile", m);
+ };
+
+ reg (0 /* wildcard */, update_id);
+ reg (perform_id, clean_id);
+ }
+
+ return true;
+ }
+
+ static const module_functions mod_functions[] =
+ {
+ // NOTE: don't forget to also update the documentation in init.hxx if
+ // changing anything here.
+
+ {"cli.guess", nullptr, guess_init},
+ {"cli.config", nullptr, config_init},
+ {"cli", nullptr, init},
+ {nullptr, nullptr, nullptr}
+ };
+
+ const module_functions*
+ build2_cli_load ()
+ {
+ return mod_functions;
+ }
+ }
+}
diff --git a/libbuild2/cli/init.hxx b/libbuild2/cli/init.hxx
new file mode 100644
index 0000000..6d23795
--- /dev/null
+++ b/libbuild2/cli/init.hxx
@@ -0,0 +1,31 @@
+// file : libbuild2/cli/init.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef LIBBUILD2_CLI_INIT_HXX
+#define LIBBUILD2_CLI_INIT_HXX
+
+#include <libbuild2/types.hxx>
+#include <libbuild2/utility.hxx>
+
+#include <libbuild2/module.hxx>
+
+#include <libbuild2/cli/export.hxx>
+
+namespace build2
+{
+ namespace cli
+ {
+ // Module `cli` does not require bootstrapping.
+ //
+ // Submodules:
+ //
+ // `cli.guess` -- set variables describing the compiler.
+ // `cli.config` -- load `cli.guess` and set the rest of the variables.
+ // `cli` -- load `cli.config` and register targets and rules.
+ //
+ extern "C" LIBBUILD2_CLI_SYMEXPORT const module_functions*
+ build2_cli_load ();
+ }
+}
+
+#endif // LIBBUILD2_CLI_INIT_HXX
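For context, a project consumes this as a regular build system module; a hypothetical usage sketch (target and file names are made up, only the `--suppress-inline' option is taken from the rule below):

  # root.build
  #
  # cxx must be loaded before cli (see init.cxx above).
  #
  using cxx
  using cli

  # buildfile
  #
  exe{hello}: {hxx cxx}{*} cli{options}

  # For example, omit the generated ixx{} member.
  #
  cli.options += --suppress-inline
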
diff --git a/libbuild2/cli/module.hxx b/libbuild2/cli/module.hxx
new file mode 100644
index 0000000..ba10540
--- /dev/null
+++ b/libbuild2/cli/module.hxx
@@ -0,0 +1,30 @@
+// file : libbuild2/cli/module.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef LIBBUILD2_CLI_MODULE_HXX
+#define LIBBUILD2_CLI_MODULE_HXX
+
+#include <libbuild2/types.hxx>
+#include <libbuild2/utility.hxx>
+
+#include <libbuild2/module.hxx>
+
+#include <libbuild2/cli/rule.hxx>
+
+namespace build2
+{
+ namespace cli
+ {
+ class module: public build2::module,
+ public virtual data,
+ public compile_rule
+ {
+ public:
+ explicit
+ module (data&& d)
+ : data (move (d)), compile_rule (move (d)) {}
+ };
+ }
+}
+
+#endif // LIBBUILD2_CLI_MODULE_HXX
diff --git a/libbuild2/cli/rule.cxx b/libbuild2/cli/rule.cxx
new file mode 100644
index 0000000..996ca51
--- /dev/null
+++ b/libbuild2/cli/rule.cxx
@@ -0,0 +1,340 @@
+// file : libbuild2/cli/rule.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <libbuild2/cli/rule.hxx>
+
+#include <libbuild2/depdb.hxx>
+#include <libbuild2/scope.hxx>
+#include <libbuild2/target.hxx>
+#include <libbuild2/context.hxx>
+#include <libbuild2/algorithm.hxx>
+#include <libbuild2/filesystem.hxx>
+#include <libbuild2/diagnostics.hxx>
+
+#include <libbuild2/cli/target.hxx>
+
+namespace build2
+{
+ namespace cli
+ {
+ // Figure out if name contains stem and, optionally, calculate prefix and
+ // suffix.
+ //
+ static bool
+ match_stem (const string& name, const string& stem,
+ string* prefix = nullptr, string* suffix = nullptr)
+ {
+ size_t p (name.find (stem));
+
+ if (p != string::npos)
+ {
+ if (prefix != nullptr)
+ prefix->assign (name, 0, p);
+
+ if (suffix != nullptr)
+ suffix->assign (name, p + stem.size (), string::npos);
+
+ return true;
+ }
+
+ return false;
+ }
+
+ bool compile_rule::
+ match (action a, target& t) const
+ {
+ tracer trace ("cli::compile_rule::match");
+
+ // Find the .cli source file.
+ //
+ auto find = [&trace, a, &t] (auto&& r) -> optional<prerequisite_member>
+ {
+ for (prerequisite_member p: r)
+ {
+ // If excluded or ad hoc, then don't factor it into our tests.
+ //
+ if (include (a, t, p) != include_type::normal)
+ continue;
+
+ if (p.is_a<cli> ())
+ {
+            // Check that the stem matches.
+ //
+ if (match_stem (t.name, p.name ()))
+ return p;
+
+ l4 ([&]{trace << ".cli file stem '" << p.name () << "' "
+ << "doesn't match target " << t;});
+ }
+ }
+
+ return nullopt;
+ };
+
+ if (cli_cxx* pt = t.is_a<cli_cxx> ())
+ {
+ // The cli.cxx{} group.
+ //
+ cli_cxx& t (*pt);
+
+ // See if we have a .cli source file.
+ //
+ if (!find (group_prerequisite_members (a, t)))
+ {
+ l4 ([&]{trace << "no .cli source file for target " << t;});
+ return false;
+ }
+
+ // Figure out the member list.
+ //
+ // At this stage, no further changes to cli.options are possible and
+ // we can determine whether the --suppress-inline option is present.
+ //
+ // Passing the group as a "reference target" is a bit iffy,
+ // conceptually.
+ //
+ t.h = &search<cxx::hxx> (t, t.dir, t.out, t.name);
+ t.c = &search<cxx::cxx> (t, t.dir, t.out, t.name);
+ t.i = find_option ("--suppress-inline", t, "cli.options")
+ ? nullptr
+ : &search<cxx::ixx> (t, t.dir, t.out, t.name);
+
+ return true;
+ }
+ else
+ {
+ // One of the ?xx{} members.
+ //
+
+ // Check if there is a corresponding cli.cxx{} group.
+ //
+ const cli_cxx* g (t.ctx.targets.find<cli_cxx> (t.dir, t.out, t.name));
+
+ // If not or if it has no prerequisites (happens when we use it to
+ // set cli.options) and this target has a cli{} prerequisite, then
+ // synthesize the dependency.
+ //
+ if (g == nullptr || !g->has_prerequisites ())
+ {
+ if (optional<prerequisite_member> p = find (
+ prerequisite_members (a, t)))
+ {
+ if (g == nullptr)
+ g = &t.ctx.targets.insert<cli_cxx> (t.dir, t.out, t.name, trace);
+
+ prerequisites ps;
+ ps.push_back (p->as_prerequisite ());
+ g->prerequisites (move (ps));
+ }
+ }
+
+ if (g == nullptr)
+ return false;
+
+ // For ixx{}, verify it is part of the group (i.e., not disabled
+ // via --suppress-inline).
+ //
+ if (t.is_a<cxx::ixx> () &&
+ find_option ("--suppress-inline", *g, "cli.options"))
+ return false;
+
+ t.group = g;
+ return true;
+ }
+ }
+
+ recipe compile_rule::
+ apply (action a, target& xt) const
+ {
+ if (cli_cxx* pt = xt.is_a<cli_cxx> ())
+ {
+ cli_cxx& t (*pt);
+
+ // Derive file names for the members.
+ //
+ t.h->derive_path ();
+ t.c->derive_path ();
+ if (t.i != nullptr)
+ t.i->derive_path ();
+
+ // Inject dependency on the output directory.
+ //
+ inject_fsdir (a, t);
+
+ // Match prerequisites.
+ //
+ match_prerequisite_members (a, t);
+
+ // For update inject dependency on the CLI compiler target.
+ //
+ if (a == perform_update_id)
+ inject (a, t, ctgt);
+
+ switch (a)
+ {
+ case perform_update_id: return [this] (action a, const target& t)
+ {
+ return perform_update (a, t);
+ };
+ case perform_clean_id: return &perform_clean_group_depdb;
+ default: return noop_recipe; // Configure/dist update.
+ }
+ }
+ else
+ {
+ const cli_cxx& g (xt.group->as<cli_cxx> ());
+ match_sync (a, g);
+ return group_recipe; // Execute the group's recipe.
+ }
+ }
+
+ static void
+ append_extension (cstrings& args,
+ const path_target& t,
+ const char* option,
+ const char* default_extension)
+ {
+ const string* e (t.ext ());
+ assert (e != nullptr); // Should have been figured out in apply().
+
+ if (*e != default_extension)
+ {
+ // CLI needs the extension with the leading dot (unless it is empty)
+ // while we store the extension without. But if there is an extension,
+ // then we can get it (with the dot) from the file name.
+ //
+ args.push_back (option);
+ args.push_back (e->empty ()
+ ? e->c_str ()
+ : t.path ().extension_cstring () - 1);
+ }
+ }
+
+ target_state compile_rule::
+ perform_update (action a, const target& xt) const
+ {
+ tracer trace ("cli::compile_rule::perform_update");
+
+ // The rule has been matched which means the members should be resolved
+ // and paths assigned. We use the header file as our "target path" for
+ // timestamp, depdb, etc.
+ //
+ const cli_cxx& t (xt.as<cli_cxx> ());
+ const path& tp (t.h->path ());
+
+ context& ctx (t.ctx);
+
+ // Update prerequisites and determine if any relevant ones render us
+ // out-of-date. Note that currently we treat all the prerequisites as
+ // potentially affecting the result (think prologues/epilogues, CLI
+ // compiler target itself, etc).
+ //
+ timestamp mt (t.load_mtime (tp));
+ auto pr (execute_prerequisites<cli> (a, t, mt));
+
+ bool update (!pr.first);
+ target_state ts (update ? target_state::changed : *pr.first);
+
+ const cli& s (pr.second);
+
+ // We use depdb to track changes to the .cli file name, options,
+ // compiler, etc.
+ //
+ depdb dd (tp + ".d");
+ {
+ // First should come the rule name/version.
+ //
+ if (dd.expect ("cli.compile 1") != nullptr)
+ l4 ([&]{trace << "rule mismatch forcing update of " << t;});
+
+ // Then the compiler checksum.
+ //
+ if (dd.expect (csum) != nullptr)
+ l4 ([&]{trace << "compiler mismatch forcing update of " << t;});
+
+ // Then the options checksum.
+ //
+ sha256 cs;
+ append_options (cs, t, "cli.options");
+
+ if (dd.expect (cs.string ()) != nullptr)
+ l4 ([&]{trace << "options mismatch forcing update of " << t;});
+
+ // Finally the .cli input file.
+ //
+ if (dd.expect (s.path ()) != nullptr)
+ l4 ([&]{trace << "input file mismatch forcing update of " << t;});
+ }
+
+ // Update if depdb mismatch.
+ //
+ if (dd.writing () || dd.mtime > mt)
+ update = true;
+
+ dd.close ();
+
+ // If nothing changed, then we are done.
+ //
+ if (!update)
+ return ts;
+
+ // Translate paths to relative (to working directory). This results in
+ // easier to read diagnostics.
+ //
+ path relo (relative (t.dir));
+ path rels (relative (s.path ()));
+
+ const process_path& pp (ctgt.process_path ());
+ cstrings args {pp.recall_string ()};
+
+ // See if we need to pass --output-{prefix,suffix}
+ //
+ string prefix, suffix;
+ match_stem (t.name, s.name, &prefix, &suffix);
+
+ if (!prefix.empty ())
+ {
+ args.push_back ("--output-prefix");
+ args.push_back (prefix.c_str ());
+ }
+
+ if (!suffix.empty ())
+ {
+ args.push_back ("--output-suffix");
+ args.push_back (suffix.c_str ());
+ }
+
+ // See if we need to pass any --?xx-suffix options.
+ //
+ append_extension (args, *t.h, "--hxx-suffix", "hxx");
+ append_extension (args, *t.c, "--cxx-suffix", "cxx");
+ if (t.i != nullptr)
+ append_extension (args, *t.i, "--ixx-suffix", "ixx");
+
+ append_options (args, t, "cli.options");
+
+ if (!relo.empty ())
+ {
+ args.push_back ("-o");
+ args.push_back (relo.string ().c_str ());
+ }
+
+ args.push_back (rels.string ().c_str ());
+ args.push_back (nullptr);
+
+ if (verb >= 2)
+ print_process (args);
+ else if (verb)
+ print_diag ("cli", s, t);
+
+ if (!ctx.dry_run)
+ {
+ run (ctx, pp, args, 1 /* finish_verbosity */);
+ dd.check_mtime (tp);
+ }
+
+ t.mtime (system_clock::now ());
+ return target_state::changed;
+ }
+ }
+}
diff --git a/libbuild2/cli/rule.hxx b/libbuild2/cli/rule.hxx
new file mode 100644
index 0000000..0132b44
--- /dev/null
+++ b/libbuild2/cli/rule.hxx
@@ -0,0 +1,46 @@
+// file : libbuild2/cli/rule.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef LIBBUILD2_CLI_RULE_HXX
+#define LIBBUILD2_CLI_RULE_HXX
+
+#include <libbuild2/types.hxx>
+#include <libbuild2/utility.hxx>
+
+#include <libbuild2/rule.hxx>
+
+#include <libbuild2/cli/export.hxx>
+
+namespace build2
+{
+ namespace cli
+ {
+ // Cached data shared between rules and the module.
+ //
+ struct data
+ {
+ const exe& ctgt; // CLI compiler target.
+ const string& csum; // CLI compiler checksum.
+ };
+
+ // @@ Redo as two separate rules?
+ //
+ class LIBBUILD2_CLI_SYMEXPORT compile_rule: public simple_rule,
+ private virtual data
+ {
+ public:
+ compile_rule (data&& d): data (move (d)) {}
+
+ virtual bool
+ match (action, target&) const override;
+
+ virtual recipe
+ apply (action, target&) const override;
+
+ target_state
+ perform_update (action, const target&) const;
+ };
+ }
+}
+
+#endif // LIBBUILD2_CLI_RULE_HXX
diff --git a/libbuild2/cli/target.cxx b/libbuild2/cli/target.cxx
new file mode 100644
index 0000000..22ae75c
--- /dev/null
+++ b/libbuild2/cli/target.cxx
@@ -0,0 +1,75 @@
+// file : libbuild2/cli/target.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <libbuild2/cli/target.hxx>
+
+#include <libbuild2/context.hxx>
+
+namespace build2
+{
+ namespace cli
+ {
+ // cli
+ //
+ extern const char cli_ext_def[] = "cli";
+
+ const target_type cli::static_type
+ {
+ "cli",
+ &file::static_type,
+ &target_factory<cli>,
+ nullptr, /* fixed_extension */
+ &target_extension_var<cli_ext_def>,
+ &target_pattern_var<cli_ext_def>,
+ nullptr,
+ &file_search,
+ target_type::flag::none
+ };
+
+ // cli.cxx
+ //
+ group_view cli_cxx::
+ group_members (action) const
+ {
+ static_assert (sizeof (cli_cxx_members) == sizeof (const target*) * 3,
+ "member layout incompatible with array");
+
+ return h != nullptr
+ ? group_view {reinterpret_cast<const target* const*> (&h),
+ (i != nullptr ? 3U : 2U)}
+ : group_view {nullptr, 0};
+ }
+
+ static target*
+ cli_cxx_factory (context& ctx,
+ const target_type&, dir_path d, dir_path o, string n)
+ {
+ tracer trace ("cli::cli_cxx_factory");
+
+ // Pre-enter (potential) members as targets. The main purpose of doing
+ // this is to avoid searching for existing files in src_base if the
+ // buildfile mentions some of them explicitly as prerequisites.
+ //
+ // Also required for the src-out remapping logic.
+ //
+ ctx.targets.insert<cxx::hxx> (d, o, n, trace);
+ ctx.targets.insert<cxx::cxx> (d, o, n, trace);
+ ctx.targets.insert<cxx::ixx> (d, o, n, trace);
+
+ return new cli_cxx (ctx, move (d), move (o), move (n));
+ }
+
+ const target_type cli_cxx::static_type
+ {
+ "cli.cxx",
+ &mtime_target::static_type,
+ &cli_cxx_factory,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ &target_search,
+ target_type::flag::see_through // Group with "see through" iteration.
+ };
+ }
+}
diff --git a/libbuild2/cli/target.hxx b/libbuild2/cli/target.hxx
new file mode 100644
index 0000000..8efb837
--- /dev/null
+++ b/libbuild2/cli/target.hxx
@@ -0,0 +1,61 @@
+// file : libbuild2/cli/target.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef LIBBUILD2_CLI_TARGET_HXX
+#define LIBBUILD2_CLI_TARGET_HXX
+
+#include <libbuild2/types.hxx>
+#include <libbuild2/utility.hxx>
+
+#include <libbuild2/target.hxx>
+
+#include <libbuild2/cxx/target.hxx>
+
+#include <libbuild2/cli/export.hxx>
+
+namespace build2
+{
+ namespace cli
+ {
+ class LIBBUILD2_CLI_SYMEXPORT cli: public file
+ {
+ public:
+ cli (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
+
+ public:
+ static const target_type static_type;
+ };
+
+ // Standard layout type compatible with group_view's const target*[3].
+ //
+ struct cli_cxx_members
+ {
+ const cxx::hxx* h = nullptr;
+ const cxx::cxx* c = nullptr;
+ const cxx::ixx* i = nullptr;
+ };
+
+ class LIBBUILD2_CLI_SYMEXPORT cli_cxx: public mtime_target,
+ public cli_cxx_members
+ {
+ public:
+ cli_cxx (context& c, dir_path d, dir_path o, string n)
+ : mtime_target (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
+
+ virtual group_view
+ group_members (action) const override;
+
+ public:
+ static const target_type static_type;
+ };
+ }
+}
+
+#endif // LIBBUILD2_CLI_TARGET_HXX
diff --git a/libbuild2/common-options.cxx b/libbuild2/common-options.cxx
new file mode 100644
index 0000000..03e7e60
--- /dev/null
+++ b/libbuild2/common-options.cxx
@@ -0,0 +1,809 @@
+// -*- C++ -*-
+//
+// This file was generated by CLI, a command line interface
+// compiler for C++.
+//
+
+// Begin prologue.
+//
+//
+// End prologue.
+
+#include <libbuild2/common-options.hxx>
+
+#include <map>
+#include <set>
+#include <string>
+#include <vector>
+#include <utility>
+#include <ostream>
+#include <sstream>
+#include <cstring>
+#include <fstream>
+
+namespace build2
+{
+ namespace build
+ {
+ namespace cli
+ {
+ // unknown_option
+ //
+ unknown_option::
+ ~unknown_option () noexcept
+ {
+ }
+
+ void unknown_option::
+ print (::std::ostream& os) const
+ {
+ os << "unknown option '" << option ().c_str () << "'";
+ }
+
+ const char* unknown_option::
+ what () const noexcept
+ {
+ return "unknown option";
+ }
+
+ // unknown_argument
+ //
+ unknown_argument::
+ ~unknown_argument () noexcept
+ {
+ }
+
+ void unknown_argument::
+ print (::std::ostream& os) const
+ {
+ os << "unknown argument '" << argument ().c_str () << "'";
+ }
+
+ const char* unknown_argument::
+ what () const noexcept
+ {
+ return "unknown argument";
+ }
+
+ // missing_value
+ //
+ missing_value::
+ ~missing_value () noexcept
+ {
+ }
+
+ void missing_value::
+ print (::std::ostream& os) const
+ {
+ os << "missing value for option '" << option ().c_str () << "'";
+ }
+
+ const char* missing_value::
+ what () const noexcept
+ {
+ return "missing option value";
+ }
+
+ // invalid_value
+ //
+ invalid_value::
+ ~invalid_value () noexcept
+ {
+ }
+
+ void invalid_value::
+ print (::std::ostream& os) const
+ {
+ os << "invalid value '" << value ().c_str () << "' for option '"
+ << option ().c_str () << "'";
+
+ if (!message ().empty ())
+ os << ": " << message ().c_str ();
+ }
+
+ const char* invalid_value::
+ what () const noexcept
+ {
+ return "invalid option value";
+ }
+
+ // eos_reached
+ //
+ void eos_reached::
+ print (::std::ostream& os) const
+ {
+ os << what ();
+ }
+
+ const char* eos_reached::
+ what () const noexcept
+ {
+ return "end of argument stream reached";
+ }
+
+ // file_io_failure
+ //
+ file_io_failure::
+ ~file_io_failure () noexcept
+ {
+ }
+
+ void file_io_failure::
+ print (::std::ostream& os) const
+ {
+ os << "unable to open file '" << file ().c_str () << "' or read failure";
+ }
+
+ const char* file_io_failure::
+ what () const noexcept
+ {
+ return "unable to open file or read failure";
+ }
+
+ // unmatched_quote
+ //
+ unmatched_quote::
+ ~unmatched_quote () noexcept
+ {
+ }
+
+ void unmatched_quote::
+ print (::std::ostream& os) const
+ {
+ os << "unmatched quote in argument '" << argument ().c_str () << "'";
+ }
+
+ const char* unmatched_quote::
+ what () const noexcept
+ {
+ return "unmatched quote";
+ }
+
+ // scanner
+ //
+ scanner::
+ ~scanner ()
+ {
+ }
+
+ // argv_scanner
+ //
+ bool argv_scanner::
+ more ()
+ {
+ return i_ < argc_;
+ }
+
+ const char* argv_scanner::
+ peek ()
+ {
+ if (i_ < argc_)
+ return argv_[i_];
+ else
+ throw eos_reached ();
+ }
+
+ const char* argv_scanner::
+ next ()
+ {
+ if (i_ < argc_)
+ {
+ const char* r (argv_[i_]);
+
+ if (erase_)
+ {
+ for (int i (i_ + 1); i < argc_; ++i)
+ argv_[i - 1] = argv_[i];
+
+ --argc_;
+ argv_[argc_] = 0;
+ }
+ else
+ ++i_;
+
+ ++start_position_;
+ return r;
+ }
+ else
+ throw eos_reached ();
+ }
+
+ void argv_scanner::
+ skip ()
+ {
+ if (i_ < argc_)
+ {
+ ++i_;
+ ++start_position_;
+ }
+ else
+ throw eos_reached ();
+ }
+
+ std::size_t argv_scanner::
+ position ()
+ {
+ return start_position_;
+ }
+
+ // vector_scanner
+ //
+ bool vector_scanner::
+ more ()
+ {
+ return i_ < v_.size ();
+ }
+
+ const char* vector_scanner::
+ peek ()
+ {
+ if (i_ < v_.size ())
+ return v_[i_].c_str ();
+ else
+ throw eos_reached ();
+ }
+
+ const char* vector_scanner::
+ next ()
+ {
+ if (i_ < v_.size ())
+ return v_[i_++].c_str ();
+ else
+ throw eos_reached ();
+ }
+
+ void vector_scanner::
+ skip ()
+ {
+ if (i_ < v_.size ())
+ ++i_;
+ else
+ throw eos_reached ();
+ }
+
+ std::size_t vector_scanner::
+ position ()
+ {
+ return start_position_ + i_;
+ }
+
+ // argv_file_scanner
+ //
+ int argv_file_scanner::zero_argc_ = 0;
+ std::string argv_file_scanner::empty_string_;
+
+ bool argv_file_scanner::
+ more ()
+ {
+ if (!args_.empty ())
+ return true;
+
+ while (base::more ())
+ {
+ // See if the next argument is the file option.
+ //
+ const char* a (base::peek ());
+ const option_info* oi = 0;
+ const char* ov = 0;
+
+ if (!skip_)
+ {
+ if ((oi = find (a)) != 0)
+ {
+ base::next ();
+
+ if (!base::more ())
+ throw missing_value (a);
+
+ ov = base::next ();
+ }
+ else if (std::strncmp (a, "-", 1) == 0)
+ {
+ if ((ov = std::strchr (a, '=')) != 0)
+ {
+ std::string o (a, 0, ov - a);
+ if ((oi = find (o.c_str ())) != 0)
+ {
+ base::next ();
+ ++ov;
+ }
+ }
+ }
+ }
+
+ if (oi != 0)
+ {
+ if (oi->search_func != 0)
+ {
+ std::string f (oi->search_func (ov, oi->arg));
+
+ if (!f.empty ())
+ load (f);
+ }
+ else
+ load (ov);
+
+ if (!args_.empty ())
+ return true;
+ }
+ else
+ {
+ if (!skip_)
+ skip_ = (std::strcmp (a, "--") == 0);
+
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ const char* argv_file_scanner::
+ peek ()
+ {
+ if (!more ())
+ throw eos_reached ();
+
+ return args_.empty () ? base::peek () : args_.front ().value.c_str ();
+ }
+
+ const std::string& argv_file_scanner::
+ peek_file ()
+ {
+ if (!more ())
+ throw eos_reached ();
+
+ return args_.empty () ? empty_string_ : *args_.front ().file;
+ }
+
+ std::size_t argv_file_scanner::
+ peek_line ()
+ {
+ if (!more ())
+ throw eos_reached ();
+
+ return args_.empty () ? 0 : args_.front ().line;
+ }
+
+ const char* argv_file_scanner::
+ next ()
+ {
+ if (!more ())
+ throw eos_reached ();
+
+ if (args_.empty ())
+ return base::next ();
+ else
+ {
+ hold_[i_ == 0 ? ++i_ : --i_].swap (args_.front ().value);
+ args_.pop_front ();
+ ++start_position_;
+ return hold_[i_].c_str ();
+ }
+ }
+
+ void argv_file_scanner::
+ skip ()
+ {
+ if (!more ())
+ throw eos_reached ();
+
+ if (args_.empty ())
+ return base::skip ();
+ else
+ {
+ args_.pop_front ();
+ ++start_position_;
+ }
+ }
+
+ const argv_file_scanner::option_info* argv_file_scanner::
+ find (const char* a) const
+ {
+ for (std::size_t i (0); i < options_count_; ++i)
+ if (std::strcmp (a, options_[i].option) == 0)
+ return &options_[i];
+
+ return 0;
+ }
+
+ std::size_t argv_file_scanner::
+ position ()
+ {
+ return start_position_;
+ }
+
+ void argv_file_scanner::
+ load (const std::string& file)
+ {
+ using namespace std;
+
+ ifstream is (file.c_str ());
+
+ if (!is.is_open ())
+ throw file_io_failure (file);
+
+ files_.push_back (file);
+
+ arg a;
+ a.file = &*files_.rbegin ();
+
+ for (a.line = 1; !is.eof (); ++a.line)
+ {
+ string line;
+ getline (is, line);
+
+ if (is.fail () && !is.eof ())
+ throw file_io_failure (file);
+
+ string::size_type n (line.size ());
+
+ // Trim the line from leading and trailing whitespaces.
+ //
+ if (n != 0)
+ {
+ const char* f (line.c_str ());
+ const char* l (f + n);
+
+ const char* of (f);
+ while (f < l && (*f == ' ' || *f == '\t' || *f == '\r'))
+ ++f;
+
+ --l;
+
+ const char* ol (l);
+ while (l > f && (*l == ' ' || *l == '\t' || *l == '\r'))
+ --l;
+
+ if (f != of || l != ol)
+ line = f <= l ? string (f, l - f + 1) : string ();
+ }
+
+ // Ignore empty lines and those that start with #.
+ //
+ if (line.empty () || line[0] == '#')
+ continue;
+
+ string::size_type p (string::npos);
+ if (line.compare (0, 1, "-") == 0)
+ {
+ p = line.find (' ');
+
+ string::size_type q (line.find ('='));
+ if (q != string::npos && q < p)
+ p = q;
+ }
+
+ string s1;
+ if (p != string::npos)
+ {
+ s1.assign (line, 0, p);
+
+ // Skip leading whitespace in the argument.
+ //
+ if (line[p] == '=')
+ ++p;
+ else
+ {
+ n = line.size ();
+ for (++p; p < n; ++p)
+ {
+ char c (line[p]);
+ if (c != ' ' && c != '\t' && c != '\r')
+ break;
+ }
+ }
+ }
+ else if (!skip_)
+ skip_ = (line == "--");
+
+ string s2 (line, p != string::npos ? p : 0);
+
+ // If the string (which is an option value or argument) is
+ // wrapped in quotes, remove them.
+ //
+ n = s2.size ();
+ char cf (s2[0]), cl (s2[n - 1]);
+
+ if (cf == '"' || cf == '\'' || cl == '"' || cl == '\'')
+ {
+ if (n == 1 || cf != cl)
+ throw unmatched_quote (s2);
+
+ s2 = string (s2, 1, n - 2);
+ }
+
+ if (!s1.empty ())
+ {
+ // See if this is another file option.
+ //
+ const option_info* oi;
+ if (!skip_ && (oi = find (s1.c_str ())))
+ {
+ if (s2.empty ())
+ throw missing_value (oi->option);
+
+ if (oi->search_func != 0)
+ {
+ string f (oi->search_func (s2.c_str (), oi->arg));
+ if (!f.empty ())
+ load (f);
+ }
+ else
+ {
+ // If the path of the file being parsed is not simple and the
+ // path of the file that needs to be loaded is relative, then
+ // complete the latter using the former as a base.
+ //
+#ifndef _WIN32
+ string::size_type p (file.find_last_of ('/'));
+ bool c (p != string::npos && s2[0] != '/');
+#else
+ string::size_type p (file.find_last_of ("/\\"));
+ bool c (p != string::npos && s2[1] != ':');
+#endif
+ if (c)
+ s2.insert (0, file, 0, p + 1);
+
+ load (s2);
+ }
+
+ continue;
+ }
+
+ a.value = s1;
+ args_.push_back (a);
+ }
+
+ a.value = s2;
+ args_.push_back (a);
+ }
+ }
+
+ template <typename X>
+ struct parser
+ {
+ static void
+ parse (X& x, bool& xs, scanner& s)
+ {
+ using namespace std;
+
+ const char* o (s.next ());
+ if (s.more ())
+ {
+ string v (s.next ());
+ istringstream is (v);
+ if (!(is >> x && is.peek () == istringstream::traits_type::eof ()))
+ throw invalid_value (o, v);
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <>
+ struct parser<bool>
+ {
+ static void
+ parse (bool& x, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ const char* v (s.next ());
+
+ if (std::strcmp (v, "1") == 0 ||
+ std::strcmp (v, "true") == 0 ||
+ std::strcmp (v, "TRUE") == 0 ||
+ std::strcmp (v, "True") == 0)
+ x = true;
+ else if (std::strcmp (v, "0") == 0 ||
+ std::strcmp (v, "false") == 0 ||
+ std::strcmp (v, "FALSE") == 0 ||
+ std::strcmp (v, "False") == 0)
+ x = false;
+ else
+ throw invalid_value (o, v);
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <>
+ struct parser<std::string>
+ {
+ static void
+ parse (std::string& x, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ x = s.next ();
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <typename X>
+ struct parser<std::pair<X, std::size_t> >
+ {
+ static void
+ parse (std::pair<X, std::size_t>& x, bool& xs, scanner& s)
+ {
+ x.second = s.position ();
+ parser<X>::parse (x.first, xs, s);
+ }
+ };
+
+ template <typename X>
+ struct parser<std::vector<X> >
+ {
+ static void
+ parse (std::vector<X>& c, bool& xs, scanner& s)
+ {
+ X x;
+ bool dummy;
+ parser<X>::parse (x, dummy, s);
+ c.push_back (x);
+ xs = true;
+ }
+ };
+
+ template <typename X, typename C>
+ struct parser<std::set<X, C> >
+ {
+ static void
+ parse (std::set<X, C>& c, bool& xs, scanner& s)
+ {
+ X x;
+ bool dummy;
+ parser<X>::parse (x, dummy, s);
+ c.insert (x);
+ xs = true;
+ }
+ };
+
+ template <typename K, typename V, typename C>
+ struct parser<std::map<K, V, C> >
+ {
+ static void
+ parse (std::map<K, V, C>& m, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ std::size_t pos (s.position ());
+ std::string ov (s.next ());
+ std::string::size_type p = ov.find ('=');
+
+ K k = K ();
+ V v = V ();
+ std::string kstr (ov, 0, p);
+ std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (o),
+ 0
+ };
+
+ bool dummy;
+ if (!kstr.empty ())
+ {
+ av[1] = const_cast<char*> (kstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<K>::parse (k, dummy, s);
+ }
+
+ if (!vstr.empty ())
+ {
+ av[1] = const_cast<char*> (vstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<V>::parse (v, dummy, s);
+ }
+
+ m[k] = v;
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <typename K, typename V, typename C>
+ struct parser<std::multimap<K, V, C> >
+ {
+ static void
+ parse (std::multimap<K, V, C>& m, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ std::size_t pos (s.position ());
+ std::string ov (s.next ());
+ std::string::size_type p = ov.find ('=');
+
+ K k = K ();
+ V v = V ();
+ std::string kstr (ov, 0, p);
+ std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (o),
+ 0
+ };
+
+ bool dummy;
+ if (!kstr.empty ())
+ {
+ av[1] = const_cast<char*> (kstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<K>::parse (k, dummy, s);
+ }
+
+ if (!vstr.empty ())
+ {
+ av[1] = const_cast<char*> (vstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<V>::parse (v, dummy, s);
+ }
+
+ m.insert (typename std::multimap<K, V, C>::value_type (k, v));
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <typename X, typename T, T X::*M>
+ void
+ thunk (X& x, scanner& s)
+ {
+ parser<T>::parse (x.*M, s);
+ }
+
+ template <typename X, bool X::*M>
+ void
+ thunk (X& x, scanner& s)
+ {
+ s.next ();
+ x.*M = true;
+ }
+
+ template <typename X, typename T, T X::*M, bool X::*S>
+ void
+ thunk (X& x, scanner& s)
+ {
+ parser<T>::parse (x.*M, x.*S, s);
+ }
+ }
+ }
+}
+
+#include <map>
+
+namespace build2
+{
+}
+
+// Begin epilogue.
+//
+//
+// End epilogue.
+
diff --git a/libbuild2/common-options.hxx b/libbuild2/common-options.hxx
new file mode 100644
index 0000000..f90f563
--- /dev/null
+++ b/libbuild2/common-options.hxx
@@ -0,0 +1,484 @@
+// -*- C++ -*-
+//
+// This file was generated by CLI, a command line interface
+// compiler for C++.
+//
+
+#ifndef LIBBUILD2_COMMON_OPTIONS_HXX
+#define LIBBUILD2_COMMON_OPTIONS_HXX
+
+// Begin prologue.
+//
+#include <libbuild2/export.hxx>
+//
+// End prologue.
+
+#include <list>
+#include <deque>
+#include <vector>
+#include <iosfwd>
+#include <string>
+#include <cstddef>
+#include <exception>
+
+#ifndef CLI_POTENTIALLY_UNUSED
+# if defined(_MSC_VER) || defined(__xlC__)
+# define CLI_POTENTIALLY_UNUSED(x) (void*)&x
+# else
+# define CLI_POTENTIALLY_UNUSED(x) (void)x
+# endif
+#endif
+
+namespace build2
+{
+ namespace build
+ {
+ namespace cli
+ {
+ class usage_para
+ {
+ public:
+ enum value
+ {
+ none,
+ text,
+ option
+ };
+
+ usage_para (value);
+
+ operator value () const
+ {
+ return v_;
+ }
+
+ private:
+ value v_;
+ };
+
+ class unknown_mode
+ {
+ public:
+ enum value
+ {
+ skip,
+ stop,
+ fail
+ };
+
+ unknown_mode (value);
+
+ operator value () const
+ {
+ return v_;
+ }
+
+ private:
+ value v_;
+ };
+
+ // Exceptions.
+ //
+
+ class LIBBUILD2_SYMEXPORT exception: public std::exception
+ {
+ public:
+ virtual void
+ print (::std::ostream&) const = 0;
+ };
+
+ ::std::ostream&
+ operator<< (::std::ostream&, const exception&);
+
+ class LIBBUILD2_SYMEXPORT unknown_option: public exception
+ {
+ public:
+ virtual
+ ~unknown_option () noexcept;
+
+ unknown_option (const std::string& option);
+
+ const std::string&
+ option () const;
+
+ virtual void
+ print (::std::ostream&) const;
+
+ virtual const char*
+ what () const noexcept;
+
+ private:
+ std::string option_;
+ };
+
+ class LIBBUILD2_SYMEXPORT unknown_argument: public exception
+ {
+ public:
+ virtual
+ ~unknown_argument () noexcept;
+
+ unknown_argument (const std::string& argument);
+
+ const std::string&
+ argument () const;
+
+ virtual void
+ print (::std::ostream&) const;
+
+ virtual const char*
+ what () const noexcept;
+
+ private:
+ std::string argument_;
+ };
+
+ class LIBBUILD2_SYMEXPORT missing_value: public exception
+ {
+ public:
+ virtual
+ ~missing_value () noexcept;
+
+ missing_value (const std::string& option);
+
+ const std::string&
+ option () const;
+
+ virtual void
+ print (::std::ostream&) const;
+
+ virtual const char*
+ what () const noexcept;
+
+ private:
+ std::string option_;
+ };
+
+ class LIBBUILD2_SYMEXPORT invalid_value: public exception
+ {
+ public:
+ virtual
+ ~invalid_value () noexcept;
+
+ invalid_value (const std::string& option,
+ const std::string& value,
+ const std::string& message = std::string ());
+
+ const std::string&
+ option () const;
+
+ const std::string&
+ value () const;
+
+ const std::string&
+ message () const;
+
+ virtual void
+ print (::std::ostream&) const;
+
+ virtual const char*
+ what () const noexcept;
+
+ private:
+ std::string option_;
+ std::string value_;
+ std::string message_;
+ };
+
+ class LIBBUILD2_SYMEXPORT eos_reached: public exception
+ {
+ public:
+ virtual void
+ print (::std::ostream&) const;
+
+ virtual const char*
+ what () const noexcept;
+ };
+
+ class LIBBUILD2_SYMEXPORT file_io_failure: public exception
+ {
+ public:
+ virtual
+ ~file_io_failure () noexcept;
+
+ file_io_failure (const std::string& file);
+
+ const std::string&
+ file () const;
+
+ virtual void
+ print (::std::ostream&) const;
+
+ virtual const char*
+ what () const noexcept;
+
+ private:
+ std::string file_;
+ };
+
+ class LIBBUILD2_SYMEXPORT unmatched_quote: public exception
+ {
+ public:
+ virtual
+ ~unmatched_quote () noexcept;
+
+ unmatched_quote (const std::string& argument);
+
+ const std::string&
+ argument () const;
+
+ virtual void
+ print (::std::ostream&) const;
+
+ virtual const char*
+ what () const noexcept;
+
+ private:
+ std::string argument_;
+ };
+
+ // Command line argument scanner interface.
+ //
+ // The values returned by next() are guaranteed to be valid
+ // for the two previous arguments up until a call to a third
+ // peek() or next().
+ //
+ // The position() function returns a monotonically-increasing
+ // number which, if stored, can later be used to determine the
+ // relative position of the argument returned by the following
+ // call to next(). Note that if multiple scanners are used to
+ // extract arguments from multiple sources, then the end
+ // position of the previous scanner should be used as the
+ // start position of the next.
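+ //
+ // A minimal chaining sketch (assuming the remaining arguments were
+ // collected into a std::vector<std::string> named extra; the name is
+ // hypothetical):
+ //
+ //   argv_scanner as (argc, argv);
+ //   // ... consume options from as ...
+ //   vector_scanner vs (extra, 0, as.position ());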
+ //
+ class LIBBUILD2_SYMEXPORT scanner
+ {
+ public:
+ virtual
+ ~scanner ();
+
+ virtual bool
+ more () = 0;
+
+ virtual const char*
+ peek () = 0;
+
+ virtual const char*
+ next () = 0;
+
+ virtual void
+ skip () = 0;
+
+ virtual std::size_t
+ position () = 0;
+ };
+
+ class LIBBUILD2_SYMEXPORT argv_scanner: public scanner
+ {
+ public:
+ argv_scanner (int& argc,
+ char** argv,
+ bool erase = false,
+ std::size_t start_position = 0);
+
+ argv_scanner (int start,
+ int& argc,
+ char** argv,
+ bool erase = false,
+ std::size_t start_position = 0);
+
+ int
+ end () const;
+
+ virtual bool
+ more ();
+
+ virtual const char*
+ peek ();
+
+ virtual const char*
+ next ();
+
+ virtual void
+ skip ();
+
+ virtual std::size_t
+ position ();
+
+ protected:
+ std::size_t start_position_;
+ int i_;
+ int& argc_;
+ char** argv_;
+ bool erase_;
+ };
+
+ class LIBBUILD2_SYMEXPORT vector_scanner: public scanner
+ {
+ public:
+ vector_scanner (const std::vector<std::string>&,
+ std::size_t start = 0,
+ std::size_t start_position = 0);
+
+ std::size_t
+ end () const;
+
+ void
+ reset (std::size_t start = 0, std::size_t start_position = 0);
+
+ virtual bool
+ more ();
+
+ virtual const char*
+ peek ();
+
+ virtual const char*
+ next ();
+
+ virtual void
+ skip ();
+
+ virtual std::size_t
+ position ();
+
+ private:
+ std::size_t start_position_;
+ const std::vector<std::string>& v_;
+ std::size_t i_;
+ };
+
+ class LIBBUILD2_SYMEXPORT argv_file_scanner: public argv_scanner
+ {
+ public:
+ argv_file_scanner (int& argc,
+ char** argv,
+ const std::string& option,
+ bool erase = false,
+ std::size_t start_position = 0);
+
+ argv_file_scanner (int start,
+ int& argc,
+ char** argv,
+ const std::string& option,
+ bool erase = false,
+ std::size_t start_position = 0);
+
+ argv_file_scanner (const std::string& file,
+ const std::string& option,
+ std::size_t start_position = 0);
+
+ struct option_info
+ {
+ // If search_func is not NULL, it is called, with the arg
+ // value as the second argument, to locate the options file.
+ // If it returns an empty string, then the file is ignored.
+ //
+ const char* option;
+ std::string (*search_func) (const char*, void* arg);
+ void* arg;
+ };
+
+ argv_file_scanner (int& argc,
+ char** argv,
+ const option_info* options,
+ std::size_t options_count,
+ bool erase = false,
+ std::size_t start_position = 0);
+
+ argv_file_scanner (int start,
+ int& argc,
+ char** argv,
+ const option_info* options,
+ std::size_t options_count,
+ bool erase = false,
+ std::size_t start_position = 0);
+
+ argv_file_scanner (const std::string& file,
+ const option_info* options = 0,
+ std::size_t options_count = 0,
+ std::size_t start_position = 0);
+
+ virtual bool
+ more ();
+
+ virtual const char*
+ peek ();
+
+ virtual const char*
+ next ();
+
+ virtual void
+ skip ();
+
+ virtual std::size_t
+ position ();
+
+ // Return the file path if the peeked at argument came from a file and
+ // the empty string otherwise. The reference is guaranteed to be valid
+ // till the end of the scanner lifetime.
+ //
+ const std::string&
+ peek_file ();
+
+ // Return the 1-based line number if the peeked at argument came from
+ // a file and zero otherwise.
+ //
+ std::size_t
+ peek_line ();
+
+ private:
+ const option_info*
+ find (const char*) const;
+
+ void
+ load (const std::string& file);
+
+ typedef argv_scanner base;
+
+ const std::string option_;
+ option_info option_info_;
+ const option_info* options_;
+ std::size_t options_count_;
+
+ struct arg
+ {
+ std::string value;
+ const std::string* file;
+ std::size_t line;
+ };
+
+ std::deque<arg> args_;
+ std::list<std::string> files_;
+
+ // Circular buffer of two arguments.
+ //
+ std::string hold_[2];
+ std::size_t i_;
+
+ bool skip_;
+
+ static int zero_argc_;
+ static std::string empty_string_;
+ };
+
+ template <typename X>
+ struct parser;
+ }
+ }
+}
+
+#include <libbuild2/types.hxx>
+
+#include <libbuild2/options-types.hxx>
+
+namespace build2
+{
+}
+
+#include <libbuild2/common-options.ixx>
+
+// Begin epilogue.
+//
+//
+// End epilogue.
+
+#endif // LIBBUILD2_COMMON_OPTIONS_HXX
diff --git a/libbuild2/common-options.ixx b/libbuild2/common-options.ixx
new file mode 100644
index 0000000..1b7b74e
--- /dev/null
+++ b/libbuild2/common-options.ixx
@@ -0,0 +1,312 @@
+// -*- C++ -*-
+//
+// This file was generated by CLI, a command line interface
+// compiler for C++.
+//
+
+// Begin prologue.
+//
+//
+// End prologue.
+
+#include <cassert>
+
+namespace build2
+{
+ namespace build
+ {
+ namespace cli
+ {
+ // usage_para
+ //
+ inline usage_para::
+ usage_para (value v)
+ : v_ (v)
+ {
+ }
+
+ // unknown_mode
+ //
+ inline unknown_mode::
+ unknown_mode (value v)
+ : v_ (v)
+ {
+ }
+
+ // exception
+ //
+ inline ::std::ostream&
+ operator<< (::std::ostream& os, const exception& e)
+ {
+ e.print (os);
+ return os;
+ }
+
+ // unknown_option
+ //
+ inline unknown_option::
+ unknown_option (const std::string& option)
+ : option_ (option)
+ {
+ }
+
+ inline const std::string& unknown_option::
+ option () const
+ {
+ return option_;
+ }
+
+ // unknown_argument
+ //
+ inline unknown_argument::
+ unknown_argument (const std::string& argument)
+ : argument_ (argument)
+ {
+ }
+
+ inline const std::string& unknown_argument::
+ argument () const
+ {
+ return argument_;
+ }
+
+ // missing_value
+ //
+ inline missing_value::
+ missing_value (const std::string& option)
+ : option_ (option)
+ {
+ }
+
+ inline const std::string& missing_value::
+ option () const
+ {
+ return option_;
+ }
+
+ // invalid_value
+ //
+ inline invalid_value::
+ invalid_value (const std::string& option,
+ const std::string& value,
+ const std::string& message)
+ : option_ (option),
+ value_ (value),
+ message_ (message)
+ {
+ }
+
+ inline const std::string& invalid_value::
+ option () const
+ {
+ return option_;
+ }
+
+ inline const std::string& invalid_value::
+ value () const
+ {
+ return value_;
+ }
+
+ inline const std::string& invalid_value::
+ message () const
+ {
+ return message_;
+ }
+
+ // file_io_failure
+ //
+ inline file_io_failure::
+ file_io_failure (const std::string& file)
+ : file_ (file)
+ {
+ }
+
+ inline const std::string& file_io_failure::
+ file () const
+ {
+ return file_;
+ }
+
+ // unmatched_quote
+ //
+ inline unmatched_quote::
+ unmatched_quote (const std::string& argument)
+ : argument_ (argument)
+ {
+ }
+
+ inline const std::string& unmatched_quote::
+ argument () const
+ {
+ return argument_;
+ }
+
+ // argv_scanner
+ //
+ inline argv_scanner::
+ argv_scanner (int& argc,
+ char** argv,
+ bool erase,
+ std::size_t sp)
+ : start_position_ (sp + 1),
+ i_ (1),
+ argc_ (argc),
+ argv_ (argv),
+ erase_ (erase)
+ {
+ }
+
+ inline argv_scanner::
+ argv_scanner (int start,
+ int& argc,
+ char** argv,
+ bool erase,
+ std::size_t sp)
+ : start_position_ (sp + static_cast<std::size_t> (start)),
+ i_ (start),
+ argc_ (argc),
+ argv_ (argv),
+ erase_ (erase)
+ {
+ }
+
+ inline int argv_scanner::
+ end () const
+ {
+ return i_;
+ }
+
+ // vector_scanner
+ //
+ inline vector_scanner::
+ vector_scanner (const std::vector<std::string>& v,
+ std::size_t i,
+ std::size_t sp)
+ : start_position_ (sp), v_ (v), i_ (i)
+ {
+ }
+
+ inline std::size_t vector_scanner::
+ end () const
+ {
+ return i_;
+ }
+
+ inline void vector_scanner::
+ reset (std::size_t i, std::size_t sp)
+ {
+ i_ = i;
+ start_position_ = sp;
+ }
+
+ // argv_file_scanner
+ //
+ inline argv_file_scanner::
+ argv_file_scanner (int& argc,
+ char** argv,
+ const std::string& option,
+ bool erase,
+ std::size_t sp)
+ : argv_scanner (argc, argv, erase, sp),
+ option_ (option),
+ options_ (&option_info_),
+ options_count_ (1),
+ i_ (1),
+ skip_ (false)
+ {
+ option_info_.option = option_.c_str ();
+ option_info_.search_func = 0;
+ }
+
+ inline argv_file_scanner::
+ argv_file_scanner (int start,
+ int& argc,
+ char** argv,
+ const std::string& option,
+ bool erase,
+ std::size_t sp)
+ : argv_scanner (start, argc, argv, erase, sp),
+ option_ (option),
+ options_ (&option_info_),
+ options_count_ (1),
+ i_ (1),
+ skip_ (false)
+ {
+ option_info_.option = option_.c_str ();
+ option_info_.search_func = 0;
+ }
+
+ inline argv_file_scanner::
+ argv_file_scanner (const std::string& file,
+ const std::string& option,
+ std::size_t sp)
+ : argv_scanner (0, zero_argc_, 0, sp),
+ option_ (option),
+ options_ (&option_info_),
+ options_count_ (1),
+ i_ (1),
+ skip_ (false)
+ {
+ option_info_.option = option_.c_str ();
+ option_info_.search_func = 0;
+
+ load (file);
+ }
+
+ inline argv_file_scanner::
+ argv_file_scanner (int& argc,
+ char** argv,
+ const option_info* options,
+ std::size_t options_count,
+ bool erase,
+ std::size_t sp)
+ : argv_scanner (argc, argv, erase, sp),
+ options_ (options),
+ options_count_ (options_count),
+ i_ (1),
+ skip_ (false)
+ {
+ }
+
+ inline argv_file_scanner::
+ argv_file_scanner (int start,
+ int& argc,
+ char** argv,
+ const option_info* options,
+ std::size_t options_count,
+ bool erase,
+ std::size_t sp)
+ : argv_scanner (start, argc, argv, erase, sp),
+ options_ (options),
+ options_count_ (options_count),
+ i_ (1),
+ skip_ (false)
+ {
+ }
+
+ inline argv_file_scanner::
+ argv_file_scanner (const std::string& file,
+ const option_info* options,
+ std::size_t options_count,
+ std::size_t sp)
+ : argv_scanner (0, zero_argc_, 0, sp),
+ options_ (options),
+ options_count_ (options_count),
+ i_ (1),
+ skip_ (false)
+ {
+ load (file);
+ }
+ }
+ }
+}
+
+namespace build2
+{
+}
+
+// Begin epilogue.
+//
+//
+// End epilogue.
diff --git a/libbuild2/common.cli b/libbuild2/common.cli
new file mode 100644
index 0000000..86c2ad1
--- /dev/null
+++ b/libbuild2/common.cli
@@ -0,0 +1,9 @@
+// file : libbuild2/common.cli
+// license : MIT; see accompanying LICENSE file
+
+include <libbuild2/types.hxx>;
+include <libbuild2/options-types.hxx>;
+
+namespace build2
+{
+}
diff --git a/libbuild2/config/functions.cxx b/libbuild2/config/functions.cxx
index 398512c..b1a61a2 100644
--- a/libbuild2/config/functions.cxx
+++ b/libbuild2/config/functions.cxx
@@ -21,6 +21,58 @@ namespace build2
{
function_family f (m, "config");
+ // $config.origin()
+ //
+ // Return the origin of the value of the specified configuration
+ // variable. Possible result values and their semantics are as follows:
+ //
+ // undefined
+ // The variable is undefined.
+ //
+ // default
+ // The variable has the default value from the config directive (or
+ // as specified by a module).
+ //
+ // buildfile
+ // The variable has the value from a buildfile, normally config.build
+ // but could also be from file(s) specified with config.config.load.
+ //
+ // override
+ // The variable has the command line override value. Note that if
+ // the override happens to be append/prepend, then the value could
+ // incorporate the original value.
+ //
+ // Note that the variable must be specified as a name and not as an
+ // expansion (i.e., without $).
+ //
+ // Note that this function is not pure.
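+ //
+ // A minimal buildfile sketch (config.hello.fancy is a hypothetical
+ // project configuration variable):
+ //
+ //   if ($config.origin(config.hello.fancy) == 'override')
+ //     ...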
+ //
+ f.insert (".origin", false) += [] (const scope* s, names name)
+ {
+ if (s == nullptr)
+ fail << "config.origin() called out of scope" << endf;
+
+ // Only look in the root scope since those are the only config.*
+ // variables we generally consider.
+ //
+ s = s->root_scope ();
+
+ if (s == nullptr)
+ fail << "config.origin() called out of project" << endf;
+
+ switch (origin (*s, convert<string> (move (name))).first)
+ {
+ case variable_origin::undefined: return "undefined";
+ case variable_origin::default_: return "default";
+ case variable_origin::buildfile: return "buildfile";
+ case variable_origin::override_: return "override";
+ }
+
+ return ""; // Should not reach.
+ };
+
+ // $config.save()
+ //
// Return the configuration file contents as a string, similar to the
// config.config.save variable functionality.
//
@@ -40,7 +92,10 @@ namespace build2
if (s == nullptr)
fail << "config.save() called out of project" << endf;
- module* mod (s->find_module<module> (module::name));
+ // See save_config() for details.
+ //
+ assert (s->ctx.phase == run_phase::load);
+ const module* mod (s->find_module<module> (module::name));
if (mod == nullptr)
fail << "config.save() called without config module";
diff --git a/libbuild2/config/host-config.cxx.in b/libbuild2/config/host-config.cxx.in
index 9e3e0c2..6b1ce77 100644
--- a/libbuild2/config/host-config.cxx.in
+++ b/libbuild2/config/host-config.cxx.in
@@ -9,5 +9,8 @@ namespace build2
//
extern const char host_config[] = R"###($host_config$)###";
extern const char build2_config[] = R"###($build2_config$)###";
+
+ extern const char host_config_no_warnings[] = R"###($host_config_no_warnings$)###";
+ extern const char build2_config_no_warnings[] = R"###($build2_config_no_warnings$)###";
}
}
diff --git a/libbuild2/config/init.cxx b/libbuild2/config/init.cxx
index 87b492c..2f134c4 100644
--- a/libbuild2/config/init.cxx
+++ b/libbuild2/config/init.cxx
@@ -26,6 +26,8 @@ namespace build2
{
namespace config
{
+ static const file_rule file_rule_ (true /* check_type */);
+
void
functions (function_map&); // functions.cxx
@@ -39,7 +41,7 @@ namespace build2
save_environment (const value& d, const value* b, names& storage)
{
if (b == nullptr)
- return make_pair (reverse (d, storage), "=");
+ return make_pair (reverse (d, storage, true /* reduce */), "=");
// The plan is to iterate over environment variables adding those that
// are not in base to storage. There is, however, a complication: we may
@@ -100,7 +102,10 @@ namespace build2
// reserved to not be valid module names (`build`). We also currently
// treat `import` as special.
//
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
// NOTE: all config.** variables are by default made (via a pattern) to
// be overridable with global visibility. So we must override this if a
@@ -175,10 +180,6 @@ namespace build2
if (!d)
{
- // Used as a variable prefix by configure_execute().
- //
- vp.insert ("config");
-
// Adjust priority for the config module and import pseudo-module so
// that their variables come first in config.build.
//
@@ -209,6 +210,9 @@ namespace build2
#ifndef BUILD2_BOOTSTRAP
extern const char host_config[];
extern const char build2_config[];
+
+ extern const char host_config_no_warnings[];
+ extern const char build2_config_no_warnings[];
#endif
bool
@@ -238,7 +242,7 @@ namespace build2
? &extra.module_as<module> ()
: nullptr);
- auto& vp (rs.var_pool ());
+ auto& vp (rs.var_pool (true /* public */));
// Note: config.* is pattern-typed to global visibility.
//
@@ -247,6 +251,60 @@ namespace build2
auto& c_v (vp.insert<uint64_t> ("config.version", false /*ovr*/, v_p));
auto& c_l (vp.insert<paths> ("config.config.load", true /* ovr */));
+ // Omit loading the configuration from the config.build file (it is
+ // still loaded from config.config.load if specified). Similar to
+ // config.config.load, only values specified on this project's root
+ // scope and global scope are considered.
+ //
+ // Note that this variable is not saved in config.build and is expected
+ // to always be specified as a command line override.
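+ //
+ // For example (a usage sketch; config.hello.fancy is a hypothetical
+ // variable):
+ //
+ //   b configure config.config.unload=true config.hello.fancy=true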
+ //
+ auto& c_u (vp.insert<bool> ("config.config.unload", true /*ovr*/));
+
+ // Configuration variables to disfigure.
+ //
+ // The exact semantics is to ignore these variables when loading
+ // config.build (and any files specified in config.config.load), letting
+ // them to take on the default values (more precisely, the current
+ // implementation undefined them after loading config.build). See also
+ // config.config.unload.
+ //
+ // Besides names, variables can also be specified as patterns in the
+ // config.<prefix>.(*|**)[<suffix>] form where `*` matches single
+ // component names (i.e., `foo` but not `foo.bar`), and `**` matches
+ // single and multi-component names. Currently only a single wildcard (`*`
+ // or `**`) is supported. Additionally, a pattern in the
+ // config.<prefix>(*|**) form (i.e., without `.` after <prefix>) matches
+ // config.<prefix>.(*|**) plus config.<prefix> itself (but not
+ // config.<prefix>foo).
+ //
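+ // For illustration (hypothetical names): config.hello.* matches
+ // config.hello.fancy but not config.hello.extras.checksum,
+ // config.hello.** matches both, and config.hello** additionally matches
+ // config.hello itself (but not config.hellox).
+ //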
+ // For example, to disfigure all the project configuration variables
+ // (while preserving all the module configuration variables; note
+ // quoting to prevent pattern expansion):
+ //
+ // b config.config.disfigure="'config.hello**'"
+ //
+ // Note that this variable is not saved in config.build and is expected
+ // to always be specified as a command line override.
+ //
+ // We also had the idea of using NULL values as a more natural way to
+ // undefine a configuration variable, which would only work for non-
+ // nullable variables (such as project configuration variables) or for
+ // those where NULL is the default value (most of the others). However,
+ // this cannot work in our model since we cannot reset a NULL override
+ // to a default value. So setting the variable itself to some special
+ // value does not seem to be an option and we have to convey this in
+ // some other way, such as in config.config.disfigure. Another idea is
+ // to invent a parallel set of variables, such as disfig.*, that can be
+ // used for that (though they would still have to be specified with some
+ // dummy value, for example disfig.hello.fancy=). On the other hand,
+ // this desire to disfigure individual variables does not seem to be
+ // very common (we lived without it for years without noticing). So
+ // it's not clear we need to do something like disfig.* which has a
+ // whiff of a hack to it.
+ //
+ auto& c_d (vp.insert<strings> ("config.config.disfigure", true /*ovr*/));
+
// Hermetic configurations.
//
// A hermetic configuration stores environment variables that affect the
@@ -328,9 +386,10 @@ namespace build2
save_null_omitted | save_empty_omitted | save_base,
&save_environment);
- // Load config.build if one exists followed by extra files specified in
- // config.config.load (we don't need to worry about disfigure since we
- // will never be init'ed).
+ // Load config.build if one exists (and unless config.config.unload is
+ // specified) followed by extra files specified in config.config.load
+ // (we don't need to worry about disfigure since we will never be
+ // init'ed).
//
auto load_config = [&rs, &c_v] (istream& is,
const path_name& in,
@@ -375,15 +434,37 @@ namespace build2
auto load_config_file = [&load_config] (const path& f, const location& l)
{
path_name fn (f);
- ifdstream ifs;
- load_config (open_file_or_stdin (fn, ifs), fn, l);
+ try
+ {
+ ifdstream ifs;
+ load_config (open_file_or_stdin (fn, ifs), fn, l);
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to read buildfile " << fn << ": " << e;
+ }
};
+ // Load config.build unless requested not to.
+ //
{
- path f (config_file (rs));
+ // The same semantics as in config.config.load below.
+ //
+ bool u;
+ {
+ lookup l (rs[c_u]);
+ u = (l &&
+ (l.belongs (rs) || l.belongs (ctx.global_scope)) &&
+ cast_false<bool> (l));
+ }
+
+ if (!u)
+ {
+ path f (config_file (rs));
- if (exists (f))
- load_config_file (f, l);
+ if (exists (f))
+ load_config_file (f, l);
+ }
}
if (lookup l = rs[c_l])
@@ -406,14 +487,23 @@ namespace build2
const string& s (f.string ());
- if (s[0] != '~')
+ if (s.empty ())
+ fail << "empty path in config.config.load";
+ else if (s[0] != '~')
load_config_file (f, l);
- else if (s == "~host" || s == "~build2")
+ else if (s == "~host" || s == "~host-no-warnings" ||
+ s == "~build2" || s == "~build2-no-warnings")
{
#ifdef BUILD2_BOOTSTRAP
assert (false);
#else
- istringstream is (s[1] == 'h' ? host_config : build2_config);
+ istringstream is (s[1] == 'h'
+ ? (s.size () == 5
+ ? host_config
+ : host_config_no_warnings)
+ : (s.size () == 7
+ ? build2_config
+ : build2_config_no_warnings));
load_config (is, path_name (s), l);
#endif
}
@@ -424,6 +514,117 @@ namespace build2
}
}
+ // Undefine variables specified with config.config.disfigure.
+ //
+ if (const strings* ns = cast_null<strings> (rs[c_d]))
+ {
+ auto p (rs.vars.lookup_namespace ("config"));
+
+ for (auto i (p.first); i != p.second; )
+ {
+ const variable& var (i->first);
+
+ // This can be one of the overrides (__override, __prefix, etc),
+ // which we skip.
+ //
+ if (!var.override ())
+ {
+ bool m (false);
+
+ for (const string& n: *ns)
+ {
+ if (n.compare (0, 7, "config.") != 0)
+ fail << "config.* variable expected in "
+ << "config.config.disfigure instead of '" << n << "'";
+
+ size_t p (n.find ('*'));
+
+ if (p == string::npos)
+ {
+ if ((m = var.name == n))
+ break;
+ }
+ else
+ {
+ // Pattern in one of these forms:
+ //
+ // config.<prefix>.(*|**)[<suffix>]
+ // config.<prefix>(*|**)
+ //
+ // BTW, an alternative way to handle this would be to
+ // translate it to a path and use our path_match() machinery,
+ // similar to how we do it for build config include/exclude.
+ // Perhaps one day when/if we decide to support multiple
+ // wildcards.
+ //
+ if (p == 7)
+ fail << "config.<prefix>* pattern expected in "
+ << "config.config.disfigure instead of '" << n << "'";
+
+ bool r (n[p + 1] == '*'); // Recursive.
+
+ size_t pe; // Prefix end/size.
+ if (n[p - 1] != '.')
+ {
+ // Second form should have no suffix.
+ //
+ if (p + (r ? 2 : 1) != n.size ())
+ fail << "config.<prefix>(*|**) pattern expected in "
+ << "config.config.disfigure instead of '" << n << "'";
+
+ // Match just <prefix>.
+ //
+ if ((m = n.compare (0, p, var.name) == 0))
+ break;
+
+ pe = p;
+ }
+ else
+ pe = p - 1;
+
+ // Match <prefix> followed by `.`.
+ //
+ if (n.compare (0, pe, var.name, 0, pe) != 0 ||
+ var.name[pe] != '.')
+ continue;
+
+ // Match suffix.
+ //
+ size_t sb (p + (r ? 2 : 1)); // Suffix begin.
+ size_t sn (n.size () - sb); // Suffix size.
+
+ size_t te; // Stem end.
+ if (sn == 0) // No suffix.
+ te = var.name.size ();
+ else
+ {
+ if (var.name.size () < pe + 1 + sn) // Too short.
+ continue;
+
+ te = var.name.size () - sn;
+
+ if (n.compare (sb, sn, var.name, te, sn) != 0)
+ continue;
+ }
+
+ // Match stem.
+ //
+ if ((m = r || var.name.find ('.', pe + 1) >= te))
+ break;
+ }
+ }
+
+ if (m)
+ {
+ i = rs.vars.erase (i); // Undefine.
+ continue;
+ }
+ }
+
+ ++i;
+ }
+ }
+
// Save and cache the config.config.persist value, if any.
//
if (m != nullptr)
@@ -525,20 +726,23 @@ namespace build2
// Register alias and fallback rule for the configure meta-operation.
//
- // We need this rule for out-of-any-project dependencies (e.g.,
- // libraries imported from /usr/lib). We are registring it on the
- // global scope similar to builtin rules.
- //
- rs.global_scope ().insert_rule<mtime_target> (
- configure_id, 0, "config.file", file_rule::instance);
-
- //@@ outer
rs.insert_rule<alias> (configure_id, 0, "config.alias", alias_rule::instance);
// This allows a custom configure rule while doing nothing by default.
//
- rs.insert_rule<target> (configure_id, 0, "config", noop_rule::instance);
- rs.insert_rule<file> (configure_id, 0, "config.file", noop_rule::instance);
+ rs.insert_rule<target> (configure_id, 0, "config.noop", noop_rule::instance);
+
+ // We need this rule for out-of-any-project dependencies (for example,
+ // libraries imported from /usr/lib). We are registering it on the
+ // global scope similar to builtin rules.
+ //
+ // Note: use target instead of anything more specific (such as
+ // mtime_target) in order not to take precedence over the rules above.
+ //
+ // See a similar rule in the dist module.
+ //
+ rs.global_scope ().insert_rule<target> (
+ configure_id, 0, "config.file", file_rule_);
return true;
}
diff --git a/libbuild2/config/module.hxx b/libbuild2/config/module.hxx
index 82b79be..8d3ff67 100644
--- a/libbuild2/config/module.hxx
+++ b/libbuild2/config/module.hxx
@@ -160,7 +160,7 @@ namespace build2
save_module (scope&, const char*, int);
const saved_variable*
- find_variable (const variable& var)
+ find_variable (const variable& var) const
{
auto i (saved_modules.find_sup (var.name));
if (i != saved_modules.end ())
diff --git a/libbuild2/config/operation.cxx b/libbuild2/config/operation.cxx
index 8ceb4d4..150bf1a 100644
--- a/libbuild2/config/operation.cxx
+++ b/libbuild2/config/operation.cxx
@@ -42,7 +42,7 @@ namespace build2
ofs << "# Created automatically by the config module." << endl
<< "#" << endl
<< "src_root = ";
- to_stream (ofs, name (src_root), true /* quote */, '@');
+ to_stream (ofs, name (src_root), quote_mode::normal, '@');
ofs << endl;
ofs.close ();
@@ -61,8 +61,10 @@ namespace build2
path f (src_root / rs.root_extra->out_root_file);
- if (verb)
- text << (verb >= 2 ? "cat >" : "save ") << f;
+ if (verb >= 2)
+ text << "cat >" << f;
+ else if (verb)
+ print_diag ("save", f);
try
{
@@ -71,7 +73,7 @@ namespace build2
ofs << "# Created automatically by the config module." << endl
<< "#" << endl
<< "out_root = ";
- to_stream (ofs, name (out_root), true /* quote */, '@');
+ to_stream (ofs, name (out_root), quote_mode::normal, '@');
ofs << endl;
ofs.close ();
@@ -132,7 +134,8 @@ namespace build2
bool r;
if (c.compare (p, 4 , "save") == 0) r = true;
else if (c.compare (p, 4 , "drop") == 0) r = false;
- else fail << "invalid config.config.persist action '" << c << "'";
+ else fail << "invalid config.config.persist action '" << c << "'"
+ << endf;
bool w (false);
if ((p += 4) != c.size ())
@@ -161,11 +164,18 @@ namespace build2
// and this function can be called from a buildfile (probably only
// during serial execution but still).
//
+ // We could also be configuring multiple projects (including from
+ // pkg_configure() in bpkg) but feels like we should be ok since we
+ // only modify this project's root scope data which should not affect
+ // any other project.
+ //
+ // See also save_environment() for a similar issue.
+ //
void
save_config (const scope& rs,
ostream& os, const path_name& on,
bool inherit,
- module& mod,
+ const module& mod,
const project_set& projects)
{
context& ctx (rs.ctx);
@@ -179,7 +189,7 @@ namespace build2
if (v)
{
storage.clear ();
- dr << "'" << reverse (v, storage) << "'";
+ dr << "'" << reverse (v, storage, true /* reduce */) << "'";
}
else
dr << "[null]";
@@ -207,9 +217,11 @@ namespace build2
// saved according to config.config.persist potentially warning if the
// variable would otherwise be dropped.
//
+ // Note: go straight for the public variable pool.
+ //
auto& vp (ctx.var_pool);
- for (auto p (rs.vars.lookup_namespace (*vp.find ("config")));
+ for (auto p (rs.vars.lookup_namespace ("config"));
p.first != p.second;
++p.first)
{
@@ -247,6 +259,24 @@ namespace build2
continue;
}
+ // A common reason behind an unused config.import.* value is an
+ // unused dependency. That is, there is depends in manifest but no
+ // import in buildfile (or import could be conditional in which case
+ // depends should also be conditional). So let's suggest this
+ // possibility. Note that the project name may have been sanitized
+ // to a variable name. Oh, well, better than nothing.
+ //
+ auto info_import = [] (diag_record& dr, const string& var)
+ {
+ if (var.compare (0, 14, "config.import.") == 0)
+ {
+ size_t p (var.find ('.', 14));
+
+ dr << info << "potentially unused dependency on "
+ << string (var, 14, p == string::npos ? p : p - 14);
+ }
+ };
+
const value& v (p.first->second);
pair<bool, bool> r (save_config_variable (*var,
@@ -255,7 +285,7 @@ namespace build2
true /* unused */));
if (r.first) // save
{
- mod.save_variable (*var, 0);
+ const_cast<module&> (mod).save_variable (*var, 0);
if (r.second) // warn
{
@@ -274,6 +304,7 @@ namespace build2
diag_record dr;
dr << warn (on) << "saving no longer used variable " << *var;
+ info_import (dr, var->name);
if (verb >= 2)
info_value (dr, v);
}
@@ -284,6 +315,7 @@ namespace build2
{
diag_record dr;
dr << warn (on) << "dropping no longer used variable " << *var;
+ info_import (dr, var->name);
info_value (dr, v);
}
}
@@ -509,8 +541,8 @@ namespace build2
// Handle the save_default_commented flag.
//
- if ((org.first.defined () && org.first->extra) && // Default value.
- org.first == ovr.first && // Not overriden.
+ if (org.first.defined () && org.first->extra == 1 && // Default.
+ org.first == ovr.first && // No override.
(flags & save_default_commented) != 0)
{
os << first () << '#' << n << " =" << endl;
@@ -527,7 +559,7 @@ namespace build2
pair<names_view, const char*> p (
sv.save != nullptr
? sv.save (v, base, storage)
- : make_pair (reverse (v, storage), "="));
+ : make_pair (reverse (v, storage, true /* reduce */), "="));
// Might become empty after a custom save function had at it.
//
@@ -539,7 +571,7 @@ namespace build2
if (!p.first.empty ())
{
os << ' ';
- to_stream (os, p.first, true /* quote */, '@');
+ to_stream (os, p.first, quote_mode::normal, '@');
}
os << endl;
@@ -556,7 +588,7 @@ namespace build2
save_config (const scope& rs,
const path& f,
bool inherit,
- module& mod,
+ const module& mod,
const project_set& projects)
{
path_name fn (f);
@@ -564,8 +596,10 @@ namespace build2
if (f.string () == "-")
fn.name = "<stdout>";
- if (verb)
- text << (verb >= 2 ? "cat >" : "save ") << fn;
+ if (verb >= 2)
+ text << "cat >" << fn;
+ else if (verb)
+ print_diag ("save", fn);
try
{
@@ -582,6 +616,9 @@ namespace build2
// Update config.config.environment value for a hermetic configuration.
//
+ // @@ We are modifying the module. See also save_config() for a similar
+ // issue.
+ //
static void
save_environment (scope& rs, module& mod)
{
@@ -636,6 +673,8 @@ namespace build2
}
}
+ // Note: go straight for the public variable pool.
+ //
value& v (rs.assign (*rs.ctx.var_pool.find ("config.config.environment")));
// Note that setting new config.config.environment value invalidates the
@@ -652,9 +691,9 @@ namespace build2
static void
configure_project (action a,
- scope& rs,
+ const scope& rs,
const variable* c_s, // config.config.save
- module& mod,
+ const module& mod,
project_set& projects)
{
tracer trace ("configure_project");
@@ -674,7 +713,7 @@ namespace build2
//
if (out_root != src_root)
{
- mkdir_p (out_root / rs.root_extra->build_dir);
+ mkdir_p (out_root / rs.root_extra->build_dir, 1);
mkdir (out_root / rs.root_extra->bootstrap_dir, 2);
}
@@ -688,7 +727,7 @@ namespace build2
// for the other half of this logic).
//
if (cast_false<bool> (rs["config.config.hermetic"]))
- save_environment (rs, mod);
+ save_environment (const_cast<scope&> (rs), const_cast<module&> (mod));
// Save src-root.build unless out_root is the same as src.
//
@@ -720,6 +759,11 @@ namespace build2
lookup l (rs[*c_s]);
if (l && (l.belongs (rs) || l.belongs (ctx.global_scope)))
{
+ const path& f (cast<path> (l));
+
+ if (f.empty ())
+ fail << "empty path in " << *c_s;
+
// While writing the complete configuration seems like a natural
// default, there might be a desire to take inheritance into
// account (if, say, we are exporting at multiple levels). One can
@@ -727,8 +771,7 @@ namespace build2
// still want to support this mode somehow in the future (it seems
// like an override of config.config.persist should do the trick).
//
- save_config (
- rs, cast<path> (l), false /* inherit */, mod, projects);
+ save_config (rs, f, false /* inherit */, mod, projects);
}
}
}
@@ -751,14 +794,14 @@ namespace build2
{
const dir_path& pd (p.second);
dir_path out_nroot (out_root / pd);
- scope& nrs (ctx.scopes.find_out (out_nroot).rw ());
+ const scope& nrs (ctx.scopes.find_out (out_nroot));
// Skip this subproject if it is not loaded or doesn't use the
// config module.
//
if (nrs.out_path () == out_nroot)
{
- if (module* m = nrs.find_module<module> (module::name))
+ if (const module* m = nrs.find_module<module> (module::name))
{
configure_project (a, nrs, c_s, *m, projects);
}
@@ -811,6 +854,8 @@ namespace build2
// Don't translate default to update. In our case unspecified
// means configure everything.
//
+ // Note: see pkg_configure() in bpkg if changing anything here.
+ //
return o;
}
@@ -847,6 +892,8 @@ namespace build2
static void
configure_pre (context&, const values& params, const location& l)
{
+ // Note: see pkg_configure() in bpkg if changing anything here.
+ //
forward (params, "configure", l); // Validate.
}
@@ -870,7 +917,9 @@ namespace build2
fail (l) << "forwarding to source directory " << rs.src_path ();
}
else
- load (params, rs, buildfile, out_base, src_base, l); // Normal load.
+ // Normal load.
+ //
+ perform_load (params, rs, buildfile, out_base, src_base, l);
}
static void
@@ -890,7 +939,7 @@ namespace build2
ts.push_back (&rs);
}
else
- search (params, rs, bs, bf, tk, l, ts); // Normal search.
+ perform_search (params, rs, bs, bf, tk, l, ts); // Normal search.
}
static void
@@ -910,6 +959,8 @@ namespace build2
context& ctx (fwd ? ts[0].as<scope> ().ctx : ts[0].as<target> ().ctx);
+ // Note: go straight for the public variable pool.
+ //
const variable* c_s (ctx.var_pool.find ("config.config.save"));
if (c_s->overrides == nullptr)
@@ -964,13 +1015,19 @@ namespace build2
ctx.current_operation (*oif);
+ if (oif->operation_pre != nullptr)
+ oif->operation_pre (ctx, {}, true /* inner */, location ());
+
phase_lock pl (ctx, run_phase::match);
- match (action (configure_id, id), t);
+ match_sync (action (configure_id, id), t);
+
+ if (oif->operation_post != nullptr)
+ oif->operation_post (ctx, {}, true /* inner */);
}
}
configure_project (a,
- rs->rw (),
+ *rs,
c_s,
*rs->find_module<module> (module::name),
projects);
@@ -978,6 +1035,8 @@ namespace build2
}
}
+ // NOTE: see pkg_configure() in bpkg if changing anything here.
+ //
const meta_operation_info mo_configure {
configure_id,
"configure",
@@ -1053,7 +1112,7 @@ namespace build2
}
}
- if (module* m = rs.find_module<module> (module::name))
+ if (const module* m = rs.find_module<module> (module::name))
{
for (auto hook: m->disfigure_pre_)
r = hook (a, rs) || r;
@@ -1276,6 +1335,8 @@ namespace build2
// Add the default config.config.persist value unless there is a custom
// one (specified as a command line override).
//
+ // Note: go straight for the public variable pool.
+ //
const variable& var (*ctx.var_pool.find ("config.config.persist"));
if (!rs[var].defined ())
@@ -1392,7 +1453,8 @@ namespace build2
string ("config"), /* config_module */
nullopt, /* config_file */
true, /* buildfile */
- "the create meta-operation");
+ "the create meta-operation",
+ 1 /* verbosity */);
save_config (ctx, d);
}
diff --git a/libbuild2/config/operation.hxx b/libbuild2/config/operation.hxx
index 9e2a91e..1662941 100644
--- a/libbuild2/config/operation.hxx
+++ b/libbuild2/config/operation.hxx
@@ -15,8 +15,8 @@ namespace build2
{
class module;
- extern const meta_operation_info mo_configure;
- extern const meta_operation_info mo_disfigure;
+ LIBBUILD2_SYMEXPORT extern const meta_operation_info mo_configure;
+ LIBBUILD2_SYMEXPORT extern const meta_operation_info mo_disfigure;
const string&
preprocess_create (context&,
@@ -37,7 +37,7 @@ namespace build2
save_config (const scope& rs,
ostream&, const path_name&,
bool inherit,
- module&,
+ const module&,
const project_set&);
// See config.config.hermetic.environment.
diff --git a/libbuild2/config/types.hxx b/libbuild2/config/types.hxx
new file mode 100644
index 0000000..3cdc5e3
--- /dev/null
+++ b/libbuild2/config/types.hxx
@@ -0,0 +1,25 @@
+// file : libbuild2/config/types.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef LIBBUILD2_CONFIG_TYPES_HXX
+#define LIBBUILD2_CONFIG_TYPES_HXX
+
+#include <libbuild2/types.hxx>
+
+namespace build2
+{
+ namespace config
+ {
+ // The origin of the value of a configuration variable.
+ //
+ enum class variable_origin
+ {
+ undefined, // Undefined.
+ default_, // Default value from the config directive.
+ buildfile, // Value from a buildfile, normally config.build.
+ override_ // Value from a command line override.
+ };
+ }
+}
+
+#endif // LIBBUILD2_CONFIG_TYPES_HXX
diff --git a/libbuild2/config/utility.cxx b/libbuild2/config/utility.cxx
index 928709a..6574367 100644
--- a/libbuild2/config/utility.cxx
+++ b/libbuild2/config/utility.cxx
@@ -32,7 +32,7 @@ namespace build2
// Treat an inherited value that was set to default as new.
//
- if (l.defined () && l->extra)
+ if (l.defined () && l->extra == 1)
n = true;
if (var.overrides != nullptr)
@@ -81,7 +81,9 @@ namespace build2
const string& n,
initializer_list<const char*> ig)
{
- auto& vp (rs.var_pool ());
+ // Note: go straight for the public variable pool.
+ //
+ auto& vp (rs.ctx.var_pool);
// Search all outer scopes for any value in this namespace.
//
@@ -91,7 +93,7 @@ namespace build2
// any original values, they will be "visible"; see find_override() for
// details.
//
- const variable& ns (vp.insert ("config." + n));
+ const string ns ("config." + n);
for (scope* s (&rs); s != nullptr; s = s->parent_scope ())
{
for (auto p (s->vars.lookup_namespace (ns));
@@ -107,12 +109,12 @@ namespace build2
auto match_tail = [&ns, v] (const char* t)
{
- return v->name.compare (ns.name.size () + 1, string::npos, t) == 0;
+ return v->name.compare (ns.size () + 1, string::npos, t) == 0;
};
// Ignore config.*.configured and user-supplied names.
//
- if (v->name.size () <= ns.name.size () ||
+ if (v->name.size () <= ns.size () ||
(!match_tail ("configured") &&
find_if (ig.begin (), ig.end (), match_tail) == ig.end ()))
return true;
@@ -128,7 +130,7 @@ namespace build2
// Pattern-typed as bool.
//
const variable& var (
- rs.var_pool ().insert ("config." + n + ".configured"));
+ rs.var_pool (true).insert ("config." + n + ".configured"));
save_variable (rs, var);
@@ -142,7 +144,7 @@ namespace build2
// Pattern-typed as bool.
//
const variable& var (
- rs.var_pool ().insert ("config." + n + ".configured"));
+ rs.var_pool (true).insert ("config." + n + ".configured"));
save_variable (rs, var);
@@ -156,5 +158,56 @@ namespace build2
else
return false;
}
+
+ pair<variable_origin, lookup>
+ origin (const scope& rs, const string& n)
+ {
+ // Note: go straight for the public variable pool.
+ //
+ const variable* var (rs.ctx.var_pool.find (n));
+
+ if (var == nullptr)
+ {
+ if (n.compare (0, 7, "config.") != 0)
+ throw invalid_argument ("config.* variable expected");
+
+ return make_pair (variable_origin::undefined, lookup ());
+ }
+
+ return origin (rs, *var);
+ }
+
+ pair<variable_origin, lookup>
+ origin (const scope& rs, const variable& var)
+ {
+ // Make sure this is a config.* variable. This could matter since we
+ // rely on the semantics of value::extra. We could also detect
+ // special variables like config.booted, some config.config.*, etc.
+ // (see config_save() for details), but that seems harmless.
+ //
+ if (var.name.compare (0, 7, "config.") != 0)
+ throw invalid_argument ("config.* variable expected");
+
+ return origin (rs, var, rs.lookup_original (var));
+ }
+
+ pair<variable_origin, lookup>
+ origin (const scope& rs, const variable& var, pair<lookup, size_t> org)
+ {
+ pair<lookup, size_t> ovr (var.overrides == nullptr
+ ? org
+ : rs.lookup_override (var, org));
+
+ if (!ovr.first.defined ())
+ return make_pair (variable_origin::undefined, lookup ());
+
+ if (org.first != ovr.first)
+ return make_pair (variable_origin::override_, ovr.first);
+
+ return make_pair (org.first->extra == 1
+ ? variable_origin::default_
+ : variable_origin::buildfile,
+ org.first);
+ }
}
}
diff --git a/libbuild2/config/utility.hxx b/libbuild2/config/utility.hxx
index bafcafa..1e2ff53 100644
--- a/libbuild2/config/utility.hxx
+++ b/libbuild2/config/utility.hxx
@@ -11,6 +11,8 @@
#include <libbuild2/scope.hxx>
#include <libbuild2/variable.hxx>
+#include <libbuild2/config/types.hxx>
+
#include <libbuild2/export.hxx>
namespace build2
@@ -58,6 +60,15 @@ namespace build2
{
// Mark a variable to be saved during configuration.
//
+ // Note: the save_*_omitted flags work best when undefined or (one of) the
+ // omitted value(s) is the default (see a note in lookup_config()
+ // documentation for details).
+ //
+ // The below lookup_*() functions mark the default value by setting
+ // value::extra to 1. Note that it's exactly 1 and not "not 0" since other
+ // values could have other meaning (see, for example, package skeleton
+ // in bpkg).
+ //
const uint64_t save_default_commented = 0x01; // Based on value::extra.
const uint64_t save_null_omitted = 0x02; // Treat NULL as undefined.
const uint64_t save_empty_omitted = 0x04; // Treat empty as undefined.
@@ -242,9 +253,6 @@ namespace build2
// expensive. It is also ok to call both versions multiple times provided
// the flags are the same.
//
- // @@ Should save_null_omitted be interpreted to treat null as undefined?
- // Sounds logical.
- //
lookup
lookup_config (scope& rs,
const variable&,
@@ -263,6 +271,8 @@ namespace build2
const string& var,
uint64_t save_flags = 0)
{
+ // Note: go straight for the public variable pool.
+ //
return lookup_config (rs, rs.ctx.var_pool[var], save_flags);
}
@@ -272,6 +282,8 @@ namespace build2
const string& var,
uint64_t save_flags = 0)
{
+ // Note: go straight for the public variable pool.
+ //
return lookup_config (new_value, rs, rs.ctx.var_pool[var], save_flags);
}
@@ -300,8 +312,14 @@ namespace build2
// or from the command line (i.e., it is inherited from the amalgamation),
// then its value is "overridden" to the default value on this root scope.
//
- // @@ Should save_null_omitted be interpreted to treat null as undefined?
- // Sounds logical.
+ // Note that while it may seem logical, these functions do not
+ // "reinterpret" defined values according to the save_*_omitted flags (for
+ // example, by returning the default value if the defined value is NULL
+ // and the save_null_omitted flag is specified). This is because such a
+ // reinterpretation may cause a divergence between the returned value and
+ // the re-queried config.* variable value if the defined value came from
+ // an override. To put it another way, the save_*_omitted flags are purely to
+ // reduce the noise in config.build.
//
template <typename T>
lookup
@@ -353,6 +371,8 @@ namespace build2
uint64_t save_flags = 0,
bool override = false)
{
+ // Note: go straight for the public variable pool.
+ //
return lookup_config (rs,
rs.ctx.var_pool[var],
std::forward<T> (default_value), // VC14
@@ -369,6 +389,8 @@ namespace build2
uint64_t save_flags = 0,
bool override = false)
{
+ // Note: go straight for the public variable pool.
+ //
return lookup_config (new_value,
rs,
rs.ctx.var_pool[var],
@@ -413,7 +435,7 @@ namespace build2
const V* cv (
cast_null<V> (
lookup_config (rs,
- rs.var_pool ().insert<V> ("config." + var),
+ rs.var_pool (true).insert<V> ("config." + var),
std::forward<T> (default_value)))); // VC14
value& v (bs.assign<V> (move (var)));
@@ -431,7 +453,7 @@ namespace build2
const V* cv (
cast_null<V> (
lookup_config (rs,
- rs.var_pool ().insert<V> ("config." + var),
+ rs.var_pool (true).insert<V> ("config." + var),
std::forward<T> (default_value)))); // VC14
value& v (bs.append<V> (move (var)));
@@ -494,6 +516,25 @@ namespace build2
//
LIBBUILD2_SYMEXPORT bool
unconfigured (scope& rs, const string& var, bool value);
+
+ // Return the origin of the value of the specified configuration variable
+ // plus the value itself. See $config.origin() for details.
+ //
+ // Throws invalid_argument if the passed variable is not config.*.
+ //
+ LIBBUILD2_SYMEXPORT pair<variable_origin, lookup>
+ origin (const scope& rs, const string& name);
+
+ LIBBUILD2_SYMEXPORT pair<variable_origin, lookup>
+ origin (const scope& rs, const variable&);
+
+ // As above but using the result of scope::lookup_original() or
+ // semantically equivalent (e.g., lookup_namespace()).
+ //
+ // Note that this version does not check that the variable is config.*.
+ //
+ LIBBUILD2_SYMEXPORT pair<variable_origin, lookup>
+ origin (const scope& rs, const variable&, pair<lookup, size_t> original);
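+
+ // For example (a sketch; config.hello.fancy is a hypothetical variable):
+ //
+ //   pair<variable_origin, lookup> p (origin (rs, "config.hello.fancy"));
+ //
+ //   if (p.first == variable_origin::override_)
+ //     ...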
}
}
diff --git a/libbuild2/config/utility.txx b/libbuild2/config/utility.txx
index b88f76c..71e41fd 100644
--- a/libbuild2/config/utility.txx
+++ b/libbuild2/config/utility.txx
@@ -58,7 +58,7 @@ namespace build2
if (!l.defined () || (def_ovr && !l.belongs (rs)))
{
value& v (rs.assign (var) = std::forward<T> (def_val)); // VC14
- v.extra = true; // Default value flag.
+ v.extra = 1; // Default value flag.
n = (sflags & save_default_commented) == 0; // Absence means default.
l = lookup (v, var, rs);
@@ -66,7 +66,7 @@ namespace build2
}
// Treat an inherited value that was set to default as new.
//
- else if (l->extra)
+ else if (l->extra == 1)
n = (sflags & save_default_commented) == 0; // Absence means default.
if (var.overrides != nullptr)
diff --git a/libbuild2/context.cxx b/libbuild2/context.cxx
index c016a40..6e4fd6f 100644
--- a/libbuild2/context.cxx
+++ b/libbuild2/context.cxx
@@ -45,6 +45,7 @@ namespace build2
scope_map scopes;
target_set targets;
variable_pool var_pool;
+ variable_patterns var_patterns;
variable_overrides var_overrides;
function_map functions;
@@ -52,32 +53,267 @@ namespace build2
variable_override_cache global_override_cache;
strings global_var_overrides;
- data (context& c): scopes (c), targets (c), var_pool (&c /* global */) {}
+ data (context& c)
+ : scopes (c),
+ targets (c),
+ var_pool (&c /* shared */, nullptr /* outer */, &var_patterns),
+ var_patterns (&c /* shared */, &var_pool) {}
};
+ void context::
+ reserve (reserves res)
+ {
+ assert (phase == run_phase::load);
+
+ if (res.targets != 0)
+ data_->targets.map_.reserve (res.targets);
+
+ if (res.variables != 0)
+ data_->var_pool.map_.reserve (res.variables);
+ }
+
+ pair<char, variable_override> context::
+ parse_variable_override (const string& s, size_t i, bool buildspec)
+ {
+ istringstream is (s);
+ is.exceptions (istringstream::failbit | istringstream::badbit);
+
+ // Similar to buildspec we do "effective escaping" of the special `'"\$(`
+ // characters (basically what's escapable inside a double-quoted literal
+ // plus the single quote; note, however, that we exclude line
+ // continuations and `)` since they would make directory paths on Windows
+ // unusable).
+ //
+ path_name in ("<cmdline>");
+ lexer l (is, in, 1 /* line */, "\'\"\\$(");
+
+ // At the buildfile level the scope-specific variable should be separated
+ // from the directory with a whitespace, for example:
+ //
+ // ./ foo=$bar
+ //
+ // However, requiring this for command line variables would be too
+ // inconvenient so we support both.
+ //
+ // We also have the optional visibility modifier as a first character of
+ // the variable name:
+ //
+ // ! - global
+ // % - project
+ // / - scope
+ //
+ // The last one clashes a bit with the directory prefix:
+ //
+ // ./ /foo=bar
+ // .//foo=bar
+ //
+ // But that's probably ok (the need for a scope-qualified override with
+ // scope visibility should be pretty rare). Note also that to set the
+ // value on the global scope we use !.
+ //
+ // And so the first token should be a word which can be either a variable
+ // name (potentially with the directory qualification) or just the
+ // directory, in which case it should be followed by another word
+ // (unqualified variable name). To avoid treating any of the visibility
+ // modifiers as special we use the cmdvar mode.
+ //
+ l.mode (lexer_mode::cmdvar);
+ token t (l.next ());
+
+ optional<dir_path> dir;
+ if (t.type == token_type::word)
+ {
+ string& v (t.value);
+ size_t p (path::traits_type::rfind_separator (v));
+
+ if (p != string::npos && p != 0) // If first then visibility.
+ {
+ if (p == v.size () - 1)
+ {
+ // Separate directory.
+ //
+ dir = dir_path (move (v));
+ t = l.next ();
+
+ // Target-specific overrides are not yet supported (and probably
+ // never will be; the beast is already complex enough).
+ //
+ if (t.type == token_type::colon)
+ {
+ diag_record dr (fail);
+
+ dr << "'" << s << "' is a target-specific override";
+
+ if (buildspec)
+ dr << info << "use double '--' to treat this argument as "
+ << "buildspec";
+ }
+ }
+ else
+ {
+ // Combined directory.
+ //
+ // If double separator (visibility marker), then keep the first in
+ // name.
+ //
+ if (p != 0 && path::traits_type::is_separator (v[p - 1]))
+ --p;
+
+ dir = dir_path (t.value, 0, p + 1); // Include the separator.
+ t.value.erase (0, p + 1); // Erase the separator.
+ }
+
+ if (dir->relative ())
+ {
+ // Handle the special relative to base scope case (.../).
+ //
+ auto i (dir->begin ());
+
+ if (*i == "...")
+ dir = dir_path (++i, dir->end ()); // Note: can become empty.
+ else
+ dir->complete (); // Relative to CWD.
+ }
+
+ if (dir->absolute ())
+ dir->normalize ();
+ }
+ }
+
+ token_type tt (l.next ().type);
+
+ // The token should be the variable name followed by =, +=, or =+.
+ //
+ if (t.type != token_type::word || t.value.empty () ||
+ (tt != token_type::assign &&
+ tt != token_type::prepend &&
+ tt != token_type::append))
+ {
+ diag_record dr (fail);
+
+ dr << "expected variable assignment instead of '" << s << "'";
+
+ if (buildspec)
+ dr << info << "use double '--' to treat this argument as buildspec";
+ }
+
+ // Take care of the visibility. Note that here we rely on the fact that
+ // none of these characters are lexer's name separators.
+ //
+ char c (t.value[0]);
+
+ if (path::traits_type::is_separator (c))
+ c = '/'; // Normalize.
+
+ string n (t.value, c == '!' || c == '%' || c == '/' ? 1 : 0);
+
+ // Make sure it is qualified.
+ //
+ // We can support overridable public unqualified variables (which must
+ // all be pre-entered by the end of this constructor) but we will need
+ // to detect their names here in an ad hoc manner (we cannot enter them
+ // before this logic because of the "untyped override" requirement).
+ //
+ // Note: issue the same diagnostics as in variable_pool::update().
+ //
+ if (n.find ('.') == string::npos)
+ fail << "variable " << n << " cannot be overridden";
+
+ if (c == '!' && dir)
+ fail << "scope-qualified global override of variable " << n;
+
+ // Pre-enter the main variable. Note that we rely on all the overridable
+ // variables with global visibility to be known (either entered or
+ // handled via a pattern) at this stage.
+ //
+ variable_pool& vp (data_->var_pool);
+ variable& var (
+ const_cast<variable&> (vp.insert (n, true /* overridable */)));
+
+ const variable* o;
+ {
+ variable_visibility v (c == '/' ? variable_visibility::scope :
+ c == '%' ? variable_visibility::project :
+ variable_visibility::global);
+
+ const char* k (tt == token_type::assign ? "__override" :
+ tt == token_type::append ? "__suffix" : "__prefix");
+
+ unique_ptr<variable> p (
+ new variable {
+ n + '.' + to_string (i + 1) + '.' + k,
+ &vp /* owner */,
+ nullptr /* aliases */,
+ nullptr /* type */,
+ nullptr /* overrides */,
+ v});
+
+ // Back link.
+ //
+ p->aliases = p.get ();
+ if (var.overrides != nullptr)
+ swap (p->aliases,
+ const_cast<variable*> (var.overrides.get ())->aliases);
+
+ // Forward link.
+ //
+ p->overrides = move (var.overrides);
+ var.overrides = move (p);
+
+ o = var.overrides.get ();
+ }
+
+ // Currently we expand project overrides in the global scope to keep
+ // things simple. Pass original variable for diagnostics. Use current
+ // working directory as pattern base.
+ //
+ scope& gs (global_scope.rw ());
+
+ parser p (*this);
+ pair<value, token> r (p.parse_variable_value (l, gs, &work, var));
+
+ if (r.second.type != token_type::eos)
+ fail << "unexpected " << r.second << " in variable assignment "
+ << "'" << s << "'";
+
+ // Make sure the value is not typed.
+ //
+ if (r.first.type != nullptr)
+ fail << "typed override of variable " << n;
+
+ return make_pair (
+ c,
+ variable_override {var, *o, move (dir), move (r.first)});
+ }
+
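
To make the classification concrete, the following summary (an editor's sketch, not part of the patch; the variable names are invented) restates what the returned character and override imply for a few typical arguments, per the logic above and the handling in the constructor further below:

  // argument                        pair.first   handling
  //
  // config.foo.bar=baz              'c'          pushed to var_overrides; entered
  //                                              later for each project
  // %config.foo.bar=baz             '%'          project visibility; entered per project
  // /config.foo.bar=baz             '/'          scope visibility; entered per scope
  // !config.foo.bar=baz             '!'          entered directly on the global scope
  // /tmp/proj/ config.foo.bar=baz   'c'          absolute scope; entered directly on it
  // .../ config.foo.bar=baz         'c'          relative to each project's base scope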
context::
context (scheduler& s,
global_mutexes& ms,
file_cache& fc,
- bool mo,
+ optional<match_only_level> mo,
bool nem,
bool dr,
+ bool ndb,
bool kg,
const strings& cmd_vars,
+ reserves res,
optional<context*> mc,
- const loaded_modules_lock* ml)
+ const module_libraries_lock* ml,
+ const function<var_override_function>& var_ovr_func)
: data_ (new data (*this)),
- sched (s),
- mutexes (ms),
- fcache (fc),
+ sched (&s),
+ mutexes (&ms),
+ fcache (&fc),
match_only (mo),
no_external_modules (nem),
dry_run_option (dr),
+ no_diag_buffer (ndb),
keep_going (kg),
phase_mutex (*this),
scopes (data_->scopes),
targets (data_->targets),
var_pool (data_->var_pool),
+ var_patterns (data_->var_patterns),
var_overrides (data_->var_overrides),
functions (data_->functions),
global_scope (create_global_scope (data_->scopes)),
@@ -90,12 +326,17 @@ namespace build2
? optional<unique_ptr<context>> (nullptr)
: nullopt)
{
+ // NOTE: see also the bare minimum version below if adding anything here.
+
tracer trace ("context");
l6 ([&]{trace << "initializing build state";});
+ reserve (res);
+
scope_map& sm (data_->scopes);
variable_pool& vp (data_->var_pool);
+ variable_patterns& vpats (data_->var_patterns);
insert_builtin_functions (functions);
@@ -104,7 +345,7 @@ namespace build2
//
meta_operation_table.insert ("noop");
meta_operation_table.insert ("perform");
- meta_operation_table.insert ("configure");
+ meta_operation_table.insert ("configure"); // bpkg assumes no process.
meta_operation_table.insert ("disfigure");
if (config_preprocess_create != nullptr)
@@ -134,13 +375,26 @@ namespace build2
     // Any variable assigned on the global scope should naturally have the
// global visibility.
//
- auto set = [&gs, &vp] (const char* var, auto val)
+ auto set = [&gs, &vp] (const char* var, auto val) -> const value&
{
using T = decltype (val);
value& v (gs.assign (vp.insert<T> (var, variable_visibility::global)));
v = move (val);
+ return v;
};
+ // Build system mode.
+ //
+ // This value signals any special mode the build system may be running
+ // in. The two core modes are `no-external-modules` (bootstrapping of
+ // external modules is disabled) and `normal` (normal build system
+ // execution). Build system drivers may invent additional modes (for
+ // example, the bpkg `skeleton` mode that is used to evaluate depends
+ // clauses).
+ //
+ set ("build.mode",
+ no_external_modules ? "no-external-modules" : "normal");
+
set ("build.work", work);
set ("build.home", home);
@@ -167,10 +421,10 @@ namespace build2
//
set ("build.verbosity", uint64_t (verb));
- // Build system progress diagnostics.
+ // Build system diagnostics progress and color.
//
- // Note that it can be true, false, or NULL if progress was neither
- // requested nor suppressed.
+ // Note that these can be true, false, or NULL if neither requested nor
+ // suppressed explicitly.
//
{
value& v (gs.assign (vp.insert<bool> ("build.progress", v_g)));
@@ -178,6 +432,18 @@ namespace build2
v = *diag_progress_option;
}
+ {
+ value& v (gs.assign (vp.insert<bool> ("build.diag_color", v_g)));
+ if (diag_color_option)
+ v = *diag_color_option;
+ }
+
+ // These are the "effective" values that incorporate a suitable default
+ // if neither requested nor suppressed explicitly.
+ //
+ set ("build.show_progress", show_progress (verb_never));
+ set ("build.show_diag_color", show_diag_color ());
+
// Build system version (similar to what we do in the version module
// except here we don't include package epoch/revision).
//
@@ -233,7 +499,8 @@ namespace build2
// Did the user ask us to use config.guess?
//
string orig (config_guess
- ? run<string> (3,
+ ? run<string> (*this,
+ 3,
*config_guess,
[](string& l, bool) {return move (l);})
: BUILD2_HOST_TRIPLET);
@@ -256,7 +523,7 @@ namespace build2
set ("build.host.version", t.version);
set ("build.host.class", t.class_);
- set ("build.host", move (t));
+ build_host = &set ("build.host", move (t)).as<target_triplet> ();
}
catch (const invalid_argument& e)
{
@@ -280,6 +547,7 @@ namespace build2
t.insert<path_target> ();
t.insert<file> ();
+ t.insert<group> ();
t.insert<alias> ();
t.insert<dir> ();
t.insert<fsdir> ();
@@ -314,215 +582,51 @@ namespace build2
// Note that some config.config.* variables have project visibility thus
// the match argument is false.
//
- vp.insert_pattern ("config.**", nullopt, true, v_g, true, false);
+ vpats.insert ("config.**", nullopt, true, v_g, true, false);
// Parse and enter the command line variables. We do it before entering
     // any other variables so that all the variables that are overridden are
     // marked as such first. Then, as we enter variables, we can verify that
     // the override is allowed.
//
- for (size_t i (0); i != cmd_vars.size (); ++i)
{
- const string& s (cmd_vars[i]);
-
- istringstream is (s);
- is.exceptions (istringstream::failbit | istringstream::badbit);
-
- // Similar to buildspec we do "effective escaping" and only for ['"\$(]
- // (basically what's necessary inside a double-quoted literal plus the
- // single quote).
- //
- path_name in ("<cmdline>");
- lexer l (is, in, 1 /* line */, "\'\"\\$(");
+ size_t i (0);
+ for (; i != cmd_vars.size (); ++i)
+ {
+ const string& s (cmd_vars[i]);
- // At the buildfile level the scope-specific variable should be
- // separated from the directory with a whitespace, for example:
- //
- // ./ foo=$bar
- //
- // However, requiring this for command line variables would be too
- // inconvinient so we support both.
- //
- // We also have the optional visibility modifier as a first character of
- // the variable name:
- //
- // ! - global
- // % - project
- // / - scope
- //
- // The last one clashes a bit with the directory prefix:
- //
- // ./ /foo=bar
- // .//foo=bar
- //
- // But that's probably ok (the need for a scope-qualified override with
- // scope visibility should be pretty rare). Note also that to set the
- // value on the global scope we use !.
- //
- // And so the first token should be a word which can be either a
- // variable name (potentially with the directory qualification) or just
- // the directory, in which case it should be followed by another word
- // (unqualified variable name). To avoid treating any of the visibility
- // modifiers as special we use the cmdvar mode.
- //
- l.mode (lexer_mode::cmdvar);
- token t (l.next ());
+ pair<char, variable_override> p (
+ parse_variable_override (s, i, true /* buildspec */));
- optional<dir_path> dir;
- if (t.type == token_type::word)
- {
- string& v (t.value);
- size_t p (path::traits_type::rfind_separator (v));
+ char c (p.first);
+ variable_override& vo (p.second);
- if (p != string::npos && p != 0) // If first then visibility.
+ // Global and absolute scope overrides we can enter directly. Project
+ // and relative scope ones will be entered later for each project.
+ //
+ if (c == '!' || (vo.dir && vo.dir->absolute ()))
{
- if (p == v.size () - 1)
- {
- // Separate directory.
- //
- dir = dir_path (move (v));
- t = l.next ();
-
- // Target-specific overrides are not yet supported (and probably
- // never will be; the beast is already complex enough).
- //
- if (t.type == token_type::colon)
- fail << "'" << s << "' is a target-specific override" <<
- info << "use double '--' to treat this argument as buildspec";
- }
- else
- {
- // Combined directory.
- //
- // If double separator (visibility marker), then keep the first in
- // name.
- //
- if (p != 0 && path::traits_type::is_separator (v[p - 1]))
- --p;
-
- dir = dir_path (t.value, 0, p + 1); // Include the separator.
- t.value.erase (0, p + 1); // Erase the separator.
- }
+ scope& s (c == '!' ? gs : *sm.insert_out (*vo.dir)->second.front ());
- if (dir->relative ())
- {
- // Handle the special relative to base scope case (.../).
- //
- auto i (dir->begin ());
-
- if (*i == "...")
- dir = dir_path (++i, dir->end ()); // Note: can become empty.
- else
- dir->complete (); // Relative to CWD.
- }
+ auto p (s.vars.insert (vo.ovr));
+ assert (p.second); // Variable name is unique.
- if (dir->absolute ())
- dir->normalize ();
+ value& v (p.first);
+ v = move (vo.val);
}
- }
-
- token_type tt (l.next ().type);
-
- // The token should be the variable name followed by =, +=, or =+.
- //
- if (t.type != token_type::word || t.value.empty () ||
- (tt != token_type::assign &&
- tt != token_type::prepend &&
- tt != token_type::append))
- {
- fail << "expected variable assignment instead of '" << s << "'" <<
- info << "use double '--' to treat this argument as buildspec";
- }
-
- // Take care of the visibility. Note that here we rely on the fact that
- // none of these characters are lexer's name separators.
- //
- char c (t.value[0]);
-
- if (path::traits_type::is_separator (c))
- c = '/'; // Normalize.
-
- string n (t.value, c == '!' || c == '%' || c == '/' ? 1 : 0);
-
- if (c == '!' && dir)
- fail << "scope-qualified global override of variable " << n;
+ else
+ data_->var_overrides.push_back (move (vo));
- // Pre-enter the main variable. Note that we rely on all the overridable
- // variables with global visibility to be known (either entered or
- // handled via a pettern) at this stage.
- //
- variable& var (
- const_cast<variable&> (vp.insert (n, true /* overridable */)));
-
- const variable* o;
- {
- variable_visibility v (c == '/' ? variable_visibility::scope :
- c == '%' ? variable_visibility::project :
- variable_visibility::global);
-
- const char* k (tt == token_type::assign ? "__override" :
- tt == token_type::append ? "__suffix" : "__prefix");
-
- unique_ptr<variable> p (
- new variable {
- n + '.' + to_string (i + 1) + '.' + k,
- nullptr /* aliases */,
- nullptr /* type */,
- nullptr /* overrides */,
- v});
-
- // Back link.
+ // Save global overrides for nested contexts.
//
- p->aliases = p.get ();
- if (var.overrides != nullptr)
- swap (p->aliases,
- const_cast<variable*> (var.overrides.get ())->aliases);
-
- // Forward link.
- //
- p->overrides = move (var.overrides);
- var.overrides = move (p);
-
- o = var.overrides.get ();
+ if (c == '!')
+ data_->global_var_overrides.push_back (s);
}
- // Currently we expand project overrides in the global scope to keep
- // things simple. Pass original variable for diagnostics. Use current
- // working directory as pattern base.
+ // Parse any ad hoc project-wide overrides.
//
- parser p (*this);
- pair<value, token> r (p.parse_variable_value (l, gs, &work, var));
-
- if (r.second.type != token_type::eos)
- fail << "unexpected " << r.second << " in variable assignment "
- << "'" << s << "'";
-
- // Make sure the value is not typed.
- //
- if (r.first.type != nullptr)
- fail << "typed override of variable " << n;
-
- // Global and absolute scope overrides we can enter directly. Project
- // and relative scope ones will be entered later for each project.
- //
- if (c == '!' || (dir && dir->absolute ()))
- {
- scope& s (c == '!' ? gs : *sm.insert_out (*dir)->second.front ());
-
- auto p (s.vars.insert (*o));
- assert (p.second); // Variable name is unique.
-
- value& v (p.first);
- v = move (r.first);
- }
- else
- data_->var_overrides.push_back (
- variable_override {var, *o, move (dir), move (r.first)});
-
- // Save global overrides for nested contexts.
- //
- if (c == '!')
- data_->global_var_overrides.push_back (s);
+ if (var_ovr_func != nullptr)
+ var_ovr_func (*this, i);
}
// Enter remaining variable patterns and builtin variables.
@@ -531,24 +635,26 @@ namespace build2
const auto v_t (variable_visibility::target);
const auto v_q (variable_visibility::prereq);
- vp.insert_pattern<bool> ("config.**.configured", false, v_p);
+ vpats.insert<bool> ("config.**.configured", false, v_p);
- // file.cxx:import() (note: order is important; see insert_pattern()).
+ // file.cxx:import()
+ //
+ // Note: the order is important (see variable_patterns::insert()).
//
     // Note that if any are overridden, they are "pre-typed" by the config.**
// pattern above and we just "add" the types.
//
- vp.insert_pattern<abs_dir_path> ("config.import.*", true, v_g, true);
- vp.insert_pattern<path> ("config.import.**", true, v_g, true);
+ vpats.insert<abs_dir_path> ("config.import.*", true, v_g, true);
+ vpats.insert<path> ("config.import.**", true, v_g, true);
// module.cxx:boot/init_module().
//
// Note that we also have the config.<module>.configured variable (see
// above).
//
- vp.insert_pattern<bool> ("**.booted", false /* overridable */, v_p);
- vp.insert_pattern<bool> ("**.loaded", false, v_p);
- vp.insert_pattern<bool> ("**.configured", false, v_p);
+ vpats.insert<bool> ("**.booted", false /* overridable */, v_p);
+ vpats.insert<bool> ("**.loaded", false, v_p);
+ vpats.insert<bool> ("**.configured", false, v_p);
var_src_root = &vp.insert<dir_path> ("src_root");
var_out_root = &vp.insert<dir_path> ("out_root");
@@ -574,29 +680,71 @@ namespace build2
var_export_metadata = &vp.insert ("export.metadata", v_t); // Untyped.
var_extension = &vp.insert<string> ("extension", v_t);
- var_clean = &vp.insert<bool> ("clean", v_t);
- var_backlink = &vp.insert<string> ("backlink", v_t);
- var_include = &vp.insert<string> ("include", v_q);
+ var_update = &vp.insert<string> ("update", v_q);
+ var_clean = &vp.insert<bool> ("clean", v_t);
+ var_backlink = &vp.insert ("backlink", v_t); // Untyped.
+ var_include = &vp.insert<string> ("include", v_q);
// Backlink executables and (generated) documentation by default.
//
- gs.target_vars[exe::static_type]["*"].assign (var_backlink) = "true";
- gs.target_vars[doc::static_type]["*"].assign (var_backlink) = "true";
+ gs.target_vars[exe::static_type]["*"].assign (var_backlink) =
+ names {name ("true")};
+ gs.target_vars[doc::static_type]["*"].assign (var_backlink) =
+ names {name ("true")};
// Register builtin rules.
//
{
rule_map& r (gs.rules); // Note: global scope!
- //@@ outer
- r.insert<alias> (perform_id, 0, "alias", alias_rule::instance);
+ r.insert<alias> (perform_id, 0, "build.alias", alias_rule::instance);
- r.insert<fsdir> (perform_update_id, "fsdir", fsdir_rule::instance);
- r.insert<fsdir> (perform_clean_id, "fsdir", fsdir_rule::instance);
+ r.insert<fsdir> (perform_update_id, "build.fsdir", fsdir_rule::instance);
+ r.insert<fsdir> (perform_clean_id, "build.fsdir", fsdir_rule::instance);
- r.insert<mtime_target> (perform_update_id, "file", file_rule::instance);
- r.insert<mtime_target> (perform_clean_id, "file", file_rule::instance);
+ r.insert<mtime_target> (perform_update_id, "build.file", file_rule::instance);
+ r.insert<mtime_target> (perform_clean_id, "build.file", file_rule::instance);
}
+
+ // End of initialization.
+ //
+ load_generation = 1;
+ }
+
+ context::
+ context ()
+ : data_ (new data (*this)),
+ sched (nullptr),
+ mutexes (nullptr),
+ fcache (nullptr),
+ match_only (nullopt),
+ no_external_modules (true),
+ dry_run_option (false),
+ no_diag_buffer (false),
+ keep_going (false),
+ phase_mutex (*this),
+ scopes (data_->scopes),
+ targets (data_->targets),
+ var_pool (data_->var_pool),
+ var_patterns (data_->var_patterns),
+ var_overrides (data_->var_overrides),
+ functions (data_->functions),
+ global_scope (create_global_scope (data_->scopes)),
+ global_target_types (data_->global_target_types),
+ global_override_cache (data_->global_override_cache),
+ global_var_overrides (data_->global_var_overrides),
+ modules_lock (nullptr),
+ module_context (nullptr)
+ {
+ variable_pool& vp (data_->var_pool);
+
+ var_src_root = &vp.insert<dir_path> ("src_root");
+ var_out_root = &vp.insert<dir_path> ("out_root");
+
+ var_project = &vp.insert<project_name> ("project");
+ var_amalgamation = &vp.insert<dir_path> ("amalgamation");
+
+ load_generation = 1;
}
context::
@@ -606,6 +754,68 @@ namespace build2
}
void context::
+ enter_project_overrides (scope& rs,
+ const dir_path& out_base,
+ const variable_overrides& ovrs,
+ scope* as)
+ {
+ // The mildly tricky part here is to distinguish the situation where we
+ // are bootstrapping the same project multiple times. The first override
+ // that we set cannot already exist (because the override variable names
+ // are unique) so if it is already set, then it can only mean this project
+ // is already bootstrapped.
+ //
+ // This is further complicated by the project vs amalgamation logic (we
+ // may have already done the amalgamation but not the project). So we
+ // split it into two passes.
+ //
+ auto& sm (scopes.rw ());
+
+ for (const variable_override& o: ovrs)
+ {
+ if (o.ovr.visibility != variable_visibility::global)
+ continue;
+
+ // If we have a directory, enter the scope, similar to how we do
+ // it in the context ctor.
+ //
+ scope& s (
+ o.dir
+ ? *sm.insert_out ((out_base / *o.dir).normalize ())->second.front ()
+ : *(as != nullptr ? as : (as = rs.weak_scope ())));
+
+ auto p (s.vars.insert (o.ovr));
+
+ if (!p.second)
+ break;
+
+ value& v (p.first);
+ v = o.val;
+ }
+
+ for (const variable_override& o: ovrs)
+ {
+ // Ours is either project (%foo) or scope (/foo).
+ //
+ if (o.ovr.visibility == variable_visibility::global)
+ continue;
+
+ scope& s (
+ o.dir
+ ? *sm.insert_out ((out_base / *o.dir).normalize ())->second.front ()
+ : rs);
+
+ auto p (s.vars.insert (o.ovr));
+
+ if (!p.second)
+ break;
+
+ value& v (p.first);
+ v = o.val;
+ }
+ }
+
+ void context::
current_meta_operation (const meta_operation_info& mif)
{
if (current_mname != mif.name)
@@ -615,6 +825,7 @@ namespace build2
}
current_mif = &mif;
+ current_mdata = current_data_ptr (nullptr, null_current_data_deleter);
current_on = 0; // Reset.
}
@@ -623,9 +834,13 @@ namespace build2
const operation_info* outer_oif,
bool diag_noise)
{
- current_oname = (outer_oif == nullptr ? inner_oif : *outer_oif).name;
+ const auto& oif (outer_oif == nullptr ? inner_oif : *outer_oif);
+
+ current_oname = oif.name;
current_inner_oif = &inner_oif;
current_outer_oif = outer_oif;
+ current_inner_odata = current_data_ptr (nullptr, null_current_data_deleter);
+ current_outer_odata = current_data_ptr (nullptr, null_current_data_deleter);
current_on++;
current_mode = inner_oif.mode;
current_diag_noise = diag_noise;
@@ -635,6 +850,11 @@ namespace build2
dependency_count.store (0, memory_order_relaxed);
target_count.store (0, memory_order_relaxed);
skip_count.store (0, memory_order_relaxed);
+ resolve_count.store (0, memory_order_relaxed);
+
+ // Clear accumulated targets with post hoc prerequisites.
+ //
+ current_posthoc_targets.clear ();
}
bool run_phase_mutex::
@@ -667,11 +887,13 @@ namespace build2
}
else if (ctx_.phase != n)
{
- ctx_.sched.deactivate (false /* external */);
+ ++contention; // Protected by m_.
+
+ ctx_.sched->deactivate (false /* external */);
for (; ctx_.phase != n; v->wait (l)) ;
r = !fail_;
l.unlock (); // Important: activate() can block.
- ctx_.sched.activate (false /* external */);
+ ctx_.sched->activate (false /* external */);
}
else
r = !fail_;
@@ -683,9 +905,11 @@ namespace build2
{
if (!lm_.try_lock ())
{
- ctx_.sched.deactivate (false /* external */);
+ ctx_.sched->deactivate (false /* external */);
lm_.lock ();
- ctx_.sched.activate (false /* external */);
+ ctx_.sched->activate (false /* external */);
+
+ ++contention_load; // Protected by lm_.
}
r = !fail_; // Re-query.
}
@@ -733,9 +957,9 @@ namespace build2
// relock().
//
if (o == run_phase::match && n == run_phase::execute)
- ctx_.sched.push_phase ();
+ ctx_.sched->push_phase ();
else if (o == run_phase::execute && n == run_phase::match)
- ctx_.sched.pop_phase ();
+ ctx_.sched->pop_phase ();
if (v != nullptr)
{
@@ -746,7 +970,7 @@ namespace build2
}
}
- bool run_phase_mutex::
+ optional<bool> run_phase_mutex::
relock (run_phase o, run_phase n)
{
// Pretty much a fused unlock/lock implementation except that we always
@@ -755,6 +979,7 @@ namespace build2
assert (o != n);
bool r;
+ bool s (true); // True switch.
if (o == run_phase::load)
lm_.unlock ();
@@ -789,9 +1014,9 @@ namespace build2
// unlock().
//
if (o == run_phase::match && n == run_phase::execute)
- ctx_.sched.push_phase ();
+ ctx_.sched->push_phase ();
else if (o == run_phase::execute && n == run_phase::match)
- ctx_.sched.pop_phase ();
+ ctx_.sched->pop_phase ();
// Notify others that could be waiting for this phase.
//
@@ -803,11 +1028,13 @@ namespace build2
}
else // phase != n
{
- ctx_.sched.deactivate (false /* external */);
+ ++contention; // Protected by m_.
+
+ ctx_.sched->deactivate (false /* external */);
for (; ctx_.phase != n; v->wait (l)) ;
r = !fail_;
l.unlock (); // Important: activate() can block.
- ctx_.sched.activate (false /* external */);
+ ctx_.sched->activate (false /* external */);
}
}
@@ -815,14 +1042,23 @@ namespace build2
{
if (!lm_.try_lock ())
{
- ctx_.sched.deactivate (false /* external */);
+ // If we failed to acquire the load mutex, then we know there is (or
+ // was) someone before us in the load phase. And it's impossible to
+ // switch to a different phase between our calls to try_lock() above
+ // and lock() below because of our +1 in lc_.
+ //
+ s = false;
+
+ ctx_.sched->deactivate (false /* external */);
lm_.lock ();
- ctx_.sched.activate (false /* external */);
+ ctx_.sched->activate (false /* external */);
+
+ ++contention_load; // Protected by lm_.
}
r = !fail_; // Re-query.
}
- return r;
+ return r ? optional<bool> (s) : nullopt;
}
// C++17 deprecated uncaught_exception() so use uncaught_exceptions() if
@@ -888,35 +1124,35 @@ namespace build2
// phase_unlock
//
phase_unlock::
- phase_unlock (context& c, bool u, bool d)
- : ctx (u ? &c : nullptr), lock (nullptr)
+ phase_unlock (context* c, bool d)
+ : ctx (c), lock_ (nullptr)
{
- if (u && !d)
+ if (ctx != nullptr && !d)
unlock ();
}
void phase_unlock::
unlock ()
{
- if (ctx != nullptr && lock == nullptr)
+ if (ctx != nullptr && lock_ == nullptr)
{
- lock = phase_lock_instance;
- assert (&lock->ctx == ctx);
+ lock_ = phase_lock_instance;
+ assert (&lock_->ctx == ctx);
phase_lock_instance = nullptr; // Note: not lock->prev.
- ctx->phase_mutex.unlock (lock->phase);
+ ctx->phase_mutex.unlock (lock_->phase);
- //text << this_thread::get_id () << " phase unlock " << lock->phase;
+ //text << this_thread::get_id () << " phase unlock " << lock_->phase;
}
}
- phase_unlock::
- ~phase_unlock () noexcept (false)
+ void phase_unlock::
+ lock ()
{
- if (lock != nullptr)
+ if (lock_ != nullptr)
{
- bool r (ctx->phase_mutex.lock (lock->phase));
- phase_lock_instance = lock;
+ bool r (ctx->phase_mutex.lock (lock_->phase));
+ phase_lock_instance = lock_;
// Fail unless we are already failing. Note that we keep the phase
// locked since there will be phase_lock down the stack to unlock it.
@@ -924,10 +1160,16 @@ namespace build2
if (!r && !uncaught_exception ())
throw failed ();
- //text << this_thread::get_id () << " phase lock " << lock->phase;
+ //text << this_thread::get_id () << " phase lock " << lock_->phase;
}
}
+ phase_unlock::
+ ~phase_unlock () noexcept (false)
+ {
+ lock ();
+ }
+
// phase_switch
//
phase_switch::
@@ -937,7 +1179,8 @@ namespace build2
phase_lock* pl (phase_lock_instance);
assert (&pl->ctx == &ctx);
- if (!ctx.phase_mutex.relock (old_phase, new_phase))
+ optional<bool> r (ctx.phase_mutex.relock (old_phase, new_phase));
+ if (!r)
{
ctx.phase_mutex.relock (new_phase, old_phase);
throw failed ();
@@ -946,14 +1189,37 @@ namespace build2
pl->phase = new_phase;
if (new_phase == run_phase::load) // Note: load lock is exclusive.
+ {
ctx.load_generation++;
+ // Invalidate cached target base_scope values if we are switching from a
+ // non-load phase (we don't cache during load which means load->load
+ // switch doesn't have anything to invalidate).
+ //
+ // @@ This is still quite expensive on projects like Boost with a large
+ // number of files (targets) and a large number of load phase
+ // switches (due to directory buildfiles).
+ //
+ // Thinking some more on this, we shouldn't need to do this since such
+ // loads can (or at least should) only perform "island appends"; see the
+ // comment on context::phase for details.
+ //
+#if 0
+ if (*r)
+ {
+ for (const unique_ptr<target>& t: ctx.targets)
+ t->base_scope_.store (nullptr, memory_order_relaxed);
+ }
+#endif
+ }
+
//text << this_thread::get_id () << " phase switch "
// << old_phase << " " << new_phase;
}
#if 0
- // NOTE: see push/pop_phase() logic if trying to enable this.
+ // NOTE: see push/pop_phase() logic if trying to enable this. Also
+ // the load stuff above.
//
phase_switch::
phase_switch (phase_unlock&& u, phase_lock&& l)
diff --git a/libbuild2/context.hxx b/libbuild2/context.hxx
index c4d85c9..33fc892 100644
--- a/libbuild2/context.hxx
+++ b/libbuild2/context.hxx
@@ -21,13 +21,14 @@
namespace build2
{
class file_cache;
- class loaded_modules_lock;
+ class module_libraries_lock;
class LIBBUILD2_SYMEXPORT run_phase_mutex
{
public:
// Acquire a phase lock potentially blocking (unless already in the
// desired phase) until switching to the desired phase is possible.
+ // Return false on failure.
//
bool
lock (run_phase);
@@ -38,11 +39,22 @@ namespace build2
void
unlock (run_phase);
- // Switch from one phase to another.
+ // Switch from one phase to another. Return nullopt on failure (so can be
+ // used as bool), true if switched from a different phase, and false if
+ // joined/switched to the same phase (this, for example, can be used to
+ // decide if phase-switching housekeeping is really necessary). Note:
+ // currently only implemented for the load phase (always returns true
+ // for the others).
//
- bool
+ optional<bool>
relock (run_phase unlock, run_phase lock);
+ // Statistics.
+ //
+ public:
+ size_t contention = 0; // # of contentious phase (re)locks.
+ size_t contention_load = 0; // # of contentious load phase locks.
+
private:
friend class context;
@@ -61,7 +73,7 @@ namespace build2
// is exclusive so we have a separate mutex to serialize it (think of it
// as a second level locking).
//
- // When the mutex is unlocked (all three counters become zero, the phase
+ // When the mutex is unlocked (all three counters become zero), the phase
// is always changed to load (this is also the initial state).
//
context& ctx_;
@@ -94,8 +106,28 @@ namespace build2
explicit
global_mutexes (size_t vc)
- : variable_cache_size (vc),
- variable_cache (new shared_mutex[variable_cache_size]) {}
+ {
+ init (vc);
+ }
+
+ global_mutexes () = default; // Create uninitialized instance.
+
+ void
+ init (size_t vc)
+ {
+ variable_cache_size = vc;
+ variable_cache.reset (new shared_mutex[vc]);
+ }
+ };
+
+ // Match-only level.
+ //
+ // See the --match-only and --load-only options for background.
+ //
+ enum class match_only_level
+ {
+ alias, // Match only alias{} targets.
+ all // Match all targets.
};
// A build context encapsulates the state of a build. It is possible to have
@@ -120,9 +152,9 @@ namespace build2
// instead go the multiple communicating schedulers route, a la the job
// server).
//
- // The loaded_modules state (module.hxx) is shared among all the contexts
+ // The module_libraries state (module.hxx) is shared among all the contexts
// (there is no way to have multiple shared library loading "contexts") and
- // is protected by loaded_modules_lock. A nested context should normally
+ // is protected by module_libraries_lock. A nested context should normally
// inherit this lock value from its outer context.
//
// Note also that any given thread should not participate in multiple
@@ -138,17 +170,66 @@ namespace build2
//
class LIBBUILD2_SYMEXPORT context
{
+ public:
+ // In order to perform each operation the build system goes through the
+ // following phases:
+ //
+ // load - load the buildfiles
+ // match - search prerequisites and match rules
+ // execute - execute the matched rule
+ //
+ // The build system starts with a "serial load" phase and then continues
+ // with parallel match and execute. Match, however, can be interrupted
+ // both with load and execute.
+ //
+ // Match can be interrupted with "exclusive load" in order to load
+ // additional buildfiles. Similarly, it can be interrupted with (parallel)
+ // execute in order to build targetd required to complete the match (for
+ // execute in order to build targets required to complete the match (for
+ //
+ // Such interruptions are performed by phase change that is protected by
+ // phase_mutex (which is also used to synchronize the state changes
+ // between phases).
+ //
+ // Serial load can perform arbitrary changes to the build state. Exclusive
+ // load, however, can only perform "island appends". That is, it can
+ // create new "nodes" (variables, scopes, etc) but not (semantically)
+ // change already existing nodes or invalidate any references to such (the
+ // idea here is that one should be able to load additional buildfiles as
+ // long as they don't interfere with the existing build state). The
+ // "islands" are identified by the load_generation number (1 for the
+ // initial/serial load). It is incremented in case of a phase switch and
+ // can be stored in various "nodes" to verify modifications are only done
+ // "within the islands". Another example of invalidation would be
+ // insertion of a new scope "under" an existing target thus changing its
+ // scope hierarchy (and potentially even its base scope). This would be
+ // bad because we may have made decisions based on the original hierarchy,
+ // for example, we may have queried a variable which in the new hierarchy
+ // would "see" a new value from the newly inserted scope.
+ //
+ // The special load_generation value 0 indicates initialization before
+ // anything has been loaded. Currently, it is changed to 1 at the end
+ // of the context constructor.
+ //
+ // Note: these must come (and thus be initialized) before the data_ member.
+ //
+ run_phase phase = run_phase::load;
+ size_t load_generation = 0;
+
+ private:
struct data;
unique_ptr<data> data_;
public:
- scheduler& sched;
- global_mutexes& mutexes;
- file_cache& fcache;
+ // These are only NULL for the "bare minimum" context (see below).
+ //
+ scheduler* sched;
+ global_mutexes* mutexes;
+ file_cache* fcache;
- // Match only flag (see --match-only but also dist).
+ // Match only flag/level (see --{load,match}-only but also dist).
//
- bool match_only;
+ optional<match_only_level> match_only;
// Skip booting external modules flag (see --no-external-modules).
//
@@ -189,6 +270,10 @@ namespace build2
bool dry_run = false;
bool dry_run_option;
+ // Diagnostics buffering flag (--no-diag-buffer).
+ //
+ bool no_diag_buffer;
+
// Keep going flag.
//
// Note that setting it to false is not of much help unless we are running
@@ -197,39 +282,13 @@ namespace build2
//
bool keep_going;
- // In order to perform each operation the build system goes through the
- // following phases:
- //
- // load - load the buildfiles
- // match - search prerequisites and match rules
- // execute - execute the matched rule
- //
- // The build system starts with a "serial load" phase and then continues
- // with parallel match and execute. Match, however, can be interrupted
- // both with load and execute.
- //
- // Match can be interrupted with "exclusive load" in order to load
- // additional buildfiles. Similarly, it can be interrupted with (parallel)
- // execute in order to build targetd required to complete the match (for
- // example, generated source code or source code generators themselves).
- //
- // Such interruptions are performed by phase change that is protected by
- // phase_mutex (which is also used to synchronize the state changes
- // between phases).
+ // Targets to trace (see the --trace-* options).
//
- // Serial load can perform arbitrary changes to the build state. Exclusive
- // load, however, can only perform "island appends". That is, it can
- // create new "nodes" (variables, scopes, etc) but not (semantically)
- // change already existing nodes or invalidate any references to such (the
- // idea here is that one should be able to load additional buildfiles as
- // long as they don't interfere with the existing build state). The
- // "islands" are identified by the load_generation number (0 for the
- // initial/serial load). It is incremented in case of a phase switch and
- // can be stored in various "nodes" to verify modifications are only done
- // "within the islands".
+ // Note that these must be set after construction and must remain valid
+ // for the lifetime of the context instance.
//
- run_phase phase = run_phase::load;
- size_t load_generation = 0;
+ const vector<name>* trace_match = nullptr;
+ const vector<name>* trace_execute = nullptr;
// A "tri-mutex" that keeps all the threads in one of the three phases.
// When a thread wants to switch a phase, it has to wait for all the other
@@ -270,6 +329,7 @@ namespace build2
string current_oname;
const meta_operation_info* current_mif;
+
const operation_info* current_inner_oif;
const operation_info* current_outer_oif;
@@ -291,6 +351,22 @@ namespace build2
(current_mname.empty () && current_oname == mo));
};
+ // Meta/operation-specific context-global auxiliary data storage.
+ //
+ // Note: cleared by current_[meta_]operation() below. Normally set by
+ // meta/operation-specific callbacks from [meta_]operation_info.
+ //
+ // Note also: watch out for MT-safety in the data itself.
+ //
+ static void
+ null_current_data_deleter (void* p) { assert (p == nullptr); }
+
+ using current_data_ptr = unique_ptr<void, void (*) (void*)>;
+
+ current_data_ptr current_mdata = {nullptr, null_current_data_deleter};
+ current_data_ptr current_inner_odata = {nullptr, null_current_data_deleter};
+ current_data_ptr current_outer_odata = {nullptr, null_current_data_deleter};
+
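
A minimal sketch of how a meta/operation callback might use this storage (an editor's illustration; my_odata and its deleter are hypothetical, ctx is the current context):

  struct my_odata { size_t count = 0; };

  static void
  my_odata_deleter (void* p) { delete static_cast<my_odata*> (p); }

  // From an operation_info callback: stash the data for this operation.
  //
  ctx.current_inner_odata =
    context::current_data_ptr (new my_odata, &my_odata_deleter);

  // Later, while this operation is current:
  //
  static_cast<my_odata*> (ctx.current_inner_odata.get ())->count++;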
// Current operation number (1-based) in the meta-operation batch.
//
size_t current_on;
@@ -329,20 +405,47 @@ namespace build2
// decremented after such recipe has been executed. If such a recipe has
// skipped executing the operation, then it should increment the skip
// count. These two counters are used for progress monitoring and
- // diagnostics.
+ // diagnostics. The resolve count keeps track of the number of targets
+ // matched but not executed as a result of the resolve_members() calls
+ // (see also target::resolve_counted).
//
atomic_count dependency_count;
atomic_count target_count;
atomic_count skip_count;
+ atomic_count resolve_count;
// Build state (scopes, targets, variables, etc).
//
const scope_map& scopes;
target_set& targets;
- const variable_pool& var_pool;
+ const variable_pool& var_pool; // Public variables pool.
+ const variable_patterns& var_patterns; // Public variables patterns.
const variable_overrides& var_overrides; // Project and relative scope.
function_map& functions;
+ // Current targets with post hoc prerequisites.
+ //
+ // Note that we don't expect many of these so a simple mutex should be
+ // sufficient. Note also that we may end up adding more entries as we
+ // match existing ones, so we use a list for node and iterator stability.
+ // See match_posthoc() for details.
+ //
+ struct posthoc_target
+ {
+ struct prerequisite_target
+ {
+ const build2::target* target;
+ uint64_t match_options;
+ };
+
+ build2::action action;
+ reference_wrapper<const build2::target> target;
+ vector<prerequisite_target> prerequisite_targets;
+ };
+
+ list<posthoc_target> current_posthoc_targets;
+ mutex current_posthoc_targets_mutex;
+
// Global scope.
//
const scope& global_scope;
@@ -350,6 +453,10 @@ namespace build2
variable_override_cache& global_override_cache;
const strings& global_var_overrides;
+ // Cached values (from global scope).
+ //
+ const target_triplet* build_host; // build.host
+
// Cached variables.
//
@@ -379,8 +486,8 @@ namespace build2
const variable* var_import_build2;
const variable* var_import_target;
- // The import.metadata variable and the --build2-metadata option are used
- // to pass the metadata compatibility version.
+ // The import.metadata export stub variable and the --build2-metadata
+ // executable option are used to pass the metadata compatibility version.
//
// This serves both as an indication that the metadata is required (can be
// useful, for example, in cases where it is expensive to calculate) as
@@ -392,7 +499,8 @@ namespace build2
// The export.metadata value should start with the version followed by the
// metadata variable prefix (for example, cli in cli.version).
//
- // The following metadata variable names have pre-defined meaning:
+ // The following metadata variable names have pre-defined meaning for
+ // executable targets (exe{}; see also process_path_ex):
//
// <var-prefix>.name = [string] # Stable name for diagnostics.
// <var-prefix>.version = [string] # Version for diagnostics.
@@ -402,7 +510,8 @@ namespace build2
// If the <var-prefix>.name variable is missing, it is set to the target
// name as imported.
//
- // See also process_path_ex.
+ // Note that the same mechanism is used for library user metadata (see
+ // cc::pkgconfig_{load,save}() for details).
//
const variable* var_import_metadata;
const variable* var_export_metadata;
@@ -411,6 +520,21 @@ namespace build2
//
const variable* var_extension;
+ // This variable can only be specified as prerequisite-specific (see the
+ // `include` variable for details).
+ //
+ // [string] prerequisite visibility
+ //
+ // Valid values are `true` and `false`. Additionally, some rules (and
+ // potentially only for certain types of prerequisites) may support the
+ // `unmatch` (match but do not update, if possible), `match` (update
+ // during match), and `execute` (update during execute, as is normally)
+ // during match), and `execute` (update during execute, as normally done)
+ // semantics by default). Note that if unmatch is impossible, then the
+ // prerequisite is treated as ad hoc.
+ //
+ const variable* var_update;
+
// Note that this variable can also be specified as prerequisite-specific
// (see the `include` variable for details).
//
@@ -418,18 +542,51 @@ namespace build2
//
const variable* var_clean;
- // Forwarded configuration backlink mode. Valid values are:
+ // Forwarded configuration backlink mode. The value has two components
+ // in the form:
+ //
+ // <mode> [<print>]
+ //
+ // Valid <mode> values are:
//
// false - no link.
// true - make a link using appropriate mechanism.
// symbolic - make a symbolic link.
// hard - make a hard link.
// copy - make a copy.
- // overwrite - copy over but don't remove on clean (committed gen code).
+ // overwrite - copy over but don't remove on clean.
+ // group - inherit the group mode (only valid for group members).
//
- // Note that it can be set by a matching rule as a rule-specific variable.
+ // The <print> component, if specified, should be either true or false and
+ // can be used to suppress printing of specific ad hoc group members at
+ // verbosity level 1. Note that it cannot be false for the primary member.
//
- // [string] target visibility
+ // Note that this value can be set by a matching rule as a rule-specific
+ // variable.
+ //
+ // Note also that the overwrite mode was originally meant for handling
+ // pregenerated source code. But in the end this did not pan out for
+ // the following reasons:
+ //
+ // 1. This would mean that the pregenerated and regenerated files end up
+ // in the same place (e.g., depending on the develop mode) and it's
+ // hard to make this work without resorting to a conditional graph.
+ //
+ // This could potentially be addressed by allowing backlink to specify
+ // a different location (similar to dist).
+ //
+ // 2. This support for pregenerated source code would be tied to forwarded
+ // configurations.
+ //
+ // Nevertheless, there may be a kernel of an idea here in that we may be
+ // able to provide a built-in "post-copy" mechanism which would allow one
+ // to have a pregenerated setup even when using non-ad hoc recipes
+ // (currently we just manually diff/copy stuff at the end of a recipe).
+ // (Or maybe we should stick to ad hoc recipes with post-diff/copy and
+ // just expose a mechanism to delegate to a different rule, which we
+ // already have).
+ //
+ // [names] target visibility
//
const variable* var_backlink;
@@ -456,14 +613,19 @@ namespace build2
// Sometimes it may be desirable to apply exclusions only to specific
// operations. The initial idea was to extend this value to allow
// specifying the operation (e.g., clean@false). However, later we
- // realized that we could reuse the "operation variables" (clean, install,
- // test) with a more natural-looking result. Note that currently we only
- // recognize the built-in clean variable (for other variables we will need
- // some kind of registration in an operation-to-variable map, probably in
- // root scope). See also install::file_rule::filter().
+ // realized that we could reuse the "operation-specific variables"
+ // (update, clean, install, test; see project_operation_info) with a more
+ // natural-looking and composable result. Plus, this allows for
+ // operation-specific "modifiers", for example, "unmatch" and "update
+ // during match" logic for update (see var_update for details) or
+ // requiring explicit install=true to install exe{} prerequisites (see
+ // install::file_rule::filter()).
//
- // To query this value in rule implementations use the include() helpers
- // from <libbuild2/prerequisites.hxx>.
+ // To query this value and its operation-specific override if any, the
+ // rule implementations use the include() helper.
+ //
+ // Note that there are also related (but quite different) for_<operation>
+ // variables for operations that act as outer (e.g., test, install).
//
// [string] prereq visibility
//
@@ -480,14 +642,34 @@ namespace build2
build2::meta_operation_table meta_operation_table;
build2::operation_table operation_table;
+ // Import cache (see import_load()).
+ //
+ struct import_key
+ {
+ dir_path out_root; // Imported project's out root.
+ name target; // Imported target (unqualified).
+ uint64_t metadata; // Metadata version (0 if none).
+
+ friend bool
+ operator< (const import_key& x, const import_key& y)
+ {
+ int r;
+ return ((r = x.out_root.compare (y.out_root)) != 0 ? r < 0 :
+ (r = x.target.compare (y.target)) != 0 ? r < 0 :
+ x.metadata < y.metadata);
+ }
+ };
+
+ map<import_key, pair<names, const scope&>> import_cache;
+
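
A lookup sketch (an editor's assumption about how import_load() might consult this cache, not code from the patch; out_root, tgt, and meta are hypothetical locals):

  context::import_key k {move (out_root),       // dir_path
                         move (tgt),            // name (unqualified)
                         meta ? *meta : 0};     // metadata version

  auto i (ctx.import_cache.find (k));

  if (i != ctx.import_cache.end ())
  {
    const names& ns (i->second.first);   // Previously computed names.
    const scope& rs (i->second.second);  // Imported project's root scope.

    // ... reuse ns/rs instead of re-running the import logic ...
  }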
// The old/new src_root remapping for subprojects.
//
dir_path old_src_root;
dir_path new_src_root;
- // NULL if this context hasn't already locked the loaded_modules state.
+ // NULL if this context hasn't already locked the module_libraries state.
//
- const loaded_modules_lock* modules_lock;
+ const module_libraries_lock* modules_lock;
// Nested context for updating build system modules and ad hoc recipes.
//
@@ -504,17 +686,76 @@ namespace build2
// properly setup context (including, normally, a self-reference in
// modules_context).
//
- explicit
+ // The var_override_function callback can be used to parse ad hoc project-
+ // wide variable overrides (see parse_variable_override()). This has to
+ // happen at a specific point during context construction (see the
+ // implementation for details).
+ //
+ // Note: see also the trace_* data members that, if needed, must be set
+ // separately, after construction.
+ //
+ struct reserves
+ {
+ size_t targets;
+ size_t variables;
+
+ reserves (): targets (0), variables (0) {}
+ reserves (size_t t, size_t v): targets (t), variables (v) {}
+ };
+
+ using var_override_function = void (context&, size_t&);
+
context (scheduler&,
global_mutexes&,
file_cache&,
- bool match_only = false,
+ optional<match_only_level> match_only = nullopt,
bool no_external_modules = false,
bool dry_run = false,
+ bool no_diag_buffer = false,
bool keep_going = true,
const strings& cmd_vars = {},
+ reserves = {0, 160},
optional<context*> module_context = nullptr,
- const loaded_modules_lock* inherited_mudules_lock = nullptr);
+ const module_libraries_lock* inherited_modules_lock = nullptr,
+ const function<var_override_function>& = nullptr);
+
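
Putting the new parameters together, a construction sketch (an editor's illustration; the scheduler, mutexes, and file cache setup is elided and the reservation sizes are invented):

  // Assume sched, mx, and fc have been set up by the driver.
  //
  strings cmd_vars {"config.cxx=g++"};

  context ctx (sched, mx, fc,
               nullopt /* match_only */,
               false   /* no_external_modules */,
               false   /* dry_run */,
               false   /* no_diag_buffer */,
               true    /* keep_going */,
               cmd_vars,
               context::reserves (100000 /* targets */, 2000 /* variables */));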
+ // Special context with bare minimum of initializations. It is only
+ // guaranteed to be sufficiently initialized to call extract_variable().
+ //
+ // Note that for this purpose you may omit calls to init_diag() and
+ // init().
+ //
+ context ();
+
+ // Reserve elements in containers to avoid re-allocation/re-hashing. Zero
+ // values are ignored (that is, the corresponding container reserve()
+ // function is not called). Can only be called in the load phase.
+ //
+ void
+ reserve (reserves);
+
+ // Parse a variable override returning its type in the first half of the
+ // pair. Index is the variable index (used to derive a unique name) and if
+ // buildspec is true, then assume `--` is used as a separator between
+ // variables and buildspec and issue appropriate diagnostics.
+ //
+ // Note: should only be called from the var_override_function constructor
+ // callback.
+ //
+ pair<char, variable_override>
+ parse_variable_override (const string& var, size_t index, bool buildspec);
+
+ // Enter project-wide (as opposed to global) variable overrides.
+ //
+ // If the amalgamation scope is specified, then use it instead of
+ // rs.weak_scope() to set overrides with global visibility (make sure you
+ // understand the implications before doing this).
+ //
+ void
+ enter_project_overrides (scope& rs,
+ const dir_path& out_base,
+ const variable_overrides&,
+ scope* amalgamation = nullptr);
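
A call sketch (an editor's illustration of the typical call shape; where exactly the real bootstrap/load code invokes this is outside this hunk):

  // After bootstrapping a project's root scope rs with out_base as its out
  // directory, apply the command line overrides that could not be entered
  // directly in the context constructor:
  //
  ctx.enter_project_overrides (rs, out_base, ctx.var_overrides);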
// Set current meta-operation and operation.
//
@@ -608,14 +849,20 @@ namespace build2
//
struct LIBBUILD2_SYMEXPORT phase_unlock
{
- phase_unlock (context&, bool unlock = true, bool delay = false);
+ explicit phase_unlock (context*, bool delay = false);
+ explicit phase_unlock (context& ctx, bool delay = false)
+ : phase_unlock (&ctx, delay) {}
+
~phase_unlock () noexcept (false);
void
unlock ();
+ void
+ lock ();
+
context* ctx;
- phase_lock* lock;
+ phase_lock* lock_;
};
// Assuming we have a lock on the current phase, temporarily switch to a
@@ -661,8 +908,8 @@ namespace build2
// Note: move-assignable to empty only.
//
- wait_guard (wait_guard&&);
- wait_guard& operator= (wait_guard&&);
+ wait_guard (wait_guard&&) noexcept;
+ wait_guard& operator= (wait_guard&&) noexcept;
wait_guard (const wait_guard&) = delete;
wait_guard& operator= (const wait_guard&) = delete;
diff --git a/libbuild2/context.ixx b/libbuild2/context.ixx
index 4f86c28..6c8c428 100644
--- a/libbuild2/context.ixx
+++ b/libbuild2/context.ixx
@@ -31,7 +31,7 @@ namespace build2
}
inline wait_guard::
- wait_guard (wait_guard&& x)
+ wait_guard (wait_guard&& x) noexcept
: ctx (x.ctx),
start_count (x.start_count),
task_count (x.task_count),
@@ -41,7 +41,7 @@ namespace build2
}
inline wait_guard& wait_guard::
- operator= (wait_guard&& x)
+ operator= (wait_guard&& x) noexcept
{
if (&x != this)
{
@@ -56,8 +56,8 @@ namespace build2
inline void wait_guard::
wait ()
{
- phase_unlock u (*ctx, phase, true /* delay */);
- ctx->sched.wait (start_count, *task_count, u);
+ phase_unlock u (phase ? ctx : nullptr, true /* delay */);
+ ctx->sched->wait (start_count, *task_count, u);
task_count = nullptr;
}
}
diff --git a/libbuild2/cxx/init.cxx b/libbuild2/cxx/init.cxx
index cd5169d..8159d18 100644
--- a/libbuild2/cxx/init.cxx
+++ b/libbuild2/cxx/init.cxx
@@ -7,10 +7,12 @@
#include <libbuild2/diagnostics.hxx>
#include <libbuild2/config/utility.hxx>
+#include <libbuild2/install/utility.hxx>
#include <libbuild2/cc/guess.hxx>
#include <libbuild2/cc/module.hxx>
+#include <libbuild2/cc/target.hxx> // pc*
#include <libbuild2/cxx/target.hxx>
#ifndef BUILD2_DEFAULT_CXX
@@ -62,8 +64,8 @@ namespace build2
uint64_t mi (ci.version.minor);
uint64_t p (ci.version.patch);
- // Besides various `c++NN` we have two special values: `latest` and
- // `experimental`.
+ // Besides various `NN` we have two special values: `latest` and
+ // `experimental`. It can also be `gnu++NN`.
//
// The semantics of the `latest` value is the latest available standard
// that is not necessarily complete or final but is practically usable.
@@ -91,9 +93,27 @@ namespace build2
bool latest (v != nullptr && *v == "latest");
bool experimental (v != nullptr && *v == "experimental");
+ // This helper helps recognize both NN and [cC]++NN to avoid an endless
+ // stream of user questions. It can also be used to recognize Nx in
+ // addition to NN (e.g., "14" and "1y").
+ //
+ auto stdcmp = [v] (const char* nn, const char* nx = nullptr)
+ {
+ if (v != nullptr)
+ {
+ const char* s (v->c_str ());
+ if ((s[0] == 'c' || s[0] == 'C') && s[1] == '+' && s[2] == '+')
+ s += 3;
+
+ return strcmp (s, nn) == 0 || (nx != nullptr && strcmp (s, nx) == 0);
+ }
+
+ return false;
+ };
+
// Feature flags.
//
- auto& vp (rs.var_pool ());
+ auto& vp (rs.var_pool (true /* public */)); // All qualified.
// Similar to config.cxx.std, config.cxx.features.* overrides
// cxx.features.*.
@@ -157,6 +177,10 @@ namespace build2
i = mode.insert (i, move (o)) + 1;
};
+ // Derive approximate __cplusplus value from the standard if possible.
+ //
+ optional<uint32_t> cplusplus;
+
switch (cl)
{
case compiler_class::msvc:
@@ -186,6 +210,26 @@ namespace build2
{
if (v14_3)
o = "/std:c++latest";
+
+ // According to the documentation:
+ //
+ // "The value of __cplusplus with the /std:c++latest option
+ // depends on the version of Visual Studio. It's always at least
+ // one higher than the highest supported __cplusplus standard
+ // value supported by your version of Visual Studio."
+ //
+ if (v16_11)
+ cplusplus = 202002 + 1;
+ else if (v16_0)
+ cplusplus = 201703 + 1;
+ else if (v14_3)
+ cplusplus = 201402 + 1;
+ else if (mj >= 19)
+ cplusplus = 201402;
+ else if (mj >= 16)
+ cplusplus = 201103;
+ else
+ cplusplus = 199711;
}
else if (latest)
{
@@ -194,55 +238,78 @@ namespace build2
// for this mode. So starting from 16 we only enable it in
// `experimental`.
//
+ // Note: no /std:c++23 yet as of MSVC 17.6.
+ //
if (v16_11)
o = "/std:c++20";
else if (v16_0)
o = "/std:c++17";
else if (v14_3)
o = "/std:c++latest";
+
+ if (v16_11)
+ cplusplus = 202002;
+ else if (v16_0)
+ cplusplus = 201703;
+ else if (v14_3)
+ cplusplus = 201402 + 1;
+ else if (mj >= 19)
+ cplusplus = 201402;
+ else if (mj >= 16)
+ cplusplus = 201103;
+ else
+ cplusplus = 199711;
}
else if (v == nullptr)
- ;
- else if (*v != "98" && *v != "03")
+ {
+ // @@ TODO: map defaults to cplusplus for each version.
+ }
+ else if (!stdcmp ("98") && !stdcmp ("03"))
{
bool sup (false);
- if (*v == "11") // C++11 since VS2010/10.0.
+ if (stdcmp ("11", "0x")) // C++11 since VS2010/10.0.
{
sup = mj >= 16;
+ cplusplus = 201103;
}
- else if (*v == "14") // C++14 since VS2015/14.0.
+ else if (stdcmp ("14", "1y")) // C++14 since VS2015/14.0.
{
sup = mj >= 19;
+ cplusplus = 201402;
}
- else if (*v == "17") // C++17 since VS2015/14.0u2.
+ else if (stdcmp ("17", "1z")) // C++17 since VS2015/14.0u2.
{
// Note: the VC15 compiler version is 19.10.
//
sup = (mj > 19 ||
(mj == 19 && (mi > 0 || (mi == 0 && p >= 23918))));
+ cplusplus = 201703;
}
- else if (*v == "20") // C++20 since VS2019/16.11.
+ else if (stdcmp ("20", "2a")) // C++20 since VS2019/16.11.
{
sup = v16_11;
+ cplusplus = 202002;
}
if (!sup)
- fail << "C++" << *v << " is not supported by " << ci.signature <<
+ fail << "C++ " << *v << " is not supported by " << ci.signature <<
info << "required by " << project (rs) << '@' << rs;
if (v15_3)
{
- if (*v == "20") o = "/std:c++20";
- else if (*v == "17") o = "/std:c++17";
- else if (*v == "14") o = "/std:c++14";
+ if (stdcmp ("20", "2a")) o = "/std:c++20";
+ else if (stdcmp ("17", "1z")) o = "/std:c++17";
+ else if (stdcmp ("14", "1y")) o = "/std:c++14";
}
else if (v14_3)
{
- if (*v == "14") o = "/std:c++14";
- else if (*v == "17") o = "/std:c++latest";
+ if (stdcmp ("14", "1y")) o = "/std:c++14";
+ else if (stdcmp ("17", "1z")) o = "/std:c++latest";
}
}
+ else
+ cplusplus = 199711;
if (!o.empty ())
prepend (move (o));
@@ -268,11 +335,33 @@ namespace build2
{
case compiler_type::gcc:
{
- if (mj >= 11) o = "-std=c++23"; // 23
- else if (mj >= 8) o = "-std=c++2a"; // 20
- else if (mj >= 5) o = "-std=c++1z"; // 17
- else if (mj == 4 && mi >= 8) o = "-std=c++1y"; // 14
- else if (mj == 4 && mi >= 4) o = "-std=c++0x"; // 11
+ if (mj >= 11)
+ {
+ o = "-std=c++23";
+ cplusplus = 202302;
+ }
+ else if (mj >= 8)
+ {
+ o = "-std=c++2a";
+ cplusplus = 202002;
+ }
+ else if (mj >= 5)
+ {
+ o = "-std=c++1z";
+ cplusplus = 201703;
+ }
+ else if (mj == 4 && mi >= 8)
+ {
+ o = "-std=c++1y";
+ cplusplus = 201402;
+ }
+ else if (mj == 4 && mi >= 4)
+ {
+ o = "-std=c++0x";
+ cplusplus = 201103;
+ }
+ else
+ cplusplus = 199711;
break;
}
@@ -290,21 +379,56 @@ namespace build2
// MSVC.
//
- if (mj >= 13) o = "-std=c++2b";
- else if (mj == 10 &&
- latest && tt.system == "win32-msvc") o = "-std=c++17";
- else if (mj >= 5) o = "-std=c++2a";
- else if (mj > 3 || (mj == 3 && mi >= 5)) o = "-std=c++1z";
- else if (mj == 3 && mi >= 4) o = "-std=c++1y";
- else /* ??? */ o = "-std=c++0x";
+ if (mj >= 13)
+ {
+ o = "-std=c++2b";
+ cplusplus = 202302;
+ }
+ else if (mj == 10 && latest && tt.system == "win32-msvc")
+ {
+ o = "-std=c++17";
+ cplusplus = 201703;
+ }
+ else if (mj >= 5)
+ {
+ o = "-std=c++2a";
+ cplusplus = 202002;
+ }
+ else if (mj > 3 || (mj == 3 && mi >= 5))
+ {
+ o = "-std=c++1z";
+ cplusplus = 201703;
+ }
+ else if (mj == 3 && mi >= 4)
+ {
+ o = "-std=c++1y";
+ cplusplus = 201402;
+ }
+ else /* ??? */
+ {
+ o = "-std=c++0x";
+ cplusplus = 201103;
+ }
break;
}
case compiler_type::icc:
{
- if (mj >= 17) o = "-std=c++1z";
- else if (mj > 15 || (mj == 15 && p >= 3)) o = "-std=c++1y";
- else /* ??? */ o = "-std=c++0x";
+ if (mj >= 17)
+ {
+ o = "-std=c++1z";
+ cplusplus = 201703;
+ }
+ else if (mj > 15 || (mj == 15 && p >= 3))
+ {
+ o = "-std=c++1y";
+ cplusplus = 201402;
+ }
+ else /* ??? */
+ {
+ o = "-std=c++0x";
+ cplusplus = 201103;
+ }
break;
}
@@ -313,22 +437,33 @@ namespace build2
}
}
else if (v == nullptr)
- ;
+ {
+ // @@ TODO: map defaults to cplusplus for each version.
+ }
else
{
- // Translate 11 to 0x, 14 to 1y, 17 to 1z, 20 to 2a, and 23 to 2b
- // for compatibility with older versions of the compilers.
+ // Translate 11 to 0x, 14 to 1y, 17 to 1z, 20 to 2a, 23 to 2b, and
+ // 26 to 2c for compatibility with older versions of the
+ // compilers.
+ //
+ // @@ TMP: update C++26 __cplusplus value once known.
//
o = "-std=";
- if (*v == "23") o += "c++2b";
- else if (*v == "20") o += "c++2a";
- else if (*v == "17") o += "c++1z";
- else if (*v == "14") o += "c++1y";
- else if (*v == "11") o += "c++0x";
- else if (*v == "03") o += "c++03";
- else if (*v == "98") o += "c++98";
- else o += *v; // In case the user specifies `gnu++NN` or some such.
+ if (stdcmp ("26", "2c")) {o += "c++2c"; cplusplus = 202400;}
+ else if (stdcmp ("23", "2b")) {o += "c++2b"; cplusplus = 202302;}
+ else if (stdcmp ("20", "2a")) {o += "c++2a"; cplusplus = 202002;}
+ else if (stdcmp ("17", "1z")) {o += "c++1z"; cplusplus = 201703;}
+ else if (stdcmp ("14", "1y")) {o += "c++1y"; cplusplus = 201402;}
+ else if (stdcmp ("11", "0x")) {o += "c++0x"; cplusplus = 201103;}
+ else if (stdcmp ("03") ) {o += "c++03"; cplusplus = 199711;}
+ else if (stdcmp ("98") ) {o += "c++98"; cplusplus = 199711;}
+ else
+ {
+ o += *v; // In case the user specifies `gnu++NN` or some such.
+
+ // @@ TODO: can we still try to derive cplusplus value?
+ }
}
if (!o.empty ())
@@ -338,6 +473,8 @@ namespace build2
}
}
+ // Additional experimental options.
+ //
if (experimental)
{
switch (ct)
@@ -357,85 +494,124 @@ namespace build2
default:
break;
}
+ }
- // Unless disabled by the user, try to enable C++ modules.
- //
- if (!modules.value || *modules.value)
+ // Unless disabled by the user, try to enable C++ modules.
+ //
+    // NOTE: see also the diagnostics about modules support required in the
+    // compile rule.
+ //
+ if (!modules.value || *modules.value)
+ {
+ switch (ct)
{
- switch (ct)
+ case compiler_type::msvc:
{
- case compiler_type::msvc:
+ // Modules are enabled by default in /std:c++20 and
+ // /std:c++latest with both defining __cpp_modules to 201907
+ // (final C++20 module), at least as of 17.6 (LTS).
+ //
+ // @@ Should we enable modules by default? There are still some
+ // serious bugs, like inability to both `import std;` and
+ // `#include <string>` in the same translation unit (see Visual
+ // Studio issue #10541166).
+ //
+ if (modules.value)
{
- // While modules are supported in VC 15.0 (19.10), there is a
- // bug in the separate interface/implementation unit support
- // which makes them pretty much unusable. This has been fixed in
- // 15.3 (19.11). And 15.5 (19.12) supports the `export module
- // M;` syntax. And 16.4 (19.24) supports the global module
- // fragment. And in 16.8 all the modules-related options have
- // been changed. Seeing that the whole thing is unusable anyway,
- // we disable it for 16.8 or later for now.
- //
- if ((mj > 19 || (mj == 19 && mi >= (modules.value ? 10 : 12))) &&
- (mj < 19 || (mj == 19 && mi < 28) || modules.value))
+ if (cplusplus && *cplusplus < 202002)
{
- prepend (
- mj > 19 || mi >= 24 ?
- "/D__cpp_modules=201810" : // p1103 (merged modules)
- mj == 19 || mi >= 12 ?
- "/D__cpp_modules=201704" : // p0629r0 (export module M;)
- "/D__cpp_modules=201703"); // n4647 ( module M;)
-
- prepend ("/experimental:module");
- modules = true;
+ fail << "support for C++ modules requires C++20 or later" <<
+ info << "standard in use is " << *cplusplus <<
+ info << "required by " << project (rs) << '@' << rs;
}
- break;
- }
- case compiler_type::gcc:
- {
- // We use the module mapper support which is only available
- // since GCC 11. And since we are not yet capable of supporting
- // generated headers via the mapper, we require the user to
- // explicitly request modules.
- //
- if (mj >= 11 && modules.value)
+
+ if (mj < 19 || (mj == 19 && mi < 36))
{
- // Defines __cpp_modules:
- //
- // 11 -- 201810
- //
- prepend ("-fmodules-ts");
- modules = true;
+ fail << "support for C++ modules requires MSVC 17.6 or later" <<
+ info << "C++ compiler is " << ci.signature <<
+ info << "required by " << project (rs) << '@' << rs;
}
- break;
+ modules = true;
}
- case compiler_type::clang:
+
+ break;
+ }
+ case compiler_type::gcc:
+ {
+ // We use the module mapper support which is only available since
+ // GCC 11. And since we are not yet capable of supporting
+ // generated headers via the mapper, we require the user to
+ // explicitly request modules.
+ //
+ // @@ Actually, now that we pre-generate headers by default, this
+ // is probably no longer the reason. But GCC modules being
+        //    unusable due to bugs is still a reason.
+ //
+ if (modules.value)
{
- // At the time of this writing, support for C++20 modules in
- // Clang is incomplete. And starting with Clang 9 (Apple Clang
- // 11.0.3), they are enabled by default in the C++2a mode which
- // breaks the way we set things up for partial preprocessing;
- // see this post for details:
- //
- // http://lists.llvm.org/pipermail/cfe-dev/2019-October/063637.html
- //
- // As a result, for now, we only enable modules if forced with
- // explicit cxx.features.modules=true.
+ if (cplusplus && *cplusplus < 202002)
+ {
+ fail << "support for C++ modules requires C++20 or later" <<
+ info << "standard in use is " << *cplusplus <<
+ info << "required by " << project (rs) << '@' << rs;
+ }
+
+ if (mj < 11)
+ {
+ fail << "support for C++ modules requires GCC 11 or later" <<
+ info << "C++ compiler is " << ci.signature <<
+ info << "required by " << project (rs) << '@' << rs;
+ }
+
+ // Defines __cpp_modules:
//
- // Also see Clang modules support hack in cc::compile.
+ // 11 -- 201810
//
- if (modules.value)
+ prepend ("-fmodules-ts");
+ modules = true;
+ }
+
+ break;
+ }
+ case compiler_type::clang:
+ {
+ // Things (command line options, semantics) changed quite a bit
+ // around Clang 16 so we don't support anything earlier than
+ // that (it's not practically usable anyway).
+ //
+        // Clang enables modules by default in C++20 or later but does not
+        // yet (as of Clang 18) define __cpp_modules. When it does, we can
+        // consider enabling modules by default on our side.
+ // For now, we only enable modules if forced with explicit
+ // cxx.features.modules=true.
+ //
+ if (modules.value)
+ {
+ if (cplusplus && *cplusplus < 202002)
+ {
+ fail << "support for C++ modules requires C++20 or later" <<
+ info << "standard in use is " << *cplusplus <<
+ info << "required by " << project (rs) << '@' << rs;
+ }
+
+ if (mj < 16)
{
- prepend ("-D__cpp_modules=201704"); // p0629r0
- mode.push_back ("-fmodules-ts"); // For the hack to work.
- modules = true;
+ fail << "support for C++ modules requires Clang 16 or later" <<
+ info << "C++ compiler is " << ci.signature <<
+ info << "required by " << project (rs) << '@' << rs;
}
- break;
+ // See https://github.com/llvm/llvm-project/issues/71364
+ //
+ prepend ("-D__cpp_modules=201907L");
+ modules = true;
}
- case compiler_type::icc:
- break; // No modules support yet.
+
+ break;
}
+ case compiler_type::icc:
+ break; // No modules support yet.
}
}
@@ -443,6 +619,95 @@ namespace build2
//set_feature (concepts);
}
+ // See cc::data::x_{hdr,inc} for background.
+ //
+ static const target_type* const hdr[] =
+ {
+ &hxx::static_type,
+ &ixx::static_type,
+ &txx::static_type,
+ &mxx::static_type,
+ nullptr
+ };
+
+ // Note that we don't include S{} here because none of the files we
+ // compile can plausibly want to include .S. (Maybe in inline assembler
+ // instructions?)
+ //
+ static const target_type* const inc[] =
+ {
+ &hxx::static_type,
+ &h::static_type,
+ &ixx::static_type,
+ &txx::static_type,
+ &mxx::static_type,
+ &cxx::static_type,
+ &c::static_type,
+ &mm::static_type,
+ &m::static_type,
+ &cxx_inc::static_type,
+ &cc::c_inc::static_type,
+ nullptr
+ };
+
+ bool
+ types_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("cxx::types_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "cxx.types module must be loaded in project root";
+
+ // Register target types and configure their "installability".
+ //
+ using namespace install;
+
+ bool install_loaded (cast_false<bool> (rs["install.loaded"]));
+
+    // Note: not registering mm{} (it is registered separately by the
+ // respective optional .types submodule).
+ //
+ // Note: mxx{} is in hdr. @@ But maybe it shouldn't be...
+ //
+ rs.insert_target_type<cxx> ();
+
+ auto insert_hdr = [&rs, install_loaded] (const target_type& tt)
+ {
+ rs.insert_target_type (tt);
+
+ // Install headers into install.include.
+ //
+ if (install_loaded)
+ install_path (rs, tt, dir_path ("include"));
+ };
+
+ for (const target_type* const* ht (hdr); *ht != nullptr; ++ht)
+ insert_hdr (**ht);
+
+ // Also register the C header for C-derived languages.
+ //
+ insert_hdr (h::static_type);
+
+ // @@ PERF: maybe factor this to cc.types?
+ //
+ rs.insert_target_type<cc::pc> ();
+ rs.insert_target_type<cc::pca> ();
+ rs.insert_target_type<cc::pcs> ();
+
+ if (install_loaded)
+ install_path<cc::pc> (rs, dir_path ("pkgconfig"));
+
+ return true;
+ }
+
static const char* const hinters[] = {"c", nullptr};
// See cc::module for details on guess_init vs config_init.
@@ -469,15 +734,20 @@ namespace build2
// Enter all the variables and initialize the module data.
//
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
cc::config_data d {
cc::lang::cxx,
"cxx",
"c++",
+ "obj-c++",
BUILD2_DEFAULT_CXX,
".ii",
+ ".mii",
hinters,
@@ -668,6 +938,9 @@ namespace build2
vp["cc.export.libs"],
vp["cc.export.impl_libs"],
+ vp["cc.pkgconfig.include"],
+ vp["cc.pkgconfig.lib"],
+
vp.insert<string> ("cxx.stdlib"),
vp["cc.runtime"],
@@ -678,6 +951,7 @@ namespace build2
vp["cc.module_name"],
vp["cc.importable"],
vp["cc.reprocess"],
+ vp["cc.serialize"],
// Ability to signal that source is already (partially) preprocessed.
// Valid values are 'none' (not preprocessed), 'includes' (no #include
@@ -733,6 +1007,9 @@ namespace build2
vp.insert_alias (d.c_module_name, "cxx.module_name");
vp.insert_alias (d.c_importable, "cxx.importable");
+ vp.insert_alias (d.c_pkgconfig_include, "cxx.pkgconfig.include");
+ vp.insert_alias (d.c_pkgconfig_lib, "cxx.pkgconfig.lib");
+
auto& m (extra.set_module (new config_module (move (d))));
m.guess (rs, loc, extra.hints);
@@ -763,27 +1040,6 @@ namespace build2
return true;
}
- static const target_type* const hdr[] =
- {
- &hxx::static_type,
- &ixx::static_type,
- &txx::static_type,
- &mxx::static_type,
- nullptr
- };
-
- static const target_type* const inc[] =
- {
- &hxx::static_type,
- &h::static_type,
- &ixx::static_type,
- &txx::static_type,
- &mxx::static_type,
- &cxx::static_type,
- &c::static_type,
- nullptr
- };
-
bool
init (scope& rs,
scope& bs,
@@ -805,7 +1061,7 @@ namespace build2
auto& cm (
load_module<config_module> (rs, rs, "cxx.config", loc, extra.hints));
- auto& vp (rs.var_pool ());
+ auto& vp (rs.var_pool (true /* public */)); // All qualified.
bool modules (cast<bool> (rs["cxx.features.modules"]));
@@ -823,10 +1079,8 @@ namespace build2
"cxx.compile",
"cxx.link",
"cxx.install",
- "cxx.uninstall",
- cm.x_info->id.type,
- cm.x_info->id.variant,
+ cm.x_info->id,
cm.x_info->class_,
cm.x_info->version.major,
cm.x_info->version.minor,
@@ -859,25 +1113,131 @@ namespace build2
cxx::static_type,
modules ? &mxx::static_type : nullptr,
+ cxx_inc::static_type,
hdr,
inc
};
- auto& m (extra.set_module (new module (move (d))));
+ auto& m (extra.set_module (new module (move (d), rs)));
m.init (rs, loc, extra.hints, *cm.x_info);
return true;
}
+ bool
+ objcxx_types_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("cxx::objcxx_types_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "cxx.objcxx.types module must be loaded in project root";
+
+ // Register the mm{} target type.
+ //
+ rs.insert_target_type<mm> ();
+
+ return true;
+ }
+
+ bool
+ objcxx_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("cxx::objcxx_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "cxx.objcxx module must be loaded in project root";
+
+ module* mod (rs.find_module<module> ("cxx"));
+
+ if (mod == nullptr)
+ fail (loc) << "cxx.objcxx module must be loaded after cxx module";
+
+ // Register the target type and "enable" it in the module.
+ //
+ // Note that we must register the target type regardless of whether the
+    // C++ compiler is capable of compiling Objective-C++. But we only
+    // enable it if it is.
+ //
+ // Note: see similar code in the c module.
+ //
+ load_module (rs, rs, "cxx.objcxx.types", loc);
+
+ // Note that while Objective-C++ is supported by MinGW GCC, it's
+ // unlikely Clang supports it when targeting MSVC or Emscripten. But
+ // let's keep the check simple for now.
+ //
+ if (mod->ctype == compiler_type::gcc ||
+ mod->ctype == compiler_type::clang)
+ mod->x_obj = &mm::static_type;
+
+ return true;
+ }
+
+ bool
+ predefs_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("cxx::predefs_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "cxx.predefs module must be loaded in project root";
+
+ module* mod (rs.find_module<module> ("cxx"));
+
+ if (mod == nullptr)
+ fail (loc) << "cxx.predefs module must be loaded after cxx module";
+
+ // Register the cxx.predefs rule.
+ //
+ // Why invent a separate module instead of just always registering it in
+ // the cxx module? The reason is performance: this rule will be called
+ // for every C++ header.
+ //
+ cc::predefs_rule& r (*mod);
+
+ rs.insert_rule<hxx> (perform_update_id, r.rule_name, r);
+ rs.insert_rule<hxx> (perform_clean_id, r.rule_name, r);
+ rs.insert_rule<hxx> (configure_update_id, r.rule_name, r);
+
+ return true;
+ }
+
static const module_functions mod_functions[] =
{
// NOTE: don't forget to also update the documentation in init.hxx if
// changing anything here.
- {"cxx.guess", nullptr, guess_init},
- {"cxx.config", nullptr, config_init},
- {"cxx", nullptr, init},
- {nullptr, nullptr, nullptr}
+ {"cxx.types", nullptr, types_init},
+ {"cxx.guess", nullptr, guess_init},
+ {"cxx.config", nullptr, config_init},
+ {"cxx.objcxx.types", nullptr, objcxx_types_init},
+ {"cxx.objcxx", nullptr, objcxx_init},
+ {"cxx.predefs", nullptr, predefs_init},
+ {"cxx", nullptr, init},
+ {nullptr, nullptr, nullptr}
};
const module_functions*
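
The standard-to-option mapping above spans several compiler classes and can be hard to follow. The following standalone sketch (not part of the patch; the map_cxx_std() name, the omission of MSVC, default, and latest/experimental handling, and the C++26 placeholder value are assumptions) summarizes the explicit-std branch for the gcc/clang classes: recognize the NN, Nx, and [cC]++NN spellings, pick the -std option, and derive the approximate __cplusplus value.

#include <cstring>
#include <cstdint>
#include <string>
#include <utility>
#include <stdexcept>

// Sketch only: mirrors the explicit-std branch for the gcc/clang classes.
//
static std::pair<std::string, std::uint32_t>
map_cxx_std (const std::string& v)
{
  const char* s (v.c_str ());

  // Strip the optional [cC]++ prefix, as in the stdcmp lambda above.
  //
  if ((s[0] == 'c' || s[0] == 'C') && s[1] == '+' && s[2] == '+')
    s += 3;

  auto eq = [s] (const char* nn, const char* nx)
  {
    return std::strcmp (s, nn) == 0 || std::strcmp (s, nx) == 0;
  };

  if (eq ("26", "2c")) return {"-std=c++2c", 202400}; // Placeholder value.
  if (eq ("23", "2b")) return {"-std=c++2b", 202302};
  if (eq ("20", "2a")) return {"-std=c++2a", 202002};
  if (eq ("17", "1z")) return {"-std=c++1z", 201703};
  if (eq ("14", "1y")) return {"-std=c++1y", 201402};
  if (eq ("11", "0x")) return {"-std=c++0x", 201103};

  if (std::strcmp (s, "03") == 0) return {"-std=c++03", 199711};
  if (std::strcmp (s, "98") == 0) return {"-std=c++98", 199711};

  // The real code passes unrecognized values (e.g., gnu++NN) through as-is.
  //
  throw std::invalid_argument ("unrecognized C++ standard: " + v);
}
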
diff --git a/libbuild2/cxx/init.hxx b/libbuild2/cxx/init.hxx
index 094fea4..a193e74 100644
--- a/libbuild2/cxx/init.hxx
+++ b/libbuild2/cxx/init.hxx
@@ -19,9 +19,19 @@ namespace build2
//
// Submodules:
//
- // `cxx.guess` -- registers and sets some variables.
- // `cxx.config` -- loads cxx.guess and sets more variables.
- // `cxx` -- loads cxx.config and registers target types and rules.
+ // `cxx.types` -- registers target types.
+ // `cxx.guess` -- registers and sets some variables.
+ // `cxx.config` -- loads cxx.guess and sets more variables.
+ // `cxx` -- loads cxx.{types,config} and registers rules
+ // and functions.
+ //
+ // `cxx.objcxx.types` -- registers mm{} target type.
+ // `cxx.objcxx` -- loads cxx.objcxx and enables Objective-C++
+ // compilation.
+ //
+ // `cxx.predefs` -- registers rule for generating a C++ header with
+ // predefined compiler macros. Must be loaded after
+ // cxx.
//
extern "C" LIBBUILD2_CXX_SYMEXPORT const module_functions*
build2_cxx_load ();
diff --git a/libbuild2/cxx/target.cxx b/libbuild2/cxx/target.cxx
index 982dcb4..37096c3 100644
--- a/libbuild2/cxx/target.cxx
+++ b/libbuild2/cxx/target.cxx
@@ -3,10 +3,6 @@
#include <libbuild2/cxx/target.hxx>
-#include <libbuild2/context.hxx>
-
-using namespace std;
-
namespace build2
{
namespace cxx
@@ -22,7 +18,7 @@ namespace build2
&target_pattern_var<hxx_ext_def>,
nullptr,
&file_search,
- false
+ target_type::flag::none
};
extern const char ixx_ext_def[] = "ixx";
@@ -36,7 +32,7 @@ namespace build2
&target_pattern_var<ixx_ext_def>,
nullptr,
&file_search,
- false
+ target_type::flag::none
};
extern const char txx_ext_def[] = "txx";
@@ -50,7 +46,7 @@ namespace build2
&target_pattern_var<txx_ext_def>,
nullptr,
&file_search,
- false
+ target_type::flag::none
};
extern const char cxx_ext_def[] = "cxx";
@@ -64,7 +60,7 @@ namespace build2
&target_pattern_var<cxx_ext_def>,
nullptr,
&file_search,
- false
+ target_type::flag::none
};
extern const char mxx_ext_def[] = "mxx";
@@ -78,7 +74,34 @@ namespace build2
&target_pattern_var<mxx_ext_def>,
nullptr,
&file_search,
- false
+ target_type::flag::none
+ };
+
+ extern const char mm_ext_def[] = "mm";
+ const target_type mm::static_type
+ {
+ "mm",
+ &cc::static_type,
+ &target_factory<mm>,
+ nullptr, /* fixed_extension */
+ &target_extension_var<mm_ext_def>,
+ &target_pattern_var<mm_ext_def>,
+ nullptr,
+ &file_search,
+ target_type::flag::none
+ };
+
+ const target_type cxx_inc::static_type
+ {
+ "cxx_inc",
+ &cc::static_type,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ &target_search,
+ target_type::flag::none
};
}
}
diff --git a/libbuild2/cxx/target.hxx b/libbuild2/cxx/target.hxx
index cddab68..06e8a67 100644
--- a/libbuild2/cxx/target.hxx
+++ b/libbuild2/cxx/target.hxx
@@ -7,7 +7,6 @@
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
-#include <libbuild2/target.hxx>
#include <libbuild2/cc/target.hxx>
#include <libbuild2/cxx/export.hxx>
@@ -18,45 +17,58 @@ namespace build2
{
using cc::h;
using cc::c;
+ using cc::m;
class LIBBUILD2_CXX_SYMEXPORT hxx: public cc::cc
{
public:
- using cc::cc;
+ hxx (context& c, dir_path d, dir_path o, string n)
+ : cc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_CXX_SYMEXPORT ixx: public cc::cc
{
public:
- using cc::cc;
+ ixx (context& c, dir_path d, dir_path o, string n)
+ : cc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_CXX_SYMEXPORT txx: public cc::cc
{
public:
- using cc::cc;
+ txx (context& c, dir_path d, dir_path o, string n)
+ : cc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_CXX_SYMEXPORT cxx: public cc::cc
{
public:
- using cc::cc;
+ cxx (context& c, dir_path d, dir_path o, string n)
+ : cc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
// The module interface unit is both like a header (e.g., we need to
@@ -67,11 +79,48 @@ namespace build2
class LIBBUILD2_CXX_SYMEXPORT mxx: public cc::cc
{
public:
- using cc::cc;
+ mxx (context& c, dir_path d, dir_path o, string n)
+ : cc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
+
+ public:
+ static const target_type static_type;
+ };
+
+ // Objective-C++ source file.
+ //
+ class LIBBUILD2_CXX_SYMEXPORT mm: public cc::cc
+ {
+ public:
+ mm (context& c, dir_path d, dir_path o, string n)
+ : cc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
+
+ public:
+ static const target_type static_type;
+ };
+
+ // This is an abstract base target for deriving additional targets (for
+ // example, Qt moc{}) that can be #include'd in C++ translation units. In
+ // particular, only such targets will be considered to reverse-lookup
+ // extensions to target types (see dyndep_rule::map_extension() for
+ // background).
+ //
+ class LIBBUILD2_CXX_SYMEXPORT cxx_inc: public cc::cc
+ {
+ public:
+ cxx_inc (context& c, dir_path d, dir_path o, string n)
+ : cc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
}
}
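
To illustrate what the new cxx_inc{} base is for, here is a minimal sketch of how an external module could introduce another includable target type, following the same pattern as mm{} and cxx_inc{} above (constructor sets dynamic_type, descriptor names the base so the extension reverse lookup works). The qt_moc name, the moc default extension, and the flat layout are assumptions for illustration only, not code from this patch or from any Qt module.

#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
#include <libbuild2/target.hxx>
#include <libbuild2/cxx/target.hxx>

namespace example
{
  using namespace build2;

  // A hypothetical #include-able output (think Qt moc). Deriving from
  // cxx_inc makes it eligible for the extension-to-target-type reverse
  // lookup mentioned in the comment above.
  //
  class qt_moc: public build2::cxx::cxx_inc
  {
  public:
    qt_moc (context& c, dir_path d, dir_path o, string n)
      : cxx_inc (c, move (d), move (o), move (n))
    {
      dynamic_type = &static_type;
    }

  public:
    static const target_type static_type;
  };

  extern const char qt_moc_ext_def[] = "moc";
  const target_type qt_moc::static_type
  {
    "qt_moc",
    &build2::cxx::cxx_inc::static_type,
    &target_factory<qt_moc>,
    nullptr, /* fixed_extension */
    &target_extension_var<qt_moc_ext_def>,
    &target_pattern_var<qt_moc_ext_def>,
    nullptr,
    &file_search,
    target_type::flag::none
  };
}
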
diff --git a/libbuild2/depdb.cxx b/libbuild2/depdb.cxx
index c229f23..0dabeca 100644
--- a/libbuild2/depdb.cxx
+++ b/libbuild2/depdb.cxx
@@ -18,21 +18,37 @@ namespace build2
// Note that state::write with absent pos is interpreted as non-existent.
//
depdb_base::
- depdb_base (const path& p, state s, optional<uint64_t> pos)
- : state_ (s)
+ depdb_base (const path& p, bool ro, state s, optional<uint64_t> pos)
+ : state_ (s), ro_ (ro)
{
- fdopen_mode om (fdopen_mode::out | fdopen_mode::binary);
+ if (s == state::write && ro)
+ {
+ new (&is_) ifdstream ();
+ buf_ = nullptr; // Shouldn't be needed.
+ return;
+ }
+
+ fdopen_mode om (fdopen_mode::binary);
ifdstream::iostate em (ifdstream::badbit);
if (s == state::write)
{
+ om |= fdopen_mode::out;
+
if (!pos)
om |= fdopen_mode::create | fdopen_mode::exclusive;
em |= ifdstream::failbit;
}
else
- om |= fdopen_mode::in; // Both in & out so can switch from read to write.
+ {
+ om |= fdopen_mode::in;
+
+ // Both in & out so can switch from read to write.
+ //
+ if (!ro)
+ om |= fdopen_mode::out;
+ }
auto_fd fd;
try
@@ -80,8 +96,9 @@ namespace build2
}
depdb::
- depdb (path_type&& p, timestamp mt)
+ depdb (path_type&& p, bool ro, timestamp mt)
: depdb_base (p,
+ ro,
mt != timestamp_nonexistent ? state::read : state::write),
path (move (p)),
mtime (mt != timestamp_nonexistent ? mt : timestamp_unknown)
@@ -91,22 +108,25 @@ namespace build2
if (state_ == state::read)
{
string* l (read ());
- if (l == nullptr || *l != "1")
- write ('1');
+ if (l != nullptr && *l == "1")
+ return;
}
- else
+
+ if (!ro)
write ('1');
+ else if (reading ())
+ change ();
}
depdb::
- depdb (path_type p)
- : depdb (move (p), build2::mtime (p))
+ depdb (path_type p, bool ro)
+ : depdb (move (p), ro, build2::mtime (p))
{
}
depdb::
depdb (reopen_state rs)
- : depdb_base (rs.path, state::write, rs.pos),
+ : depdb_base (rs.path, false, state::write, rs.pos),
path (move (rs.path)),
mtime (timestamp_unknown),
touch (rs.mtime)
@@ -118,51 +138,58 @@ namespace build2
{
assert (state_ != state::write);
- // Transfer the file descriptor from ifdstream to ofdstream. Note that the
- // steps in this dance must be carefully ordered to make sure we don't
- // call any destructors twice in the face of exceptions.
- //
- auto_fd fd (is_.release ());
-
- // Consider this scenario: we are overwriting an old line (so it ends with
- // a newline and the "end marker") but the operation failed half way
- // through. Now we have the prefix from the new line, the suffix from the
- // old, and everything looks valid. So what we need is to somehow
- // invalidate the old content so that it can never combine with (partial)
- // new content to form a valid line. One way to do that would be to
- // truncate the file.
- //
- if (trunc)
- try
+ if (ro_)
{
- fdtruncate (fd.get (), pos_);
+ buf_ = nullptr;
}
- catch (const io_error& e)
+ else
{
- fail << "unable to truncate " << path << ": " << e;
- }
+ // Transfer the file descriptor from ifdstream to ofdstream. Note that
+ // the steps in this dance must be carefully ordered to make sure we
+ // don't call any destructors twice in the face of exceptions.
+ //
+ auto_fd fd (is_.release ());
+
+ // Consider this scenario: we are overwriting an old line (so it ends
+ // with a newline and the "end marker") but the operation failed half
+ // way through. Now we have the prefix from the new line, the suffix
+ // from the old, and everything looks valid. So what we need is to
+ // somehow invalidate the old content so that it can never combine with
+ // (partial) new content to form a valid line. One way to do that would
+ // be to truncate the file.
+ //
+ if (trunc)
+ try
+ {
+ fdtruncate (fd.get (), pos_);
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to truncate " << path << ": " << e;
+ }
- // Note: the file descriptor position can be beyond the pos_ value due to
- // the ifdstream buffering. That's why we need to seek to switch from
- // reading to writing.
- //
- try
- {
- fdseek (fd.get (), pos_, fdseek_mode::set);
- }
- catch (const io_error& e)
- {
- fail << "unable to rewind " << path << ": " << e;
- }
+ // Note: the file descriptor position can be beyond the pos_ value due
+ // to the ifdstream buffering. That's why we need to seek to switch from
+ // reading to writing.
+ //
+ try
+ {
+ fdseek (fd.get (), pos_, fdseek_mode::set);
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to rewind " << path << ": " << e;
+ }
- // @@ Strictly speaking, ofdstream can throw which will leave us in a
- // non-destructible state. Unlikely but possible.
- //
- is_.~ifdstream ();
- new (&os_) ofdstream (move (fd),
- ofdstream::badbit | ofdstream::failbit,
- pos_);
- buf_ = static_cast<fdstreambuf*> (os_.rdbuf ());
+ // @@ Strictly speaking, ofdstream can throw which will leave us in a
+ // non-destructible state. Unlikely but possible.
+ //
+ is_.~ifdstream ();
+ new (&os_) ofdstream (move (fd),
+ ofdstream::badbit | ofdstream::failbit,
+ pos_);
+ buf_ = static_cast<fdstreambuf*> (os_.rdbuf ());
+ }
state_ = state::write;
mtime = timestamp_unknown;
@@ -304,6 +331,12 @@ namespace build2
void depdb::
close (bool mc)
{
+ if (ro_)
+ {
+ is_.close ();
+ return;
+ }
+
// If we are at eof, then it means all lines are good, there is the "end
// marker" at the end, and we don't need to do anything, except, maybe
// touch the file. Otherwise, if we are still in the read mode, truncate
diff --git a/libbuild2/depdb.hxx b/libbuild2/depdb.hxx
index 55413bf..5855c3f 100644
--- a/libbuild2/depdb.hxx
+++ b/libbuild2/depdb.hxx
@@ -66,14 +66,15 @@ namespace build2
//
enum class state {read, read_eof, write};
- depdb_base (const path&, state, optional<uint64_t> pos = nullopt);
+ depdb_base (const path&, bool ro, state, optional<uint64_t> pos = nullopt);
~depdb_base ();
state state_;
+ bool ro_;
union
{
- ifdstream is_; // read, read_eof
+ ifdstream is_; // read, read_eof, (ro && write)
ofdstream os_; // write
};
@@ -107,13 +108,19 @@ namespace build2
// has wrong format version, or is corrupt, then the database will be
// immediately switched to writing.
//
+ // If read_only is true, then don't actually make any modifications to the
+ // database file. In other words, the database is still nominally switched
+ // to writing but without any filesystem changes. Note that calling any
+ // write-only functions (write(), touch, etc) on such a database is
+ // illegal.
+ //
// The failure commonly happens when the user tries to stash the target in
// a non-existent subdirectory but forgets to add the corresponding fsdir{}
// prerequisite. That's why the issued diagnostics may provide the
// corresponding hint.
//
explicit
- depdb (path_type);
+ depdb (path_type, bool read_only = false);
struct reopen_state
{
@@ -304,7 +311,7 @@ namespace build2
depdb& operator= (const depdb&) = delete;
private:
- depdb (path_type&&, timestamp);
+ depdb (path_type&&, bool, timestamp);
void
change (bool truncate = true);
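
As a usage sketch of the new read_only mode (illustrative only; the helper function, the ".d" path convention, and the writing()/mtime checks follow the usual depdb rule pattern and are assumptions rather than code from this patch):

#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
#include <libbuild2/depdb.hxx>

namespace build2
{
  // Sketch: decide whether a target with path tp and modification time mt
  // is plausibly up to date without modifying its dependency database.
  //
  static bool
  depdb_fresh (const path& tp, timestamp mt)
  {
    depdb dd (tp + ".d", true /* read_only */);

    // If the database is missing, has a wrong version, or is corrupt, the
    // constructor has already (nominally) switched it to writing, but in
    // read-only mode no filesystem changes are made. Write-style functions
    // (write(), touch, etc) must not be called on such a database.
    //
    bool r (!dd.writing () && dd.mtime <= mt);

    dd.close ();
    return r;
  }
}
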
diff --git a/libbuild2/depdb.ixx b/libbuild2/depdb.ixx
index 819fadd..18b4351 100644
--- a/libbuild2/depdb.ixx
+++ b/libbuild2/depdb.ixx
@@ -8,7 +8,7 @@ namespace build2
inline depdb_base::
~depdb_base ()
{
- if (state_ != state::write)
+ if (state_ != state::write || ro_)
is_.~ifdstream ();
else
os_.~ofdstream ();
@@ -17,7 +17,7 @@ namespace build2
inline void depdb::
flush ()
{
- if (state_ == state::write)
+ if (state_ == state::write && !ro_)
try
{
os_.flush ();
@@ -37,7 +37,7 @@ namespace build2
inline void depdb::
check_mtime (const path_type& t, timestamp e)
{
- if (state_ == state::write && mtime_check ())
+ if (state_ == state::write && !ro_ && mtime_check ())
check_mtime_ (t, e);
}
diff --git a/libbuild2/diagnostics.cxx b/libbuild2/diagnostics.cxx
index 4d2d7ce..4a46756 100644
--- a/libbuild2/diagnostics.cxx
+++ b/libbuild2/diagnostics.cxx
@@ -3,8 +3,10 @@
#include <libbuild2/diagnostics.hxx>
-#include <cstring> // strchr()
+#include <cstring> // strchr(), memcpy()
+#include <cstdlib> // getenv()
+#include <libbutl/fdstream.hxx> // fdterm_color()
#include <libbutl/process-io.hxx>
#include <libbuild2/scope.hxx>
@@ -13,39 +15,524 @@
#include <libbuild2/context.hxx>
using namespace std;
+using namespace butl;
namespace build2
{
- // Diagnostics state (verbosity level, progress, etc). Keep disabled until
- // set from options.
+ // Diagnostics state (verbosity level, progress, etc). Keep default/disabled
+ // until set from options.
//
- uint16_t verb = 0;
- bool silent = true;
+ uint16_t verb = 1;
+ bool silent = false;
optional<bool> diag_progress_option;
+ optional<bool> diag_color_option;
bool diag_no_line = false;
bool diag_no_column = false;
bool stderr_term = false;
+ bool stderr_term_color = false;
void
- init_diag (uint16_t v, bool s, optional<bool> p, bool nl, bool nc, bool st)
+ init_diag (uint16_t v,
+ bool s,
+ optional<bool> p,
+ optional<bool> c,
+ bool nl,
+ bool nc,
+ bool st)
{
assert (!s || v == 0);
verb = v;
silent = s;
diag_progress_option = p;
+ diag_color_option = c;
diag_no_line = nl;
diag_no_column = nc;
stderr_term = st;
+
+ if (st)
+ {
+ // @@ TMP: eventually we want to enable on Windows by default.
+ //
+#ifdef _WIN32
+ if (c && *c)
+ {
+#endif
+ stderr_term_color = fdterm_color (stderr_fd (), !c || *c /* enable */);
+
+      // If the user specified --diag-color on POSIX, we trust that color is
+      // supported even if we could not detect it (e.g., wrong TERM value, etc).
+ //
+ if (!stderr_term_color && c && *c)
+ {
+#ifdef _WIN32
+ fail << "unable to enable diagnostics color support for stderr";
+#else
+ stderr_term_color = true;
+#endif
+ }
+
+#ifdef _WIN32
+ }
+ else
+ stderr_term_color = false;
+#endif
+ }
+ else
+ stderr_term_color = false;
}
// Stream verbosity.
//
const int stream_verb_index = ostream::xalloc ();
+ // print_diag()
+ //
+ void
+ print_diag_impl (const char* p, target_key* l, target_key&& r, const char* c)
+ {
+ // @@ Print directly to diag_stream (and below)? Won't we be holding
+ // the lock longer?
+
+ diag_record dr (text);
+
+ dr << p << ' ';
+
+ if (l != nullptr)
+ {
+ // Omit the @.../ qualification in either lhs or rhs if it's implied by
+ // the other.
+ //
+ // @@ Shouldn't we, strictly speaking, also check that they belong to
+ // the same project? Though it would be far-fetched to use another
+ // project's target from src. Or maybe not.
+ //
+ if (!l->out->empty ())
+ {
+ if (r.out->empty ())
+ l->out = &empty_dir_path;
+ }
+ else if (!r.out->empty ())
+ r.out = &empty_dir_path;
+
+ dr << *l << ' ' << (c == nullptr ? "->" : c) << ' ';
+ }
+
+ dr << r;
+ }
+
+
+ static inline bool
+ print_diag_cmp (const pair<optional<string>, const target_key*>& x,
+ const pair<optional<string>, const target_key*>& y)
+ {
+ return (x.second->dir->compare (*y.second->dir) == 0 &&
+ x.first->compare (*y.first) == 0);
+ }
+
+ // Return true if we have multiple partitions (see below for details).
+ //
+ static bool
+ print_diag_collect (const vector<target_key>& tks,
+ ostringstream& os,
+ stream_verbosity sv,
+ vector<pair<optional<string>, const target_key*>>& ns)
+ {
+ ns.reserve (tks.size ());
+
+ for (const target_key& k: tks)
+ {
+ bool r;
+ if (auto p = k.type->print)
+ r = p (os, k, true /* name_only */);
+ else
+ r = to_stream (os, k, sv, true /* name_only */);
+
+ ns.push_back (make_pair (r ? optional<string> (os.str ()) : nullopt, &k));
+
+ os.clear ();
+ os.str (string ()); // Note: just seekp(0) is not enough.
+ }
+
+ // Partition.
+ //
+ // While at it also determine whether we have multiple partitions.
+ //
+ bool ml (false);
+ for (auto b (ns.begin ()), e (ns.end ()); b != e; )
+ {
+ const pair<optional<string>, const target_key*>& x (*b++);
+
+ // Move all the elements that are equal to x to the front, preserving
+ // order.
+ //
+ b = stable_partition (
+ b, e,
+ [&x] (const pair<optional<string>, const target_key*>& y)
+ {
+ return (x.first && y.first && print_diag_cmp (x, y));
+ });
+
+ if (!ml && b != e)
+ ml = true;
+ }
+
+ return ml;
+ }
+
+ static void
+ print_diag_print (const vector<pair<optional<string>, const target_key*>>& ns,
+ ostringstream& os,
+ stream_verbosity sv,
+ const optional<string>& ml)
+ {
+ for (auto b (ns.begin ()), i (b), e (ns.end ()); i != e; )
+ {
+ if (i != b)
+ os << '\n' << *ml;
+
+ const pair<optional<string>, const target_key*>& p (*i);
+
+ if (!p.first) // Irregular.
+ {
+ os << *p.second;
+ ++i;
+ continue;
+ }
+
+ // Calculate the number of members in this partition.
+ //
+ size_t n (1);
+ for (auto j (i + 1); j != e && j->first && print_diag_cmp (*i, *j); ++j)
+ ++n;
+
+ // Similar code to to_stream(target_key).
+ //
+
+ // Print the directory.
+ //
+ {
+ const target_key& k (*p.second);
+
+ uint16_t dv (sv.path);
+
+ // Note: relative() returns empty for './'.
+ //
+ const dir_path& rd (dv < 1 ? relative (*k.dir) : *k.dir);
+
+ if (!rd.empty ())
+ {
+ if (dv < 1)
+ os << diag_relative (rd);
+ else
+ to_stream (os, rd, true /* representation */);
+ }
+ }
+
+ // Print target types.
+ //
+ {
+ if (n != 1)
+ os << '{';
+
+ for (auto j (i), e (i + n); j != e; ++j)
+ os << (j != i ? " " : "") << j->second->type->name;
+
+ if (n != 1)
+ os << '}';
+ }
+
+ // Print the target name (the same for all members of this partition).
+ //
+ os << '{' << *i->first << '}';
+
+ i += n;
+ }
+ }
+
+ template <typename L> // L can be target_key, path, or string.
+ static void
+ print_diag_impl (const char* p,
+ const L* l, bool lempty,
+ vector<target_key>&& rs,
+ const char* c)
+ {
+ assert (rs.size () > 1);
+
+ // The overall plan is as follows:
+ //
+ // 1. Collect the printed names for all the group members.
+ //
+ // Note if the printed representation is irregular (see
+ // to_stream(target_key) for details). We will print such members each
+ // on a separate line.
+ //
+ // 2. Move the names around so that we end up with contiguous partitions
+ // of targets with the same name.
+ //
+ // 3. Print the partitions, one per line.
+ //
+  //    The steps 1-2 are performed by print_diag_collect() above.
+ //
+ vector<pair<optional<string>, const target_key*>> ns;
+
+ // Use the diag_record's ostringstream so that we get the appropriate
+ // stream verbosity, etc.
+ //
+ diag_record dr (text);
+ ostringstream& os (dr.os);
+ stream_verbosity sv (stream_verb (os));
+
+ optional<string> ml;
+ if (print_diag_collect (rs, os, sv, ns))
+ ml = string ();
+
+ // Print.
+ //
+ os << p << ' ';
+
+ if (l != nullptr)
+ os << *l << (lempty ? "" : " ") << (c == nullptr ? "->" : c) << ' ';
+
+ if (ml)
+ ml = string (os.str ().size (), ' '); // Indentation.
+
+ print_diag_print (ns, os, sv, ml);
+ }
+
+ template <typename R> // R can be target_key, path, or string.
+ static void
+ print_diag_impl (const char* p,
+ vector<target_key>&& ls, const R& r,
+ const char* c)
+ {
+ assert (ls.size () > 1);
+
+ // As above but for the group on the LHS.
+ //
+ vector<pair<optional<string>, const target_key*>> ns;
+
+ diag_record dr (text);
+ ostringstream& os (dr.os);
+ stream_verbosity sv (stream_verb (os));
+
+ optional<string> ml;
+ if (print_diag_collect (ls, os, sv, ns))
+ ml = string ();
+
+ // Print.
+ //
+ os << p << ' ';
+
+ if (ml)
+ ml = string (os.str ().size (), ' '); // Indentation.
+
+ print_diag_print (ns, os, sv, ml);
+
+ // @@ TODO: make sure `->` is aligned with longest line printed by
+ // print_diag_print(). Currently it can look like this:
+ //
+ // ln /tmp/hello-gcc/hello/hello/{hxx cxx}{hello-types}
+ // /tmp/hello-gcc/hello/hello/{hxx cxx}{hello-stubs}
+ // /tmp/hello-gcc/hello/hello/cxx{hello-ext} -> ./
+ //
+ os << ' ' << (c == nullptr ? "->" : c) << ' ' << r;
+ }
+
+ void
+ print_diag_impl (const char* p,
+ target_key* l, vector<target_key>&& rs,
+ const char* c)
+ {
+ // Note: keep this implementation separate from the above for performance.
+ //
+ assert (!rs.empty ());
+
+ if (rs.size () == 1)
+ {
+ print_diag_impl (p, l, move (rs.front ()), c);
+ return;
+ }
+
+ // At the outset handle out-qualification as above. Here we assume that
+ // all the targets in the group have the same out.
+ //
+ if (l != nullptr)
+ {
+ if (!l->out->empty ())
+ {
+ if (rs.front ().out->empty ())
+ l->out = &empty_dir_path;
+ }
+ else if (!rs.front ().out->empty ())
+ {
+ for (target_key& r: rs)
+ r.out = &empty_dir_path;
+ }
+ }
+
+ print_diag_impl<target_key> (p, l, false /* empty */, move (rs), c);
+ }
+
+ // Note: these can't be inline since need the target class definition.
+ //
+ void
+ print_diag (const char* p, const target& l, const target& r, const char* c)
+ {
+ target_key lk (l.key ());
+ print_diag_impl (p, &lk, r.key (), c);
+ }
+
+ void
+ print_diag (const char* p, target_key&& l, const target& r, const char* c)
+ {
+ print_diag_impl (p, &l, r.key (), c);
+ }
+
+ void
+ print_diag (const char* p, const target& l, target_key&& r, const char* c)
+ {
+ target_key lk (l.key ());
+ print_diag_impl (p, &lk, move (r), c);
+ }
+
+ void
+ print_diag (const char* p, const path& l, const target& r, const char* c)
+ {
+ return print_diag (p, l, r.key (), c);
+ }
+
+ void
+ print_diag (const char* p, const path& l, target_key&& r, const char* c)
+ {
+ text << p << ' ' << l << ' ' << (c == nullptr ? "->" : c) << ' ' << r;
+ }
+
+ void
+ print_diag (const char* p,
+ const path& l, vector<target_key>&& rs,
+ const char* c)
+ {
+ assert (!rs.empty ());
+
+ if (rs.size () == 1)
+ print_diag (p, l, move (rs.front ()), c);
+ else
+ print_diag_impl<path> (p, &l, false /* empty */, move (rs), c);
+ }
+
+ void
+ print_diag (const char* p, const string& l, const target& r, const char* c)
+ {
+ return print_diag (p, l, r.key (), c);
+ }
+
+ void
+ print_diag (const char* p, const string& l, target_key&& r, const char* c)
+ {
+ text << p << ' '
+ << l << (l.empty () ? "" : " ")
+ << (c == nullptr ? "->" : c) << ' '
+ << r;
+ }
+
+ void
+ print_diag (const char* p,
+ const string& l, vector<target_key>&& rs,
+ const char* c)
+ {
+ assert (!rs.empty ());
+
+ if (rs.size () == 1)
+ print_diag (p, l, move (rs.front ()), c);
+ else
+ print_diag_impl<string> (p, &l, l.empty (), move (rs), c);
+ }
+
+ void
+ print_diag (const char* p, const target& r)
+ {
+ print_diag_impl (p, nullptr, r.key (), nullptr);
+ }
+
+ void
+ print_diag (const char* p, const dir_path& r)
+ {
+ text << p << ' ' << r;
+ }
+
+ void
+ print_diag (const char* p, const path_name_view& r)
+ {
+ text << p << ' ' << r;
+ }
+
+ void
+ print_diag (const char* p,
+ const target& l, const path_name_view& r,
+ const char* c)
+ {
+ // @@ TODO: out qualification stripping: only do if p.out is subdir of t
+ // (also below)?
+
+ text << p << ' ' << l << ' ' << (c == nullptr ? "->" : c) << ' ' << r;
+ }
+
+ void
+ print_diag (const char* p, const target& l, const dir_path& r, const char* c)
+ {
+ print_diag (p, l.key (), r, c);
+ }
+
+ void
+ print_diag (const char* p, target_key&& l, const dir_path& r, const char* c)
+ {
+ text << p << ' ' << l << ' ' << (c == nullptr ? "->" : c) << ' ' << r;
+ }
+
+ void
+ print_diag (const char* p,
+ vector<target_key>&& ls, const dir_path& r,
+ const char* c)
+ {
+ assert (!ls.empty ());
+
+ if (ls.size () == 1)
+ print_diag (p, move (ls.front ()), r, c);
+ else
+ print_diag_impl<dir_path> (p, move (ls), r, c);
+ }
+
+ void
+ print_diag (const char* p, const path& l, const dir_path& r, const char* c)
+ {
+ text << p << ' ' << l << ' ' << (c == nullptr ? "->" : c) << ' ' << r;
+ }
+
+ void
+ print_diag (const char* p,
+ const path& l, const path_name_view& r,
+ const char* c)
+ {
+ text << p << ' ' << l << ' ' << (c == nullptr ? "->" : c) << ' ' << r;
+ }
+
+ void
+ print_diag (const char* p,
+ const string& l, const path_name_view& r,
+ const char* c)
+ {
+ text << p << ' '
+ << l << (l.empty () ? "" : " ")
+ << (c == nullptr ? "->" : c) << ' '
+ << r;
+ }
+
+ // print_process()
+ //
void
print_process (const char* const* args, size_t n)
{
@@ -77,30 +564,6 @@ namespace build2
dr << butl::process_args {args, n};
}
- // Diagnostics stack.
- //
- static
-#ifdef __cpp_thread_local
- thread_local
-#else
- __thread
-#endif
- const diag_frame* diag_frame_stack = nullptr;
-
- const diag_frame* diag_frame::
- stack () noexcept
- {
- return diag_frame_stack;
- }
-
- const diag_frame* diag_frame::
- stack (const diag_frame* f) noexcept
- {
- const diag_frame* r (diag_frame_stack);
- diag_frame_stack = f;
- return r;
- }
-
// Diagnostic facility, project specifics.
//
@@ -162,6 +625,305 @@ namespace build2
const fail_mark fail ("error");
const fail_end endf;
+ // diag_buffer
+ //
+
+ int diag_buffer::
+ pipe (context& ctx, bool force)
+ {
+ return (ctx.sched->serial () || ctx.no_diag_buffer) && !force ? 2 : -1;
+ }
+
+ void diag_buffer::
+ open (const char* args0, auto_fd&& fd, fdstream_mode m)
+ {
+ assert (state_ == state::closed && args0 != nullptr);
+
+ serial = ctx_.sched->serial ();
+ nobuf = !serial && ctx_.no_diag_buffer;
+
+ if (fd != nullfd)
+ {
+ try
+ {
+ is.open (move (fd), m | fdstream_mode::text);
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to read from " << args0 << " stderr: " << e;
+ }
+ }
+
+ this->args0 = args0;
+ state_ = state::opened;
+ }
+
+ void diag_buffer::
+ open_eof (const char* args0)
+ {
+ assert (state_ == state::closed && args0 != nullptr);
+
+ serial = ctx_.sched->serial ();
+ nobuf = !serial && ctx_.no_diag_buffer;
+ this->args0 = args0;
+ state_ = state::eof;
+ }
+
+ bool diag_buffer::
+ read (bool force)
+ {
+ assert (state_ == state::opened);
+
+ bool r;
+ if (is.is_open ())
+ {
+ try
+ {
+ // Copy buffers directly.
+ //
+ auto copy = [this] (fdstreambuf& sb)
+ {
+ const char* p (sb.gptr ());
+ size_t n (sb.egptr () - p);
+
+ // Allocate at least fdstreambuf::buffer_size to reduce
+ // reallocations and memory fragmentation.
+ //
+ size_t i (buf.size ());
+ if (i == 0 && n < fdstreambuf::buffer_size)
+ buf.reserve (fdstreambuf::buffer_size);
+
+ buf.resize (i + n);
+ memcpy (buf.data () + i, p, n);
+
+ sb.gbump (static_cast<int> (n));
+ };
+
+ if (is.blocking ())
+ {
+ if ((serial || nobuf) && !force)
+ {
+ // This is the case where we are called after custom processing.
+ //
+ assert (buf.empty ());
+
+ // Note that the eof check is important: if the stream is at eof,
+ // this and all subsequent writes to the diagnostics stream will
+ // fail (and you won't see a thing).
+ //
+ if (is.peek () != ifdstream::traits_type::eof ())
+ {
+ if (serial)
+ {
+ // Holding the diag lock while waiting for diagnostics from
+ // the child process would be a bad idea in the parallel
+ // build. But it should be harmless in serial.
+ //
+ // @@ TODO: do direct buffer copy.
+ //
+ diag_stream_lock dl;
+ *diag_stream << is.rdbuf ();
+ }
+ else
+ {
+                // Read/write one line at a time so as not to hold the lock
+                // for too long.
+ //
+ for (string l; !eof (std::getline (is, l)); )
+ {
+ diag_stream_lock dl;
+ *diag_stream << l << '\n';
+ }
+ }
+ }
+ }
+ else
+ {
+ fdstreambuf& sb (*static_cast<fdstreambuf*> (is.rdbuf ()));
+
+ while (is.peek () != istream::traits_type::eof ())
+ copy (sb);
+ }
+
+ r = false;
+ }
+ else
+ {
+ // We do not support finishing off after the custom processing in
+ // the non-blocking mode unless forced to buffer (but could probably
+        // do so if necessary).
+ //
+ assert (!(serial || nobuf) || force);
+
+ fdstreambuf& sb (*static_cast<fdstreambuf*> (is.rdbuf ()));
+
+ // Try not to allocate the buffer if there is no diagnostics (the
+ // common case).
+ //
+ // Note that we must read until blocked (0) or EOF (-1).
+ //
+ streamsize n;
+ while ((n = sb.in_avail ()) > 0)
+ copy (sb);
+
+ r = (n != -1);
+ }
+
+ if (!r)
+ is.close ();
+ }
+ catch (const io_error& e)
+ {
+ // For now we assume (here and pretty much everywhere else) that the
+ // output can't fail.
+ //
+ fail << "unable to read from " << args0 << " stderr: " << e;
+ }
+ }
+ else
+ r = false;
+
+ if (!r)
+ state_ = state::eof;
+
+ return r;
+ }
+
+ void diag_buffer::
+ write (const string& s, bool nl, bool force)
+ {
+ assert (state_ != state::closed);
+
+ // Similar logic to read() above.
+ //
+ if ((serial || nobuf) && !force)
+ {
+ assert (buf.empty ());
+
+ diag_stream_lock dl;
+ *diag_stream << s;
+ if (nl)
+ *diag_stream << '\n';
+ }
+ else
+ {
+ size_t n (s.size () + (nl ? 1 : 0));
+
+ size_t i (buf.size ());
+ if (i == 0 && n < fdstreambuf::buffer_size)
+ buf.reserve (fdstreambuf::buffer_size);
+
+ buf.resize (i + n);
+ memcpy (buf.data () + i, s.c_str (), s.size ());
+
+ if (nl)
+ buf.back () = '\n';
+ }
+ }
+
+ void diag_buffer::
+ close (const char* const* args,
+ const process_exit& pe,
+ uint16_t v,
+ bool omit_normal,
+ const location& loc)
+ {
+ tracer trace ("diag_buffer::close");
+
+ assert (state_ != state::closed);
+
+ // We need to make sure the command line we print on the unsuccessful exit
+ // is inseparable from any buffered diagnostics. So we prepare the record
+ // first and then write both while holding the diagnostics stream lock.
+ //
+ diag_record dr;
+ if (!pe)
+ {
+ // Note: see similar code in run_finish_impl().
+ //
+ if (omit_normal && pe.normal ())
+ {
+ l4 ([&]{trace << "process " << args[0] << " " << pe;});
+ }
+ else
+ {
+ dr << error (loc) << "process " << args[0] << " " << pe;
+
+ if (verb >= 1 && verb <= v)
+ {
+ dr << info << "command line: ";
+ print_process (dr, args);
+ }
+ }
+ }
+
+ close (move (dr));
+ }
+
+ void diag_buffer::
+ close (diag_record&& dr)
+ {
+ assert (state_ != state::closed);
+
+ // We may still be in the open state in case of custom processing.
+ //
+ if (state_ == state::opened)
+ {
+ if (is.is_open ())
+ {
+ try
+ {
+ if (is.good ())
+ {
+ if (is.blocking ())
+ {
+ assert (is.peek () == ifdstream::traits_type::eof ());
+ }
+ else
+ {
+ assert (is.rdbuf ()->in_avail () == -1);
+ }
+ }
+
+ is.close ();
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to read from " << args0 << " stderr: " << e;
+ }
+ }
+
+ state_ = state::eof;
+ }
+
+ // Note: flushing of the diag record may throw.
+ //
+ args0 = nullptr;
+ state_ = state::closed;
+
+ if (!buf.empty () || !dr.empty ())
+ {
+ diag_stream_lock l;
+
+ if (!buf.empty ())
+ {
+ diag_stream->write (buf.data (), static_cast<streamsize> (buf.size ()));
+ buf.clear ();
+ }
+
+ if (!dr.empty ())
+ dr.flush ([] (const butl::diag_record& r)
+ {
+ // Similar to default_writer().
+ //
+ *diag_stream << r.os.str () << '\n';
+ diag_stream->flush ();
+ });
+ else
+ diag_stream->flush ();
+ }
+ }
+
// diag_do(), etc.
//
string
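
For orientation, this is roughly how a recipe is expected to use these overloads at the low verbosity level (a sketch; the report_* helpers and the "c++"/"install" program names are illustrative and not code from this patch):

#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
#include <libbuild2/target.hxx>
#include <libbuild2/diagnostics.hxx>

namespace build2
{
  // Sketch: report a compilation (main prerequisite -> target) and an
  // installation (target -> directory) in the forms described by the
  // header comment below, e.g.:
  //
  //   c++ cxx{hello} -> obje{hello}
  //   install exe{hello} -> /usr/bin/
  //
  static void
  report_compile (const target& s, const target& t)
  {
    if (verb == 1)
      print_diag ("c++", s, t);
  }

  static void
  report_install (const target& t, const dir_path& d)
  {
    if (verb == 1)
      print_diag ("install", t, d);
  }
}
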
diff --git a/libbuild2/diagnostics.hxx b/libbuild2/diagnostics.hxx
index f3d9db4..ef41f22 100644
--- a/libbuild2/diagnostics.hxx
+++ b/libbuild2/diagnostics.hxx
@@ -14,17 +14,239 @@
namespace build2
{
- using butl::diag_record;
+ struct diag_record;
// Throw this exception to terminate the build. The handler should
// assume that the diagnostics has already been issued.
//
class failed: public std::exception {};
- // Print process commmand line. If the number of elements is specified
- // (or the second version is used), then it will print the piped multi-
- // process command line, if present. In this case, the expected format
- // is as follows:
+ // Print low-verbosity recipe diagnostics in the forms:
+ //
+ // <prog> <l-target> <comb> <r-target>
+ // <prog> <r-target>
+ //
+ // Where <prog> is an abbreviated/generalized program name, such as c++
+ // (rather than g++ or clang++) or yacc (rather than bison or byacc),
+ // <l-target> is typically the "main" prerequisite target, such as the C++
+ // source file to compile, <r-target> is typically the target being
+ // produced, and <comb> is the combiner, typically "->".
+ //
+ // The second form (without <l-target> and <comb>) should be used when there
+ // is no natural "main" prerequisite, for example, for linking as well as
+ // for programs that act upon the target, such as mkdir, rm, test, etc.
+ //
+ // Note also that these functions omit the @.../ qualification in either
+ // <l-target> or <r-target> if it's implied by the other.
+ //
+ // For example:
+ //
+ // mkdir fsdir{details/}
+ // c++ cxx{hello} -> obje{hello}
+ // ld exe{hello}
+ //
+ // test exe{hello} + testscript
+ //
+ // install exe{hello} -> /usr/bin/
+ // uninstall exe{hello} <- /usr/bin/
+ //
+ // rm exe{hello}
+ // rm obje{hello}
+ // rmdir fsdir{details/}
+ //
+ // Examples of target groups:
+ //
+ // cli cli{foo} -> {hxx cxx}{foo}
+ //
+ // thrift thrift{foo} -> {hxx cxx}{foo-types}
+ // {hxx cxx}{foo-stubs}
+ //
+ // Potentially we could also support target groups for <l-target>:
+ //
+ // tool {hxx cxx}{foo} -> {hxx cxx}{foo-types}
+ //
+ // tool {hxx cxx}{foo-types}
+ // {hxx cxx}{foo-stubs} -> {hxx cxx}{foo-insts}
+ // {hxx cxx}{foo-impls}
+ //
+ // Currently we only support this for the `group -> dir_path` form (used
+ // by the backlink machinery).
+ //
+ // See also the `diag` Buildscript pseudo-builtin which is reduced to one of
+ // the print_diag() calls (adhoc_buildscript_rule::print_custom_diag()). In
+ // particular, if you are adding a new overload, also consider if/how it
+  // should be handled there.
+ //
+ // Note: see GH issue #40 for additional background and rationale.
+ //
+ // If <comb> is not specified, then "->" is used by default.
+
+ // prog target -> target
+ // prog target -> group
+ //
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const target& l, const target& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ target_key&& l, const target& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const target& l, target_key&& r,
+ const char* comb = nullptr);
+
+ void
+ print_diag (const char* prog,
+ target_key&& l, target_key&& r,
+ const char* comb = nullptr);
+
+ // Note: using small_vector would require target_key definition.
+ //
+ void
+ print_diag (const char* prog,
+ target_key&& l, vector<target_key>&& r,
+ const char* comb = nullptr);
+
+ // prog path -> target
+ // prog path -> group
+ //
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const path& l, const target& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const path& l, target_key&& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const path& l, vector<target_key>&& r,
+ const char* comb = nullptr);
+
+ // prog string -> target
+ // prog string -> group
+ //
+ // Use these versions if, for example, input information is passed as an
+ // argument.
+ //
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const string& l, const target& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const string& l, target_key&& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const string& l, vector<target_key>&& r,
+ const char* comb = nullptr);
+
+ // prog target
+ //
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog, const target&);
+
+ void
+ print_diag (const char* prog, target_key&&);
+
+ // prog group
+ //
+ void
+ print_diag (const char* prog, vector<target_key>&&);
+
+ // prog path
+ //
+ // Special versions for cases like mkdir/rmdir, save, etc.
+ //
+ // Note: use path_name("-") if the result is written to stdout.
+ //
+ void
+ print_diag (const char* prog, const path&);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog, const dir_path&);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog, const path_name_view&);
+
+  // Special versions for ln, cp, rm, install/uninstall, dist, etc.
+ //
+ // Note: use path_name ("-") if the result is written to stdout.
+
+ // prog target -> path
+ //
+ void
+ print_diag (const char* prog,
+ const target& l, const path& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const target& l, const dir_path& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const target& l, const path_name_view& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ target_key&& l, const dir_path& r,
+ const char* comb = nullptr);
+
+ // prog group -> dir_path
+ //
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ vector<target_key>&& l, const dir_path& r,
+ const char* comb = nullptr);
+
+ // prog path -> path
+ //
+ void
+ print_diag (const char* prog,
+ const path& l, const path& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const path& l, const dir_path& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const path& l, const path_name_view& r,
+ const char* comb = nullptr);
+
+ // prog string -> path
+ //
+ // Use this version if, for example, input information is passed as an
+ // argument.
+ //
+ void
+ print_diag (const char* prog,
+ const string& l, const path& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const string& l, const path_name_view& r,
+ const char* comb = nullptr);
+
+  // Print process command line. If the number of elements is specified (or
+ // the const cstrings& version is used), then it will print the piped multi-
+ // process command line, if present. In this case, the expected format is as
+ // follows:
//
// name1 arg arg ... nullptr
// name2 arg arg ... nullptr
@@ -179,92 +401,124 @@ namespace build2
using butl::diag_progress_lock;
// Return true if progress is to be shown. The max_verb argument is the
- // maximum verbosity level that this type of progress should be shown by
- // default.
+ // maximum verbosity level that this type of progress should be shown at by
+ // default. If it is verb_never, then both min and max verbosity checks are
+ // omitted, assuming the caller takes care of that themselves.
//
inline bool
show_progress (uint16_t max_verb)
{
return diag_progress_option
? *diag_progress_option
- : stderr_term && verb >= 1 && verb <= max_verb;
+ : stderr_term && (max_verb == verb_never ||
+ (verb >= 1 && verb <= max_verb));
}
- // Diagnostic facility, base infrastructure.
+ // Diagnostics color.
+ //
+ inline bool
+ show_diag_color ()
+ {
+ return diag_color_option ? *diag_color_option : stderr_term_color;
+ }
+
+ // Diagnostic facility.
+ //
+ // Note that this is the "complex" case where we derive from (rather than
+ // alias) a number of butl::diag_* types and provide custom operator<<
+ // "overrides" in order to make ADL look in the build2 rather than butl
+ // namespace.
//
using butl::diag_stream_lock;
using butl::diag_stream;
using butl::diag_epilogue;
+ using butl::diag_frame;
- // Diagnostics stack. Each frame is "applied" to the fail/error/warn/info
- // diag record.
- //
- // Unfortunately most of our use-cases don't fit into the 2-pointer small
- // object optimization of std::function. So we have to complicate things
- // a bit here.
- //
- struct LIBBUILD2_SYMEXPORT diag_frame
+ template <typename> struct diag_prologue;
+ template <typename> struct diag_mark;
+
+ struct diag_record: butl::diag_record
{
- explicit
- diag_frame (void (*f) (const diag_frame&, const diag_record&))
- : func_ (f)
+ template <typename T>
+ const diag_record&
+ operator<< (const T& x) const
{
- if (func_ != nullptr)
- prev_ = stack (this);
+ os << x;
+ return *this;
}
- diag_frame (diag_frame&& x)
- : func_ (x.func_)
- {
- if (func_ != nullptr)
- {
- prev_ = x.prev_;
- stack (this);
+ diag_record () = default;
- x.func_ = nullptr;
- }
- }
+ template <typename B>
+ explicit
+ diag_record (const diag_prologue<B>& p): diag_record () { *this << p;}
- diag_frame& operator= (diag_frame&&) = delete;
+ template <typename B>
+ explicit
+ diag_record (const diag_mark<B>& m): diag_record () { *this << m;}
+ };
- diag_frame (const diag_frame&) = delete;
- diag_frame& operator= (const diag_frame&) = delete;
+ template <typename B>
+ struct diag_prologue: butl::diag_prologue<B>
+ {
+ using butl::diag_prologue<B>::diag_prologue;
- ~diag_frame ()
+ template <typename T>
+ diag_record
+ operator<< (const T& x) const
{
- if (func_ != nullptr )
- stack (prev_);
+ diag_record r;
+ r.append (this->indent, this->epilogue);
+ B::operator() (r);
+ r << x;
+ return r;
}
- static void
- apply (const diag_record& r)
+ friend const diag_record&
+ operator<< (const diag_record& r, const diag_prologue& p)
{
- for (const diag_frame* f (stack ()); f != nullptr; f = f->prev_)
- f->func_ (*f, r);
+ r.append (p.indent, p.epilogue);
+ p (r);
+ return r;
}
+ };
- // Tip of the stack.
- //
- static const diag_frame*
- stack () noexcept;
+ template <typename B>
+ struct diag_mark: butl::diag_mark<B>
+ {
+ using butl::diag_mark<B>::diag_mark;
- // Set the new and return the previous tip of the stack.
- //
- static const diag_frame*
- stack (const diag_frame*) noexcept;
+ template <typename T>
+ diag_record
+ operator<< (const T& x) const
+ {
+ return B::operator() () << x;
+ }
- struct stack_guard
+ friend const diag_record&
+ operator<< (const diag_record& r, const diag_mark& m)
{
- explicit stack_guard (const diag_frame* s): s_ (stack (s)) {}
- ~stack_guard () {stack (s_);}
- const diag_frame* s_;
- };
+ return r << m ();
+ }
+ };
- private:
- void (*func_) (const diag_frame&, const diag_record&);
- const diag_frame* prev_;
+ template <typename B>
+ struct diag_noreturn_end: butl::diag_noreturn_end<B>
+ {
+ diag_noreturn_end () {} // For Clang 3.7 (const needs user default ctor).
+
+ using butl::diag_noreturn_end<B>::diag_noreturn_end;
+
+ [[noreturn]] friend void
+ operator<< (const diag_record& r, const diag_noreturn_end& e)
+ {
+ assert (r.full ());
+ e.B::operator() (r);
+ }
};
+ // Note: diag frames are not applied to text/trace diagnostics.
+ //
template <typename F>
struct diag_frame_impl: diag_frame
{
@@ -273,9 +527,10 @@ namespace build2
private:
static void
- thunk (const diag_frame& f, const diag_record& r)
+ thunk (const diag_frame& f, const butl::diag_record& r)
{
- static_cast<const diag_frame_impl&> (f).func_ (r);
+ static_cast<const diag_frame_impl&> (f).func_ (
+ static_cast<const diag_record&> (r));
}
const F func_;
@@ -288,8 +543,6 @@ namespace build2
return diag_frame_impl<F> (move (f));
}
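+
+ // For example, a rule can add context to any diagnostics issued while
+ // matching a target along these lines (a sketch; t is assumed to be the
+ // target being matched):
+ //
+ //   auto df = make_diag_frame (
+ //     [&t] (const diag_record& dr)
+ //     {
+ //       if (verb != 0)
+ //         dr << info << "while applying rule to " << t;
+ //     });
+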
- // Diagnostic facility, project specifics.
- //
struct LIBBUILD2_SYMEXPORT simple_prologue_base
{
explicit
@@ -352,8 +605,8 @@ namespace build2
struct basic_mark_base
{
- using simple_prologue = butl::diag_prologue<simple_prologue_base>;
- using location_prologue = butl::diag_prologue<location_prologue_base>;
+ using simple_prologue = diag_prologue<simple_prologue_base>;
+ using location_prologue = diag_prologue<location_prologue_base>;
explicit
basic_mark_base (const char* type,
@@ -427,7 +680,7 @@ namespace build2
const void* data_;
diag_epilogue* const epilogue_;
};
- using basic_mark = butl::diag_mark<basic_mark_base>;
+ using basic_mark = diag_mark<basic_mark_base>;
LIBBUILD2_SYMEXPORT extern const basic_mark error;
LIBBUILD2_SYMEXPORT extern const basic_mark warn;
@@ -452,7 +705,7 @@ namespace build2
mod,
name) {}
};
- using trace_mark = butl::diag_mark<trace_mark_base>;
+ using trace_mark = diag_mark<trace_mark_base>;
using tracer = trace_mark;
// fail
@@ -464,17 +717,17 @@ namespace build2
const void* data = nullptr)
: basic_mark_base (type,
data,
- [](const diag_record& r)
+ [](const butl::diag_record& r, butl::diag_writer* w)
{
diag_frame::apply (r);
- r.flush ();
+ r.flush (w);
throw failed ();
},
&stream_verb_map,
nullptr,
nullptr) {}
};
- using fail_mark = butl::diag_mark<fail_mark_base>;
+ using fail_mark = diag_mark<fail_mark_base>;
struct fail_end_base
{
@@ -488,11 +741,289 @@ namespace build2
throw failed ();
}
};
- using fail_end = butl::diag_noreturn_end<fail_end_base>;
+ using fail_end = diag_noreturn_end<fail_end_base>;
LIBBUILD2_SYMEXPORT extern const fail_mark fail;
LIBBUILD2_SYMEXPORT extern const fail_end endf;
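+
+ // Typical usage of the above marks could look along these lines (a
+ // sketch; f, t, and loc are assumed to be in scope):
+ //
+ //   error (loc) << "unable to parse " << f <<
+ //     info << "while loading " << t;
+ //
+ //   fail << "no such target " << t; // Throws failed.
+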
+ // Diagnostics buffer.
+ //
+ // The purpose of this class is to handle diagnostics from child processes,
+ // where handle can mean:
+ //
+ // - Buffer it (to avoid interleaving in parallel builds).
+ //
+ // - Stream it (if the input can be split into diagnostic records).
+ //
+ // - Do nothing (in serial builds or if requested not to buffer).
+ //
+ // In the future this class may also be responsible for converting the
+ // diagnostics into the structured form (which means it may need to buffer
+ // even in serial builds).
+ //
+ // The typical usage is as follows:
+ //
+ // process pr (..., diag_buffer::pipe (ctx));
+ // diag_buffer dbuf (ctx, args[0], pr); // Skip.
+ // ifdstream is (move (pr.in_ofd)); // No skip.
+ // ofdstream os (move (pr.out_fd));
+ //
+ // The reason for this somewhat roundabout setup is to make sure the
+ // diag_buffer instance is destroyed before the process instance. This is
+ // important in case an exception is thrown where we want to make sure all
+ // our pipe ends are closed before we wait for the process exit (which
+ // happens in the process destructor).
+ //
+ // And speaking of the destruction order, another thing to keep in mind is
+ // that only one stream can use the skip mode (fdstream_mode::skip; because
+ // skipping is performed in the blocking mode) and the stream that skips
+ // should come first so that all other streams are destroyed/closed before
+ // it (failing that, we may end up in a deadlock). For example:
+ //
+ // process pr (..., diag_buffer::pipe (ctx));
+ // ifdstream is (move (pr.in_ofd), fdstream_mode::skip); // Skip.
+ // diag_buffer dbuf (ctx, args[0], pr, fdstream_mode::none); // No skip.
+ // ofdstream os (move (pr.out_fd));
+ //
+ class LIBBUILD2_SYMEXPORT diag_buffer
+ {
+ public:
+ // If buffering is necessary or force is true, return an "instruction"
+ // (-1) to the process class constructor to open a pipe and redirect
+ // stderr to it. Otherwise, return an "instruction" to inherit stderr (2).
+ //
+ // The force flag is normally used if custom diagnostics processing is
+ // required (filter, split, etc; see read() below).
+ //
+ // Note that the diagnostics buffer must be opened (see below) regardless
+ // of the pipe() result.
+ //
+ static int
+ pipe (context&, bool force = false);
+
+ // Open the diagnostics buffer given the parent end of the pipe (normally
+ // process::in_efd). If it is nullfd, then assume no buffering is
+ // necessary. If mode is non_blocking, then make reading from the parent
+ // end of the pipe non-blocking.
+ //
+ // The args0 argument is the child process program name for diagnostics.
+ // It is expected to remain valid until the call to close() and should
+ // normally be the same as args[0] passed to close().
+ //
+ // Note that the same buffer can go through multiple open-read-close
+ // sequences, for example, to execute multiple commands.
+ //
+ // All the below functions handle io errors, issue suitable diagnostics,
+ // and throw failed. If an exception is thrown from any of them, then the
+ // instance should not be used any further.
+ //
+ // Note that when reading from multiple streams in the non-blocking mode,
+ // only the last stream to be destroyed can normally have the skip mode
+ // since in case of an exception, skipping will be blocking.
+ //
+ diag_buffer (context&,
+ const char* args0,
+ auto_fd&&,
+ fdstream_mode = fdstream_mode::skip);
+
+ // As above, but the parent end of the pipe (process::in_efd) is passed
+ // via a process instance.
+ //
+ diag_buffer (context&,
+ const char* args0,
+ process&,
+ fdstream_mode = fdstream_mode::skip);
+
+ // As above but with support for the underlying buffer reuse.
+ //
+ // Note that in most cases reusing the buffer is probably not worth the
+ // trouble because we normally don't expect any diagnostics in the common
+ // case. However, if needed, it can be arranged, for example:
+ //
+ // vector<char> buf;
+ //
+ // {
+ // process pr (...);
+ // diag_buffer dbuf (ctx, move (buf), args[0], pr);
+ // dbuf.read ();
+ // dbuf.close ();
+ // buf = move (dbuf.buf);
+ // }
+ //
+ // {
+ // ...
+ // }
+ //
+ // Note also that while there is no guarantee the underlying buffer is
+ // moved when, say, the vector is empty, all the main implementations
+ // always steal the buffer.
+ //
+ diag_buffer (context&,
+ vector<char>&& buf,
+ const char* args0,
+ auto_fd&&,
+ fdstream_mode = fdstream_mode::skip);
+
+ diag_buffer (context&,
+ vector<char>&& buf,
+ const char* args0,
+ process&,
+ fdstream_mode = fdstream_mode::skip);
+
+ // Separate construction and opening.
+ //
+ // Note: be careful with the destruction order (see above for details).
+ //
+ explicit
+ diag_buffer (context&);
+
+ diag_buffer (context&, vector<char>&& buf);
+
+ void
+ open (const char* args0,
+ auto_fd&&,
+ fdstream_mode = fdstream_mode::skip);
+
+ // Open the buffer in the state as if after read() returned false, that
+ // is, the stream corresponding to the parent's end of the pipe reached
+ // EOF and has been closed. This is primarily useful when the diagnostics
+ // is being read in a custom way (for example, it has been merged to
+ // stdout) and all we want is to be able to call write() and close().
+ //
+ void
+ open_eof (const char* args0);
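+
+ // For example (a sketch; assumes stderr has been merged into stdout when
+ // starting the process, e.g., by passing 1 as its err argument):
+ //
+ //   process pr (..., 0 /* stdin */, -1 /* stdout */, 1 /* stderr */);
+ //   ifdstream is (move (pr.in_ofd), fdstream_mode::skip);
+ //   diag_buffer dbuf (ctx);
+ //   dbuf.open_eof (args[0]);
+ //
+ //   // Read/process the merged output via is, then close it and call
+ //   // dbuf.close ().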
+
+ // Check whether the buffer has been opened with the open() call and
+ // hasn't yet been closed.
+ //
+ // Note that this function returning true does not mean that the pipe was
+ // opened (to check that, call is_open() on the stream member; see below).
+ //
+ bool
+ is_open () const
+ {
+ return state_ != state::closed;
+ }
+
+ // Read the diagnostics from the parent's end of the pipe if one was
+ // opened and buffer/stream it as necessary or forced. Return true if
+ // there could be more diagnostics to read (only possible in the non-
+ // blocking mode) and false otherwise, in which case also close the
+ // stream.
+ //
+ // Note that the force argument here (as well as in write() below) and
+ // in open() above are independent. Specifically, force in open() forces
+ // the opening of the pipe while force in read() and write() forces
+ // the buffering of the diagnostics.
+ //
+ // Instead of calling this function you can perform custom reading and, if
+ // necessary, buffering of the diagnostics by accessing the input stream
+ // (is) and underlying buffer (buf) directly. This can be used to filter,
+ // split the diagnostics into records according to a certain format, etc.
+ // Note that such a custom processing implementation should maintain the
+ // overall semantics of diagnostics buffering in that it may only omit
+ // buffering in the serial case or if the diagnostics can be streamed in
+ // atomic records. See also write() below.
+ //
+ // The input stream is opened in the text mode and has the badbit but not
+ // failbit exception mask. The custom processing should also be compatible
+ // with the stream mode (blocking or non). If buffering is performed, then
+ // depending on the expected diagnostics the custom processing may want to
+ // reserve an appropriate initial buffer size to avoid unnecessary
+ // reallocation. As a convenience, in the blocking mode only, if the
+ // stream still contains some diagnostics, then it can be handled by
+ // calling read(). This is useful when needing to process only the initial
+ // part of the diagnostics. The custom processing may also close the
+ // stream manually before calling close().
+ //
+ bool
+ read (bool force = false);
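+
+ // For example, a line-by-line filter could be implemented along these
+ // lines (a sketch; assumes the blocking mode and a hypothetical
+ // relevant() predicate):
+ //
+ //   if (dbuf.is.is_open ())
+ //   {
+ //     for (string l; !eof (getline (dbuf.is, l)); )
+ //     {
+ //       if (relevant (l))
+ //         dbuf.write (l, true /* newline */);
+ //     }
+ //     dbuf.is.close ();
+ //   }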
+
+ // Close the parent end of the pipe if one was opened and write out any
+ // buffered diagnostics.
+ //
+ // If the child process exited abnormally or normally with non-0 code,
+ // then print the error diagnostics to this effect. Additionally, if the
+ // verbosity level is between 1 and the specified value, then print the
+ // command line as info after the error. If omit_normal is true, then
+ // don't print either for the normal exit (usually used for custom
+ // diagnostics or when process failure can be tolerated).
+ //
+ // Normally the specified verbosity will be 1 and the command line args
+ // represent the verbosity level 2 (logical) command line. Note that args
+ // should only represent a single command in a pipe (see print_process()
+ // below for details).
+ //
+ // If the diag_buffer instance is destroyed before calling close(), then
+ // any buffered diagnostics is discarded.
+ //
+ // Note: see also run_finish(diag_buffer&).
+ //
+ // @@ TODO: need overload with process_env (see print_process). Also in
+ // run_finish_impl().
+ //
+ void
+ close (const cstrings& args,
+ const process_exit&,
+ uint16_t verbosity,
+ bool omit_normal = false,
+ const location& = {});
+
+ void
+ close (const char* const* args,
+ const process_exit&,
+ uint16_t verbosity,
+ bool omit_normal = false,
+ const location& = {});
+
+ // As above but with a custom diag record for the child exit diagnostics,
+ // if any. Note that if the diag record has the fail epilogue, then this
+ // function will throw.
+ //
+ void
+ close (diag_record&& = {});
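+
+ // For example, a tolerated failure could be reported as a warning along
+ // these lines (a sketch; pr is assumed to be the child process instance):
+ //
+ //   dbuf.read ();
+ //
+ //   if (!pr.wait ())
+ //   {
+ //     diag_record dr (warn);
+ //     dr << "unable to obtain version from " << args0;
+ //     dbuf.close (move (dr));
+ //   }
+ //   else
+ //     dbuf.close ();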
+
+ // Direct access to the underlying stream and buffer for custom processing
+ // (see read() above for details).
+ //
+ // If serial is true, then we are running serially. If nobuf is true,
+ // then we are running in parallel but diagnostics buffering has been
+ // disabled (--no-diag-buffer). Note that there is a difference: during
+ // the serial execution we are free to hold the diag_stream_lock for as
+ // long as convenient, for example, for the whole duration of child
+ // process execution. Doing the same during parallel execution is very
+ // a bad idea and we should read/write the diagnostics in chunks, normally
+ // one line at a time.
+ //
+ public:
+ ifdstream is;
+ vector<char> buf;
+ const char* args0;
+ bool serial;
+ bool nobuf;
+
+ // Buffer or stream a fragment of diagnostics as necessary or forced. If
+ // newline is true, also add a trailing newline.
+ //
+ // This function is normally called from a custom diagnostics processing
+ // implementation (see read() above for details). If nobuf is true, then
+ // the fragment should end on the line boundary to avoid interleaving.
+ //
+ void
+ write (const string&, bool newline, bool force = false);
+
+ private:
+ // Note that we don't seem to need a custom destructor to achieve the
+ // desired semantics: we can assume the process has exited before we are
+ // destroyed (because we supply stderr to its constructor) which means
+ // closing the fdstream without reading any further should be ok.
+ //
+ enum class state {closed, opened, eof};
+
+ context& ctx_;
+ state state_ = state::closed;
+ };
+
// Action phrases, e.g., "configure update exe{foo}", "updating exe{foo}",
// and "updating exe{foo} is configured". Use like this:
//
@@ -558,4 +1089,6 @@ namespace build2
}
}
+#include <libbuild2/diagnostics.ixx>
+
#endif // LIBBUILD2_DIAGNOSTICS_HXX
diff --git a/libbuild2/diagnostics.ixx b/libbuild2/diagnostics.ixx
new file mode 100644
index 0000000..273dfad
--- /dev/null
+++ b/libbuild2/diagnostics.ixx
@@ -0,0 +1,126 @@
+// file : libbuild2/diagnostics.ixx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+namespace build2
+{
+ // print_diag()
+ //
+ LIBBUILD2_SYMEXPORT void
+ print_diag_impl (const char*, target_key*, target_key&&, const char*);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag_impl (const char*,
+ target_key*, vector<target_key>&& r,
+ const char*);
+
+ inline void
+ print_diag (const char* p, target_key&& l, target_key&& r, const char* c)
+ {
+ print_diag_impl (p, &l, move (r), c);
+ }
+
+ inline void
+ print_diag (const char* p,
+ target_key&& l, vector<target_key>&& r,
+ const char* c)
+ {
+ print_diag_impl (p, &l, move (r), c);
+ }
+
+ inline void
+ print_diag (const char* p, target_key&& r)
+ {
+ print_diag_impl (p, nullptr, move (r), nullptr);
+ }
+
+ inline void
+ print_diag (const char* p, vector<target_key>&& r)
+ {
+ print_diag_impl (p, nullptr, move (r), nullptr);
+ }
+
+ inline void
+ print_diag (const char* p, const path& r)
+ {
+ print_diag (p, path_name (&r));
+ }
+
+ inline void
+ print_diag (const char* p, const target& l, const path& r, const char* c)
+ {
+ print_diag (p, l, path_name (&r), c);
+ }
+
+ inline void
+ print_diag (const char* p, const path& l, const path& r, const char* c)
+ {
+ print_diag (p, l, path_name (&r), c);
+ }
+
+ inline void
+ print_diag (const char* p, const string& l, const path& r, const char* c)
+ {
+ print_diag (p, l, path_name (&r), c);
+ }
+
+ // diag_buffer
+ //
+ inline diag_buffer::
+ diag_buffer (context& ctx)
+ : is (ifdstream::badbit), ctx_ (ctx)
+ {
+ }
+
+ inline diag_buffer::
+ diag_buffer (context& ctx, vector<char>&& b)
+ : is (ifdstream::badbit), buf (move (b)), ctx_ (ctx)
+ {
+ buf.clear ();
+ }
+
+ inline diag_buffer::
+ diag_buffer (context& ctx, const char* args0, auto_fd&& fd, fdstream_mode m)
+ : diag_buffer (ctx)
+ {
+ open (args0, move (fd), m);
+ }
+
+ inline diag_buffer::
+ diag_buffer (context& ctx, const char* args0, process& pr, fdstream_mode m)
+ : diag_buffer (ctx)
+ {
+ open (args0, move (pr.in_efd), m);
+ }
+
+ inline diag_buffer::
+ diag_buffer (context& ctx,
+ vector<char>&& b,
+ const char* args0,
+ auto_fd&& fd,
+ fdstream_mode m)
+ : diag_buffer (ctx, move (b))
+ {
+ open (args0, move (fd), m);
+ }
+
+ inline diag_buffer::
+ diag_buffer (context& ctx,
+ vector<char>&& b,
+ const char* args0,
+ process& pr,
+ fdstream_mode m)
+ : diag_buffer (ctx, move (b))
+ {
+ open (args0, move (pr.in_efd), m);
+ }
+
+ inline void diag_buffer::
+ close (const cstrings& args,
+ const process_exit& pe,
+ uint16_t verbosity,
+ bool omit_normal,
+ const location& loc)
+ {
+ close (args.data (), pe, verbosity, omit_normal, loc);
+ }
+}
diff --git a/libbuild2/dist/init.cxx b/libbuild2/dist/init.cxx
index 2be4c3f..48a3e15 100644
--- a/libbuild2/dist/init.cxx
+++ b/libbuild2/dist/init.cxx
@@ -3,8 +3,9 @@
#include <libbuild2/dist/init.hxx>
-#include <libbuild2/scope.hxx>
#include <libbuild2/file.hxx>
+#include <libbuild2/rule.hxx>
+#include <libbuild2/scope.hxx>
#include <libbuild2/diagnostics.hxx>
#include <libbuild2/config/utility.hxx>
@@ -21,6 +22,7 @@ namespace build2
namespace dist
{
static const rule rule_;
+ static const file_rule file_rule_ (true /* check_type */);
void
boot (scope& rs, const location&, module_boot_extra& extra)
@@ -32,7 +34,34 @@ namespace build2
// Enter module variables. Do it during boot in case they get assigned
// in bootstrap.build (which is customary for, e.g., dist.package).
//
- auto& vp (rs.var_pool ());
+
+ // The dist flag or path. Normally it is a flag (true or false) but can
+ // also be used to remap the distribution location.
+ //
+ // In the latter case it specifies the "imaginary" source location which
+ // is used to derive the corresponding distribution location. This location
+ // can be specified as either a directory path (to remap with the same
+ // file name) or a file path (to remap with a different name). And the
+ // way we distinguish between the two is via the presence/absence of the
+ // trailing directory separator. If the path is relative, then it's
+ // treated relative to the target directory. Note that to make things
+ // less error prone, simple paths without any directory separators are
+ // not allowed (use ./<name> instead).
+ //
+ // Note that if multiple targets end up with the same source location,
+ // the behavior is undefined and no diagnostics is issued.
+ //
+ // Note also that such remapping has no effect in the bootstrap
+ // distribution mode.
+ //
+ // Note: project-private.
+ //
+ rs.var_pool ().insert<path> ("dist", variable_visibility::target);
+
+ // The rest of the variables we enter are qualified so go straight for
+ // the public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
// config.dist.archives is a list of archive extensions (e.g., zip,
// tar.gz) that can be optionally prefixed with a directory. If it is
@@ -57,7 +86,7 @@ namespace build2
// The bootstrap distribution mode. Note that it can only be specified
// as a global override and is thus marked as unsaved in init(). Unlike
- // the normal load distribution mode, we can do in-source and multiple
+ // the normal load distribution mode, we can do in source and multiple
// projects at once.
//
// Note also that other config.dist.* variables can only be specified as
@@ -71,8 +100,6 @@ namespace build2
vp.insert<paths> ("dist.archives");
vp.insert<paths> ("dist.checksums");
- vp.insert<bool> ("dist", variable_visibility::target); // Flag.
-
// Project's package name. Note: if set, must be in bootstrap.build.
//
auto& v_d_p (vp.insert<string> ("dist.package"));
@@ -107,7 +134,7 @@ namespace build2
//
bool s (specified_config (rs, "dist", {"bootstrap"}));
- // dist.root
+ // config.dist.root
//
{
value& v (rs.assign ("dist.root"));
@@ -119,22 +146,24 @@ namespace build2
}
}
- // dist.cmd
+ // config.dist.cmd
+ //
+ // By default we use in-process code for creating directories and
+ // copying files (for performance, especially on Windows). But an
+ // external program (normally install) can be used if configured.
//
{
- value& v (rs.assign<process_path> ("dist.cmd"));
+ value& v (rs.assign<process_path> ("dist.cmd")); // NULL
if (s)
{
- if (lookup l = lookup_config (rs,
- "config.dist.cmd",
- path ("install")))
+ if (lookup l = lookup_config (rs, "config.dist.cmd", nullptr))
v = run_search (cast<path> (l), true);
}
}
- // dist.archives
- // dist.checksums
+ // config.dist.archives
+ // config.dist.checksums
//
{
value& a (rs.assign ("dist.archives"));
@@ -157,7 +186,7 @@ namespace build2
}
}
- // dist.uncommitted
+ // config.dist.uncommitted
//
// Omit it from the configuration unless specified.
//
@@ -182,13 +211,26 @@ namespace build2
l5 ([&]{trace << "for " << rs;});
- auto& vp (rs.var_pool ());
+ auto& vp (rs.var_pool (true /* public */)); // All qualified.
// Register our wildcard rule. Do it explicitly for the alias to prevent
// something like insert<target>(dist_id, test_id) taking precedence.
//
rs.insert_rule<target> (dist_id, 0, "dist", rule_);
- rs.insert_rule<alias> (dist_id, 0, "dist.alias", rule_); //@@ outer?
+ rs.insert_rule<alias> (dist_id, 0, "dist.alias", rule_);
+
+ // We need this rule for out-of-any-project dependencies (for example,
+ // executables imported from /usr/bin, etc). We are registering it on
+ // the global scope similar to builtin rules.
+ //
+ // Note: use target instead of anything more specific (such as
+ // mtime_target) in order not to take precedence over the "dist" rule
+ // above.
+ //
+ // See a similar rule in the config module.
+ //
+ rs.global_scope ().insert_rule<target> (
+ dist_id, 0, "dist.file", file_rule_);
// Configuration.
//
diff --git a/libbuild2/dist/module.hxx b/libbuild2/dist/module.hxx
index 314dc96..da97939 100644
--- a/libbuild2/dist/module.hxx
+++ b/libbuild2/dist/module.hxx
@@ -10,14 +10,17 @@
#include <libbuild2/module.hxx>
#include <libbuild2/variable.hxx>
+#include <libbuild2/dist/types.hxx>
+
#include <libbuild2/export.hxx>
namespace build2
{
namespace dist
{
- struct LIBBUILD2_SYMEXPORT module: build2::module
+ class LIBBUILD2_SYMEXPORT module: public build2::module
{
+ public:
static const string name;
const variable& var_dist_package;
@@ -38,6 +41,10 @@ namespace build2
adhoc.push_back (move (f));
}
+ // List of postponed prerequisites (see rule for details).
+ //
+ mutable postponed_prerequisites postponed;
+
// Distribution post-processing callbacks.
//
// Only the last component in the pattern may contain wildcards. If the
@@ -69,10 +76,11 @@ namespace build2
// Implementation details.
//
- module (const variable& v_d_p)
- : var_dist_package (v_d_p) {}
+ public:
+ module (const variable& v_d_p): var_dist_package (v_d_p) {}
public:
+ bool distributed = false; // True if this project is being distributed.
vector<path> adhoc;
struct callback
diff --git a/libbuild2/dist/operation.cxx b/libbuild2/dist/operation.cxx
index f3db8ad..cd88eac 100644
--- a/libbuild2/dist/operation.cxx
+++ b/libbuild2/dist/operation.cxx
@@ -6,6 +6,8 @@
#include <libbutl/sha1.hxx>
#include <libbutl/sha256.hxx>
+#include <libbutl/filesystem.hxx> // try_mkdir_p(), cpfile()
+
#include <libbuild2/file.hxx>
#include <libbuild2/dump.hxx>
#include <libbuild2/scope.hxx>
@@ -15,6 +17,8 @@
#include <libbuild2/filesystem.hxx>
#include <libbuild2/diagnostics.hxx>
+#include <libbuild2/dist/types.hxx>
+#include <libbuild2/dist/rule.hxx>
#include <libbuild2/dist/module.hxx>
using namespace std;
@@ -27,14 +31,14 @@ namespace build2
// install -d <dir>
//
static void
- install (const process_path& cmd, const dir_path&);
+ install (const process_path*, context&, const dir_path&);
- // install <file> <dir>
+ // install <file> <dir>[/<name>]
//
// Return the destination file path.
//
static path
- install (const process_path& cmd, const file&, const dir_path&);
+ install (const process_path*, const file&, const dir_path&, const path&);
// tar|zip ... <dir>/<pkg>.<ext> <pkg>
//
@@ -64,6 +68,30 @@ namespace build2
return o;
}
+ static void
+ dist_load_load (const values& vs,
+ scope& rs,
+ const path& bf,
+ const dir_path& out_base,
+ const dir_path& src_base,
+ const location& l)
+ {
+ // @@ TMP: redo after release (do it here and not in execute, also add
+ // custom search and do the other half there).
+ //
+#if 0
+ if (rs.out_path () != out_base || rs.src_path () != src_base)
+ fail (l) << "dist meta-operation target must be project root directory";
+#endif
+
+ // Mark this project as being distributed.
+ //
+ if (auto* m = rs.find_module<module> (module::name))
+ m->distributed = true;
+
+ perform_load (vs, rs, bf, out_base, src_base, l);
+ }
+
// Enter the specified source file as a target of type T. The path is
// expected to be normalized and relative to src_root. If the third
// argument is false, then first check if the file exists. If the fourth
@@ -82,9 +110,7 @@ namespace build2
// Figure out if we need out.
//
- dir_path out (rs.src_path () != rs.out_path ()
- ? out_src (d, rs)
- : dir_path ());
+ dir_path out (!rs.out_eq_src () ? out_src (d, rs) : dir_path ());
const T& t (rs.ctx.targets.insert<T> (
move (d),
@@ -126,7 +152,7 @@ namespace build2
try
{
- for (const dir_entry& e: dir_iterator (d, false /* ignore_dangling */))
+ for (const dir_entry& e: dir_iterator (d, dir_iterator::no_follow))
{
const path& n (e.path ());
@@ -212,8 +238,13 @@ namespace build2
fail << "unknown distribution package name" <<
info << "did you forget to set dist.package?";
+ const module& mod (*rs.find_module<module> (module::name));
+
const string& dist_package (cast<string> (l));
- const process_path& dist_cmd (cast<process_path> (rs.vars["dist.cmd"]));
+ const process_path* dist_cmd (
+ cast_null<process_path> (rs.vars["dist.cmd"]));
+
+ dir_path td (dist_root / dir_path (dist_package));
// We used to print 'dist <target>' at verbosity level 1 but that has
// proven to be just noise. Though we still want to print something
@@ -224,79 +255,143 @@ namespace build2
// (e.g., output directory creation) in all the operations below.
//
if (verb == 1)
- text << "dist " << dist_package;
+ print_diag ("dist", src_root, td);
// Get the list of files to distribute.
//
action_targets files;
+ const variable* dist_var (nullptr);
if (tgt != nullptr)
{
l5 ([&]{trace << "load dist " << rs;});
+ dist_var = rs.var_pool ().find ("dist");
+
// Match a rule for every operation supported by this project. Skip
// default_id.
//
// Note that we are not calling operation_pre/post() callbacks here
// since the meta operation is dist and we know what we are doing.
//
- values params;
path_name pn ("<dist>");
const location loc (pn); // Dummy location.
+ action_targets ts {tgt};
+
+ auto process_postponed = [&ctx, &mod] ()
{
- action_targets ts {tgt};
+ if (!mod.postponed.list.empty ())
+ {
+ // Re-grab the phase lock similar to perform_match().
+ //
+ phase_lock l (ctx, run_phase::match);
- auto mog = make_guard ([&ctx] () {ctx.match_only = false;});
- ctx.match_only = true;
+ // Note that we don't need to bother with the mutex since we do
+ // all of this serially. But we can end up with new elements at
+ // the end.
+ //
+ // Strictly speaking, to handle this correctly we would need to do
+ // multiple passes over this list and only give up when we cannot
+ // make any progress since earlier entries that we cannot resolve
+ // could be "fixed" by later entries. But this feels far-fetched
+ // and so let's wait for a real example before complicating this.
+ //
+ for (auto i (mod.postponed.list.begin ());
+ i != mod.postponed.list.end ();
+ ++i)
+ rule::match_postponed (*i);
+ }
+ };
- const operations& ops (rs.root_extra->operations);
- for (operations::size_type id (default_id + 1); // Skip default_id.
- id < ops.size ();
- ++id)
+ auto mog = make_guard ([&ctx] () {ctx.match_only = nullopt;});
+ ctx.match_only = match_only_level::all;
+
+ const operations& ops (rs.root_extra->operations);
+ for (operations::size_type id (default_id + 1); // Skip default_id.
+ id < ops.size ();
+ ++id)
+ {
+ if (const operation_info* oif = ops[id])
{
- if (const operation_info* oif = ops[id])
- {
- // Skip aliases (e.g., update-for-install). In fact, one can
- // argue the default update should be sufficient since it is
- // assumed to update all prerequisites and we no longer support
- // ad hoc stuff like test.input. Though here we are using the
- // dist meta-operation, not perform.
- //
- if (oif->id != id)
- continue;
+ // Skip aliases (e.g., update-for-install). In fact, one can argue
+ // the default update should be sufficient since it is assumed to
+ // update all prerequisites and we no longer support ad hoc stuff
+ // like test.input. Though here we are using the dist
+ // meta-operation, not perform.
+ //
+ if (oif->id != id)
+ continue;
- // Use standard (perform) match.
- //
- if (auto pp = oif->pre_operation)
+ // Use standard (perform) match.
+ //
+ if (auto pp = oif->pre_operation)
+ {
+ if (operation_id pid = pp (ctx, {}, dist_id, loc))
{
- if (operation_id pid = pp (ctx, params, dist_id, loc))
- {
- const operation_info* poif (ops[pid]);
- ctx.current_operation (*poif, oif, false /* diag_noise */);
- action a (dist_id, poif->id, oif->id);
- match (params, a, ts,
- 1 /* diag (failures only) */,
- false /* progress */);
- }
+ const operation_info* poif (ops[pid]);
+ ctx.current_operation (*poif, oif, false /* diag_noise */);
+
+ if (oif->operation_pre != nullptr)
+ oif->operation_pre (ctx, {}, false /* inner */, loc);
+
+ if (poif->operation_pre != nullptr)
+ poif->operation_pre (ctx, {}, true /* inner */, loc);
+
+ action a (dist_id, poif->id, oif->id);
+ mod.postponed.list.clear ();
+ perform_match ({}, a, ts,
+ 1 /* diag (failures only) */,
+ false /* progress */);
+ process_postponed ();
+
+ if (poif->operation_post != nullptr)
+ poif->operation_post (ctx, {}, true /* inner */);
+
+ if (oif->operation_post != nullptr)
+ oif->operation_post (ctx, {}, false /* inner */);
}
+ }
+
+ ctx.current_operation (*oif, nullptr, false /* diag_noise */);
+
+ if (oif->operation_pre != nullptr)
+ oif->operation_pre (ctx, {}, true /* inner */, loc);
- ctx.current_operation (*oif, nullptr, false /* diag_noise */);
- action a (dist_id, oif->id);
- match (params, a, ts,
- 1 /* diag (failures only) */,
- false /* progress */);
+ action a (dist_id, oif->id);
+ mod.postponed.list.clear ();
+ perform_match ({}, a, ts,
+ 1 /* diag (failures only) */,
+ false /* progress */);
+ process_postponed ();
- if (auto po = oif->post_operation)
+ if (oif->operation_post != nullptr)
+ oif->operation_post (ctx, {}, true /* inner */);
+
+ if (auto po = oif->post_operation)
+ {
+ if (operation_id pid = po (ctx, {}, dist_id))
{
- if (operation_id pid = po (ctx, params, dist_id))
- {
- const operation_info* poif (ops[pid]);
- ctx.current_operation (*poif, oif, false /* diag_noise */);
- action a (dist_id, poif->id, oif->id);
- match (params, a, ts,
- 1 /* diag (failures only) */,
- false /* progress */);
- }
+ const operation_info* poif (ops[pid]);
+ ctx.current_operation (*poif, oif, false /* diag_noise */);
+
+ if (oif->operation_pre != nullptr)
+ oif->operation_pre (ctx, {}, false /* inner */, loc);
+
+ if (poif->operation_pre != nullptr)
+ poif->operation_pre (ctx, {}, true /* inner */, loc);
+
+ action a (dist_id, poif->id, oif->id);
+ mod.postponed.list.clear ();
+ perform_match ({}, a, ts,
+ 1 /* diag (failures only) */,
+ false /* progress */);
+ process_postponed ();
+
+ if (poif->operation_post != nullptr)
+ poif->operation_post (ctx, {}, true /* inner */);
+
+ if (oif->operation_post != nullptr)
+ oif->operation_post (ctx, {}, false /* inner */);
}
}
}
@@ -305,7 +400,7 @@ namespace build2
// Add ad hoc files and buildfiles that are not normally loaded as
// part of the project, for example, the export stub. They will still
// be ignored on the next step if the user explicitly marked them
- // dist=false.
+ // with dist=false.
//
auto add_adhoc = [] (const scope& rs)
{
@@ -352,7 +447,7 @@ namespace build2
dir_path out_nroot (out_root / pd);
const scope& nrs (ctx.scopes.find_out (out_nroot));
- if (nrs.out_path () != out_nroot) // This subproject not loaded.
+ if (nrs.out_path () != out_nroot) // This subproject is not loaded.
continue;
if (!nrs.src_path ().sub (src_root)) // Not a strong amalgamation.
@@ -368,50 +463,96 @@ namespace build2
// Note that we are not showing progress here (e.g., "N targets to
// distribute") since it will be useless (too fast).
//
- const variable& dist_var (ctx.var_pool["dist"]);
-
- for (const auto& pt: ctx.targets)
+ auto see_through = [] (const target& t)
{
- file* ft (pt->is_a<file> ());
-
- if (ft == nullptr) // Not a file.
- continue;
+ return ((t.type ().flags & target_type::flag::see_through) ==
+ target_type::flag::see_through);
+ };
- if (ft->dir.sub (src_root))
+ auto collect = [&trace, &dist_var,
+ &src_root, &out_root] (const file& ft)
+ {
+ if (ft.dir.sub (src_root))
{
// Include unless explicitly excluded.
//
- auto l ((*ft)[dist_var]);
-
- if (l && !cast<bool> (l))
- l5 ([&]{trace << "excluding " << *ft;});
- else
- files.push_back (ft);
+ if (const path* v = cast_null<path> (ft[dist_var]))
+ {
+ if (v->string () == "false")
+ {
+ l5 ([&]{trace << "excluding " << ft;});
+ return false;
+ }
+ }
- continue;
+ return true;
}
-
- if (ft->dir.sub (out_root))
+ else if (ft.dir.sub (out_root))
{
// Exclude unless explicitly included.
//
- auto l ((*ft)[dist_var]);
+ if (const path* v = cast_null<path> (ft[dist_var]))
+ {
+ if (v->string () != "false")
+ {
+ l5 ([&]{trace << "including " << ft;});
+ return true;
+ }
+ }
- if (l && cast<bool> (l))
+ return false;
+ }
+ else
+ return false; // Out of project.
+ };
+
+ for (const auto& pt: ctx.targets)
+ {
+ // Collect see-through groups if they are marked with dist=true.
+ //
+ // Note that while it's possible that only their certain members are
+ // marked as such (e.g., via a pattern), we will still require
+ // dist=true on the group itself (and potentially dist=false on some
+ // of its members) for such cases because we don't want to update
+ // every see-through group only to discover that most of them don't
+ // have anything to distribute.
+ //
+ if (see_through (*pt))
+ {
+ if (const path* v = cast_null<path> ((*pt)[dist_var]))
{
- l5 ([&]{trace << "including " << *ft;});
- files.push_back (ft);
+ if (v->string () != "false")
+ {
+ l5 ([&]{trace << "including group " << *pt;});
+ files.push_back (pt.get ());
+ }
}
continue;
}
+
+ file* ft (pt->is_a<file> ());
+
+ if (ft == nullptr) // Not a file.
+ continue;
+
+ // Skip members of see-through groups since after dist_* their list
+ // can be incomplete (or even bogus, e.g., the "representative
+ // sample"). Instead, we will collect them during perform_update
+ // below.
+ //
+ if (ft->group != nullptr && see_through (*ft->group))
+ continue;
+
+ if (collect (*ft))
+ files.push_back (ft);
}
// Make sure what we need to distribute is up to date.
//
{
if (mo_perform.meta_operation_pre != nullptr)
- mo_perform.meta_operation_pre (ctx, params, loc);
+ mo_perform.meta_operation_pre (ctx, {}, loc);
// This is a hack since according to the rules we need to completely
// reset the state. We could have done that (i.e., saved target
@@ -427,25 +568,75 @@ namespace build2
ctx.current_on = on + 1;
if (mo_perform.operation_pre != nullptr)
- mo_perform.operation_pre (ctx, params, update_id);
+ mo_perform.operation_pre (ctx, {}, update_id);
ctx.current_operation (op_update, nullptr, false /* diag_noise */);
+ if (op_update.operation_pre != nullptr)
+ op_update.operation_pre (ctx, {}, true /* inner */, loc);
+
action a (perform_update_id);
- mo_perform.match (params, a, files,
+ mo_perform.match ({}, a, files,
1 /* diag (failures only) */,
prog /* progress */);
- mo_perform.execute (params, a, files,
+ mo_perform.execute ({}, a, files,
1 /* diag (failures only) */,
prog /* progress */);
+ // Replace see-through groups (which now should have their members
+ // resolved) with members.
+ //
+ for (auto i (files.begin ()); i != files.end (); )
+ {
+ const target& t (i->as<target> ());
+ if (see_through (t))
+ {
+ group_view gv (t.group_members (a)); // Go directly.
+
+ if (gv.members == nullptr)
+ fail << "unable to resolve see-through group " << t
+ << " members";
+
+ i = files.erase (i); // Drop the group itself.
+
+ for (size_t j (0); j != gv.count; ++j)
+ {
+ if (const target* m = gv.members[j])
+ {
+ if (const file* ft = m->is_a<file> ())
+ {
+ // Note that a rule may only link-up its members to groups
+ // if/when matched (for example, the cli.cxx{} group). It
+ // feels harmless for us to do the linking here.
+ //
+ if (ft->group == nullptr)
+ const_cast<file*> (ft)->group = &t;
+ else
+ assert (ft->group == &t); // Sanity check.
+
+ if (collect (*ft))
+ {
+ i = files.insert (i, ft); // Insert instead of the group.
+ i++; // Stay after the group.
+ }
+ }
+ }
+ }
+ }
+ else
+ ++i;
+ }
+
+ if (op_update.operation_post != nullptr)
+ op_update.operation_post (ctx, {}, true /* inner */);
+
if (mo_perform.operation_post != nullptr)
- mo_perform.operation_post (ctx, params, update_id);
+ mo_perform.operation_post (ctx, {}, update_id);
if (mo_perform.meta_operation_post != nullptr)
- mo_perform.meta_operation_post (ctx, params);
+ mo_perform.meta_operation_post (ctx, {});
}
}
else
@@ -471,37 +662,80 @@ namespace build2
//
auto_project_env penv (rs);
- dir_path td (dist_root / dir_path (dist_package));
-
// Clean up the target directory.
//
if (rmdir_r (ctx, td, true, 2) == rmdir_status::not_empty)
fail << "unable to clean target directory " << td;
auto_rmdir rm_td (td); // Clean it up if things go bad.
- install (dist_cmd, td);
+ install (dist_cmd, ctx, td);
// Copy over all the files. Apply post-processing callbacks.
//
- module& mod (*rs.find_module<module> (module::name));
-
prog = prog && show_progress (1 /* max_verb */);
size_t prog_percent (0);
for (size_t i (0), n (files.size ()); i != n; ++i)
{
- const file& t (*files[i].as<target> ().is_a<file> ());
+ const file& t (files[i].as<target> ().as<file> ()); // Only files.
// Figure out where this file is inside the target directory.
//
- bool src (t.dir.sub (src_root));
- dir_path dl (src ? t.dir.leaf (src_root) : t.dir.leaf (out_root));
+ // First see if the path has been remapped (unless bootstrap).
+ //
+ const path* rp (nullptr);
+ if (tgt != nullptr)
+ {
+ if ((rp = cast_null<path> (t[dist_var])) != nullptr)
+ {
+ if (rp->string () == "true") // Wouldn't be here if false.
+ rp = nullptr;
+ }
+ }
+
+ bool src;
+ path rn;
+ dir_path dl;
+ if (rp == nullptr)
+ {
+ src = t.dir.sub (src_root);
+ dl = src ? t.dir.leaf (src_root) : t.dir.leaf (out_root);
+ }
+ else
+ {
+ // Sort the remapped path into name (if any) and directory,
+ // completing the latter if relative.
+ //
+ bool n (!rp->to_directory ());
+
+ if (n)
+ {
+ if (rp->simple ())
+ {
+ fail << "expected true, false, or path in the dist variable "
+ << "value of target " << t <<
+ info << "specify ./" << *rp << " to remap the name";
+ }
+
+ rn = rp->leaf ();
+ }
+
+ dir_path rd (n ? rp->directory () : path_cast<dir_path> (*rp));
+
+ if (rd.relative ())
+ rd = t.dir / rd;
+
+ rd.normalize ();
+
+ src = rd.sub (src_root);
+ dl = src ? rd.leaf (src_root) : rd.leaf (out_root);
+ }
dir_path d (td / dl);
if (!exists (d))
- install (dist_cmd, d);
+ install (dist_cmd, ctx, d);
- path r (install (dist_cmd, t, d));
+ path r (install (dist_cmd, t, d, rn));
// See if this file is in a subproject.
//
@@ -646,8 +880,8 @@ namespace build2
fail << "dist meta-operation target must be project root directory";
if (rs->out_eq_src ())
- fail << "in-tree distribution of target " << t <<
- info << "distribution requires out-of-tree build";
+ fail << "in source distribution of target " << t <<
+ info << "distribution requires out of source build";
dist_project (*rs, &t, prog);
}
@@ -655,60 +889,131 @@ namespace build2
// install -d <dir>
//
static void
- install (const process_path& cmd, const dir_path& d)
+ install (const process_path* cmd, context& ctx, const dir_path& d)
{
- path reld (relative (d));
+ path reld;
+ cstrings args;
- cstrings args {cmd.recall_string (), "-d"};
+ if (cmd != nullptr || verb >= 2)
+ {
+ reld = relative (d);
- args.push_back ("-m");
- args.push_back ("755");
- args.push_back (reld.string ().c_str ());
- args.push_back (nullptr);
+ args.push_back (cmd != nullptr ? cmd->recall_string () : "install");
+ args.push_back ("-d");
+ args.push_back ("-m");
+ args.push_back ("755");
+ args.push_back (reld.string ().c_str ());
+ args.push_back (nullptr);
- if (verb >= 2)
- print_process (args);
+ if (verb >= 2)
+ print_process (args);
+ }
- run (cmd, args);
+ if (cmd != nullptr)
+ run (ctx, *cmd, args, 1 /* finish_verbosity */);
+ else
+ {
+ try
+ {
+ // Note that mode has no effect on Windows, which is probably for
+ // the best.
+ //
+ try_mkdir_p (d, 0755);
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to create directory " << d << ": " << e;
+ }
+ }
}
- // install <file> <dir>
+ // install <file> <dir>[/<name>]
//
static path
- install (const process_path& cmd, const file& t, const dir_path& d)
+ install (const process_path* cmd,
+ const file& t,
+ const dir_path& d,
+ const path& n)
{
- dir_path reld (relative (d));
- path relf (relative (t.path ()));
-
- cstrings args {cmd.recall_string ()};
+ const path& f (t.path ());
+ path r (d / (n.empty () ? f.leaf () : n));
- // Preserve timestamps. This could becomes important if, for
- // example, we have pre-generated sources. Note that the
- // install-sh script doesn't support this option, while both
- // Linux and BSD install's do.
+ // Assume the file is executable if the owner has execute permission,
+ // in which case we make it executable for everyone.
//
- args.push_back ("-p");
+ bool exe ((path_perms (f) & permissions::xu) == permissions::xu);
- // Assume the file is executable if the owner has execute
- // permission, in which case we make it executable for
- // everyone.
- //
- args.push_back ("-m");
- args.push_back (
- (path_perms (t.path ()) & permissions::xu) == permissions::xu
- ? "755"
- : "644");
+ path relf, reld;
+ cstrings args;
- args.push_back (relf.string ().c_str ());
- args.push_back (reld.string ().c_str ());
- args.push_back (nullptr);
+ if (cmd != nullptr || verb >= 2)
+ {
+ relf = relative (f);
+ reld = relative (d);
- if (verb >= 2)
- print_process (args);
+ if (!n.empty ()) // Leave as just directory if no custom name.
+ reld /= n;
+
+ args.push_back (cmd != nullptr ? cmd->recall_string () : "install");
+
+ // Preserve timestamps. This could become important if, for example,
+ // we have pre-generated sources. Note that the install-sh script
+ // doesn't support this option, while both Linux and BSD install's do.
+ //
+ args.push_back ("-p");
+
+ // Assume the file is executable if the owner has execute permission,
+ // in which case we make it executable for everyone.
+ //
+ args.push_back ("-m");
+ args.push_back (exe ? "755" : "644");
+ args.push_back (relf.string ().c_str ());
+ args.push_back (reld.string ().c_str ());
+ args.push_back (nullptr);
- run (cmd, args);
+ if (verb >= 2)
+ print_process (args);
+ }
+
+ if (cmd != nullptr)
+ run (t.ctx, *cmd, args, 1 /* finish_verbosity */);
+ else
+ {
+ permissions perm (permissions::ru | permissions::wu |
+ permissions::rg |
+ permissions::ro); // 644
+ if (exe)
+ perm |= permissions::xu | permissions::xg | permissions::xo; // 755
- return d / relf.leaf ();
+ try
+ {
+ // Note that we don't pass cpflags::overwrite_content which means
+ // this will fail if the file already exists. Since we clean up the
+ // destination directory, this will detect cases where we have
+ // multiple source files with the same distribution destination.
+ //
+ cpfile (f,
+ r,
+ cpflags::overwrite_permissions | cpflags::copy_timestamps,
+ perm);
+ }
+ catch (const system_error& e)
+ {
+ if (e.code ().category () == generic_category () &&
+ e.code ().value () == EEXIST)
+ {
+ // @@ TMP (added in 0.16.0).
+ //
+ warn << "multiple files are distributed as " << r <<
+ info << "second file is " << f <<
+ info << "this warning will become an error in the future";
+ }
+ else
+ fail << "unable to copy " << f << " to " << r << ": " << e;
+ }
+ }
+
+ return r;
}
static path
@@ -718,13 +1023,15 @@ namespace build2
const dir_path& dir,
const string& e)
{
+ // NOTE: similar code in bpkg (system-package-manager-archive.cxx).
+
path an (pkg + '.' + e);
// Delete old archive for good measure.
//
path ap (dir / an);
if (exists (ap, false))
- rmfile (ctx, ap);
+ rmfile (ctx, ap, 3 /* verbosity */);
// Use zip for .zip archives. Also recognize and handle a few well-known
// tar.xx cases (in case tar doesn't support -a or has other issues like
@@ -740,7 +1047,7 @@ namespace build2
if (e == "zip")
{
- // On Windows we use libarchive's bsdtar (zip is an MSYS executabales).
+ // On Windows we use libarchive's bsdtar (zip is an MSYS executable).
//
// While not explicitly stated, the compression-level option works
// for zip archives.
@@ -765,15 +1072,28 @@ namespace build2
// On Windows we use libarchive's bsdtar with auto-compression (tar
// itself and quite a few compressors are MSYS executables).
//
+ // OpenBSD tar does not support --format but it appears ustar is the
+ // default (while this is not said explicitly in tar(1), it is said in
+ // pax(1) and confirmed on the mailing list). Nor does it support -a,
+ // at least as of 7.1, but we will let this play out naturally, in case
+ // this support gets added.
+ //
+ // Note also that our long-term plan is to switch to libarchive in
+ // order to generate reproducible archives.
+ //
const char* l (nullptr); // Compression level (option).
#ifdef _WIN32
- const char* tar = "bsdtar";
+ args = {"bsdtar", "--format", "ustar"};
if (e == "tar.gz")
l = "--options=compression-level=9";
#else
- const char* tar = "tar";
+ args = {"tar"
+#ifndef __OpenBSD__
+ , "--format", "ustar"
+#endif
+ };
// For gzip it's a good idea to use -9 by default. For bzip2, -9 is
// the default. And for xz, -9 is not recommended as the default due
@@ -791,13 +1111,10 @@ namespace build2
if (c != nullptr)
{
- args = {tar,
- "--format", "ustar",
- "-cf", "-",
- pkg.c_str (),
- nullptr};
-
- i = args.size ();
+ args.push_back ("-cf");
+ args.push_back ("-");
+ args.push_back (pkg.c_str ());
+ args.push_back (nullptr); i = args.size ();
args.push_back (c);
if (l != nullptr)
args.push_back (l);
@@ -818,20 +1135,13 @@ namespace build2
}
else
#endif
- if (e == "tar")
- args = {tar,
- "--format", "ustar",
- "-cf", ap.string ().c_str (),
- pkg.c_str (),
- nullptr};
- else
{
- args = {tar,
- "--format", "ustar",
- "-a"};
-
- if (l != nullptr)
- args.push_back (l);
+ if (e != "tar")
+ {
+ args.push_back ("-a");
+ if (l != nullptr)
+ args.push_back (l);
+ }
args.push_back ("-cf");
args.push_back (ap.string ().c_str ());
@@ -851,19 +1161,20 @@ namespace build2
if (verb >= 2)
print_process (args);
else if (verb)
- text << args[0] << ' ' << ap;
+ print_diag (args[0], dir / dir_path (pkg), ap);
process apr;
process cpr;
- // Change the archiver's working directory to dist_root.
+ // Change the archiver's working directory to root.
//
- apr = run_start (app,
+ // Note: this function is called during serial execution and so no
+ // diagnostics buffering is needed (here and below).
+ //
+ apr = run_start (process_env (app, root),
args,
0 /* stdin */,
- (i != 0 ? -1 : 1) /* stdout */,
- true /* error */,
- root);
+ (i != 0 ? -1 : 1) /* stdout */);
// Start the compressor if required.
//
@@ -875,10 +1186,17 @@ namespace build2
out_fd.get () /* stdout */);
cpr.in_ofd.reset (); // Close the archiver's stdout on our side.
- run_finish (args.data () + i, cpr);
}
- run_finish (args.data (), apr);
+ // Delay throwing until we diagnose both ends of the pipe.
+ //
+ if (!run_finish_code (args.data (),
+ apr,
+ 1 /* verbosity */,
+ false /* omit_normal */) ||
+ !(i == 0 || run_finish_code (args.data () + i, cpr, 1, false)))
+ throw failed ();
+
out_rm.cancel ();
return ap;
@@ -897,7 +1215,7 @@ namespace build2
//
path cp (dir / cn);
if (exists (cp, false))
- rmfile (ctx, cp);
+ rmfile (ctx, cp, 3 /* verbosity */);
auto_rmfile c_rm; // Note: must come first.
auto_fd c_fd;
@@ -936,18 +1254,20 @@ namespace build2
if (verb >= 2)
print_process (args);
else if (verb)
- text << args[0] << ' ' << cp;
+ print_diag (args[0], ap, cp);
// Note that to only get the archive name (without the directory) in
// the output we have to run from the archive's directory.
//
- process pr (run_start (pp,
+ // Note: this function is called during serial execution and so no
+ // diagnostics buffering is needed.
+ //
+ process pr (run_start (process_env (pp, ad /* cwd */),
args,
- 0 /* stdin */,
- c_fd.get () /* stdout */,
- true /* error */,
- ad /* cwd */));
- run_finish (args, pr);
+ 0 /* stdin */,
+ c_fd.get () /* stdout */));
+
+ run_finish (args, pr, 1 /* verbosity */);
}
else
{
@@ -967,7 +1287,7 @@ namespace build2
if (verb >= 2)
text << "cat >" << cp;
else if (verb)
- text << e << "sum " << cp;
+ print_diag ((e + "sum").c_str (), ap, cp);
string c;
try
@@ -1001,7 +1321,8 @@ namespace build2
dist_include (action,
const target&,
const prerequisite_member& p,
- include_type i)
+ include_type i,
+ lookup& l)
{
tracer trace ("dist::dist_include");
@@ -1010,12 +1331,18 @@ namespace build2
// given the prescribed semantics of adhoc (match/execute but otherwise
// ignore) is followed.
//
+ // Note that we don't need to do anything for posthoc.
+ //
if (i == include_type::excluded)
{
l5 ([&]{trace << "overriding exclusion of " << p;});
i = include_type::adhoc;
}
+ // Also clear any operation-specific overrides.
+ //
+ l = lookup ();
+
return i;
}
@@ -1029,12 +1356,12 @@ namespace build2
true, // bootstrap_outer
nullptr, // meta-operation pre
&dist_operation_pre,
- &load, // normal load
- &search, // normal search
- nullptr, // no match (see dist_execute()).
+ &dist_load_load,
+ &perform_search, // normal search
+ nullptr, // no match (see dist_execute()).
&dist_load_execute,
- nullptr, // operation post
- nullptr, // meta-operation post
+ nullptr, // operation post
+ nullptr, // meta-operation post
&dist_include
};
@@ -1065,7 +1392,7 @@ namespace build2
init_config (rs);
}
- void
+ static void
dist_bootstrap_search (const values&,
const scope& rs,
const scope&,
diff --git a/libbuild2/dist/rule.cxx b/libbuild2/dist/rule.cxx
index ef144d0..c63f7f3 100644
--- a/libbuild2/dist/rule.cxx
+++ b/libbuild2/dist/rule.cxx
@@ -8,6 +8,9 @@
#include <libbuild2/algorithm.hxx>
#include <libbuild2/diagnostics.hxx>
+#include <libbuild2/dist/types.hxx>
+#include <libbuild2/dist/module.hxx>
+
using namespace std;
namespace build2
@@ -15,7 +18,7 @@ namespace build2
namespace dist
{
bool rule::
- match (action, target&, const string&) const
+ match (action, target&) const
{
return true; // We always match.
}
@@ -27,17 +30,34 @@ namespace build2
const dir_path& src_root (rs.src_path ());
const dir_path& out_root (rs.out_path ());
- // If we can, go inside see-through groups.
+ // Note that we don't go inside see-through groups since the members for
+ // dist_* may be incomplete (or even bogus, e.g., the "representative
+ // sample"). Instead, for see-through groups our plan is as follows:
+ //
+ // 1. Here we match them as groups (so that we still match all their
+ // prerequisites).
+ //
+ // 2. In dist_project() we collect them along with files after dist_*
+ // but before perform_update. Here we also skip files that are
+ // members of see-through groups (which we may still get).
//
- for (prerequisite_member pm:
- group_prerequisite_members (a, t, members_mode::maybe))
+ // 3. During perform_update we collect all the see-through group
+ // members, similar to files on step (2).
+ //
+ for (const prerequisite& p: group_prerequisites (t))
{
// Note: no exclusion tests, we want all of them (and see also the
- // dist_include() override).
+ // dist_include() override). But if we don't ignore post hoc ones
+ // here, we will end up with a cycle (they will still be handled
+ // by the post-pass).
+ //
+ lookup l; // Ignore any operation-specific values.
+ if (include (a, t, p, &l) == include_type::posthoc)
+ continue;
// Skip prerequisites imported from other projects.
//
- if (pm.proj ())
+ if (p.proj)
continue;
// We used to always search and match but that resulted in the
@@ -56,18 +76,18 @@ namespace build2
// @@ Note that this is still an issue in a custom dist rule.
//
const target* pt (nullptr);
- if (pm.is_a<file> ())
+ if (p.is_a<file> ())
{
- pt = pm.load ();
+ pt = p.target.load ();
if (pt == nullptr)
{
- const prerequisite& p (pm.prerequisite);
-
// Search for an existing target or existing file in src.
//
+ // Note: see also similar code in match_postponed() below.
+ //
const prerequisite_key& k (p.key ());
- pt = k.tk.type->search (t, k);
+ pt = k.tk.type->search (t.ctx, &t, k);
if (pt == nullptr)
{
@@ -79,23 +99,65 @@ namespace build2
!p.dir.sub (out_root))
continue;
- fail << "prerequisite " << k << " is not existing source file "
- << "nor known output target" << endf;
+ // This can be order-dependent: for example libs{} prerequisite
+ // may be unknown because we haven't matched the lib{} group
+ // yet. So we postpone this for later (see match_postponed()).
+ //
+ const module& mod (*rs.find_module<module> (module::name));
+
+ mlock l (mod.postponed.mutex);
+ mod.postponed.list.push_back (
+ postponed_prerequisite {a, t, p, t.state[a].rule->first});
+ continue;
}
search_custom (p, *pt); // Cache.
}
}
else
- pt = &pm.search (t);
+ pt = &search (t, p);
// Don't match targets that are outside of our project.
//
if (pt->dir.sub (out_root))
- build2::match (a, *pt);
+ match_sync (a, *pt);
}
return noop_recipe; // We will never be executed.
}
+
+ void rule::
+ match_postponed (const postponed_prerequisite& pp)
+ {
+ action a (pp.action);
+ const target& t (pp.target);
+ const prerequisite& p (pp.prereq);
+
+ const prerequisite_key& k (p.key ());
+ const target* pt (k.tk.type->search (t.ctx, &t, k));
+
+ if (pt == nullptr)
+ {
+ // Note that we do lose the diag frame that we normally get when
+ // failing during match. So let's mention the target/rule manually.
+ //
+ fail << "prerequisite " << k << " is not existing source file nor "
+ << "known output target" <<
+ info << "while applying rule " << pp.rule << " to " << diag_do (a, t);
+ }
+
+ search_custom (p, *pt); // Cache.
+
+ // It's theoretically possible that the target gets entered but nobody
+ // else depends on it but us. So we need to make sure it's matched
+    // (since it, in turn, can pull in other targets). Note that this could
+ // potentially add new postponed prerequisites to the list.
+ //
+ if (!pt->matched (a))
+ {
+ if (pt->dir.sub (t.root_scope ().out_path ()))
+ match_direct_sync (a, *pt);
+ }
+ }
}
}
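
To make the postponement scheme above concrete, here is a minimal standalone sketch (not build2 code; all names are invented) of the same pattern: entries that cannot be resolved during a concurrent pass are recorded under a mutex and retried in a later serial pass.

#include <list>
#include <mutex>
#include <string>
#include <iostream>

struct postponed
{
  std::string name;
};

struct postponed_list
{
  std::mutex mutex;
  std::list<postponed> list;
};

// Called concurrently: if an item cannot be resolved yet, remember it.
//
static void
resolve_or_postpone (postponed_list& pl, const std::string& n, bool ready)
{
  if (ready)
    std::cout << "resolved " << n << '\n';
  else
  {
    std::lock_guard<std::mutex> l (pl.mutex);
    pl.list.push_back (postponed {n});
  }
}

int main ()
{
  postponed_list pl;
  resolve_or_postpone (pl, "libs{foo}", false);
  resolve_or_postpone (pl, "exe{bar}", true);

  // Serial retry pass (no locking needed once the concurrent pass is done).
  //
  for (const postponed& p: pl.list)
    std::cout << "retrying postponed " << p.name << '\n';
}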
diff --git a/libbuild2/dist/rule.hxx b/libbuild2/dist/rule.hxx
index e63016d..69ab3d9 100644
--- a/libbuild2/dist/rule.hxx
+++ b/libbuild2/dist/rule.hxx
@@ -11,6 +11,10 @@
#include <libbuild2/action.hxx>
#include <libbuild2/target.hxx>
+#include <libbuild2/dist/types.hxx>
+
+#include <libbuild2/export.hxx>
+
namespace build2
{
namespace dist
@@ -19,20 +23,28 @@ namespace build2
//
// A custom rule (usually the same as perform_update) may be necessary to
// establish group links (so that we see the dist variable set on a group)
- // or to see through non-see-through groups (like lib{}; see the
- // bin::lib_rule for an example). Note that in the latter case the rule
- // should "see" all its members for the dist case.
+ // or to see through non-see-through groups (like lib{}, obj{}; see rule
+ // in the bin module for an example). Note that in the latter case the
+ // rule should "see" all its members for the dist case.
//
- class rule: public simple_rule
+ class LIBBUILD2_SYMEXPORT rule: public simple_rule
{
public:
rule () {}
+ // Always matches (returns true).
+ //
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
+    // Matches all the prerequisites (including those from the group) and returns
+ // noop_recipe (which will never be executed).
+ //
virtual recipe
apply (action, target&) const override;
+
+ static void
+ match_postponed (const postponed_prerequisite&);
};
}
}
diff --git a/libbuild2/dist/types.hxx b/libbuild2/dist/types.hxx
new file mode 100644
index 0000000..b833951
--- /dev/null
+++ b/libbuild2/dist/types.hxx
@@ -0,0 +1,41 @@
+// file : libbuild2/dist/types.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef LIBBUILD2_DIST_TYPES_HXX
+#define LIBBUILD2_DIST_TYPES_HXX
+
+#include <libbuild2/types.hxx>
+#include <libbuild2/forward.hxx>
+
+#include <libbuild2/prerequisite-key.hxx>
+
+namespace build2
+{
+ namespace dist
+ {
+ // List of prerequisites that could not be searched to a target and were
+ // postponed for later re-search. This can happen, for example, because a
+ // prerequisite would resolve to a member of a group that hasn't been
+ // matched yet (for example, libs{} of lib{}). See rule::apply() for
+ // details.
+ //
+ // Note that we are using list instead of vector because new elements can
+ // be added at the end while we are iterating over the list.
+ //
+ struct postponed_prerequisite
+ {
+ build2::action action;
+ reference_wrapper<const build2::target> target;
+ reference_wrapper<const prerequisite> prereq;
+ string rule;
+ };
+
+ struct postponed_prerequisites
+ {
+ build2::mutex mutex;
+ build2::list<postponed_prerequisite> list;
+ };
+ }
+}
+
+#endif // LIBBUILD2_DIST_TYPES_HXX
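
A minimal illustration of why the comment above insists on std::list rather than std::vector: appending to a list while walking it neither invalidates the iterator nor skips the new tail elements (standalone example, not build2 code).

#include <list>
#include <iostream>

int main ()
{
  std::list<int> l {1, 2, 3};

  for (auto i (l.begin ()); i != l.end (); ++i)
  {
    if (*i == 2)
      l.push_back (4); // Safe: does not invalidate i or end().

    std::cout << *i << '\n'; // Prints 1, 2, 3, 4, one per line.
  }
}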
diff --git a/libbuild2/dump.cxx b/libbuild2/dump.cxx
index b1a16ba..9b7f5b1 100644
--- a/libbuild2/dump.cxx
+++ b/libbuild2/dump.cxx
@@ -3,6 +3,11 @@
#include <libbuild2/dump.hxx>
+#ifndef BUILD2_BOOTSTRAP
+# include <iostream> // cout
+# include <unordered_map>
+#endif
+
#include <libbuild2/rule.hxx>
#include <libbuild2/scope.hxx>
#include <libbuild2/target.hxx>
@@ -11,6 +16,7 @@
#include <libbuild2/diagnostics.hxx>
using namespace std;
+using namespace butl;
namespace build2
{
@@ -49,10 +55,321 @@ namespace build2
if (v)
{
names storage;
- os << (a ? " " : "") << reverse (v, storage);
+ os << (a ? " " : "") << reverse (v, storage, true /* reduce */);
+ }
+ }
+
+#ifndef BUILD2_BOOTSTRAP
+
+ static string
+ quoted_target_name (const names_view& ns, bool rel)
+ {
+ ostringstream os;
+ stream_verb (os, stream_verbosity (rel ? 0 : 1, 0));
+ to_stream (os, ns, quote_mode::effective, '@');
+ return os.str ();
+ }
+
+ static void
+ dump_quoted_target_name (json::stream_serializer& j,
+ const names_view& ns,
+ bool rel)
+ {
+ j.value (quoted_target_name (ns, rel));
+ }
+
+ static string
+ quoted_target_name (const target& t, bool rel)
+ {
+ names ns (t.as_name ()); // Note: potentially adds an extension.
+
+ // Don't print target names relative if the target is in src and out!=src.
+  // Failing that, we will end up with pointless ../../../... paths.
+ //
+ // It may also seem that we can omit @-qualification in this case, since
+ // it is implied by the containing scope. However, keep in mind that the
+ // target may not be directly in this scope. We could make it relative,
+ // though.
+ //
+ if (rel && !t.out.empty ())
+ {
+ // Make the out relative ourselves and then disable relative for src.
+ //
+ dir_path& o (ns.back ().dir);
+ o = relative (o); // Note: may return empty path.
+ if (o.empty ())
+ o = dir_path (".");
+
+ rel = false;
+ }
+
+ return quoted_target_name (ns, rel);
+ }
+
+ void
+ dump_quoted_target_name (json::stream_serializer& j,
+ const target& t,
+ bool rel)
+ {
+ j.value (quoted_target_name (t, rel));
+ }
+
+ using target_name_cache = unordered_map<const target*, string>;
+
+ static void
+ dump_quoted_target_name (json::stream_serializer& j,
+ const target& t,
+ target_name_cache& tc)
+ {
+ auto i (tc.find (&t));
+ if (i == tc.end ())
+ i = tc.emplace (&t, quoted_target_name (t, false /* relative */)).first;
+
+ j.value (i->second);
+ }
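
The address-keyed cache above follows a common memoization shape; here is a standalone sketch of the same idea with an invented compute() stand-in for the expensive serialization (not build2 code).

#include <string>
#include <unordered_map>
#include <iostream>

struct target {std::string name;};

static std::string
compute (const target& t)
{
  return "quoted(" + t.name + ")"; // Imagine something expensive.
}

static const std::string&
cached (std::unordered_map<const target*, std::string>& cache, const target& t)
{
  auto i (cache.find (&t));
  if (i == cache.end ())
    i = cache.emplace (&t, compute (t)).first;
  return i->second;
}

int main ()
{
  std::unordered_map<const target*, std::string> cache;
  target g {"lib{foo}"};
  std::cout << cached (cache, g) << '\n';
  std::cout << cached (cache, g) << '\n'; // Second call hits the cache.
}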
+
+ void
+ dump_display_target_name (json::stream_serializer& j,
+ const target& t,
+ bool rel)
+ {
+ // Note: see the quoted version above for details.
+
+ target_key tk (t.key ());
+
+ dir_path o;
+ if (rel && !tk.out->empty ())
+ {
+ o = relative (*tk.out);
+ if (o.empty ())
+ o = dir_path (".");
+ tk.out = &o;
+
+ rel = false;
}
+
+ // Change the stream verbosity to print relative if requested and omit
+ // extension.
+ //
+ ostringstream os;
+ stream_verb (os, stream_verbosity (rel ? 0 : 1, 0));
+ os << tk;
+ j.value (os.str ());
}
+ static void
+ dump_value (json::stream_serializer& j, const value& v)
+ {
+ // Hints.
+ //
+ // Note that the pair hint should only be used for simple names.
+ //
+ optional<bool> h_array;
+ optional<bool> h_pair; // true/false - second/first is optional.
+
+ if (v.null)
+ {
+ j.value (nullptr);
+ return;
+ }
+ else if (v.type != nullptr)
+ {
+ const value_type& t (*v.type);
+
+ auto s_array = [&j] (const auto& vs)
+ {
+ j.begin_array ();
+ for (const auto& v: vs) j.value (v);
+ j.end_array ();
+ };
+
+ auto s_array_string = [&j] (const auto& vs)
+ {
+ j.begin_array ();
+ for (const auto& v: vs) j.value (v.string ());
+ j.end_array ();
+ };
+
+ // Note: check in the derived-first order.
+ //
+ if (t.is_a<bool> ()) j.value (v.as<bool> ());
+ else if (t.is_a<int64_t> ()) j.value (v.as<int64_t> ());
+ else if (t.is_a<uint64_t> ()) j.value (v.as<uint64_t> ());
+ else if (t.is_a<string> ()) j.value (v.as<string> ());
+ else if (t.is_a<path> ()) j.value (v.as<path> ().string ());
+ else if (t.is_a<dir_path> ()) j.value (v.as<dir_path> ().string ());
+ else if (t.is_a<target_triplet> ()) j.value (v.as<target_triplet> ().string ());
+ else if (t.is_a<project_name> ()) j.value (v.as<project_name> ().string ());
+ else if (t.is_a<int64s> ()) s_array (v.as<int64s> ());
+ else if (t.is_a<uint64s> ()) s_array (v.as<uint64s> ());
+ else if (t.is_a<strings> ()) s_array (v.as<strings> ());
+ else if (t.is_a<paths> ()) s_array_string (v.as<paths> ());
+ else if (t.is_a<dir_paths> ()) s_array_string (v.as<dir_paths> ());
+ else
+ {
+ // Note: check in the derived-first order.
+ //
+ if (t.is_a<name> ()) h_array = false;
+ else if (t.is_a<name_pair> ())
+ {
+ h_array = false;
+ h_pair = true;
+ }
+ else if (t.is_a<process_path_ex> ())
+ {
+ // Decide on array dynamically.
+ h_pair = true;
+ }
+ else if (t.is_a<process_path> ())
+ {
+ h_array = false;
+ h_pair = true;
+ }
+ else if (t.is_a<cmdline> () ||
+ t.is_a<vector<name>> ())
+ {
+ h_array = true;
+ }
+ else if (t.is_a<vector<pair<string, string>>> () ||
+ t.is_a<vector<pair<string, optional<string>>>> () ||
+ t.is_a<vector<pair<string, optional<bool>>>> () ||
+ t.is_a<map<string, string>> () ||
+ t.is_a<map<string, optional<string>>> () ||
+ t.is_a<map<string, optional<bool>>> () ||
+ t.is_a<map<project_name, dir_path>> ())
+ {
+ h_array = true;
+ h_pair = true;
+ }
+ else if (t.is_a<map<optional<string>, string>> () ||
+ t.is_a<vector<pair<optional<string>, string>>> ())
+ {
+ h_array = true;
+ h_pair = false;
+ }
+
+ goto fall_through;
+ }
+
+ return;
+
+ fall_through:
+ ;
+ }
+
+ names storage;
+ names_view ns (reverse (v, storage, true /* reduce */));
+
+ if (ns.empty ())
+ {
+ // When it comes to representing an empty value, our options are: empty
+ // array ([]), empty object ({}), or an absent member. The latter feels
+ // closer to null than empty, so that's out. After some experimentation,
+      // it feels like the best choice is to use an array unless we know for
+      // sure it is not, in which case we use an object if it's a pair and empty
+ // string otherwise (the empty string makes sense because we serialize
+ // complex names as target names; see below).
+ //
+ if (!h_array || *h_array)
+ {
+ j.begin_array ();
+ j.end_array ();
+ }
+ else
+ {
+ if (h_pair)
+ {
+ j.begin_object ();
+ j.end_object ();
+ }
+ else
+ j.value ("");
+ }
+ }
+ else
+ {
+ if (!h_array)
+ h_array = ns.size () > 2 || (ns.size () == 2 && !ns.front ().pair);
+
+ if (*h_array)
+ j.begin_array ();
+
+      // While it may be tempting to try to provide a homogeneous array
+ // (i.e., all strings, all objects, all pairs), in case of pairs we
+ // actually don't know whether a non-pair element is first or second
+ // (it's up to interpretation; though we do hint which one is optional
+ // for typed values above). So we serialize each name in its most
+ // appropriate form.
+ //
+ auto simple = [] (const name& n)
+ {
+ return n.simple () || n.directory () || n.file ();
+ };
+
+ auto s_simple = [&j] (const name& n)
+ {
+ if (n.simple ())
+ j.value (n.value);
+ else if (n.directory ())
+ j.value (n.dir.string ());
+ else if (n.file ())
+ {
+ // Note: both must be present due to earlier checks.
+ //
+ j.value ((n.dir / n.value).string ());
+ }
+ else
+ return false;
+
+ return true;
+ };
+
+ for (auto i (ns.begin ()), e (ns.end ()); i != e; )
+ {
+ const name& l (*i++);
+ const name* r (l.pair ? &*i++ : nullptr);
+
+ optional<bool> hp (h_pair);
+
+ if (!hp && r != nullptr && simple (l) && simple (*r))
+ hp = true;
+
+ if (hp)
+ {
+ // Pair of simple names.
+ //
+ j.begin_object ();
+
+ if (r != nullptr)
+ {
+ j.member_name ("first"); s_simple (l);
+ j.member_name ("second"); s_simple (*r);
+ }
+ else
+ {
+ j.member_name (*hp ? "first" : "second"); s_simple (l);
+ }
+
+ j.end_object ();
+ }
+ else if (r == nullptr && s_simple (l))
+ ;
+ else
+ {
+ // If complex name (or pair thereof), then assume a target name.
+ //
+ dump_quoted_target_name (j,
+ names_view (&l, r != nullptr ? 2 : 1),
+ false /* relative */);
+ }
+ }
+
+ if (*h_array)
+ j.end_array ();
+ }
+ }
+#endif
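
The empty-value representation logic above boils down to a small decision table; the following standalone sketch (plain strings instead of a real JSON serializer) shows the same decisions for the h_array/h_pair hints.

#include <string>
#include <optional>
#include <iostream>

static std::string
empty_repr (std::optional<bool> h_array, std::optional<bool> h_pair)
{
  if (!h_array || *h_array)   // Unknown or known to be an array.
    return "[]";
  if (h_pair)                 // Known pair: empty object.
    return "{}";
  return "\"\"";              // Otherwise: empty string.
}

int main ()
{
  std::cout << empty_repr (std::nullopt, std::nullopt) << '\n'; // []
  std::cout << empty_repr (false, true)                << '\n'; // {}
  std::cout << empty_repr (false, std::nullopt)        << '\n'; // ""
}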
+
enum class variable_kind {scope, tt_pat, target, rule, prerequisite};
static void
@@ -83,6 +400,10 @@ namespace build2
const variable& var (p.first);
const value& v (p.second);
+ // On one hand it might be helpful to print the visibility. On the
+ // other, it is always specified which means there will be a lot of
+ // noise. So probably not.
+ //
if (var.type != nullptr)
os << '[' << var.type->name << "] ";
@@ -123,6 +444,68 @@ namespace build2
}
}
+#ifndef BUILD2_BOOTSTRAP
+ static void
+ dump_variable (json::stream_serializer& j,
+ const variable_map& vm,
+ const variable_map::const_iterator& vi,
+ const scope& s,
+ variable_kind k)
+ {
+ // Note: see the buildfile version above for comments.
+
+ assert (k != variable_kind::tt_pat); // TODO
+
+ const auto& p (*vi);
+ const variable& var (p.first);
+ const value& v (p.second);
+
+ lookup l (v, var, vm);
+ if (k != variable_kind::prerequisite)
+ {
+ if (var.override ())
+ return; // Ignore.
+
+ if (var.overrides != nullptr)
+ {
+ l = s.lookup_override (
+ var,
+ make_pair (l, 1),
+ k == variable_kind::target || k == variable_kind::rule,
+ k == variable_kind::rule).first;
+
+ assert (l.defined ()); // We at least have the original.
+ }
+ }
+
+ // Note that we do not distinguish between variable/value type.
+ //
+ // An empty value of a non-array type is represented as an empty object
+ // ({}).
+ //
+#if 0
+ struct variable
+ {
+ string name;
+ optional<string> type;
+ json_value value; // string|number|boolean|null|object|array
+ };
+#endif
+
+ j.begin_object ();
+
+ j.member ("name", var.name);
+
+ if (l->type != nullptr)
+ j.member ("type", l->type->name);
+
+ j.member_name ("value");
+ dump_value (j, *l);
+
+ j.end_object ();
+ }
+#endif
+
static void
dump_variables (ostream& os,
string& ind,
@@ -139,6 +522,20 @@ namespace build2
}
}
+#ifndef BUILD2_BOOTSTRAP
+ static void
+ dump_variables (json::stream_serializer& j,
+ const variable_map& vars,
+ const scope& s,
+ variable_kind k)
+ {
+ for (auto i (vars.begin ()), e (vars.end ()); i != e; ++i)
+ {
+ dump_variable (j, vars, i, s, k);
+ }
+ }
+#endif
+
// Dump target type/pattern-specific variables.
//
static void
@@ -208,7 +605,7 @@ namespace build2
for (action a: r.actions)
os << ' ' << re.meta_operations[a.meta_operation ()]->name <<
- '(' << re.operations[a.operation ()]->name << ')';
+ '(' << re.operations[a.operation ()].info->name << ')';
os << endl;
r.dump_text (os, ind);
@@ -225,6 +622,14 @@ namespace build2
// Pattern.
//
os << ind;
+
+ // Avoid printing the derived name.
+ //
+ if (rp.rule_name.front () != '<' || rp.rule_name.back () != '>')
+ {
+ os << "[rule_name=" << rp.rule_name << "] ";
+ }
+
rp.dump (os);
// Recipes.
@@ -236,10 +641,27 @@ namespace build2
}
}
+ // Similar to target::matched() but for the load phase.
+ //
+ static inline bool
+ matched (const target& t, action a)
+ {
+ // Note: running serial and task_count is 0 before any operation has
+ // started.
+ //
+ if (size_t c = t[a].task_count.load (memory_order_relaxed))
+ {
+ if (c == t.ctx.count_applied () || c == t.ctx.count_executed ())
+ return true;
+ }
+
+ return false;
+ }
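
A standalone sketch of the matched()-style check above: a relaxed load of an atomic task counter compared against known "applied"/"executed" values. The constants here are arbitrary stand-ins, not build2's real counters.

#include <atomic>
#include <cstddef>
#include <iostream>

static constexpr std::size_t count_applied  = 5;
static constexpr std::size_t count_executed = 6;

static bool
matched (const std::atomic<std::size_t>& task_count)
{
  // Relaxed is enough for a check made while no task is running.
  //
  if (std::size_t c = task_count.load (std::memory_order_relaxed))
    return c == count_applied || c == count_executed;

  return false;
}

int main ()
{
  std::atomic<std::size_t> tc (0);
  std::cout << matched (tc) << '\n'; // 0: nothing has run yet.
  tc.store (count_applied, std::memory_order_relaxed);
  std::cout << matched (tc) << '\n'; // 1: the rule has been applied.
}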
+
static void
- dump_target (optional<action> a,
- ostream& os,
+ dump_target (ostream& os,
string& ind,
+ optional<action> a,
const target& t,
const scope& s,
bool rel)
@@ -248,6 +670,9 @@ namespace build2
// scope. To achieve this we are going to temporarily lower the stream
// path verbosity to level 0.
//
+ // @@ Not if in src and out != src? Otherwise end up with ../../../...
+ // See JSON version for the state of the art.
+ //
stream_verbosity osv, nsv;
if (rel)
{
@@ -259,7 +684,38 @@ namespace build2
if (t.group != nullptr)
os << ind << t << " -> " << *t.group << endl;
- os << ind << t << ':';
+ os << ind;
+
+ // Target attributes.
+ //
+ if (!t.rule_hints.map.empty ())
+ {
+ os << '[';
+
+ bool f (true);
+ for (const rule_hints::value_type& v: t.rule_hints.map)
+ {
+ if (f)
+ f = false;
+ else
+ os << ", ";
+
+ if (v.type != nullptr)
+ os << v.type->name << '@';
+
+ os << "rule_hint=";
+
+ if (v.operation != default_id)
+ os << s.root_scope ()->root_extra->operations[v.operation].info->name
+ << '@';
+
+ os << v.hint;
+ }
+
+ os << "] ";
+ }
+
+ os << t << ':';
// First check if this is the simple case where we can print everything
// as a single declaration.
@@ -278,32 +734,26 @@ namespace build2
// If the target has been matched to a rule, we also print resolved
// prerequisite targets.
//
- // Note: running serial and task_count is 0 before any operation has
- // started.
- //
const prerequisite_targets* pts (nullptr);
{
action inner; // @@ Only for the inner part of the action currently.
- if (size_t c = t[inner].task_count.load (memory_order_relaxed))
+ if (matched (t, inner))
{
- if (c == t.ctx.count_applied () || c == t.ctx.count_executed ())
- {
- pts = &t.prerequisite_targets[inner];
+ pts = &t.prerequisite_targets[inner];
- bool f (false);
- for (const target* pt: *pts)
+ bool f (false);
+ for (const target* pt: *pts)
+ {
+ if (pt != nullptr)
{
- if (pt != nullptr)
- {
- f = true;
- break;
- }
+ f = true;
+ break;
}
-
- if (!f)
- pts = nullptr;
}
+
+ if (!f)
+ pts = nullptr;
}
}
@@ -467,10 +917,318 @@ namespace build2
stream_verb (os, osv);
}
+#ifndef BUILD2_BOOTSTRAP
+ static void
+ dump_target (json::stream_serializer& j,
+ optional<action> a,
+ const target& t,
+ const scope& s,
+ bool rel,
+ target_name_cache& tcache)
+ {
+ // Note: see the buildfile version above for comments.
+
+ // Note that the target name (and display_name) are relative to the
+ // containing scope (if any).
+ //
+#if 0
+ struct prerequisite
+ {
+ string name; // Quoted/qualified name.
+ string type;
+ vector<variable> variables; // Prerequisite variables.
+ };
+
+ struct loaded_target
+ {
+ string name; // Quoted/qualified name.
+ string display_name;
+ string type; // Target type.
+ //string declaration;
+ optional<string> group; // Quoted/qualified group target name.
+
+ vector<variable> variables; // Target variables.
+
+ vector<prerequisite> prerequisites;
+ };
+
+ // @@ TODO: target attributes (rule_hint)
+
+ struct prerequisite_target
+ {
+ string name; // Target name (always absolute).
+ string type;
+ bool adhoc;
+ };
+
+ struct operation_state
+ {
+ string rule; // null if direct recipe match
+
+ optional<string> state; // unchanged|changed|group
+
+ vector<variable> variables; // Rule variables.
+
+ vector<prerequisite_target> prerequisite_targets;
+ };
+
+ struct matched_target
+ {
+ string name;
+ string display_name;
+ string type;
+ //string declaration;
+ optional<string> group;
+
+ optional<path> path; // Absent if not path-based target, not assigned.
+
+ vector<variable> variables;
+
+ optional<operation_state> outer_operation; // null if not matched.
+ operation_state inner_operation; // null if not matched.
+ };
+#endif
+
+ j.begin_object ();
+
+ j.member_name ("name");
+ dump_quoted_target_name (j, t, rel /* relative */);
+
+ j.member_name ("display_name");
+ dump_display_target_name (j, t, rel /* relative */);
+
+ j.member ("type", t.type ().name);
+
+ // @@ This value currently doesn't make much sense:
+ //
+ // - why are all the system headers prereq-new?
+ //
+ // - why is synthesized obje{} prereq-new?
+ //
+#if 0
+ {
+ const char* v (nullptr);
+ switch (t.decl)
+ {
+ case target_decl::prereq_new: v = "prerequisite-new"; break;
+ case target_decl::prereq_file: v = "prerequisite-file"; break;
+ case target_decl::implied: v = "implied"; break;
+ case target_decl::real: v = "real"; break;
+ }
+ j.member ("declaration", v);
+ }
+#endif
+
+ if (t.group != nullptr)
+ {
+ j.member_name ("group");
+ dump_quoted_target_name (j, *t.group, tcache);
+ }
+
+ if (a)
+ {
+ const string* v (nullptr);
+
+ if (t.is_a<dir> () || t.is_a<fsdir> ())
+ {
+ v = &t.dir.string ();
+ }
+ else if (const auto* pt = t.is_a<path_target> ())
+ {
+ const path& p (pt->path ());
+
+ if (!p.empty ())
+ v = &p.string ();
+ }
+
+ if (v != nullptr)
+ j.member ("path", *v);
+ }
+
+ // Target variables.
+ //
+ if (!t.vars.empty ())
+ {
+ j.member_begin_array ("variables");
+ dump_variables (j, t.vars, s, variable_kind::target);
+ j.end_array ();
+ }
+
+ // Prerequisites.
+ //
+ if (!a)
+ {
+ const prerequisites& ps (t.prerequisites ());
+
+ if (!ps.empty ())
+ {
+ j.member_begin_array ("prerequisites");
+
+ for (const prerequisite& p: ps)
+ {
+ j.begin_object ();
+
+ {
+ // Cobble together an equivalent of dump_quoted_target_name().
+ //
+ prerequisite_key pk (p.key ());
+ target_key& tk (pk.tk);
+
+ // It's possible that the containing scope differs from
+ // prerequisite's. This, for example, happens when we copy the
+ // prerequisite for a synthesized obj{} dependency that happens to
+ // be in a subdirectory, as in exe{foo}:src/cxx{foo}. In this
+ // case, we need to rebase relative paths to the containing scope.
+ //
+ dir_path d, o;
+ if (p.scope != s)
+ {
+ if (tk.out->empty ())
+ {
+ if (tk.dir->relative ())
+ {
+ d = (p.scope.out_path () / *tk.dir).relative (s.out_path ());
+ tk.dir = &d;
+ }
+ }
+ else
+ {
+ if (tk.dir->relative ())
+ {
+ d = (p.scope.src_path () / *tk.dir).relative (s.src_path ());
+ tk.dir = &d;
+ }
+
+ if (tk.out->relative ())
+ {
+ o = (p.scope.out_path () / *tk.out).relative (s.out_path ());
+ if (o.empty ())
+ o = dir_path (".");
+ tk.out = &o;
+ }
+ }
+ }
+
+ // If prerequisite paths are absolute, keep them absolute.
+ //
+ ostringstream os;
+ stream_verb (os, stream_verbosity (1, 0));
+
+ if (pk.proj)
+ os << *pk.proj << '%';
+
+ to_stream (os, pk.tk.as_name (), quote_mode::effective, '@');
+
+ j.member ("name", os.str ());
+ }
+
+ j.member ("type", p.type.name);
+
+ if (!p.vars.empty ())
+ {
+ j.member_begin_array ("variables");
+ dump_variables (j, p.vars, s, variable_kind::prerequisite);
+ j.end_array ();
+ }
+
+ j.end_object ();
+ }
+
+ j.end_array ();
+ }
+ }
+ else
+ {
+ // Matched rules and their state (prerequisite_targets, vars, etc).
+ //
+ auto dump_opstate = [&tcache, &j, &s, &t] (action a)
+ {
+ const target::opstate& o (t[a]);
+
+ j.begin_object ();
+
+ j.member ("rule", o.rule != nullptr ? o.rule->first.c_str () : nullptr);
+
+ // It feels natural to omit the unknown state, as if it corresponded
+ // to absent in optional<target_state>.
+ //
+ if (o.state != target_state::unknown)
+ {
+ assert (o.state == target_state::unchanged ||
+ o.state == target_state::changed ||
+ o.state == target_state::group);
+
+ j.member ("state", to_string (o.state));
+ }
+
+ if (!o.vars.empty ())
+ {
+ j.member_begin_array ("variables");
+ dump_variables (j, o.vars, s, variable_kind::rule);
+ j.end_array ();
+ }
+
+ {
+ bool first (true);
+ for (const prerequisite_target& pt: t.prerequisite_targets[a])
+ {
+ if (pt.target == nullptr)
+ continue;
+
+ if (first)
+ {
+ j.member_begin_array ("prerequisite_targets");
+ first = false;
+ }
+
+ j.begin_object ();
+
+ j.member_name ("name");
+ dump_quoted_target_name (j, *pt.target, tcache);
+
+ j.member ("type", pt.target->type ().name);
+
+ if (pt.adhoc ())
+ j.member ("adhoc", true);
+
+ j.end_object ();
+ }
+
+ if (!first)
+ j.end_array ();
+ }
+
+ j.end_object ();
+ };
+
+ if (a->outer ())
+ {
+ j.member_name ("outer_operation");
+ if (matched (t, *a))
+ dump_opstate (*a);
+ else
+ j.value (nullptr);
+ }
+
+ {
+ action ia (a->inner_action ());
+
+ j.member_name ("inner_operation");
+ if (matched (t, ia))
+ dump_opstate (ia);
+ else
+ j.value (nullptr);
+ }
+ }
+
+ j.end_object ();
+ }
+#endif
+
static void
- dump_scope (optional<action> a,
- ostream& os,
+ dump_scope (ostream& os,
string& ind,
+ optional<action> a,
scope_map::const_iterator& i,
bool rel)
{
@@ -545,21 +1303,25 @@ namespace build2
// disabled amalgamation will be printed directly inside the global
// scope).
//
- for (auto e (p.ctx.scopes.end ());
- (i != e &&
- i->second.front () != nullptr &&
- i->second.front ()->parent_scope () == &p); )
+ for (auto e (p.ctx.scopes.end ()); i != e; )
{
- if (vb || rb || sb)
+ if (i->second.front () == nullptr)
+ ++i; // Skip over src paths.
+ else if (i->second.front ()->parent_scope () != &p)
+ break; // Moved past our parent.
+ else
{
- os << endl;
- vb = rb = false;
- }
+ if (vb || rb || sb)
+ {
+ os << endl;
+ vb = rb = false;
+ }
- os << endl; // Extra newline between scope blocks.
+ os << endl; // Extra newline between scope blocks.
- dump_scope (a, os, ind, i, true /* relative */);
- sb = true;
+ dump_scope (os, ind, a, i, true /* relative */);
+ sb = true;
+ }
}
// Targets.
@@ -581,7 +1343,7 @@ namespace build2
}
os << endl; // Extra newline between targets.
- dump_target (a, os, ind, t, p, true /* relative */);
+ dump_target (os, ind, a, t, p, true /* relative */);
tb = true;
}
@@ -592,45 +1354,245 @@ namespace build2
<< ind << '}';
}
+#ifndef BUILD2_BOOTSTRAP
+ static void
+ dump_scope (json::stream_serializer& j,
+ optional<action> a,
+ scope_map::const_iterator& i,
+ bool rel,
+ target_name_cache& tcache)
+ {
+ // Note: see the buildfile version above for additional comments.
+
+ const scope& p (*i->second.front ());
+ const dir_path& d (i->first);
+ ++i;
+
+#if 0
+ struct scope
+ {
+ // The out_path member is relative to the parent scope. It is empty for
+ // the special global scope. The src_path member is absent if the same
+ // as out_path (in-source build or scope outside of project).
+ //
+ string out_path;
+ optional<string> src_path;
+
+ vector<variable> variables; // Non-type/pattern scope variables.
+
+ vector<scope> scopes; // Immediate children.
+
+ vector<loaded_target|matched_target> targets;
+ };
+#endif
+
+ j.begin_object ();
+
+ if (d.empty ())
+ j.member ("out_path", ""); // Global scope.
+ else
+ {
+ const dir_path& rd (rel ? relative (d) : d);
+ j.member ("out_path", rd.empty () ? string (".") : rd.string ());
+
+ if (!p.out_eq_src ())
+ j.member ("src_path", p.src_path ().string ());
+ }
+
+ const dir_path* orb (relative_base);
+ relative_base = &d;
+
+ // Scope variables.
+ //
+ if (!p.vars.empty ())
+ {
+ j.member_begin_array ("variables");
+ dump_variables (j, p.vars, p, variable_kind::scope);
+ j.end_array ();
+ }
+
+ // Nested scopes of which we are an immediate parent.
+ //
+ {
+ bool first (true);
+ for (auto e (p.ctx.scopes.end ()); i != e; )
+ {
+ if (i->second.front () == nullptr)
+ ++i;
+ else if (i->second.front ()->parent_scope () != &p)
+ break;
+ else
+ {
+ if (first)
+ {
+ j.member_begin_array ("scopes");
+ first = false;
+ }
+
+ dump_scope (j, a, i, true /* relative */, tcache);
+ }
+ }
+
+ if (!first)
+ j.end_array ();
+ }
+
+ // Targets.
+ //
+ {
+ bool first (true);
+ for (const auto& pt: p.ctx.targets)
+ {
+ const target& t (*pt);
+
+ if (&p != &t.base_scope ()) // @@ PERF
+ continue;
+
+ // Skip targets that haven't been matched for this action.
+ //
+ if (a)
+ {
+ if (!(matched (t, a->inner_action ()) ||
+ (a->outer () && matched (t, *a))))
+ continue;
+ }
+
+ if (first)
+ {
+ j.member_begin_array ("targets");
+ first = false;
+ }
+
+ dump_target (j, a, t, p, true /* relative */, tcache);
+ }
+
+ if (!first)
+ j.end_array ();
+ }
+
+ relative_base = orb;
+ j.end_object ();
+ }
+#endif
+
void
- dump (const context& c, optional<action> a)
+ dump (const context& c, optional<action> a, dump_format fmt)
{
auto i (c.scopes.begin ());
assert (i->second.front () == &c.global_scope);
- // We don't lock diag_stream here as dump() is supposed to be called from
- // the main thread prior/after to any other threads being spawned.
- //
- string ind;
- ostream& os (*diag_stream);
- dump_scope (a, os, ind, i, false /* relative */);
- os << endl;
+ switch (fmt)
+ {
+ case dump_format::buildfile:
+ {
+ // We don't lock diag_stream here as dump() is supposed to be called
+ // from the main thread prior/after to any other threads being
+ // spawned.
+ //
+ string ind;
+ ostream& os (*diag_stream);
+ dump_scope (os, ind, a, i, false /* relative */);
+ os << endl;
+ break;
+ }
+ case dump_format::json:
+ {
+#ifndef BUILD2_BOOTSTRAP
+ target_name_cache tc;
+ json::stream_serializer j (cout, 0 /* indent */);
+ dump_scope (j, a, i, false /* relative */, tc);
+ cout << endl;
+#else
+ assert (false);
+#endif
+ break;
+ }
+ }
}
void
- dump (const scope& s, const char* cind)
+ dump (const scope* s, optional<action> a, dump_format fmt, const char* cind)
{
- const scope_map& m (s.ctx.scopes);
- auto i (m.find_exact (s.out_path ()));
- assert (i != m.end () && i->second.front () == &s);
+ scope_map::const_iterator i;
+ if (s != nullptr)
+ {
+ const scope_map& m (s->ctx.scopes);
+ i = m.find_exact (s->out_path ());
+ assert (i != m.end () && i->second.front () == s);
+ }
- string ind (cind);
- ostream& os (*diag_stream);
- dump_scope (nullopt /* action */, os, ind, i, false /* relative */);
- os << endl;
+ switch (fmt)
+ {
+ case dump_format::buildfile:
+ {
+ string ind (cind);
+ ostream& os (*diag_stream);
+
+ if (s != nullptr)
+ dump_scope (os, ind, a, i, false /* relative */);
+ else
+ os << ind << "<no known scope to dump>";
+
+ os << endl;
+ break;
+ }
+ case dump_format::json:
+ {
+#ifndef BUILD2_BOOTSTRAP
+ target_name_cache tc;
+ json::stream_serializer j (cout, 0 /* indent */);
+
+ if (s != nullptr)
+ dump_scope (j, a, i, false /* relative */, tc);
+ else
+ j.value (nullptr);
+
+ cout << endl;
+#else
+ assert (false);
+#endif
+ break;
+ }
+ }
}
void
- dump (const target& t, const char* cind)
+ dump (const target* t, optional<action> a, dump_format fmt, const char* cind)
{
- string ind (cind);
- ostream& os (*diag_stream);
- dump_target (nullopt /* action */,
- os,
- ind,
- t,
- t.base_scope (),
- false /* relative */);
- os << endl;
+ const scope* bs (t != nullptr ? &t->base_scope () : nullptr);
+
+ switch (fmt)
+ {
+ case dump_format::buildfile:
+ {
+ string ind (cind);
+ ostream& os (*diag_stream);
+
+ if (t != nullptr)
+ dump_target (os, ind, a, *t, *bs, false /* relative */);
+ else
+ os << ind << "<no known target to dump>";
+
+ os << endl;
+ break;
+ }
+ case dump_format::json:
+ {
+#ifndef BUILD2_BOOTSTRAP
+ target_name_cache tc;
+ json::stream_serializer j (cout, 0 /* indent */);
+
+ if (t != nullptr)
+ dump_target (j, a, *t, *bs, false /* relative */, tc);
+ else
+ j.value (nullptr);
+
+ cout << endl;
+#else
+ assert (false);
+#endif
+ break;
+ }
+ }
}
}
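
The new dump() entry points dispatch on the output format; the following standalone sketch (invented serializer, not the libbutl one) shows the same switch shape, with buildfile output going to the diagnostics stream and JSON going to stdout.

#include <iostream>
#include <string>

enum class dump_format {buildfile, json};

static void
dump (const std::string& name, dump_format fmt)
{
  switch (fmt)
  {
  case dump_format::buildfile:
    std::cerr << name << ':' << std::endl; // Diagnostics stream.
    break;
  case dump_format::json:
    std::cout << "{\"name\":\"" << name << "\"}" << std::endl;
    break;
  }
}

int main ()
{
  dump ("exe{hello}", dump_format::buildfile);
  dump ("exe{hello}", dump_format::json);
}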
diff --git a/libbuild2/dump.hxx b/libbuild2/dump.hxx
index 6ec6944..1a1a080 100644
--- a/libbuild2/dump.hxx
+++ b/libbuild2/dump.hxx
@@ -4,6 +4,10 @@
#ifndef LIBBUILD2_DUMP_HXX
#define LIBBUILD2_DUMP_HXX
+#ifndef BUILD2_BOOTSTRAP
+# include <libbutl/json/serializer.hxx>
+#endif
+
#include <libbuild2/types.hxx>
#include <libbuild2/forward.hxx>
#include <libbuild2/utility.hxx>
@@ -14,18 +18,40 @@
namespace build2
{
+ enum class dump_format {buildfile, json};
+
// Dump the build state to diag_stream. If action is specified, then assume
// rules have been matched for this action and dump action-specific
// information (like rule-specific variables).
//
+ // If scope or target is NULL, then assume not found and write a format-
+ // appropriate indication.
+ //
+ LIBBUILD2_SYMEXPORT void
+ dump (const context&, optional<action>, dump_format);
+
LIBBUILD2_SYMEXPORT void
- dump (const context&, optional<action> = nullopt);
+ dump (const scope*, optional<action>, dump_format, const char* ind = "");
LIBBUILD2_SYMEXPORT void
- dump (const scope&, const char* ind = "");
+ dump (const target*, optional<action>, dump_format, const char* ind = "");
+#ifndef BUILD2_BOOTSTRAP
+ // Dump (effectively) quoted target name, optionally relative (to the out
+ // tree).
+ //
+ LIBBUILD2_SYMEXPORT void
+ dump_quoted_target_name (butl::json::stream_serializer&,
+ const target&,
+ bool relative = false);
+
+ // Dump display target name, optionally relative (to the out tree).
+ //
LIBBUILD2_SYMEXPORT void
- dump (const target&, const char* ind = "");
+ dump_display_target_name (butl::json::stream_serializer&,
+ const target&,
+ bool relative = false);
+#endif
}
#endif // LIBBUILD2_DUMP_HXX
diff --git a/libbuild2/dyndep.cxx b/libbuild2/dyndep.cxx
index 73fc8eb..dbeb47e 100644
--- a/libbuild2/dyndep.cxx
+++ b/libbuild2/dyndep.cxx
@@ -5,6 +5,7 @@
#include <libbuild2/scope.hxx>
#include <libbuild2/target.hxx>
+#include <libbuild2/search.hxx>
#include <libbuild2/context.hxx>
#include <libbuild2/algorithm.hxx>
#include <libbuild2/filesystem.hxx>
@@ -18,61 +19,7 @@ namespace build2
bool dyndep_rule::
update (tracer& trace, action a, const target& t, timestamp ts)
{
- // In particular, this function is used to make sure header dependencies
- // are up to date.
- //
- // There would normally be a lot of headers for every source file (think
- // all the system headers) and just calling execute_direct() on all of
- // them can get expensive. At the same time, most of these headers are
- // existing files that we will never be updating (again, system headers,
- // for example) and the rule that will match them is the fallback
- // file_rule. That rule has an optimization: it returns noop_recipe (which
- // causes the target state to be automatically set to unchanged) if the
- // file is known to be up to date. So we do the update "smartly".
- //
- const path_target* pt (t.is_a<path_target> ());
-
- if (pt == nullptr)
- ts = timestamp_unknown;
-
- target_state os (t.matched_state (a));
-
- if (os == target_state::unchanged)
- {
- if (ts == timestamp_unknown)
- return false;
- else
- {
- // We expect the timestamp to be known (i.e., existing file).
- //
- timestamp mt (pt->mtime ());
- assert (mt != timestamp_unknown);
- return mt > ts;
- }
- }
- else
- {
- // We only want to return true if our call to execute() actually caused
- // an update. In particular, the target could already have been in
- // target_state::changed because of the dynamic dependency extraction
- // run for some other target.
- //
- // @@ MT perf: so we are going to switch the phase and execute for
- // any generated header.
- //
- phase_switch ps (t.ctx, run_phase::execute);
- target_state ns (execute_direct (a, t));
-
- if (ns != os && ns != target_state::unchanged)
- {
- l6 ([&]{trace << "updated " << t
- << "; old state " << os
- << "; new state " << ns;});
- return true;
- }
- else
- return ts != timestamp_unknown ? pt->newer (ts, ns) : false;
- }
+ return update_during_match (trace, a, t, ts);
}
optional<bool> dyndep_rule::
@@ -84,11 +31,11 @@ namespace build2
bool adhoc,
uintptr_t data)
{
- // Even if failing we still use try_match() in order to issue consistent
- // (with other places) diagnostics (rather than the generic "not rule to
- // update ...").
+ // Even if failing we still use try_match_sync() in order to issue
+ // consistent (with other places) diagnostics (rather than the generic
+    // "no rule to update ...").
//
- if (!try_match (a, pt).first)
+ if (!try_match_sync (a, pt).first)
{
if (!f)
return nullopt;
@@ -110,16 +57,52 @@ namespace build2
return r;
}
+ // Check if the specified prerequisite is updated during match by any other
+ // prerequisites of the specified target, recursively.
+ //
+ static bool
+ updated_during_match (action a, const target& t, size_t pts_n,
+ const target& pt)
+ {
+ const auto& pts (t.prerequisite_targets[a]);
+
+ for (size_t i (0); i != pts_n; ++i)
+ {
+ const prerequisite_target& p (pts[i]);
+
+ // If include_target flag is specified, then p.data contains the
+ // target pointer.
+ //
+ if (const target* xt =
+ (p.target != nullptr ? p.target :
+ ((p.include & prerequisite_target::include_target) != 0
+ ? reinterpret_cast<target*> (p.data)
+ : nullptr)))
+ {
+ if (xt == &pt && (p.include & prerequisite_target::include_udm) != 0)
+ return true;
+
+ if (size_t n = xt->prerequisite_targets[a].size ())
+ {
+ if (updated_during_match (a, *xt, n, pt))
+ return true;
+ }
+ }
+ }
+
+ return false;
+ }
+
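
The recursive updated_during_match() check above is essentially a flagged reachability walk over the prerequisite graph; here is a standalone sketch with invented node/edge types (not build2 code).

#include <vector>
#include <cstdint>
#include <iostream>

struct node;

struct edge
{
  const node* target;
  std::uint8_t flags; // Bit 1: updated during match.
};

struct node
{
  std::vector<edge> edges;
};

static bool
updated_during_match (const node& n, const node& pt)
{
  for (const edge& e: n.edges)
  {
    if (e.target == nullptr)
      continue;

    if (e.target == &pt && (e.flags & 0x1) != 0)
      return true;

    if (updated_during_match (*e.target, pt)) // Recurse into the sub-graph.
      return true;
  }

  return false;
}

int main ()
{
  node header, source, obj;
  source.edges.push_back (edge {&header, 0x1});
  obj.edges.push_back (edge {&source, 0x0});

  std::cout << updated_during_match (obj, header) << '\n'; // 1
}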
optional<bool> dyndep_rule::
inject_existing_file (tracer& trace, const char* what,
- action a, target& t,
+ action a, target& t, size_t pts_n,
const file& pt,
timestamp mt,
bool f,
bool adhoc,
uintptr_t data)
{
- if (!try_match (a, pt).first)
+ if (!try_match_sync (a, pt).first)
{
if (!f)
return nullopt;
@@ -135,8 +118,11 @@ namespace build2
recipe_function* const* rf (pt[a].recipe.target<recipe_function*> ());
if (rf == nullptr || *rf != &noop_action)
{
- fail << what << ' ' << pt << " has non-noop recipe" <<
- info << "consider listing it as static prerequisite of " << t;
+ if (pts_n == 0 || !updated_during_match (a, t, pts_n, pt))
+ {
+ fail << what << ' ' << pt << " has non-noop recipe" <<
+ info << "consider listing it as static prerequisite of " << t;
+ }
}
bool r (update (trace, a, pt, mt));
@@ -150,21 +136,27 @@ namespace build2
void dyndep_rule::
verify_existing_file (tracer&, const char* what,
- action a, const target& t,
+ action a, const target& t, size_t pts_n,
const file& pt)
{
diag_record dr;
- if (pt.matched (a))
+ if (pt.matched (a, memory_order_acquire))
{
recipe_function* const* rf (pt[a].recipe.target<recipe_function*> ());
if (rf == nullptr || *rf != &noop_action)
{
- dr << fail << what << ' ' << pt << " has non-noop recipe";
+ if (pts_n == 0 || !updated_during_match (a, t, pts_n, pt))
+ {
+ dr << fail << what << ' ' << pt << " has non-noop recipe";
+ }
}
}
else if (pt.decl == target_decl::real)
{
+ // Note that this target could not possibly be updated during match
+ // since it's not matched.
+ //
dr << fail << what << ' ' << pt << " is explicitly declared as "
<< "target and may have non-noop recipe";
}
@@ -173,12 +165,6 @@ namespace build2
dr << info << "consider listing it as static prerequisite of " << t;
}
- // Reverse-lookup target type(s) from file name/extension.
- //
- // If the list of base target types is specified, then only these types and
- // those derived from them are considered. Otherwise, any file-based type is
- // considered but not the file type itself.
- //
small_vector<const target_type*, 2> dyndep_rule::
map_extension (const scope& bs,
const string& n, const string& e,
@@ -404,7 +390,7 @@ namespace build2
prev_ = nullptr;
}
- // See if this path is inside a project with an out-of-tree build and is
+ // See if this path is inside a project with an out of source build and is
// in the out directory tree.
//
const scope& bs (ctx_.scopes.find_out (d));
@@ -438,19 +424,35 @@ namespace build2
action a, const scope& bs, const target& t,
path& fp, bool cache, bool norm,
bool insert,
+ bool dynamic,
const function<dyndep_rule::map_extension_func>& map_extension,
const target_type& fallback,
const function<dyndep_rule::prefix_map_func>& get_pfx_map,
const dyndep_rule::srcout_map& so_map)
{
- // Find or maybe insert the target. The directory is only moved from if
- // insert is true. Note that it must be normalized.
+  // NOTE: see enter_header() caching logic if changing anything here with
+ // regards to the target and base scope usage.
+
+ assert (!insert || t.ctx.phase == run_phase::match);
+
+ // Find or maybe insert the target.
+ //
+ // If insert is false, then don't consider dynamically-created targets
+ // (i.e., those that are not real or implied) unless dynamic is true, in
+ // which case return the target that would have been inserted.
+ //
+ // The directory is only moved from if insert is true. Note that it must
+ // be absolute and normalized.
//
- auto find = [&trace, what, &t,
- &map_extension, &fallback] (dir_path&& d,
- path&& f,
- bool insert) -> const file*
+ auto find = [&trace, what, &bs, &t,
+ &map_extension,
+ &fallback] (dir_path&& d,
+ path&& f,
+ bool insert,
+ bool dynamic = false) -> const file*
{
+ context& ctx (t.ctx);
+
// Split the file into its name part and extension. Here we can assume
// the name part is a valid filesystem name.
//
@@ -473,7 +475,7 @@ namespace build2
dir_path out;
// It's possible the extension-to-target type mapping is ambiguous (for
- // example, because both C and X-language headers use the same .h
+ // example, because both C and C++-language headers use the same .h
// extension). In this case we will first try to find one that matches
// an explicit target (similar logic to when insert is false).
//
@@ -486,15 +488,40 @@ namespace build2
// pick the first one (it's highly unlikely the source file extension
// mapping will differ based on the configuration).
//
+ // Note that we also need to remember the base scope for search() below
+      // (failing that, search_existing_file() will refuse to look)
+ //
+ const scope* s (nullptr);
{
- const scope& bs (**t.ctx.scopes.find (d).first);
- if (const scope* rs = bs.root_scope ())
+ // While we cannot accurately associate in the general case, we can do
+ // so if the path belongs to this project.
+ //
+ const scope& rs (*bs.root_scope ());
+ bool src (false);
+ if (d.sub (rs.out_path ()) ||
+ (src = (!rs.out_eq_src () && d.sub (rs.src_path ()))))
{
if (map_extension != nullptr)
tts = map_extension (bs, n, e);
- if (!bs.out_eq_src () && d.sub (bs.src_path ()))
- out = out_src (d, *rs);
+ if (src)
+ out = out_src (d, rs);
+
+ s = &bs;
+ }
+ else
+ {
+ const scope& bs (**ctx.scopes.find (d).first);
+ if (const scope* rs = bs.root_scope ())
+ {
+ if (map_extension != nullptr)
+ tts = map_extension (bs, n, e);
+
+ if (!rs->out_eq_src () && d.sub (rs->src_path ()))
+ out = out_src (d, *rs);
+
+ s = &bs;
+ }
}
}
@@ -504,9 +531,9 @@ namespace build2
if (tts.empty ())
{
// If the project doesn't "know" this extension then we can't possibly
- // find an explicit target of this type.
+ // find a real or implied target of this type.
//
- if (!insert)
+ if (!insert && !dynamic)
{
l6 ([&]{trace << "unknown " << what << ' ' << n << " extension '"
<< e << "'";});
@@ -538,7 +565,7 @@ namespace build2
{
const target_type& tt (*tts[i]);
- if (const target* x = t.ctx.targets.find (tt, d, out, n, e, trace))
+ if (const target* x = ctx.targets.find (tt, d, out, n, e, trace))
{
// What would be the harm in reusing a dynamically-inserted target
// if there is no buildfile-mentioned one? Probably none (since it
@@ -550,8 +577,7 @@ namespace build2
// implied ones because pre-entered members of a target group
// (e.g., cli.cxx) are implied.
//
- if (x->decl == target_decl::real ||
- x->decl == target_decl::implied)
+ if (operator>= (x->decl, target_decl::implied)) // @@ VC14
{
r = x;
break;
@@ -561,7 +587,7 @@ namespace build2
// Cache the dynamic target corresponding to tts[0] since that's
// what we will be inserting (see below).
//
- if (insert && i == 0)
+ if ((insert || dynamic) && i == 0)
f = x;
l6 ([&]{trace << "dynamic target with target type " << tt.name;});
@@ -571,7 +597,7 @@ namespace build2
l6 ([&]{trace << "no target with target type " << tt.name;});
}
- // Note: we can't do this because of the in-source builds where there
+ // Note: we can't do this because of the in source builds where there
// won't be explicit targets for non-generated files.
//
// This should be harmless, however, since in our world generated file
@@ -600,10 +626,29 @@ namespace build2
r = f;
}
- // @@ OPT: move d, out, n
- //
if (r == nullptr && insert)
- r = &search (t, *tts[0], d, out, n, &e, nullptr);
+ {
+ // Like search(t, pk) but don't fail if the target is in src.
+ //
+ // While it may seem like there is not much difference, the caller may
+ // actually do more than just issue more specific diagnostics. For
+ // example, it may defer the failure to the tool diagnostics.
+ //
+#if 0
+ r = &search (t, *tts[0], d, out, n, &e, s);
+#else
+ prerequisite_key pk {nullopt, {tts[0], &d, &out, &n, move (e)}, s};
+
+ r = pk.tk.type->search (ctx, &t, pk);
+
+ if (r == nullptr && pk.tk.out->empty ())
+ {
+ auto p (ctx.scopes.find (d, false));
+ if (*p.first != nullptr || ++p.first == p.second)
+ r = &create_new_target (ctx, pk);
+ }
+#endif
+ }
return static_cast<const file*> (r);
};
@@ -615,6 +660,9 @@ namespace build2
// Note: we now always use absolute path to the translation unit so this
// no longer applies. But let's keep it for posterity.
//
+ // Also note that we now assume (see cc::compile_rule::enter_header()) a
+ // relative path signifies a generated header.
+ //
#if 0
if (f.relative () && rels.relative ())
{
@@ -644,7 +692,7 @@ namespace build2
const file* pt (nullptr);
bool remapped (false);
- // If still relative then it does not exist.
+ // If relative then it does not exist.
//
if (fp.relative ())
{
@@ -701,7 +749,11 @@ namespace build2
// Maybe for diagnostics (i.e., we will actually try to build
// something there instead of just saying no mapping).
//
- pt = find (pd / d, fp.leaf (), insert && !i->first.empty ());
+ if (i->first.empty ())
+ pt = find (pd / d, fp.leaf (), false);
+ else
+ pt = find (pd / d, fp.leaf (), insert, dynamic);
+
if (pt != nullptr)
{
fp = pd / fp;
@@ -761,7 +813,7 @@ namespace build2
if (pt == nullptr)
{
l6 ([&]{trace << (insert ? "entering " : "finding ") << fp;});
- pt = find (fp.directory (), fp.leaf (), insert);
+ pt = find (fp.directory (), fp.leaf (), insert, dynamic);
}
}
@@ -780,7 +832,7 @@ namespace build2
return enter_file_impl (trace, what,
a, bs, t,
fp, cache, norm,
- true /* insert */,
+ true /* insert */, false,
map_ext, fallback, pfx_map, so_map);
}
@@ -788,6 +840,7 @@ namespace build2
find_file (tracer& trace, const char* what,
action a, const scope& bs, const target& t,
path& fp, bool cache, bool norm,
+ bool dynamic,
const function<map_extension_func>& map_ext,
const target_type& fallback,
const function<prefix_map_func>& pfx_map,
@@ -796,7 +849,264 @@ namespace build2
return enter_file_impl (trace, what,
a, bs, t,
fp, cache, norm,
- false /* insert */,
+ false /* insert */, dynamic,
map_ext, fallback, pfx_map, so_map);
}
+
+ static pair<const file&, bool>
+ inject_group_member_impl (action a, const scope& bs, mtime_target& g,
+ path f, string n, string e,
+ const target_type& tt,
+ const function<dyndep_rule::group_filter_func>& fl)
+ {
+ // NOTE: see adhoc_rule_regex_pattern::apply_group_members() for a variant
+ // of the same code.
+
+ // Note that we used to directly match such a member with group_recipe.
+ // But that messes up our dependency counts since we don't really know
+ // whether someone will execute such a member.
+ //
+ // So instead we now just link the member up to the group and rely on the
+ // special semantics in match_rule_impl() for groups with the dyn_members
+ // flag.
+ //
+ assert ((g.type ().flags & target_type::flag::dyn_members) ==
+ target_type::flag::dyn_members);
+
+ // We expect that nobody else can insert these members (seems reasonable
+ // seeing that their names are dynamically discovered).
+ //
+ auto l (search_new_locked (
+ bs.ctx,
+ tt,
+ f.directory (),
+ dir_path (), // Always in out.
+ move (n),
+ &e,
+ &bs));
+
+ const file& t (l.first.as<file> ()); // Note: non-const only if have lock.
+
+    // We don't need to match the group recipe directly from ad hoc
+ // recipes/rules due to the special semantics for explicit group members
+ // in match_rule_impl(). This is what skip_match is for.
+ //
+ if (l.second)
+ {
+ l.first.group = &g;
+ l.second.unlock ();
+ t.path (move (f));
+ return pair<const file&, bool> (t, true);
+ }
+ else
+ {
+ if (fl != nullptr && !fl (g, t))
+ return pair<const file&, bool> (t, false);
+ }
+
+    // Check if we already belong to this group. Note that this is not a mere
+ // optimization since we may be in the member->group->member chain and
+ // trying to lock the member the second time would deadlock (this can be
+ // triggered, for example, by dist, which sort of depends on such members
+ // directly... which was not quite correct and is now fixed).
+ //
+ if (t.group == &g) // Note: atomic.
+ t.path (move (f));
+ else
+ {
+ // This shouldn't normally fail since we are the only ones that should
+    // know about this target (otherwise why is it dynamically discovered).
+ // However, nothing prevents the user from depending on such a target,
+ // however misguided.
+ //
+ target_lock tl (lock (a, t));
+
+ if (!tl)
+ fail << "group " << g << " member " << t << " is already matched" <<
+ info << "dynamically extracted group members cannot be used as "
+ << "prerequisites directly, only via group";
+
+ if (t.group == nullptr)
+ tl.target->group = &g;
+ else if (t.group != &g)
+ fail << "group " << g << " member " << t
+ << " is already member of group " << *t.group;
+
+ t.path (move (f));
+ }
+
+ return pair<const file&, bool> (t, true);
+ }
+
+ pair<const file&, bool> dyndep_rule::
+ inject_group_member (action a, const scope& bs, mtime_target& g,
+ path f,
+ const target_type& tt,
+ const function<group_filter_func>& filter)
+ {
+ path n (f.leaf ());
+ string e (n.extension ());
+ n.make_base ();
+
+ return inject_group_member_impl (a, bs, g,
+ move (f), move (n).string (), move (e),
+ tt,
+ filter);
+ }
+
+ static const target_type&
+ map_target_type (const char* what,
+ const scope& bs,
+ const path& f, const string& n, const string& e,
+ const function<dyndep_rule::map_extension_func>& map_ext,
+ const target_type& fallback)
+ {
+ // Map extension to the target type, falling back to the fallback type.
+ //
+ small_vector<const target_type*, 2> tts;
+ if (map_ext != nullptr)
+ tts = map_ext (bs, n, e);
+
+ // Not sure what else we can do in this case.
+ //
+ if (tts.size () > 1)
+ {
+ diag_record dr (fail);
+
+ dr << "mapping of " << what << " target path " << f
+ << " to target type is ambiguous";
+
+ for (const target_type* tt: tts)
+ dr << info << "can be " << tt->name << "{}";
+ }
+
+ const target_type& tt (tts.empty () ? fallback : *tts.front ());
+
+ if (!tt.is_a<file> ())
+ {
+ fail << what << " target path " << f << " mapped to non-file-based "
+ << "target type " << tt.name << "{}";
+ }
+
+ return tt;
+ }
+
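
map_target_type() above maps an extension to a type, failing on ambiguity and falling back otherwise; this standalone sketch (invented extension table, string type names) shows the same three outcomes.

#include <map>
#include <string>
#include <vector>
#include <iostream>
#include <stdexcept>

static const std::map<std::string, std::vector<std::string>> ext_map {
  {"hxx", {"cxx_header"}},
  {"h",   {"c_header", "cxx_header"}} // Ambiguous on purpose.
};

static std::string
map_type (const std::string& ext, const std::string& fallback)
{
  auto i (ext_map.find (ext));

  if (i == ext_map.end ())
    return fallback;

  if (i->second.size () > 1)
    throw std::runtime_error ("mapping of extension ." + ext +
                              " to target type is ambiguous");
  return i->second.front ();
}

int main ()
{
  std::cout << map_type ("hxx", "file") << '\n'; // cxx_header
  std::cout << map_type ("txt", "file") << '\n'; // file (fallback)

  try {map_type ("h", "file");}
  catch (const std::exception& e) {std::cout << e.what () << '\n';}
}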
+ pair<const file&, bool> dyndep_rule::
+ inject_group_member (const char* what,
+ action a, const scope& bs, mtime_target& g,
+ path f,
+ const function<map_extension_func>& map_ext,
+ const target_type& fallback,
+ const function<group_filter_func>& filter)
+ {
+ path n (f.leaf ());
+ string e (n.extension ());
+ n.make_base ();
+
+ // Map extension to the target type, falling back to the fallback type.
+ //
+ const target_type& tt (
+ map_target_type (what, bs, f, n.string (), e, map_ext, fallback));
+
+ return inject_group_member_impl (a, bs, g,
+ move (f), move (n).string (), move (e),
+ tt,
+ filter);
+ }
+
+ pair<const file&, bool>
+ inject_adhoc_group_member_impl (action, const scope& bs, target& t,
+ path f, string n, string e,
+ const target_type& tt)
+ {
+ // Assume nobody else can insert these members (seems reasonable seeing
+ // that their names are dynamically discovered).
+ //
+ auto l (search_new_locked (
+ bs.ctx,
+ tt,
+ f.directory (),
+ dir_path (), // Always in out.
+ move (n),
+ &e,
+ &bs));
+
+ file* ft (&l.first.as<file> ()); // Note: non-const only if locked.
+
+ // Skip if this is one of the static targets (or a duplicate of the
+ // dynamic target).
+ //
+ // In particular, we expect to skip all the targets that we could not lock
+ // (e.g., in case all of this has already been done for the previous
+ // operation in a batch; make sure to test `update update update` and
+ // `update clean update ...` batches if changing anything here).
+ //
+ // While at it also find the ad hoc members list tail.
+ //
+ const_ptr<target>* tail (&t.adhoc_member);
+ for (target* m (&t); m != nullptr; m = m->adhoc_member)
+ {
+ if (ft == m)
+ {
+ tail = nullptr;
+ break;
+ }
+
+ tail = &m->adhoc_member;
+ }
+
+ if (tail == nullptr)
+ return pair<const file&, bool> (*ft, false);
+
+ if (!l.second)
+ fail << "dynamic target " << *ft << " already exists and cannot be "
+ << "made ad hoc member of group " << t;
+
+ ft->group = &t;
+ l.second.unlock ();
+
+ // We need to be able to distinguish static targets from dynamic (see the
+ // static set hashing in adhoc_buildscript_rule::apply() for details).
+ //
+ assert (ft->decl != target_decl::real);
+
+ *tail = ft;
+ ft->path (move (f));
+
+ return pair<const file&, bool> (*ft, true);
+ }
+
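
The tail-finding loop above appends the dynamic target to the intrusive ad hoc member chain unless it is already on it; here is a standalone sketch of that append-if-absent walk (invented types, not build2 code).

#include <iostream>

struct target
{
  const char* name;
  target* adhoc_member = nullptr;
};

// Return true if m was appended, false if it was already a member.
//
static bool
append_member (target& group, target& m)
{
  target** tail (&group.adhoc_member);

  for (target* x (&group); x != nullptr; x = x->adhoc_member)
  {
    if (x == &m)
      return false; // Already on the chain (e.g., a static member).

    tail = &x->adhoc_member;
  }

  *tail = &m;
  return true;
}

int main ()
{
  target exe {"exe{foo}"}, map {"map{foo}"};

  std::cout << append_member (exe, map) << '\n'; // 1: appended.
  std::cout << append_member (exe, map) << '\n'; // 0: already a member.
}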
+ pair<const file&, bool> dyndep_rule::
+ inject_adhoc_group_member (action a, const scope& bs, target& t,
+ path f,
+ const target_type& tt)
+ {
+ path n (f.leaf ());
+ string e (n.extension ());
+ n.make_base ();
+
+ return inject_adhoc_group_member_impl (
+ a, bs, t, move (f), move (n).string (), move (e), tt);
+ }
+
+ pair<const file&, bool> dyndep_rule::
+ inject_adhoc_group_member (const char* what,
+ action a, const scope& bs, target& t,
+ path f,
+ const function<map_extension_func>& map_ext,
+ const target_type& fallback)
+ {
+ path n (f.leaf ());
+ string e (n.extension ());
+ n.make_base ();
+
+ // Map extension to the target type, falling back to the fallback type.
+ //
+ const target_type& tt (
+ map_target_type (what, bs, f, n.string (), e, map_ext, fallback));
+
+ return inject_adhoc_group_member_impl (
+ a, bs, t, move (f), move (n).string (), move (e), tt);
+ }
}
diff --git a/libbuild2/dyndep.hxx b/libbuild2/dyndep.hxx
index b285704..a0949c4 100644
--- a/libbuild2/dyndep.hxx
+++ b/libbuild2/dyndep.hxx
@@ -14,16 +14,20 @@
#include <libbuild2/export.hxx>
// Additional functionality that is normally only useful for implementing
-// rules with dynamic dependencies.
+// rules with dynamic dependencies (usually prerequisites, but also target
+// group members).
//
namespace build2
{
class LIBBUILD2_SYMEXPORT dyndep_rule
{
public:
- // Update the target during the match phase. Return true if it has changed
- // or if the passed timestamp is not timestamp_unknown and is older than
- // the target.
+ // Update the target during the match phase. Return true if the target has
+ // changed or, if the passed timestamp is not timestamp_unknown, it is
+ // older than the target.
+ //
+ // Note that such a target must still be updated during the execute phase
+ // in order to keep the dependency counts straight.
//
static bool
update (tracer&, action, const target&, timestamp);
@@ -32,7 +36,7 @@ namespace build2
// target.
//
// Return the indication of whether it has changed or, if the passed
- // timestamp is not timestamp_unknown, is older than this timestamp. If
+ // timestamp is not timestamp_unknown, is newer than this timestamp. If
  // the prerequisite target does not exist nor can be generated (no rule),
// then issue diagnostics and fail if the fail argument is true and return
// nullopt otherwise.
@@ -53,32 +57,39 @@ namespace build2
bool adhoc = false,
uintptr_t data = 0);
- // As above but verify the file is matched with noop_recipe and issue
- // diagnostics and fail otherwise (regardless of the fail flag).
+ // As above but verify the file is matched with noop_recipe or was updated
+ // during match and issue diagnostics and fail otherwise (regardless of
+  // the fail flag). Pass 0 for pts_n if you don't want the "was updated during
+ // match" part.
//
// This version (together with verify_existing_file() below) is primarily
// useful for handling dynamic dependencies that are produced as a
// byproduct of recipe execution (and thus must have all the generated
// prerequisites specified statically).
//
+ // Note that this function expects all the static prerequisites of the
+ // target to already be matched and their number passed in pts_n.
+ //
static optional<bool>
inject_existing_file (tracer&, const char* what,
- action, target&,
+ action, target&, size_t pts_n,
const file& prerequiste,
timestamp,
bool fail,
bool adhoc = false,
uintptr_t data = 0);
- // Verify the file is matched with noop_recipe and issue diagnostics and
- // fail otherwise. If the file is not matched, then fail if the target is
- // not implied (that is, declared in a buildfile).
+ // Verify the file is matched with noop_recipe or was updated during match
+ // and issue diagnostics and fail otherwise. If the file is not matched,
+ // then fail if the target is not implied (that is, declared in a
+  // buildfile). Pass 0 for pts_n if you don't want the "was updated during
+ // match" part.
//
// Note: can only be called in the execute phase.
//
static void
verify_existing_file (tracer&, const char* what,
- action, const target&,
+ action, const target&, size_t pts_n,
const file& prerequiste);
// Reverse-lookup target type(s) from file name/extension.
@@ -87,6 +98,10 @@ namespace build2
// and those derived from them are considered. Otherwise, any file-based
// type is considered but not the file type itself.
//
+ // It's possible the extension-to-target type mapping is ambiguous (for
+ // example, because both C and C++-language headers use the same .h
+ // extension). So this function can return multiple target types.
+ //
static small_vector<const target_type*, 2>
map_extension (const scope& base,
const string& name, const string& ext,
@@ -203,17 +218,86 @@ namespace build2
const srcout_map& = {});
// As above but do not insert the target if it doesn't already exist. This
- // function also returns NULL if the target exists but is implied (that
- // is, not declared in a buildfile).
+ // function also returns NULL if the target exists but is dynamic (that
+ // is, not real or implied), unless the dynamic argument is true.
//
static pair<const file*, bool>
find_file (tracer&, const char* what,
action, const scope& base, const target&,
path& prerequisite, bool cache, bool normalized,
+ bool dynamic,
const function<map_extension_func>&,
const target_type& fallback,
const function<prefix_map_func>& = nullptr,
const srcout_map& = {});
+
+ // Find or insert a target file path as a target of the specified type,
+ // make it a member of the specified (non-ad hoc) mtime target group and
+ // set its path. Return the target and an indication of whether it was
+ // made a member (can only be false if a filter is provided; see below).
+ //
+ // The file path must be absolute and normalized. Note that this function
+ // assumes that this member can only be matched via this group. The group
+ // type must have the target_type::flag::dyn_members flag.
+ //
+ // If specified, the group_filter function is called on the target before
+ // making it a group member, skipping it if this function returns false.
+ // Note that the filter is skipped if the target is newly inserted (the
+ // filter is meant to be used to skip duplicates).
+ //
+ using group_filter_func = bool (mtime_target& g, const file&);
+
+ static pair<const file&, bool>
+ inject_group_member (action, const scope& base, mtime_target&,
+ path,
+ const target_type&,
+ const function<group_filter_func>& = nullptr);
+
+ template <typename T>
+ static pair<const T&, bool>
+ inject_group_member (action a, const scope& bs, mtime_target& g,
+ path f,
+ const function<group_filter_func>& filter = nullptr)
+ {
+ auto p (inject_group_member (a, bs, g, move (f), T::static_type, filter));
+ return pair<const T&, bool> (p.first.template as<T> (), p.second);
+ }
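+
+ // Illustrative usage sketch (names are assumptions): a rule that
+ // discovered an output file path p while updating the mtime group g could
+ // register it roughly as follows, where tt is the member's target type
+ // (for example, obtained via map_extension() above):
+ //
+ //   auto filter = [] (mtime_target& g, const file& m)
+ //   {
+ //     // Return false to skip m (e.g., if it is already known to be a
+ //     // member of g); the filter is not called for newly-inserted targets.
+ //     //
+ //     return true;
+ //   };
+ //
+ //   pair<const file&, bool> r (
+ //     dyndep_rule::inject_group_member (a, bs, g, move (p), tt, filter));
+ //
+ //   if (r.second)
+ //     ; // p was made a member of g.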
+
+ // As above but the target type is determined using the map_extension
+ // function if specified, falling back to the fallback type if unable to
+ // (the what argument is used for diagnostics during this process).
+ //
+ static pair<const file&, bool>
+ inject_group_member (const char* what,
+ action, const scope& base, mtime_target& g,
+ path,
+ const function<map_extension_func>&,
+ const target_type& fallback,
+ const function<group_filter_func>& = nullptr);
+
+
+ // Find or insert a target file path as a target, make it a member of the
+ // specified ad hoc group unless it already is, and set its path. Return
+ // the target and an indication of whether it was added as a member.
+ //
+ // The file path must be absolute and normalized. Note that this function
+ // assumes that this target can only be known as a member of this group.
+ //
+ static pair<const file&, bool>
+ inject_adhoc_group_member (action, const scope& base, target& g,
+ path,
+ const target_type&);
+
+ // As above but the target type is determined using the map_extension
+ // function if specified, falling back to the fallback type if unable to
+ // (the what argument is used for diagnostics during this process).
+ //
+ static pair<const file&, bool>
+ inject_adhoc_group_member (const char* what,
+ action, const scope& base, target& g,
+ path,
+ const function<map_extension_func>&,
+ const target_type& fallback);
};
}
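
For comparison, a sketch of the ad hoc group variant under the same assumptions (dyndep_rule as the enclosing class, illustrative names; map_ext is assumed to be a function<map_extension_func> set up earlier):

  // Make the discovered file p an ad hoc member of target g, determining
  // its target type from the extension and falling back to file{}.
  //
  pair<const file&, bool> r (
    dyndep_rule::inject_adhoc_group_member ("output",
                                            a, bs, g,
                                            move (p),
                                            map_ext,
                                            file::static_type));
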
diff --git a/libbuild2/file-cache.cxx b/libbuild2/file-cache.cxx
index 1c1424f..caaf40c 100644
--- a/libbuild2/file-cache.cxx
+++ b/libbuild2/file-cache.cxx
@@ -28,6 +28,8 @@ namespace build2
if (!comp_path_.empty ())
try_rmfile_ignore_error (comp_path_);
+ // Note: state remains uninit until write::close().
+
pin ();
return write (*this);
}
diff --git a/libbuild2/file-cache.hxx b/libbuild2/file-cache.hxx
index d6904ed..98c2b67 100644
--- a/libbuild2/file-cache.hxx
+++ b/libbuild2/file-cache.hxx
@@ -92,7 +92,12 @@ namespace build2
// to the noop implementation.
//
explicit
- file_cache (bool compress = true);
+ file_cache (bool compress);
+
+ file_cache () = default; // Create uninitialized instance.
+
+ void
+ init (bool compress);
class entry;
@@ -114,9 +119,9 @@ namespace build2
// Move-to-NULL-only type.
//
- write (write&&);
+ write (write&&) noexcept;
write (const write&) = delete;
- write& operator= (write&&);
+ write& operator= (write&&) noexcept;
write& operator= (const write&) = delete;
~write ();
@@ -140,9 +145,9 @@ namespace build2
// Move-to-NULL-only type.
//
- read (read&&);
+ read (read&&) noexcept;
read (const read&) = delete;
- read& operator= (read&&);
+ read& operator= (read&&) noexcept;
read& operator= (const read&) = delete;
~read ();
@@ -203,9 +208,9 @@ namespace build2
// Move-to-NULL-only type.
//
- entry (entry&&);
+ entry (entry&&) noexcept;
entry (const entry&) = delete;
- entry& operator= (entry&&);
+ entry& operator= (entry&&) noexcept;
entry& operator= (const entry&) = delete;
~entry ();
diff --git a/libbuild2/file-cache.ixx b/libbuild2/file-cache.ixx
index 8385c90..99be5ad 100644
--- a/libbuild2/file-cache.ixx
+++ b/libbuild2/file-cache.ixx
@@ -65,26 +65,30 @@ namespace build2
}
inline file_cache::entry::
- entry (entry&& e)
+ entry (entry&& e) noexcept
: temporary (e.temporary),
state_ (e.state_),
path_ (move (e.path_)),
comp_path_ (move (e.comp_path_)),
pin_ (e.pin_)
{
+ e.state_ = null;
}
inline file_cache::entry& file_cache::entry::
- operator= (entry&& e)
+ operator= (entry&& e) noexcept
{
if (this != &e)
{
assert (state_ == null);
+
temporary = e.temporary;
state_ = e.state_;
path_ = move (e.path_);
comp_path_ = move (e.comp_path_);
pin_ = e.pin_;
+
+ e.state_ = null;
}
return *this;
}
@@ -105,14 +109,14 @@ namespace build2
}
inline file_cache::write::
- write (write&& e)
+ write (write&& e) noexcept
: entry_ (e.entry_)
{
e.entry_ = nullptr;
}
inline file_cache::write& file_cache::write::
- operator= (write&& e)
+ operator= (write&& e) noexcept
{
if (this != &e)
{
@@ -132,14 +136,14 @@ namespace build2
}
inline file_cache::read::
- read (read&& e)
+ read (read&& e) noexcept
: entry_ (e.entry_)
{
e.entry_ = nullptr;
}
inline file_cache::read& file_cache::read::
- operator= (read&& e)
+ operator= (read&& e) noexcept
{
if (this != &e)
{
@@ -173,9 +177,15 @@ namespace build2
: string ();
}
+ inline void file_cache::
+ init (bool compress)
+ {
+ compress_ = compress;
+ }
+
inline file_cache::
file_cache (bool compress)
- : compress_ (compress)
{
+ init (compress);
}
}
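
A small sketch of the two-step initialization this change enables (useful when the compression setting only becomes known after the instance has been constructed, for example, as a context member):

  file_cache fc;             // Uninitialized instance (not usable yet).

  // ... determine configuration ...

  fc.init (true /* compress */);
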
diff --git a/libbuild2/file.cxx b/libbuild2/file.cxx
index 4cf6d82..c0957ad 100644
--- a/libbuild2/file.cxx
+++ b/libbuild2/file.cxx
@@ -4,9 +4,11 @@
#include <libbuild2/file.hxx>
#include <cerrno>
+#include <cstring> // strlen()
#include <iomanip> // left, setw()
#include <sstream>
+#include <libbuild2/rule.hxx>
#include <libbuild2/scope.hxx>
#include <libbuild2/target.hxx>
#include <libbuild2/context.hxx>
@@ -28,6 +30,8 @@ namespace build2
{
// Standard and alternative build file/directory naming schemes.
//
+ extern const dir_path std_export_dir;
+ extern const dir_path alt_export_dir;
// build:
@@ -35,6 +39,7 @@ namespace build2
const dir_path std_root_dir (dir_path (std_build_dir) /= "root");
const dir_path std_bootstrap_dir (dir_path (std_build_dir) /= "bootstrap");
const dir_path std_build_build_dir (dir_path (std_build_dir) /= "build");
+ const dir_path std_export_dir (dir_path (std_build_dir) /= "export");
const path std_root_file (std_build_dir / "root.build");
const path std_bootstrap_file (std_build_dir / "bootstrap.build");
@@ -52,6 +57,7 @@ namespace build2
const dir_path alt_root_dir (dir_path (alt_build_dir) /= "root");
const dir_path alt_bootstrap_dir (dir_path (alt_build_dir) /= "bootstrap");
const dir_path alt_build_build_dir (dir_path (alt_build_dir) /= "build");
+ const dir_path alt_export_dir (dir_path (alt_build_dir) /= "export");
const path alt_root_file (alt_build_dir / "root.build2");
const path alt_bootstrap_file (alt_build_dir / "bootstrap.build2");
@@ -218,7 +224,7 @@ namespace build2
// Checking for plausibility feels expensive since we have to recursively
// traverse the directory tree. Note, however, that if the answer is
// positive, then shortly after we will be traversing this tree anyway and
- // presumably this time getting the data from the cash (we don't really
+ // presumably this time getting the data from the cache (we don't really
// care about the negative answer since this is a degenerate case).
//
optional<path> bf;
@@ -306,7 +312,7 @@ namespace build2
{
tracer trace ("source_once");
- if (!once.buildfiles.insert (bf).second)
+ if (!once.root_extra->insert_buildfile (bf))
{
l5 ([&]{trace << "skipping already sourced " << bf;});
return false;
@@ -357,7 +363,7 @@ namespace build2
//
try
{
- for (const dir_entry& de: dir_iterator (d, false /* ignore_dangling */))
+ for (const dir_entry& de: dir_iterator (d, dir_iterator::no_follow))
{
// If this is a link, then type() will try to stat() it. And if the
// link is dangling or points to something inaccessible, it will fail.
@@ -522,10 +528,14 @@ namespace build2
pair<scope&, scope*>
switch_scope (scope& root, const dir_path& out_base, bool proj)
{
+ context& ctx (root.ctx);
+
+ assert (ctx.phase == run_phase::load);
+
// First, enter the scope into the map and see if it is in any project. If
// it is not, then there is nothing else to do.
//
- auto i (root.ctx.scopes.rw (root).insert_out (out_base));
+ auto i (ctx.scopes.rw (root).insert_out (out_base));
scope& base (*i->second.front ());
scope* rs (nullptr);
@@ -546,7 +556,7 @@ namespace build2
// Switch to the new root scope.
//
- if (rs != &root)
+ if (rs != &root && !rs->root_extra->loaded)
load_root (*rs); // Load new root(s) recursively.
// Now we can figure out src_base and finish setting the scope.
@@ -581,37 +591,37 @@ namespace build2
fail << "variable out_root expected as first line in " << f << endf;
}
+ scope::root_extra_type::
+ root_extra_type (scope& root, bool a)
+ : altn (a),
+ loaded (false),
+
+ build_ext (a ? alt_build_ext : std_build_ext),
+ build_dir (a ? alt_build_dir : std_build_dir),
+ buildfile_file (a ? alt_buildfile_file : std_buildfile_file),
+ buildignore_file (a ? alt_buildignore_file : std_buildignore_file),
+ root_dir (a ? alt_root_dir : std_root_dir),
+ bootstrap_dir (a ? alt_bootstrap_dir : std_bootstrap_dir),
+ build_build_dir (a ? alt_build_build_dir : std_build_build_dir),
+ bootstrap_file (a ? alt_bootstrap_file : std_bootstrap_file),
+ root_file (a ? alt_root_file : std_root_file),
+ export_file (a ? alt_export_file : std_export_file),
+ src_root_file (a ? alt_src_root_file : std_src_root_file),
+ out_root_file (a ? alt_out_root_file : std_out_root_file),
+
+ var_pool (&root.ctx, &root.ctx.var_pool.rw (root), nullptr)
+ {
+ root.var_pool_ = &var_pool;
+ }
+
static void
setup_root_extra (scope& root, optional<bool>& altn)
{
assert (altn && root.root_extra == nullptr);
- bool a (*altn);
-
- root.root_extra.reset (
- new scope::root_extra_type {
- nullopt /* project */,
- nullopt /* amalgamation */,
- nullopt /* subprojects */,
- a,
- a ? alt_build_ext : std_build_ext,
- a ? alt_build_dir : std_build_dir,
- a ? alt_buildfile_file : std_buildfile_file,
- a ? alt_buildignore_file : std_buildignore_file,
- a ? alt_root_dir : std_root_dir,
- a ? alt_bootstrap_dir : std_bootstrap_dir,
- a ? alt_build_build_dir : std_build_build_dir,
- a ? alt_bootstrap_file : std_bootstrap_file,
- a ? alt_root_file : std_root_file,
- a ? alt_export_file : std_export_file,
- a ? alt_src_root_file : std_src_root_file,
- a ? alt_out_root_file : std_out_root_file,
- {}, /* meta_operations */
- {}, /* operations */
- {}, /* modules */
- {}, /* override_cache */
- {}, /* target_types */
- {}, /* environment */
- ""} /* environment_checksum */);
+
+ context& ctx (root.ctx);
+
+ root.root_extra.reset (new scope::root_extra_type (root, *altn));
// Enter built-in meta-operation and operation names. Loading of
// modules (via the src bootstrap; see below) can result in
@@ -621,9 +631,9 @@ namespace build2
root.insert_meta_operation (perform_id, mo_perform);
root.insert_meta_operation (info_id, mo_info);
- root.insert_operation (default_id, op_default);
- root.insert_operation (update_id, op_update);
- root.insert_operation (clean_id, op_clean);
+ root.insert_operation (default_id, op_default, nullptr);
+ root.insert_operation (update_id, op_update, ctx.var_update);
+ root.insert_operation (clean_id, op_clean, ctx.var_clean);
}
value&
@@ -842,10 +852,26 @@ namespace build2
try
{
- for (const dir_entry& de: dir_iterator (d, true /* ignore_dangling */))
+ // A subproject could plausibly be a symlink with the link target, for
+ // example, residing in a git submodule. Considering that, it makes sense
+ // to warn about dangling symlinks.
+ //
+ for (const dir_entry& de:
+ dir_iterator (d, dir_iterator::detect_dangling))
{
if (de.type () != entry_type::directory)
+ {
+ if (de.type () == entry_type::unknown)
+ {
+ bool sl (de.ltype () == entry_type::symlink);
+
+ warn << "skipping "
+ << (sl ? "dangling symlink" : "inaccessible entry") << ' '
+ << d / de.path ();
+ }
+
continue;
+ }
dir_path sd (d / path_cast<dir_path> (de.path ()));
@@ -913,7 +939,9 @@ namespace build2
}
void
- bootstrap_src (scope& rs, optional<bool>& altn)
+ bootstrap_src (scope& rs, optional<bool>& altn,
+ optional<dir_path> aovr,
+ bool sovr)
{
tracer trace ("bootstrap_src");
@@ -943,13 +971,24 @@ namespace build2
rs.root_extra->project = nullptr;
rs.root_extra->amalgamation = nullptr;
rs.root_extra->subprojects = nullptr;
+
+ // See GH issue #322.
+ //
+#if 0
+ assert (!aovr || aovr->empty ());
+#else
+ if (!(!aovr || aovr->empty ()))
+ fail << "amalgamation directory " << *aovr << " specified for simple "
+ << "project " << src_root <<
+ info << "see https://github.com/build2/build2/issues/322 for details";
+#endif
}
// We assume that bootstrap out cannot load this file explicitly. It
// feels wrong to allow this since that makes the whole bootstrap
// process hard to reason about. But we may try to bootstrap the same
// root scope multiple times.
//
- else if (rs.buildfiles.insert (bf).second)
+ else if (rs.root_extra->insert_buildfile (bf))
{
// Extract the project name and amalgamation variable value so that
// we can make them available while loading bootstrap.build.
@@ -985,7 +1024,13 @@ namespace build2
const project_name pn (cast<project_name> (move (*pv)));
rs.root_extra->project = &pn;
- if (av && (av->null || av->empty ()))
+ // @@ We will still have original values in the variables during
+ // bootstrap. Not sure what we can do about that. But it seems
+ // harmless.
+ //
+ if (aovr)
+ rs.root_extra->amalgamation = aovr->empty () ? nullptr : &*aovr;
+ else if (av && (av->null || av->empty ()))
rs.root_extra->amalgamation = nullptr;
{
@@ -1005,6 +1050,13 @@ namespace build2
fail << "variable " << *ctx.var_amalgamation << " expected as a "
<< "second line in " << bf;
}
+
+ // Replace the value if overridden.
+ //
+ // Note that root_extra::amalgamation will be re-pointed below.
+ //
+ if (aovr)
+ rs.vars.assign (ctx.var_amalgamation) = move (*aovr);
}
else
{
@@ -1071,6 +1123,12 @@ namespace build2
// not been configured. In this case falling through is what we want.
}
}
+ else if (v)
+ {
+ if (cast<dir_path> (v).absolute ())
+ fail << "absolute directory in variable " << *ctx.var_amalgamation
+ << " value";
+ }
// Do additional checks if the outer root could be our amalgamation.
//
@@ -1129,6 +1187,14 @@ namespace build2
auto rp (rs.vars.insert (*ctx.var_subprojects)); // Set NULL by default.
value& v (rp.first);
+ if (!sovr)
+ {
+ if (rp.second)
+ rp.second = false; // Keep NULL.
+ else
+ v = nullptr; // Make NULL.
+ }
+
if (rp.second)
{
// No subprojects set so we need to figure out if there are any.
@@ -1285,9 +1351,9 @@ namespace build2
// Call module's post-boot functions.
//
- for (size_t i (0); i != root.root_extra->modules.size (); ++i)
+ for (size_t i (0); i != root.root_extra->loaded_modules.size (); ++i)
{
- module_state& s (root.root_extra->modules[i]);
+ module_state& s (root.root_extra->loaded_modules[i]);
if (s.boot_post != nullptr)
boot_post_module (root, s);
@@ -1328,7 +1394,7 @@ namespace build2
}
void
- create_bootstrap_outer (scope& root)
+ create_bootstrap_outer (scope& root, bool subp)
{
context& ctx (root.ctx);
@@ -1376,7 +1442,7 @@ namespace build2
setup_root (rs, forwarded (root, out_root, v.as<dir_path> (), altn));
bootstrap_pre (rs, altn);
- bootstrap_src (rs, altn);
+ bootstrap_src (rs, altn, nullopt, subp);
// bootstrap_post() delayed until after create_bootstrap_outer().
}
else
@@ -1387,7 +1453,7 @@ namespace build2
rs.assign (ctx.var_forwarded) = true; // Only upgrade (see main()).
}
- create_bootstrap_outer (rs);
+ create_bootstrap_outer (rs, subp);
if (!bstrapped)
bootstrap_post (rs);
@@ -1475,22 +1541,19 @@ namespace build2
}
void
- load_root (scope& root)
+ load_root (scope& root,
+ const function<void (parser&)>& pre,
+ const function<void (parser&)>& post)
{
tracer trace ("load_root");
- context& ctx (root.ctx);
-
- const dir_path& out_root (root.out_path ());
- const dir_path& src_root (root.src_path ());
-
- // As an optimization, check if we have already loaded root.build. If
- // that's the case, then we have already been called for this project.
- //
- path f (src_root / root.root_extra->root_file);
-
- if (root.buildfiles.find (f) != root.buildfiles.end ())
+ if (root.root_extra->loaded)
+ {
+ assert (pre == nullptr && post == nullptr);
return;
+ }
+
+ context& ctx (root.ctx);
if (ctx.no_external_modules)
fail << "attempt to load project " << root << " after skipped loading "
@@ -1499,18 +1562,19 @@ namespace build2
// First load outer roots, if any.
//
if (scope* rs = root.parent_scope ()->root_scope ())
- load_root (*rs);
+ if (!rs->root_extra->loaded)
+ load_root (*rs);
// Finish off initializing bootstrapped modules (before mode).
//
// Note that init() can load additional modules invalidating iterators.
//
auto init_modules =
- [&root, n = root.root_extra->modules.size ()] (module_boot_init v)
+ [&root, n = root.root_extra->loaded_modules.size ()] (module_boot_init v)
{
for (size_t i (0); i != n; ++i)
{
- module_state& s (root.root_extra->modules[i]);
+ module_state& s (root.root_extra->loaded_modules[i]);
if (s.boot_init && *s.boot_init == v)
init_module (root, root, s.name, s.loc);
@@ -1530,6 +1594,11 @@ namespace build2
// Load hooks and root.build.
//
+ const dir_path& out_root (root.out_path ());
+ const dir_path& src_root (root.src_path ());
+
+ path f (src_root / root.root_extra->root_file);
+
// We can load the pre hooks before finishing off loading the bootstrapped
// modules (which, in case of config would load config.build) or after and
// one can come up with a plausible use-case for either approach. Note,
@@ -1545,10 +1614,22 @@ namespace build2
//
parser p (ctx, load_stage::root);
+ if (pre != nullptr)
+ {
+ pre (p);
+ p.reset ();
+ }
+
if (he) {source_hooks (p, root, hd, true /* pre */); p.reset ();}
if (fe) {source_once (p, root, root, f, root);}
if (he) {p.reset (); source_hooks (p, root, hd, false /* pre */);}
+ if (post != nullptr)
+ {
+ p.reset ();
+ post (p);
+ }
+
// Finish off initializing bootstrapped modules (after mode).
//
{
@@ -1556,12 +1637,19 @@ namespace build2
init_modules (module_boot_init::after);
}
- // Print the project configuration report, similar to how we do it in
+ // Print the project configuration report(s), similar to how we do it in
// build system modules.
//
- if (!p.config_report.empty () && verb >= (p.config_report_new ? 2 : 3))
+ using config_report = parser::config_report;
+
+ const project_name* proj (nullptr); // Resolve lazily.
+ for (const config_report& cr: p.config_reports)
{
- const project_name& proj (named_project (root)); // Can be empty.
+ if (verb < (cr.new_value ? 2 : 3))
+ continue;
+
+ if (proj == nullptr)
+ proj = &named_project (root); // Can be empty.
// @@ TODO/MAYBE:
//
@@ -1579,46 +1667,74 @@ namespace build2
// config @/tmp/tests
// libhello.tests.remote true
//
- string stem (!proj.empty () ? '.' + proj.variable () + '.' : string ());
+ // If the module name is not empty then it means the config variables
+ // are from the imported project and so we use that for <project>.
+ //
+ string stem (!cr.module.empty ()
+ ? '.' + cr.module.variable () + '.'
+ : (!proj->empty ()
+ ? '.' + proj->variable () + '.'
+ : string ()));
- // Calculate max name length.
+ // Return the variable name for printing.
//
- size_t pad (10);
- for (const pair<lookup, string>& lf: p.config_report)
+ auto name = [&stem] (const config_report::value& cv) -> const char*
{
- lookup l (lf.first);
+ lookup l (cv.val);
- size_t n;
if (l.value == nullptr)
{
- n = l.var->name.size ();
+ if (cv.org.empty ())
+ return l.var->name.c_str ();
+
+ // This case may or may not have the prefix.
+ //
+ size_t p, n (
+ !stem.empty ()
+ ? (p = cv.org.find (stem)) != string::npos ? p + stem.size () : 0
+ : cv.org.compare (0, 7, "config.") == 0 ? 7 : 0);
+
+ return cv.org.c_str () + n;
}
else
{
+ assert (cv.org.empty ()); // Sanity check.
+
size_t p (!stem.empty ()
? l.var->name.find (stem) + stem.size ()
: 7); // "config."
- n = l.var->name.size () - p;
+
+ return l.var->name.c_str () + p;
}
+ };
+
+ // Calculate max name length.
+ //
+ size_t pad (10);
+ for (const config_report::value& cv: cr.values)
+ {
+ size_t n (strlen (name (cv)));
if (n > pad)
pad = n;
}
// Use the special `config` module name (which doesn't have its own
- // report) for project configuration.
+ // report) for the project's own configuration.
//
diag_record dr (text);
- dr << "config " << proj << '@' << root;
+ dr << (cr.module.empty () ? "config" : cr.module.string ().c_str ())
+ << ' ' << *proj << '@' << root;
names storage;
- for (const pair<lookup, string>& lf: p.config_report)
+ for (const config_report::value& cv: cr.values)
{
- lookup l (lf.first);
- const string& f (lf.second);
+ lookup l (cv.val);
+ const string& f (cv.fmt);
// If the report variable has been overridden, now is the time to
- // lookup its value.
+ // lookup its value. Note: see also the name() lambda above if
+ // changing anything here.
//
string n;
if (l.value == nullptr)
@@ -1634,26 +1750,30 @@ namespace build2
n = string (l.var->name, p);
}
+ const char* pn (name (cv)); // Print name.
+
dr << "\n ";
- if (const value& v = *l)
+ if (l)
{
storage.clear ();
- auto ns (reverse (v, storage));
+ auto ns (reverse (*l, storage, true /* reduce */));
if (f == "multiline")
{
- dr << n;
+ dr << pn;
for (auto& n: ns)
dr << "\n " << n;
}
else
- dr << left << setw (static_cast<int> (pad)) << n << ' ' << ns;
+ dr << left << setw (static_cast<int> (pad)) << pn << ' ' << ns;
}
else
- dr << left << setw (static_cast<int> (pad)) << n << " [null]";
+ dr << left << setw (static_cast<int> (pad)) << pn << " [null]";
}
}
+
+ root.root_extra->loaded = true;
}
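
To illustrate the name shortening performed by the stem/name logic above (the project name libhello and the variables are hypothetical): for a named project the stem is ".<project>." and only the part after it is printed, while for an unnamed project just the "config." prefix is dropped.

  string proj ("libhello");
  string stem ('.' + proj + '.');              // ".libhello."
  string var ("config.libhello.tests.remote");

  size_t p (var.find (stem) + stem.size ());
  string printed (var, p);                     // "tests.remote"

  string var2 ("config.tests.remote");         // Unnamed project.
  string printed2 (var2, 7);                   // Drop "config." -> "tests.remote"
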
scope&
@@ -1690,7 +1810,8 @@ namespace build2
if (load)
{
- load_root (rs);
+ if (!rs.root_extra->loaded)
+ load_root (rs);
setup_base (i, out_root, src_root); // Setup as base.
}
@@ -1812,7 +1933,9 @@ namespace build2
try
{
// Note: not using run_*() functions since need to be able to suppress
- // all errors, including inability to exec.
+ // all errors, including abnormal, inability to exec, etc., in case of
+ // optional import. Also, no need to buffer diagnostics since we are in
+ // the serial load.
//
if (verb >= 3)
print_process (args);
@@ -1872,10 +1995,19 @@ namespace build2
return r;
if (!opt)
- error (loc) << "invalid metadata signature in " << args[0]
- << " output" <<
+ {
+ diag_record dr;
+ dr << error (loc) << "invalid metadata signature in " << args[0]
+ << " output" <<
info << "expected '" << s << "'";
+ if (verb >= 1 && verb <= 2)
+ {
+ dr << info << "command line: ";
+ print_process (dr, args);
+ }
+ }
+
goto fail;
}
@@ -1891,16 +2023,27 @@ namespace build2
if (pr.wait ())
{
if (!opt)
- error (loc) << "unable to read metadata from " << args[0];
+ error (loc) << "io error reading metadata from " << args[0];
}
else
{
// The child process presumably issued diagnostics but if it didn't,
- // the result will be very confusing. So let's issue something
- // generic for good measure.
+ // the result will be very confusing. So let's issue something generic
+ // for good measure. But also make it consistent with diagnostics
+ // issued by run_finish().
//
if (!opt)
- error (loc) << "unable to extract metadata from " << args[0];
+ {
+ diag_record dr;
+ dr << error (loc) << "unable to extract metadata from " << args[0] <<
+ info << "process " << args[0] << " " << *pr.exit;
+
+ if (verb >= 1 && verb <= 2)
+ {
+ dr << info << "command line: ";
+ print_process (dr, args);
+ }
+ }
}
goto fail;
@@ -1916,8 +2059,7 @@ namespace build2
goto fail;
}
- fail:
-
+ fail:
if (opt)
{
metadata_cache.insert (pp.effect_string (), true);
@@ -1946,15 +2088,13 @@ namespace build2
&t);
}
- // Suggest appropriate ways to import the specified target (as type and
- // name) from the specified project.
- //
- static void
+ void
import_suggest (const diag_record& dr,
const project_name& pn,
- const target_type& tt,
+ const target_type* tt,
const string& tn,
- const char* qual = nullptr)
+ bool rule_hint,
+ const char* qual)
{
string pv (pn.variable ());
@@ -1966,15 +2106,19 @@ namespace build2
// Suggest ad hoc import but only if it's a path-based target (doing it
// for lib{} is very confusing).
//
- if (tt.is_a<path_target> ())
+ if (tt != nullptr && tt->is_a<path_target> ())
{
- string v (tt.is_a<exe> () && (pv == tn || pn == tn)
+ string v (tt->is_a<exe> () && (pv == tn || pn == tn)
? "config." + pv
- : "config.import." + pv + '.' + tn + '.' + tt.name);
+ : "config.import." + pv + '.' + tn + '.' + tt->name);
dr << info << "or use " << v << " configuration variable to specify "
<< "its " << (qual != nullptr ? qual : "") << "path";
}
+
+ if (rule_hint)
+ dr << info << "or use rule_hint attribute to specify a rule that can "
+ << "find this target";
}
// Return the processed target name as well as the project directory, if
@@ -1989,6 +2133,9 @@ namespace build2
// Return empty name if an ad hoc import resulted in a NULL target (only
// allowed if optional is true).
//
+ // Note that this function has a side effect of potentially marking some
+ // config.import.* variables as used.
+ //
pair<name, optional<dir_path>>
import_search (bool& new_value,
scope& ibase,
@@ -2020,6 +2167,9 @@ namespace build2
//
// 4. Normal import.
//
+ // @@ PERF: in quite a few places (local, subproject) we could have
+ // returned the scope and save on bootstrap in import_load().
+ //
if (tgt.unqualified ())
{
if (tgt.directory () && tgt.relative ())
@@ -2027,6 +2177,8 @@ namespace build2
if (tgt.absolute ())
{
+ // Ad hoc import.
+ //
// Actualize the directory to be analogous to the config.import.<proj>
// case (which is of abs_dir_path type).
//
@@ -2043,7 +2195,7 @@ namespace build2
fail (loc) << "project-local importation of target " << tgt
<< " from an unnamed project";
- tgt.proj = pn;
+ tgt.proj = pn; // Reduce to normal import.
return make_pair (move (tgt), optional<dir_path> (iroot.out_path ()));
}
@@ -2075,7 +2227,9 @@ namespace build2
// over anything that we may discover. In particular, we will prefer it
// over any bundled subprojects.
//
- auto& vp (iroot.var_pool ());
+ // Note: go straight for the public variable pool.
+ //
+ auto& vp (iroot.var_pool (true /* public */));
using config::lookup_config;
@@ -2245,7 +2399,8 @@ namespace build2
auto df = make_diag_frame (
[&proj, tt, &on] (const diag_record& dr)
{
- import_suggest (dr, proj, *tt, on, "alternative ");
+ import_suggest (
+ dr, proj, tt, on, false, "alternative ");
});
md = extract_metadata (e->process_path (),
@@ -2368,6 +2523,8 @@ namespace build2
{
tracer trace ("import_load");
+ uint64_t metav (meta ? 1 : 0); // Metadata version.
+
// We end up here in two cases: Ad hoc import, in which case name is
// unqualified and absolute and path is a base, not necessarily root. And
// normal import, in which case name must be project-qualified and path is
@@ -2430,14 +2587,51 @@ namespace build2
}
}
+ // First check the cache.
+ //
+ using import_key = context::import_key;
+
+ auto cache_find = [&ctx, &tgt, metav] (dir_path& out_root) ->
+ const pair<names, const scope&>*
+ {
+ import_key k {move (out_root), move (tgt), metav};
+
+ auto i (ctx.import_cache.find (k));
+ if (i != ctx.import_cache.end ())
+ return &i->second;
+
+ out_root = move (k.out_root);
+ tgt = move (k.target);
+
+ return nullptr;
+ };
+
+ if (proj)
+ {
+ if (const auto* r = cache_find (out_root))
+ return *r;
+ }
+
+ dir_path cache_out_root;
+
// Clear current project's environment.
//
auto_project_env penv (nullptr);
+ // Note: this loop does at most two iterations.
+ //
for (const scope* proot (nullptr); ; proot = root)
{
bool top (proot == nullptr);
+ // Check the cache for the subproject.
+ //
+ if (!top && proj)
+ {
+ if (const auto* r = cache_find (out_root))
+ return *r;
+ }
+
root = create_root (ctx, out_root, src_root)->second.front ();
bool bstrapped (bootstrapped (*root));
@@ -2516,6 +2710,8 @@ namespace build2
if (i != ps->end ())
{
+ cache_out_root = move (out_root);
+
const dir_path& d ((*i).second);
altn = nullopt;
out_root = root->out_path () / d;
@@ -2527,9 +2723,72 @@ namespace build2
fail (loc) << out_root << " is not out_root for " << *proj;
}
+ // Buildfile importation is quite different so handle it separately.
+ //
+ // Note that we don't need to load the project in this case.
+ //
+ // @@ For now we don't out-qualify the resulting target to be able to
+ // re-import it ad hoc (there is currently no support for out-qualified
+ // ad hoc import). Feels like this should be harmless since it's just a
+ // glorified path to a static file that nobody is actually going to use
+ // as a target (e.g., to depend upon).
+ //
+ if (tgt.type == "buildfile")
+ {
+ auto add_ext = [&altn] (string& n)
+ {
+ if (path_traits::find_extension (n) == string::npos)
+ {
+ if (n != (*altn ? alt_buildfile_file : std_buildfile_file).string ())
+ {
+ n += ".";
+ n += *altn ? alt_build_ext : std_build_ext;
+ }
+ }
+ };
+
+ if (proj)
+ {
+ name n;
+
+ if (src_root.empty ())
+ src_root = root->src_path ();
+
+ n.dir = move (src_root);
+ n.dir /= *altn ? alt_export_dir : std_export_dir;
+ if (!tgt.dir.empty ())
+ {
+ n.dir /= tgt.dir;
+ n.dir.normalize ();
+ }
+
+ n.type = tgt.type;
+ n.value = tgt.value;
+ add_ext (n.value);
+
+ pair<names, const scope&> r (names {move (n)}, *root);
+
+ // Cache.
+ //
+ if (cache_out_root.empty ())
+ cache_out_root = move (out_root);
+
+ ctx.import_cache.emplace (
+ import_key {move (cache_out_root), move (tgt), metav}, r);
+
+ return r;
+ }
+ else
+ {
+ add_ext (tgt.value);
+ return pair<names, const scope&> (names {move (tgt)}, *root);
+ }
+ }
+
// Load the imported root scope.
//
- load_root (*root);
+ if (!root->root_extra->loaded)
+ load_root (*root);
// If this is a normal import, then we go through the export stub.
//
@@ -2544,6 +2803,12 @@ namespace build2
// "Pass" the imported project's roots to the stub.
//
+ if (cache_out_root.empty ())
+ cache_out_root = out_root;
+
+ if (src_root.empty ())
+ src_root = root->src_path ();
+
ts.assign (ctx.var_out_root) = move (out_root);
ts.assign (ctx.var_src_root) = move (src_root);
@@ -2559,7 +2824,7 @@ namespace build2
// Pass the metadata compatibility version in import.metadata.
//
if (meta)
- ts.assign (ctx.var_import_metadata) = uint64_t (1);
+ ts.assign (ctx.var_import_metadata) = metav;
// Load the export stub. Note that it is loaded in the context of the
// importing project, not the imported one. The export stub will
@@ -2574,7 +2839,8 @@ namespace build2
l5 ([&]{trace << "importing " << es;});
// @@ Should we verify these are all unqualified names? Or maybe there
- // is a use-case for the export stub to return a qualified name?
+ // is a use-case for the export stub to return a qualified name? E.g.,
+ // re-export?
//
names v;
{
@@ -2585,7 +2851,7 @@ namespace build2
});
parser p (ctx);
- v = p.parse_export_stub (ifs, path_name (es), gs, ts);
+ v = p.parse_export_stub (ifs, path_name (es), *root, gs, ts);
}
// If there were no export directive executed in an export stub,
@@ -2595,7 +2861,14 @@ namespace build2
fail (loc) << "target " << tgt << " is not exported by project "
<< *proj;
- return pair<names, const scope&> (move (v), *root);
+ pair<names, const scope&> r (move (v), *root);
+
+ // Cache.
+ //
+ ctx.import_cache.emplace (
+ import_key {move (cache_out_root), move (tgt), metav}, r);
+
+ return r;
}
catch (const io_error& e)
{
@@ -2652,10 +2925,39 @@ namespace build2
}
}
- pair<names, import_kind>
+ const target_type&
+ import_target_type (scope& root,
+ const scope& iroot, const string& n,
+ const location& l)
+ {
+ // NOTE: see similar code in parser::parse_define().
+
+ const target_type* tt (iroot.find_target_type (n));
+ if (tt == nullptr)
+ fail (l) << "unknown imported target type " << n << " in project "
+ << iroot;
+
+ auto p (root.root_extra->target_types.insert (*tt));
+
+ if (!p.second && &p.first.get () != tt)
+ fail (l) << "imported target type " << n << " already defined in project "
+ << root;
+
+ return *tt;
+ }
+
+ static names
+ import2_buildfile (context&, names&&, bool, const location&);
+
+ static const target*
+ import2 (context&, const scope&, names&,
+ const string&, bool, const optional<string>&, bool,
+ const location&);
+
+ import_result<scope>
import (scope& base,
name tgt,
- bool ph2,
+ const optional<string>& ph2,
bool opt,
bool metadata,
const location& loc)
@@ -2682,7 +2984,10 @@ namespace build2
import_result<target> r (
import_direct (base, move (tgt), ph2, opt, metadata, loc));
- return make_pair (move (r.name), r.kind);
+ return import_result<scope> {
+ r.target != nullptr ? r.target->base_scope ().root_scope () : nullptr,
+ move (r.name),
+ r.kind};
}
pair<name, optional<dir_path>> r (
@@ -2698,6 +3003,7 @@ namespace build2
if (!r.second || r.second->empty ())
{
names ns;
+ const target* t (nullptr);
if (r.first.empty ())
{
@@ -2713,17 +3019,25 @@ namespace build2
//
if (ns.back ().qualified ())
{
- if (ph2)
+ if (ns.back ().type == "buildfile")
+ {
+ assert (ph2);
+ ns = import2_buildfile (ctx, move (ns), opt && !r.second, loc);
+ }
+ else if (ph2)
{
// This is tricky: we only want the optional semantics for the
// fallback case.
//
- if (const target* t = import (ctx,
- base.find_prerequisite_key (ns, loc),
- opt && !r.second /* optional */,
- nullopt /* metadata */,
- false /* existing */,
- loc))
+ t = import2 (ctx,
+ base, ns,
+ *ph2,
+ opt && !r.second /* optional */,
+ nullopt /* metadata */,
+ false /* existing */,
+ loc);
+
+ if (t != nullptr)
{
// Note that here r.first was still project-qualified and we
// have no choice but to call as_name(). This shouldn't cause
@@ -2739,40 +3053,99 @@ namespace build2
}
}
- return make_pair (
+ return import_result<scope> {
+ t != nullptr ? t->base_scope ().root_scope () : nullptr,
move (ns),
- r.second.has_value () ? import_kind::adhoc : import_kind::fallback);
+ r.second.has_value () ? import_kind::adhoc : import_kind::fallback};
}
import_kind k (r.first.absolute ()
? import_kind::adhoc
: import_kind::normal);
- return make_pair (
- import_load (base.ctx, move (r), false /* metadata */, loc).first,
- k);
+ pair<names, const scope&> p (
+ import_load (base.ctx, move (r), false /* metadata */, loc));
+
+ return import_result<scope> {&p.second, move (p.first), k};
}
const target*
- import (context& ctx,
- const prerequisite_key& pk,
- bool opt,
- const optional<string>& meta,
- bool exist,
- const location& loc)
+ import2 (context& ctx,
+ const prerequisite_key& pk,
+ const string& hint,
+ bool opt,
+ const optional<string>& meta,
+ bool exist,
+ const location& loc)
{
- tracer trace ("import");
+ tracer trace ("import2");
- assert (!meta || !exist);
+ // Neither hint nor metadata can be requested for existing.
+ //
+ assert (!exist || (!meta && hint.empty ()));
assert (pk.proj);
const project_name& proj (*pk.proj);
- // Target type-specific search.
- //
// Note that if this function returns a target, it should have the
// extension assigned (like the find/insert_target() functions) so that
// as_name() returns a stable name.
+
+ // Rule-specific resolution.
+ //
+ if (!hint.empty ())
+ {
+ assert (pk.scope != nullptr);
+
+ // Note: similar to/inspired by match_rule_impl().
+ //
+ // Search scopes outwards, stopping at the project root.
+ //
+ for (const scope* s (pk.scope);
+ s != nullptr;
+ s = s->root () ? nullptr : s->parent_scope ())
+ {
+ // We only look for rules that are registered for perform(update).
+ //
+ if (const operation_rule_map* om = s->rules[perform_id])
+ {
+ if (const target_type_rule_map* ttm = (*om)[update_id])
+ {
+ // Ignore the target type the rules are registered for (this is
+ // about prerequisite types, not target).
+ //
+ // @@ Note that the same rule could be registered for several
+ // types which means we will keep calling it repeatedly.
+ //
+ for (const auto& p: *ttm)
+ {
+ const name_rule_map& nm (p.second);
+
+ // Filter against the hint.
+ //
+ for (auto p (nm.find_sub (hint)); p.first != p.second; ++p.first)
+ {
+ const string& n (p.first->first);
+ const rule& r (p.first->second);
+
+ auto df = make_diag_frame (
+ [&pk, &n](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while importing " << pk << " using rule "
+ << n;
+ });
+
+ if (const target* t = r.import (pk, meta, loc))
+ return t;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Builtin resolution for certain target types.
//
const target_key& tk (pk.tk);
const target_type& tt (*tk.type);
@@ -2825,8 +3198,7 @@ namespace build2
auto df = make_diag_frame (
[&proj, &tt, &tk] (const diag_record& dr)
{
- import_suggest (
- dr, proj, tt, *tk.name, "alternative ");
+ import_suggest (dr, proj, &tt, *tk.name, false, "alternative ");
});
if (!(md = extract_metadata (pp, *meta, opt, loc)))
@@ -2835,6 +3207,9 @@ namespace build2
if (!t || *t == nullptr)
{
+ // Note: we need the lock because process_path() call below is not
+ // MT-safe.
+ //
pair<target&, ulock> r (insert_target (trace, ctx, tt, p));
t = &r.first;
@@ -2856,6 +3231,8 @@ namespace build2
return *t;
}
+ // NOTE: see similar code in import2() below if changing anything here.
+
if (opt || exist)
return nullptr;
@@ -2866,7 +3243,137 @@ namespace build2
dr << info << "consider adding its installation location" <<
info << "or explicitly specify its project name";
else
- import_suggest (dr, proj, tt, *tk.name);
+ // Use metadata as proxy for immediate import.
+ //
+ import_suggest (dr, proj, &tt, *tk.name, meta && hint.empty ());
+
+ dr << endf;
+ }
+
+ // As above but with scope/ns instead of pk. This version deals with the
+ // unknown target type case.
+ //
+ static const target*
+ import2 (context& ctx,
+ const scope& base, names& ns,
+ const string& hint,
+ bool opt,
+ const optional<string>& meta,
+ bool exist,
+ const location& loc)
+ {
+ // If we have a rule hint, then it's natural to expect this target type is
+ // known to the importing project. Ditto for project-less import.
+ //
+ const target_type* tt (nullptr);
+ if (hint.empty ())
+ {
+ size_t n;
+ if ((n = ns.size ()) != 0 && n == (ns[0].pair ? 2 : 1))
+ {
+ const name& n (ns.front ());
+
+ if (n.typed () && !n.proj->empty ())
+ {
+ tt = base.find_target_type (n.type);
+
+ if (tt == nullptr)
+ {
+ // A subset of code in the above version of import2().
+ //
+ if (opt || exist)
+ return nullptr;
+
+ diag_record dr;
+ dr << fail (loc) << "unable to import target " << ns;
+ import_suggest (dr, *n.proj, nullptr, string (), meta.has_value ());
+ }
+ }
+ }
+ }
+
+ return import2 (ctx,
+ base.find_prerequisite_key (ns, loc, tt),
+ hint,
+ opt,
+ meta,
+ exist,
+ loc);
+ }
+
+ static names
+ import2_buildfile (context&, names&& ns, bool opt, const location& loc)
+ {
+ tracer trace ("import2_buildfile");
+
+ assert (ns.size () == 1);
+ name n (move (ns.front ()));
+
+ // Our approach doesn't work for targets without a project so let's fail
+ // hard, even if optional.
+ //
+ if (!n.proj || n.proj->empty ())
+ fail (loc) << "unable to import target " << n << " without project name";
+
+ while (!build_install_buildfile.empty ()) // Breakout loop.
+ {
+ path f (build_install_buildfile /
+ dir_path (n.proj->string ()) /
+ n.dir /
+ n.value);
+
+ // See if we need to try with extensions.
+ //
+ bool ext (path_traits::find_extension (n.value) == string::npos &&
+ n.value != std_buildfile_file.string () &&
+ n.value != alt_buildfile_file.string ());
+
+ if (ext)
+ {
+ f += '.';
+ f += std_build_ext;
+ }
+
+ if (!exists (f))
+ {
+ l6 ([&]{trace << "tried " << f;});
+
+ if (ext)
+ {
+ f.make_base ();
+ f += '.';
+ f += alt_build_ext;
+
+ if (!exists (f))
+ {
+ l6 ([&]{trace << "tried " << f;});
+ break;
+ }
+ }
+ else
+ break;
+ }
+
+ // Split the path into the target.
+ //
+ ns = {name (f.directory (), move (n.type), f.leaf ().string ())};
+ return move (ns);
+ }
+
+ if (opt)
+ return names {};
+
+ diag_record dr;
+ dr << fail (loc) << "unable to import target " << n;
+
+ import_suggest (dr, *n.proj, nullptr /* tt */, n.value, false);
+
+ if (build_install_buildfile.empty ())
+ dr << info << "no exported buildfile installation location is "
+ << "configured in build2";
+ else
+ dr << info << "exported buildfile installation location is "
+ << build_install_buildfile;
dr << endf;
}
@@ -2875,7 +3382,7 @@ namespace build2
import_direct (bool& new_value,
scope& base,
name tgt,
- bool ph2,
+ const optional<string>& ph2,
bool opt,
bool metadata,
const location& loc,
@@ -2888,11 +3395,13 @@ namespace build2
l5 ([&]{trace << tgt << " from " << base << " for " << what;});
- assert ((!opt || ph2) && (!metadata || ph2));
+ assert ((!opt || ph2) && (!metadata || ph2) && tgt.type != "buildfile");
context& ctx (base.ctx);
assert (ctx.phase == run_phase::load);
+ scope& root (*base.root_scope ());
+
// Use the original target name as metadata key.
//
auto meta (metadata ? optional<string> (tgt.value) : nullopt);
@@ -2900,6 +3409,12 @@ namespace build2
names ns, rns;
import_kind k;
const target* pt (nullptr);
+ const scope* iroot (nullptr); // Imported root scope.
+
+ // Original project/name as imported for diagnostics.
+ //
+ string oname (meta ? tgt.value : string ());
+ project_name oproj (meta && tgt.proj ? *tgt.proj : project_name ());
pair<name, optional<dir_path>> r (
import_search (new_value,
@@ -2931,12 +3446,13 @@ namespace build2
// This is tricky: we only want the optional semantics for the
// fallback case.
//
- pt = import (ctx,
- base.find_prerequisite_key (ns, loc),
- opt && !r.second,
- meta,
- false /* existing */,
- loc);
+ pt = import2 (ctx,
+ base, ns,
+ *ph2,
+ opt && !r.second,
+ meta,
+ false /* existing */,
+ loc);
}
if (pt == nullptr)
@@ -2953,6 +3469,8 @@ namespace build2
// It's a bit fuzzy in which cases we end up here. So for now we keep
// the original if it's absolute and call as_name() otherwise.
//
+ // @@ TODO: resolve iroot or assume target type should be known?
+ //
if (r.first.absolute ())
rns.push_back (r.first);
@@ -2962,14 +3480,30 @@ namespace build2
else
{
k = r.first.absolute () ? import_kind::adhoc : import_kind::normal;
- rns = ns = import_load (base.ctx, move (r), metadata, loc).first;
+
+ pair<names, const scope&> p (
+ import_load (base.ctx, move (r), metadata, loc));
+
+ rns = ns = move (p.first);
+ iroot = &p.second;
}
if (pt == nullptr)
{
+ // Import (more precisely, alias) the target type into this project
+ // if not known.
+ //
+ const target_type* tt (nullptr);
+ if (iroot != nullptr && !ns.empty ())
+ {
+ const name& n (ns.front ());
+ if (n.typed ())
+ tt = &import_target_type (root, *iroot, n.type, loc);
+ }
+
// Similar logic to perform's search(). Note: modifies ns.
//
- target_key tk (base.find_target_key (ns, loc));
+ target_key tk (base.find_target_key (ns, loc, tt));
pt = ctx.targets.find (tk, trace);
if (pt == nullptr)
fail (loc) << "unknown imported target " << tk;
@@ -2986,10 +3520,20 @@ namespace build2
//
if (meta)
{
+ auto df = make_diag_frame (
+ [&oproj, &oname, &t] (const diag_record& dr)
+ {
+ if (!oproj.empty ())
+ import_suggest (dr, oproj, &t.type (), oname, false, "alternative ");
+ });
+
// The export.metadata value should start with the version followed by
// the metadata variable prefix.
//
- lookup l (t.vars[ctx.var_export_metadata]);
+ // Note: lookup on target, not target::vars since it could come from
+ // the group (think lib{} metadata).
+ //
+ lookup l (t[ctx.var_export_metadata]);
if (l && !l->empty ())
{
const names& ns (cast<names> (l));
@@ -3007,7 +3551,7 @@ namespace build2
catch (const invalid_argument& e)
{
fail (loc) << "invalid metadata version in imported target " << t
- << ": " << e;
+ << ": " << e << endf;
}
if (ver != 1)
@@ -3022,13 +3566,15 @@ namespace build2
const string& pfx (ns[1].value);
- auto& vp (ctx.var_pool.rw ()); // Load phase.
-
// See if we have the stable program name in the <var-prefix>.name
// variable. If its missing, set it to the metadata key (i.e., target
// name as imported) by default.
//
{
+ // Note: go straight for the public variable pool.
+ //
+ auto& vp (ctx.var_pool.rw ()); // Load phase.
+
value& nv (t.assign (vp.insert (pfx + ".name")));
if (!nv)
nv = *meta;
@@ -3039,10 +3585,8 @@ namespace build2
//
if (const auto* e = cast_null<strings> (t.vars[pfx + ".environment"]))
{
- scope& rs (*base.root_scope ());
-
for (const string& v: *e)
- config::save_environment (rs, v);
+ config::save_environment (root, v);
}
}
else
@@ -3052,6 +3596,31 @@ namespace build2
return import_result<target> {pt, move (rns), k};
}
+ path
+ import_buildfile (scope& bs, name n, bool opt, const location& loc)
+ {
+ names r (import (bs,
+ move (n),
+ string () /* phase2 */,
+ opt,
+ false /* metadata */,
+ loc).name);
+
+ path p;
+ if (!r.empty ()) // Optional not found.
+ {
+ // Note: see also parse_import().
+ //
+ assert (r.size () == 1); // See import_load() for details.
+ name& n (r.front ());
+ p = n.dir / n.value; // Should already include extension.
+ }
+ else
+ assert (opt);
+
+ return p;
+ }
+
ostream&
operator<< (ostream& o, const import_result<exe>& r)
{
@@ -3100,13 +3669,23 @@ namespace build2
//
mkdir (d / std_build_dir, verbosity);
+ auto diag = [verbosity] (const path& f)
+ {
+ if (verb >= verbosity)
+ {
+ if (verb >= 2)
+ text << "cat >" << f;
+ else if (verb)
+ print_diag ("save", f);
+ }
+ };
+
// Write build/bootstrap.build.
//
{
path f (d / std_bootstrap_file);
- if (verb >= verbosity)
- text << (verb >= 2 ? "cat >" : "save ") << f;
+ diag (f);
try
{
@@ -3152,8 +3731,7 @@ namespace build2
{
path f (d / std_root_file);
- if (verb >= verbosity)
- text << (verb >= 2 ? "cat >" : "save ") << f;
+ diag (f);
try
{
@@ -3201,8 +3779,7 @@ namespace build2
{
path f (d / std_build_dir / "config.build"); // std_config_file
- if (verb >= verbosity)
- text << (verb >= 2 ? "cat >" : "save ") << f;
+ diag (f);
try
{
@@ -3227,8 +3804,7 @@ namespace build2
{
path f (d / std_buildfile_file);
- if (verb >= verbosity)
- text << (verb >= 2 ? "cat >" : "save ") << f;
+ diag (f);
try
{
diff --git a/libbuild2/file.hxx b/libbuild2/file.hxx
index b47d8dc..36e4c00 100644
--- a/libbuild2/file.hxx
+++ b/libbuild2/file.hxx
@@ -17,7 +17,30 @@
namespace build2
{
class lexer;
-
+ class parser;
+
+ // The following filesystem entries in the build/ subdirectory are reserved
+ // by the build2 core:
+ //
+ // build/ -- build2 core-internal build state (e.g., recipes)
+ // bootstrap/ -- bootstrap state and hooks
+ // bootstrap.build -- bootstrap buildfile
+ // root/ -- root load hooks
+ // root.build -- root buildfile
+ // export.build -- export stub
+ // export/ -- exported buildfiles
+ //
+ // The build/, bootstrap/, root/, and config.build entries are in .gitignore
+ // as generated by bdep-new.
+ //
+ // The rest of the filesystem entries are shared between the project and the
+ // modules that it loads. In particular, if a project loads a module named
+ // <mod>, then the <mod>.build, <mod>/, *.<mod> entries (spelled in any
+ // case) are reserved to this module and should not be used by the project
+ // unless explicitly allowed by the module. By convention, <mod>/build/ is
+ // for module-internal build state (e.g., C++ modules side-build) and is
+ // .gitignore'ed.
+ //
LIBBUILD2_SYMEXPORT extern const dir_path std_build_dir; // build/
// build/root.build
@@ -54,7 +77,7 @@ namespace build2
find_src_root (const dir_path&, optional<bool>& altn);
// The same as above but for project's out. Note that we also check whether
- // a directory happens to be src_root, in case this is an in-tree build with
+ // a directory happens to be src_root, in case this is an in-source build with
// the result returned as the second half of the pair. Note also that if the
// input is normalized/actualized, then the output will be as well.
//
@@ -115,10 +138,11 @@ namespace build2
bool
source_once (scope& root, scope& base, const path&);
- // As above but checks against the specified scope rather than base.
+ // As above but checks against the specified root scope rather than this
+ // root scope.
//
LIBBUILD2_SYMEXPORT bool
- source_once (scope& root, scope& base, const path&, scope& once);
+ source_once (scope& root, scope& base, const path&, scope& once_root);
// Create project's root scope. Only set the src_root variable if the passed
// src_root value is not empty.
@@ -182,8 +206,15 @@ namespace build2
// Bootstrap the project's root scope, the src part.
//
+ // If amalgamation is present, then use the specified directory as the
+ // amalgamation instead of discovering or extracting it from bootstrap.build
+ // (use empty directory to disable amalgamation). If subprojects is false,
+ // then do not discover or extract subprojects.
+ //
LIBBUILD2_SYMEXPORT void
- bootstrap_src (scope& root, optional<bool>& altn);
+ bootstrap_src (scope& root, optional<bool>& altn,
+ optional<dir_path> amalgamation = nullopt,
+ bool subprojects = true);
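+
+ // A sketch of calling this with the new arguments (assumed driver-side
+ // usage, for example, when bootstrapping a temporary or unamalgamated
+ // project):
+ //
+ //   // altn is assumed to have been set by the earlier bootstrap steps
+ //   // (e.g., bootstrap_pre()).
+ //   //
+ //   bootstrap_src (rs, altn,
+ //                  dir_path () /* amalgamation: empty to disable */,
+ //                  false       /* subprojects: skip discovery */);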
// Return true if this scope has already been bootstrapped, that is, the
// following calls have already been made:
@@ -205,10 +236,11 @@ namespace build2
bootstrap_post (scope& root);
// Create and bootstrap outer root scopes, if any. Loading is done by
- // load_root().
+ // load_root(). If subprojects is false, then do not discover or extract
+ // subprojects.
//
LIBBUILD2_SYMEXPORT void
- create_bootstrap_outer (scope& root);
+ create_bootstrap_outer (scope& root, bool subprojects = true);
// Create and bootstrap inner root scopes, if any, recursively.
//
@@ -224,8 +256,13 @@ namespace build2
// loaded. Also make sure all outer root scopes are loaded prior to loading
// this root scope.
//
+ // If pre/post functions are specified, they are called before/after
+ // pre/post hooks, respectively.
+ //
LIBBUILD2_SYMEXPORT void
- load_root (scope& root);
+ load_root (scope& root,
+ const function<void (parser&)>& pre = nullptr,
+ const function<void (parser&)>& post = nullptr);
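+
+ // A sketch of the new pre/post callbacks (assumed caller, e.g., a driver
+ // that needs to evaluate extra buildfile fragments around the project's
+ // root.build). Note that callbacks may only be passed if the root scope
+ // is not already loaded:
+ //
+ //   load_root (rs,
+ //              [] (parser& p)
+ //              {
+ //                // Called just before the pre hooks (and root.build).
+ //              },
+ //              [] (parser& p)
+ //              {
+ //                // Called just after the post hooks.
+ //              });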
// Extract the specified variable value from a buildfile. It is expected to
// be the first non-blank/comment line and not to rely on any variable
@@ -309,10 +346,14 @@ namespace build2
// original; see the config.import.<proj>.<name>[.<type>] logic for details)
// in which case it should still be passed to import phase 2.
//
- // If phase2 is true then the phase 2 is performed right away (we call it
- // immediate import). Note that if optional is true, phase2 must be true as
- // well (and thus there is no rule-specific logic for optional imports). In
- // case of optional, empty names value is retuned if nothing was found.
+ // If phase2 is present then the phase 2 is performed right away (we call it
+ // immediate import). Note that if optional is true, phase2 must be present
+ // as well (and thus there is no rule-specific logic for optional imports).
+ // In case of optional, empty names value is returned if nothing was found.
+ // The value in phase2 is the optional rule hint that, if not empty, will be
+ // used to lookup a rule that will be asked to resolve the qualified target
+ // (see rule::import()). If it is empty, then built-in resolution logic will
+ // be used for some target types (currently only exe{}).
//
// If metadata is true, then load the target metadata. In this case phase2
// must be true as well.
@@ -320,7 +361,9 @@ namespace build2
// Note also that we return names rather than a single name: while normally
// it will be a single target name, it can be an out-qualified pair (if
// someone wants to return a source target) but it can also be a non-target
- // since we don't restrict what users can import/export.
+ // since we don't restrict what users can import/export. If name has
+ // buildfile type, then the result is an absolute buildfile target to be
+ // included (once) at the point of importation.
//
// Finally, note that import is (and should be kept) idempotent or, more
// precisely, "accumulatively idempotent" in that additional steps may be
@@ -328,10 +371,21 @@ namespace build2
//
enum class import_kind {adhoc, normal, fallback};
- LIBBUILD2_SYMEXPORT pair<names, import_kind>
+ template <typename T>
+ struct import_result
+ {
+ const T* target; // Note: T can be imported target or imported scope.
+ names name;
+ import_kind kind;
+ };
+
+ // Note that import_result<scope>::target may be NULL even if name is not
+ // empty (e.g., an out-of-project target imported via phase 2).
+ //
+ LIBBUILD2_SYMEXPORT import_result<scope>
import (scope& base,
name,
- bool phase2,
+ const optional<string>& phase2,
bool optional,
bool metadata,
const location&);
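
A sketch of phase 1 plus immediate phase 2 import using this interface (the surrounding names bs, tgt, and loc are assumed context):

  // Immediate import: an empty hint string means built-in phase 2
  // resolution; a non-empty hint (e.g., from a rule_hint attribute)
  // delegates resolution to a matching rule.
  //
  import_result<scope> ir (
    import (bs, move (tgt),
            string () /* phase2: immediate, no rule hint */,
            false     /* optional */,
            false     /* metadata */,
            loc));

  const scope* iroot (ir.target); // Imported project root (may be NULL).
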
@@ -339,23 +393,23 @@ namespace build2
// Import phase 2.
//
const target&
- import (context&, const prerequisite_key&);
+ import2 (context&, const prerequisite_key&);
// As above but import the target "here and now" without waiting for phase 2
// (and thus omitting any rule-specific logic). This version of import is,
// for example, used by build system modules to perform an implicit import
// of the corresponding tool.
//
- // If phase2 is false, then the second phase's fallback/default logic is
+ // If phase2 is absent, then the second phase's fallback/default logic is
// only invoked if the import was ad hoc (i.e., a relative path was
// specified via config.import.<proj>.<name>[.<type>]) with NULL returned
// otherwise.
//
- // If phase2 is true and optional is true, then NULL is returned instead of
- // failing if phase 2 could not find anything.
+ // If phase2 is present and optional is true, then NULL is returned instead
+ // of failing if phase 2 could not find anything.
//
// If metadata is true, then load the target metadata. In this case phase2
- // must be true as well.
+ // must be present as well.
//
// The what argument specifies what triggered the import (for example,
// "module load") and is used in diagnostics.
@@ -364,18 +418,20 @@ namespace build2
// target::as_name() for details) as well as the kind of import that was
// performed.
//
- template <typename T>
- struct import_result
- {
- const T* target;
- names name;
- import_kind kind;
- };
+ // Note: cannot be used to import buildfile targets (use import_buildfile()
+ // instead).
+
+ // Print import_direct<exe>() result either as a target for a normal import
+ // or as a process path for ad hoc and fallback imports. Normally used in
+ // build system modules to print the configuration report.
+ //
+ LIBBUILD2_SYMEXPORT ostream&
+ operator<< (ostream&, const import_result<exe>&);
import_result<target>
import_direct (scope& base,
name,
- bool phase2,
+ const optional<string>& phase2,
bool optional,
bool metadata,
const location&,
@@ -390,13 +446,16 @@ namespace build2
import_direct (bool& new_value,
scope& base,
name,
- bool phase2,
+ const optional<string>& phase2,
bool optional,
bool metadata,
const location&,
const char* what = "import");
+ // As above but also cast the target and pass phase2 as bool (primarily
+ // for use in build system modules).
+ //
template <typename T>
import_result<T>
import_direct (scope&,
@@ -411,12 +470,16 @@ namespace build2
bool, bool, bool,
const location&, const char* = "import");
- // Print import_direct<exe>() result either as a target for a normal import
- // or as a process path for ad hoc and fallback imports. Normally used in
- // build system modules to print the configuration report.
+ // The import_direct() equivalent for importing buildfile targets. Return
+ // empty name if optional and not found. Note that the returned file path is
+ // not necessarily checked for existence so sourcing it may still fail.
//
- LIBBUILD2_SYMEXPORT ostream&
- operator<< (ostream&, const import_result<exe>&);
+ // Note also that this function can be used for an ad hoc import by passing
+ // an absolute target name as would be returned by the normal import (can be
+ // useful for importing own buildfiles).
+ //
+ LIBBUILD2_SYMEXPORT path
+ import_buildfile (scope& base, name, bool optional, const location&);
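+
+ // A minimal usage sketch (the project and buildfile names are
+ // illustrative):
+ //
+ //   // Import the exported buildfile common.build from project libhello
+ //   // and, if found, source it at this point.
+ //   //
+ //   name n;
+ //   n.proj = project_name ("libhello");
+ //   n.type = "buildfile";
+ //   n.value = "common";
+ //
+ //   path f (import_buildfile (bs, move (n), true /* optional */, loc));
+ //
+ //   if (!f.empty ())
+ //   {
+ //     // Source f (e.g., via source_once()); note that the path may
+ //     // still not exist.
+ //   }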
// As import phase 2 but only imports as an already existing target. But
// unlike it, this function can be called during the load and execute
@@ -458,6 +521,27 @@ namespace build2
bool metadata,
const location&);
+ // Import (more precisely, alias as if using the `define =` syntax) the
+ // target type from imported project (iroot) into this project (root). If
+ // the target type with this name is already defined in this project, then
+ // make sure it is the same as in the imported project.
+ //
+ LIBBUILD2_SYMEXPORT const target_type&
+ import_target_type (scope& root,
+ const scope& iroot, const string&,
+ const location&);
+
+ // Suggest appropriate ways to import the specified target (as type and
+ // name) from the specified project.
+ //
+ void
+ import_suggest (const diag_record&,
+ const project_name&,
+ const target_type*,
+ const string& name,
+ bool rule_hint,
+ const char* qual = nullptr);
+
// Create a build system project in the specified directory.
//
LIBBUILD2_SYMEXPORT void
@@ -472,7 +556,7 @@ namespace build2
const optional<string>& config_file, // Ad hoc config.build contents.
bool buildfile, // Create root buildfile.
const char* who, // Who is creating it.
- uint16_t verbosity = 1); // Diagnostic verbosity.
+ uint16_t verbosity); // Diagnostic verbosity.
}
#include <libbuild2/file.ixx>
diff --git a/libbuild2/file.ixx b/libbuild2/file.ixx
index dbd892d..dc39bcb 100644
--- a/libbuild2/file.ixx
+++ b/libbuild2/file.ixx
@@ -22,15 +22,16 @@ namespace build2
}
LIBBUILD2_SYMEXPORT const target*
- import (context&,
- const prerequisite_key&,
- bool optional_,
- const optional<string>& metadata, // False or metadata key.
- bool existing,
- const location&);
+ import2 (context&,
+ const prerequisite_key&,
+ const string& hint,
+ bool optional_,
+ const optional<string>& metadata, // False or metadata key.
+ bool existing,
+ const location&);
inline const target&
- import (context& ctx, const prerequisite_key& pk)
+ import2 (context& ctx, const prerequisite_key& pk)
{
assert (ctx.phase == run_phase::match);
@@ -39,13 +40,13 @@ namespace build2
// Looks like the only way to do this is to keep location in name and
// then in prerequisite. Perhaps one day...
//
- return *import (ctx, pk, false, nullopt, false, location ());
+ return *import2 (ctx, pk, string (), false, nullopt, false, location ());
}
inline import_result<target>
import_direct (scope& base,
name tgt,
- bool ph2, bool opt, bool md,
+ const optional<string>& ph2, bool opt, bool md,
const location& loc, const char* w)
{
bool dummy (false);
@@ -59,7 +60,13 @@ namespace build2
bool ph2, bool opt, bool md,
const location& loc, const char* w)
{
- auto r (import_direct (base, move (tgt), ph2, opt, md, loc, w));
+ auto r (import_direct (base,
+ move (tgt),
+ ph2 ? optional<string> (string ()) : nullopt,
+ opt,
+ md,
+ loc,
+ w));
return import_result<T> {
r.target != nullptr ? &r.target->as<const T> () : nullptr,
move (r.name),
@@ -74,7 +81,14 @@ namespace build2
bool ph2, bool opt, bool md,
const location& loc, const char* w)
{
- auto r (import_direct (nv, base, move (tgt), ph2, opt, md, loc, w));
+ auto r (import_direct (nv,
+ base,
+ move (tgt),
+ ph2 ? optional<string> (string ()) : nullopt,
+ opt,
+ md,
+ loc,
+ w));
return import_result<T> {
r.target != nullptr ? &r.target->as<const T> () : nullptr,
move (r.name),
@@ -84,6 +98,6 @@ namespace build2
inline const target*
import_existing (context& ctx, const prerequisite_key& pk)
{
- return import (ctx, pk, false, nullopt, true, location ());
+ return import2 (ctx, pk, string (), false, nullopt, true, location ());
}
}
diff --git a/libbuild2/filesystem.cxx b/libbuild2/filesystem.cxx
index 2e3309d..f340dd7 100644
--- a/libbuild2/filesystem.cxx
+++ b/libbuild2/filesystem.cxx
@@ -15,7 +15,12 @@ namespace build2
touch (context& ctx, const path& p, bool create, uint16_t v)
{
if (verb >= v)
- text << "touch " << p;
+ {
+ if (verb >= 2)
+ text << "touch " << p;
+ else if (verb)
+ print_diag ("touch", p);
+ }
if (ctx.dry_run)
return;
@@ -50,25 +55,30 @@ namespace build2
// We don't want to print the command if the directory already exists.
// This makes the below code a bit ugly.
//
- mkdir_status ms;
+ auto print = [v, &d] (bool ovr)
+ {
+ if (verb >= v || ovr)
+ {
+ if (verb >= 2)
+ text << "mkdir " << d;
+ else if (verb)
+ print_diag ("mkdir", d);
+ }
+ };
+ mkdir_status ms;
try
{
ms = try_mkdir (d);
}
catch (const system_error& e)
{
- if (verb >= v)
- text << "mkdir " << d;
-
+ print (true);
fail << "unable to create directory " << d << ": " << e << endf;
}
if (ms == mkdir_status::success)
- {
- if (verb >= v)
- text << "mkdir " << d;
- }
+ print (false);
return ms;
}
@@ -79,25 +89,30 @@ namespace build2
// We don't want to print the command if the directory already exists.
// This makes the below code a bit ugly.
//
- mkdir_status ms;
+ auto print = [v, &d] (bool ovr)
+ {
+ if (verb >= v || ovr)
+ {
+ if (verb >= 2)
+ text << "mkdir -p " << d;
+ else if (verb)
+ print_diag ("mkdir -p", d);
+ }
+ };
+ mkdir_status ms;
try
{
ms = try_mkdir_p (d);
}
catch (const system_error& e)
{
- if (verb >= v)
- text << "mkdir -p " << d;
-
+ print (true);
fail << "unable to create directory " << d << ": " << e << endf;
}
if (ms == mkdir_status::success)
- {
- if (verb >= v)
- text << "mkdir -p " << d;
- }
+ print (false);
return ms;
}
@@ -106,7 +121,12 @@ namespace build2
mvfile (const path& f, const path& t, uint16_t v)
{
if (verb >= v)
- text << "mv " << f << ' ' << t;
+ {
+ if (verb >= 2)
+ text << "mv " << f << ' ' << t;
+ else if (verb)
+ print_diag ("mv", f, t);
+ }
try
{
@@ -126,10 +146,18 @@ namespace build2
fs_status<rmfile_status>
rmsymlink (context& ctx, const path& p, bool d, uint16_t v)
{
- auto print = [&p, v] ()
+ auto print = [&p, v] (bool ovr)
{
- if (verb >= v)
- text << "rm " << p.string ();
+ if (verb >= v || ovr)
+ {
+ // Note: strip trailing directory separator (but keep as path for
+ // relative).
+ //
+ if (verb >= 2)
+ text << "rm " << p.string ();
+ else if (verb)
+ print_diag ("rm", p.to_directory () ? path (p.string ()) : p);
+ }
};
rmfile_status rs;
@@ -144,12 +172,12 @@ namespace build2
}
catch (const system_error& e)
{
- print ();
+ print (true);
fail << "unable to remove symlink " << p.string () << ": " << e << endf;
}
if (rs == rmfile_status::success)
- print ();
+ print (false);
return rs;
}
@@ -166,7 +194,12 @@ namespace build2
return rmdir_status::not_exist;
if (verb >= v)
- text << "rmdir -r " << d;
+ {
+ if (verb >= 2)
+ text << "rmdir -r " << d;
+ else if (verb)
+ print_diag ("rmdir -r", d);
+ }
if (!ctx.dry_run)
{
@@ -258,7 +291,7 @@ namespace build2
{
try
{
- for (const dir_entry& de: dir_iterator (d, false /* ignore_dangling */))
+ for (const dir_entry& de: dir_iterator (d, dir_iterator::no_follow))
{
// The .buildignore filesystem entry should be of the regular file
// type.
diff --git a/libbuild2/filesystem.hxx b/libbuild2/filesystem.hxx
index 565e832..7b45a08 100644
--- a/libbuild2/filesystem.hxx
+++ b/libbuild2/filesystem.hxx
@@ -22,6 +22,8 @@
//
namespace build2
{
+ using butl::entry_type;
+
using butl::auto_rmfile;
using butl::auto_rmdir;
@@ -73,10 +75,10 @@ namespace build2
using mkdir_status = butl::mkdir_status;
LIBBUILD2_SYMEXPORT fs_status<mkdir_status>
- mkdir (const dir_path&, uint16_t verbosity = 1);
+ mkdir (const dir_path&, uint16_t verbosity);
LIBBUILD2_SYMEXPORT fs_status<mkdir_status>
- mkdir_p (const dir_path&, uint16_t verbosity = 1);
+ mkdir_p (const dir_path&, uint16_t verbosity);
// Rename a file (or file symlink) overwriting the destination if exists.
//
@@ -166,7 +168,7 @@ namespace build2
//
LIBBUILD2_SYMEXPORT fs_status<mkdir_status>
mkdir_buildignore (context&,
- const dir_path&, const path&, uint16_t verbosity = 1);
+ const dir_path&, const path&, uint16_t verbosity);
// Return true if the directory is empty or only contains the .buildignore
// file. Fail if the directory doesn't exist.
diff --git a/libbuild2/filesystem.txx b/libbuild2/filesystem.txx
index 7404532..afdb48d 100644
--- a/libbuild2/filesystem.txx
+++ b/libbuild2/filesystem.txx
@@ -1,8 +1,6 @@
// file : libbuild2/filesystem.txx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#include <type_traits> // is_base_of
-
#include <libbuild2/diagnostics.hxx>
namespace build2
@@ -15,16 +13,17 @@ namespace build2
// We don't want to print the command if we couldn't remove the file
// because it does not exist (just like we don't print the update command
- // if the file is up to date). This makes the below code a bit ugly.
+ // if the file is up to date). But we always want to print some command
+ // before we issue diagnostics. This makes the below code a bit ugly.
//
- auto print = [&f, &t, v] ()
+ auto print = [&f, &t, v] (bool ovr)
{
- if (verb >= v)
+ if (verb >= v || ovr)
{
if (verb >= 2)
text << "rm " << f;
else if (verb)
- text << "rm " << t;
+ print_diag ("rm", t); // T can be target or path.
}
};
@@ -38,12 +37,12 @@ namespace build2
}
catch (const system_error& e)
{
- print ();
+ print (true);
fail << "unable to remove file " << f << ": " << e << endf;
}
if (rs == rmfile_status::success)
- print ();
+ print (false);
return rs;
}
@@ -56,16 +55,17 @@ namespace build2
// We don't want to print the command if we couldn't remove the directory
// because it does not exist (just like we don't print mkdir if it already
- // exists) or if it is not empty. This makes the below code a bit ugly.
+ // exists) or if it is not empty. But we always want to print some command
+ // before we issue diagnostics. This makes the below code a bit ugly.
//
- auto print = [&d, &t, v] ()
+ auto print = [&d, &t, v] (bool ovr)
{
- if (verb >= v)
+ if (verb >= v || ovr)
{
if (verb >= 2)
text << "rmdir " << d;
else if (verb)
- text << (std::is_base_of<dir_path, T>::value ? "rmdir " : "rm ") << t;
+ print_diag ("rmdir", t); // T can be target or dir_path.
}
};
@@ -79,7 +79,7 @@ namespace build2
}
catch (const system_error& e)
{
- print ();
+ print (true);
fail << "unable to remove directory " << d << ": " << e << endf;
}
@@ -87,14 +87,14 @@ namespace build2
{
case rmdir_status::success:
{
- print ();
+ print (false);
break;
}
case rmdir_status::not_empty:
{
if (verb >= v && verb >= 2)
{
- text << d << " is "
+ info << d << " is "
<< (w ? "current working directory" : "not empty")
<< ", not removing";
}
diff --git a/libbuild2/forward.hxx b/libbuild2/forward.hxx
index d2b8989..057ab24 100644
--- a/libbuild2/forward.hxx
+++ b/libbuild2/forward.hxx
@@ -26,6 +26,7 @@ namespace build2
struct variable;
class variable_pool;
+ class variable_patterns;
class variable_map;
struct variable_override;
using variable_overrides = vector<variable_override>;
diff --git a/libbuild2/function.cxx b/libbuild2/function.cxx
index eaf3f9e..3110547 100644
--- a/libbuild2/function.cxx
+++ b/libbuild2/function.cxx
@@ -213,7 +213,7 @@ namespace build2
if (f->arg_types[i] &&
*f->arg_types[i] == nullptr &&
args[i].type != nullptr)
- untypify (args[i]);
+ untypify (args[i], true /* reduce */);
}
}
@@ -348,29 +348,40 @@ namespace build2
// Static-initialize the function map and populate with builtin functions.
//
+ // NOTE: remember to also arrange for automatic documentation extraction in
+ // doc/buildfile!
+ void bool_functions (function_map&); // functions-bool.cxx
void builtin_functions (function_map&); // functions-builtin.cxx
void filesystem_functions (function_map&); // functions-filesystem.cxx
+ void integer_functions (function_map&); // functions-integer.cxx
+ void json_functions (function_map&); // functions-json.cxx
void name_functions (function_map&); // functions-name.cxx
void path_functions (function_map&); // functions-path.cxx
void process_functions (function_map&); // functions-process.cxx
void process_path_functions (function_map&); // functions-process-path.cxx
void regex_functions (function_map&); // functions-regex.cxx
void string_functions (function_map&); // functions-string.cxx
+ void target_functions (function_map&); // functions-target.cxx
void target_triplet_functions (function_map&); // functions-target-triplet.cxx
void project_name_functions (function_map&); // functions-target-triplet.cxx
+
void
insert_builtin_functions (function_map& m)
{
+ bool_functions (m);
builtin_functions (m);
filesystem_functions (m);
+ integer_functions (m);
+ json_functions (m);
name_functions (m);
path_functions (m);
process_functions (m);
process_path_functions (m);
regex_functions (m);
string_functions (m);
+ target_functions (m);
target_triplet_functions (m);
project_name_functions (m);
}
diff --git a/libbuild2/function.hxx b/libbuild2/function.hxx
index 81ece89..cda856a 100644
--- a/libbuild2/function.hxx
+++ b/libbuild2/function.hxx
@@ -4,8 +4,9 @@
#ifndef LIBBUILD2_FUNCTION_HXX
#define LIBBUILD2_FUNCTION_HXX
-#include <utility> // index_sequence
-#include <type_traits> // aligned_storage
+#include <cstddef> // max_align_t
+#include <utility> // index_sequence
+#include <type_traits> // is_*
#include <libbuild2/types.hxx>
#include <libbuild2/forward.hxx>
@@ -133,8 +134,8 @@ namespace build2
// Auxiliary data storage. Note that it is expected to be trivially
// copyable and destructible.
//
- std::aligned_storage<sizeof (void*) * 3>::type data;
- static const size_t data_size = sizeof (decltype (data));
+ static const size_t data_size = sizeof (void*) * 3;
+ alignas (std::max_align_t) unsigned char data[data_size];
function_overload (const char* an,
size_t mi, size_t ma, types ts,
@@ -952,7 +953,8 @@ namespace build2
// Low-level interface that can be used to pass additional data.
//
- // Note that the call to this function sidesteps the thunk.
+ // Note that the call to this function sidesteps the thunk. One notable
+ // consequence of this is that the values are not checked for NULL.
//
template <typename D, typename... A>
void
diff --git a/libbuild2/function.test.cxx b/libbuild2/function.test.cxx
index b09e4f7..37ed5ff 100644
--- a/libbuild2/function.test.cxx
+++ b/libbuild2/function.test.cxx
@@ -44,13 +44,13 @@ namespace build2
// Fake build system driver, default verbosity.
//
init_diag (1);
- init (nullptr, argv[0]);
+ init (nullptr, argv[0], true);
// Serial execution.
//
scheduler sched (1);
global_mutexes mutexes (1);
- file_cache fcache;
+ file_cache fcache (true);
context ctx (sched, mutexes, fcache);
auto& functions (ctx.functions);
@@ -115,7 +115,7 @@ namespace build2
else if (!a.empty ())
{
names storage;
- cout << reverse (a, storage);
+ cout << reverse (a, storage, true /* reduce */);
}
cout << endl;
}
@@ -124,7 +124,9 @@ namespace build2
try
{
- scope& s (ctx.global_scope.rw ());
+ // Use temp scope for the private variable pool.
+ //
+ temp_scope s (ctx.global_scope.rw ());
parser p (ctx);
p.parse_buildfile (cin, path_name ("buildfile"), &s, s);
diff --git a/libbuild2/functions-bool.cxx b/libbuild2/functions-bool.cxx
new file mode 100644
index 0000000..bb2fd3f
--- /dev/null
+++ b/libbuild2/functions-bool.cxx
@@ -0,0 +1,26 @@
+// file : libbuild2/functions-bool.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <libbuild2/function.hxx>
+#include <libbuild2/variable.hxx>
+
+using namespace std;
+
+namespace build2
+{
+ void
+ bool_functions (function_map& m)
+ {
+ function_family f (m, "bool");
+
+ // $string(<bool>)
+ //
+ // Convert a boolean value to a string literal `true` or `false`.
+ //
+
+ // Note that we don't handle NULL values for this type since it has no
+ // empty representation.
+ //
+ f["string"] += [](bool b) {return b ? "true" : "false";};
+ }
+}
diff --git a/libbuild2/functions-builtin.cxx b/libbuild2/functions-builtin.cxx
index c013c3b..e24ff8e 100644
--- a/libbuild2/functions-builtin.cxx
+++ b/libbuild2/functions-builtin.cxx
@@ -26,7 +26,7 @@ namespace build2
if (s == "dedup")
r = true;
else
- throw invalid_argument ("invalid flag '" + s + "'");
+ throw invalid_argument ("invalid flag '" + s + '\'');
}
}
return r;
@@ -37,11 +37,17 @@ namespace build2
{
function_family f (m, "builtin");
- // Note that we may want to extend the scope argument to a more general
- // notion of "lookup context" (scope, target, prerequisite).
+ // $defined(<variable>)
+ //
+ // Return true if the specified variable is defined in the calling scope or
+ // any outer scopes.
//
// Note that this function is not pure.
//
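+ // For example (illustrative; config.hello.fancy is a made-up variable):
+ //
+ // if $defined(config.hello.fancy)
+ //   ...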
+
+ // Note that we may want to extend the scope argument to a more general
+ // notion of "lookup context" (scope, target, prerequisite).
+ //
f.insert ("defined", false) += [](const scope* s, names name)
{
if (s == nullptr)
@@ -50,7 +56,17 @@ namespace build2
return (*s)[convert<string> (move (name))].defined ();
};
- // Return variable visibility if it has been entered and NULL otherwise.
+ // $visibility(<variable>)
+ //
+ // Return variable visibility if it is known and `null` otherwise.
+ //
+ // Possible visibility values are:
+ //
+ // global -- all outer scopes
+ // project -- this project (no outer projects)
+ // scope -- this scope (no outer scopes)
+ // target -- target and target type/pattern-specific
+ // prereq -- prerequisite-specific
//
// Note that this function is not pure.
//
@@ -60,88 +76,125 @@ namespace build2
fail << "visibility() called out of scope" << endf;
const variable* var (
- s->ctx.var_pool.find (convert<string> (move (name))));
+ s->var_pool ().find (convert<string> (move (name))));
return (var != nullptr
? optional<string> (to_string (var->visibility))
: nullopt);
};
+ // $type(<value>)
+ //
+ // Return the type name of the value or empty string if untyped.
+ //
f["type"] += [](value* v) {return v->type != nullptr ? v->type->name : "";};
+
+ // $null(<value>)
+ //
+ // Return true if the value is `null`.
+ //
f["null"] += [](value* v) {return v->null;};
+
+ // $empty(<value>)
+ //
+ // Return true if the value is empty.
+ //
f["empty"] += [](value* v) {return v->null || v->empty ();};
- f["identity"] += [](value* v) {return move (*v);};
- // string
+ // $first(<value>[, <not_pair>])
+ // $second(<value>[, <not_pair>])
//
- f["string"] += [](bool b) {return b ? "true" : "false";};
- f["string"] += [](int64_t i) {return to_string (i);};
- f["string"] += [](uint64_t i) {return to_string (i);};
-
- // Quote a value returning its string representation. If escape is true,
- // then also escape (with a backslash) the quote characters being added
- // (this is useful if the result will be re-parsed, for example as a
- // Testscript command line).
+ // Return the first or the second half of a pair, respectively. If a value
+ // is not a pair, then return `null` unless the <not_pair> argument is
+ // `true`, in which case return the non-pair value.
//
- f["quote"] += [](value* v, optional<value> escape)
+ // If multiple pairs are specified, then return the list of first/second
+ // halfs. If an element is not a pair, then omit it from the resulting
+ // list unless the <not_pair> argument is `true`, in which case add the
+ // non-pair element to the list.
+ //
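+ // For example (an illustrative sketch based on the semantics above):
+ //
+ // x = a@b c@d e
+ //
+ // $first($x)        # a c
+ // $second($x)       # b d
+ // $first($x, true)  # a c e
+ //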
+ f["first"] += [] (names ns, optional<value> not_pair)
{
- if (v->null)
- return string ();
+ // @@ TODO: would be nice to return typed half if passed typed value.
- untypify (*v); // Reverse to names.
+ bool np (not_pair && convert<bool> (move (*not_pair)));
- ostringstream os;
- to_stream (os,
- v->as<names> (),
- true /* quote */,
- '@' /* pair */,
- escape && convert<bool> (move (*escape)));
- return os.str ();
- };
+ names r;
+ for (auto i (ns.begin ()), e (ns.end ()); i != e; )
+ {
+ name& f (*i++);
+ name* s (f.pair ? &*i++ : nullptr);
+
+ if (s != nullptr || np)
+ {
+ f.pair = '\0';
+ r.push_back (move (f));
+ }
+ else if (ns.size () == 1)
+ return value (nullptr); // Single non-pair.
+ }
- // $size(<ints>)
- //
- // Return the number of elements in the sequence.
- //
- f["size"] += [] (int64s v) {return v.size ();};
- f["size"] += [] (uint64s v) {return v.size ();};
+ return value (move (r));
+ };
- // $sort(<ints> [, <flags>])
- //
- // Sort integers in ascending order.
- //
- // The following flags are supported:
- //
- // dedup - in addition to sorting also remove duplicates
- //
- f["sort"] += [](int64s v, optional<names> fs)
+ f["second"] += [] (names ns, optional<value> not_pair)
{
- sort (v.begin (), v.end ());
+ bool np (not_pair && convert<bool> (move (*not_pair)));
- if (functions_sort_flags (move (fs)))
- v.erase (unique (v.begin(), v.end()), v.end ());
+ names r;
+ for (auto i (ns.begin ()), e (ns.end ()); i != e; )
+ {
+ name& f (*i++);
+ name* s (f.pair ? &*i++ : nullptr);
+
+ if (s != nullptr)
+ r.push_back (move (*s));
+ else if (np)
+ r.push_back (move (f));
+ else if (ns.size () == 1)
+ return value (nullptr); // Single non-pair.
+ }
- return v;
+ return value (move (r));
};
- f["sort"] += [](uint64s v, optional<names> fs)
+ // Leave this one undocumented for now since it's unclear why anyone would
+ // want to use it currently (we don't yet have any function composition
+ // facilities).
+ //
+ f["identity"] += [](value* v) {return move (*v);};
+
+ // $quote(<value>[, <escape>])
+ //
+ // Quote the value returning its string representation. If <escape> is
+ // `true`, then also escape (with a backslash) the quote characters being
+ // added (this is useful if the result will be re-parsed, for example as a
+ // script command line).
+ //
+ f["quote"] += [](value* v, optional<value> escape)
{
- sort (v.begin (), v.end ());
+ if (v->null)
+ return string ();
- if (functions_sort_flags (move (fs)))
- v.erase (unique (v.begin(), v.end()), v.end ());
+ untypify (*v, true /* reduce */); // Reverse to names.
- return v;
+ ostringstream os;
+ to_stream (os,
+ v->as<names> (),
+ quote_mode::normal,
+ '@' /* pair */,
+ escape && convert<bool> (move (*escape)));
+ return os.str ();
};
- // getenv
+ // $getenv(<name>)
//
- // Return NULL if the environment variable is not set, untyped value
- // otherwise.
+ // Get the value of the environment variable. Return `null` if the
+ // environment variable is not set.
//
// Note that if the build result can be affected by the variable being
- // queried, then it should be reported with the config.environment
+ // queried, then it should be reported with the `config.environment`
// directive.
//
// Note that this function is not pure.
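+ //
+ // For example (illustrative; `CI` is just an arbitrary environment
+ // variable):
+ //
+ // if! $empty($getenv(CI))
+ //   ...
+ //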
diff --git a/libbuild2/functions-filesystem.cxx b/libbuild2/functions-filesystem.cxx
index ef7bfc5..665a0f3 100644
--- a/libbuild2/functions-filesystem.cxx
+++ b/libbuild2/functions-filesystem.cxx
@@ -7,6 +7,7 @@
#include <libbuild2/variable.hxx>
using namespace std;
+using namespace butl;
namespace build2
{
@@ -29,12 +30,27 @@ namespace build2
return true;
};
+ auto dangling = [] (const dir_entry& de)
+ {
+ bool sl (de.ltype () == entry_type::symlink);
+
+ warn << "skipping "
+ << (sl ? "dangling symlink" : "inaccessible entry") << ' '
+ << de.base () / de.path ();
+
+ return true;
+ };
+
// Print paths "as is" in the diagnostics.
//
try
{
if (pattern.absolute ())
- path_search (pattern, add);
+ path_search (pattern,
+ add,
+ dir_path () /* start */,
+ path_match_flags::follow_symlinks,
+ dangling);
else
{
// An absolute start directory must be specified for the relative
@@ -54,7 +70,11 @@ namespace build2
<< "' is relative";
}
- path_search (pattern, add, *start);
+ path_search (pattern,
+ add,
+ *start,
+ path_match_flags::follow_symlinks,
+ dangling);
}
}
catch (const system_error& e)
@@ -83,14 +103,19 @@ namespace build2
function_family f (m, "filesystem");
- // path_search
+ // $path_search(<pattern>[, <start-dir>])
//
- // Return filesystem paths that match the pattern. If the pattern is an
- // absolute path, then the start directory is ignored (if present).
- // Otherwise, the start directory must be specified and be absolute.
+ // Return filesystem paths that match the shell-like wildcard pattern. If
+ // the pattern is an absolute path, then the start directory is ignored
+ // (if present). Otherwise, the start directory must be specified and be
+ // absolute.
//
// Note that this function is not pure.
//
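+ // For example (illustrative; the pattern is quoted so that it is passed
+ // to the function as is rather than being expanded by the parser):
+ //
+ // cxxs = $path_search('*.cxx', $src_base)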
+
+ // @@ In the future we may want to add a flag that controls the
+ // dangling/inaccessible treatment.
+ //
{
auto e (f.insert ("path_search", false));
@@ -115,6 +140,5 @@ namespace build2
convert<dir_path> (move (start)));
};
}
-
}
}
diff --git a/libbuild2/functions-integer.cxx b/libbuild2/functions-integer.cxx
new file mode 100644
index 0000000..8f9e2cf
--- /dev/null
+++ b/libbuild2/functions-integer.cxx
@@ -0,0 +1,156 @@
+// file : libbuild2/functions-integer.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <libbuild2/function.hxx>
+#include <libbuild2/variable.hxx>
+
+using namespace std;
+
+namespace build2
+{
+ extern bool
+ functions_sort_flags (optional<names>); // functions-builtin.cxx
+
+ static string
+ to_string (uint64_t i, optional<value> base, optional<value> width)
+ {
+ int b (base ?
+ static_cast<int> (convert<uint64_t> (move (*base)))
+ : 10);
+
+ size_t w (width
+ ? static_cast<size_t> (convert<uint64_t> (move (*width)))
+ : 0);
+
+ return (to_string (i, b, w));
+ }
+
+ void
+ integer_functions (function_map& m)
+ {
+ function_family f (m, "integer");
+
+ // $string(<int64>)
+ // $string(<uint64>[, <base>[, <width>]])
+ //
+ // Convert an integer to a string. For unsigned integers we can specify
+ // the desired base and width. For example:
+ //
+ // x = [uint64] 0x0000ffff
+ //
+ // c.poptions += "-DOFFSET=$x" # -DOFFSET=65535
+ // c.poptions += "-DOFFSET=$string($x, 16)" # -DOFFSET=0xffff
+ // c.poptions += "-DOFFSET=$string($x, 16, 8)" # -DOFFSET=0x0000ffff
+ //
+
+ // Note that we don't handle NULL values for these types since they have no
+ // empty representation.
+ //
+ f["string"] += [](int64_t i) {return to_string (i);};
+
+ f["string"] += [](uint64_t i, optional<value> base, optional<value> width)
+ {
+ return to_string (i, move (base), move (width));
+ };
+
+ // $integer_sequence(<begin>, <end>[, <step>])
+ //
+ // Return the list of uint64 integers starting from <begin> (inclusive) to
+ // <end> (exclusive) with the specified <step> or `1` if unspecified. If
+ // <begin> is greater than <end>, an empty list is returned.
+ //
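+ // For example (illustrative):
+ //
+ // $integer_sequence(1, 5)      # 1 2 3 4
+ // $integer_sequence(0, 10, 3)  # 0 3 6 9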
+
+ // Note that currently negative numbers are not supported but this could
+ // be handled if required (e.g., by returning int64s in this case).
+ //
+ // Note also that we could improve this by adding a shortcut to get the
+ // indexes of a list (for example, $indexes(<list>) plus potentially a
+ // similar $keys() function for maps).
+ //
+ f["integer_sequence"] += [](value begin, value end, optional<value> step)
+ {
+ uint64_t b (convert<uint64_t> (move (begin)));
+ uint64_t e (convert<uint64_t> (move (end)));
+ uint64_t s (step ? convert<uint64_t> (move (*step)) : 1);
+
+ uint64s r;
+ if (b < e)
+ {
+ r.reserve (static_cast<size_t> ((e - b) / s + 1));
+
+ for (; b < e; b += s)
+ r.push_back (static_cast<size_t> (b));
+ }
+
+ return r;
+ };
+
+ // $size(<ints>)
+ //
+ // Return the number of elements in the sequence.
+ //
+ f["size"] += [] (int64s v) {return v.size ();};
+ f["size"] += [] (uint64s v) {return v.size ();};
+
+ // $sort(<ints> [, <flags>])
+ //
+ // Sort integers in ascending order.
+ //
+ // The following flags are supported:
+ //
+ // dedup - in addition to sorting also remove duplicates
+ //
+ f["sort"] += [](int64s v, optional<names> fs)
+ {
+ sort (v.begin (), v.end ());
+
+ if (functions_sort_flags (move (fs)))
+ v.erase (unique (v.begin(), v.end()), v.end ());
+
+ return v;
+ };
+
+ f["sort"] += [](uint64s v, optional<names> fs)
+ {
+ sort (v.begin (), v.end ());
+
+ if (functions_sort_flags (move (fs)))
+ v.erase (unique (v.begin(), v.end()), v.end ());
+
+ return v;
+ };
+
+ // $find(<ints>, <int>)
+ //
+ // Return true if the integer sequence contains the specified integer.
+ //
+ f["find"] += [](int64s vs, value v)
+ {
+ return find (vs.begin (), vs.end (),
+ convert<int64_t> (move (v))) != vs.end ();
+ };
+
+ f["find"] += [](uint64s vs, value v)
+ {
+ return find (vs.begin (), vs.end (),
+ convert<uint64_t> (move (v))) != vs.end ();
+ };
+
+ // $find_index(<ints>, <int>)
+ //
+ // Return the index of the first element in the integer sequence that is
+ // equal to the specified integer or `$size(ints)` if none is found.
+ //
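+ // For example (an illustrative sketch):
+ //
+ // ports = [uint64s] 80 443 8080
+ //
+ // $find($ports, 443)        # true
+ // $find_index($ports, 443)  # 1
+ //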
+ f["find_index"] += [](int64s vs, value v)
+ {
+ auto i (find (vs.begin (), vs.end (), convert<int64_t> (move (v))));
+ return i != vs.end () ? i - vs.begin () : vs.size ();
+ };
+
+ f["find_index"] += [](uint64s vs, value v)
+ {
+ auto i (find (vs.begin (), vs.end (), convert<uint64_t> (move (v))));
+ return i != vs.end () ? i - vs.begin () : vs.size ();
+ };
+ }
+}
diff --git a/libbuild2/functions-json.cxx b/libbuild2/functions-json.cxx
new file mode 100644
index 0000000..e06d9a5
--- /dev/null
+++ b/libbuild2/functions-json.cxx
@@ -0,0 +1,335 @@
+// file : libbuild2/functions-json.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <libbuild2/function.hxx>
+#include <libbuild2/variable.hxx>
+
+#ifndef BUILD2_BOOTSTRAP
+# include <libbutl/json/parser.hxx>
+# include <libbutl/json/serializer.hxx>
+#endif
+
+using namespace std;
+
+namespace build2
+{
+ static size_t
+ array_find_index (const json_value& a, value v)
+ {
+ if (a.type != json_type::array)
+ fail << "expected json array instead of " << to_string (a.type)
+ << " as first argument";
+
+ auto b (a.array.begin ()), e (a.array.end ());
+ auto i (find (b, e, convert<json_value> (move (v))));
+ return i != e ? i - b : a.array.size ();
+ };
+
+ void
+ json_functions (function_map& m)
+ {
+ function_family f (m, "json");
+
+ // $value_type(<json>[, <distinguish_numbers>])
+ //
+ // Return the type of a JSON value: `null`, `boolean`, `number`, `string`,
+ // `array`, or `object`. If the <distinguish_numbers> argument is `true`,
+ // then instead of `number` return `signed number`, `unsigned number`, or
+ // `hexadecimal number`.
+ //
+ f["value_type"] += [] (json_value v, optional<value> distinguish_numbers)
+ {
+ bool dn (distinguish_numbers &&
+ convert<bool> (move (*distinguish_numbers)));
+
+ return to_string (v.type, dn);
+ };
+
+ // $value_size(<json>)
+ //
+ // Return the size of a JSON value.
+ //
+ // The size of a `null` value is `0`. The size of simple values
+ // (`boolean`, `number`, and `string`) is `1`. The size of `array` and
+ // `object` values is the number of elements and members, respectively.
+ //
+ // Note that the size of a `string` JSON value is not the length of the
+ // string. To get the length call `$string.size()` instead by casting the
+ // JSON value to the `string` value type.
+ //
+ f["value_size"] += [] (json_value v) -> size_t
+ {
+ // Note: should be consistent with value_traits<json_value>::empty(),
+ // json_subscript().
+ //
+ switch (v.type)
+ {
+ case json_type::null: return 0;
+ case json_type::boolean:
+ case json_type::signed_number:
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ case json_type::string: break;
+ case json_type::array: return v.array.size ();
+ case json_type::object: return v.object.size ();
+ }
+
+ return 1;
+ };
+
+ // $member_name(<json-member>)
+ //
+ // Return the name of a JSON object member.
+ //
+ f["member_name"] += [] (json_value v)
+ {
+ // A member becomes an object with a single member (see json_reverse()
+ // for details).
+ //
+ if (v.type == json_type::object && v.object.size () == 1)
+ return move (v.object.front ().name);
+
+ fail << "json object member expected instead of " << v.type << endf;
+ };
+
+ // $member_value(<json-member>)
+ //
+ // Return the value of a JSON object member.
+ //
+ f["member_value"] += [] (json_value v)
+ {
+ // A member becomes an object with a single member (see json_reverse()
+ // for details).
+ //
+ if (v.type == json_type::object && v.object.size () == 1)
+ {
+ // Reverse simple JSON values to the corresponding fundamental type
+ // values for consistency with subscript/iteration (see
+ // json_subscript_impl() for background).
+ //
+ json_value& jr (v.object.front ().value);
+
+ switch (jr.type)
+ {
+#if 0
+ case json_type::null: return value (names {});
+#else
+ case json_type::null: return value ();
+#endif
+ case json_type::boolean: return value (jr.boolean);
+ case json_type::signed_number: return value (jr.signed_number);
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number: return value (jr.unsigned_number);
+ case json_type::string: return value (move (jr.string));
+ case json_type::array:
+ case json_type::object: return value (move (jr));
+ }
+ }
+
+ fail << "json object member expected instead of " << v.type << endf;
+ };
+
+ // $object_names(<json-object>)
+ //
+ // Return the list of names in the JSON object. If the JSON `null` is
+ // passed instead, assume it is a missing object and return an empty list.
+ //
+ f["object_names"] += [] (json_value o)
+ {
+ names ns;
+
+ if (o.type == json_type::null)
+ ;
+ else if (o.type == json_type::object)
+ {
+ ns.reserve (o.object.size ());
+
+ for (json_member& m: o.object)
+ ns.push_back (name (move (m.name)));
+ }
+ else
+ fail << "expected json object instead of " << to_string (o.type);
+
+ return ns;
+ };
+
+ // $array_size(<json-array>)
+ //
+ // Return the number of elements in the JSON array. If the JSON `null`
+ // value is passed instead, assume it is a missing array and return `0`.
+ //
+ f["array_size"] += [] (json_value a) -> size_t
+ {
+ if (a.type == json_type::null)
+ return 0;
+
+ if (a.type == json_type::array)
+ return a.array.size ();
+
+ fail << "expected json array instead of " << to_string (a.type) << endf;
+ };
+
+ // $array_find(<json-array>, <json>)
+ //
+ // Return true if the JSON array contains the specified JSON value. If the
+ // JSON `null` value is passed instead, assume it is a missing array and
+ // return `false`.
+ //
+ f["array_find"] += [] (json_value a, value v)
+ {
+ if (a.type == json_type::null)
+ return false;
+
+ size_t i (array_find_index (a, move (v)));
+ return i != a.array.size (); // We now know it's an array.
+ };
+
+ // $array_find_index(<json-array>, <json>)
+ //
+ // Return the index of the first element in the JSON array that is equal
+ // to the specified JSON value or `$array_size(<json-array>)` if none is
+ // found. If the JSON `null` value is passed instead, assume it is a
+ // missing array and return `0`.
+ //
+ f["array_find_index"] += [](json_value a, value v) -> size_t
+ {
+ if (a.type == json_type::null)
+ return 0;
+
+ return array_find_index (a, move (v));
+ };
+
+#ifndef BUILD2_BOOTSTRAP
+
+ // @@ Flag to support multi-value (returning it as JSON array)? Then
+ // probably also in $serialize().
+ //
+ // @@ Flag to override duplicates instead of failing?
+
+ // $json.load(<path>)
+ //
+ // Parse the contents of the specified file as JSON input text and return
+ // the result as a value of the `json` type.
+ //
+ // See also `$json.parse()`.
+ //
+ // Note that this function is not pure.
+ //
+ f.insert (".load", false) += [] (names xf)
+ {
+ path f (convert<path> (move (xf)));
+
+ try
+ {
+ ifdstream is (f);
+ json_parser p (is, f.string ());
+ return json_value (p);
+ }
+ catch (const invalid_json_input& e)
+ {
+ fail (location (f, e.line, e.column)) << "invalid json input: " << e <<
+ info << "byte offset " << e.position << endf;
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to read from " << f << ": " << e << endf;
+ }
+ };
+
+ // $json.parse(<text>)
+ //
+ // Parse the specified JSON input text and return the result as a value of
+ // the `json` type.
+ //
+ // See also `$json.load()` and `$json.serialize()`.
+ //
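+ // For example (an illustrative sketch):
+ //
+ // j = $json.parse('{"x": 1, "y": [2, 3]}')
+ //
+ // $value_type($j)  # object
+ // $value_size($j)  # 2
+ //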
+ f[".parse"] += [] (names text)
+ {
+ string t (convert<string> (move (text)));
+
+ try
+ {
+ json_parser p (t, nullptr /* name */);
+ return json_value (p);
+ }
+ catch (const invalid_json_input& e)
+ {
+ fail << "invalid json input: " << e <<
+ info << "line " << e.line
+ << ", column " << e.column
+ << ", byte offset " << e.position << endf;
+ }
+ };
+
+ // $serialize(<json>[, <indentation>])
+ //
+ // Serialize the specified JSON value and return the resulting JSON output
+ // text.
+ //
+ // The optional <indentation> argument specifies the number of indentation
+ // spaces that should be used for pretty-printing. If `0` is passed, then
+ // no pretty-printing is performed. The default is `2` spaces.
+ //
+ // See also `$json.parse()`.
+ //
+ f["serialize"] += [] (json_value v, optional<value> indentation)
+ {
+ uint64_t i (indentation ? convert<uint64_t> (*indentation) : 2);
+
+ try
+ {
+ // For the diagnostics test.
+ //
+#if 0
+ if (v.type == json_type::string && v.string == "deadbeef")
+ {
+ v.string[4] = 0xe0;
+ v.string[5] = 0xe0;
+ }
+#endif
+
+ string o;
+ json_buffer_serializer s (o, i);
+ v.serialize (s);
+ return o;
+ }
+ catch (const invalid_json_output& e)
+ {
+ diag_record dr;
+ dr << fail << "invalid json value: " << e;
+
+ if (e.event)
+ dr << info << "while serializing " << to_string (*e.event);
+
+ if (e.offset != string::npos)
+ dr << info << "offending byte offset " << e.offset;
+
+ dr << endf;
+ }
+ };
+#endif
+
+ // $size(<json-set>)
+ // $size(<json-map>)
+ //
+ // Return the number of elements in the sequence.
+ //
+ f["size"] += [] (set<json_value> v) {return v.size ();};
+ f["size"] += [] (map<json_value, json_value> v) {return v.size ();};
+
+ // $keys(<json-map>)
+ //
+ // Return the list of keys in a json map as a json array.
+ //
+ // Note that the result is sorted in ascending order.
+ //
+ f["keys"] += [](map<json_value, json_value> v)
+ {
+ json_value r (json_type::array);
+ r.array.reserve (v.size ());
+ for (pair<const json_value, json_value>& p: v)
+ r.array.push_back (p.first); // @@ PERF: use C++17 map::extract() to steal.
+ return r;
+ };
+ }
+}
diff --git a/libbuild2/functions-name.cxx b/libbuild2/functions-name.cxx
index 800c377..456f85b 100644
--- a/libbuild2/functions-name.cxx
+++ b/libbuild2/functions-name.cxx
@@ -1,6 +1,8 @@
// file : libbuild2/functions-name.cxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
+#include <libbuild2/functions-name.hxx>
+
#include <libbuild2/scope.hxx>
#include <libbuild2/function.hxx>
#include <libbuild2/variable.hxx>
@@ -18,38 +20,165 @@ namespace build2
// out of scope). See scope::find_target_type() for details. Allow out-
// qualified names (out is discarded).
//
- static pair<name, optional<string>>
- to_target_name (const scope* s, name&& n, const name& o = name ())
+ static pair<const target_type*, optional<string>>
+ to_target_type (const scope* s, name& n, const name& o = name ())
{
if (n.pair && !o.directory ())
fail << "name pair in names";
- optional<string> e;
+ return s != nullptr
+ ? s->find_target_type (n, location ())
+ : pair<const target_type*, optional<string>> {nullptr, nullopt};
+ }
- if (s != nullptr)
- {
- auto rp (s->find_target_type (n, location ()));
+ static pair<name, optional<string>>
+ to_target_name (const scope* s, name&& n, const name& o = name ())
+ {
+ auto rp (to_target_type (s, n, o));
- if (rp.first != nullptr)
- n.type = rp.first->name;
+ if (rp.first != nullptr)
+ n.type = rp.first->name;
- e = move (rp.second);
+ if (n.value.empty () && (n.type == "dir" || n.type == "fsdir"))
+ {
+ n.value = n.dir.leaf ().string ();
+ n.dir.make_directory ();
}
- return make_pair (move (n), move (e));
+ return make_pair (move (n), move (rp.second));
}
- // Note: this helper mey be used by other functions that operate on targets.
- //
- LIBBUILD2_SYMEXPORT const target&
+ const target&
to_target (const scope& s, name&& n, name&& o)
{
+ // Note: help the user out and search in both out and src like a
+ // prerequisite.
+ //
if (const target* r = search_existing (n, s, o.dir))
return *r;
- fail << "target "
- << (n.pair ? names {move (n), move (o)} : names {move (n)})
- << " not found" << endf;
+ // Inside recipes we don't treat `{}` as special so a literal target name
+ // will have no type and won't be found, which is confusing as hell.
+ //
+ bool typed (n.typed ());
+
+ diag_record dr (fail);
+
+ dr << "target "
+ << (n.pair ? names {move (n), move (o)} : names {move (n)})
+ << " not found";
+
+ if (!typed)
+ dr << info << "wrap it in ([names] ...) if this is literal target name "
+ << "specified inside recipe";
+
+ dr << endf;
+ }
+
+ const target&
+ to_target (const scope& s, names&& ns)
+ {
+ assert (ns.size () == (ns[0].pair ? 2 : 1));
+
+ name o;
+ return to_target (s, move (ns[0]), move (ns[0].pair ? ns[1] : o));
+ }
+
+ static bool
+ is_a (const scope* s, name&& n, const name& o, names&& t)
+ {
+ if (s == nullptr)
+ fail << "name.is_a() called out of scope";
+
+ string tts (convert<string> (move (t)));
+ const target_type* tt (s->find_target_type (tts));
+ if (tt == nullptr)
+ fail << "unknown target type " << tts;
+
+ const target_type* ntt (to_target_type (s, n, o).first);
+ if (ntt == nullptr)
+ {
+ // If this is an imported target and the target type is unknown, then
+ // it cannot possibly match one of the known types. We handle it like
+ // this instead of failing because the later failure (e.g., as a
+ // result of this target listed as prerequisite) will have more
+ // accurate diagnostics. See also filter() below.
+ //
+ if (n.proj)
+ return false;
+
+ fail << "unknown target type " << n.type << " in " << n;
+ }
+
+ return ntt->is_a (*tt);
+ }
+
+ static names
+ filter (const scope* s, names ns, names ts, bool out)
+ {
+ if (s == nullptr)
+ fail << "name." << (out ? "filter_out" : "filter")
+ << "() called out of scope";
+
+ small_vector<const target_type*, 1> tts;
+ for (const name& n: ts)
+ {
+ if (!n.simple ())
+ fail << "invalid target type name " << n;
+
+ if (n.pair)
+ fail << "pair in target type name " << n;
+
+ const target_type* tt (s->find_target_type (n.value));
+ if (tt == nullptr)
+ fail << "unknown target type " << n.value;
+
+ tts.push_back (tt);
+ }
+
+ names r;
+ for (auto i (ns.begin ()); i != ns.end (); ++i)
+ {
+ name& n (*i);
+ bool p (n.pair);
+
+ // to_target_type() splits the name into the target name and extension.
+ // While we could try to reconstitute it with combine_name(), there are
+ // murky corner cases (see the default_extension argument) which won't
+ // be easy to handle. So let's just make a copy. Looking at the
+ // implementation of scope::find_target_type(), we can optimize for the
+ // (common) typed case by only copying the type.
+ //
+ name c (n.typed () ? name (n.type, "") : n);
+
+ const target_type* ntt (to_target_type (s, c, p ? *++i : name ()).first);
+ if (ntt == nullptr)
+ {
+ // If this is an imported target and the target type is unknown, then
+ // it cannot possibly match one of the known types. We handle it like
+ // this instead of failing because the later failure (e.g., as a
+ // result of this target listed as prerequisite) will have more
+ // accurate diagnostics. See also is_a() above.
+ //
+ if (!n.proj)
+ fail << "unknown target type " << n.type << " in " << n;
+ }
+
+ if (ntt != nullptr
+ ? (find_if (tts.begin (), tts.end (),
+ [ntt] (const target_type* tt)
+ {
+ return ntt->is_a (*tt);
+ }) != tts.end ()) != out
+ : out)
+ {
+ r.push_back (move (n));
+ if (p)
+ r.push_back (move (*i));
+ }
+ }
+
+ return r;
}
void
@@ -62,17 +191,30 @@ namespace build2
// on prerequisite names. They also won't always return the same result as
// if we were interrogating an actual target (e.g., the directory may be
// relative). Plus we now have functions that can only be called on
- // targets (see below).
+ // targets (see functions-target.cxx).
//
- function_family fn (m, "name");
+ function_family f (m, "name");
- fn["string"] += [](name n) {return to_string (n);};
+ // Note: let's leave this undocumented for now since it's not often needed
+ // and is a can of worms.
+ //
+ // Note that we must handle NULL values (relied upon by the parser
+ // to provide conversion semantics consistent with untyped values).
+ //
+ f["string"] += [](name* n)
+ {
+ return n != nullptr ? to_string (move (*n)) : string ();
+ };
- fn["name"] += [](const scope* s, name n)
+ // $name(<names>)
+ //
+ // Return the name of a target (or a list of names for a list of targets).
+ //
+ f["name"] += [](const scope* s, name n)
{
return to_target_name (s, move (n)).first.value;
};
- fn["name"] += [](const scope* s, names ns)
+ f["name"] += [](const scope* s, names ns)
{
small_vector<string, 1> r;
@@ -90,14 +232,18 @@ namespace build2
make_move_iterator (r.end ())));
};
- // Note: returns NULL if extension is unspecified (default) and empty if
- // specified as no extension.
+ // $extension(<name>)
+ //
+ // Return the extension of a target.
//
- fn["extension"] += [](const scope* s, name n)
+ // Note that this function returns `null` if the extension is unspecified
+ // (default) and empty string if it's specified as no extension.
+ //
+ f["extension"] += [](const scope* s, name n)
{
return to_target_name (s, move (n)).second;
};
- fn["extension"] += [](const scope* s, names ns)
+ f["extension"] += [](const scope* s, names ns)
{
// Note: can't do multiple due to NULL semantics.
//
@@ -112,11 +258,16 @@ namespace build2
return to_target_name (s, move (n), o).second;
};
- fn["directory"] += [](const scope* s, name n)
+ // $directory(<names>)
+ //
+ // Return the directory of a target (or a list of directories for a list
+ // of targets).
+ //
+ f["directory"] += [](const scope* s, name n)
{
return to_target_name (s, move (n)).first.dir;
};
- fn["directory"] += [](const scope* s, names ns)
+ f["directory"] += [](const scope* s, names ns)
{
small_vector<dir_path, 1> r;
@@ -134,11 +285,16 @@ namespace build2
make_move_iterator (r.end ())));
};
- fn["target_type"] += [](const scope* s, name n)
+ // $target_type(<names>)
+ //
+ // Return the target type name of a target (or a list of target type names
+ // for a list of targets).
+ //
+ f["target_type"] += [](const scope* s, name n)
{
return to_target_name (s, move (n)).first.type;
};
- fn["target_type"] += [](const scope* s, names ns)
+ f["target_type"] += [](const scope* s, names ns)
{
small_vector<string, 1> r;
@@ -156,13 +312,15 @@ namespace build2
make_move_iterator (r.end ())));
};
- // Note: returns NULL if no project specified.
+ // $project(<name>)
//
- fn["project"] += [](const scope* s, name n)
+ // Return the project of a target or `null` if not project-qualified.
+ //
+ f["project"] += [](const scope* s, name n)
{
return to_target_name (s, move (n)).first.proj;
};
- fn["project"] += [](const scope* s, names ns)
+ f["project"] += [](const scope* s, names ns)
{
// Note: can't do multiple due to NULL semantics.
//
@@ -177,11 +335,50 @@ namespace build2
return to_target_name (s, move (n), o).first.proj;
};
+ // $is_a(<name>, <target-type>)
+ //
+ // Return true if the <name>'s target type is-a <target-type>. Note that
+ // this is a dynamic type check that takes into account target type
+ // inheritance.
+ //
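+ // For example (illustrative; relies on exe{} being derived from file{}):
+ //
+ // $is_a(exe{hello}, file)  # true
+ //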
+ f["is_a"] += [](const scope* s, name n, names t)
+ {
+ return is_a (s, move (n), name (), move (t));
+ };
+ f["is_a"] += [](const scope* s, names ns, names t)
+ {
+ auto i (ns.begin ());
+
+ name& n (*i);
+ const name& o (n.pair ? *++i : name ());
+
+ if (++i != ns.end ())
+ fail << "invalid name value: multiple names"; // Like in convert().
+
+ return is_a (s, move (n), o, move (t));
+ };
+
+ // $filter(<names>, <target-types>)
+ // $filter_out(<names>, <target-types>)
+ //
+ // Return names with target types which are-a (`filter`) or not are-a
+ // (`filter_out`) one of <target-types>. See `$is_a()` for background.
+ //
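+ // For example (illustrative; exe{} is derived from file{} while alias{}
+ // is not):
+ //
+ // $filter(exe{hello} alias{tests}, file)      # exe{hello}
+ // $filter_out(exe{hello} alias{tests}, file)  # alias{tests}
+ //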
+ f["filter"] += [](const scope* s, names ns, names ts)
+ {
+ return filter (s, move (ns), move (ts), false /* out */);
+ };
+
+ f["filter_out"] += [](const scope* s, names ns, names ts)
+ {
+ return filter (s, move (ns), move (ts), true /* out */);
+ };
+
// $size(<names>)
//
// Return the number of elements in the sequence.
//
- fn["size"] += [] (names ns)
+ f["size"] += [] (names ns)
{
size_t n (0);
@@ -195,109 +392,60 @@ namespace build2
return n;
};
- // $sort(<names> [, <flags>])
+ // $sort(<names>[, <flags>])
//
// Sort names in ascending order.
//
// The following flags are supported:
//
- // dedup - in addition to sorting also remove duplicates
+ // dedup - in addition to sorting also remove duplicates
//
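+ // For example (illustrative):
+ //
+ // $sort(b a c b)         # a b b c
+ // $sort(b a c b, dedup)  # a b c
+ //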
- fn["sort"] += [] (names ns, optional<names> fs)
+ f["sort"] += [] (names ns, optional<names> fs)
{
//@@ TODO: shouldn't we do this in a pair-aware manner?
sort (ns.begin (), ns.end ());
if (functions_sort_flags (move (fs)))
- ns.erase (unique (ns.begin(), ns.end()), ns.end ());
+ ns.erase (unique (ns.begin (), ns.end ()), ns.end ());
return ns;
};
- // Functions that can be called only on real targets.
+ // $find(<names>, <name>)
//
- function_family ft (m, "target");
-
- // Note that while this function is not technically pure, we don't mark it
- // as such since it can only be called (normally form a recipe) after the
- // target has been matched, meaning that this target is a prerequisite and
- // therefore this impurity has been accounted for.
+ // Return true if the name sequence contains the specified name.
//
- ft["path"] += [](const scope* s, names ns)
+ f["find"] += [](names vs, names v)
{
- if (s == nullptr)
- fail << "target.path() called out of scope";
-
- // Most of the time we will have a single target so optimize for that.
- //
- small_vector<path, 1> r;
-
- for (auto i (ns.begin ()); i != ns.end (); ++i)
- {
- name& n (*i), o;
- const target& t (to_target (*s, move (n), move (n.pair ? *++i : o)));
-
- if (const auto* pt = t.is_a<path_target> ())
- {
- const path& p (pt->path ());
-
- if (&p != &empty_path)
- r.push_back (p);
- else
- fail << "target " << t << " path is not assigned";
- }
- else
- fail << "target " << t << " is not path-based";
- }
-
- // We want the result to be path if we were given a single target and
- // paths if multiple (or zero). The problem is, we cannot distinguish it
- // based on the argument type (e.g., name vs names) since passing an
- // out-qualified single target requires two names.
- //
- if (r.size () == 1)
- return value (move (r[0]));
+ //@@ TODO: shouldn't we do this in a pair-aware manner?
- return value (paths (make_move_iterator (r.begin ()),
- make_move_iterator (r.end ())));
+ return find (vs.begin (), vs.end (),
+ convert<name> (move (v))) != vs.end ();
};
- // This one can only be called on a single target since we don't support
- // containers of process_path's (though we probably could).
+ // $find_index(<names>, <name>)
//
- // Note that while this function is not technically pure, we don't mark it
- // as such for the same reasons as $path() above.
+ // Return the index of the first element in the name sequence that is
+ // equal to the specified name or `$size(names)` if none is found.
//
- fn["process_path"] += [](const scope* s, names ns)
+ f["find_index"] += [](names vs, names v)
{
- if (s == nullptr)
- fail << "target.process_path() called out of scope";
-
- if (ns.empty () || ns.size () != (ns[0].pair ? 2 : 1))
- fail << "target.process_path() expects single target";
-
- name o;
- const target& t (
- to_target (*s, move (ns[0]), move (ns[0].pair ? ns[1] : o)));
-
- if (const auto* et = t.is_a<exe> ())
- {
- process_path r (et->process_path ());
-
- if (r.empty ())
- fail << "target " << t << " path is not assigned";
+ //@@ TODO: shouldn't we do this in a pair-aware manner?
- return r;
- }
- else
- fail << "target " << t << " is not process_path-based" << endf;
+ auto i (find (vs.begin (), vs.end (), convert<name> (move (v))));
+ return i != vs.end () ? i - vs.begin () : vs.size ();
};
// Name-specific overloads from builtins.
//
function_family fb (m, "builtin");
+ // Note that while we should normally handle NULL values (relied upon by
+ // the parser to provide concatenation semantics consistent with untyped
+ // values), the result will unlikely be what the user expected. So for now
+ // we keep it a bit tighter.
+ //
fb[".concat"] += [](dir_path d, name n)
{
d /= n.dir;
diff --git a/libbuild2/functions-name.hxx b/libbuild2/functions-name.hxx
new file mode 100644
index 0000000..34fa4b8
--- /dev/null
+++ b/libbuild2/functions-name.hxx
@@ -0,0 +1,30 @@
+// file : libbuild2/functions-name.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef LIBBUILD2_FUNCTIONS_NAME_HXX
+#define LIBBUILD2_FUNCTIONS_NAME_HXX
+
+#include <libbuild2/types.hxx>
+#include <libbuild2/forward.hxx>
+#include <libbuild2/utility.hxx>
+
+#include <libbuild2/export.hxx>
+
+namespace build2
+{
+ // Helpers that may be useful to other functions that operate on target
+ // name.
+
+ // Resolve the name to target issuing diagnostics and failing if not found.
+ //
+ LIBBUILD2_SYMEXPORT const target&
+ to_target (const scope&, name&&, name&& out);
+
+ // As above but from the names vector which should contain a single name
+ // or an out-qualified name pair (asserted).
+ //
+ LIBBUILD2_SYMEXPORT const target&
+ to_target (const scope&, names&&);
+}
+
+#endif // LIBBUILD2_FUNCTIONS_NAME_HXX
diff --git a/libbuild2/functions-path.cxx b/libbuild2/functions-path.cxx
index 8362d2e..4b114f5 100644
--- a/libbuild2/functions-path.cxx
+++ b/libbuild2/functions-path.cxx
@@ -154,14 +154,66 @@ namespace build2
return path_match (entry, pattern, *start);
}
+ // Don't fail for absolute paths on Windows and, for example, just return
+ // c:/foo for c:\foo.
+ //
+ template <typename P>
+ static inline string
+ posix_string (P&& p)
+ {
+#ifndef _WIN32
+ return move (p).posix_string ();
+#else
+ if (p.relative ())
+ return move (p).posix_string ();
+
+ // Note: also handles root directories.
+ //
+ dir_path d (p.root_directory ());
+ return d.string () + '/' + p.leaf (d).posix_string ();
+#endif
+ }
+
+ // Similar to the above don't fail for absolute paths on Windows.
+ //
+ template <typename P>
+ static inline string
+ posix_representation (P&& p)
+ {
+#ifndef _WIN32
+ return move (p).posix_representation ();
+#else
+ if (p.relative ())
+ return move (p).posix_representation ();
+
+ // Note: also handles root directories.
+ //
+ dir_path d (p.root_directory ());
+ return d.string () + '/' + p.leaf (d).posix_representation ();
+#endif
+ }
+
void
path_functions (function_map& m)
{
function_family f (m, "path", &path_thunk);
- // string
+ // $string(<paths>)
+ //
+ // Return the traditional string representation of a path (or a list of
+ // string representations for a list of paths). In particular, for
+ // directory paths, the traditional representation does not include the
+ // trailing directory separator (except for the POSIX root directory). See
+ // `$representation()` below for the precise string representation.
+ //
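+ // For example (illustrative):
+ //
+ // d = [dir_path] /tmp/build/
+ //
+ // $string($d)          # /tmp/build
+ // $representation($d)  # /tmp/build/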
+
+ // Note that we must handle NULL values (relied upon by the parser
+ // to provide conversion semantics consistent with untyped values).
//
- f["string"] += [](path p) {return move (p).string ();};
+ f["string"] += [](path* p)
+ {
+ return p != nullptr ? move (*p).string () : string ();
+ };
f["string"] += [](paths v)
{
@@ -179,7 +231,53 @@ namespace build2
return r;
};
- // representation
+ // $posix_string(<paths>)
+ // $path.posix_string(<untyped>)
+ //
+ // Return the traditional string representation of a path (or a list of
+ // string representations for a list of paths) using the POSIX directory
+ // separators (forward slashes).
+ //
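+ // For example (illustrative; mostly relevant on Windows):
+ //
+ // $path.posix_string('c:\projects\hello')  # c:/projects/hello
+ //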
+ f["posix_string"] += [](path p) {return posix_string (move (p));};
+ f["posix_string"] += [](dir_path p) {return posix_string (move (p));};
+
+ f["posix_string"] += [](paths v)
+ {
+ strings r;
+ for (auto& p: v)
+ r.push_back (posix_string (move (p)));
+ return r;
+ };
+
+ f["posix_string"] += [](dir_paths v)
+ {
+ strings r;
+ for (auto& p: v)
+ r.push_back (posix_string (move (p)));
+ return r;
+ };
+
+ f[".posix_string"] += [](names ns)
+ {
+ // For each path decide based on the presence of a trailing slash
+ // whether it is a directory. Return as untyped list of strings.
+ //
+ for (name& n: ns)
+ {
+ n = n.directory ()
+ ? posix_string (move (n.dir))
+ : posix_string (convert<path> (move (n)));
+ }
+ return ns;
+ };
+
+ // $representation(<paths>)
+ //
+ // Return the precise string representation of a path (or a list of string
+ // representations for a list of paths). In particular, for directory
+ // paths, the precise representation includes the trailing directory
+ // separator. See `$string()` above for the traditional string
+ // representation.
//
f["representation"] += [](path p) {return move (p).representation ();};
@@ -199,8 +297,61 @@ namespace build2
return r;
};
- // canonicalize
+ // $posix_representation(<paths>)
+ // $path.posix_representation(<untyped>)
+ //
+ // Return the precise string representation of a path (or a list of string
+ // representations for a list of paths) using the POSIX directory
+ // separators (forward slashes).
//
+ f["posix_representation"] += [](path p)
+ {
+ return posix_representation (move (p));
+ };
+
+ f["posix_representation"] += [](dir_path p)
+ {
+ return posix_representation (move (p));
+ };
+
+ f["posix_representation"] += [](paths v)
+ {
+ strings r;
+ for (auto& p: v)
+ r.push_back (posix_representation (move (p)));
+ return r;
+ };
+
+ f["posix_representation"] += [](dir_paths v)
+ {
+ strings r;
+ for (auto& p: v)
+ r.push_back (posix_representation (move (p)));
+ return r;
+ };
+
+ f[".posix_representation"] += [](names ns)
+ {
+ // For each path decide based on the presence of a trailing slash
+ // whether it is a directory. Return as untyped list of strings.
+ //
+ for (name& n: ns)
+ {
+ n = n.directory ()
+ ? posix_representation (move (n.dir))
+ : posix_representation (convert<path> (move (n)));
+ }
+ return ns;
+ };
+
+ // $canonicalize(<paths>)
+ // $path.canonicalize(<untyped>)
+ //
+ // Canonicalize the path (or list of paths) by converting all the
+ // directory separators to the canonical form for the host platform. Note
+ // that multiple directory separators are not collapsed.
+ //
+
// @@ TODO: add ability to specify alternative separator.
//
f["canonicalize"] += [](path p) {p.canonicalize (); return p;};
@@ -236,7 +387,13 @@ namespace build2
return ns;
};
- // normalize
+ // $normalize(<paths>)
+ // $path.normalize(<untyped>)
+ //
+ // Normalize the path (or list of paths) by collapsing the `.` and `..`
+ // components if possible, collapsing multiple directory separators, and
+ // converting all the directory separators to the canonical form for the
+ // host platform.
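+ //
+ // For example, assuming a variable `p` of type `path`:
+ //
+ //   p = [path] a/./b/../c
+ //
+ //   $normalize($p) # a/c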
//
f["normalize"] += [](path p) {p.normalize (); return p;};
f["normalize"] += [](dir_path p) {p.normalize (); return p;};
@@ -271,7 +428,16 @@ namespace build2
return ns;
};
- // actualize
+ // $actualize(<paths>)
+ // $path.actualize(<untyped>)
+ //
+ // Actualize the path (or list of paths) by first normalizing it and then
+ // for host platforms with case-insensitive filesystems obtaining the
+ // actual spelling of the path.
+ //
+ // Note that only an absolute path can be actualized. If a path component
+ // does not exist, then its (and all subsequent components') spelling is
+ // left unchanged. This is a potentially expensive operation.
//
// Note that this function is not pure.
//
@@ -312,11 +478,12 @@ namespace build2
return ns;
};
- // $directory(<path>)
// $directory(<paths>)
+ // $path.directory(<untyped>)
//
- // Return the directory part of the path or empty path if there is no
- // directory. Directory of a root directory is an empty path.
+ // Return the directory part of a path (or a list of directory parts for a
+ // list of paths) or an empty path if there is no directory. The directory
+ // of a root directory is an empty path.
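+ //
+ // For example, assuming `p` is [path] a/b/c:
+ //
+ //   $directory($p) # a/b/ (a dir_path)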
//
f["directory"] += &path::directory;
@@ -350,11 +517,12 @@ namespace build2
return ns;
};
- // $root_directory(<path>)
// $root_directory(<paths>)
+ // $path.root_directory(<untyped>)
//
- // Return the root directory of the path or empty path if the directory is
- // not absolute.
+ // Return the root directory of a path (or a list of root directories for
+ // a list of paths) or an empty path if the specified path is not
+ // absolute.
//
f["root_directory"] += &path::root_directory;
@@ -388,17 +556,22 @@ namespace build2
return ns;
};
- // $leaf(<path>)
- //
- f["leaf"] += &path::leaf;
-
- // $leaf(<path>, <dir-path>)
+ // $leaf(<paths>)
+ // $path.leaf(<untyped>)
// $leaf(<paths>, <dir-path>)
+ // $path.leaf(<untyped>, <dir-path>)
//
- // Return the path without the specified directory part. Return empty path
- // if the paths are the same. Issue diagnostics and fail if the directory
- // is not a prefix of the path. Note: expects both paths to be normalized.
+ // First form (one argument): return the last component of a path (or a
+ // list of last components for a list of paths).
//
+ // Second form (two arguments): return a path without the specified
+ // directory part (or a list of paths without the directory part for a
+ // list of paths). Return an empty path if the paths are the same. Issue
+ // diagnostics and fail if the directory is not a prefix of the
+ // path. Note: expects both paths to be normalized.
+ //
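+ // For example, assuming `p` is [path] a/b/c:
+ //
+ //   $leaf($p)    # c
+ //   $leaf($p, a) # b/c
+ //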
+ f["leaf"] += &path::leaf;
+
f["leaf"] += [](path p, dir_path d)
{
return leaf (p, move (d));
@@ -434,13 +607,13 @@ namespace build2
return ns;
};
- // $relative(<path>, <dir-path>)
// $relative(<paths>, <dir-path>)
+ // $path.relative(<untyped>, <dir-path>)
//
- // Return a path relative to the specified directory that is equivalent to
- // the specified path. Issue diagnostics and fail if a relative path
- // cannot be derived (for example, paths are on different drives on
- // Windows).
+ // Return the path relative to the specified directory that is equivalent
+ // to the specified path (or a list of relative paths for a list of
+ // specified paths). Issue diagnostics and fail if a relative path cannot
+ // be derived (for example, paths are on different drives on Windows).
//
f["relative"] += [](path p, dir_path d)
{
@@ -477,7 +650,11 @@ namespace build2
return ns;
};
- // base
+ // $base(<paths>)
+ // $path.base(<untyped>)
+ //
+ // Return the base part (without the extension) of a path (or a list of
+ // base parts for a list of paths).
//
f["base"] += &path::base;
@@ -511,7 +688,11 @@ namespace build2
return ns;
};
- // extension
+ // $extension(<path>)
+ // $path.extension(<untyped>)
+ //
+ // Return the extension part (without the dot) of a path or an empty
+ // string if there is no extension.
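+ //
+ // For example, assuming `p` is [path] foo.tar.gz:
+ //
+ //   $base($p)      # foo.tar
+ //   $extension($p) # gz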
//
f["extension"] += &extension;
@@ -521,32 +702,29 @@ namespace build2
};
// $size(<paths>)
- // $size(<dir_paths>)
+ // $size(<path>)
+ //
+ // First form: return the number of elements in the paths sequence.
+ //
+ // Second form: return the number of characters (bytes) in the path. Note
+ // that for `dir_path` the result does not include the trailing directory
+ // separator (except for the POSIX root directory).
//
- // Return the number of elements in the sequence.
//
f["size"] += [] (paths v) {return v.size ();};
f["size"] += [] (dir_paths v) {return v.size ();};
- // $size(<path>)
- // $size(<dir_path>)
- //
- // Return the number of characters (bytes) in the path. Note that for
- // dir_path the result does not include the trailing directory separator
- // (except for the POSIX root directory).
- //
f["size"] += [] (path v) {return v.size ();};
f["size"] += [] (dir_path v) {return v.size ();};
- // $sort(<paths> [, <flags>])
- // $sort(<dir_paths> [, <flags>])
+ // $sort(<paths>[, <flags>])
//
- // Sort paths in ascending order. Note that on case-insensitive filesystem
- // the order is case-insensitive.
+ // Sort paths in ascending order. Note that on host platforms with a
+ // case-insensitive filesystem the order is case-insensitive.
//
// The following flags are supported:
//
- // dedup - in addition to sorting also remove duplicates
+ // dedup - in addition to sorting also remove duplicates
//
f["sort"] += [](paths v, optional<names> fs)
{
@@ -568,34 +746,73 @@ namespace build2
return v;
};
- // $path.match(<val>, <pat> [, <start>])
+ // $find(<paths>, <path>)
//
- // Match a filesystem entry name against a name pattern (both are strings),
- // or a filesystem entry path against a path pattern. For the latter case
- // the start directory may also be required (see below). The semantics of
- // the pattern and name/entry arguments is determined according to the
+ // Return true if the paths sequence contains the specified path. Note
+ // that on host platforms with a case-insensitive filesystem the
+ // comparison is case-insensitive.
+ //
+ f["find"] += [](paths vs, value v)
+ {
+ return find (vs.begin (), vs.end (),
+ convert<path> (move (v))) != vs.end ();
+ };
+
+ f["find"] += [](dir_paths vs, value v)
+ {
+ return find (vs.begin (), vs.end (),
+ convert<dir_path> (move (v))) != vs.end ();
+ };
+
+ // $find_index(<paths>, <path>)
+ //
+ // Return the index of the first element in the paths sequence that is
+ // equal to the specified path or `$size(paths)` if none is found. Note
+ // that on host platforms with a case-insensitive filesystem the
+ // comparison is case-insensitive.
+ //
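+ // For example, assuming `ps` is [paths] a/x b/y c/z:
+ //
+ //   $find($ps, b/y)       # true
+ //   $find_index($ps, c/z) # 2
+ //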
+ f["find_index"] += [](paths vs, value v)
+ {
+ auto i (find (vs.begin (), vs.end (), convert<path> (move (v))));
+ return i != vs.end () ? i - vs.begin () : vs.size ();
+ };
+
+ f["find_index"] += [](dir_paths vs, value v)
+ {
+ auto i (find (vs.begin (), vs.end (), convert<dir_path> (move (v))));
+ return i != vs.end () ? i - vs.begin () : vs.size ();
+ };
+
+ // $path.match(<entry>, <pattern>[, <start-dir>])
+ //
+ // Match a filesystem entry name against a name pattern (both are
+ // strings), or a filesystem entry path against a path pattern. For the
+ // latter case the start directory may also be required (see below). The
+ // pattern is a shell-like wildcard pattern. The semantics of the
+ // <pattern> and <entry> arguments is determined according to the
// following rules:
//
- // - The arguments must be of the string or path types, or be untyped.
+ // 1. The arguments must be of the string or path types, or be untyped.
//
- // - If one of the arguments is typed, then the other one must be of the
- // same type or be untyped. In the later case, an untyped argument is
- // converted to the type of the other argument.
+ // 2. If one of the arguments is typed, then the other one must be of the
+ // same type or be untyped. In the latter case, an untyped argument is
+ // converted to the type of the other argument.
//
- // - If both arguments are untyped and the start directory is specified,
- // then the arguments are converted to the path type.
+ // 3. If both arguments are untyped and the start directory is specified,
+ // then the arguments are converted to the path type.
//
- // - If both arguments are untyped and the start directory is not
- // specified, then, if one of the arguments is syntactically a path (the
- // value contains a directory separator), convert them to the path type,
- // otherwise to the string type (match as names).
+ // 4. If both arguments are untyped and the start directory is not
+ // specified, then, if one of the arguments is syntactically a path (the
+ // value contains a directory separator), they are converted to the path
+ // type, otherwise -- to the string type (match as names).
//
- // If pattern and entry paths are both either absolute or relative and
- // non-empty, and the first pattern component is not a self-matching
- // wildcard (doesn't contain ***), then the start directory is not
- // required, and is ignored if specified. Otherwise, the start directory
- // must be specified and be an absolute path.
+ // If pattern and entry paths are both either absolute or relative and not
+ // empty, and the first pattern component is not a self-matching wildcard
+ // (doesn't contain `***`), then the start directory is not required, and
+ // is ignored if specified. Otherwise, the start directory must be
+ // specified and be an absolute path.
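+ //
+ // For example:
+ //
+ //   $path.match(foo.txt, '*.txt')         # true
+ //   $path.match(foo/bar.txt, 'foo/*.txt') # true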
//
+
// Name matching.
//
f[".match"] += [](string name, string pattern)
@@ -655,6 +872,11 @@ namespace build2
//
function_family b (m, "builtin", &path_thunk);
+ // Note that while we should normally handle NULL values (relied upon by
+ // the parser to provide concatenation semantics consistent with untyped
+ // values), the result is unlikely to be what the user expects, especially
+ // if the NULL value is on the LHS. So for now we keep it a bit tighter.
+ //
b[".concat"] += &concat_path_string;
b[".concat"] += &concat_dir_path_string;
@@ -667,5 +889,15 @@ namespace build2
{
return concat_dir_path_string (move (l), convert<string> (move (ur)));
};
+
+ b[".concat"] += [](dir_path l, dir_path r)
+ {
+ return value (move (l /= r));
+ };
+
+ b[".concat"] += [](dir_path l, path r)
+ {
+ return value (path_cast<path> (move (l)) /= r);
+ };
}
}
diff --git a/libbuild2/functions-process-path.cxx b/libbuild2/functions-process-path.cxx
index 486a806..6746623 100644
--- a/libbuild2/functions-process-path.cxx
+++ b/libbuild2/functions-process-path.cxx
@@ -11,24 +11,47 @@ namespace build2
void
process_path_functions (function_map& m)
{
- {
- function_family f (m, "process_path");
+ function_family f (m, "process_path");
+
+ // $recall(<process-path>)
+ //
+ // Return the recall path of an executable, that is, a path that is not
+ // necessarily absolute but which nevertheless can be used to re-run the
+ // executable in the current environment. This path, for example, could be
+ // used in diagnostics when printing the failing command line.
+ //
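+ // For example, for a compiler that was found as g++ via the PATH
+ // environment variable, the recall path could be just g++ while the
+ // effective path (see $effect() below) could be /usr/bin/g++.
+ //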
+
+ // As discussed in value_traits<process_path>, we always have recall.
+ //
+ f["recall"] += &process_path::recall;
- // As discussed in value_traits<process_path>, we always have recall.
- //
- f["recall"] += &process_path::recall;
- f["effect"] += [](process_path p)
- {
- return move (p.effect.empty () ? p.recall : p.effect);
- };
- }
+ // $effect(<process-path>)
+ //
+ // Return the effective path of an executable, that is, the absolute path
+ // to the executable that will also include any omitted extensions, etc.
+ //
+ f["effect"] += [] (process_path p)
{
- function_family f (m, "process_path_ex");
+ return move (p.effect.empty () ? p.recall : p.effect);
+ };
+
+ // $name(<process-path-ex>)
+ //
+ // Return the stable process name for diagnostics.
+ //
+ f["name"] += &process_path_ex::name;
+
+ // $checksum(<process-path-ex>)
+ //
+ // Return the executable checksum for change tracking.
+ //
+ f["checksum"] += &process_path_ex::checksum;
- f["name"] += &process_path_ex::name;
- f["checksum"] += &process_path_ex::checksum;
- f["env_checksum"] += &process_path_ex::env_checksum;
- }
+ // $env_checksum(<process-path-ex>)
+ //
+ // Return the environment checksum for change tracking.
+ //
+ f["env_checksum"] += &process_path_ex::env_checksum;
}
}
diff --git a/libbuild2/functions-process.cxx b/libbuild2/functions-process.cxx
index c4e5c24..6faa798 100644
--- a/libbuild2/functions-process.cxx
+++ b/libbuild2/functions-process.cxx
@@ -4,6 +4,8 @@
#include <libbutl/regex.hxx>
#include <libbutl/builtin.hxx>
+#include <libbuild2/scope.hxx>
+#include <libbuild2/context.hxx>
#include <libbuild2/function.hxx>
#include <libbuild2/variable.hxx>
@@ -141,6 +143,9 @@ namespace build2
builtin_callbacks cb;
fdpipe ofd (open_pipe ());
+ if (verb >= 3)
+ print_process (process_args (bn, args));
+
uint8_t rs; // Storage.
butl::builtin b (bf (rs,
args,
@@ -172,7 +177,16 @@ namespace build2
// While assuming that the builtin has issued the diagnostics on failure
// we still print the error message (see process_finish() for details).
//
- fail << bn << " builtin " << process_exit (rs) << endf;
+ diag_record dr;
+ dr << fail << "builtin " << bn << " " << process_exit (rs);
+
+ if (verb >= 1 && verb <= 2)
+ {
+ dr << info << "command line: ";
+ print_process (dr, process_args (bn, args));
+ }
+
+ dr << endf;
}
catch (const system_error& e)
{
@@ -181,18 +195,32 @@ namespace build2
}
static inline value
- run_builtin (builtin_function* bf, const strings& args, const string& bn)
+ run_builtin (const scope* s,
+ builtin_function* bf,
+ const strings& args,
+ const string& bn)
{
+ // See below.
+ //
+ if (s != nullptr && s->ctx.phase != run_phase::load)
+ fail << "process.run() called during " << s->ctx.phase << " phase";
+
return run_builtin_impl (bf, args, bn, read);
}
static inline value
- run_builtin_regex (builtin_function* bf,
+ run_builtin_regex (const scope* s,
+ builtin_function* bf,
const strings& args,
const string& bn,
const string& pat,
const optional<string>& fmt)
{
+ // See below.
+ //
+ if (s != nullptr && s->ctx.phase != run_phase::load)
+ fail << "process.run_regex() called during " << s->ctx.phase << " phase";
+
// Note that we rely on the "small function object" optimization here.
//
return run_builtin_impl (bf, args, bn,
@@ -293,6 +321,9 @@ namespace build2
[] (const string& s) {return s.c_str ();});
cargs.push_back (nullptr);
+ // Note that for now these functions can only be called during the load
+ // phase (see below) and so no diagnostics buffering is needed.
+ //
return run_start (3 /* verbosity */,
pp,
cargs,
@@ -309,15 +340,7 @@ namespace build2
void
process_finish (const scope*, const cstrings& args, process& pr)
{
- try
- {
- if (!pr.wait ())
- fail << "process " << args[0] << " " << *pr.exit;
- }
- catch (const process_error& e)
- {
- fail << "unable to execute " << args[0] << ": " << e;
- }
+ run_finish (args, pr, 2 /* verbosity */);
}
// Run a process.
@@ -352,6 +375,15 @@ namespace build2
static inline value
run_process (const scope* s, const process_path& pp, const strings& args)
{
+ // The only plausible place where these functions can be called outside
+ // the load phase is scripts, and there it doesn't make much sense to use
+ // them (the same can be achieved with commands in a uniform manner). Note
+ // that if there is no scope, then this is most likely (certainly?) the
+ // load phase (for example, command line).
+ //
+ if (s != nullptr && s->ctx.phase != run_phase::load)
+ fail << "process.run() called during " << s->ctx.phase << " phase";
+
return run_process_impl (s, pp, args, read);
}
@@ -362,6 +394,11 @@ namespace build2
const string& pat,
const optional<string>& fmt)
{
+ // See above.
+ //
+ if (s != nullptr && s->ctx.phase != run_phase::load)
+ fail << "process.run_regex() called during " << s->ctx.phase << " phase";
+
// Note that we rely on the "small function object" optimization here.
//
return run_process_impl (s, pp, args,
@@ -377,7 +414,7 @@ namespace build2
if (builtin_function* bf = builtin (args))
{
pair<string, strings> ba (builtin_args (bf, move (args), "run"));
- return run_builtin (bf, ba.second, ba.first);
+ return run_builtin (s, bf, ba.second, ba.first);
}
else
{
@@ -395,7 +432,7 @@ namespace build2
if (builtin_function* bf = builtin (args))
{
pair<string, strings> ba (builtin_args (bf, move (args), "run_regex"));
- return run_builtin_regex (bf, ba.second, ba.first, pat, fmt);
+ return run_builtin_regex (s, bf, ba.second, ba.first, pat, fmt);
}
else
{
@@ -413,14 +450,15 @@ namespace build2
// $process.run(<prog>[ <args>...])
//
- // Run builtin or external program and return trimmed stdout.
+ // Run a builtin or external program and return the trimmed `stdout` output.
//
// Note that if the result of executing the program can be affected by
// environment variables and this result can in turn affect the build
// result, then such variables should be reported with the
- // config.environment directive.
+ // `config.environment` directive.
//
- // Note that this function is not pure.
+ // Note that this function is not pure and can only be called during the
+ // load phase.
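+ //
+ // For example (a hypothetical usage):
+ //
+ //   commit = $process.run(git rev-parse HEAD)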
//
f.insert (".run", false) += [](const scope* s, names args)
{
@@ -432,21 +470,23 @@ namespace build2
return run_process (s, pp, strings ());
};
- // $process.run_regex(<prog>[ <args>...], <pat> [, <fmt>])
+ // $process.run_regex(<prog>[ <args>...], <pat>[, <fmt>])
//
- // Run builtin or external program and return stdout lines matched and
- // optionally processed with regex.
+ // Run a builtin or external program and return `stdout` output lines
+ // matched and optionally processed with a regular expression.
//
// Each line of stdout (including the customary trailing blank) is matched
// (as a whole) against <pat> and, if successful, returned, optionally
- // processed with <fmt>, as an element of a list.
+ // processed with <fmt>, as an element of a list. See the `$regex.*()`
+ // function family for details on regular expressions and format strings.
//
// Note that if the result of executing the program can be affected by
// environment variables and this result can in turn affect the build
// result, then such variables should be reported with the
- // config.environment directive.
+ // `config.environment` directive.
//
- // Note that this function is not pure.
+ // Note that this function is not pure and can only be called during the
+ // load phase.
//
{
auto e (f.insert (".run_regex", false));
diff --git a/libbuild2/functions-project-name.cxx b/libbuild2/functions-project-name.cxx
index 145e62c..23523f0 100644
--- a/libbuild2/functions-project-name.cxx
+++ b/libbuild2/functions-project-name.cxx
@@ -13,8 +13,28 @@ namespace build2
{
function_family f (m, "project_name");
- f["string"] += [](project_name p) {return move (p).string ();};
+ // $string(<project-name>)
+ //
+ // Return the string representation of a project name. See also the
+ // `$variable()` function below.
+ //
+
+ // Note that we must handle NULL values (relied upon by the parser
+ // to provide conversion semantics consistent with untyped values).
+ //
+ f["string"] += [](project_name* p)
+ {
+ return p != nullptr ? move (*p).string () : string ();
+ };
+ // $base(<project-name>[, <extension>])
+ //
+ // Return the base part (without the extension) of a project name.
+ //
+ // If <extension> is specified, then only remove that extension. Note that
+ // <extension> should not include the dot and the comparison is always
+ // case-insensitive.
+ //
f["base"] += [](project_name p, optional<string> ext)
{
return ext ? p.base (ext->c_str ()) : p.base ();
@@ -25,13 +45,30 @@ namespace build2
return p.base (convert<string> (move (ext)).c_str ());
};
+ // $extension(<project-name>)
+ //
+ // Return the extension part (without the dot) of a project name or an
+ // empty string if there is no extension.
+ //
f["extension"] += &project_name::extension;
+
+ // $variable(<project-name>)
+ //
+ // Return the string representation of a project name that is sanitized to
+ // be usable as a variable name. Specifically, `.`, `-`, and `+` are
+ // replaced with `_`.
+ //
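+ // For example, assuming the project name (as in $project) is libhello.bash:
+ //
+ //   $base($project)      # libhello
+ //   $extension($project) # bash
+ //   $variable($project)  # libhello_bash
+ //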
f["variable"] += &project_name::variable;
// Project name-specific overloads from builtins.
//
function_family b (m, "builtin");
+ // Note that while we should normally handle NULL values (relied upon by
+ // the parser to provide concatenation semantics consistent with untyped
+ // values), the result will unlikely be what the user expected. So for now
+ // we keep it a bit tighter.
+ //
b[".concat"] += [](project_name n, string s)
{
string r (move (n).string ());
diff --git a/libbuild2/functions-regex.cxx b/libbuild2/functions-regex.cxx
index 2f0d122..cf3ffd0 100644
--- a/libbuild2/functions-regex.cxx
+++ b/libbuild2/functions-regex.cxx
@@ -21,7 +21,7 @@ namespace build2
// Optimize for the string value type.
//
if (v.type != &value_traits<string>::value_type)
- untypify (v);
+ untypify (v, true /* reduce */);
return convert<string> (move (v));
}
@@ -69,7 +69,7 @@ namespace build2
else if (s == "return_subs")
subs = true;
else
- throw invalid_argument ("invalid flag '" + s + "'");
+ throw invalid_argument ("invalid flag '" + s + '\'');
}
}
@@ -92,10 +92,7 @@ namespace build2
names r;
for (size_t i (1); i != m.size (); ++i)
- {
- if (m[i].matched)
- r.emplace_back (m.str (i));
- }
+ r.emplace_back (m[i].matched ? m.str (i) : string ());
return value (move (r));
}
@@ -129,7 +126,7 @@ namespace build2
else if (s == "return_subs")
subs = true;
else
- throw invalid_argument ("invalid flag '" + s + "'");
+ throw invalid_argument ("invalid flag '" + s + '\'');
}
}
@@ -161,10 +158,7 @@ namespace build2
if (subs)
{
for (size_t i (1); i != m.size (); ++i)
- {
- if (m[i].matched)
- r.emplace_back (m.str (i));
- }
+ r.emplace_back (m[i].matched ? m.str (i) : string ());
}
return value (move (r));
@@ -174,7 +168,9 @@ namespace build2
}
static pair<regex::flag_type, regex_constants::match_flag_type>
- parse_replacement_flags (optional<names>&& flags, bool first_only = true)
+ parse_replacement_flags (optional<names>&& flags,
+ bool first_only = true,
+ bool* copy_empty = nullptr)
{
regex::flag_type rf (regex::ECMAScript);
regex_constants::match_flag_type mf (regex_constants::match_default);
@@ -191,8 +187,10 @@ namespace build2
mf |= regex_constants::format_first_only;
else if (s == "format_no_copy")
mf |= regex_constants::format_no_copy;
+ else if (copy_empty != nullptr && s == "format_copy_empty")
+ *copy_empty = true;
else
- throw invalid_argument ("invalid flag '" + s + "'");
+ throw invalid_argument ("invalid flag '" + s + '\'');
}
}
@@ -334,7 +332,10 @@ namespace build2
const string& fmt,
optional<names>&& flags)
{
- auto fl (parse_replacement_flags (move (flags), false));
+ bool copy_empty (false);
+ auto fl (parse_replacement_flags (move (flags),
+ false /* first_only */,
+ &copy_empty));
regex rge (parse_regex (re, fl.first));
names r;
@@ -342,10 +343,10 @@ namespace build2
try
{
regex_replace_search (to_string (move (v)), rge, fmt,
- [&r] (string::const_iterator b,
- string::const_iterator e)
+ [copy_empty, &r] (string::const_iterator b,
+ string::const_iterator e)
{
- if (b != e)
+ if (copy_empty || b != e)
r.emplace_back (string (b, e));
},
fl.second);
@@ -364,26 +365,29 @@ namespace build2
// apply() overloads (below) for details.
//
static names
- apply (names&& s,
+ apply (names&& ns,
const string& re,
const string& fmt,
optional<names>&& flags)
{
- auto fl (parse_replacement_flags (move (flags)));
+ bool copy_empty (false);
+ auto fl (parse_replacement_flags (move (flags),
+ true /* first_only */,
+ &copy_empty));
regex rge (parse_regex (re, fl.first));
names r;
try
{
- for (auto& v: s)
+ for (auto& n: ns)
{
- string s (regex_replace_search (convert<string> (move (v)),
+ string s (regex_replace_search (convert<string> (move (n)),
rge,
fmt,
fl.second).first);
- if (!s.empty ())
+ if (copy_empty || !s.empty ())
r.emplace_back (move (s));
}
}
@@ -411,7 +415,7 @@ namespace build2
if (s == "icase")
r |= regex::icase;
else
- throw invalid_argument ("invalid flag '" + s + "'");
+ throw invalid_argument ("invalid flag '" + s + '\'');
}
}
@@ -422,67 +426,141 @@ namespace build2
// See find_match() overloads (below) for details.
//
static bool
- find_match (names&& s, const string& re, optional<names>&& flags)
+ find_match (names&& ns, const string& re, optional<names>&& flags)
{
regex::flag_type fl (parse_find_flags (move (flags)));
regex rge (parse_regex (re, fl));
- for (auto& v: s)
+ for (auto& n: ns)
{
- if (regex_match (convert<string> (move (v)), rge))
+ if (regex_match (convert<string> (move (n)), rge))
return true;
}
return false;
}
+ // Return a list of elements that match (matching is true) or don't match
+ // (matching is false) the regular expression. See filter_match() and
+ // filter_out_match() overloads (below) for details.
+ //
+ static names
+ filter_match (names&& ns,
+ const string& re,
+ optional<names>&& flags,
+ bool matching)
+ {
+ regex::flag_type fl (parse_find_flags (move (flags)));
+ regex rge (parse_regex (re, fl));
+
+ names r;
+
+ for (name& n: ns)
+ {
+ // Note that we need to preserve the element while converting it to
+ // string since we may add it to the resulting list. But let's optimize
+ // this for the simple value case by round-tripping it through the
+ // string.
+ //
+ bool s (n.simple ());
+ string v (convert<string> (s ? move (n) : name (n)));
+
+ if (regex_match (v, rge) == matching)
+ r.emplace_back (s ? name (move (v)) : move (n));
+ }
+
+ return r;
+ }
+
// Return true if a part of any of the list elements matches the regular
// expression. See find_search() overloads (below) for details.
//
static bool
- find_search (names&& s, const string& re, optional<names>&& flags)
+ find_search (names&& ns, const string& re, optional<names>&& flags)
{
regex::flag_type fl (parse_find_flags (move (flags)));
regex rge (parse_regex (re, fl));
- for (auto& v: s)
+ for (auto& n: ns)
{
- if (regex_search (convert<string> (move (v)), rge))
+ if (regex_search (convert<string> (move (n)), rge))
return true;
}
return false;
}
+ // Return those elements of a list which have a match (matching is true) or
+ // have no match (matching is false) between the regular expression and
+ // some/any part of the element. See filter_search() and filter_out_search()
+ // overloads (below) for details.
+ //
+ static names
+ filter_search (names&& ns,
+ const string& re,
+ optional<names>&& flags,
+ bool matching)
+ {
+ regex::flag_type fl (parse_find_flags (move (flags)));
+ regex rge (parse_regex (re, fl));
+
+ names r;
+
+ for (auto& n: ns)
+ {
+ // Note that we need to preserve the element while converting it to
+ // string since we may add it to the resulting list. But let's optimize
+ // this for the simple value case by round-tripping it through the
+ // string.
+ //
+ bool s (n.simple ());
+ string v (convert<string> (s ? move (n) : name (n)));
+
+ if (regex_search (v, rge) == matching)
+ r.emplace_back (s ? name (move (v)) : move (n));
+ }
+
+ return r;
+ }
+
// Replace matched parts of list elements using the format string and
// concatenate the transformed elements. See merge() overloads (below) for
// details.
//
static names
- merge (names&& s,
+ merge (names&& ns,
const string& re,
const string& fmt,
optional<string>&& delim,
optional<names>&& flags)
{
- auto fl (parse_replacement_flags (move (flags)));
+ bool copy_empty (false);
+ auto fl (parse_replacement_flags (move (flags),
+ true /* first_only */,
+ &copy_empty));
regex rge (parse_regex (re, fl.first));
string rs;
try
{
- for (auto& v: s)
+ bool first (true);
+ for (auto& n: ns)
{
- string s (regex_replace_search (convert<string> (move (v)),
+ string s (regex_replace_search (convert<string> (move (n)),
rge,
fmt,
fl.second).first);
- if (!s.empty ())
+ if (copy_empty || !s.empty ())
{
- if (!rs.empty () && delim)
- rs.append (*delim);
+ if (delim)
+ {
+ if (first)
+ first = false;
+ else
+ rs.append (*delim);
+ }
rs.append (s);
}
@@ -510,129 +588,203 @@ namespace build2
//
// Match a value of an arbitrary type against the regular expression.
// Convert the value to string prior to matching. Return the boolean value
- // unless return_subs flag is specified (see below), in which case return
- // names (NULL if no match).
+ // unless `return_subs` flag is specified (see below), in which case
+ // return names (or `null` if no match).
//
// The following flags are supported:
//
- // icase - match ignoring case
+ // icase - match ignoring case
//
- // return_subs - return names (rather than boolean), that contain
- // sub-strings that match the marked sub-expressions and
- // NULL if no match
+ // return_subs - return names (rather than boolean), that contain
+ // sub-strings that match the marked sub-expressions
+ // and null if no match
//
- f[".match"] += [](value s, string re, optional<names> flags)
+ f[".match"] += [](value v, string re, optional<names> flags)
{
- return match (move (s), re, move (flags));
+ return match (move (v), re, move (flags));
};
- f[".match"] += [](value s, names re, optional<names> flags)
+ f[".match"] += [](value v, names re, optional<names> flags)
{
- return match (move (s), convert<string> (move (re)), move (flags));
+ return match (move (v), convert<string> (move (re)), move (flags));
};
// $regex.find_match(<vals>, <pat> [, <flags>])
//
// Match list elements against the regular expression and return true if
- // the match is found. Convert the elements to string prior to matching.
+ // the match is found. Convert the elements to strings prior to matching.
+ //
+ // The following flags are supported:
+ //
+ // icase - match ignoring case
+ //
+ f[".find_match"] += [](names ns, string re, optional<names> flags)
+ {
+ return find_match (move (ns), re, move (flags));
+ };
+
+ f[".find_match"] += [](names ns, names re, optional<names> flags)
+ {
+ return find_match (move (ns), convert<string> (move (re)), move (flags));
+ };
+
+ // $regex.filter_match(<vals>, <pat> [, <flags>])
+ // $regex.filter_out_match(<vals>, <pat> [, <flags>])
+ //
+ // Return elements of a list that match (`filter`) or do not match
+ // (`filter_out`) the regular expression. Convert the elements to strings
+ // prior to matching.
//
// The following flags are supported:
//
- // icase - match ignoring case
+ // icase - match ignoring case
//
- f[".find_match"] += [](names s, string re, optional<names> flags)
+ f[".filter_match"] += [](names ns, string re, optional<names> flags)
+ {
+ return filter_match (move (ns), re, move (flags), true /* matching */);
+ };
+
+ f[".filter_match"] += [](names ns, names re, optional<names> flags)
{
- return find_match (move (s), re, move (flags));
+ return filter_match (move (ns),
+ convert<string> (move (re)),
+ move (flags),
+ true /* matching */);
};
- f[".find_match"] += [](names s, names re, optional<names> flags)
+ f[".filter_out_match"] += [](names s, string re, optional<names> flags)
{
- return find_match (move (s), convert<string> (move (re)), move (flags));
+ return filter_match (move (s), re, move (flags), false /* matching */);
+ };
+
+ f[".filter_out_match"] += [](names ns, names re, optional<names> flags)
+ {
+ return filter_match (move (ns),
+ convert<string> (move (re)),
+ move (flags),
+ false /* matching */);
};
// $regex.search(<val>, <pat> [, <flags>])
//
// Determine if there is a match between the regular expression and some
// part of a value of an arbitrary type. Convert the value to string prior
- // to searching. Return the boolean value unless return_match or
- // return_subs flag is specified (see below) in which case return names
- // (NULL if no match).
+ // to searching. Return the boolean value unless `return_match` or
+ // `return_subs` flag is specified (see below) in which case return names
+ // (`null` if no match).
//
// The following flags are supported:
//
- // icase - match ignoring case
+ // icase - match ignoring case
//
- // return_match - return names (rather than boolean), that contain a
- // sub-string that matches the whole regular expression and
- // NULL if no match
+ // return_match - return names (rather than boolean), that contain a
+ // sub-string that matches the whole regular expression
+ // and null if no match
//
- // return_subs - return names (rather than boolean), that contain
- // sub-strings that match the marked sub-expressions and
- // NULL if no match
+ // return_subs - return names (rather than boolean), that contain
+ // sub-strings that match the marked sub-expressions
+ // and null if no match
//
- // If both return_match and return_subs flags are specified then the
+ // If both `return_match` and `return_subs` flags are specified then the
// sub-string that matches the whole regular expression comes first.
//
- f[".search"] += [](value s, string re, optional<names> flags)
+ f[".search"] += [](value v, string re, optional<names> flags)
{
- return search (move (s), re, move (flags));
+ return search (move (v), re, move (flags));
};
- f[".search"] += [](value s, names re, optional<names> flags)
+ f[".search"] += [](value v, names re, optional<names> flags)
{
- return search (move (s), convert<string> (move (re)), move (flags));
+ return search (move (v), convert<string> (move (re)), move (flags));
};
// $regex.find_search(<vals>, <pat> [, <flags>])
//
// Determine if there is a match between the regular expression and some
- // part of any of the list elements. Convert the elements to string prior
+ // part of any of the list elements. Convert the elements to strings prior
// to matching.
//
// The following flags are supported:
//
- // icase - match ignoring case
+ // icase - match ignoring case
//
- f[".find_search"] += [](names s, string re, optional<names> flags)
+ f[".find_search"] += [](names ns, string re, optional<names> flags)
{
- return find_search (move (s), re, move (flags));
+ return find_search (move (ns), re, move (flags));
};
- f[".find_search"] += [](names s, names re, optional<names> flags)
+ f[".find_search"] += [](names ns, names re, optional<names> flags)
{
- return find_search (move (s),
+ return find_search (move (ns),
convert<string> (move (re)),
move (flags));
};
+ // $regex.filter_search(<vals>, <pat> [, <flags>])
+ // $regex.filter_out_search(<vals>, <pat> [, <flags>])
+ //
+ // Return elements of a list for which there is a match (`filter`) or no
+ // match (`filter_out`) between the regular expression and some part of
+ // the element. Convert the elements to strings prior to matching.
+ //
+ // The following flags are supported:
+ //
+ // icase - match ignoring case
+ //
+ f[".filter_search"] += [](names ns, string re, optional<names> flags)
+ {
+ return filter_search (move (ns), re, move (flags), true /* matching */);
+ };
+
+ f[".filter_search"] += [](names ns, names re, optional<names> flags)
+ {
+ return filter_search (move (ns),
+ convert<string> (move (re)),
+ move (flags),
+ true /* matching */);
+ };
+
+ f[".filter_out_search"] += [](names ns, string re, optional<names> flags)
+ {
+ return filter_search (move (ns), re, move (flags), false /* matching */);
+ };
+
+ f[".filter_out_search"] += [](names ns, names re, optional<names> flags)
+ {
+ return filter_search (move (ns),
+ convert<string> (move (re)),
+ move (flags),
+ false /* matching */);
+ };
+
// $regex.replace(<val>, <pat>, <fmt> [, <flags>])
//
// Replace matched parts in a value of an arbitrary type, using the format
// string. Convert the value to string prior to matching. The result value
// is always untyped, regardless of the argument type.
//
- // Substitution escape sequences are extended with a subset of Perl
- // sequences (see libbutl/regex.hxx for details).
- //
// The following flags are supported:
//
- // icase - match ignoring case
+ // icase - match ignoring case
//
- // format_first_only - only replace the first match
+ // format_first_only - only replace the first match
//
- // format_no_copy - do not copy unmatched value parts into the result
+ // format_no_copy - do not copy unmatched value parts into the
+ // result
//
- // If both format_first_only and format_no_copy flags are specified then
- // the result will only contain the replacement of the first match.
+ // If both `format_first_only` and `format_no_copy` flags are specified
+ // then the result will only contain the replacement of the first match.
//
- f[".replace"] += [](value s, string re, string fmt, optional<names> flags)
+ // See also `$string.replace()`.
+ //
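+ // For example:
+ //
+ //   $regex.replace('foo.cxx', '\.cxx$', '.o') # foo.o
+ //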
+ f[".replace"] += [](value v, string re, string fmt, optional<names> flags)
{
- return replace (move (s), re, fmt, move (flags));
+ return replace (move (v), re, fmt, move (flags));
};
- f[".replace"] += [](value s, names re, names fmt, optional<names> flags)
+ f[".replace"] += [](value v, names re, names fmt, optional<names> flags)
{
- return replace (move (s),
+ return replace (move (v),
convert<string> (move (re)),
convert<string> (move (fmt)),
move (flags));
@@ -641,38 +793,38 @@ namespace build2
// $regex.replace_lines(<val>, <pat>, <fmt> [, <flags>])
//
// Convert the value to string, parse it into lines and for each line
- // apply the $regex.replace() function with the specified pattern, format,
- // and flags. If the format argument is NULL, omit the "all-NULL"
- // replacements for the matched lines from the result. Return unmatched
- // lines and line replacements as a name list unless return_lines flag is
- // specified (see below), in which case return a single multi-line simple
- // name value.
+ // apply the `$regex.replace()` function with the specified pattern,
+ // format, and flags. If the format argument is `null`, omit the
+ // "all-`null`" replacements for the matched lines from the result. Return
+ // unmatched lines and line replacements as a `name` list unless
+ // `return_lines` flag is specified (see below), in which case return a
+ // single multi-line simple `name` value.
//
- // The following flags are supported in addition to the $regex.replace()
- // function flags:
+ // The following flags are supported in addition to the `$regex.replace()`
+ // function's flags:
//
- // return_lines - return the simple name (rather than a name list)
- // containing the unmatched lines and line replacements
- // separated with newlines.
+ // return_lines - return the simple name (rather than a name list)
+ // containing the unmatched lines and line replacements
+ // separated with newlines.
//
- // Note that if format_no_copy is specified, unmatched lines are not
+ // Note that if `format_no_copy` is specified, unmatched lines are not
// copied either.
//
- f[".replace_lines"] += [](value s,
- string re,
- string fmt,
- optional<names> flags)
+ f[".replace_lines"] += [](value v,
+ string re,
+ string fmt,
+ optional<names> flags)
{
- return replace_lines (move (s), re, move (fmt), move (flags));
+ return replace_lines (move (v), re, move (fmt), move (flags));
};
- f[".replace_lines"] += [](value s,
- names re,
- names* fmt,
- optional<names> flags)
+ f[".replace_lines"] += [](value v,
+ names re,
+ names* fmt,
+ optional<names> flags)
{
return replace_lines (
- move (s),
+ move (v),
convert<string> (move (re)),
(fmt != nullptr
? optional<string> (convert<string> (move (*fmt)))
@@ -683,26 +835,27 @@ namespace build2
// $regex.split(<val>, <pat>, <fmt> [, <flags>])
//
// Split a value of an arbitrary type into a list of unmatched value parts
- // and replacements of the matched parts, omitting empty ones. Convert the
- // value to string prior to matching.
- //
- // Substitution escape sequences are extended with a subset of Perl
- // sequences (see libbutl/regex.hxx for details).
+ // and replacements of the matched parts, omitting empty ones (unless the
+ // `format_copy_empty` flag is specified). Convert the value to string
+ // prior to matching.
//
// The following flags are supported:
//
- // icase - match ignoring case
+ // icase - match ignoring case
+ //
+ // format_no_copy - do not copy unmatched value parts into the
+ // result
//
- // format_no_copy - do not copy unmatched value parts into the result
+ // format_copy_empty - copy empty elements into the result
//
- f[".split"] += [](value s, string re, string fmt, optional<names> flags)
+ f[".split"] += [](value v, string re, string fmt, optional<names> flags)
{
- return split (move (s), re, fmt, move (flags));
+ return split (move (v), re, fmt, move (flags));
};
- f[".split"] += [](value s, names re, names fmt, optional<names> flags)
+ f[".split"] += [](value v, names re, names fmt, optional<names> flags)
{
- return split (move (s),
+ return split (move (v),
convert<string> (move (re)),
convert<string> (move (fmt)),
move (flags));
@@ -711,45 +864,52 @@ namespace build2
// $regex.merge(<vals>, <pat>, <fmt> [, <delim> [, <flags>]])
//
// Replace matched parts in a list of elements using the regex format
- // string. Convert the elements to string prior to matching. The result
+ // string. Convert the elements to strings prior to matching. The result
// value is untyped and contains concatenation of transformed non-empty
- // elements optionally separated with a delimiter.
- //
- // Substitution escape sequences are extended with a subset of Perl
- // sequences (see libbutl/regex.hxx for details).
+ // elements (unless the `format_copy_empty` flag is specified) optionally
+ // separated with a delimiter.
//
// The following flags are supported:
//
- // icase - match ignoring case
+ // icase - match ignoring case
+ //
+ // format_first_only - only replace the first match
//
- // format_first_only - only replace the first match
+ // format_no_copy - do not copy unmatched value parts into the
+ // result
//
- // format_no_copy - do not copy unmatched value parts into the result
+ // format_copy_empty - copy empty elements into the result
//
- // If both format_first_only and format_no_copy flags are specified then
- // the result will be a concatenation of only the first match
+ // If both `format_first_only` and `format_no_copy` flags are specified
+ // then the result will be a concatenation of only the first match
// replacements.
//
- f[".merge"] += [](names s,
- string re,
- string fmt,
- optional<string> delim,
- optional<names> flags)
- {
- return merge (move (s), re, fmt, move (delim), move (flags));
+ f[".merge"] += [](names ns,
+ string re,
+ string fmt,
+ optional<string*> delim,
+ optional<names> flags)
+ {
+ return merge (move (ns),
+ re,
+ fmt,
+ delim && *delim != nullptr
+ ? move (**delim)
+ : optional<string> (),
+ move (flags));
};
- f[".merge"] += [](names s,
- names re,
- names fmt,
- optional<names> delim,
- optional<names> flags)
+ f[".merge"] += [](names ns,
+ names re,
+ names fmt,
+ optional<names*> delim,
+ optional<names> flags)
{
- return merge (move (s),
+ return merge (move (ns),
convert<string> (move (re)),
convert<string> (move (fmt)),
- delim
- ? convert<string> (move (*delim))
+ delim && *delim != nullptr
+ ? convert<string> (move (**delim))
: optional<string> (),
move (flags));
};
@@ -757,32 +917,33 @@ namespace build2
// $regex.apply(<vals>, <pat>, <fmt> [, <flags>])
//
// Replace matched parts of each element in a list using the regex format
- // string. Convert the elements to string prior to matching. Return a list
- // of transformed elements, omitting the empty ones.
- //
- // Substitution escape sequences are extended with a subset of Perl
- // sequences (see libbutl/regex.hxx for details).
+ // string. Convert the elements to strings prior to matching. Return a
+ // list of transformed elements, omitting the empty ones (unless the
+ // `format_copy_empty` flag is specified).
//
// The following flags are supported:
//
- // icase - match ignoring case
+ // icase - match ignoring case
+ //
+ // format_first_only - only replace the first match
//
- // format_first_only - only replace the first match
+ // format_no_copy - do not copy unmatched value parts into the
+ // result
//
- // format_no_copy - do not copy unmatched value parts into the result
+ // format_copy_empty - copy empty elements into the result
//
- // If both format_first_only and format_no_copy flags are specified then
- // the result elements will only contain the replacement of the first
+ // If both `format_first_only` and `format_no_copy` flags are specified
+ // then the result elements will only contain the replacement of the first
// match.
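+ //
+ // For example:
+ //
+ //   $regex.apply(foo.cxx bar.cxx, '\.cxx$', '.o') # foo.o bar.o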
//
- f[".apply"] += [](names s, string re, string fmt, optional<names> flags)
+ f[".apply"] += [](names ns, string re, string fmt, optional<names> flags)
{
- return apply (move (s), re, fmt, move (flags));
+ return apply (move (ns), re, fmt, move (flags));
};
- f[".apply"] += [](names s, names re, names fmt, optional<names> flags)
+ f[".apply"] += [](names ns, names re, names fmt, optional<names> flags)
{
- return apply (move (s),
+ return apply (move (ns),
convert<string> (move (re)),
convert<string> (move (fmt)),
move (flags));
diff --git a/libbuild2/functions-string.cxx b/libbuild2/functions-string.cxx
index 4a03a5e..b7e0a17 100644
--- a/libbuild2/functions-string.cxx
+++ b/libbuild2/functions-string.cxx
@@ -8,18 +8,148 @@ using namespace std;
namespace build2
{
+ static string
+ replace (string&& s, value&& fv, value&& tv, optional<names>&& fs)
+ {
+ bool ic (false), fo (false), lo (false);
+ if (fs)
+ {
+ for (name& f: *fs)
+ {
+ string s (convert<string> (move (f)));
+
+ if (s == "icase")
+ ic = true;
+ else if (s == "first_only")
+ fo = true;
+ else if (s == "last_only")
+ lo = true;
+ else
+ throw invalid_argument ("invalid flag '" + s + '\'');
+ }
+ }
+
+ string f (convert<string> (move (fv)));
+ string t (convert<string> (move (tv)));
+
+ if (f.empty ())
+ throw invalid_argument ("empty <from> substring");
+
+ if (!s.empty ())
+ {
+ // Note that we don't cache s.size () since the string size will be
+ // changing as we are replacing. In fact, we may end up with an empty
+ // string after a replacement.
+
+ size_t fn (f.size ());
+
+ // Look for the substring forward in the [p, n) range.
+ //
+ auto find = [&s, &f, fn, ic] (size_t p) -> size_t
+ {
+ for (size_t n (s.size ()); p != n; ++p)
+ {
+ if (n - p >= fn &&
+ (ic
+ ? icasecmp (f, s.c_str () + p, fn)
+ : s.compare (p, fn, f)) == 0)
+ return p;
+ }
+
+ return string::npos;
+ };
+
+ // Look for the substring backward in the [0, n) range.
+ //
+ auto rfind = [&s, &f, fn, ic] (size_t n) -> size_t
+ {
+ if (n >= fn)
+ {
+ n -= fn; // Don't consider characters out of range.
+
+ for (size_t p (n);; )
+ {
+ if ((ic
+ ? icasecmp (f, s.c_str () + p, fn)
+ : s.compare (p, fn, f)) == 0)
+ return p;
+
+ if (p-- == 0) // Post-decrement so that position 0 is also checked.
+ break;
+ }
+ }
+
+ return string::npos;
+ };
+
+ if (fo || lo)
+ {
+ size_t p (lo ? rfind (s.size ()) : find (0));
+
+ if (fo && lo && p != string::npos)
+ {
+ if (p != find (0))
+ p = string::npos;
+ }
+
+ if (p != string::npos)
+ s.replace (p, fn, t);
+ }
+ else
+ {
+ for (size_t p (0); (p = find (p)) != string::npos; p += t.size ())
+ s.replace (p, fn, t);
+ }
+ }
+
+ return move (s);
+ }
+
+ static size_t
+ find_index (const strings& vs, value&& v, optional<names>&& fs)
+ {
+ bool ic (false);
+ if (fs)
+ {
+ for (name& f: *fs)
+ {
+ string s (convert<string> (move (f)));
+
+ if (s == "icase")
+ ic = true;
+ else
+ throw invalid_argument ("invalid flag '" + s + '\'');
+ }
+ }
+
+ auto i (find_if (vs.begin (), vs.end (),
+ [ic, y = convert<string> (move (v))] (const string& x)
+ {
+ return (ic ? icasecmp (x, y) : x.compare (y)) == 0;
+ }));
+
+ return i != vs.end () ? i - vs.begin () : vs.size ();
+ }
+
void
string_functions (function_map& m)
{
function_family f (m, "string");
- f["string"] += [](string s) {return s;};
-
- // @@ Shouldn't it concatenate elements into the single string?
- // @@ Doesn't seem to be used so far. Can consider removing.
+ // Note: leave undocumented since there is no good reason for the user to
+ // call this function (which would be converting string to string).
+ //
+ // Note that we must handle NULL values (relied upon by the parser
+ // to provide conversion semantics consistent with untyped values).
//
- // f["string"] += [](strings v) {return v;};
+ f["string"] += [](string* s)
+ {
+ return s != nullptr ? move (*s) : string ();
+ };
+ // $string.icasecmp(<untyped>, <untyped>)
+ // $icasecmp(<string>, <string>)
+ //
// Compare ASCII strings ignoring case and return the boolean value.
//
f["icasecmp"] += [](string x, string y)
@@ -43,7 +173,43 @@ namespace build2
convert<string> (move (y))) == 0;
};
- // Trim.
+ // $string.replace(<untyped>, <from>, <to> [, <flags>])
+ // $replace(<string>, <from>, <to> [, <flags>])
+ //
+ // Replace occurrences of substring <from> with <to> in a string. The
+ // <from> substring must not be empty.
+ //
+ // The following flags are supported:
+ //
+ // icase - compare ignoring case
+ //
+ // first_only - only replace the first match
+ //
+ // last_only - only replace the last match
+ //
+ // If both `first_only` and `last_only` flags are specified, then <from>
+ // is replaced only if it occurs in the string once.
+ //
+ // See also `$regex.replace()`.
+ //
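+ // For example:
+ //
+ //   $string.replace(foobar, o, O)             # fOObar
+ //   $string.replace(foobar, o, O, first_only) # fOobar
+ //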
+ f["replace"] += [](string s, value f, value t, optional<names> fs)
+ {
+ return replace (move (s), move (f), move (t), move (fs));
+ };
+
+ f[".replace"] += [](names s, value f, value t, optional<names> fs)
+ {
+ return names {
+ name (
+ replace (
+ convert<string> (move (s)), move (f), move (t), move (fs)))};
+ };
+
+ // $string.trim(<untyped>)
+ // $trim(<string>)
+ //
+ // Trim leading and trailing whitespace in a string.
//
f["trim"] += [](string s)
{
@@ -55,7 +221,12 @@ namespace build2
return names {name (trim (convert<string> (move (s))))};
};
- // Convert ASCII strings into lower/upper case.
+ // $string.lcase(<untyped>)
+ // $string.ucase(<untyped>)
+ // $lcase(<string>)
+ // $ucase(<string>)
+ //
+ // Convert an ASCII string into lower/upper case.
//
f["lcase"] += [](string s)
{
@@ -78,16 +249,18 @@ namespace build2
};
// $size(<strings>)
- //
- // Return the number of elements in the sequence.
- //
- f["size"] += [] (strings v) {return v.size ();};
-
+ // $size(<string-set>)
+ // $size(<string-map>)
// $size(<string>)
//
- // Return the number of characters (bytes) in the string.
+ // First three forms: return the number of elements in the sequence.
//
- f["size"] += [] (string v) {return v.size ();};
+ // Fourth form: return the number of characters (bytes) in the string.
+ //
+ f["size"] += [] (strings v) {return v.size ();};
+ f["size"] += [] (set<string> v) {return v.size ();};
+ f["size"] += [] (map<string, string> v) {return v.size ();};
+ f["size"] += [] (string v) {return v.size ();};
// $sort(<strings> [, <flags>])
//
@@ -95,9 +268,9 @@ namespace build2
//
// The following flags are supported:
//
- // icase - sort ignoring case
+ // icase - sort ignoring case
//
- // dedup - in addition to sorting also remove duplicates
+ // dedup - in addition to sorting also remove duplicates
//
f["sort"] += [](strings v, optional<names> fs)
{
@@ -114,7 +287,7 @@ namespace build2
else if (s == "dedup")
dd = true;
else
- throw invalid_argument ("invalid flag '" + s + "'");
+ throw invalid_argument ("invalid flag '" + s + '\'');
}
}
@@ -135,23 +308,75 @@ namespace build2
return v;
};
+ // $find(<strings>, <string>[, <flags>])
+ //
+ // Return true if the string sequence contains the specified string.
+ //
+ // The following flags are supported:
+ //
+ // icase - compare ignoring case
+ //
+ // See also `$regex.find_match()` and `$regex.find_search()`.
+ //
+ f["find"] += [](strings vs, value v, optional<names> fs)
+ {
+ return find_index (vs, move (v), move (fs)) != vs.size ();
+ };
+
+ // $find_index(<strings>, <string>[, <flags>])
+ //
+ // Return the index of the first element in the string sequence that
+ // is equal to the specified string or `$size(strings)` if none is
+ // found.
+ //
+ // The following flags are supported:
+ //
+ // icase - compare ignoring case
+ //
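+ // For example, assuming `s` is [strings] a b c:
+ //
+ //   $find($s, B, icase) # true
+ //   $find_index($s, c)  # 2
+ //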
+ f["find_index"] += [](strings vs, value v, optional<names> fs)
+ {
+ return find_index (vs, move (v), move (fs));
+ };
+
+ // $keys(<string-map>)
+ //
+ // Return the list of keys in a string map.
+ //
+ // Note that the result is sorted in ascending order.
+ //
+ f["keys"] += [](map<string, string> v)
+ {
+ strings r;
+ r.reserve (v.size ());
+ for (pair<const string, string>& p: v)
+ r.push_back (p.first); // @@ PERF: use C++17 map::extract() to steal.
+ return r;
+ };
+
// String-specific overloads from builtins.
//
function_family b (m, "builtin");
- b[".concat"] += [](string l, string r) {l += r; return l;};
+ // Note that we must handle NULL values (relied upon by the parser to
+ // provide concatenation semantics consistent with untyped values).
+ //
+ b[".concat"] += [](string* l, string* r)
+ {
+ return l != nullptr
+ ? r != nullptr ? move (*l += *r) : move (*l)
+ : r != nullptr ? move (*r) : string ();
+ };
- b[".concat"] += [](string l, names ur)
+ b[".concat"] += [](string* l, names* ur)
{
- l += convert<string> (move (ur));
- return l;
+ string r (ur != nullptr ? convert<string> (move (*ur)) : string ());
+ return l != nullptr ? move (*l += r) : move (r);
};
- b[".concat"] += [](names ul, string r)
+ b[".concat"] += [](names* ul, string* r)
{
- string l (convert<string> (move (ul)));
- l += r;
- return l;
+ string l (ul != nullptr ? convert<string> (move (*ul)) : string ());
+ return r != nullptr ? move (l += *r) : move (l);
};
}
}
diff --git a/libbuild2/functions-target-triplet.cxx b/libbuild2/functions-target-triplet.cxx
index 4b0ec02..6e12c97 100644
--- a/libbuild2/functions-target-triplet.cxx
+++ b/libbuild2/functions-target-triplet.cxx
@@ -13,13 +13,39 @@ namespace build2
{
function_family f (m, "target_triplet");
- f["string"] += [](target_triplet t) {return t.string ();};
- f["representation"] += [](target_triplet t) {return t.representation ();};
+ // $string(<target-triplet>)
+ //
+ // Return the canonical (that is, without the `unknown` vendor component)
+ // target triplet string.
+ //
+
+ // Note that we must handle NULL values (relied upon by the parser
+ // to provide conversion semantics consistent with untyped values).
+ //
+ f["string"] += [](target_triplet* t)
+ {
+ return t != nullptr ? t->string () : string ();
+ };
+
+ // $representation(<target-triplet>)
+ //
+ // Return the complete target triplet string that always contains the
+ // vendor component.
+ //
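+ // For example, assuming $cxx.target is x86_64-linux-gnu:
+ //
+ //   $string($cxx.target)         # x86_64-linux-gnu
+ //   $representation($cxx.target) # x86_64-unknown-linux-gnu
+ //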
+ f["representation"] += [](target_triplet t)
+ {
+ return t.representation ();
+ };
// Target triplet-specific overloads from builtins.
//
function_family b (m, "builtin");
+ // Note that while we should normally handle NULL values (relied upon by
+ // the parser to provide concatenation semantics consistent with untyped
+ // values), the result is unlikely to be what the user expects. So for now
+ // we keep it a bit tighter.
+ //
b[".concat"] += [](target_triplet l, string sr) {return l.string () + sr;};
b[".concat"] += [](string sl, target_triplet r) {return sl + r.string ();};
diff --git a/libbuild2/functions-target.cxx b/libbuild2/functions-target.cxx
new file mode 100644
index 0000000..d564aa2
--- /dev/null
+++ b/libbuild2/functions-target.cxx
@@ -0,0 +1,108 @@
+// file : libbuild2/functions-target.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <libbuild2/functions-name.hxx> // to_target()
+
+#include <libbuild2/scope.hxx>
+#include <libbuild2/target.hxx>
+#include <libbuild2/function.hxx>
+#include <libbuild2/variable.hxx>
+
+using namespace std;
+
+namespace build2
+{
+ void
+ target_functions (function_map& m)
+ {
+ // Functions that can be called only on real targets.
+ //
+ function_family f (m, "target");
+
+ // $path(<names>)
+ //
+ // Return the path of a target (or a list of paths for a list of
+ // targets). The path must be assigned, which normally happens during
+ // match. As a result, this function is normally called from a recipe.
+ //
+ // Note that while this function is technically not pure, we don't mark it
+ // as such since it can only be called (normally from a recipe) after the
+ // target has been matched, meaning that this target is a prerequisite and
+ // therefore this impurity has been accounted for.
+ //
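+ // For example, in a hypothetical ad hoc buildscript recipe:
+ //
+ //   file{output}: file{input}
+ //   {{
+ //     cp $path($<) $path($>)
+ //   }}
+ //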
+ f["path"] += [](const scope* s, names ns)
+ {
+ if (s == nullptr)
+ fail << "target.path() called out of scope";
+
+ // Most of the time we will have a single target so optimize for that.
+ //
+ small_vector<path, 1> r;
+
+ for (auto i (ns.begin ()); i != ns.end (); ++i)
+ {
+ name& n (*i), o;
+ const target& t (to_target (*s, move (n), move (n.pair ? *++i : o)));
+
+ if (const auto* pt = t.is_a<path_target> ())
+ {
+ const path& p (pt->path ());
+
+ if (&p != &empty_path)
+ r.push_back (p);
+ else
+ fail << "target " << t << " path is not assigned";
+ }
+ else
+ fail << "target " << t << " is not path-based";
+ }
+
+ // We want the result to be path if we were given a single target and
+ // paths if multiple (or zero). The problem is, we cannot distinguish it
+ // based on the argument type (e.g., name vs names) since passing an
+ // out-qualified single target requires two names.
+ //
+ if (r.size () == 1)
+ return value (move (r[0]));
+
+ return value (paths (make_move_iterator (r.begin ()),
+ make_move_iterator (r.end ())));
+ };
+
+ // $process_path(<name>)
+ //
+ // Return the process path of an executable target.
+ //
+ // Note that while this function is not technically pure, we don't mark it
+ // as such for the same reasons as for `$path()` above.
+ //
+
+ // This one can only be called on a single target since we don't support
+ // containers of process_path's (though we probably could).
+ //
+ f["process_path"] += [](const scope* s, names ns)
+ {
+ if (s == nullptr)
+ fail << "target.process_path() called out of scope";
+
+ if (ns.empty () || ns.size () != (ns[0].pair ? 2 : 1))
+ fail << "target.process_path() expects single target";
+
+ name o;
+ const target& t (
+ to_target (*s, move (ns[0]), move (ns[0].pair ? ns[1] : o)));
+
+ if (const auto* et = t.is_a<exe> ())
+ {
+ process_path r (et->process_path ());
+
+ if (r.empty ())
+ fail << "target " << t << " path is not assigned";
+
+ return r;
+ }
+ else
+ fail << "target " << t << " is not executable-based" << endf;
+ };
+ }
+}
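
For context, a minimal ad hoc recipe sketch (hypothetical target and file names) of the kind of place $path() is normally called from, that is, after the targets have been matched and their paths assigned:

  ./: file{greeting.txt}

  file{greeting.txt}: file{greeting.txt.in}
  {{
    cp $path($<) $path($>)
  }}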
diff --git a/libbuild2/in/init.cxx b/libbuild2/in/init.cxx
index 18071f8..2fb73e1 100644
--- a/libbuild2/in/init.cxx
+++ b/libbuild2/in/init.cxx
@@ -34,7 +34,10 @@ namespace build2
// Enter variables.
//
{
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
// Alternative variable substitution symbol with '$' being the
// default.
@@ -58,7 +61,27 @@ namespace build2
// is still stricter than the autoconf's semantics which also leaves
// unknown substitutions as is.
//
- vp.insert<string> ("in.substitution");
+ const variable& im (vp.insert<string> ("in.mode"));
+
+ // Original name of this variable for backwards compatibility.
+ //
+ vp.insert_alias (im, "in.substitution");
+
+ // Substitution map. Substitutions can be specified as key-value pairs
+ // rather than buildfile variables. This map is checked before the
+ // variables. An absent value in a key-value pair has the NULL semantics.
+ //
+ // This mechanism has two primary uses: Firstly, it allows us to have
+ // substitution names that cannot be specified as buildfile variables.
+ // For example, a name may start with an underscore and thus be
+ // reserved or it may refer to one of the predefined variables such as
+ // `include` or `extension` that may have a wrong visibility and/or
+ // type.
+ //
+ // Secondly, this mechanism allows us to encapsulate a group of
+ // substitutions and pass this group around as a single value.
+ //
+ vp.insert<map<string, optional<string>>> ("in.substitutions");
// Fallback value to use for NULL value substitutions. If unspecified,
// NULL substitutions are an error.
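
To make the new substitution map concrete, a minimal buildfile sketch (hypothetical target and substitution names) that combines it with the renamed in.mode variable:

  h{config}: in{config}
  {
    in.mode = lax
    in.substitutions  = PACKAGE_NAME@libhello
    in.substitutions += PACKAGE_VERSION@1.2.3
  }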
diff --git a/libbuild2/in/rule.cxx b/libbuild2/in/rule.cxx
index faf1ec1..31a9d94 100644
--- a/libbuild2/in/rule.cxx
+++ b/libbuild2/in/rule.cxx
@@ -23,14 +23,14 @@ namespace build2
namespace in
{
bool rule::
- match (action a, target& xt, const string&) const
+ match (action a, target& xt) const
{
tracer trace ("in::rule::match");
if (!xt.is_a<file> ()) // See module init() for details.
return false;
- file& t (static_cast<file&> (xt));
+ file& t (xt.as<file> ());
bool fi (false); // Found in.
for (prerequisite_member p: group_prerequisite_members (a, t))
@@ -47,17 +47,24 @@ namespace build2
if (!fi)
l5 ([&]{trace << "no in file prerequisite for target " << t;});
+ // If we match, derive the file name here instead of in apply() to make
+ // it available early for the in{} prerequisite search (see
+ // install::file_rule::apply_impl() for background).
+ //
+ if (fi)
+ t.derive_path ();
+
return fi;
}
recipe rule::
apply (action a, target& xt) const
{
- file& t (static_cast<file&> (xt));
+ file& t (xt.as<file> ());
- // Derive the file name.
+ // Make sure derived rules assign the path in match().
//
- t.derive_path ();
+ assert (!t.path ().empty ());
// Inject dependency on the output directory.
//
@@ -108,7 +115,7 @@ namespace build2
// Substitution mode.
//
bool strict (strict_);
- if (const string* s = cast_null<string> (t["in.substitution"]))
+ if (const string* s = cast_null<string> (t["in.mode"]))
{
if (*s == "lax")
strict = false;
@@ -116,6 +123,11 @@ namespace build2
fail << "invalid substitution mode '" << *s << "'";
}
+ // Substitution map.
+ //
+ const substitution_map* smap (
+ cast_null<map<string, optional<string>>> (t["in.substitutions"]));
+
// NULL substitutions.
//
optional<string> null;
@@ -251,10 +263,13 @@ namespace build2
substitute (location (ip, ln),
a, t,
name, flags,
- strict, null));
+ strict, smap, null));
assert (v); // Rule semantics change without version increment?
+ if (p3 != string::npos)
+ p3 -= p2; // Hash length.
+
if (s->compare (p2, p3, sha256 (*v).string ()) == 0)
{
dd_skip++;
@@ -291,7 +306,35 @@ namespace build2
if (verb >= 2)
text << program_ << ' ' << ip << " >" << tp;
else if (verb)
- text << program_ << ' ' << ip;
+ {
+ // If we print the target as is, in most cases we will end up with
+ // something ugly like in{version...h.in} (due to the in{} target
+ // type search semantics). There is the `...h` part but also the
+ // `.in` part that is redundant given in{}. So let's tidy this up
+ // a bit if the extension could have been derived by in_search().
+ //
+ target_key ik (i.key ());
+
+ if (ik.ext)
+ {
+ string& ie (*ik.ext);
+ const string* te (t.ext ());
+
+ size_t in (ie.size ());
+ size_t tn (te != nullptr ? te->size () : 0);
+
+ if (in == tn + (tn != 0 ? 1 : 0) + 2) // [<te>.]in
+ {
+ if (ie.compare (in - 2, 2, "in") == 0)
+ {
+ if (tn == 0 || (ie.compare (0, tn, *te) == 0 && ie[tn] == '.'))
+ ie.clear ();
+ }
+ }
+ }
+
+ print_diag (program_.c_str (), move (ik), t);
+ }
// Read and process the file, one line at a time, while updating depdb.
//
@@ -336,7 +379,7 @@ namespace build2
#endif
auto_rmfile arm (tp);
- // Note: this default will only be used if the file if empty (i.e.,
+ // Note: this default will only be used if the file is empty (i.e.,
// does not contain even a newline).
//
const char* nl (
@@ -347,8 +390,8 @@ namespace build2
#endif
);
- string s; // Reuse the buffer.
- for (uint64_t ln (1);; ++ln)
+ uint64_t ln (1);
+ for (string s;; ++ln)
{
what = "read"; whom = &ip;
if (!getline (ifs, s))
@@ -361,22 +404,31 @@ namespace build2
if (crlf)
s.pop_back();
+ what = "write"; whom = &tp;
+ if (ln != 1)
+ ofs << nl;
+
+ nl = crlf ? "\r\n" : "\n"; // Preserve the original line ending.
+
+ if (ln == 1)
+ perform_update_pre (a, t, ofs, nl);
+
// Not tracking column for now (see also depdb above).
//
process (location (ip, ln),
a, t,
dd, dd_skip,
s, 0,
- (crlf ? "\r\n" : "\n"), sym, strict, null);
+ nl, sym, strict, smap, null);
- what = "write"; whom = &tp;
- if (ln != 1)
- ofs << nl; // See below.
ofs << s;
-
- nl = crlf ? "\r\n" : "\n"; // Preserve the original line ending.
}
+ what = "write"; whom = &tp;
+ if (ln == 1)
+ perform_update_pre (a, t, ofs, nl);
+ perform_update_post (a, t, ofs, nl);
+
// Close depdb before closing the output file so its mtime is not
// newer than of the output.
//
@@ -416,6 +468,16 @@ namespace build2
}
void rule::
+ perform_update_pre (action, const target&, ofdstream&, const char*) const
+ {
+ }
+
+ void rule::
+ perform_update_post (action, const target&, ofdstream&, const char*) const
+ {
+ }
+
+ void rule::
process (const location& l,
action a, const target& t,
depdb& dd, size_t& dd_skip,
@@ -423,6 +485,7 @@ namespace build2
const char* nl,
char sym,
bool strict,
+ const substitution_map* smap,
const optional<string>& null) const
{
// Scan the line looking for substiutions in the $<name>$ form. In the
@@ -478,8 +541,7 @@ namespace build2
dd, dd_skip,
string (s, b + 1, e - b -1),
nullopt /* flags */,
- strict,
- null))
+ strict, smap, null))
{
replace_newlines (*val, nl);
@@ -500,9 +562,10 @@ namespace build2
const string& n,
optional<uint64_t> flags,
bool strict,
+ const substitution_map* smap,
const optional<string>& null) const
{
- optional<string> val (substitute (l, a, t, n, flags, strict, null));
+ optional<string> val (substitute (l, a, t, n, flags, strict, smap, null));
if (val)
{
@@ -539,6 +602,7 @@ namespace build2
const string& n,
optional<uint64_t> flags,
bool strict,
+ const substitution_map* smap,
const optional<string>& null) const
{
// In the lax mode scan the fragment to make sure it is a variable name
@@ -563,7 +627,7 @@ namespace build2
}
}
- return lookup (l, a, t, n, flags, null);
+ return lookup (l, a, t, n, flags, smap, null);
}
string rule::
@@ -571,10 +635,32 @@ namespace build2
action, const target& t,
const string& n,
optional<uint64_t> flags,
+ const substitution_map* smap,
const optional<string>& null) const
{
assert (!flags);
+ // First look in the substitution map.
+ //
+ if (smap != nullptr)
+ {
+ auto i (smap->find (n));
+
+ if (i != smap->end ())
+ {
+ if (i->second)
+ return *i->second;
+
+ if (null)
+ return *null;
+
+ fail (loc) << "null value in substitution map entry '" << n << "'" <<
+ info << "use in.null to specify null value substiution string";
+ }
+ }
+
+ // Next look for the buildfile variable.
+ //
auto l (t[n]);
if (l.defined ())
@@ -585,9 +671,9 @@ namespace build2
{
if (null)
return *null;
- else
- fail (loc) << "null value in variable '" << n << "'" <<
- info << "use in.null to specify null value substiution string";
+
+ fail (loc) << "null value in variable '" << n << "'" <<
+ info << "use in.null to specify null value substiution string";
}
// For typed values call string() for conversion.
diff --git a/libbuild2/in/rule.hxx b/libbuild2/in/rule.hxx
index 1294586..67c2509 100644
--- a/libbuild2/in/rule.hxx
+++ b/libbuild2/in/rule.hxx
@@ -18,8 +18,14 @@ namespace build2
{
// Preprocess an .in file.
//
- // Note that a derived rule can use the target data pad to cache data
- // (e.g., in match() or apply()) to be used in substitute/lookup() calls.
+ // Note that a derived rule can use the target auxiliary data storage to
+ // cache data (e.g., in match() or apply()) to be used in substitute() and
+ // lookup() calls.
+ //
+ // A derived rule is also required to derive the target file name in
+ // match() instead of apply() to make it available early for the in{}
+ // prerequisite search (see install::file_rule::apply_impl() for
+ // background).
//
// Note also that currently this rule ignores the dry-run mode (see
// perform_update() for the rationale).
@@ -43,7 +49,7 @@ namespace build2
null_ (move (null)) {}
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
virtual recipe
apply (action, target&) const override;
@@ -53,6 +59,7 @@ namespace build2
// Customization hooks and helpers.
//
+ using substitution_map = map<string, optional<string>>;
// Perform prerequisite search.
//
@@ -66,6 +73,16 @@ namespace build2
virtual void
perform_update_depdb (action, const target&, depdb&) const;
+ // Pre/post update.
+ //
+ virtual void
+ perform_update_pre (action, const target&,
+ ofdstream&, const char* newline) const;
+
+ virtual void
+ perform_update_post (action, const target&,
+ ofdstream&, const char* newline) const;
+
// Perform variable lookup.
//
// Flags can be used by a custom implementation to alter the lookup
@@ -78,6 +95,7 @@ namespace build2
action, const target&,
const string& name,
optional<uint64_t> flags,
+ const substitution_map*,
const optional<string>& null) const;
// Perform variable substitution. Return nullopt if it should be
@@ -89,6 +107,7 @@ namespace build2
const string& name,
optional<uint64_t> flags,
bool strict,
+ const substitution_map*,
const optional<string>& null) const;
// Call the above version and do any necessary depdb saving.
@@ -100,6 +119,7 @@ namespace build2
const string& name,
optional<uint64_t> flags,
bool strict,
+ const substitution_map*,
const optional<string>& null) const;
// Process a line of input from the specified position performing any
@@ -113,6 +133,7 @@ namespace build2
const char* newline,
char sym,
bool strict,
+ const substitution_map*,
const optional<string>& null) const;
// Replace newlines in a multi-line value with the given newline
diff --git a/libbuild2/in/target.cxx b/libbuild2/in/target.cxx
index d9bc8a7..d664e3a 100644
--- a/libbuild2/in/target.cxx
+++ b/libbuild2/in/target.cxx
@@ -10,7 +10,7 @@ namespace build2
namespace in
{
static const target*
- in_search (const target& xt, const prerequisite_key& cpk)
+ in_search (context& ctx, const target* xt, const prerequisite_key& cpk)
{
// If we have no extension then derive it from our target. Then delegate
// to file_search().
@@ -18,18 +18,26 @@ namespace build2
prerequisite_key pk (cpk);
optional<string>& e (pk.tk.ext);
- if (!e)
+ if (!e && xt != nullptr)
{
- if (const file* t = xt.is_a<file> ())
+ // Why is the extension, say, .h.in and not .in (with .h being in the
+ // name)? While this is mostly academic (in this case things will work
+ // the same either way), conceptually, it is a header template rather
+ // than some file template. In other words, we are adding the second
+ // level classification.
+ //
+ // See also the low verbosity tidying up code in the rule.
+ //
+ if (const file* t = xt->is_a<file> ())
{
const string& te (t->derive_extension ());
e = te + (te.empty () ? "" : ".") + "in";
}
else
- fail << "prerequisite " << pk << " for a non-file target " << xt;
+ fail << "prerequisite " << pk << " for a non-file target " << *xt;
}
- return file_search (xt, pk);
+ return file_search (ctx, xt, pk);
}
static bool
@@ -51,9 +59,9 @@ namespace build2
&target_extension_none,
nullptr, /* default_extension */ // Taken care of by search.
&in_pattern,
- &target_print_1_ext_verb, // Same as file.
+ &target_print_1_ext_verb, // Same as file (but see rule).
&in_search,
- false
+ target_type::flag::none
};
}
}
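
In other words, given the extension derivation above, an extension-less in{} prerequisite of a header picks up the two-level extension (a hypothetical dependency):

  # in{version} here resolves to the version.h.in file, with the .h.in
  # extension derived from the h{version} target.
  #
  h{version}: in{version}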
diff --git a/libbuild2/in/target.hxx b/libbuild2/in/target.hxx
index 20a0c44..619c06e 100644
--- a/libbuild2/in/target.hxx
+++ b/libbuild2/in/target.hxx
@@ -35,11 +35,14 @@ namespace build2
class LIBBUILD2_IN_SYMEXPORT in: public file
{
public:
- using file::file;
+ in (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
}
}
diff --git a/libbuild2/install/functions.cxx b/libbuild2/install/functions.cxx
index 5668efe..1de4d3e 100644
--- a/libbuild2/install/functions.cxx
+++ b/libbuild2/install/functions.cxx
@@ -15,17 +15,125 @@ namespace build2
{
function_family f (m, "install");
- // Resolve potentially relative install.* value to an absolute directory
- // based on (other) install.* values visible from the calling scope.
+ // $install.resolve(<dir>[, <rel_base>])
+ //
+ // @@ TODO: add overload to call resolve_file().
+ //
+ // Resolve potentially relative install.* value to an absolute and
+ // normalized directory based on (other) install.* values visible from
+ // the calling scope.
+ //
+ // If rel_base is specified and is not empty, then make the resulting
+ // directory relative to it. If rel_base itself is relative, first
+ // resolve it to an absolute and normalized directory based on install.*
+ // values. Note that this argument is mandatory if this function is
+ // called during relocatable installation (install.relocatable is true).
+ // While you can pass an empty directory to suppress this functionality,
+ // make sure this does not render the result non-relocatable.
+ //
+ // As an example, consider an executable that supports loading plugins
+ // and requires the plugin installation directory to be embedded into
+ // the executable during the build. The common way to support
+ // relocatable installations for such cases is to embed a path relative
+ // to the executable and complete it at runtime. If you would like to
+ // always use the relative path, regardless of whether the installation
+ // is relocatable or not, then you can simply always pass rel_base, for
+ // example:
+ //
+ // plugin_dir = $install.resolve($install.lib, $install.bin)
+ //
+ // Alternatively, if you would like to continue using absolute paths for
+ // non-relocatable installations, then you can use something like this:
+ //
+ // plugin_dir = $install.resolve($install.lib, ($install.relocatable ? $install.bin : [dir_path] ))
+ //
+ // Finally, if you are unable to support relocatable installations, the
+ // correct way to handle this is NOT to always pass an empty path for
+ // rel_base but rather assert in root.build that your project does not
+ // support relocatable installations, for example:
+ //
+ // assert (!$install.relocatable) 'relocatable installation not supported'
//
// Note that this function is not pure.
//
- f.insert (".resolve", false) += [] (const scope* s, dir_path d)
+ f.insert (".resolve", false) += [] (const scope* s,
+ dir_path dir,
+ optional<dir_path> rel_base)
{
if (s == nullptr)
fail << "install.resolve() called out of scope" << endf;
- return resolve_dir (*s, move (d));
+ if (!rel_base)
+ {
+ const scope& rs (*s->root_scope ());
+
+ if (cast_false<bool> (rs["install.relocatable"]))
+ {
+ fail << "relocatable installation requires relative base "
+ << "directory" <<
+ info << "pass empty relative base directory if this call does "
+ << "not affect installation relocatability" <<
+ info << "or add `assert (!$install.relocatable) 'relocatable "
+ << "installation not supported'` before the call";
+ }
+ }
+
+ return resolve_dir (*s,
+ move (dir),
+ rel_base ? move (*rel_base) : dir_path ());
+ };
+
+ // @@ TODO: add $install.chroot().
+
+ // $install.filter(<path>[, <type>])
+ //
+ // Apply filters from config.install.filter and return true if the
+ // specified filesystem entry should be installed/uninstalled. Note that
+ // the entry is specified as an absolute and normalized installation
+ // path (so not $path($>) but $install.resolve($>)).
+ //
+ // The type argument can be one of `regular`, `directory`, or `symlink`.
+ // If unspecified, either `directory` or `regular` is assumed, based on
+ // whether path is syntactically a directory (ends with a directory
+ // separator).
+ //
+ // Note that this function is not pure.
+ //
+ f.insert (".filter", false) += [] (const scope* s,
+ path p,
+ optional<names> ot)
+ {
+ if (s == nullptr)
+ fail << "install.filter() called out of scope" << endf;
+
+ entry_type t;
+ if (ot)
+ {
+ string v (convert<string> (move (*ot)));
+
+ if (v == "regular") t = entry_type::regular;
+ else if (v == "directory") t = entry_type::directory;
+ else if (v == "symlink") t = entry_type::symlink;
+ else throw invalid_argument ("unknown type '" + v + '\'');
+ }
+ else
+ t = p.to_directory () ? entry_type::directory : entry_type::regular;
+
+ // Split into directory and leaf.
+ //
+ dir_path d;
+ if (t == entry_type::directory)
+ {
+ d = path_cast<dir_path> (move (p));
+ p = path (); // No leaf.
+ }
+ else
+ {
+ d = p.directory ();
+ p.make_leaf ();
+ }
+
+ return filter_entry (*s->root_scope (), d, p, t);
};
}
}
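
Tying the two functions together, a hypothetical buildfile fragment (made-up variable name, assuming a non-relocatable configuration since $install.resolve() is called without a relative base) that checks whether entries under the header installation directory would pass the configured filters:

  headers_installed = $install.filter($install.resolve($install.include))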
diff --git a/libbuild2/install/init.cxx b/libbuild2/install/init.cxx
index 25dc845..3df912f 100644
--- a/libbuild2/install/init.cxx
+++ b/libbuild2/install/init.cxx
@@ -166,6 +166,8 @@ namespace build2
bool global (*name == '\0');
+ auto& vp (rs.var_pool (true /* default */)); // All qualified.
+
if (spec)
{
vn = "config.install";
@@ -175,7 +177,7 @@ namespace build2
vn += name;
}
vn += var;
- const variable& vr (rs.var_pool ().insert<CT> (move (vn)));
+ const variable& vr (vp.insert<CT> (move (vn)));
using config::lookup_config;
@@ -192,7 +194,7 @@ namespace build2
vn = "install.";
vn += name;
vn += var;
- const variable& vr (rs.var_pool ().insert<T> (move (vn)));
+ const variable& vr (vp.insert<T> (move (vn)));
value& v (rs.assign (vr));
@@ -236,7 +238,7 @@ namespace build2
// This one doesn't have config.* value (only set in a buildfile).
//
if (!global)
- rs.var_pool ().insert<bool> (string ("install.") + n + ".subdirs");
+ rs.var_pool (true).insert<bool> (string ("install.") + n + ".subdirs");
}
void
@@ -250,6 +252,20 @@ namespace build2
context& ctx (rs.ctx);
+ // Enter module variables (note that init() below enters some more).
+ //
+ // The install variable is a path, not dir_path, since it can be used
+ // to both specify the target directory (to install with the same file
+ // name) or target file (to install with a different name). And the
+ // way we distinguish between the two is via the presence/absence of
+ // the trailing directory separator.
+ //
+ // Plus it can have the special true/false values when acting as an
+ // operation variable.
+ //
+ auto& ovar (rs.var_pool ().insert<path> ("install",
+ variable_visibility::target));
+
// Register the install function family if this is the first instance of
// the install modules.
//
@@ -258,9 +274,9 @@ namespace build2
// Register our operations.
//
- rs.insert_operation (install_id, op_install);
- rs.insert_operation (uninstall_id, op_uninstall);
- rs.insert_operation (update_for_install_id, op_update_for_install);
+ rs.insert_operation (install_id, op_install, &ovar);
+ rs.insert_operation (uninstall_id, op_uninstall, &ovar);
+ rs.insert_operation (update_for_install_id, op_update_for_install, &ovar);
}
static const path cmd ("install");
@@ -269,24 +285,26 @@ namespace build2
//
#define DIR(N, V) static const dir_path dir_##N (V)
- DIR (data_root, dir_path ("root"));
- DIR (exec_root, dir_path ("root"));
+ DIR (data_root, dir_path ("root"));
+ DIR (exec_root, dir_path ("root"));
- DIR (sbin, dir_path ("exec_root") /= "sbin");
- DIR (bin, dir_path ("exec_root") /= "bin");
- DIR (lib, (dir_path ("exec_root") /= "lib") /= "<private>");
- DIR (libexec, ((dir_path ("exec_root") /= "libexec") /= "<private>") /= "<project>");
- DIR (pkgconfig, dir_path ("lib") /= "pkgconfig");
+ DIR (sbin, dir_path ("exec_root") /= "sbin");
+ DIR (bin, dir_path ("exec_root") /= "bin");
+ DIR (lib, (dir_path ("exec_root") /= "lib") /= "<private>");
+ DIR (libexec, ((dir_path ("exec_root") /= "libexec") /= "<private>") /= "<project>");
+ DIR (pkgconfig, dir_path ("lib") /= "pkgconfig");
- DIR (etc, dir_path ("data_root") /= "etc");
- DIR (include, (dir_path ("data_root") /= "include") /= "<private>");
- DIR (share, dir_path ("data_root") /= "share");
- DIR (data, (dir_path ("share") /= "<private>") /= "<project>");
+ DIR (etc, dir_path ("data_root") /= "etc");
+ DIR (include, (dir_path ("data_root") /= "include") /= "<private>");
+ DIR (include_arch, dir_path ("include"));
+ DIR (share, dir_path ("data_root") /= "share");
+ DIR (data, (dir_path ("share") /= "<private>") /= "<project>");
+ DIR (buildfile, ((dir_path ("share") /= "build2") /= "export") /= "<project>");
- DIR (doc, ((dir_path ("share") /= "doc") /= "<private>") /= "<project>");
- DIR (legal, dir_path ("doc"));
- DIR (man, dir_path ("share") /= "man");
- DIR (man1, dir_path ("man") /= "man1");
+ DIR (doc, ((dir_path ("share") /= "doc") /= "<private>") /= "<project>");
+ DIR (legal, dir_path ("doc"));
+ DIR (man, dir_path ("share") /= "man");
+ DIR (man1, dir_path ("man") /= "man1");
#undef DIR
@@ -312,22 +330,17 @@ namespace build2
// Enter module variables.
//
- auto& vp (rs.var_pool ());
+ rs.var_pool ().insert<bool> ("for_install", variable_visibility::prereq);
+
+ // The rest of the variables we enter are qualified so go straight
+ // for the public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
// Note that the set_dir() calls below enter some more.
//
- {
- // The install variable is a path, not dir_path, since it can be used
- // to both specify the target directory (to install with the same file
- // name) or target file (to install with a different name). And the
- // way we distinguish between the two is via the presence/absence of
- // the trailing directory separator.
- //
- vp.insert<path> ("install", variable_visibility::target);
- vp.insert<bool> ("for_install", variable_visibility::prereq);
- vp.insert<string> ("install.mode");
- vp.insert<bool> ("install.subdirs");
- }
+ vp.insert<string> ("install.mode");
+ vp.insert<bool> ("install.subdirs");
// Environment.
//
@@ -372,25 +385,33 @@ namespace build2
const auto& gr (group_rule_);
bs.insert_rule<alias> (perform_install_id, "install.alias", ar);
- bs.insert_rule<alias> (perform_uninstall_id, "uninstall.alias", ar);
+ bs.insert_rule<alias> (perform_uninstall_id, "install.alias", ar);
bs.insert_rule<fsdir> (perform_install_id, "install.fsdir", dr);
bs.insert_rule<fsdir> (perform_uninstall_id, "install.fsdir", dr);
bs.insert_rule<file> (perform_install_id, "install.file", fr);
- bs.insert_rule<file> (perform_uninstall_id, "uninstall.file", fr);
+ bs.insert_rule<file> (perform_uninstall_id, "install.file", fr);
- bs.insert_rule<target> (perform_install_id, "install.file", gr);
- bs.insert_rule<target> (perform_uninstall_id, "uninstall.file", gr);
+ // Note: use mtime_target (instead of target) to take precedence over
+ // the fallback file rules below.
+ //
+ // @@ We could fix this by checking the target type in file_rule,
+ // similar to build2::file_rule.
+ //
+ bs.insert_rule<mtime_target> (perform_install_id, "install.group", gr);
+ bs.insert_rule<mtime_target> (perform_uninstall_id, "install.group", gr);
// Register the fallback file rule for the update-for-[un]install
// operation, similar to update.
//
- rs.global_scope ().insert_rule<mtime_target> (
- perform_install_id, "install.file", fr);
+ // @@ Hm, it's a bit fuzzy why we would be updating-for-install
+ // something outside of any project?
+ //
+ scope& gs (rs.global_scope ());
- rs.global_scope ().insert_rule<mtime_target> (
- perform_uninstall_id, "uninstall.file", fr);
+ gs.insert_rule<mtime_target> (perform_install_id, "install.file", fr);
+ gs.insert_rule<mtime_target> (perform_uninstall_id, "install.file", fr);
}
// Configuration.
@@ -404,9 +425,9 @@ namespace build2
using config::lookup_config;
using config::specified_config;
- // Note: ignore config.install.scope (see below).
+ // Note: ignore config.install.{scope,manifest} (see below).
//
- bool s (specified_config (rs, "install", {"scope"}));
+ bool s (specified_config (rs, "install", {"scope", "manifest"}));
// Adjust module priority so that the (numerous) config.install.*
// values are saved at the end of config.build.
@@ -443,6 +464,123 @@ namespace build2
config::unsave_variable (rs, v);
}
+ // config.install.manifest
+ //
+ // Installation manifest. Valid values are a file path or `-` to dump
+ // the manifest to stdout.
+ //
+ // If specified during the install operation, then write the
+ // information about all the filesystem entries being installed into
+ // the manifest. If specified during uninstall, then remove the
+ // filesystem entries according to the manifest as opposed to the
+ // current build state. In particular, this functionality can be used
+ // to avoid surprising (and potentially lengthy) updates during
+ // uninstall that may happen because of changes to system-installed
+ // dependencies (for example, the compiler or standard library).
+ //
+ // @@ TODO: manifest uninstall is still TODO.
+ //
+ // Note: there is a single manifest per operation and thus this
+ // variable can only be specified as a global override. (While it
+ // could be handy to save this variable in config.build in some
+ // situations, supporting this will complicate the global override
+ // case). Note that as a result the manifest file path may not be
+ // specified in terms of the config.install.* values.
+ //
+ // Note also that the manifest is produced even in the dry-run mode.
+ // However, in this case no directory creation is tracked.
+ //
+ // The format of the installation manifest is "JSON lines", that is,
+ // each line is a JSON text (this makes it possible to reverse the
+ // order of lines without loading the entire file into memory). For
+ // example (indented lines indicate line continuations):
+ //
+ // {"type":"directory","path":"/tmp/install","mode":"755"}
+ // {"type":"target","name":"/tmp/libhello/libs{hello}",
+ // "entries":[
+ // {"type":"file","path":"/tmp/install/lib/libhello-1.0.so","mode":"755"},
+ // {"type":"symlink","path":"/tmp/install/lib/libhello.so","target":"libhello-1.0.so"}]}
+ //
+ // Each line is a serialization of one of the following non-abstract
+ // C++ structs:
+ //
+ // struct entry // abstract
+ // {
+ // enum {directory, file, symlink, target} type;
+ // };
+ //
+ // struct filesystem_entry: entry // abstract
+ // {
+ // path path;
+ // };
+ //
+ // struct directory_entry: filesystem_entry
+ // {
+ // string mode;
+ // };
+ //
+ // struct file_entry: filesystem_entry
+ // {
+ // string mode;
+ // };
+ //
+ // struct symlink_entry: filesystem_entry
+ // {
+ // path target;
+ // };
+ //
+ // struct target_entry: entry
+ // {
+ // string name;
+ // vector<filesystem_entry*> entries;
+ // };
+ //
+ // New entry types may be added later. Additional members may also be
+ // added to existing entries, after the existing members.
+ //
+ // If installation is relocatable (see config.install.relocatable) and
+ // the installation manifest file path is inside config.install.root
+ // (including chroot), then absolute filesystem_entry::path's are
+ // saved as relative to the manifest file's directory (note that
+ // symlink_entry::target cannot be absolute in relocatable
+ // installation).
+ //
+ {
+ auto& v (vp.insert<path> ("config.install.manifest"));
+
+ // If specified, verify it is a global override.
+ //
+ if (lookup l = rs[v])
+ {
+ if (!l.belongs (rs.global_scope ()))
+ fail << "config.install.manifest must be a global override" <<
+ info << "specify !config.install.manifest=...";
+ }
+
+ config::unsave_variable (rs, v);
+ }
+
+ // Support for relocatable install.
+ //
+ // Note that it is false by default since supporting relocatable
+ // installation may require extra effort and not all projects may
+ // support it. A project that is known not to support it should assert
+ // this fact in its root.build, for example:
+ //
+ // assert (!$install.relocatable) 'relocatable installation not supported'
+ //
+ {
+ auto& var (vp.insert<bool> ( "install.relocatable"));
+ auto& cvar (vp.insert<bool> ("config.install.relocatable"));
+
+ value& v (rs.assign (var));
+
+ // Note: unlike other variables, for ease of assertion set it to
+ // false if no config.install.* is specified.
+ //
+ v = s && cast_false<bool> (lookup_config (rs, cvar, false));
+ }
+
// Support for private install (aka poor man's Flatpack).
//
const dir_path* p;
@@ -480,35 +618,109 @@ namespace build2
}
}
- // Global config.install.* values.
+ // config.install.filter
//
- set_dir (s, p, rs, "", abs_dir_path (), false, "644", "755", cmd);
-
- set_dir (s, p, rs, "root", abs_dir_path ());
-
- set_dir (s, p, rs, "data_root", dir_data_root);
- set_dir (s, p, rs, "exec_root", dir_exec_root, false, "755");
+ // Installation filtering. The value of this variable is a list of
+ // key-value pairs that specify the filesystem entries to include or
+ // exclude from the installation. For example, the following filters
+ // will omit installing headers and static libraries (notice the
+ // quoting of the wildcard).
+ //
+ // config.install.filter='include/@false "*.a"@false'
+ //
+ // The key in each pair is a file or directory path or a path wildcard
+ // pattern. If a key is relative and contains a directory component or
+ // is a directory, then it is treated relative to the corresponding
+ // config.install.* location. Otherwise (simple path, normally a
+ // pattern), it is matched against the leaf of any path. Note that if
+ // an absolute path is specified, it should be without the
+ // config.install.chroot prefix.
+ //
+ // The value in each pair is either true (include) or false (exclude).
+ // The filters are evaluated in the order specified and the first
+ // match that is found determines the outcome. If no match is found,
+ // the default is to include. For a directory, while false means
+ // exclude all the sub-paths inside this directory, true does not mean
+ // that all the sub-paths will be included wholesale. Rather, the
+ // matched component of the sub-path is treated as included with the
+ // rest of the components matched against the following
+ // sub-filters. For example:
+ //
+ // config.install.filter='
+ // include/x86_64-linux-gnu/@true
+ // include/x86_64-linux-gnu/details/@false
+ // include/@false'
+ //
+ // The true or false value may be followed by a comma and the `symlink`
+ // modifier to only apply to symlink filesystem entries. For example:
+ //
+ // config.install.filter='"*.so"@false,symlink'
+ //
+ // Note that this mechanism only affects what gets physically copied
+ // to the installation directory without affecting what gets built for
+ // install or the view of what gets installed at the buildfile level.
+ // For example, given the `include/@false *.a@false` filters, static
+ // libraries will still be built (unless arranged not to with
+ // config.bin.lib) and the pkg-config files will still end up with -I
+ // options pointing to the header installation directory. Note also
+ // that this mechanism applies to both install and uninstall
+ // operations.
+ //
+ // If you are familiar with the Debian or Fedora packaging, this
+ // mechanism is somewhat similar to (and can be used for a similar
+ // purpose as) the Debian's .install files and Fedora's %files spec
+ // file sections that are used to split the installation into multiple
+ // binary packages.
+ //
+ {
+ auto& var (vp.insert<filters> ( "install.filter"));
+ auto& cvar (vp.insert<filters> ("config.install.filter"));
- set_dir (s, p, rs, "sbin", dir_sbin);
- set_dir (s, p, rs, "bin", dir_bin);
- set_dir (s, p, rs, "lib", dir_lib);
- set_dir (s, p, rs, "libexec", dir_libexec);
- set_dir (s, p, rs, "pkgconfig", dir_pkgconfig, false, "644");
+ value& v (rs.assign (var));
- set_dir (s, p, rs, "etc", dir_etc);
- set_dir (s, p, rs, "include", dir_include);
- set_dir (s, p, rs, "share", dir_share);
- set_dir (s, p, rs, "data", dir_data);
+ if (s)
+ {
+ if (lookup l = lookup_config (rs, cvar, nullptr))
+ v = cast<filters> (l);
+ }
+ }
- set_dir (s, p, rs, "doc", dir_doc);
- set_dir (s, p, rs, "legal", dir_legal);
- set_dir (s, p, rs, "man", dir_man);
- set_dir (s, p, rs, "man1", dir_man1);
+ // Global config.install.* values.
+ //
+ set_dir (s, p, rs, "", abs_dir_path (), false, "644", "755", cmd);
+
+ set_dir (s, p, rs, "root", abs_dir_path ());
+
+ set_dir (s, p, rs, "data_root", dir_data_root);
+ set_dir (s, p, rs, "exec_root", dir_exec_root, false, "755");
+
+ set_dir (s, p, rs, "sbin", dir_sbin);
+ set_dir (s, p, rs, "bin", dir_bin);
+ set_dir (s, p, rs, "lib", dir_lib);
+ set_dir (s, p, rs, "libexec", dir_libexec);
+ set_dir (s, p, rs, "pkgconfig", dir_pkgconfig, false, "644");
+
+ set_dir (s, p, rs, "etc", dir_etc);
+ set_dir (s, p, rs, "include", dir_include);
+ set_dir (s, p, rs, "include_arch", dir_include_arch);
+ set_dir (s, p, rs, "share", dir_share);
+ set_dir (s, p, rs, "data", dir_data);
+ set_dir (s, p, rs, "buildfile", dir_buildfile);
+
+ set_dir (s, p, rs, "doc", dir_doc);
+ set_dir (s, p, rs, "legal", dir_legal);
+ set_dir (s, p, rs, "man", dir_man);
+ set_dir (s, p, rs, "man1", dir_man1);
}
// Configure "installability" for built-in target types.
//
+ // Note that for exe{} we also set explicit 755 mode in case it gets
+ // installed somewhere else where the default is not 755 (for example to
+ // libexec/, which on Debian has the 644 mode).
+ //
install_path<exe> (bs, dir_path ("bin"));
+ install_mode<exe> (bs, "755");
install_path<doc> (bs, dir_path ("doc"));
install_path<legal> (bs, dir_path ("legal"));
install_path<man> (bs, dir_path ("man"));
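
As a usage sketch for the installation manifest support described above (the directory names are arbitrary), the manifest can only be requested as a global override, for example:

  b install config.install.root=/tmp/install '!config.install.manifest=-'

Here `-` dumps the JSON-lines manifest to stdout instead of writing it to a file.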
diff --git a/libbuild2/install/operation.cxx b/libbuild2/install/operation.cxx
index 52e8c94..ce5d24a 100644
--- a/libbuild2/install/operation.cxx
+++ b/libbuild2/install/operation.cxx
@@ -3,8 +3,15 @@
#include <libbuild2/install/operation.hxx>
+#include <sstream>
+
+#include <libbuild2/scope.hxx>
+#include <libbuild2/target.hxx>
+#include <libbuild2/context.hxx>
#include <libbuild2/variable.hxx>
+#include <libbuild2/install/utility.hxx>
+
using namespace std;
using namespace butl;
@@ -12,25 +19,356 @@ namespace build2
{
namespace install
{
+#ifndef BUILD2_BOOTSTRAP
+ context_data::
+ context_data (const path* mf)
+ : manifest_name (mf),
+ manifest_os (mf != nullptr
+ ? open_file_or_stdout (manifest_name, manifest_ofs)
+ : manifest_ofs),
+ manifest_autorm (manifest_ofs.is_open () ? *mf : path ()),
+ manifest_json (manifest_os, 0 /* indentation */)
+ {
+ if (manifest_ofs.is_open ())
+ {
+ manifest_file = *mf;
+ manifest_file.complete ();
+ manifest_file.normalize ();
+ }
+ }
+
+ static path
+ relocatable_path (context_data& d, const target& t, path p)
+ {
+ // This is both inefficient (re-detecting relocatable manifest for every
+ // path) and a bit dirty (if multiple projects are being installed with
+ // different install.{relocatable,root} values, we may end up producing
+ // some paths relative and some absolute). But doing either of these
+ // properly is probably not worth the extra complexity.
+ //
+ if (!d.manifest_file.empty ()) // Not stdout.
+ {
+ const scope& rs (t.root_scope ());
+
+ if (cast_false<bool> (rs["install.relocatable"]))
+ {
+ // Note: install.root is abs_dir_path so absolute and normalized.
+ //
+ const dir_path* root (cast_null<dir_path> (rs["install.root"]));
+ if (root == nullptr)
+ fail << "unknown installation root directory in " << rs <<
+ info << "did you forget to specify config.install.root?";
+
+ // The manifest path would include chroot so if used, we need to add
+ // it to root and the file path (we could also strip it, but then
+ // making it absolute gets tricky on Windows).
+ //
+ dir_path md (d.manifest_file.directory ());
+
+ if (md.sub (chroot_path (rs, *root))) // Inside installation root.
+ {
+ p = chroot_path (rs, p);
+ try
+ {
+ p = p.relative (md);
+ }
+ catch (const invalid_path&)
+ {
+ fail << "unable to make filesystem entry path " << p
+ << " relative to " << md <<
+ info << "required for relocatable installation manifest";
+ }
+ }
+ }
+ }
+
+ return p;
+ }
+
+ // Serialize current target and, if tgt is not NULL, start the new target.
+ //
+ // Note that we always serialize directories as top-level entries. And
+ // theoretically we can end up "splitting" a target with a directory
+ // creation. For example, if some files that belong to the target are
+ // installed into subdirectories that have not yet been created. So we
+ // have to cache the information for the current target in memory and only
+ // flush it once we see the next target (or the end).
+ //
+ // You may be wondering why not just serialize directories as target
+ // entries. While we could do that, it's not quite correct conceptually,
+ // since this would be the first of potentially many targets that caused
+ // the directory's creation. To put it another way, while files and
+ // symlinks belong to targets, directories do not.
+ //
+ static void
+ manifest_flush_target (context_data& d, const target* tgt)
+ {
+ if (d.manifest_target != nullptr)
+ {
+ assert (!d.manifest_target_entries.empty ());
+
+ // Target name format is the same as in the structured result output.
+ //
+ ostringstream os;
+ stream_verb (os, stream_verbosity (1, 0));
+ os << *d.manifest_target;
+
+ try
+ {
+ auto& s (d.manifest_json);
+
+ s.begin_object ();
+ s.member ("type", "target");
+ s.member ("name", os.str ());
+ s.member_name ("entries");
+ s.begin_array ();
+
+ for (const auto& e: d.manifest_target_entries)
+ {
+ path p (relocatable_path (d, *d.manifest_target, move (e.path)));
+
+ s.begin_object ();
+
+ if (e.target.empty ())
+ {
+ s.member ("type", "file");
+ s.member ("path", p.string ());
+ s.member ("mode", e.mode);
+ }
+ else
+ {
+ s.member ("type", "symlink");
+ s.member ("path", p.string ());
+ s.member ("target", e.target.string ());
+ }
+
+ s.end_object ();
+ }
+
+ s.end_array (); // entries member
+ s.end_object (); // target object
+ }
+ catch (const json::invalid_json_output& e)
+ {
+ fail << "invalid " << d.manifest_name << " json output: " << e;
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to write to " << d.manifest_name << ": " << e;
+ }
+
+ d.manifest_target_entries.clear ();
+ }
+
+ d.manifest_target = tgt;
+ }
+
+ void context_data::
+ manifest_install_d (context& ctx,
+ const target& tgt,
+ const dir_path& dir,
+ const string& mode)
+ {
+ auto& d (*static_cast<context_data*> (ctx.current_inner_odata.get ()));
+
+ if (d.manifest_name.path != nullptr)
+ {
+ try
+ {
+ auto& s (d.manifest_json);
+
+ // If we moved to the next target, flush the current one.
+ //
+ if (d.manifest_target != &tgt)
+ manifest_flush_target (d, nullptr);
+
+ s.begin_object ();
+ s.member ("type", "directory");
+ s.member ("path", relocatable_path (d, tgt, dir).string ());
+ s.member ("mode", mode);
+ s.end_object ();
+ }
+ catch (const json::invalid_json_output& e)
+ {
+ fail << "invalid " << d.manifest_name << " json output: " << e;
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to write to " << d.manifest_name << ": " << e;
+ }
+ }
+ }
+
+ void context_data::
+ manifest_install_f (context& ctx,
+ const target& tgt,
+ const dir_path& dir,
+ const path& name,
+ const string& mode)
+ {
+ auto& d (*static_cast<context_data*> (ctx.current_inner_odata.get ()));
+
+ if (d.manifest_name.path != nullptr)
+ {
+ if (d.manifest_target != &tgt)
+ manifest_flush_target (d, &tgt);
+
+ d.manifest_target_entries.push_back (
+ manifest_target_entry {dir / name, mode, path ()});
+ }
+ }
+
+ void context_data::
+ manifest_install_l (context& ctx,
+ const target& tgt,
+ const path& link_target,
+ const dir_path& dir,
+ const path& link)
+ {
+ auto& d (*static_cast<context_data*> (ctx.current_inner_odata.get ()));
+
+ if (d.manifest_name.path != nullptr)
+ {
+ if (d.manifest_target != &tgt)
+ manifest_flush_target (d, &tgt);
+
+ d.manifest_target_entries.push_back (
+ manifest_target_entry {dir / link, "", link_target});
+ }
+ }
+
+ static void
+ manifest_close (context& ctx)
+ {
+ auto& d (*static_cast<context_data*> (ctx.current_inner_odata.get ()));
+
+ if (d.manifest_name.path != nullptr)
+ {
+ try
+ {
+ manifest_flush_target (d, nullptr);
+
+ d.manifest_os << '\n'; // Final newline.
+
+ if (d.manifest_ofs.is_open ())
+ {
+ d.manifest_ofs.close ();
+ d.manifest_autorm.cancel ();
+ }
+ }
+ catch (const json::invalid_json_output& e)
+ {
+ fail << "invalid " << d.manifest_name << " json output: " << e;
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to write to " << d.manifest_name << ": " << e;
+ }
+ }
+ }
+#else
+ context_data::
+ context_data (const path*)
+ {
+ }
+
+ void context_data::
+ manifest_install_d (context&,
+ const target&,
+ const dir_path&,
+ const string&)
+ {
+ }
+
+ void context_data::
+ manifest_install_f (context&,
+ const target&,
+ const dir_path&,
+ const path&,
+ const string&)
+ {
+ }
+
+ void context_data::
+ manifest_install_l (context&,
+ const target&,
+ const path&,
+ const dir_path&,
+ const path&)
+ {
+ }
+
+ static void
+ manifest_close (context&)
+ {
+ }
+#endif
+
static operation_id
- install_pre (context&,
- const values& params,
+ pre_install (context&,
+ const values&,
meta_operation_id mo,
- const location& l)
+ const location&)
{
- if (!params.empty ())
- fail (l) << "unexpected parameters for operation install";
+ // Run update as a pre-operation, unless we are disfiguring.
+ //
+ return mo != disfigure_id ? update_id : 0;
+ }
+ static operation_id
+ pre_uninstall (context&,
+ const values&,
+ meta_operation_id mo,
+ const location&)
+ {
// Run update as a pre-operation, unless we are disfiguring.
//
return mo != disfigure_id ? update_id : 0;
}
+ static void
+ install_pre (context& ctx,
+ const values& params,
+ bool inner,
+ const location& l)
+ {
+ if (!params.empty ())
+ fail (l) << "unexpected parameters for operation install";
+
+ if (inner)
+ {
+ // See if we need to write the installation manifest.
+ //
+ // Note: go straight for the public variable pool.
+ //
+ const path* mf (
+ cast_null<path> (
+ ctx.global_scope[*ctx.var_pool.find ("config.install.manifest")]));
+
+ // Note that we cannot calculate whether the manifest should use
+ // relocatable (relative) paths once here since we don't know the
+ // value of config.install.root.
+
+ ctx.current_inner_odata = context::current_data_ptr (
+ new context_data (mf),
+ [] (void* p) {delete static_cast<context_data*> (p);});
+ }
+ }
+
+ static void
+ install_post (context& ctx, const values&, bool inner)
+ {
+ if (inner)
+ manifest_close (ctx);
+ }
+
// Note that we run both install and uninstall serially. The reason for
// this is all the fuzzy things we are trying to do like removing empty
// outer directories if they are empty. If we do this in parallel, then
// those things get racy. Also, since all we do here is creating/removing
// files, there is not going to be much speedup from doing it in parallel.
+ // There is also now the installation manifest, which relies on us
+ // installing all the filesystem entries of a target serially.
const operation_info op_install {
install_id,
@@ -42,8 +380,10 @@ namespace build2
"has nothing to install", // We cannot "be installed".
execution_mode::first,
0 /* concurrency */, // Run serially.
- &install_pre,
+ &pre_install,
nullptr,
+ &install_pre,
+ &install_post,
nullptr,
nullptr
};
@@ -67,7 +407,9 @@ namespace build2
"is not installed",
execution_mode::last,
0 /* concurrency */, // Run serially
- &install_pre,
+ &pre_uninstall,
+ nullptr,
+ nullptr,
nullptr,
nullptr,
nullptr
@@ -87,6 +429,8 @@ namespace build2
op_update.concurrency,
op_update.pre_operation,
op_update.post_operation,
+ op_update.operation_pre,
+ op_update.operation_post,
op_update.adhoc_match,
op_update.adhoc_apply
};
diff --git a/libbuild2/install/operation.hxx b/libbuild2/install/operation.hxx
index c1f5416..bd818b4 100644
--- a/libbuild2/install/operation.hxx
+++ b/libbuild2/install/operation.hxx
@@ -4,10 +4,15 @@
#ifndef LIBBUILD2_INSTALL_OPERATION_HXX
#define LIBBUILD2_INSTALL_OPERATION_HXX
+#ifndef BUILD2_BOOTSTRAP
+# include <libbutl/json/serializer.hxx>
+#endif
+
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
#include <libbuild2/operation.hxx>
+#include <libbuild2/filesystem.hxx> // auto_rmfile
namespace build2
{
@@ -16,6 +21,65 @@ namespace build2
extern const operation_info op_install;
extern const operation_info op_uninstall;
extern const operation_info op_update_for_install;
+
+ // Set as context::current_inner_odata during the install/uninstall inner
+ // operations.
+ //
+ struct context_data
+ {
+ // Manifest.
+ //
+#ifndef BUILD2_BOOTSTRAP
+ path manifest_file; // Absolute and normalized, empty if `-`.
+ path_name manifest_name; // Original path/name.
+ ofdstream manifest_ofs;
+ ostream& manifest_os;
+ auto_rmfile manifest_autorm;
+ butl::json::stream_serializer manifest_json;
+ const target* manifest_target = nullptr; // Target being installed.
+ struct manifest_target_entry
+ {
+ build2::path path;
+ string mode;
+ build2::path target;
+ };
+ vector<manifest_target_entry> manifest_target_entries;
+#endif
+
+ // The following manifest_install_[dfl]() functions correspond to (and
+ // are called from) file_rule::install_[dfl]().
+
+ // install -d -m <mode> <dir>
+ //
+ static void
+ manifest_install_d (context&,
+ const target&,
+ const dir_path& dir,
+ const string& mode);
+
+ // install -m <mode> <file> <dir>/<name>
+ //
+ static void
+ manifest_install_f (context&,
+ const target& file,
+ const dir_path& dir,
+ const path& name,
+ const string& mode);
+
+ // install -l <link_target> <dir>/<link>
+ //
+ static void
+ manifest_install_l (context&,
+ const target&,
+ const path& link_target,
+ const dir_path& dir,
+ const path& link);
+
+ // Constructor.
+ //
+ explicit
+ context_data (const path* manifest);
+ };
}
}
diff --git a/libbuild2/install/rule.cxx b/libbuild2/install/rule.cxx
index d4c70c0..873b2e9 100644
--- a/libbuild2/install/rule.cxx
+++ b/libbuild2/install/rule.cxx
@@ -13,6 +13,8 @@
#include <libbuild2/filesystem.hxx>
#include <libbuild2/diagnostics.hxx>
+#include <libbuild2/install/operation.hxx>
+
using namespace std;
using namespace butl;
@@ -37,12 +39,28 @@ namespace build2
return r.simple () && r.string () == "false" ? nullptr : &r;
}
+ // Note that the below rules are called for both install and
+ // update-for-install.
+ //
+ // @@ TODO: we clearly need a module class.
+ //
+ static inline const variable&
+ var_install (const scope& rs)
+ {
+ context& ctx (rs.ctx);
+
+ return *rs.root_extra->operations[
+ (ctx.current_outer_oif != nullptr
+ ? ctx.current_outer_oif
+ : ctx.current_inner_oif)->id].ovar;
+ }
+
// alias_rule
//
const alias_rule alias_rule::instance;
bool alias_rule::
- match (action, target&, const string&) const
+ match (action, target&) const
{
// We always match.
//
@@ -53,27 +71,45 @@ namespace build2
return true;
}
- const target* alias_rule::
+ pair<const target*, uint64_t> alias_rule::
filter (const scope* is,
- action a, const target& t, prerequisite_iterator& i) const
+ action a, const target& t, prerequisite_iterator& i,
+ match_extra& me) const
{
assert (i->member == nullptr);
- return filter (is, a, t, i->prerequisite);
+ return filter (is, a, t, i->prerequisite, me);
}
- const target* alias_rule::
+ pair<const target*, uint64_t> alias_rule::
filter (const scope* is,
- action, const target& t, const prerequisite& p) const
+ action, const target& t, const prerequisite& p,
+ match_extra&) const
{
const target& pt (search (t, p));
- return is == nullptr || pt.in (*is) ? &pt : nullptr;
+ const uint64_t options (match_extra::all_options); // No definition.
+ return make_pair (is == nullptr || pt.in (*is) ? &pt : nullptr, options);
}
recipe alias_rule::
- apply (action a, target& t) const
+ apply (action a, target& t, match_extra& me) const
+ {
+ return apply_impl (a, t, me);
+ }
+
+ recipe alias_rule::
+ apply (action, target&) const
+ {
+ assert (false); // Never called.
+ return nullptr;
+ }
+
+ recipe alias_rule::
+ apply_impl (action a, target& t, match_extra& me, bool reapply) const
{
tracer trace ("install::alias_rule::apply");
+ assert (!reapply || a.operation () != update_id);
+
// Pass-through to our installable prerequisites.
//
// @@ Shouldn't we do match in parallel (here and below)?
@@ -84,6 +120,8 @@ namespace build2
auto pms (group_prerequisite_members (a, t, members_mode::never));
for (auto i (pms.begin ()), e (pms.end ()); i != e; ++i)
{
+ // NOTE: see essentially the same logic in reapply_impl() below.
+ //
const prerequisite& p (i->prerequisite);
// Ignore excluded.
@@ -107,13 +145,17 @@ namespace build2
if (!is)
is = a.operation () != update_id ? install_scope (t) : nullptr;
- const target* pt (filter (*is, a, t, i));
+ pair<const target*, uint64_t> fr (filter (*is, a, t, i, me));
+
+ const target* pt (fr.first);
+ uint64_t options (fr.second);
+
+ lookup l;
+
if (pt == nullptr)
{
l5 ([&]{trace << "ignoring " << p << " (filtered out)";});
- continue;
}
-
// Check if this prerequisite is explicitly "not installable", that
// is, there is the 'install' variable and its value is false.
//
@@ -125,64 +167,108 @@ namespace build2
//
// Note: not the same as lookup_install() above.
//
- auto l ((*pt)["install"]);
- if (l && cast<path> (l).string () == "false")
+ else if ((l = (*pt)[var_install (*p.scope.root_scope ())]) &&
+ cast<path> (l).string () == "false")
{
l5 ([&]{trace << "ignoring " << *pt << " (not installable)";});
- continue;
+ pt = nullptr;
}
-
// If this is not a file-based target (e.g., a target group such as
// libu{}) then ignore it if there is no rule to install.
//
- if (pt->is_a<file> ())
- build2::match (a, *pt);
- else if (!try_match (a, *pt).first)
+ else if (pt->is_a<file> ())
+ {
+ match_sync (a, *pt, options);
+ }
+ else if (!try_match_sync (a, *pt, options).first)
{
l5 ([&]{trace << "ignoring " << *pt << " (no rule)";});
pt = nullptr;
}
- if (pt != nullptr)
- pts.push_back (prerequisite_target (pt, pi));
+ if (pt != nullptr || reapply)
+ {
+ // Use auxiliary data for a NULL entry to distinguish between
+ // filtered out (1) and ignored for other reasons (0).
+ //
+ pts.push_back (
+ prerequisite_target (pt, pi, fr.first == nullptr ? 1 : 0));
+ }
}
return default_recipe;
}
- // fsdir_rule
- //
- const fsdir_rule fsdir_rule::instance;
-
- bool fsdir_rule::
- match (action, target&, const string&) const
+ void alias_rule::
+ reapply_impl (action a, target& t, match_extra& me) const
{
- // We always match.
- //
- // Note that we are called both as the outer part during the update-for-
- // un/install pre-operation and as the inner part during the un/install
- // operation itself.
- //
- return true;
- }
+ tracer trace ("install::alias_rule::reapply");
- recipe fsdir_rule::
- apply (action a, target& t) const
- {
- // If this is outer part of the update-for-un/install, delegate to the
- // default fsdir rule. Otherwise, this is a noop (we don't install
- // fsdir{}).
- //
- // For now we also assume we don't need to do anything for prerequisites
- // (the only sensible prerequisite of fsdir{} is another fsdir{}).
+ assert (a.operation () != update_id);
+
+ optional<const scope*> is;
+
+ // Iterate over prerequisites and prerequisite targets in parallel.
//
- if (a.operation () == update_id)
+ auto& pts (t.prerequisite_targets[a]);
+ size_t j (0), n (pts.size ()), en (0);
+
+ auto pms (group_prerequisite_members (a, t, members_mode::never));
+ for (auto i (pms.begin ()), e (pms.end ());
+ i != e && j != n;
+ ++i, ++j, ++en)
{
- match_inner (a, t);
- return &execute_inner;
+ // The same logic as in apply() above except that we skip
+ // prerequisites that were not filtered out.
+ //
+ const prerequisite& p (i->prerequisite);
+
+ include_type pi (include (a, t, p));
+ if (!pi)
+ continue;
+
+ if (p.proj)
+ continue;
+
+ prerequisite_target& pto (pts[j]);
+
+ if (pto.target != nullptr || pto.data == 0)
+ continue;
+
+ if (!is)
+ is = a.operation () != update_id ? install_scope (t) : nullptr;
+
+ pair<const target*, uint64_t> fr (filter (*is, a, t, i, me));
+
+ const target* pt (fr.first);
+ uint64_t options (fr.second);
+
+ lookup l;
+
+ if (pt == nullptr)
+ {
+ l5 ([&]{trace << "ignoring " << p << " (filtered out)";});
+ }
+ else if ((l = (*pt)[var_install (*p.scope.root_scope ())]) &&
+ cast<path> (l).string () == "false")
+ {
+ l5 ([&]{trace << "ignoring " << *pt << " (not installable)";});
+ pt = nullptr;
+ }
+ else if (pt->is_a<file> ())
+ {
+ match_sync (a, *pt, options);
+ }
+ else if (!try_match_sync (a, *pt, options).first)
+ {
+ l5 ([&]{trace << "ignoring " << *pt << " (no rule)";});
+ pt = nullptr;
+ }
+
+ pto = prerequisite_target (pt, pi, fr.first == nullptr ? 1 : 0);
}
- else
- return noop_recipe;
+
+ assert (en == n); // Did not call apply() with true for reapply?
}
// group_rule
@@ -190,20 +276,46 @@ namespace build2
const group_rule group_rule::instance (false /* see_through_only */);
bool group_rule::
- match (action a, target& t, const string& h) const
+ match (action a, target& t) const
+ {
+ return (!see_through_only || t.type ().see_through ()) &&
+ alias_rule::match (a, t);
+ }
+
+ bool group_rule::
+ filter (action, const target&, const target&) const
{
- return (!see_through || t.type ().see_through) &&
- alias_rule::match (a, t, h);
+ return true;
}
- const target* group_rule::
- filter (action, const target&, const target& m) const
+ pair<const target*, uint64_t> group_rule::
+ filter (const scope* is,
+ action, const target& t, const prerequisite& p,
+ match_extra&) const
{
- return &m;
+ const uint64_t options (match_extra::all_options); // No definition.
+ pair<const target*, uint64_t> r (nullptr, options);
+
+ // The same logic as in file_rule::filter() below.
+ //
+ if (p.is_a<exe> ())
+ {
+ const scope& rs (*p.scope.root_scope ());
+
+ if (p.vars.empty () ||
+ cast_empty<path> (p.vars[var_install (rs)]).string () != "true")
+ return r;
+ }
+
+ const target& pt (search (t, p));
+ if (is == nullptr || pt.in (*is))
+ r.first = &pt;
+
+ return r;
}
recipe group_rule::
- apply (action a, target& t) const
+ apply (action a, target& t, match_extra& me) const
{
tracer trace ("install::group_rule::apply");
@@ -211,7 +323,7 @@ namespace build2
//
// Remember that we are called twice: first during update for install
// (pre-operation) and then during install. During the former, we rely
- // on the normall update rule to resolve the group members. During the
+ // on the normal update rule to resolve the group members. During the
// latter, there will be no rule to do this but the group will already
// have been resolved by the pre-operation.
//
@@ -221,22 +333,23 @@ namespace build2
? resolve_members (a, t)
: t.group_members (a));
- if (gv.members != nullptr)
+ if (gv.members != nullptr && gv.count != 0)
{
+ const scope& rs (t.root_scope ());
+
auto& pts (t.prerequisite_targets[a]);
for (size_t i (0); i != gv.count; ++i)
{
- const target* m (gv.members[i]);
+ const target* mt (gv.members[i]);
- if (m == nullptr)
+ if (mt == nullptr)
continue;
// Let a customized rule have its say.
//
- const target* mt (filter (a, t, *m));
- if (mt == nullptr)
+ if (!filter (a, t, *mt))
{
- l5 ([&]{trace << "ignoring " << *m << " (filtered out)";});
+ l5 ([&]{trace << "ignoring " << *mt << " (filtered out)";});
continue;
}
@@ -245,21 +358,21 @@ namespace build2
//
// Note: not the same as lookup_install() above.
//
- auto l ((*mt)["install"]);
+ auto l ((*mt)[var_install (rs)]);
if (l && cast<path> (l).string () == "false")
{
l5 ([&]{trace << "ignoring " << *mt << " (not installable)";});
continue;
}
- build2::match (a, *mt);
+ match_sync (a, *mt);
pts.push_back (mt); // Never ad hoc.
}
}
// Delegate to the base rule.
//
- return alias_rule::apply (a, t);
+ return alias_rule::apply (a, t, me);
}
@@ -268,7 +381,7 @@ namespace build2
const file_rule file_rule::instance;
bool file_rule::
- match (action, target&, const string&) const
+ match (action, target&) const
{
// We always match, even if this target is not installable (so that we
// can ignore it; see apply()).
@@ -276,44 +389,73 @@ namespace build2
return true;
}
- const target* file_rule::
+ bool file_rule::
+ filter (action, const target&, const target&) const
+ {
+ return true;
+ }
+
+ pair<const target*, uint64_t> file_rule::
filter (const scope* is,
- action a, const target& t, prerequisite_iterator& i) const
+ action a, const target& t, prerequisite_iterator& i,
+ match_extra& me) const
{
assert (i->member == nullptr);
- return filter (is, a, t, i->prerequisite);
+ return filter (is, a, t, i->prerequisite, me);
}
- const target* file_rule::
+ pair<const target*, uint64_t> file_rule::
filter (const scope* is,
- action, const target& t, const prerequisite& p) const
+ action, const target& t, const prerequisite& p,
+ match_extra&) const
{
+ const uint64_t options (match_extra::all_options); // No definition.
+ pair<const target*, uint64_t> r (nullptr, options);
+
+ // See also group_rule::filter() with identical semantics.
+ //
if (p.is_a<exe> ())
{
- // Feels like one day this should be unified with include (see
- // context::var_include).
+ const scope& rs (*p.scope.root_scope ());
+
+ // Note that while include() checks for install=false, here we need to
+ // check for explicit install=true. We could have re-used the lookup
+ // performed by include(), but then we would have had to drag it
+ // through and also diagnose any invalid values.
//
if (p.vars.empty () ||
- cast_empty<path> (p.vars["install"]).string () != "true")
- return nullptr;
+ cast_empty<path> (p.vars[var_install (rs)]).string () != "true")
+ return r;
}
const target& pt (search (t, p));
- return is == nullptr || pt.in (*is) ? &pt : nullptr;
+ if (is == nullptr || pt.in (*is))
+ r.first = &pt;
+
+ return r;
}
recipe file_rule::
- apply (action a, target& t) const
+ apply (action a, target& t, match_extra& me) const
{
- recipe r (apply_impl (a, t));
- return r != nullptr ? r : noop_recipe;
+ recipe r (apply_impl (a, t, me));
+ return r != nullptr ? move (r) : noop_recipe;
}
recipe file_rule::
- apply_impl (action a, target& t) const
+ apply (action, target&) const
+ {
+ assert (false); // Never called.
+ return nullptr;
+ }
+
+ recipe file_rule::
+ apply_impl (action a, target& t, match_extra& me, bool reapply) const
{
tracer trace ("install::file_rule::apply");
+ assert (!reapply || a.operation () != update_id);
+
// Note that we are called both as the outer part during the update-for-
// un/install pre-operation and as the inner part during the un/install
// operation itself.
@@ -333,11 +475,36 @@ namespace build2
// (actual update). We used to do this after matching the prerequisites
// but the inner rule may provide some rule-specific information (like
// the target extension for exe{}) that may be required during the
- // prerequisite search (like the base name for in{}).
+ // prerequisite search (like the base name for in{}; this no longer
+ // reproduces, likely due to the changes to exe{} extension derivation,
+ // but a contrived arrangement can still be made to trigger it).
//
+ // But then we discovered that doing this before the prerequisites
+ // messes up the for-install signaling. Specifically, matching the
+ // prerequisites may signal that they are being updated for install,
+ // for example, for a library via a metadata library used in a moc
+ // recipe. While matching the inner rule may trigger updating during
+ // match of such prerequisites, for example, a source file generated by
+ // that moc recipe that depends on this metadata library. If we match
+ // prerequisites before, then the library that is pulled by the metadata
+ // library will be updated before we get a chance to signal that it
+ // should be updated for install.
+ //
+ // To try to accommodate both cases (as best as we can) we now split the
+ // inner rule match into two steps: we do the match before and apply
+ // after. This allows rules that deal with tricky prerequisites like
+ // in{} to assign the target path in match() instead of apply() (see
+ // in::rule, for example).
+ //
+#if 0
optional<bool> unchanged;
if (a.operation () == update_id)
unchanged = match_inner (a, t, unmatch::unchanged).first;
+#else
+ action ia (a.inner_action ());
+ if (a.operation () == update_id)
+ match_only_sync (ia, t);
+#endif
optional<const scope*> is; // Installation scope (resolve lazily).
@@ -345,6 +512,8 @@ namespace build2
auto pms (group_prerequisite_members (a, t, members_mode::never));
for (auto i (pms.begin ()), e (pms.end ()); i != e; ++i)
{
+ // NOTE: see essentially the same logic in reapply_impl() below.
+ //
const prerequisite& p (i->prerequisite);
// Ignore excluded.
@@ -368,27 +537,30 @@ namespace build2
if (!is)
is = a.operation () != update_id ? install_scope (t) : nullptr;
- const target* pt (filter (*is, a, t, i));
+ pair<const target*, uint64_t> fr (filter (*is, a, t, i, me));
+
+ const target* pt (fr.first);
+ uint64_t options (fr.second);
+
+ lookup l;
if (pt == nullptr)
{
l5 ([&]{trace << "ignoring " << p << " (filtered out)";});
- continue;
}
-
+ //
// See if we were explicitly instructed not to touch this target (the
// same semantics as in alias_rule).
//
// Note: not the same as lookup_install() above.
//
- auto l ((*pt)["install"]);
- if (l && cast<path> (l).string () == "false")
+ else if ((l = (*pt)[var_install (*p.scope.root_scope ())]) &&
+ cast<path> (l).string () == "false")
{
l5 ([&]{trace << "ignoring " << *pt << " (not installable)";});
- continue;
+ pt = nullptr;
}
-
- if (pt->is_a<file> ())
+ else if (pt->is_a<file> ())
{
// If the matched rule returned noop_recipe, then the target state
// is set to unchanged as an optimization. Use this knowledge to
@@ -396,19 +568,36 @@ namespace build2
// when updating static installable content (headers, documentation,
// etc).
//
- if (build2::match (a, *pt, unmatch::unchanged).first)
+ // Regarding options, the expectation here is that they are not used
+ // for the update operation. And for install/uninstall, if they are
+ // used, then they don't affect whether the target is unchanged. All of
+ // this feels reasonable.
+ //
+ if (match_sync (a, *pt, unmatch::unchanged, options).first)
pt = nullptr;
}
- else if (!try_match (a, *pt).first)
+ else if (!try_match_sync (a, *pt, options).first)
{
l5 ([&]{trace << "ignoring " << *pt << " (no rule)";});
pt = nullptr;
}
- if (pt != nullptr)
- pts.push_back (prerequisite_target (pt, pi));
+ if (pt != nullptr || reapply)
+ {
+ // Use auxiliary data for a NULL entry to distinguish between
+ // filtered out (1) and ignored for other reasons (0).
+ //
+ pts.push_back (
+ prerequisite_target (pt, pi, fr.first == nullptr ? 1 : 0));
+ }
}
+#if 1
+ optional<bool> unchanged;
+ if (a.operation () == update_id)
+ unchanged = match_sync (ia, t, unmatch::unchanged).first;
+#endif
+
if (a.operation () == update_id)
{
return *unchanged
@@ -426,6 +615,79 @@ namespace build2
}
}
+ void file_rule::
+ reapply_impl (action a, target& t, match_extra& me) const
+ {
+ tracer trace ("install::file_rule::reapply");
+
+ assert (a.operation () != update_id);
+
+ optional<const scope*> is;
+
+ // Iterate over prerequisites and prerequisite targets in parallel.
+ //
+ auto& pts (t.prerequisite_targets[a]);
+ size_t j (0), n (pts.size ()), en (0);
+
+ auto pms (group_prerequisite_members (a, t, members_mode::never));
+ for (auto i (pms.begin ()), e (pms.end ());
+ i != e && j != n;
+ ++i, ++j, ++en)
+ {
+ // The same logic as in apply() above except that we skip
+ // prerequisites that were not filtered out.
+ //
+ const prerequisite& p (i->prerequisite);
+
+ include_type pi (include (a, t, p));
+ if (!pi)
+ continue;
+
+ if (p.proj)
+ continue;
+
+ prerequisite_target& pto (pts[j]);
+
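+ // Only entries filtered out by filter() (data == 1, see apply() above)
+ // are re-tried; the rest either already have a target or were ignored
+ // for other reasons (data == 0).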
+ if (pto.target != nullptr || pto.data == 0)
+ continue;
+
+ if (!is)
+ is = a.operation () != update_id ? install_scope (t) : nullptr;
+
+ pair<const target*, uint64_t> fr (filter (*is, a, t, i, me));
+
+ const target* pt (fr.first);
+ uint64_t options (fr.second);
+
+ lookup l;
+
+ if (pt == nullptr)
+ {
+ l5 ([&]{trace << "ignoring " << p << " (filtered out)";});
+ }
+ else if ((l = (*pt)[var_install (*p.scope.root_scope ())]) &&
+ cast<path> (l).string () == "false")
+ {
+ l5 ([&]{trace << "ignoring " << *pt << " (not installable)";});
+ pt = nullptr;
+ }
+ else if (pt->is_a<file> ())
+ {
+ if (match_sync (a, *pt, unmatch::unchanged, options).first)
+ pt = nullptr;
+ }
+ else if (!try_match_sync (a, *pt, options).first)
+ {
+ l5 ([&]{trace << "ignoring " << *pt << " (no rule)";});
+ pt = nullptr;
+ }
+
+ pto = prerequisite_target (pt, pi, fr.first == nullptr ? 1 : 0);
+ }
+
+ assert (en == n); // Did not call apply() with true for reapply?
+ }
+
target_state file_rule::
perform_update (action a, const target& t)
{
@@ -510,7 +772,8 @@ namespace build2
const dir_path& d (t.out_dir ().leaf (p->out_path ()));
// Add it as another leading directory rather than modifying
- // the last one directly; somehow, it feels right.
+ // the last one directly; somehow, it feels right. Note: the
+ // result is normalized.
//
if (!d.empty ())
rs.emplace_back (rs.back ().dir / d, rs.back ());
@@ -521,8 +784,9 @@ namespace build2
return rs.back ();
}
- // Resolve installation directory name to absolute directory path. Return
- // all the super-directories leading up to the destination (last).
+ // Resolve installation directory name to absolute and normalized
+ // directory path. Return all the super-directories leading up to the
+ // destination (last).
//
// If target is not NULL, then also handle the subdirs logic.
//
@@ -621,24 +885,52 @@ namespace build2
return rs;
}
- static inline install_dirs
- resolve (const target& t, dir_path d, bool fail_unknown = true)
+ static dir_path
+ resolve_dir (const scope& s, const target* t,
+ dir_path d, dir_path rb,
+ bool fail_unknown)
{
- return resolve (t.base_scope (), &t, move (d), fail_unknown);
+ install_dirs rs (resolve (s, t, move (d), fail_unknown));
+
+ if (rs.empty ())
+ return dir_path ();
+
+ dir_path r (move (rs.back ().dir));
+
+ if (!rb.empty ())
+ {
+ dir_path b (resolve (s, t, move (rb), false).back ().dir);
+
+ try
+ {
+ r = r.relative (b);
+ }
+ catch (const invalid_path&)
+ {
+ fail << "unable to make installation directory " << r
+ << " relative to " << b;
+ }
+ }
+
+ return r;
}
dir_path
- resolve_dir (const target& t, dir_path d, bool fail_unknown)
+ resolve_dir (const target& t, dir_path d, dir_path rb, bool fail_unknown)
{
- install_dirs r (resolve (t, move (d), fail_unknown));
- return r.empty () ? dir_path () : move (r.back ().dir);
+ return resolve_dir (t.base_scope (), &t, move (d), move (rb), fail_unknown);
}
dir_path
- resolve_dir (const scope& s, dir_path d, bool fail_unknown)
+ resolve_dir (const scope& s, dir_path d, dir_path rb, bool fail_unknown)
+ {
+ return resolve_dir (s, nullptr, move (d), move (rb), fail_unknown);
+ }
+
+ static inline install_dirs
+ resolve (const target& t, dir_path d, bool fail_unknown = true)
{
- install_dirs r (resolve (s, nullptr, move (d), fail_unknown));
- return r.empty () ? dir_path () : move (r.back ().dir);
+ return resolve (t.base_scope (), &t, move (d), fail_unknown);
}
path
@@ -654,6 +946,10 @@ namespace build2
bool n (!p->to_directory ());
dir_path d (n ? p->directory () : path_cast<dir_path> (*p));
+ if (n && d.empty ())
+ fail << "relative installation file path '" << p
+ << "' has no directory component";
+
install_dirs ids (resolve (f, d));
if (!n)
@@ -704,30 +1000,15 @@ namespace build2
return s;
}
- // Given an abolute path return its chroot'ed version, if any, accoring to
- // install.chroot.
- //
- template <typename P>
- static inline P
- chroot_path (const scope& rs, const P& p)
- {
- if (const dir_path* d = cast_null<dir_path> (rs["install.chroot"]))
- {
- dir_path r (p.root_directory ());
- assert (!r.empty ()); // Must be absolute.
-
- return *d / p.leaf (r);
- }
-
- return p;
- }
-
void file_rule::
install_d (const scope& rs,
const install_dir& base,
const dir_path& d,
+ const file& t,
uint16_t verbosity)
{
+ assert (d.absolute ());
+
context& ctx (rs.ctx);
// Here is the problem: if this is a dry-run, then we will keep showing
@@ -740,7 +1021,10 @@ namespace build2
// with uninstall since the directories won't be empty (because we don't
// actually uninstall any files).
//
- if (ctx.dry_run)
+ // Note that this also means we won't have the directory entries in the
+ // manifest created with dry-run. Probably not a big deal.
+ //
+ if (ctx.dry_run || !filter_entry (rs, d, path (), entry_type::directory))
return;
dir_path chd (chroot_path (rs, d));
@@ -767,13 +1051,13 @@ namespace build2
dir_path pd (d.directory ());
if (pd != base.dir)
- install_d (rs, base, pd, verbosity);
+ install_d (rs, base, pd, t, verbosity);
}
cstrings args;
string reld (
- cast<string> (ctx.global_scope["build.host.class"]) == "windows"
+ ctx.build_host->class_ == "windows"
? msys_path (chd)
: relative (chd).string ());
@@ -798,10 +1082,14 @@ namespace build2
if (verb >= 2)
print_process (args);
else if (verb)
- text << "install " << chd;
+ print_diag ("install -d", chd); // See also `install -l` below.
}
- run (pp, args);
+ run (ctx,
+ pp, args,
+ verb >= verbosity ? 1 : verb_never /* finish_verbosity */);
+
+ context_data::manifest_install_d (ctx, t, d, *base.dir_mode);
}
void file_rule::
@@ -812,14 +1100,21 @@ namespace build2
const path& f,
uint16_t verbosity)
{
+ assert (name.empty () || name.simple ());
+
context& ctx (rs.ctx);
+ const path& leaf (name.empty () ? f.leaf () : name);
+
+ if (!filter_entry (rs, base.dir, leaf, entry_type::regular))
+ return;
+
path relf (relative (f));
dir_path chd (chroot_path (rs, base.dir));
string reld (
- cast<string> (ctx.global_scope["build.host.class"]) == "windows"
+ ctx.build_host->class_ == "windows"
? msys_path (chd)
: relative (chd).string ());
@@ -852,23 +1147,47 @@ namespace build2
if (verb >= 2)
print_process (args);
else if (verb)
- text << "install " << t;
+ {
+ if (name.empty ())
+ print_diag ("install", t, chd);
+ else
+ print_diag ("install", t, chd / name);
+ }
}
if (!ctx.dry_run)
- run (pp, args);
+ run (ctx,
+ pp, args,
+ verb >= verbosity ? 1 : verb_never /* finish_verbosity */);
+
+ context_data::manifest_install_f (ctx, t, base.dir, leaf, *base.mode);
}
void file_rule::
install_l (const scope& rs,
const install_dir& base,
- const path& target,
const path& link,
+ const file& target,
+ const path& link_target,
uint16_t verbosity)
{
+ assert (link.simple () && !link.empty ());
+
context& ctx (rs.ctx);
- path rell (relative (chroot_path (rs, base.dir)));
+ if (!filter_entry (rs, base.dir, link, entry_type::symlink))
+ return;
+
+ if (link_target.absolute () &&
+ cast_false<bool> (rs["install.relocatable"]))
+ {
+ fail << "absolute symlink target " << link_target.string ()
+ << " in relocatable installation";
+ }
+
+ dir_path chd (chroot_path (rs, base.dir));
+
+ path rell (relative (chd));
rell /= link;
// We can create a symlink directly without calling ln. This, however,
@@ -882,7 +1201,7 @@ namespace build2
base.sudo != nullptr ? base.sudo->c_str () : nullptr,
"ln",
"-sf",
- target.string ().c_str (),
+ link_target.string ().c_str (),
rell.string ().c_str (),
nullptr};
@@ -895,11 +1214,19 @@ namespace build2
if (verb >= 2)
print_process (args);
else if (verb)
- text << "install " << rell << " -> " << target;
+ {
+ // Without a flag it's unclear (unlike with ln) that we are creating
+ // a link. FreeBSD install(1) has the -l flag with the appropriate
+ // semantics. For consistency, we also pass -d above.
+ //
+ print_diag ("install -l", link_target, chd / link);
+ }
}
if (!ctx.dry_run)
- run (pp, args);
+ run (ctx,
+ pp, args,
+ verb >= verbosity ? 1 : verb_never /* finish_verbosity */);
#else
// The -f part.
//
@@ -911,15 +1238,15 @@ namespace build2
if (verb >= verbosity)
{
if (verb >= 2)
- text << "ln -sf " << target.string () << ' ' << rell.string ();
+ text << "ln -sf " << link_target.string () << ' ' << rell.string ();
else if (verb)
- text << "install " << rell << " -> " << target;
+ print_diag ("install -l", link_target, chd / link);
}
if (!ctx.dry_run)
try
{
- mkanylink (target, rell, true /* copy */);
+ mkanylink (link_target, rell, true /* copy */);
}
catch (const pair<entry_type, system_error>& e)
{
@@ -931,6 +1258,12 @@ namespace build2
fail << "unable to make " << w << ' ' << rell << ": " << e.second;
}
#endif
+
+ context_data::manifest_install_l (ctx,
+ target,
+ link_target,
+ base.dir,
+ link);
}
target_state file_rule::
@@ -954,6 +1287,10 @@ namespace build2
bool n (!p.to_directory ());
dir_path d (n ? p.directory () : path_cast<dir_path> (p));
+ if (n && d.empty ())
+ fail << "relative installation file path '" << p
+ << "' has no directory component";
+
// Resolve target directory.
//
install_dirs ids (resolve (t, d));
@@ -975,7 +1312,7 @@ namespace build2
// sudo, etc).
//
for (auto i (ids.begin ()), j (i); i != ids.end (); j = i++)
- install_d (rs, *j, i->dir, verbosity); // install -d
+ install_d (rs, *j, i->dir, t, verbosity); // install -d
install_dir& id (ids.back ());
@@ -1009,6 +1346,8 @@ namespace build2
//
target_state r (straight_execute_prerequisites (a, t));
+ bool fr (filter (a, t, t));
+
// Then installable ad hoc group members, if any.
//
for (const target* m (t.adhoc_member);
@@ -1019,10 +1358,13 @@ namespace build2
{
if (!mf->path ().empty () && mf->mtime () != timestamp_nonexistent)
{
- if (const path* p = lookup_install<path> (*mf, "install"))
+ if (filter (a, t, *mf))
{
- install_target (*mf, *p, tp.empty () ? 1 : 2);
- r |= target_state::changed;
+ if (const path* p = lookup_install<path> (*mf, "install"))
+ {
+ install_target (*mf, *p, !fr || tp.empty () ? 1 : 2);
+ r |= target_state::changed;
+ }
}
}
}
@@ -1031,9 +1373,9 @@ namespace build2
// Finally install the target itself (since we got here we know the
// install variable is there).
//
- if (!tp.empty ())
+ if (fr && !tp.empty ())
{
- install_target (t, cast<path> (t["install"]), 1);
+ install_target (t, cast<path> (t[var_install (rs)]), 1);
r |= target_state::changed;
}
@@ -1046,9 +1388,13 @@ namespace build2
const dir_path& d,
uint16_t verbosity)
{
+ assert (d.absolute ());
+
+ context& ctx (rs.ctx);
+
// See install_d() for the rationale.
//
- if (rs.ctx.dry_run)
+ if (ctx.dry_run || !filter_entry (rs, d, path (), entry_type::directory))
return false;
dir_path chd (chroot_path (rs, d));
@@ -1095,7 +1441,7 @@ namespace build2
if (verb >= 2)
text << "rmdir " << reld;
else if (verb)
- text << "uninstall " << reld;
+ print_diag ("uninstall -d", chd);
}
try
@@ -1125,11 +1471,19 @@ namespace build2
if (verb >= 2)
print_process (args);
else if (verb)
- text << "uninstall " << reld;
+ print_diag ("uninstall -d", chd);
}
- process pr (run_start (pp, args));
- r = run_finish_code (args, pr);
+ process pr (run_start (pp, args,
+ 0 /* stdin */,
+ 1 /* stdout */,
+ diag_buffer::pipe (ctx) /* stderr */));
+ diag_buffer dbuf (ctx, args[0], pr);
+ dbuf.read ();
+ r = run_finish_code (
+ dbuf,
+ args, pr,
+ verb >= verbosity ? 1 : verb_never /* verbosity */);
}
if (!r)
@@ -1153,40 +1507,16 @@ namespace build2
return r;
}
- bool file_rule::
- uninstall_f (const scope& rs,
- const install_dir& base,
- const file* t,
- const path& name,
- uint16_t verbosity)
+ static void
+ uninstall_f_impl (const scope& rs,
+ const install_dir& base,
+ const path& f,
+ uint16_t verbosity)
{
- assert (t != nullptr || !name.empty ());
- path f (chroot_path (rs, base.dir) /
- (name.empty () ? t->path ().leaf () : name));
-
- try
- {
- // Note: don't follow symlinks so if the target is a dangling symlinks
- // we will proceed to removing it.
- //
- if (!file_exists (f, false)) // May throw (e.g., EACCES).
- return false;
- }
- catch (const system_error& e)
- {
- fail << "invalid installation path " << f << ": " << e;
- }
+ context& ctx (rs.ctx);
path relf (relative (f));
- if (verb >= verbosity && verb == 1)
- {
- if (t != nullptr)
- text << "uninstall " << *t;
- else
- text << "uninstall " << relf;
- }
-
// The same story as with uninstall -d (on Windows rm is also from
// MSYS2/Cygwin).
//
@@ -1196,7 +1526,7 @@ namespace build2
if (verb >= verbosity && verb >= 2)
text << "rm " << relf;
- if (!rs.ctx.dry_run)
+ if (!ctx.dry_run)
{
try
{
@@ -1222,13 +1552,107 @@ namespace build2
process_path pp (run_search (args[0]));
- if (verb >= verbosity && verb >= 2)
- print_process (args);
+ if (verb >= verbosity)
+ {
+ if (verb >= 2)
+ print_process (args);
+ }
- if (!rs.ctx.dry_run)
- run (pp, args);
+ if (!ctx.dry_run)
+ run (ctx,
+ pp, args,
+ verb >= verbosity ? 1 : verb_never /* finish_verbosity */);
}
+ }
+ bool file_rule::
+ uninstall_f (const scope& rs,
+ const install_dir& base,
+ const file* t,
+ const path& name,
+ uint16_t verbosity)
+ {
+ assert (name.empty () ? t != nullptr : name.simple ());
+
+ const path& leaf (name.empty () ? t->path ().leaf () : name);
+
+ if (!filter_entry (rs, base.dir, leaf, entry_type::regular))
+ return false;
+
+ dir_path chd (chroot_path (rs, base.dir));
+ path f (chd / leaf);
+
+ try
+ {
+ // Note: don't follow symlinks so that if the target is a dangling
+ // symlink we will proceed to remove it.
+ //
+ if (!file_exists (f, false)) // May throw (e.g., EACCES).
+ return false;
+ }
+ catch (const system_error& e)
+ {
+ fail << "invalid installation path " << f << ": " << e;
+ }
+
+ if (verb >= verbosity && verb == 1)
+ {
+ if (t != nullptr)
+ {
+ if (name.empty ())
+ print_diag ("uninstall", *t, chd, "<-");
+ else
+ print_diag ("uninstall", *t, f, "<-");
+ }
+ else
+ print_diag ("uninstall", f);
+ }
+
+ uninstall_f_impl (rs, base, f, verbosity);
+ return true;
+ }
+
+ bool file_rule::
+ uninstall_l (const scope& rs,
+ const install_dir& base,
+ const path& link,
+ const path& /*link_target*/,
+ uint16_t verbosity)
+ {
+ assert (link.simple () && !link.empty ());
+
+ if (!filter_entry (rs, base.dir, link, entry_type::symlink))
+ return false;
+
+ dir_path chd (chroot_path (rs, base.dir));
+ path f (chd / link);
+
+ try
+ {
+ // Note: don't follow symlinks so that if the target is a dangling
+ // symlink we will proceed to remove it.
+ //
+ if (!file_exists (f, false)) // May throw (e.g., EACCES).
+ return false;
+ }
+ catch (const system_error& e)
+ {
+ fail << "invalid installation path " << f << ": " << e;
+ }
+
+ if (verb >= verbosity && verb == 1)
+ {
+ // It's dubious whether showing the link target path adds anything
+ // useful here.
+ //
+#if 0
+ print_diag ("uninstall -l", target, f, "<-");
+#else
+ print_diag ("uninstall -l", f);
+#endif
+ }
+
+ uninstall_f_impl (rs, base, f, verbosity);
return true;
}
@@ -1251,6 +1675,10 @@ namespace build2
bool n (!p.to_directory ());
dir_path d (n ? p.directory () : path_cast<dir_path> (p));
+ if (n && d.empty ())
+ fail << "relative installation file path '" << p
+ << "' has no directory component";
+
// Resolve target directory.
//
install_dirs ids (resolve (t, d));
@@ -1297,8 +1725,10 @@ namespace build2
//
target_state r (target_state::unchanged);
- if (!tp.empty ())
- r |= uninstall_target (t, cast<path> (t["install"]), 1);
+ bool fr (filter (a, t, t));
+
+ if (fr && !tp.empty ())
+ r |= uninstall_target (t, cast<path> (t[var_install (rs)]), 1);
// Then installable ad hoc group members, if any. To be anally precise,
// we would have to do it in reverse, but that's not easy (it's a
@@ -1312,23 +1742,60 @@ namespace build2
{
if (!mf->path ().empty () && mf->mtime () != timestamp_nonexistent)
{
- if (const path* p = lookup_install<path> (*m, "install"))
+ if (filter (a, t, *mf))
{
- r |= uninstall_target (
- *mf,
- *p,
- tp.empty () || r != target_state::changed ? 1 : 2);
+ if (const path* p = lookup_install<path> (*m, "install"))
+ {
+ r |= uninstall_target (
+ *mf,
+ *p,
+ !fr || tp.empty () || r != target_state::changed ? 1 : 2);
+ }
}
}
}
}
-
// Finally handle installable prerequisites.
//
r |= reverse_execute_prerequisites (a, t);
return r;
}
+
+ // fsdir_rule
+ //
+ const fsdir_rule fsdir_rule::instance;
+
+ bool fsdir_rule::
+ match (action, target&) const
+ {
+ // We always match.
+ //
+ // Note that we are called both as the outer part during the update-for-
+ // un/install pre-operation and as the inner part during the un/install
+ // operation itself.
+ //
+ return true;
+ }
+
+ recipe fsdir_rule::
+ apply (action a, target& t) const
+ {
+ // If this is the outer part of the update-for-un/install, delegate to
+ // the default fsdir rule. Otherwise, this is a noop (we don't install
+ // fsdir{}).
+ //
+ // For now we also assume we don't need to do anything for prerequisites
+ // (the only sensible prerequisite of fsdir{} is another fsdir{}).
+ //
+ if (a.operation () == update_id)
+ {
+ match_inner (a, t);
+ return inner_recipe;
+ }
+ else
+ return noop_recipe;
+ }
}
}
diff --git a/libbuild2/install/rule.hxx b/libbuild2/install/rule.hxx
index 53d97d2..b023af5 100644
--- a/libbuild2/install/rule.hxx
+++ b/libbuild2/install/rule.hxx
@@ -22,45 +22,63 @@ namespace build2
{
public:
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
// Return NULL if this prerequisite should be ignored and pointer to its
- // target otherwise.
+ // target otherwise. In the latter case, return the match options that
+ // should be used for this prerequisite (use match_extra::all_options
+ // and not 0 if no match options are needed).
//
// The default implementation ignores prerequisites that are outside of
// the installation scope (see install_scope() for details).
//
+ // The default implementation always returns match_extra::all_options.
+ // The match_extra argument is not used by the default implementation.
+ //
// The prerequisite is passed as an iterator allowing the filter to
// "see" inside groups.
//
using prerequisite_iterator =
prerequisite_members_range<group_prerequisites>::iterator;
- virtual const target*
+ virtual pair<const target*, uint64_t>
filter (const scope*,
- action, const target&, prerequisite_iterator&) const;
+ action, const target&, prerequisite_iterator&,
+ match_extra&) const;
- virtual const target*
- filter (const scope*, action, const target&, const prerequisite&) const;
+ virtual pair<const target*, uint64_t>
+ filter (const scope*,
+ action, const target&, const prerequisite&,
+ match_extra&) const;
+ // Note: rule::apply() override (with match_extra).
+ //
virtual recipe
- apply (action, target&) const override;
+ apply (action, target&, match_extra&) const override;
+
+ // Implementation of apply().
+ //
+ // If the implementation may call reapply_impl(), then the reapply
+ // argument to apply_impl() must be true. Note that in this case, the
+ // *_impl() functions use the prerequisite_target::data member for their
+ // own housekeeping.
+ //
+ recipe
+ apply_impl (action, target&, match_extra&, bool reapply = false) const;
+
+ // Implementation of reapply() that re-tries prerequisites that have
+ // previously been filtered out. Note that this is currently not
+ // supported for update, only for install/uninstall.
+ //
+ void
+ reapply_impl (action, target&, match_extra&) const;
alias_rule () {}
static const alias_rule instance;
- };
-
- class fsdir_rule: public simple_rule
- {
- public:
- virtual bool
- match (action, target&, const string&) const override;
+ private:
virtual recipe
- apply (action, target&) const override;
-
- fsdir_rule () {}
- static const fsdir_rule instance;
+ apply (action, target&) const override; // Dummy simple_rule override.
};
// In addition to the alias rule's semantics, this rule sees through to
@@ -78,25 +96,33 @@ namespace build2
{
public:
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
- // Return NULL if this group member should be ignored and pointer to its
- // target otherwise.
+ // Return false if this group member should be ignored and true
+ // otherwise. Note that this filter is called during apply().
//
// The default implementation accepts all members.
//
- virtual const target*
+ virtual bool
filter (action, const target&, const target& group_member) const;
+ // Return NULL if this prerequisite should be ignored and pointer to its
+ // target otherwise. The same semantics as in file_rule below.
+ //
+ virtual pair<const target*, uint64_t>
+ filter (const scope*,
+ action, const target&, const prerequisite&,
+ match_extra&) const override;
+
using alias_rule::filter; // "Unhide" to make Clang happy.
virtual recipe
- apply (action, target&) const override;
+ apply (action, target&, match_extra&) const override;
- group_rule (bool see_through_only): see_through (see_through_only) {}
+ group_rule (bool sto): see_through_only (sto) {}
static const group_rule instance;
- bool see_through;
+ bool see_through_only;
};
struct install_dir;
@@ -105,10 +131,23 @@ namespace build2
{
public:
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
+
+ // Return false if this ad hoc group member should be ignored and true
+ // otherwise. Note that this filter is called during execute and only
+ // for install/uninstall (and not update). For generality, it is also
+ // (first) called on the target itself (can be detected by comparing
+ // the second and third arguments).
+ //
+ // The default implementation accepts all members.
+ //
+ virtual bool
+ filter (action, const target&, const target& adhoc_group_member) const;
// Return NULL if this prerequisite should be ignored and pointer to its
- // target otherwise.
+ // target otherwise. In the latter case, return the match options that
+ // should be used for this prerequisite (use match_extra::all_options
+ // and not 0 if no match options are needed).
//
// The default implementation ignores prerequisites that are outside of
// the installation scope (see install_scope() for details). It also
@@ -120,27 +159,47 @@ namespace build2
//
// exe{foo}: exe{bar}: install = true # foo runs bar
//
+ // The default implementation always returns match_extra::all_options.
+ // The match_extra argument is not used by the default implementation.
+ //
// The prerequisite is passed as an iterator allowing the filter to
// "see" inside groups.
//
using prerequisite_iterator =
prerequisite_members_range<group_prerequisites>::iterator;
- virtual const target*
+ virtual pair<const target*, uint64_t>
filter (const scope*,
- action, const target&, prerequisite_iterator&) const;
+ action, const target&, prerequisite_iterator&,
+ match_extra&) const;
- virtual const target*
- filter (const scope*, action, const target&, const prerequisite&) const;
+ virtual pair<const target*, uint64_t>
+ filter (const scope*,
+ action, const target&, const prerequisite&,
+ match_extra&) const;
+ // Note: rule::apply() override (with match_extra).
+ //
virtual recipe
- apply (action, target&) const override;
+ apply (action, target&, match_extra&) const override;
- // Implementation of apply() that returns empty_recipe if the target is
- // not installable.
+ // Implementation of apply() that returns empty_recipe (i.e., NULL) if
+ // the target is not installable.
+ //
+ // If the implementation may call reapply_impl(), then the reapply
+ // argument to apply_impl() must be true. Note that in this case, the
+ // *_impl() functions use the prerequisite_target::data member for their
+ // own housekeeping.
//
recipe
- apply_impl (action, target&) const;
+ apply_impl (action, target&, match_extra&, bool reapply = false) const;
+
+ // Implementation of reapply() that re-tries prerequisites that have
+ // previously been filtered out. Note that this is currently not
+ // supported for update, only for install/uninstall.
+ //
+ void
+ reapply_impl (action, target&, match_extra&) const;
static target_state
perform_update (action, const target&);
@@ -178,10 +237,16 @@ namespace build2
//
// install -d <dir>
//
+ // Note: <dir> is expected to be absolute.
+ //
+ // Note that the target argument only specifies which target caused
+ // this directory to be created.
+ //
static void
install_d (const scope& rs,
const install_dir& base,
const dir_path& dir,
+ const file& target,
uint16_t verbosity = 1);
// Install a file:
@@ -189,6 +254,8 @@ namespace build2
// install <file> <base>/ # if <name> is empty
// install <file> <base>/<name> # if <name> is not empty
//
+ // Note that <name> should be a simple path.
+ //
static void
install_f (const scope& rs,
const install_dir& base,
@@ -199,13 +266,25 @@ namespace build2
// Install (make) a symlink:
//
- // ln -s <target> <base>/<link>
+ // install -l <link_target> <base>/<link>
+ //
+ // Which is essentially:
+ //
+ // ln -s <link_target> <base>/<link>
+ //
+ // Note that <link> should be a simple path. Note that <link_target>
+ // must not be absolute if relocatable installation is requested
+ // (config.install.relocatable).
+ //
+ // Note that the target argument only specifies which target this
+ // symlink "belongs" to.
//
static void
install_l (const scope& rs,
const install_dir& base,
- const path& target,
const path& link,
+ const file& target,
+ const path& link_target,
uint16_t verbosity = 1);
// Uninstall (remove) a file or symlink:
@@ -223,13 +302,26 @@ namespace build2
const path& name,
uint16_t verbosity = 1);
+ // Uninstall (remove) a symlink.
+ //
+ // This is essentially uninstall_f() but with better low-verbosity
+ // diagnostics.
+ //
+ static bool
+ uninstall_l (const scope& rs,
+ const install_dir& base,
+ const path& link,
+ const path& link_target,
+ uint16_t verbosity = 1);
+
+
// Uninstall (remove) an empty directory.
//
// uninstall -d <dir>
//
- // We try to remove all the directories between base and dir but not base
- // itself unless base == dir. Return false if nothing has been removed
- // (i.e., the directories do not exist or are not empty).
+ // We try to remove all the directories between base and dir but not
+ // base itself unless base == dir. Return false if nothing has been
+ // removed (i.e., the directories do not exist or are not empty).
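+ //
+ // For example (illustrative): with base = /usr and dir =
+ // /usr/include/libfoo, we would try to remove /usr/include/libfoo and
+ // then /usr/include (each only if empty), but never /usr itself.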
//
static bool
uninstall_d (const scope& rs,
@@ -245,6 +337,23 @@ namespace build2
static const file_rule instance;
file_rule () {}
+
+ private:
+ virtual recipe
+ apply (action, target&) const override; // Dummy simple_rule override.
+ };
+
+ class fsdir_rule: public simple_rule
+ {
+ public:
+ virtual bool
+ match (action, target&) const override;
+
+ virtual recipe
+ apply (action, target&) const override;
+
+ fsdir_rule () {}
+ static const fsdir_rule instance;
};
}
}
diff --git a/libbuild2/install/utility.cxx b/libbuild2/install/utility.cxx
index 17b1365..43d97fb 100644
--- a/libbuild2/install/utility.cxx
+++ b/libbuild2/install/utility.cxx
@@ -3,6 +3,9 @@
#include <libbuild2/install/utility.hxx>
+#include <libbuild2/variable.hxx>
+#include <libbuild2/diagnostics.hxx>
+
namespace build2
{
namespace install
@@ -12,6 +15,8 @@ namespace build2
{
context& ctx (t.ctx);
+ // Note: go straight for the public variable pool.
+ //
const variable& var (*ctx.var_pool.find ("config.install.scope"));
if (const string* s = cast_null<string> (ctx.global_scope[var]))
@@ -30,5 +35,261 @@ namespace build2
return nullptr;
}
+
+ bool
+ filter_entry (const scope& rs,
+ const dir_path& base,
+ const path& leaf,
+ entry_type type)
+ {
+ assert (type != entry_type::unknown &&
+ (type == entry_type::directory) == leaf.empty ());
+
+ const filters* fs (cast_null<filters> (rs["install.filter"]));
+
+ if (fs == nullptr || fs->empty ())
+ return true;
+
+ tracer trace ("install::filter");
+
+ // Parse, resolve, and apply each filter in order.
+ //
+ // If redoing all this work for every entry proves too slow, we can
+ // consider some form of caching (e.g., on the per-project basis).
+ //
+ auto i (fs->begin ());
+
+ bool negate (false);
+ if (i->first == "!")
+ {
+ negate = true;
+ ++i;
+ }
+
+ size_t limit (0); // See below.
+
+ for (auto e (fs->end ()); i != e; ++i)
+ {
+ const pair<string, optional<string>>& kv (*i);
+
+ path k;
+ try
+ {
+ k = path (kv.first);
+
+ if (k.absolute ())
+ k.normalize ();
+ }
+ catch (const invalid_path&)
+ {
+ fail << "invalid path '" << kv.first << "' in config.install.filter "
+ << "value";
+ }
+
+ bool v;
+ {
+ const string& s (kv.second ? *kv.second : string ());
+
+ size_t p (s.find (','));
+
+ if (s.compare (0, p, "true") == 0)
+ v = true;
+ else if (s.compare (0, p, "false") == 0)
+ v = false;
+ else
+ fail << "expected true or false instead of '" << string (s, 0, p)
+ << "' in config.install.filter value" << endf;
+
+ if (p != string::npos)
+ {
+ if (s.compare (p + 1, string::npos, "symlink") == 0)
+ {
+ if (type != entry_type::symlink)
+ continue;
+ }
+ else
+ fail << "unknown modifier '" << string (s, p + 1) << "' in "
+ << "config.install.filter value";
+ }
+ }
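+
+ // For example (illustrative): a 'false,symlink' value only applies to
+ // symlink entries; for other entry types this filter is skipped and the
+ // next one is considered.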
+
+ // @@ TODO (see below for all the corner cases). Note that in a sense
+ // we already have support for matching a file in any subdirectory via
+ // simple patterns so perhaps this is not worth the trouble. Or we
+ // could support some limited form (e.g., `**` should be in the
+ // last component). But it may still be tricky to determine if
+ // it is a sub-filter.
+ //
+ if (path_pattern_recursive (k))
+ fail << "recursive wildcard pattern '" << kv.first << "' in "
+ << "config.install.filter value";
+
+ if (k.simple () && !k.to_directory ())
+ {
+ // Simple name/pattern matched against the leaf.
+ //
+ // @@ What if it is `**`?
+ //
+ if (path_pattern (k))
+ {
+ if (!path_match (leaf, k))
+ continue;
+ }
+ else
+ {
+ if (k != leaf)
+ continue;
+ }
+ }
+ else
+ {
+ // Split into directory and leaf.
+ //
+ // @@ What if leaf is `**`?
+ //
+ dir_path d;
+ if (k.to_directory ())
+ {
+ d = path_cast<dir_path> (move (k));
+ k = path (); // No leaf.
+ }
+ else
+ {
+ d = k.directory ();
+ k.make_leaf ();
+ }
+
+ // Resolve relative directory.
+ //
+ // Note that this resolution is potentially project-specific (that
+ // is, different projects may have different install.* locations).
+ //
+ // Note that if the first component is/contains a wildcard (e.g.,
+ // `*/`), then the resulution will fail, which feels correct (what
+ // does */ mean?).
+ //
+ if (d.relative ())
+ {
+ // @@ Strictly speaking, this should be base, not root scope.
+ //
+ d = resolve_dir (rs, move (d));
+ }
+
+ // Return the number of path components in the path.
+ //
+ auto path_comp = [] (const path& p)
+ {
+ size_t n (0);
+ for (auto i (p.begin ()); i != p.end (); ++i)
+ ++n;
+ return n;
+ };
+
+ // We need the sub() semantics but which uses pattern match instead
+ // of equality for the prefix. Looks like chopping off the path and
+ // calling path_match() on that is the best we can do.
+ //
+ // @@ Assumes no `**` components.
+ //
+ auto path_sub = [&path_comp] (const dir_path& ent,
+ const dir_path& pat,
+ size_t n = 0)
+ {
+ if (n == 0)
+ n = path_comp (pat);
+
+ dir_path p;
+ for (auto i (ent.begin ()); n != 0 && i != ent.end (); --n, ++i)
+ p.combine (*i, i.separator ());
+
+ return path_match (p, pat);
+ };
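+
+ // For example (illustrative): with ent = /usr/include/libfoo/impl and
+ // pat = /usr/include/lib*/, the leading components of ent up to the
+ // depth of the pattern (/usr/include/libfoo) are matched against the
+ // pattern, which succeeds.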
+
+ // The following checks should continue on no match and fall through
+ // to return.
+ //
+ if (k.empty ()) // Directory.
+ {
+ // Directories have special semantics.
+ //
+ // Consider this sequence of filters:
+ //
+ // include/x86_64-linux-gnu/@true
+ // include/x86_64-linux-gnu/details/@false
+ // include/@false
+ //
+ // It seems the semantics we want is that only subcomponent
+ // filters should apply. Maybe remember the latest matched
+ // directory as a current limit? But perhaps we don't need to
+ // remember the directory itself but the number of path
+ // components?
+ //
+ // I guess for patterns we will use the actual matched directory,
+ // not the pattern, to calculate the limit? @@ Because we
+ // currently don't support `**`, we for now can count components
+ // in the pattern.
+
+ // Check if this is a sub-filter.
+ //
+ size_t n (path_comp (d));
+ if (n <= limit)
+ continue;
+
+ if (path_pattern (d))
+ {
+ if (!path_sub (base, d, n))
+ continue;
+ }
+ else
+ {
+ if (!base.sub (d))
+ continue;
+ }
+
+ if (v)
+ {
+ limit = n;
+ continue; // Continue looking for sub-filters.
+ }
+ }
+ else
+ {
+ if (path_pattern (d))
+ {
+ if (!path_sub (base, d))
+ continue;
+ }
+ else
+ {
+ if (!base.sub (d))
+ continue;
+ }
+
+ if (path_pattern (k))
+ {
+ // @@ Does not handle `**`.
+ //
+ if (!path_match (leaf, k))
+ continue;
+ }
+ else
+ {
+ if (k != leaf)
+ continue;
+ }
+ }
+ }
+
+ if (negate)
+ v = !v;
+
+ l4 ([&]{trace << (base / leaf)
+ << (v ? " included by " : " excluded by ")
+ << kv.first << '@' << *kv.second;});
+ return v;
+ }
+
+ return !negate;
+ }
}
}
diff --git a/libbuild2/install/utility.hxx b/libbuild2/install/utility.hxx
index 52b9a54..fc40ebe 100644
--- a/libbuild2/install/utility.hxx
+++ b/libbuild2/install/utility.hxx
@@ -9,6 +9,7 @@
#include <libbuild2/scope.hxx>
#include <libbuild2/target.hxx>
+#include <libbuild2/filesystem.hxx> // entry_type
#include <libbuild2/export.hxx>
@@ -43,7 +44,7 @@ namespace build2
{
auto r (
s.target_vars[tt]["*"].insert (
- *s.var_pool ().find ("install.mode")));
+ *s.ctx.var_pool.find ("install.mode")));
if (r.second) // Already set by the user?
r.first = move (m);
@@ -69,22 +70,56 @@ namespace build2
install_scope (const target&);
// Resolve relative installation directory path (e.g., include/libfoo) to
- // its absolute directory path (e.g., /usr/include/libfoo). If the
- // resolution encountered an unknown directory, issue diagnostics and fail
- // unless fail_unknown is false, in which case return empty directory.
+ // its absolute and normalized directory path (e.g., /usr/include/libfoo).
+ // If the resolution encountered an unknown directory, issue diagnostics
+ // and fail unless fail_unknown is false, in which case return empty
+ // directory.
+ //
+ // For rel_base semantics, see the $install.resolve() documentation. Note
+ // that fail_unknown does not apply to the rel_base resolution.
//
// Note: implemented in rule.cxx.
//
LIBBUILD2_SYMEXPORT dir_path
- resolve_dir (const target&, dir_path, bool fail_unknown = true);
+ resolve_dir (const target&,
+ dir_path,
+ dir_path rel_base = {},
+ bool fail_unknown = true);
LIBBUILD2_SYMEXPORT dir_path
- resolve_dir (const scope&, dir_path, bool fail_unknown = true);
+ resolve_dir (const scope&,
+ dir_path,
+ dir_path rel_base = {},
+ bool fail_unknown = true);
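+
+ // For example (illustrative, assuming the default layout with
+ // install.root = /usr/local):
+ //
+ //   resolve_dir (t, dir_path ("include/libfoo"))
+ //
+ // would yield /usr/local/include/libfoo, while passing rel_base =
+ // dir_path ("bin") would make the result relative to the resolved bin/
+ // directory (for example, ../include/libfoo).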
// Resolve file installation path returning empty path if not installable.
//
LIBBUILD2_SYMEXPORT path
resolve_file (const file&); // rule.cxx
+
+ // Given an absolute path, return its chroot'ed version, if any,
+ // according to install.chroot.
+ //
+ template <typename P>
+ inline P
+ chroot_path (const scope& rs, const P& p)
+ {
+ assert (p.absolute ());
+ const dir_path* d (cast_null<dir_path> (rs["install.chroot"]));
+ return d != nullptr ? *d / p.leaf (p.root_directory ()) : p;
+ }
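+
+ // For example (illustrative): with install.chroot = /opt/stage,
+ // chroot_path (rs, path ("/usr/include/foo.h")) yields
+ // /opt/stage/usr/include/foo.h.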
+
+ // Installation filtering (config.install.filter).
+ //
+ // If entry type is a directory, then leaf must be empty.
+ //
+ using filters = vector<pair<string, optional<string>>>;
+
+ LIBBUILD2_SYMEXPORT bool
+ filter_entry (const scope& rs,
+ const dir_path& base,
+ const path& leaf,
+ entry_type);
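+
+ // For example (illustrative; see filter_entry() in utility.cxx for the
+ // exact matching semantics):
+ //
+ //   config.install.filter = 'include/x86_64-linux-gnu/@true include/@false'
+ //
+ // An initial '!' entry inverts the sense of the filter.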
}
}
diff --git a/libbuild2/json.cxx b/libbuild2/json.cxx
new file mode 100644
index 0000000..4ed1631
--- /dev/null
+++ b/libbuild2/json.cxx
@@ -0,0 +1,904 @@
+// file : libbuild2/json.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <libbuild2/json.hxx>
+
+#include <limits>
+
+#ifndef BUILD2_BOOTSTRAP
+# include <libbutl/json/parser.hxx>
+# include <libbutl/json/serializer.hxx>
+#endif
+
+namespace build2
+{
+ // json_event
+ //
+#ifndef BUILD2_BOOTSTRAP
+ const char*
+ to_string (json_event e)
+ {
+ switch (e)
+ {
+ case json_event::begin_object: return "beginning of object";
+ case json_event::end_object: return "end of object";
+ case json_event::begin_array: return "beginning of array";
+ case json_event::end_array: return "end of array";
+ case json_event::name: return "member name";
+ case json_event::string: return "string value";
+ case json_event::number: return "numeric value";
+ case json_event::boolean: return "boolean value";
+ case json_event::null: return "null value";
+ }
+
+ return "";
+ }
+#endif
+
+ // json_type
+ //
+ const char*
+ to_string (json_type t, bool dn) noexcept
+ {
+ using type = json_type;
+
+ switch (t)
+ {
+ case type::null: return "null";
+ case type::boolean: return "boolean";
+ case type::signed_number: return dn ? "signed number" : "number";
+ case type::unsigned_number: return dn ? "unsigned number" : "number";
+ case type::hexadecimal_number: return dn ? "hexadecimal number" : "number";
+ case type::string: return "string";
+ case type::array: return "array";
+ case type::object: return "object";
+ }
+ return "";
+ }
+
+ // json_value
+ //
+ const json_value null_json_value (json_type::null);
+
+ [[noreturn]] void
+ json_as_throw (json_type t, json_type e)
+ {
+ string m;
+ m = "expected ";
+ m += to_string (e, true);
+ m += " instead of ";
+ m += to_string (t, true);
+ throw invalid_argument (move (m));
+ }
+
+ [[noreturn]] static void
+ at_throw (json_type t, json_type e, bool index)
+ {
+ string m;
+
+ if (t != e && t != json_type::null)
+ {
+ m = "expected ";
+ m += to_string (e, true);
+ m += " instead of ";
+ m += to_string (t, true);
+ throw invalid_argument (move (m));
+ }
+ else
+ {
+ m = index ? "index" : "name";
+ m += " out of range in ";
+ m += to_string (e, true);
+ throw std::out_of_range (move (m));
+ }
+ }
+
+ const json_value& json_value::
+ at (size_t index) const
+ {
+ if (type == json_type::array)
+ {
+ if (index < array.size ())
+ return array[index];
+ }
+
+ at_throw (type, json_type::array, true);
+ }
+
+ json_value& json_value::
+ at (size_t index)
+ {
+ if (type == json_type::array)
+ {
+ if (index < array.size ())
+ return array[index];
+ }
+
+ at_throw (type, json_type::array, true);
+ }
+
+#if 0
+ const json_value& json_value::
+ operator[] (size_t index) const
+ {
+ if (type == json_type::null)
+ return null_json_value;
+
+ if (type == json_type::array)
+ return index < array.size () ? array[index] : null_json_value;
+
+ at_throw (type, json_type::array, true);
+ }
+
+ json_value& json_value::
+ operator[] (size_t index)
+ {
+ if (type == json_type::null)
+ {
+ new (&array) array_type ();
+ type = json_type::array;
+ }
+
+ if (type == json_type::array)
+ {
+ size_t n (array.size ());
+
+ if (index < n)
+ return array[index];
+
+ // If there are missing elements in between, fill them with nulls.
+ //
+ if (index != n)
+ array.resize (index, json_value ());
+
+ array.push_back (json_value ());
+ return array.back ();
+ }
+
+ at_throw (type, json_type::array, true);
+ }
+#endif
+
+ const json_value& json_value::
+ at (const char* name) const
+ {
+ if (type == json_type::object)
+ {
+ auto i (find_if (object.begin (), object.end (),
+ [name] (const json_member& m)
+ {
+ return m.name == name;
+ }));
+
+ if (i != object.end ())
+ return i->value;
+ }
+
+ at_throw (type, json_type::object, false);
+ }
+
+ json_value& json_value::
+ at (const char* name)
+ {
+ if (type == json_type::object)
+ {
+ auto i (find_if (object.begin (), object.end (),
+ [name] (const json_member& m)
+ {
+ return m.name == name;
+ }));
+
+ if (i != object.end ())
+ return i->value;
+ }
+
+ at_throw (type, json_type::object, false);
+ }
+
+ const json_value* json_value::
+ find (const char* name) const
+ {
+ if (type == json_type::object)
+ {
+ auto i (find_if (object.begin (), object.end (),
+ [name] (const json_member& m)
+ {
+ return m.name == name;
+ }));
+ return i != object.end () ? &i->value : nullptr;
+ }
+
+ at_throw (type, json_type::object, false);
+ }
+
+ json_value* json_value::
+ find (const char* name)
+ {
+ if (type == json_type::object)
+ {
+ auto i (find_if (object.begin (), object.end (),
+ [name] (const json_member& m)
+ {
+ return m.name == name;
+ }));
+
+ return i != object.end () ? &i->value : nullptr;
+ }
+
+ at_throw (type, json_type::object, false);
+ }
+
+#if 0
+ const json_value& json_value::
+ operator[] (const char* name) const
+ {
+ if (type == json_type::null)
+ return null_json_value;
+
+ if (type == json_type::object)
+ {
+ auto i (find_if (object.begin (), object.end (),
+ [name] (const json_member& m)
+ {
+ return m.name == name;
+ }));
+
+
+ return i != object.end () ? i->value : null_json_value;
+ }
+
+ at_throw (type, json_type::object, false);
+ }
+
+ json_value& json_value::
+ operator[] (const char* name)
+ {
+ if (type == json_type::null)
+ {
+ new (&object) object_type ();
+ type = json_type::object;
+ }
+
+ if (type == json_type::object)
+ {
+ auto i (find_if (object.begin (), object.end (),
+ [name] (const json_member& m)
+ {
+ return m.name == name;
+ }));
+
+ if (i != object.end ())
+ return i->value;
+
+ object.push_back (json_member {name, json_value ()});
+ return object.back ().value;
+ }
+
+ at_throw (type, json_type::object, false);
+ }
+#endif
+
+ int json_value::
+ compare (const json_value& v) const noexcept
+ {
+ int r (0);
+ {
+ // Note: we need to treat unsigned and hexadecimal the same.
+ //
+ json_type t (type == json_type::hexadecimal_number
+ ? json_type::unsigned_number
+ : type);
+
+ json_type vt (v.type == json_type::hexadecimal_number
+ ? json_type::unsigned_number
+ : v.type);
+
+ if (t != vt)
+ {
+ // Handle the special signed/unsigned number case here.
+ //
+ if (t == json_type::signed_number &&
+ vt == json_type::unsigned_number)
+ {
+ if (signed_number < 0)
+ r = -1;
+ else
+ {
+ uint64_t u (static_cast<uint64_t> (signed_number));
+ r = u < v.unsigned_number ? -1 : (u > v.unsigned_number ? 1 : 0);
+ }
+ }
+ else if (t == json_type::unsigned_number &&
+ vt == json_type::signed_number)
+ {
+ if (v.signed_number < 0)
+ r = 1;
+ else
+ {
+ uint64_t u (static_cast<uint64_t> (v.signed_number));
+ r = unsigned_number < u ? -1 : (unsigned_number > u ? 1 : 0);
+ }
+ }
+ else
+ r = (static_cast<uint8_t> (t) < static_cast<uint8_t> (vt) ? -1 : 1);
+ }
+ }
+
+ if (r == 0)
+ {
+ switch (type)
+ {
+ case json_type::null:
+ {
+ r = 0;
+ break;
+ }
+ case json_type::boolean:
+ {
+ r = boolean == v.boolean ? 0 : boolean ? 1 : -1;
+ break;
+ }
+ case json_type::signed_number:
+ {
+ r = (signed_number < v.signed_number
+ ? -1
+ : (signed_number > v.signed_number ? 1 : 0));
+ break;
+ }
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ {
+ r = (unsigned_number < v.unsigned_number
+ ? -1
+ : (unsigned_number > v.unsigned_number ? 1 : 0));
+ break;
+ }
+ case json_type::string:
+ {
+ r = string.compare (v.string);
+ break;
+ }
+ case json_type::array:
+ {
+ auto i (array.begin ()), ie (array.end ());
+ auto j (v.array.begin ()), je (v.array.end ());
+
+ for (; i != ie && j != je; ++i, ++j)
+ {
+ if ((r = i->compare (*j)) != 0)
+ break;
+ }
+
+ if (r == 0)
+ r = i == ie ? (j == je ? 0 : -1) : 1; // More elements than other?
+
+ break;
+ }
+ case json_type::object:
+ {
+ // We don't expect there to be a large number of members so it makes
+ // sense to iterate in lexicographical order without making any
+ // copies.
+ //
+ auto next = [] (object_type::const_iterator p, // == e for first
+ object_type::const_iterator b,
+ object_type::const_iterator e)
+ {
+ // We need to find an element with the "smallest" name that is
+ // greater than the previous entry.
+ //
+ auto n (e);
+
+ for (auto i (b); i != e; ++i)
+ {
+ if (p == e || i->name > p->name)
+ {
+ int r;
+ if (n == e || (r = n->name.compare (i->name)) > 0)
+ n = i;
+ else
+ assert (r != 0); // No duplicates.
+ }
+ }
+
+ return n;
+ };
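+
+ // For example (illustrative): for members stored in insertion order as
+ // {b, a, c}, successive next() calls visit a, b, c.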
+
+ auto ib (object.begin ()), ie (object.end ()), i (ie);
+ auto jb (v.object.begin ()), je (v.object.end ()), j (je);
+
+ for (;;)
+ {
+ // Note: we must call next() on both.
+ //
+ i = next (i, ib, ie);
+ j = next (j, jb, je);
+
+ if (i == ie || j == je)
+ break;
+
+ // Determine if both have this name and if not, which name comes
+ // first.
+ //
+ int n (i->name.compare (j->name));
+
+ r = (n < 0 // If i's first, then i is greater.
+ ? -1
+ : (n > 0 // If j's first, then j is greater.
+ ? 1
+ : i->value.compare (j->value))); // Both have this name.
+
+ if (r != 0)
+ break;
+ }
+
+ if (r == 0)
+ r = i == ie ? (j == je ? 0 : -1) : 1; // More members than other?
+
+ break;
+ }
+ }
+ }
+
+ return r;
+ }
+
+ static void
+ append_numbers (json_value& l, const json_value& r) noexcept
+ {
+ auto append = [&l] (uint64_t u, int64_t s, bool hex = false)
+ {
+ if (s < 0)
+ {
+ // The absolute value of the minimum signed integer is not
+ // representable in 2's complement. So handle this case specially for
+ // completeness.
+ //
+ uint64_t a (
+ s != std::numeric_limits<int64_t>::min ()
+ ? static_cast<uint64_t> (-s)
+ : static_cast<uint64_t> (std::numeric_limits<int64_t>::max ()) + 1);
+
+ if (u >= a)
+ {
+ l.unsigned_number = u - a;
+ l.type = (hex
+ ? json_type::hexadecimal_number
+ : json_type::unsigned_number);
+ }
+ else
+ {
+ l.signed_number = -static_cast<int64_t> (a - u);
+ l.type = json_type::signed_number;
+ }
+ }
+ else
+ {
+ l.unsigned_number = u + static_cast<uint64_t> (s);
+ l.type = (hex
+ ? json_type::hexadecimal_number
+ : json_type::unsigned_number);
+ }
+ };
+
+ // We try to keep LHS hex if possible.
+ //
+ if (l.type == json_type::signed_number)
+ {
+ if (r.type == json_type::signed_number)
+ {
+ // Deal with non-negative signed numbers for completeness.
+ //
+ if (l.signed_number >= 0)
+ append (static_cast <uint64_t> (l.signed_number), r.signed_number);
+ else if (r.signed_number >= 0)
+ append (static_cast <uint64_t> (r.signed_number), l.signed_number);
+ else
+ l.signed_number += r.signed_number;
+ }
+ else
+ append (r.unsigned_number, l.signed_number);
+ }
+ else
+ {
+ if (r.type == json_type::signed_number)
+ append (l.unsigned_number,
+ r.signed_number,
+ l.type == json_type::hexadecimal_number);
+ else
+ l.unsigned_number += r.unsigned_number;
+ }
+ }
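+
+ // For example (illustrative): append_numbers() on unsigned 5 and signed
+ // -3 yields unsigned 2 while on unsigned 5 and signed -7 it yields
+ // signed -2; a hexadecimal LHS stays hexadecimal as long as the result
+ // is non-negative.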
+
+ void json_value::
+ append (json_value&& v, bool override)
+ {
+ if (type == json_type::null)
+ {
+ *this = move (v);
+ return;
+ }
+ else if (type == json_type::array)
+ {
+ if (v.type == json_type::array)
+ {
+ if (array.empty ())
+ array = move (v.array);
+ else
+ array.insert (array.end (),
+ make_move_iterator (v.array.begin ()),
+ make_move_iterator (v.array.end ()));
+ }
+ else
+ array.push_back (move (v));
+
+ return;
+ }
+ else
+ {
+ switch (v.type)
+ {
+ case json_type::null: return;
+ case json_type::boolean:
+ {
+ if (type != json_type::boolean)
+ break;
+
+ boolean = boolean || v.boolean;
+ return;
+ }
+ case json_type::signed_number:
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ {
+ if (type != json_type::signed_number &&
+ type != json_type::unsigned_number &&
+ type != json_type::hexadecimal_number)
+ break;
+
+ append_numbers (*this, v);
+ return;
+ }
+ case json_type::string:
+ {
+ if (type != json_type::string)
+ break;
+
+ string += v.string;
+ return;
+ }
+ case json_type::array: break;
+ case json_type::object:
+ {
+ if (type != json_type::object)
+ break;
+
+ if (object.empty ())
+ object = move (v.object);
+ else
+ {
+ for (json_member& m: v.object)
+ {
+ auto i (find_if (object.begin (), object.end (),
+ [&m] (const json_member& o)
+ {
+ return m.name == o.name;
+ }));
+ if (i == object.end ())
+ object.push_back (move (m));
+ else if (override)
+ i->value = move (m.value);
+ }
+ }
+
+ return;
+ }
+ }
+ }
+
+ throw invalid_argument (
+ string_type ("unable to append ") + to_string (v.type) + " to " +
+ to_string (type));
+ }
+
+ void json_value::
+ prepend (json_value&& v, bool override)
+ {
+ if (type == json_type::null)
+ {
+ *this = move (v);
+ return;
+ }
+ else if (type == json_type::array)
+ {
+ if (v.type == json_type::array)
+ {
+ if (array.empty ())
+ array = move (v.array);
+ else
+ array.insert (array.begin (),
+ make_move_iterator (v.array.begin ()),
+ make_move_iterator (v.array.end ()));
+ }
+ else
+ array.insert (array.begin (), move (v));
+
+ return;
+ }
+ else
+ {
+ switch (v.type)
+ {
+ case json_type::null: return;
+ case json_type::boolean:
+ {
+ if (type != json_type::boolean)
+ break;
+
+ boolean = boolean || v.boolean;
+ return;
+ }
+ case json_type::signed_number:
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ {
+ if (type != json_type::signed_number &&
+ type != json_type::unsigned_number &&
+ type != json_type::hexadecimal_number)
+ break;
+
+ append_numbers (*this, v);
+ return;
+ }
+ case json_type::string:
+ {
+ if (type != json_type::string)
+ break;
+
+ string.insert (0, v.string);
+ return;
+ }
+ case json_type::array: break;
+ case json_type::object:
+ {
+ if (type != json_type::object)
+ break;
+
+ if (object.empty ())
+ object = move (v.object);
+ else
+ {
+ for (json_member& m: v.object)
+ {
+ auto i (find_if (object.begin (), object.end (),
+ [&m] (const json_member& o)
+ {
+ return m.name == o.name;
+ }));
+ if (i == object.end ())
+ object.insert (object.begin (), move (m));
+ else if (override)
+ i->value = move (m.value);
+ }
+ }
+
+ return;
+ }
+ }
+ }
+
+ throw invalid_argument (
+ string_type ("unable to prepend ") + to_string (v.type) + " to " +
+ to_string (type));
+ }
+
+#ifndef BUILD2_BOOTSTRAP
+ json_value::
+ json_value (json_parser& p, optional<json_type> et)
+ {
+ using namespace butl::json;
+
+ // A JSON input text cannot be empty.
+ //
+ // Once we have JSON5 support we will be able to distinguish hexadecimal
+ // numbers.
+ //
+ json_type t (json_type::null);
+ switch (*p.next ())
+ {
+ case event::begin_object: t = json_type::object; break;
+ case event::begin_array: t = json_type::array; break;
+ case event::string: t = json_type::string; break;
+ case event::number: t = (p.value ()[0] == '-'
+ ? json_type::signed_number
+ : json_type::unsigned_number); break;
+ case event::boolean: t = json_type::boolean; break;
+ case event::null: t = json_type::null; break;
+ case event::name:
+ case event::end_array:
+ case event::end_object:
+ {
+ assert (false);
+ type = json_type::null;
+ return;
+ }
+ }
+
+ if (et && *et != t)
+ {
+ throw invalid_json_input (
+ p.input_name != nullptr ? p.input_name : "",
+ p.line (),
+ p.column (),
+ p.position (),
+ string_type ("expected ") + to_string (*et, true) + " instead of " +
+ to_string (t, true));
+ }
+
+ switch (t)
+ {
+ case json_type::object:
+ {
+ object_type o; // For exception safety.
+ while (*p.next () != event::end_object)
+ {
+ string_type n (p.name ());
+
+ // Check for duplicates. For now we fail but in the future we may
+ // provide a mode (via a flag) to override instead.
+ //
+ if (find_if (o.begin (), o.end (),
+ [&n] (const json_member& m)
+ {
+ return m.name == n;
+ }) != o.end ())
+ {
+ throw invalid_json_input (
+ p.input_name != nullptr ? p.input_name : "",
+ p.line (),
+ p.column (),
+ p.position (),
+ "duplicate object member '" + n + '\'');
+ }
+
+ o.push_back (json_member {move (n), json_value (p)});
+ }
+
+ new (&object) object_type (move (o));
+ type = t;
+ break;
+ }
+ case json_type::array:
+ {
+ array_type c; // For exception safety.
+ while (*p.peek () != event::end_array)
+ c.push_back (json_value (p));
+ p.next (); // Consume end_array.
+
+ new (&array) array_type (move (c));
+ type = t;
+ break;
+ }
+ case json_type::string:
+ {
+ string_type& s (p.value ());
+
+ // Don't move if small string optimized.
+ //
+ if (s.size () > 15)
+ new (&string) string_type (move (s));
+ else
+ new (&string) string_type (s);
+
+ type = t;
+ break;
+ }
+ case json_type::signed_number:
+ {
+ signed_number = p.value<int64_t> ();
+ type = t;
+ break;
+ }
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ {
+ unsigned_number = p.value<uint64_t> ();
+ type = t;
+ break;
+ }
+ case json_type::boolean:
+ {
+ boolean = p.value<bool> ();
+ type = t;
+ break;
+ }
+ case json_type::null:
+ {
+ type = t;
+ break;
+ }
+ }
+ }
+
+ void json_value::
+ serialize (json_buffer_serializer& s, optional<json_type> et) const
+ {
+ using namespace butl::json;
+
+ if (et && *et != type)
+ {
+ throw invalid_json_output (
+ nullopt,
+ invalid_json_output::error_code::invalid_value,
+ string_type ("expected ") + to_string (*et, true) + " instead of " +
+ to_string (type, true));
+ }
+
+ switch (type)
+ {
+ case json_type::null:
+ {
+ s.value (nullptr);
+ break;
+ }
+ case json_type::boolean:
+ {
+ s.value (boolean);
+ break;
+ }
+ case json_type::signed_number:
+ {
+ s.value (signed_number);
+ break;
+ }
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ {
+ // When we have JSON5 support, we will be able to serialize
+ // hexadecimal properly.
+ //
+ s.value (unsigned_number);
+ break;
+ }
+ case json_type::string:
+ {
+ s.value (string);
+ break;
+ }
+ case json_type::array:
+ {
+ s.begin_array ();
+ for (const json_value& e: array)
+ e.serialize (s);
+ s.end_array ();
+ break;
+ }
+ case json_type::object:
+ {
+ s.begin_object ();
+ for (const json_member& m: object)
+ {
+ s.member_name (m.name);
+ m.value.serialize (s);
+ }
+ s.end_object ();
+ break;
+ }
+ }
+ }
+
+#else
+ json_value::
+ json_value (json_parser&, optional<json_type>)
+ {
+ assert (false);
+ type = json_type::null;
+ }
+
+ void json_value::
+ serialize (json_buffer_serializer&, optional<json_type>) const
+ {
+ assert (false);
+ }
+#endif
+}
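
A minimal usage sketch of the append()/prepend() semantics implemented above (illustrative only, assuming a translation unit built against libbuild2; the member names and values are made up):

    #include <libbuild2/json.hxx>

    namespace build2
    {
      static void
      json_append_prepend_example ()
      {
        json_value a (json_type::object);
        a.object.push_back (json_member {"x", json_value (uint64_t (1))});

        json_value b (json_type::object);
        b.object.push_back (json_member {"x", json_value (uint64_t (2))});
        b.object.push_back (json_member {"y", json_value (string ("hi"))});

        // append() overrides by default: a becomes {"x":2,"y":"hi"} with the
        // new member added at the end.
        //
        a.append (move (b));

        // prepend() does not override by default: the existing "x" is kept
        // and the new member is inserted at the beginning, so a becomes
        // {"z":null,"x":2,"y":"hi"}.
        //
        json_value c (json_type::object);
        c.object.push_back (json_member {"x", json_value (uint64_t (3))});
        c.object.push_back (json_member {"z", json_value (nullptr)});
        a.prepend (move (c));
      }
    }
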
diff --git a/libbuild2/json.hxx b/libbuild2/json.hxx
new file mode 100644
index 0000000..96596e3
--- /dev/null
+++ b/libbuild2/json.hxx
@@ -0,0 +1,369 @@
+// file : libbuild2/json.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef LIBBUILD2_JSON_HXX
+#define LIBBUILD2_JSON_HXX
+
+#include <libbuild2/types.hxx>
+#include <libbuild2/utility.hxx>
+
+#include <libbuild2/export.hxx>
+
+namespace butl
+{
+ namespace json
+ {
+ enum class event: uint8_t;
+ class parser;
+ class buffer_serializer;
+ class stream_serializer;
+ class invalid_json_input;
+ class invalid_json_output;
+ }
+}
+
+namespace build2
+{
+ using json_event = butl::json::event;
+ using json_parser = butl::json::parser;
+ using json_buffer_serializer = butl::json::buffer_serializer;
+ using json_stream_serializer = butl::json::stream_serializer;
+ using butl::json::invalid_json_input;
+ using butl::json::invalid_json_output;
+
+#ifndef BUILD2_BOOTSTRAP
+ LIBBUILD2_SYMEXPORT const char*
+ to_string (json_event);
+#endif
+
+ // @@ TODO:
+ //
+ // - provide swap().
+ // - provide operator=(uint64_t), etc.
+ // - provide std::hash specialization
+  //   - tighten at()/[] interface in json_array and json_object
+  //   - tighten noexcept where possible
+ // - operator bool() - in a sense null is like nullopt.
+ //
+
+  // This JSON representation has one extension compared to the standard JSON
+ // model: it distinguishes between signed, unsigned, and hexadecimal
+ // numbers.
+ //
+ // Note also that we don't assume that object members are in a sorted order
+ // (but do assume there are no duplicates). However, we could add an
+ // argument to signal that this is the case to speed up some functions, for
+ // example, compare().
+ //
+ enum class json_type: uint8_t
+ {
+ null, // Note: keep first for comparison.
+ boolean,
+ signed_number,
+ unsigned_number,
+ hexadecimal_number,
+ string,
+ array,
+ object,
+ };
+
+ // Return the JSON type as string. If distinguish_numbers is true, then
+  // distinguish between the signed, unsigned, and hexadecimal types.
+ //
+ LIBBUILD2_SYMEXPORT const char*
+ to_string (json_type, bool distinguish_numbers = false) noexcept;
+
+ inline ostream&
+ operator<< (ostream& os, json_type t) {return os << to_string (t);}
+
+ struct json_member;
+
+ class LIBBUILD2_SYMEXPORT json_value
+ {
+ public:
+ using string_type = build2::string;
+ using array_type = vector<json_value>;
+ using object_type = vector<json_member>;
+
+ json_type type;
+
+ // Unchecked value access.
+ //
+ union
+ {
+ bool boolean;
+ int64_t signed_number;
+ uint64_t unsigned_number; // Also used for hexadecimal_number.
+ string_type string;
+ array_type array;
+ object_type object;
+ };
+
+ // Checked value access.
+ //
+ // If the type matches, return the corresponding member of the union.
+ // Otherwise throw std::invalid_argument.
+ //
+ bool as_bool () const;
+ bool& as_bool ();
+
+ int64_t as_int64 () const;
+ int64_t& as_int64 ();
+
+ uint64_t as_uint64 () const;
+ uint64_t& as_uint64 ();
+
+ const string_type& as_string () const;
+ string_type& as_string ();
+
+ const array_type& as_array () const;
+ array_type& as_array ();
+
+ const object_type& as_object () const;
+ object_type& as_object ();
+
+
+ // Construction.
+ //
+ explicit
+ json_value (json_type = json_type::null) noexcept;
+
+ explicit
+ json_value (std::nullptr_t) noexcept;
+
+ explicit
+ json_value (bool) noexcept;
+
+ explicit
+ json_value (int64_t) noexcept;
+
+ explicit
+ json_value (uint64_t, bool hexadecimal = false) noexcept;
+
+ explicit
+ json_value (string_type);
+
+    // If the expected type is specified, then fail if it does not match the
+    // parsed value's type. Throws invalid_json_input.
+ //
+ explicit
+ json_value (json_parser&, optional<json_type> expected = {});
+
+    // If the expected type is specified, then fail if it does not match the
+ // value's. Throws invalid_json_output.
+ //
+ void
+ serialize (json_buffer_serializer&,
+ optional<json_type> expected = {}) const;
+
+ // Note that values of different types are never equal, except for
+ // signed/unsigned/hexadecimal numbers. Null is equal to null and is less
+ // than any other value. Arrays are compared lexicographically. Object
+ // members are considered in the lexicographically-compared name-ascending
+ // order (see RFC8785). An absent member is less than a present member
+ // (even if it's null).
+ //
+ int
+ compare (const json_value&) const noexcept;
+
+ // Append/prepend one JSON value to another. Throw invalid_argument if the
+ // values are incompatible. Note that for numbers this can also lead to
+ // the change of the value type.
+ //
+    // Appending/prepending an array to an array splices in its elements
+    // rather than adding an element of the array type.
+ //
+ // By default, append to an object overrides existing members while
+ // prepend does not. In a sense, whatever appears last is kept, which is
+ // consistent with what we expect to happen when specifying the same name
+ // repeatedly (provided it's not considered invalid) in a text
+ // representation (e.g., {"a":1,"a":2}). Position-wise, both append and
+ // prepend retain the positions of existing members with append inserting
+ // new ones at the end while prepend -- at the beginning.
+ //
+ void
+ append (json_value&&, bool override = true);
+
+ void
+ prepend (json_value&&, bool override = false);
+
+ // Array element access.
+ //
+ // If the index is out of array bounds, the at() functions throw
+ // std::out_of_range, the const operator[] returns null_json_value, and
+ // the non-const operator[] inserts a new null value at the specified
+ // position (filling any missing elements in between with nulls) and
+ // returns that. All three functions throw std::invalid_argument if the
+    // value is neither an array nor null, with null treated as a (missing)
+    // array rather than a wrong value type (and with the at() functions
+    // throwing out_of_range in this case).
+ //
+ // Note that non-const operator[] will not only insert a new element but
+    // will also turn the value it is called upon into an array if it is
+    // null. This semantics allows you to string together several subscripts
+    // to build up a chain of values.
+ //
+ // Note also that while the operator[] interface is convenient for
+ // accessing and modifying (or building up) values deep in the tree, it
+ // can lead to inefficiencies or even undesirable semantics during
+ // otherwise read-only access of a non-const object due to the potential
+ // insertion of null values for missing array elements. As a result, it's
+ // recommended to always use a const reference for read-only access (or
+ // use the at() interface if this is deemed too easy to forget).
+ //
+ const json_value&
+ at (size_t) const;
+
+ json_value&
+ at (size_t);
+
+#if 0
+ const json_value&
+ operator[] (size_t) const;
+
+ json_value&
+ operator[] (size_t);
+#endif
+
+
+ // Object member access.
+ //
+ // If a member with the specified name is not found in the object, the
+ // at() functions throw std::out_of_range, the find() function returns
+ // NULL, the const operator[] returns null_json_value, and the non-const
+ // operator[] adds a new member with the specified name and null value and
+ // returns that value. All three functions throw std::invalid_argument if
+    // the value is neither an object nor null, with null treated as a
+    // (missing) object rather than a wrong value type (and with the at()
+    // functions throwing out_of_range in this case).
+ //
+ // Note that non-const operator[] will not only insert a new member but
+    // will also turn the value it is called upon into an object if it is
+    // null. This semantics allows you to string together several subscripts
+    // to build up a chain of values.
+ //
+ // Note also that while the operator[] interface is convenient for
+ // accessing and modifying (or building up) values deep in the tree, it
+ // can lead to inefficiencies or even undesirable semantics during
+ // otherwise read-only access of a non-const object due to the potential
+ // insertion of null values for missing object members. As a result, it's
+ // recommended to always use a const reference for read-only access (or
+ // use the at() interface if this is deemed too easy to forget).
+ //
+ const json_value&
+ at (const char*) const;
+
+ json_value&
+ at (const char*);
+
+ const json_value*
+ find (const char*) const;
+
+ json_value*
+ find (const char*);
+
+#if 0
+ const json_value&
+ operator[] (const char*) const;
+
+ json_value&
+ operator[] (const char*);
+#endif
+
+ const json_value&
+ at (const string_type&) const;
+
+ json_value&
+ at (const string_type&);
+
+ const json_value*
+ find (const string_type&) const;
+
+ json_value*
+ find (const string_type&);
+
+#if 0
+ const json_value&
+ operator[] (const string_type&) const;
+
+ json_value&
+ operator[] (const string_type&);
+#endif
+
+ // Note that the moved-from value becomes JSON null value.
+ //
+ json_value (json_value&&) noexcept;
+ json_value (const json_value&);
+
+ json_value& operator= (json_value&&) noexcept;
+ json_value& operator= (const json_value&);
+
+ ~json_value () noexcept;
+ };
+
+ LIBBUILD2_SYMEXPORT extern const json_value null_json_value;
+
+ inline bool
+ operator== (const json_value& x, const json_value& y) {return x.compare (y) == 0;}
+
+ inline bool
+ operator!= (const json_value& x, const json_value& y) {return !(x == y);}
+
+ inline bool
+ operator< (const json_value& x, const json_value& y) {return x.compare (y) < 0;}
+
+ inline bool
+ operator<= (const json_value& x, const json_value& y) {return x.compare (y) <= 0;}
+
+ inline bool
+ operator> (const json_value& x, const json_value& y) {return !(x <= y);}
+
+ inline bool
+ operator>= (const json_value& x, const json_value& y) {return !(x < y);}
+
+ // A JSON object member.
+ //
+ struct json_member
+ {
+ // @@ TODO: add some convenience constructors?
+
+ string name;
+ json_value value;
+ };
+
+ // A JSON value that can only be an array.
+ //
+ class /*LIBBUILD2_SYMEXPORT*/ json_array: public json_value
+ {
+ public:
+ // Create empty array.
+ //
+ json_array () noexcept;
+
+ explicit
+ json_array (json_parser&);
+
+ void
+ serialize (json_buffer_serializer& s) const;
+ };
+
+ // A JSON value that can only be an object.
+ //
+ class /*LIBBUILD2_SYMEXPORT*/ json_object: public json_value
+ {
+ public:
+ // Create empty object.
+ //
+ json_object () noexcept;
+
+ explicit
+ json_object (json_parser&);
+
+ void
+ serialize (json_buffer_serializer& s) const;
+ };
+}
+
+#include <libbuild2/json.ixx>
+
+#endif // LIBBUILD2_JSON_HXX
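
A small hypothetical helper illustrating the checked accessors and object member lookup declared above (the "verbosity" member and the default value are made up; assumes a translation unit built against libbuild2):

    #include <libbuild2/json.hxx>

    namespace build2
    {
      // Return cfg["verbosity"] as an unsigned integer, falling back to def
      // if the member is absent or null. Assumes cfg is a JSON object (or
      // null, which find() treats as a missing object).
      //
      static uint64_t
      verbosity (const json_value& cfg, uint64_t def = 1)
      {
        // find() returns NULL for a missing member (at() would throw
        // std::out_of_range) while as_uint64() throws std::invalid_argument
        // if the value is not an unsigned/hexadecimal number.
        //
        if (const json_value* v = cfg.find ("verbosity"))
        {
          if (v->type != json_type::null)
            return v->as_uint64 ();
        }

        return def;
      }
    }
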
diff --git a/libbuild2/json.ixx b/libbuild2/json.ixx
new file mode 100644
index 0000000..76cd00a
--- /dev/null
+++ b/libbuild2/json.ixx
@@ -0,0 +1,349 @@
+// file : libbuild2/json.ixx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+namespace build2
+{
+ [[noreturn]] LIBBUILD2_SYMEXPORT void
+ json_as_throw (json_type actual, json_type expected);
+
+ inline bool json_value::
+ as_bool () const
+ {
+ if (type == json_type::boolean)
+ return boolean;
+
+ json_as_throw (type, json_type::boolean);
+ }
+
+ inline bool& json_value::
+ as_bool ()
+ {
+ if (type == json_type::boolean)
+ return boolean;
+
+ json_as_throw (type, json_type::boolean);
+ }
+
+ inline int64_t json_value::
+ as_int64 () const
+ {
+ if (type == json_type::signed_number)
+ return signed_number;
+
+ json_as_throw (type, json_type::signed_number);
+ }
+
+ inline int64_t& json_value::
+ as_int64 ()
+ {
+ if (type == json_type::signed_number)
+ return signed_number;
+
+ json_as_throw (type, json_type::signed_number);
+ }
+
+ inline uint64_t json_value::
+ as_uint64 () const
+ {
+ if (type == json_type::unsigned_number ||
+ type == json_type::hexadecimal_number)
+ return unsigned_number;
+
+ json_as_throw (type, json_type::unsigned_number);
+ }
+
+ inline uint64_t& json_value::
+ as_uint64 ()
+ {
+ if (type == json_type::unsigned_number ||
+ type == json_type::hexadecimal_number)
+ return unsigned_number;
+
+ json_as_throw (type, json_type::unsigned_number);
+ }
+
+ inline const string& json_value::
+ as_string () const
+ {
+ if (type == json_type::string)
+ return string;
+
+ json_as_throw (type, json_type::string);
+ }
+
+ inline string& json_value::
+ as_string ()
+ {
+ if (type == json_type::string)
+ return string;
+
+ json_as_throw (type, json_type::string);
+ }
+
+ inline const json_value::array_type& json_value::
+ as_array () const
+ {
+ if (type == json_type::array)
+ return array;
+
+ json_as_throw (type, json_type::array);
+ }
+
+ inline json_value::array_type& json_value::
+ as_array ()
+ {
+ if (type == json_type::array)
+ return array;
+
+ json_as_throw (type, json_type::array);
+ }
+
+ inline const json_value::object_type& json_value::
+ as_object () const
+ {
+ if (type == json_type::object)
+ return object;
+
+ json_as_throw (type, json_type::object);
+ }
+
+ inline json_value::object_type& json_value::
+ as_object ()
+ {
+ if (type == json_type::object)
+ return object;
+
+ json_as_throw (type, json_type::object);
+ }
+
+ inline json_value::
+ ~json_value () noexcept
+ {
+ switch (type)
+ {
+ case json_type::null:
+ case json_type::boolean:
+ case json_type::signed_number:
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number: break;
+ case json_type::string: string.~string_type (); break;
+ case json_type::array: array.~array_type (); break;
+ case json_type::object: object.~object_type (); break;
+ }
+ }
+
+ inline json_value::
+ json_value (json_type t) noexcept
+ : type (t)
+ {
+ switch (type)
+ {
+ case json_type::null: break;
+ case json_type::boolean: boolean = false; break;
+ case json_type::signed_number: signed_number = 0; break;
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number: unsigned_number = 0; break;
+ case json_type::string: new (&string) string_type (); break;
+ case json_type::array: new (&array) array_type (); break;
+ case json_type::object: new (&object) object_type (); break;
+ }
+ }
+
+ inline json_value::
+ json_value (std::nullptr_t) noexcept
+ : type (json_type::null)
+ {
+ }
+
+ inline json_value::
+ json_value (bool v) noexcept
+ : type (json_type::boolean), boolean (v)
+ {
+ }
+
+ inline json_value::
+ json_value (int64_t v) noexcept
+ : type (json_type::signed_number), signed_number (v)
+ {
+ }
+
+ inline json_value::
+ json_value (uint64_t v, bool hex) noexcept
+ : type (hex
+ ? json_type::hexadecimal_number
+ : json_type::unsigned_number),
+ unsigned_number (v)
+ {
+ }
+
+ inline json_value::
+ json_value (string_type v)
+ : type (json_type::string), string (move (v))
+ {
+ }
+
+ inline const json_value& json_value::
+ at (const string_type& n) const
+ {
+ return at (n.c_str ());
+ }
+
+ inline json_value& json_value::
+ at (const string_type& n)
+ {
+ return at (n.c_str ());
+ }
+
+ inline const json_value* json_value::
+ find (const string_type& n) const
+ {
+ return find (n.c_str ());
+ }
+
+ inline json_value* json_value::
+ find (const string_type& n)
+ {
+ return find (n.c_str ());
+ }
+
+#if 0
+ inline const json_value& json_value::
+ operator[] (const string_type& n) const
+ {
+ return operator[] (n.c_str ());
+ }
+
+ inline json_value& json_value::
+ operator[] (const string_type& n)
+ {
+ return operator[] (n.c_str ());
+ }
+#endif
+
+ inline json_value::
+ json_value (json_value&& v) noexcept
+ : type (v.type)
+ {
+ switch (type)
+ {
+ case json_type::null:
+ break;
+ case json_type::boolean:
+ boolean = v.boolean;
+ break;
+ case json_type::signed_number:
+ signed_number = v.signed_number;
+ break;
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ unsigned_number = v.unsigned_number;
+ break;
+ case json_type::string:
+ new (&string) string_type (move (v.string));
+ v.string.~string_type ();
+ break;
+ case json_type::array:
+ new (&array) array_type (move (v.array));
+ v.array.~array_type ();
+ break;
+ case json_type::object:
+ new (&object) object_type (move (v.object));
+ v.object.~object_type ();
+ break;
+ }
+
+ v.type = json_type::null;
+ }
+
+ inline json_value::
+ json_value (const json_value& v)
+ : type (v.type)
+ {
+ switch (type)
+ {
+ case json_type::null:
+ break;
+ case json_type::boolean:
+ boolean = v.boolean;
+ break;
+ case json_type::signed_number:
+ signed_number = v.signed_number;
+ break;
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ unsigned_number = v.unsigned_number;
+ break;
+ case json_type::string:
+ new (&string) string_type (v.string);
+ break;
+ case json_type::array:
+ new (&array) array_type (v.array);
+ break;
+ case json_type::object:
+ new (&object) object_type (v.object);
+ break;
+ }
+ }
+
+ inline json_value& json_value::
+ operator= (json_value&& v) noexcept
+ {
+ if (this != &v)
+ {
+ this->~json_value ();
+ new (this) json_value (move (v));
+ }
+ return *this;
+ }
+
+ inline json_value& json_value::
+ operator= (const json_value& v)
+ {
+ if (this != &v)
+ {
+ this->~json_value ();
+ new (this) json_value (v);
+ }
+ return *this;
+ }
+
+ // json_array
+ //
+ inline json_array::
+ json_array () noexcept
+ : json_value (json_type::array)
+ {
+ }
+
+ inline json_array::
+ json_array (json_parser& p)
+ : json_value (p, json_type::array)
+ {
+ }
+
+ inline void json_array::
+ serialize (json_buffer_serializer& s) const
+ {
+ json_value::serialize (s, json_type::array);
+ }
+
+ // json_object
+ //
+ inline json_object::
+ json_object () noexcept
+ : json_value (json_type::object)
+ {
+ }
+
+ inline json_object::
+ json_object (json_parser& p)
+ : json_value (p, json_type::object)
+ {
+ }
+
+ inline void json_object::
+ serialize (json_buffer_serializer& s) const
+ {
+ json_value::serialize (s, json_type::object);
+ }
+}
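
To make the documented value semantics concrete (a moved-from value becomes null; comparison is deep, with signed/unsigned representations of the same number comparing equal), an illustrative sketch:

    #include <cassert>

    #include <libbuild2/json.hxx>

    namespace build2
    {
      static void
      json_value_semantics ()
      {
        json_value a (string ("abc"));
        json_value b (move (a));

        assert (a.type == json_type::null);        // Moved-from becomes null.
        assert (b == json_value (string ("abc"))); // Deep equality.
        assert (json_value (nullptr) < b);         // Null is less than any other value.

        // Signed and unsigned representations of the same number compare equal.
        //
        assert (json_value (int64_t (1)) == json_value (uint64_t (1)));
      }
    }
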
diff --git a/libbuild2/lexer.cxx b/libbuild2/lexer.cxx
index 992e5d1..04c15be 100644
--- a/libbuild2/lexer.cxx
+++ b/libbuild2/lexer.cxx
@@ -42,6 +42,22 @@ namespace build2
return make_pair (make_pair (r[0], r[1]), sep_);
}
+ pair<char, bool> lexer::
+ peek_char ()
+ {
+ auto p (skip_spaces ());
+ assert (!p.second);
+ sep_ = p.first;
+
+ char r ('\0');
+
+ xchar c (peek ());
+ if (!eos (c))
+ r = c;
+
+ return make_pair (r, sep_);
+ }
+
void lexer::
mode (lexer_mode m, char ps, optional<const char*> esc, uintptr_t data)
{
@@ -144,13 +160,15 @@ namespace build2
break;
}
case lexer_mode::foreign:
- assert (data > 1);
- // Fall through.
+ {
+ assert (ps == '\0' && data > 1);
+ s = false;
+ break;
+ }
case lexer_mode::single_quoted:
case lexer_mode::double_quoted:
{
- assert (ps == '\0');
- s = false;
+ assert (false); // Can only be set manually in word().
break;
}
case lexer_mode::variable:
@@ -162,8 +180,49 @@ namespace build2
default: assert (false); // Unhandled custom mode.
}
- state_.push (
- state {m, data, nullopt, lsb, false, ps, s, n, q, *esc, s1, s2});
+ mode_impl (state {m, data, nullopt, lsb, false, ps, s, n, q, *esc, s1, s2});
+ }
+
+ void lexer::
+ mode_impl (state&& s)
+ {
+ // If we are in the double-quoted mode then, unless the new mode is eval
+ // or variable, delay the state switch until the current mode is expired.
+ // Note that we delay by injecting the new state beneath the current
+ // state.
+ //
+ if (!state_.empty () &&
+ state_.top ().mode == lexer_mode::double_quoted &&
+ s.mode != lexer_mode::eval &&
+ s.mode != lexer_mode::variable)
+ {
+ state qs (move (state_.top ())); // Save quoted state.
+ state_.top () = move (s); // Overwrite quoted state with new state.
+ state_.push (move (qs)); // Restore quoted state.
+ }
+ else
+ state_.push (move (s));
+ }
+
+ void lexer::
+ expire_mode ()
+ {
+ // If we are in the double-quoted mode, then delay the state expiration
+ // until the current mode is expired. Note that we delay by overwriting
+ // the being expired state with the current state.
+ //
+ assert (!state_.empty () &&
+ (state_.top ().mode != lexer_mode::double_quoted ||
+ state_.size () > 1));
+
+ if (state_.top ().mode == lexer_mode::double_quoted)
+ {
+ state qs (move (state_.top ())); // Save quoted state.
+ state_.pop (); // Pop quoted state.
+ state_.top () = move (qs); // Expire state, restoring quoted state.
+ }
+ else
+ state_.pop ();
}
token lexer::
@@ -654,9 +713,9 @@ namespace build2
}
token lexer::
- word (state st, bool sep)
+ word (const state& rst, bool sep)
{
- lexer_mode m (st.mode);
+ lexer_mode m (rst.mode);
xchar c (peek ());
assert (!eos (c));
@@ -687,22 +746,66 @@ namespace build2
lexeme += c;
};
- for (; !eos (c); c = peek ())
+ const state* st (&rst);
+ for (bool first (true); !eos (c); first = false, c = peek ())
{
// First handle escape sequences.
//
if (c == '\\')
{
- // In the variable mode we treat the beginning of the escape sequence
- // as a separator (think \"$foo\").
+ // In the variable mode we treat immediate `\` as the escape sequence
+ // literal and any following as a separator (think \"$foo\").
//
if (m == lexer_mode::variable)
- break;
+ {
+ if (!first)
+ break;
+
+ get ();
+ c = get ();
+
+ if (eos (c))
+ fail (c) << "unterminated escape sequence";
+
+ // For now we only support all the simple C/C++ escape sequences
+ // plus \0 (which in C/C++ is an octal escape sequence).
+ //
+ // In the future we may decide to support more elaborate sequences
+ // such as \xNN, \uNNNN, etc.
+ //
+ // Note: we return it in the literal form instead of translating for
+ // easier printing.
+ //
+ switch (c)
+ {
+ case '\'':
+ case '"':
+ case '?':
+ case '\\':
+ case '0':
+ case 'a':
+ case 'b':
+ case 'f':
+ case 'n':
+ case 'r':
+ case 't':
+ case 'v': lexeme = c; break;
+ default:
+ fail (c) << "unknown escape sequence \\" << c;
+ }
+
+ state_.pop ();
+ return token (type::escape,
+ move (lexeme),
+ sep,
+ qtype, qcomp, qfirst,
+ ln, cn);
+ }
get ();
xchar p (peek ());
- const char* esc (st.escapes);
+ const char* esc (st->escapes);
if (esc == nullptr ||
(*esc != '\0' && !eos (p) && strchr (esc, p) != nullptr))
@@ -718,7 +821,7 @@ namespace build2
continue;
}
else
- unget (c); // Treat as a normal character.
+ unget (c); // Fall through to treat as a normal character.
}
bool done (false);
@@ -747,8 +850,8 @@ namespace build2
get ();
state_.pop ();
- st = state_.top ();
- m = st.mode;
+ st = &state_.top ();
+ m = st->mode;
continue;
}
}
@@ -757,19 +860,17 @@ namespace build2
//
else if (m == lexer_mode::variable)
{
- bool first (lexeme.empty ());
-
// Handle special variable names, if any.
//
- if (first &&
- st.data != 0 &&
- strchr (reinterpret_cast<const char*> (st.data), c) != nullptr)
+ if (first &&
+ st->data != 0 &&
+ strchr (reinterpret_cast<const char*> (st->data), c) != nullptr)
{
get ();
lexeme += c;
done = true;
}
- else if (c != '_' && !(first ? alpha (c) : alnum (c)))
+ else if (c != '_' && !(lexeme.empty () ? alpha (c) : alnum (c)))
{
if (c != '.')
done = true;
@@ -789,17 +890,17 @@ namespace build2
{
// First check if it's a pair separator.
//
- if (c == st.sep_pair)
+ if (c == st->sep_pair)
done = true;
else
{
// Then see if this character or character sequence is a separator.
//
- for (const char* p (strchr (st.sep_first, c));
+ for (const char* p (strchr (st->sep_first, c));
p != nullptr;
p = done ? nullptr : strchr (p + 1, c))
{
- char s (st.sep_second[p - st.sep_first]);
+ char s (st->sep_second[p - st->sep_first]);
// See if it has a second.
//
@@ -817,8 +918,21 @@ namespace build2
// Handle single and double quotes if enabled for this mode and unless
// they were considered separators.
//
- if (st.quotes && !done)
+ if (st->quotes && !done)
{
+ auto quoted_mode = [this] (lexer_mode m)
+ {
+ // In the double-quoted mode we only do effective escaping of the
+ // special `$("\` characters, line continuations, plus `)` for
+ // symmetry. Nothing can be escaped in single-quoted.
+ //
+ const char* esc (m == lexer_mode::double_quoted ? "$()\"\\\n" : "");
+
+ state_.push (state {
+ m, 0, nullopt, false, false, '\0', false, true, true,
+ esc, nullptr, nullptr});
+ };
+
switch (c)
{
case '\'':
@@ -826,7 +940,7 @@ namespace build2
// Enter the single-quoted mode in case the derived lexer needs
// to notice this.
//
- mode (lexer_mode::single_quoted);
+ quoted_mode (lexer_mode::single_quoted);
switch (qtype)
{
@@ -865,9 +979,10 @@ namespace build2
{
get ();
- mode (lexer_mode::double_quoted);
- st = state_.top ();
- m = st.mode;
+ quoted_mode (lexer_mode::double_quoted);
+
+ st = &state_.top ();
+ m = st->mode;
switch (qtype)
{
@@ -1023,6 +1138,8 @@ namespace build2
}
case '\\':
{
+ // See if this is line continuation.
+ //
get ();
if (peek () == '\n')
diff --git a/libbuild2/lexer.hxx b/libbuild2/lexer.hxx
index 148666e..e913829 100644
--- a/libbuild2/lexer.hxx
+++ b/libbuild2/lexer.hxx
@@ -26,14 +26,15 @@ namespace build2
// mode we don't treat certain characters (e.g., `+`, `=`) as special so
// that we can use them in the variable values, e.g., `foo = g++`. In
// contrast, in the variable mode, we restrict certain character (e.g., `/`)
- // from appearing in the name. The values mode is like value but recogizes
- // `,` as special (used in contexts where we need to list multiple
- // values). The attributes/attribute_value modes are like values where each
- // value is potentially a variable assignment; they don't treat `{` and `}`
- // as special (so we cannot have name groups in attributes) as well as
- // recognizes `=` and `]`. The subscript mode is like value but doesn't
- // treat `{` and `}` as special and recognizes `]`. The eval mode is used in
- // the evaluation context.
+ // from appearing in the name. Additionally, in the variable mode we
+  // recognize leading `\` as the beginning of the escape sequence ($\n). The
+  // values mode is like value but recognizes `,` as special (used in contexts
+ // where we need to list multiple values). The attributes/attribute_value
+ // modes are like values where each value is potentially a variable
+ // assignment; they don't treat `{` and `}` as special (so we cannot have
+  // name groups in attributes) as well as recognize `=` and `]`. The
+ // subscript mode is like value but doesn't treat `{` and `}` as special and
+ // recognizes `]`. The eval mode is used in the evaluation context.
//
// A number of modes are "derived" from the value/values mode by recognizing
// a few extra characters:
@@ -133,10 +134,23 @@ namespace build2
const path_name&
name () const {return name_;}
- // Note: sets mode for the next token. The second argument can be used to
- // specify the pair separator character (if the mode supports pairs). If
- // escapes is not specified, then inherit the current mode's (though a
- // mode can also override it).
+ // Set the lexer mode for the next token or delay this until the end of a
+ // double-quoted token sequence is encountered. The second argument can be
+ // used to specify the pair separator character (if the mode supports
+ // pairs). If escapes is not specified, then inherit the current mode's
+ // (though a mode can also override it).
+ //
+ // Note that there is a common parsing pattern of sensing the language
+ // construct kind we are about to parse by reading its first token,
+ // switching to an appropriate lexing mode, and then parsing the rest. The
+ // problem here is that the first token may start the double-quoted token
+ // sequence, turning the lexer into the double-quoted mode. In this case
+ // switching the lexer mode right away would not be a good idea. Thus,
+ // this function delays the mode switch until the end of the double-quoted
+ // sequence is encountered. Note, however, that such a delay only works
+ // properly if the function is called right after the first quoted token
+ // is read (because any subsequent tokens may end up being parsed in a
+ // nested mode such as variable or eval; see mode_impl() for details).
//
virtual void
mode (lexer_mode,
@@ -153,10 +167,12 @@ namespace build2
state_.top ().lsbrace_unsep = unsep;
}
- // Expire the current mode early.
+ // Expire the current mode early or delay this until the end of a
+ // double-quoted token sequence is encountered (see mode() for details on
+ // the delay condition and reasoning).
//
void
- expire_mode () {state_.pop ();}
+ expire_mode ();
lexer_mode
mode () const {return state_.top ().mode;}
@@ -175,7 +191,7 @@ namespace build2
virtual token
next ();
- // Peek at the first two characters of the next token(s). Return the
+ // Peek at the first one/two characters of the next token(s). Return the
// characters or '\0' if either would be eos. Also return an indicator of
// whether the next token would be separated. Note: cannot be used to peek
// at the first character of a line.
@@ -184,6 +200,9 @@ namespace build2
// mode in which these characters will actually be parsed use the same
// whitespace separation (the sep_space and sep_newline values).
//
+ pair<char, bool>
+ peek_char ();
+
pair<pair<char, char>, bool>
peek_chars ();
@@ -244,7 +263,7 @@ namespace build2
// been "expired" from the top).
//
virtual token
- word (state current, bool separated);
+ word (const state& current, bool separated);
// Return true in first if we have seen any spaces. Skipped empty lines
// don't count. In other words, we are only interested in spaces that are
@@ -255,6 +274,20 @@ namespace build2
pair<bool, bool>
skip_spaces ();
+ // Set state for the next token or delay until the end of a double-quoted
+ // token sequence is encountered (see mode() for details on the delay
+ // condition and reasoning).
+ //
+ void
+ mode_impl (state&&);
+
+ state&
+ current_state ()
+ {
+ assert (!state_.empty ());
+ return state_.top ();
+ }
+
// Diagnostics.
//
protected:
@@ -283,11 +316,14 @@ namespace build2
}
const path_name& name_;
- std::stack<state> state_;
bool sep_; // True if we skipped spaces in peek().
private:
+ // Use current_state(), mode_impl(), and expire_mode().
+ //
+ std::stack<state> state_;
+
using base = char_scanner<butl::utf8_validator, 2>;
// Buffer for a get()/peek() potential error.
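
A hypothetical, simplified illustration of the parsing pattern that the delayed mode switch above supports (not taken from the actual parser; assumes the usual lexer_mode and token_type definitions):

    #include <libbuild2/token.hxx>
    #include <libbuild2/lexer.hxx>

    namespace build2
    {
      static void
      parse_value (lexer& l)
      {
        // Sense the construct from its first token...
        //
        token t (l.next ());

        // ...then switch to the value mode with `@` as the pair separator.
        // If t opened a double-quoted sequence, the switch is delayed until
        // the closing quote is seen (see mode_impl()); otherwise it takes
        // effect for the next token.
        //
        l.mode (lexer_mode::value, '@');

        while (l.next ().type != token_type::eos)
          ; // Consume the rest of the value.
      }
    }
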
diff --git a/libbuild2/make-parser.test.cxx b/libbuild2/make-parser.test.cxx
index 5c57978..00a265a 100644
--- a/libbuild2/make-parser.test.cxx
+++ b/libbuild2/make-parser.test.cxx
@@ -22,7 +22,7 @@ namespace build2
// Fake build system driver, default verbosity.
//
init_diag (1);
- init (nullptr, argv[0]);
+ init (nullptr, argv[0], true);
path_name in ("<stdin>");
diff --git a/libbuild2/module.cxx b/libbuild2/module.cxx
index 3f4f1d0..1aaa38d 100644
--- a/libbuild2/module.cxx
+++ b/libbuild2/module.cxx
@@ -30,26 +30,26 @@ using namespace butl;
namespace build2
{
- mutex loaded_modules_lock::mutex_;
+ mutex module_libraries_lock::mutex_;
- loaded_module_map loaded_modules;
+ module_libraries_map module_libraries;
void
load_builtin_module (module_load_function* lf)
{
for (const module_functions* i (lf ()); i->name != nullptr; ++i)
- loaded_modules[i->name] = i;
+ module_libraries.emplace (i->name, module_library {*i, dir_path ()});
}
// Sorted array of bundled modules (excluding core modules bundled with
// libbuild2; see below).
//
-#if !defined(BUILD2_BOOTSTRAP) && !defined(LIBBUILD2_STATIC_BUILD)
static const char* bundled_modules[] = {
"bash",
"bin",
"c",
"cc",
+ "cli",
"cxx",
"in",
"version"
@@ -63,7 +63,6 @@ namespace build2
bundled_modules + sizeof (bundled_modules) / sizeof (*bundled_modules),
mod);
}
-#endif
// Note: also used by ad hoc recipes thus not static.
//
@@ -77,15 +76,23 @@ namespace build2
// same global mutexes. Also disable nested module context for good
// measure.
//
+ // The reserve values were picked experimentally by building libbuild2 and
+ // adding a reasonable margin for future growth.
+ //
ctx.module_context_storage->reset (
- new context (ctx.sched,
- ctx.mutexes,
- ctx.fcache,
- false, /* match_only */
+ new context (*ctx.sched,
+ *ctx.mutexes,
+ *ctx.fcache,
+ nullopt, /* match_only */
false, /* no_external_modules */
false, /* dry_run */
+ ctx.no_diag_buffer,
ctx.keep_going,
ctx.global_var_overrides, /* cmd_vars */
+ context::reserves {
+ 2500, /* targets */
+ 900 /* variables */
+ },
nullopt)); /* module_context */
// We use the same context for building any nested modules that might be
@@ -120,6 +127,9 @@ namespace build2
{
// New update operation.
//
+ assert (op_update.operation_pre == nullptr &&
+ op_update.operation_post == nullptr);
+
ctx.module_context->current_operation (op_update);
// Un-tune the scheduler.
@@ -127,13 +137,14 @@ namespace build2
// Note that we can only do this if we are running serially because
// otherwise we cannot guarantee the scheduler is idle (we could have
// waiting threads from the outer context). This is fine for now since the
- // only two tuning level we use are serial and full concurrency (turns out
- // currently we don't really need this: we will always be called during
- // load or match phases and we always do parallel match; but let's keep it
- // in case things change).
- //
- auto sched_tune (ctx.sched.serial ()
- ? scheduler::tune_guard (ctx.sched, 0)
+  // only two tuning levels we use are serial and full concurrency. (Turns
+ // out currently we don't really need this: we will always be called
+ // during load or match phases and we always do parallel match; but let's
+ // keep it in case things change. Actually, we may need it, if the
+ // scheduler was started up in a tuned state, like in bpkg).
+ //
+ auto sched_tune (ctx.sched->serial ()
+ ? scheduler::tune_guard (*ctx.sched, 0)
: scheduler::tune_guard ());
// Remap verbosity level 0 to 1 unless we were requested to be silent.
@@ -231,11 +242,20 @@ namespace build2
}
#endif
- static module_load_function*
+ // Return the module functions as well as the module project directory or
+  // empty if not imported from a project. Return {nullptr, nullopt} if not
+ // found.
+ //
+ // The dry-run mode only calls import_search() and always returns NULL for
+ // module functions (see below for background).
+ //
+ static pair<module_load_function*, optional<dir_path>>
import_module (
#if defined(BUILD2_BOOTSTRAP) || defined(LIBBUILD2_STATIC_BUILD)
+ bool,
scope&,
#else
+ bool dry_run,
scope& bs,
#endif
const string& mod,
@@ -249,15 +269,21 @@ namespace build2
{
tracer trace ("import_module");
+ pair<module_load_function*, optional<dir_path>> r (nullptr, nullopt);
+
// Take care of core modules that are bundled with libbuild2 in case they
// are not pre-loaded by the driver.
//
- if (mod == "config") return &config::build2_config_load;
- else if (mod == "dist") return &dist::build2_dist_load;
- else if (mod == "install") return &install::build2_install_load;
- else if (mod == "test") return &test::build2_test_load;
+ if (mod == "config") r.first = &config::build2_config_load;
+ else if (mod == "dist") r.first = &dist::build2_dist_load;
+ else if (mod == "install") r.first = &install::build2_install_load;
+ else if (mod == "test") r.first = &test::build2_test_load;
- module_load_function* r (nullptr);
+ if (r.first != nullptr)
+ {
+ r.second = dir_path ();
+ return r;
+ }
// No dynamic loading of build system modules during bootstrap or if
// statically-linked..
@@ -326,7 +352,7 @@ namespace build2
// and undefined if the module was not mentioned.
//
if (boot && !bundled && ctx.no_external_modules)
- return nullptr;
+ return r; // NULL
// See if we can import a target for this module.
//
@@ -381,7 +407,7 @@ namespace build2
if (ir.first.empty ())
{
assert (opt);
- return nullptr;
+ return r; // NULL
}
if (ir.second)
@@ -389,6 +415,8 @@ namespace build2
// What if a module is specified with config.import.<mod>.<lib>.libs?
// Note that this could still be a project-qualified target.
//
+ // Note: we now return an empty directory to mean something else.
+ //
if (ir.second->empty ())
fail (loc) << "direct module target importation not yet supported";
@@ -396,6 +424,17 @@ namespace build2
// the target (which will also give us the shared library path).
//
l5 ([&]{trace << "found " << ir.first << " in " << *ir.second;});
+ }
+
+ if (dry_run)
+ {
+ r.second = ir.second ? move (*ir.second) : dir_path ();
+ return r;
+ }
+
+ if (ir.second)
+ {
+ r.second = *ir.second;
// Create the build context if necessary.
//
@@ -408,7 +447,7 @@ namespace build2
create_module_context (ctx, loc);
}
- // Inherit loaded_modules lock from the outer context.
+ // Inherit module_libraries lock from the outer context.
//
ctx.module_context->modules_lock = ctx.modules_lock;
@@ -417,7 +456,7 @@ namespace build2
//
auto_thread_env penv (nullptr);
context& ctx (*bs.ctx.module_context);
- scheduler::phase_guard pg (ctx.sched);
+ scheduler::phase_guard pg (*ctx.sched);
// Load the imported project in the module context.
//
@@ -468,6 +507,8 @@ namespace build2
}
else
{
+ r.second = dir_path ();
+
// No module project found. Form the shared library name (incorporating
// build system core version) and try using system-default search
// (installed, rpath, etc).
@@ -510,7 +551,7 @@ namespace build2
fail (loc) << "unable to lookup " << sym << " in build system module "
<< mod << " (" << lib << "): " << err;
- r = function_cast<module_load_function*> (hs.second);
+ r.first = function_cast<module_load_function*> (hs.second);
}
else if (!opt)
{
@@ -522,7 +563,10 @@ namespace build2
<< "line variable to specify its project out_root";
}
else
+ {
+ r.second = nullopt;
l5 ([&]{trace << "unable to load " << lib << ": " << err;});
+ }
#endif // BUILD2_BOOTSTRAP || LIBBUILD2_STATIC_BUILD
@@ -538,89 +582,200 @@ namespace build2
{
tracer trace ("find_module");
- // Note that we hold the lock for the entire time it takes to build a
- // module.
+ // If this is a submodule, get the main module name.
+ //
+ string mmod (smod, 0, smod.find ('.'));
+
+ // We have a somewhat strange two-level caching in imported_modules
+ // and module_libraries in order to achieve the following:
+ //
+ // 1. Correctly handle cases where a module can be imported from one
+ // project but not the other.
+ //
+ // 2. Make sure that for each project that imports the module we actually
+ // call import_search() in order to mark any config.import.* as used.
//
- loaded_modules_lock lock (bs.ctx);
+ // 3. Make sure that all the projects import the same module.
+ //
+ scope& rs (*bs.root_scope ());
+
+ const string* mod;
+ const module_functions* fun;
- // Optional modules and submodules sure make this logic convoluted. So we
- // divide it into two parts: (1) find or insert an entry (for submodule
- // or, failed that, for the main module, the latter potentially NULL) and
- // (2) analyze the entry and issue diagnostics.
+ // First check the project's imported_modules in case this (main) module
+ // is known to be not found.
//
- auto i (loaded_modules.find (smod)), e (loaded_modules.end ());
+ auto j (rs.root_extra->imported_modules.find (mmod));
+ auto je (rs.root_extra->imported_modules.end ());
- if (i == e)
+ if (j != je && !j->found)
{
- // If this is a submodule, get the main module name.
+ mod = &mmod;
+ fun = nullptr;
+ }
+ else
+ {
+ // Note that we hold the lock for the entire time it takes to build a
+ // module.
//
- string mmod (smod, 0, smod.find ('.'));
+ module_libraries_lock lock (bs.ctx);
- if (mmod != smod)
- i = loaded_modules.find (mmod);
+ // Optional modules and submodules sure make this logic convoluted. So
+ // we divide it into two parts: (1) find or insert an entry (for
+ // submodule or, failed that, for the main module) and (2) analyze the
+ // entry and issue diagnostics.
+ //
+ auto i (module_libraries.find (smod));
+ auto ie (module_libraries.end ());
- if (i == e)
+ bool imported (false);
+ if (i == ie)
{
- module_load_function* f (import_module (bs, mmod, loc, boot, opt));
+ if (mmod != smod)
+ i = module_libraries.find (mmod);
- if (f != nullptr)
+ if (i == ie)
{
- // Enter all the entries noticing which one is our submodule. If
- // none are, then we notice the main module.
- //
- for (const module_functions* j (f ()); j->name != nullptr; ++j)
+ pair<module_load_function*, optional<dir_path>> ir (
+ import_module (false /* dry_run */, bs, mmod, loc, boot, opt));
+
+ if (module_load_function* f = ir.first)
{
- const string& n (j->name);
+ // Enter all the entries noticing which one is our submodule. If
+ // none are, then we notice the main module.
+ //
+ for (const module_functions* j (f ()); j->name != nullptr; ++j)
+ {
+ const string& n (j->name);
+
+ l5 ([&]{trace << "registering " << n;});
+
+ bool main (n == mmod);
- l5 ([&]{trace << "registering " << n;});
+ auto p (module_libraries.emplace (
+ n,
+ module_library {
+ *j,
+ main ? move (*ir.second) : dir_path ()}));
- auto p (loaded_modules.emplace (n, j));
+ if (!p.second)
+ fail (loc) << "build system submodule name " << n << " of main "
+ << "module " << mmod << " is already in use";
- if (!p.second)
- fail (loc) << "build system submodule name " << n << " of main "
- << "module " << mmod << " is already in use";
+ // Note: this assumes the main module is last.
+ //
+ if (n == smod || (main && i == ie))
+ i = p.first;
+ }
- if (n == smod || (i == e && n == mmod))
- i = p.first;
+ // We should at least have the main module.
+ //
+ if (i == ie)
+ fail (loc) << "invalid function list in build system module "
+ << mmod;
}
- // We should at least have the main module.
- //
- if (i == e)
- fail (loc) << "invalid function list in build system module "
- << mmod;
+ imported = true;
+ }
+ }
+
+ // Now the iterator points to a submodule or to the main module, or to
+ // end if neither is found.
+ //
+    assert (j == je || i != ie); // Cache state consistency sanity check.
+
+ if (i != ie)
+ {
+ // Note: these should remain stable after we release the lock.
+ //
+ mod = &i->first;
+ fun = &i->second.functions.get ();
+
+ // If this project hasn't imported this main module and we found the
+ // entry in the cache, then we have to perform the import_search()
+ // part of import_module() in order to cover items (2) and (3) above.
+ //
+ // There is one nuance: omit this for bundled modules since it's
+ // possible to first import them ad hoc and then, if we call
+ // import_search() again, to find them differently (e.g., as a
+ // subproject).
+ //
+ if (j == je && !imported && !bundled_module (mmod))
+ {
+ pair<module_load_function*, optional<dir_path>> ir (
+ import_module (true /* dry_run */, bs, mmod, loc, boot, opt));
+
+ if (ir.second)
+ {
+ if (i->first != mmod)
+ {
+ i = module_libraries.find (mmod);
+ assert (i != ie); // Has to be there.
+ }
+
+ const dir_path& cd (*ir.second);
+ const dir_path& pd (i->second.import_path);
+
+ if (cd != pd)
+ {
+ fail (loc) << "inconsistent build system module " << mmod
+ << " importation" <<
+ info << rs << " imports it as "
+ << (cd.empty () ? "ad hoc" : cd.representation ().c_str ()) <<
+ info << "previously imported as "
+ << (pd.empty () ? "ad hoc" : pd.representation ().c_str ());
+ }
+ }
+ else
+ {
+ // This module is not found from this project.
+ //
+ mod = &mmod;
+ fun = nullptr;
+ }
}
- else
- i = loaded_modules.emplace (move (mmod), nullptr).first;
+ }
+ else
+ {
+ mod = &mmod;
+ fun = nullptr;
}
}
+ // Cache the result in imported_modules if necessary.
+ //
+ if (j == je)
+ rs.root_extra->imported_modules.push_back (
+ module_import {mmod, fun != nullptr});
+
// Reduce skipped external module to optional.
//
- if (boot && i->second == nullptr)
+ if (boot && fun == nullptr)
opt = true;
- // Now the iterator points to a submodule or to the main module, the
- // latter potentially NULL.
+ // Handle optional.
//
- if (!opt)
+ if (fun == nullptr)
{
- if (i->second == nullptr)
- {
- fail (loc) << "unable to load build system module " << i->first;
- }
- else if (i->first != smod)
- {
- fail (loc) << "build system module " << i->first << " has no "
+ if (!opt)
+ fail (loc) << "unable to load build system module " << *mod;
+ }
+ else if (*mod != smod)
+ {
+ if (!opt)
+ fail (loc) << "build system module " << *mod << " has no "
<< "submodule " << smod;
+ else
+ {
+ // Note that if the main module exists but has no such submodule, we
+ // return NULL rather than fail (think of an older version of a module
+ // that doesn't implement some extra functionality).
+ //
+ fun = nullptr;
}
}
- // Note that if the main module exists but has no such submodule, we
- // return NULL rather than fail (think of an older version of a module
- // that doesn't implement some extra functionality).
- //
- return i->second;
+ return fun;
}
void
@@ -628,7 +783,7 @@ namespace build2
{
// First see if this modules has already been booted for this project.
//
- module_map& lm (rs.root_extra->modules);
+ module_state_map& lm (rs.root_extra->loaded_modules);
auto i (lm.find (mod));
if (i != lm.end ())
@@ -673,7 +828,7 @@ namespace build2
i->boot_init = e.init;
}
- rs.assign (rs.var_pool ().insert (mod + ".booted")) = (mf != nullptr);
+ rs.assign (rs.var_pool (true).insert (mod + ".booted")) = (mf != nullptr);
}
void
@@ -704,7 +859,7 @@ namespace build2
{
// First see if this modules has already been inited for this project.
//
- module_map& lm (rs.root_extra->modules);
+ module_state_map& lm (rs.root_extra->loaded_modules);
auto i (lm.find (mod));
bool f (i == lm.end ());
@@ -742,7 +897,7 @@ namespace build2
// buildfile-visible (where we use the term "load a module"; see the note
// on terminology above)
//
- auto& vp (rs.var_pool ());
+ auto& vp (rs.var_pool (true));
value& lv (bs.assign (vp.insert (mod + ".loaded")));
value& cv (bs.assign (vp.insert (mod + ".configured")));
@@ -824,7 +979,7 @@ namespace build2
if (cast_false<bool> (bs[name + ".loaded"]))
{
if (cast_false<bool> (bs[name + ".configured"]))
- return rs.root_extra->modules.find (name)->module;
+ return rs.root_extra->loaded_modules.find (name)->module;
}
else
{
@@ -846,7 +1001,7 @@ namespace build2
// attempt to load it was optional?
return cast_false<bool> (bs[name + ".loaded"])
- ? rs.root_extra->modules.find (name)->module
+ ? rs.root_extra->loaded_modules.find (name)->module
: init_module (rs, bs, name, loc, false /* optional */, hints)->module;
}
}
diff --git a/libbuild2/module.hxx b/libbuild2/module.hxx
index 8223bae..6cdd040 100644
--- a/libbuild2/module.hxx
+++ b/libbuild2/module.hxx
@@ -21,6 +21,12 @@ namespace build2
// implementation's perspectives, the module library is "loaded" and the
// module is optionally "bootstrapped" (or "booted" for short) and then
// "initialized" (or "inited").
+ //
+ // Note also that a module name (or component thereof, for submodules) is
+ // not a project name (in particular, it can be less than 3 characters long)
+ // and we usually use `-` instead of `_` as a word separator within
+ // components, for example `c.as-cpp` (since the top-level component ends up
+ // in the library name; but this is not a hard rule).
// Base class for module instance.
//
@@ -145,9 +151,9 @@ namespace build2
//
// The <name> part in the function name is the main module name without
// submodule components (for example, `c` in `c.config`) and the load
- // function is expected to return boot/init functions for all its submodules
- // (if any) as well as for the module itself as an array of module_functions
- // terminated with an all-NULL entry.
+ // function is expected to return boot/init functions as an array of
+ // module_functions: entries for all its submodules (if any) first, followed
+ // by the module itself, and terminated with an all-NULL entry.
//
// Note that the load function is guaranteed to be called during serial
// execution (either from main() or during the load phase).
@@ -155,7 +161,31 @@ namespace build2
extern "C"
using module_load_function = const module_functions* ();
- // Module state.
+ // Imported module state.
+ //
+ // The module name is the main module (corresponding to the library). If
+ // found is false then this module could not be imported from this project.
+ //
+ struct module_import
+ {
+ const string name;
+ bool found;
+ };
+
+ struct module_import_map: vector<module_import>
+ {
+ iterator
+ find (const string& name)
+ {
+ return find_if (
+ begin (), end (),
+ [&name] (const module_import& i) {return i.name == name;});
+ }
+ };
+
+ // Loaded module state.
+ //
+  // Note that unlike module_import, the module name here could be a submodule.
//
struct module_state
{
@@ -167,7 +197,7 @@ namespace build2
optional<module_boot_init> boot_init;
};
- struct module_map: vector<module_state>
+ struct module_state_map: vector<module_state>
{
iterator
find (const string& name)
@@ -268,23 +298,28 @@ namespace build2
return static_cast<T&> (*load_module (root, base, name, l, config_hints));
}
- // Loaded modules (as in libraries).
+ // Loaded module libraries.
//
- // A NULL entry for the main module indicates that a module library was not
- // found.
+ // Note that this map contains entries for all the submodules.
//
- using loaded_module_map = map<string, const module_functions*>;
+ struct module_library
+ {
+ reference_wrapper<const module_functions> functions;
+ dir_path import_path; // Only for main module.
+ };
+
+ using module_libraries_map = map<string, module_library>;
- // The loaded_modules map is locked per top-level (as opposed to nested)
+ // The module_libraries map is locked per top-level (as opposed to nested)
// context (see context.hxx for details).
//
// Note: should only be constructed during contexts-wide serial execution.
//
- class LIBBUILD2_SYMEXPORT loaded_modules_lock
+ class LIBBUILD2_SYMEXPORT module_libraries_lock
{
public:
explicit
- loaded_modules_lock (context& c)
+ module_libraries_lock (context& c)
: ctx_ (c), lock_ (mutex_, defer_lock)
{
if (ctx_.modules_lock == nullptr)
@@ -294,7 +329,7 @@ namespace build2
}
}
- ~loaded_modules_lock ()
+ ~module_libraries_lock ()
{
if (ctx_.modules_lock == this)
ctx_.modules_lock = nullptr;
@@ -306,7 +341,7 @@ namespace build2
mlock lock_;
};
- LIBBUILD2_SYMEXPORT extern loaded_module_map loaded_modules;
+  LIBBUILD2_SYMEXPORT extern module_libraries_map module_libraries;
// Load a builtin module (i.e., a module linked as a static/shared library
// or that is part of the build system driver).
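
To illustrate the load function contract described above (submodule entries first, main module last, all-NULL terminator) from the driver's side, a sketch registering a made-up `hello` module statically:

    #include <libbuild2/module.hxx>

    // Provided by the (hypothetical) hello module library; returns its
    // module_functions array: submodules first, then the main module,
    // terminated with an all-NULL entry.
    //
    extern "C" const build2::module_functions*
    build2_hello_load ();

    static void
    register_builtin_hello ()
    {
      // Must run during serial execution, for example from the driver's
      // main(), before any buildfiles are loaded.
      //
      build2::load_builtin_module (&build2_hello_load);
    }
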
diff --git a/libbuild2/name.cxx b/libbuild2/name.cxx
index 1081b5c..6c48bb3 100644
--- a/libbuild2/name.cxx
+++ b/libbuild2/name.cxx
@@ -80,15 +80,20 @@ namespace build2
}
ostream&
- to_stream (ostream& os, const name& n, bool quote, char pair, bool escape)
+ to_stream (ostream& os, const name& n, quote_mode q, char pair, bool escape)
{
using pattern_type = name::pattern_type;
- auto write_string = [&os, quote, pair, escape] (
+ auto write_string = [&os, q, pair, escape] (
const string& v,
optional<pattern_type> pat = nullopt,
bool curly = false)
{
+ // We don't expect the effective quoting mode to be specified for the
+ // name patterns.
+ //
+ assert (q != quote_mode::effective || !pat);
+
// Special characters, path pattern characters, and regex pattern
// characters. The latter only need to be quoted in the first position
// and if followed by a non-alphanumeric delimiter. If that's the only
@@ -97,7 +102,7 @@ namespace build2
// escape leading `+` in the curly braces which is also recognized as a
// path pattern.
//
- char sc[] = {
+ char nsc[] = {
'{', '}', '[', ']', '$', '(', ')', // Token endings.
' ', '\t', '\n', '#', // Spaces.
'\\', '"', // Escaping and quoting.
@@ -114,6 +119,26 @@ namespace build2
return (v[0] == '~' || v[0] == '^') && v[1] != '\0' && !alnum (v[1]);
};
+ char esc[] = {
+ '{', '}', '$', '(', // Token endings.
+ ' ', '\t', '\n', '#', // Spaces.
+ '"', // Quoting.
+ pair, // Pair separator, if any.
+ '\0'};
+
+ auto ec = [&esc] (const string& v)
+ {
+ for (size_t i (0); i < v.size (); ++i)
+ {
+ char c (v[i]);
+
+ if (strchr (esc, c) != nullptr || (c == '\\' && v[i + 1] == '\\'))
+ return true;
+ }
+
+ return false;
+ };
+
if (pat)
{
switch (*pat)
@@ -124,7 +149,7 @@ namespace build2
}
}
- if (quote && v.find ('\'') != string::npos)
+ if (q != quote_mode::none && v.find ('\'') != string::npos)
{
// Quote the string with the double quotes rather than with the single
// one. Escape some of the special characters.
@@ -148,8 +173,10 @@ namespace build2
// pattern character but not vice versa. See the parsing logic for
// details.
//
- else if (quote && (v.find_first_of (sc) != string::npos ||
- (!pat && v.find_first_of (pc) != string::npos)))
+ else if ((q == quote_mode::normal &&
+ (v.find_first_of (nsc) != string::npos ||
+ (!pat && v.find_first_of (pc) != string::npos))) ||
+ (q == quote_mode::effective && ec (v)))
{
if (escape) os << '\\';
os << '\'';
@@ -164,8 +191,9 @@ namespace build2
// details). So we escape it both if it's not a pattern or is a path
// pattern.
//
- else if (quote && ((!pat || *pat == pattern_type::path) &&
- ((v[0] == '+' && curly) || rc (v))))
+ else if (q == quote_mode::normal &&
+ (!pat || *pat == pattern_type::path) &&
+ ((v[0] == '+' && curly) || rc (v)))
{
if (escape) os << '\\';
os << '\\' << v;
@@ -176,12 +204,12 @@ namespace build2
uint16_t dv (stream_verb (os).path); // Directory verbosity.
- auto write_dir = [&os, quote, &write_string, dv] (
+ auto write_dir = [&os, q, &write_string, dv] (
const dir_path& d,
optional<pattern_type> pat = nullopt,
bool curly = false)
{
- if (quote)
+ if (q != quote_mode::none)
write_string (dv < 1 ? diag_relative (d) : d.representation (),
pat,
curly);
@@ -194,7 +222,7 @@ namespace build2
// If quoted then print empty name as '' rather than {}.
//
- if (quote && n.empty ())
+ if (q != quote_mode::none && n.empty ())
return os << (escape ? "\\'\\'" : "''");
if (n.proj)
@@ -255,7 +283,7 @@ namespace build2
ostream&
to_stream (ostream& os,
const names_view& ns,
- bool quote,
+ quote_mode q,
char pair,
bool escape)
{
@@ -263,7 +291,7 @@ namespace build2
{
const name& n (*i);
++i;
- to_stream (os, n, quote, pair, escape);
+ to_stream (os, n, q, pair, escape);
if (n.pair)
os << n.pair;
diff --git a/libbuild2/name.hxx b/libbuild2/name.hxx
index 1dd5a9f..f5cb2c5 100644
--- a/libbuild2/name.hxx
+++ b/libbuild2/name.hxx
@@ -178,14 +178,15 @@ namespace build2
// trailing directory separator then it is stored as a directory, otherwise
// as a simple name. Note that the returned name is never a pattern.
//
- // NOTE: this function does not parse the full name syntax.
+ // NOTE: this function does not parse the full name syntax. See context-less
+ // parser::parse_names() for a heavy-weight way to achieve this.
//
name
to_name (string);
// Serialize the name to the stream. If requested, the name components
- // containing special characters are quoted and/or escaped. The special
- // characters are:
+ // containing special characters are quoted and/or escaped. In the normal
+ // quoting mode the special characters are:
//
// {}[]$() \t\n#\"'%
//
@@ -199,8 +200,14 @@ namespace build2
//
// As well as leading `+` if in the curly braces.
//
+ // In the effective quoting mode the special characters are:
+ //
+ // {}$( \t\n#"'
+ //
+ // As well as `\` if followed by any of the above characters or itself.
+ //
// If the pair argument is not '\0', then it is added to the above special
- // characters set. If the quote character is present in the component then
+ // characters sets. If the quote character is present in the component then
// it is double quoted rather than single quoted. In this case the following
// characters are escaped:
//
@@ -213,15 +220,23 @@ namespace build2
// Note that in the quoted mode an empty unqualified name is printed as '',
// not {}.
//
+ enum class quote_mode
+ {
+ none,
+ normal,
+ effective
+ };
+
LIBBUILD2_SYMEXPORT ostream&
to_stream (ostream&,
const name&,
- bool quote,
+ quote_mode,
char pair = '\0',
bool escape = false);
inline ostream&
- operator<< (ostream& os, const name& n) {return to_stream (os, n, false);}
+ operator<< (ostream& os, const name& n) {
+ return to_stream (os, n, quote_mode::none);}
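A minimal usage sketch (not part of the patch), assuming only the quote_mode enum and the to_stream() overload declared above; the expected output follows from the special character sets documented in the comment:

  ostringstream os;
  to_stream (os, name ("foo bar"), quote_mode::normal);    // 'foo bar'
  to_stream (os, name ("foo$bar"), quote_mode::effective); // 'foo$bar'
  to_stream (os, name ("foo bar"), quote_mode::none);      // foo bar
  os << name ("foo bar");                                  // foo bar (none)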
// Vector of names.
//
@@ -240,13 +255,13 @@ namespace build2
LIBBUILD2_SYMEXPORT ostream&
to_stream (ostream&,
const names_view&,
- bool quote,
+ quote_mode,
char pair = '\0',
bool escape = false);
inline ostream&
operator<< (ostream& os, const names_view& ns) {
- return to_stream (os, ns, false);}
+ return to_stream (os, ns, quote_mode::none);}
inline ostream&
operator<< (ostream& os, const names& ns) {return os << names_view (ns);}
diff --git a/libbuild2/name.test.cxx b/libbuild2/name.test.cxx
index 80b830e..c404503 100644
--- a/libbuild2/name.test.cxx
+++ b/libbuild2/name.test.cxx
@@ -46,7 +46,7 @@ namespace build2
// Test stream representation.
//
{
- auto ts = [] (const name& n, bool quote = true)
+ auto ts = [] (const name& n, quote_mode quote = quote_mode::normal)
{
ostringstream os;
stream_verb (os, stream_verbosity (0, 1));
@@ -54,8 +54,8 @@ namespace build2
return os.str ();
};
- assert (ts (name ()) == "''");
- assert (ts (name (), false) == "{}");
+ assert (ts (name ()) == "''");
+ assert (ts (name (), quote_mode::none) == "{}");
assert (ts (name ("foo")) == "foo");
@@ -70,10 +70,18 @@ namespace build2
assert (ts (name (dir ("bar/"), "dir", "foo")) == "bar/dir{foo}");
assert (ts (name (dir ("bar/baz/"), "dir", "foo")) == "bar/baz/dir{foo}");
- // Quoting.
+ // Normal quoting.
//
assert (ts (name (dir ("bar baz/"), "dir", "foo fox")) == "'bar baz/'dir{'foo fox'}");
+ // Effective quoting.
+ //
+ assert (ts (name ("bar\\baz"), quote_mode::effective) == "bar\\baz");
+ assert (ts (name ("bar[baz]"), quote_mode::effective) == "bar[baz]");
+ assert (ts (name ("bar$baz"), quote_mode::effective) == "'bar$baz'");
+ assert (ts (name ("bar\\\\baz"), quote_mode::effective) == "'bar\\\\baz'");
+ assert (ts (name ("bar\\$baz"), quote_mode::effective) == "'bar\\$baz'");
+
// Relative logic.
//
#ifndef _WIN32
diff --git a/libbuild2/operation.cxx b/libbuild2/operation.cxx
index 0f30c4a..6f88e38 100644
--- a/libbuild2/operation.cxx
+++ b/libbuild2/operation.cxx
@@ -6,6 +6,10 @@
#include <iostream> // cout
#include <unordered_map>
+#ifndef BUILD2_BOOTSTRAP
+# include <libbutl/json/serializer.hxx>
+#endif
+
#include <libbuild2/file.hxx>
#include <libbuild2/scope.hxx>
#include <libbuild2/target.hxx>
@@ -14,6 +18,10 @@
#include <libbuild2/algorithm.hxx>
#include <libbuild2/diagnostics.hxx>
+#if 0
+#include <libbuild2/adhoc-rule-buildscript.hxx> // @@ For a hack below.
+#endif
+
using namespace std;
using namespace butl;
@@ -56,7 +64,7 @@ namespace build2
true, // bootstrap_outer
nullptr, // meta-operation pre
nullptr, // operation pre
- &load,
+ &perform_load,
nullptr, // search
nullptr, // match
nullptr, // execute
@@ -68,16 +76,17 @@ namespace build2
// perform
//
void
- load (const values&,
- scope& root,
- const path& bf,
- const dir_path& out_base,
- const dir_path& src_base,
- const location&)
+ perform_load (const values&,
+ scope& root,
+ const path& bf,
+ const dir_path& out_base,
+ const dir_path& src_base,
+ const location&)
{
// Load project's root.build.
//
- load_root (root);
+ if (!root.root_extra->loaded)
+ load_root (root);
// Create the base scope. Note that its existence doesn't mean it was
// already setup as a base scope; it can be the same as root.
@@ -92,15 +101,15 @@ namespace build2
}
void
- search (const values&,
- const scope&,
- const scope& bs,
- const path& bf,
- const target_key& tk,
- const location& l,
- action_targets& ts)
+ perform_search (const values&,
+ const scope&,
+ const scope& bs,
+ const path& bf,
+ const target_key& tk,
+ const location& l,
+ action_targets& ts)
{
- tracer trace ("search");
+ tracer trace ("perform_search");
context& ctx (bs.ctx);
phase_lock pl (ctx, run_phase::match);
@@ -155,8 +164,9 @@ namespace build2
//
map.reserve (ctx.targets.size () / 2);
- bool e (false);
+ size_t count_matched (ctx.count_matched ());
+ bool e (false);
for (size_t pass (1); pass != 3; ++pass)
{
for (const auto& pt: ctx.targets)
@@ -171,8 +181,7 @@ namespace build2
//
const target::opstate& s (t->state[a]);
- if (s.task_count.load (memory_order_relaxed) - ctx.count_base () <
- target::offset_matched)
+ if (s.task_count.load (memory_order_relaxed) < count_matched)
continue;
// Skip if for some reason the path is not assigned.
@@ -222,9 +231,19 @@ namespace build2
}
else if (t->decl != target_decl::real)
{
- dr << info << "target " << *t << " is not explicitly declared "
- << "in any buildfile" <<
- info << "perhaps it is a dynamic dependency?";
+ if (t->decl == target_decl::implied)
+ {
+ dr << info << "target " << *t << " is implied by a buildfile";
+ }
+ else
+ {
+ dr << info << "target " << *t << " is not declared in a buildfile";
+
+ if (t->decl == target_decl::prereq_file)
+ dr << " but has corresponding existing file";
+
+ dr << info << "perhaps it is a dynamic dependency?";
+ }
}
}
}
@@ -234,9 +253,10 @@ namespace build2
}
void
- match (const values&, action a, action_targets& ts, uint16_t diag, bool prog)
+ perform_match (const values&, action a, action_targets& ts,
+ uint16_t diag, bool prog)
{
- tracer trace ("match");
+ tracer trace ("perform_match");
if (ts.empty ())
return;
@@ -248,25 +268,50 @@ namespace build2
// Setup progress reporting if requested.
//
- string what; // Note: must outlive monitor_guard.
+ struct monitor_data
+ {
+ size_t incr;
+ string what;
+ atomic<timestamp::rep> time {timestamp_nonexistent_rep};
+ } md; // Note: must outlive monitor_guard.
scheduler::monitor_guard mg;
if (prog && show_progress (2 /* max_verb */))
{
- size_t incr (stderr_term ? 1 : 10); // Scale depending on output type.
-
- what = " targets to " + diag_do (ctx, a);
+ // Note that showing progress is not free and it can take up to 10% of
+ // the up-to-date check on some projects (e.g., Boost). So we jump
+ // through a few hoops to make sure we don't overindulge.
+ //
+ md.incr = stderr_term // Scale depending on output type.
+ ? (ctx.sched->serial () ? 1 : 5)
+ : 100;
+ md.what = " targets to " + diag_do (ctx, a);
- mg = ctx.sched.monitor (
+ mg = ctx.sched->monitor (
ctx.target_count,
- incr,
- [incr, &what] (size_t c) -> size_t
+ md.incr,
+ [&md] (size_t c) -> size_t
{
+ size_t r (c + md.incr);
+
+ if (stderr_term)
+ {
+ timestamp o (duration (md.time.load (memory_order_consume)));
+ timestamp n (system_clock::now ());
+
+ if (n - o < chrono::milliseconds (80))
+ return r;
+
+ md.time.store (n.time_since_epoch ().count (),
+ memory_order_release);
+ }
+
diag_progress_lock pl;
diag_progress = ' ';
diag_progress += to_string (c);
- diag_progress += what;
- return c + incr;
+ diag_progress += md.what;
+
+ return r;
});
}
@@ -274,6 +319,7 @@ namespace build2
// many we have started. Wait with unlocked phase to allow phase
// switching.
//
+ bool fail (false);
size_t i (0), n (ts.size ());
{
atomic_count task_count (0);
@@ -284,21 +330,85 @@ namespace build2
const target& t (ts[i].as<target> ());
l5 ([&]{trace << diag_doing (a, t);});
- target_state s (match_async (a, t, 0, task_count, false));
+ target_state s (match_async (a, t,
+ 0, task_count,
+ match_extra::all_options,
+ false /* fail */));
// Bail out if the target has failed and we weren't instructed to
// keep going.
//
- if (s == target_state::failed && !ctx.keep_going)
+ if (s == target_state::failed)
{
- ++i;
- break;
+ fail = true;
+
+ if (!ctx.keep_going)
+ {
+ ++i;
+ break;
+ }
}
}
wg.wait ();
}
+ // If we have any targets with post hoc prerequisites, match those.
+ //
+ // See match_posthoc() for the overall approach description.
+ //
+ bool posthoc_fail (false);
+ if (!ctx.current_posthoc_targets.empty () && (!fail || ctx.keep_going))
+ {
+ using posthoc_target = context::posthoc_target;
+ using posthoc_prerequisite_target = posthoc_target::prerequisite_target;
+
+ // Note that on each iteration we may end up with new entries at the
+ // back. Since we start and end each iteration in serial execution, we
+ // don't need to mess with the mutex.
+ //
+ for (const posthoc_target& p: ctx.current_posthoc_targets)
+ {
+ action a (p.action); // May not be the same as argument action.
+ const target& t (p.target);
+
+ auto df = make_diag_frame (
+ [a, &t](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while matching to " << diag_do (t.ctx, a)
+ << " post hoc prerequisites of " << t;
+ });
+
+ // Cannot use normal match because incrementing dependency counts in
+ // the face of cycles does not work well (we will deadlock for the
+ // reverse execution mode).
+ //
+ // @@ PERF: match in parallel (need match_direct_async(), etc).
+ //
+ for (const posthoc_prerequisite_target& pt: p.prerequisite_targets)
+ {
+ if (pt.target != nullptr)
+ {
+ target_state s (match_direct_sync (a, *pt.target,
+ pt.match_options,
+ false /* fail */));
+
+ if (s == target_state::failed)
+ {
+ posthoc_fail = true;
+
+ if (!ctx.keep_going)
+ break;
+ }
+ }
+ }
+
+ if (posthoc_fail && !ctx.keep_going)
+ break;
+ }
+ }
+
// Clear the progress if present.
//
if (mg)
@@ -309,15 +419,25 @@ namespace build2
// We are now running serially. Re-examine targets that we have matched.
//
- bool fail (false);
for (size_t j (0); j != n; ++j)
{
action_target& at (ts[j]);
const target& t (at.as<target> ());
- target_state s (j < i
- ? match (a, t, false)
- : target_state::postponed);
+ // We cannot attribute post hoc failures to specific targets so it
+ // seems the best we can do is just fail them all.
+ //
+ target_state s;
+ if (j < i)
+ {
+ s = match_complete (a, t, match_extra::all_options, false /* fail */);
+
+ if (posthoc_fail)
+ s = /*t.state[a].state =*/ target_state::failed;
+ }
+ else
+ s = target_state::postponed;
+
switch (s)
{
case target_state::postponed:
@@ -368,16 +488,103 @@ namespace build2
}
void
- execute (const values&, action a, action_targets& ts,
- uint16_t diag, bool prog)
+ perform_execute (const values&, action a, action_targets& ts,
+ uint16_t diag, bool prog)
{
- tracer trace ("execute");
+ tracer trace ("perform_execute");
if (ts.empty ())
return;
context& ctx (ts[0].as<target> ().ctx);
+ bool posthoc_fail (false);
+ auto execute_posthoc = [&ctx, &posthoc_fail] ()
+ {
+ using posthoc_target = context::posthoc_target;
+ using posthoc_prerequisite_target = posthoc_target::prerequisite_target;
+
+ for (const posthoc_target& p: ctx.current_posthoc_targets)
+ {
+ action a (p.action); // May not be the same as argument action.
+ const target& t (p.target);
+
+ auto df = make_diag_frame (
+ [a, &t](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while " << diag_doing (t.ctx, a)
+ << " post hoc prerequisites of " << t;
+ });
+
+#if 0
+ for (const posthoc_prerequisite_target& pt: p.prerequisite_targets)
+ {
+ if (pt.target != nullptr)
+ {
+ target_state s (
+ execute_direct_sync (a, *pt.target, false /* fail */));
+
+ if (s == target_state::failed)
+ {
+ posthoc_fail = true;
+
+ if (!ctx.keep_going)
+ break;
+ }
+ }
+ }
+#else
+ // Note: similar logic/reasoning to below except we use direct
+ // execution.
+ //
+ atomic_count tc (0);
+ wait_guard wg (ctx, tc);
+
+ for (const posthoc_prerequisite_target& pt: p.prerequisite_targets)
+ {
+ if (pt.target != nullptr)
+ {
+ target_state s (
+ execute_direct_async (a, *pt.target, 0, tc, false /*fail*/));
+
+ if (s == target_state::failed)
+ {
+ posthoc_fail = true;
+
+ if (!ctx.keep_going)
+ break;
+ }
+ }
+ }
+
+ wg.wait ();
+
+ // Process the result.
+ //
+ for (const posthoc_prerequisite_target& pt: p.prerequisite_targets)
+ {
+ if (pt.target != nullptr)
+ {
+ // Similar to below, no need to wait.
+ //
+ target_state s (pt.target->executed_state (a, false /* fail */));
+
+ if (s == target_state::failed)
+ {
+ // Note: no need to keep going.
+ //
+ posthoc_fail = true;
+ break;
+ }
+ }
+ }
+#endif
+ if (posthoc_fail && !ctx.keep_going)
+ break;
+ }
+ };
+
// Reverse the order of targets if the execution mode is 'last'.
//
if (ctx.current_mode == execution_mode::last)
@@ -385,6 +592,7 @@ namespace build2
phase_lock pl (ctx, run_phase::execute); // Never switched.
+ bool fail (false);
{
// Tune the scheduler.
//
@@ -393,7 +601,7 @@ namespace build2
switch (ctx.current_inner_oif->concurrency)
{
- case 0: sched_tune = tune_guard (ctx.sched, 1); break; // Run serially.
+ case 0: sched_tune = tune_guard (*ctx.sched, 1); break; // Run serially.
case 1: break; // Run as is.
default: assert (false); // Not supported.
}
@@ -416,7 +624,7 @@ namespace build2
{
what = "% of targets " + diag_did (ctx, a);
- mg = ctx.sched.monitor (
+ mg = ctx.sched->monitor (
ctx.target_count,
init - incr,
[init, incr, &what, &ctx] (size_t c) -> size_t
@@ -441,9 +649,18 @@ namespace build2
}
}
+ // In the 'last' execution mode run post hoc first.
+ //
+ if (ctx.current_mode == execution_mode::last)
+ {
+ if (!ctx.current_posthoc_targets.empty ())
+ execute_posthoc ();
+ }
+
// Similar logic to execute_members(): first start asynchronous
// execution of all the top-level targets.
//
+ if (!posthoc_fail || ctx.keep_going)
{
atomic_count task_count (0);
wait_guard wg (ctx, task_count);
@@ -459,13 +676,24 @@ namespace build2
// Bail out if the target has failed and we weren't instructed to
// keep going.
//
- if (s == target_state::failed && !ctx.keep_going)
- break;
+ if (s == target_state::failed)
+ {
+ fail = true;
+
+ if (!ctx.keep_going)
+ break;
+ }
}
wg.wait ();
}
+ if (ctx.current_mode == execution_mode::first)
+ {
+ if (!ctx.current_posthoc_targets.empty () && (!fail || ctx.keep_going))
+ execute_posthoc ();
+ }
+
// We are now running serially.
//
@@ -501,12 +729,24 @@ namespace build2
// Re-examine all the targets and print diagnostics.
//
- bool fail (false);
for (action_target& at: ts)
{
const target& t (at.as<target> ());
- switch ((at.state = t.executed_state (a, false)))
+ // Similar to match we cannot attribute post hoc failures to specific
+ // targets so it seems the best we can do is just fail them all.
+ //
+ if (!posthoc_fail)
+ {
+ // Note that here we call executed_state() directly instead of
+ // execute_complete() since we know there is no need to wait.
+ //
+ at.state = t.executed_state (a, false /* fail */);
+ }
+ else
+ at.state = /*t.state[a].state =*/ target_state::failed;
+
+ switch (at.state)
{
case target_state::unknown:
{
@@ -551,26 +791,191 @@ namespace build2
if (fail)
throw failed ();
- // We should have executed every target that we matched, provided we
+#ifndef NDEBUG
+ size_t base (ctx.count_base ());
+
+ // For now we disable these checks if we've performed any group member
+ // resolutions that required a match (with apply()) but not execute.
+ //
+ if (ctx.target_count.load (memory_order_relaxed) != 0 &&
+ ctx.resolve_count.load (memory_order_relaxed) != 0)
+ {
+ // These counts are only tracked for the inner operation.
+ //
+ action ia (a.outer () ? a.inner_action () : a);
+
+ // While it may seem that just decrementing the counters for every
+ // target with the resolve_counted flag set should be enough, this will
+ // miss any prerequisites that this target has matched but did not
+ // execute, which may affect both task_count and dependency_count. Note
+ // that this applies recursively and we effectively need to pretend to
+ // execute this target and all its prerequisites, recursively without
+ // actually executing any of their recipes.
+ //
+ // That last bit means we must be able to interpret the populated
+ // prerequisite_targets generically, which is a requirement we place on
+ // rules that resolve groups in apply (see target::group_members() for
+ // details). It so happens that our own adhoc_buildscript_rule doesn't
+ // follow this rule (see execute_update_prerequisites()) so we detect
+ // and handle this with a hack.
+ //
+ // @@ Hm, but there is no guarantee that this holds recursively since
+ // prerequisites may not be see-through groups. For this to work we
+ // would have to impose this restriction globally. Which we could
+ // probably do, just need to audit things carefully (especially
+ // cc::link_rule). But we already sort of rely on that for dump! Maybe
+ // should just require it everywhere and fix adhoc_buildscript_rule.
+ //
+ // @@ There are special recipes that don't populate prerequisite_targets
+ // like group_recipe! Are we banning any such user-defined recipes?
+ // Need to actually look if we have anything else like this. There
+ // is also inner_recipe, though doesn't apply here (only for outer).
+ //
+ // @@ TMP: do and enable after the 0.16.0 release.
+ //
+ // Note: recursive lambda.
+ //
+#if 0
+ auto pretend_execute = [base, ia] (target& t,
+ const auto& pretend_execute) -> void
+ {
+ context& ctx (t.ctx);
+
+ // Note: tries to emulate the execute_impl() functions semantics.
+ //
+ auto execute_impl = [base, ia, &ctx, &pretend_execute] (target& t)
+ {
+ target::opstate& s (t.state[ia]);
+
+ size_t gd (ctx.dependency_count.fetch_sub (1, memory_order_relaxed));
+ size_t td (s.dependents.fetch_sub (1, memory_order_release));
+ assert (td != 0 && gd != 0);
+
+ // Execute unless already executed.
+ //
+ if (s.task_count.load (memory_order_relaxed) !=
+ base + target::offset_executed)
+ pretend_execute (t, pretend_execute);
+ };
+
+ target::opstate& s (t.state[ia]);
+
+ if (s.state != target_state::unchanged) // Noop recipe.
+ {
+ if (s.recipe_group_action)
+ {
+ execute_impl (const_cast<target&> (*t.group));
+ }
+ else
+ {
+ // @@ Special hack for adhoc_buildscript_rule (remember to drop
+ // include above if getting rid of).
+ //
+ bool adhoc (
+ ia == perform_update_id &&
+ s.rule != nullptr &&
+ dynamic_cast<const adhoc_buildscript_rule*> (
+ &s.rule->second.get ()) != nullptr);
+
+ for (const prerequisite_target& p: t.prerequisite_targets[ia])
+ {
+ const target* pt;
+
+ if (adhoc)
+ pt = (p.target != nullptr ? p.target :
+ p.adhoc () ? reinterpret_cast<target*> (p.data) :
+ nullptr);
+ else
+ pt = p.target;
+
+ if (pt != nullptr)
+ execute_impl (const_cast<target&> (*pt));
+ }
+
+ ctx.target_count.fetch_sub (1, memory_order_relaxed);
+ if (s.resolve_counted)
+ {
+ s.resolve_counted = false;
+ ctx.resolve_count.fetch_sub (1, memory_order_relaxed);
+ }
+ }
+
+ s.state = target_state::changed;
+ }
+
+ s.task_count.store (base + target::offset_executed,
+ memory_order_relaxed);
+ };
+#endif
+
+ for (const auto& pt: ctx.targets)
+ {
+ target& t (*pt);
+ target::opstate& s (t.state[ia]);
+
+ // We are only interested in the targets that have been matched for
+ // this operation and are in the applied state.
+ //
+ if (s.task_count.load (memory_order_relaxed) !=
+ base + target::offset_applied)
+ continue;
+
+ if (s.resolve_counted)
+ {
+#if 0
+ pretend_execute (t, pretend_execute);
+
+ if (ctx.resolve_count.load (memory_order_relaxed) == 0)
+ break;
+#else
+ return; // Skip all the below checks.
+#endif
+ }
+ }
+ }
+
+ // We should have executed every target that we have matched, provided we
// haven't failed (in which case we could have bailed out early).
//
assert (ctx.target_count.load (memory_order_relaxed) == 0);
+ assert (ctx.resolve_count.load (memory_order_relaxed) == 0); // Sanity check.
-#ifndef NDEBUG
if (ctx.dependency_count.load (memory_order_relaxed) != 0)
{
+ auto dependents = [base] (action a, const target& t)
+ {
+ const target::opstate& s (t.state[a]);
+
+ // Only consider targets that have been matched for this operation
+ // (since matching is what causes the dependents count reset).
+ //
+ size_t c (s.task_count.load (memory_order_relaxed));
+
+ return (c >= base + target::offset_applied
+ ? s.dependents.load (memory_order_relaxed)
+ : 0);
+ };
+
diag_record dr;
dr << info << "detected unexecuted matched targets:";
for (const auto& pt: ctx.targets)
{
const target& t (*pt);
- if (size_t n = t[a].dependents.load (memory_order_relaxed))
+
+ if (size_t n = dependents (a, t))
dr << text << t << ' ' << n;
+
+ if (a.outer ())
+ {
+ if (size_t n = dependents (a.inner_action (), t))
+ dr << text << t << ' ' << n;
+ }
}
}
-#endif
+
assert (ctx.dependency_count.load (memory_order_relaxed) == 0);
+#endif
}
const meta_operation_info mo_perform {
@@ -583,10 +988,10 @@ namespace build2
true, // bootstrap_outer
nullptr, // meta-operation pre
nullptr, // operation pre
- &load,
- &search,
- &match,
- &execute,
+ &perform_load,
+ &perform_search,
+ &perform_match,
+ &perform_execute,
nullptr, // operation post
nullptr, // meta-operation post
nullptr // include
@@ -594,6 +999,71 @@ namespace build2
// info
//
+
+ // Note: similar approach to forward() in configure.
+ //
+ struct info_params
+ {
+ bool json = false;
+ bool subprojects = true;
+ };
+
+ // Note: should not fail if mo is NULL (see info_subprojects() below).
+ //
+ static info_params
+ info_parse_params (const values& params,
+ const char* mo = nullptr,
+ const location& l = location ())
+ {
+ info_params r;
+
+ if (params.size () == 1)
+ {
+ for (const name& n: cast<names> (params[0]))
+ {
+ if (n.simple ())
+ {
+ if (n.value == "json")
+ {
+ r.json = true;
+ continue;
+ }
+
+ if (n.value == "no_subprojects")
+ {
+ r.subprojects = false;
+ continue;
+ }
+
+ // Fall through.
+ }
+
+ if (mo != nullptr)
+ fail (l) << "unexpected parameter '" << n << "' for "
+ << "meta-operation " << mo;
+ }
+ }
+ else if (!params.empty ())
+ {
+ if (mo != nullptr)
+ fail (l) << "unexpected parameters for meta-operation " << mo;
+ }
+
+ return r;
+ }
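For illustration (not part of the patch), the mapping implemented above, with the call shown as it is used in info_pre() below:

  // params = (json no_subprojects) -> {json = true,  subprojects = false}
  // params = (json)                -> {json = true,  subprojects = true}
  // params = ()                    -> {json = false, subprojects = true}
  // params = (anything else)       -> fail "unexpected parameter ..." if mo is
  //                                   not NULL; silently skipped otherwise
  //
  // info_params ip (info_parse_params (params, "info", l));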
+
+ bool
+ info_subprojects (const values& params)
+ {
+ return info_parse_params (params).subprojects;
+ }
+
+ static void
+ info_pre (context&, const values& params, const location& l)
+ {
+ info_parse_params (params, "info", l); // Validate.
+ }
+
static operation_id
info_operation_pre (context&, const values&, operation_id o)
{
@@ -644,7 +1114,7 @@ namespace build2
}
static void
- info_execute (const values&, action, action_targets& ts, uint16_t, bool)
+ info_execute_lines (action_targets& ts, bool subp)
{
for (size_t i (0); i != ts.size (); ++i)
{
@@ -677,7 +1147,7 @@ namespace build2
//
auto print_mods = [&rs] ()
{
- for (const module_state& ms: rs.root_extra->modules)
+ for (const module_state& ms: rs.root_extra->loaded_modules)
cout << ' ' << ms.name;
};
@@ -695,6 +1165,20 @@ namespace build2
cout << ' ' << *p;
};
+ // Print a potentially null/empty directory path without trailing slash.
+ //
+ auto print_dir = [] (const dir_path& d)
+ {
+ if (!d.empty ())
+ cout << ' ' << d.string ();
+ };
+
+ auto print_pdir = [&print_dir] (const dir_path* d)
+ {
+ if (d != nullptr)
+ print_dir (*d);
+ };
+
// This could be a simple project that doesn't set project name.
//
cout
@@ -702,16 +1186,181 @@ namespace build2
<< "version:" ; print_empty (cast_empty<string> (rs[ctx.var_version])); cout << endl
<< "summary:" ; print_empty (cast_empty<string> (rs[ctx.var_project_summary])); cout << endl
<< "url:" ; print_empty (cast_empty<string> (rs[ctx.var_project_url])); cout << endl
- << "src_root: " << cast<dir_path> (rs[ctx.var_src_root]) << endl
- << "out_root: " << cast<dir_path> (rs[ctx.var_out_root]) << endl
- << "amalgamation:" ; print_null (*rs.root_extra->amalgamation); cout << endl
- << "subprojects:" ; print_null (*rs.root_extra->subprojects); cout << endl
+ << "src_root:" ; print_dir (cast<dir_path> (rs[ctx.var_src_root])); cout << endl
+ << "out_root:" ; print_dir (cast<dir_path> (rs[ctx.var_out_root])); cout << endl
+ << "amalgamation:" ; print_pdir (*rs.root_extra->amalgamation); cout << endl;
+ if (subp)
+ {
+ cout
+ << "subprojects:" ; print_null (*rs.root_extra->subprojects); cout << endl;
+ }
+ cout
<< "operations:" ; print_ops (rs.root_extra->operations, ctx.operation_table); cout << endl
<< "meta-operations:"; print_ops (rs.root_extra->meta_operations, ctx.meta_operation_table); cout << endl
<< "modules:" ; print_mods (); cout << endl;
}
}
+#ifndef BUILD2_BOOTSTRAP
+ static void
+ info_execute_json (action_targets& ts, bool subp)
+ {
+ json::stream_serializer s (cout);
+ s.begin_array ();
+
+ for (size_t i (0); i != ts.size (); ++i)
+ {
+ const scope& rs (ts[i].as<scope> ());
+
+ context& ctx (rs.ctx);
+
+ s.begin_object ();
+
+ // Print a potentially empty string.
+ //
+ auto print_string = [&s] (const char* n,
+ const string& v,
+ bool check = false)
+ {
+ if (!v.empty ())
+ s.member (n, v, check);
+ };
+
+ // Print a potentially null/empty directory path without trailing slash.
+ //
+ auto print_dir = [&s] (const char* n, const dir_path& v)
+ {
+ if (!v.empty ())
+ s.member (n, v.string ());
+ };
+
+ auto print_pdir = [&print_dir] (const char* n, const dir_path* v)
+ {
+ if (v != nullptr)
+ print_dir (n, *v);
+ };
+
+ // Print [meta_]operation names (see info_lines() for details).
+ //
+ auto print_ops = [&s] (const char* name,
+ const auto& ov,
+ const auto& ot,
+ const auto& printer)
+ {
+ s.member_name (name, false /* check */);
+
+ s.begin_array ();
+
+ for (uint8_t id (2); id < ov.size (); ++id)
+ {
+ if (ov[id] != nullptr)
+ printer (ot[id]);
+ }
+
+ s.end_array ();
+ };
+
+ // Note that we won't check some values for being valid UTF-8, since
+ // their characters belong to even stricter character sets and/or are
+ // read from a buildfile which is already verified to be valid UTF-8.
+ //
+ print_string ("project", project (rs).string ());
+ print_string ("version", cast_empty<string> (rs[ctx.var_version]));
+ print_string ("summary", cast_empty<string> (rs[ctx.var_project_summary]));
+ print_string ("url", cast_empty<string> (rs[ctx.var_project_url]));
+ print_dir ("src_root", cast<dir_path> (rs[ctx.var_src_root]));
+ print_dir ("out_root", cast<dir_path> (rs[ctx.var_out_root]));
+ print_pdir ("amalgamation", *rs.root_extra->amalgamation);
+
+ // Print subprojects.
+ //
+ if (subp)
+ {
+ const subprojects* sps (*rs.root_extra->subprojects);
+
+ if (sps != nullptr && !sps->empty ())
+ {
+ s.member_name ("subprojects", false /* check */);
+ s.begin_array ();
+
+ for (const auto& sp: *sps)
+ {
+ s.begin_object ();
+
+ print_dir ("path", sp.second);
+
+ // See find_subprojects() for details.
+ //
+ const string& n (sp.first.string ());
+
+ if (!path::traits_type::is_separator (n.back ()))
+ print_string ("name", n);
+
+ s.end_object ();
+ }
+
+ s.end_array ();
+ }
+ }
+
+ print_ops ("operations",
+ rs.root_extra->operations,
+ ctx.operation_table,
+ [&s] (const string& v) {s.value (v, false /* check */);});
+
+ print_ops ("meta-operations",
+ rs.root_extra->meta_operations,
+ ctx.meta_operation_table,
+ [&s] (const meta_operation_data& v)
+ {
+ s.value (v.name, false /* check */);
+ });
+
+ // Print modules.
+ //
+ if (!rs.root_extra->loaded_modules.empty ())
+ {
+ s.member_name ("modules", false /* check */);
+ s.begin_array ();
+
+ for (const module_state& ms: rs.root_extra->loaded_modules)
+ s.value (ms.name, false /* check */);
+
+ s.end_array ();
+ }
+
+ s.end_object ();
+ }
+
+ s.end_array ();
+ cout << endl;
+ }
+#else
+ static void
+ info_execute_json (action_targets&, bool)
+ {
+ }
+#endif //BUILD2_BOOTSTRAP
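A standalone sketch (not part of the patch) of the libbutl serializer calls used above; the project and operation names are made up for illustration and the exact output formatting depends on the serializer's defaults:

  #include <string>
  #include <iostream>

  #include <libbutl/json/serializer.hxx>

  int
  main ()
  {
    butl::json::stream_serializer s (std::cout);

    std::string prj ("hello"), op ("update");

    s.begin_object ();
    s.member ("project", prj);
    s.member_name ("operations", false /* check */);
    s.begin_array ();
    s.value (op, false /* check */);
    s.end_array ();
    s.end_object ();

    std::cout << std::endl; // E.g., {"project":"hello","operations":["update"]}
  }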
+
+ static void
+ info_execute (const values& params,
+ action,
+ action_targets& ts,
+ uint16_t,
+ bool)
+ {
+ info_params ip (info_parse_params (params));
+
+ // Note that neither output will be "ideal" if the user does something
+ // like `b info(foo/) info(bar/)` instead of `b info(foo/ bar/)`. Oh,
+ // well.
+ //
+ if (ip.json)
+ info_execute_json (ts, ip.subprojects);
+ else
+ info_execute_lines (ts, ip.subprojects);
+ }
+
const meta_operation_info mo_info {
info_id,
"info",
@@ -719,8 +1368,8 @@ namespace build2
"",
"",
"",
- false, // bootstrap_outer
- nullptr, // meta-operation pre
+ false, // bootstrap_outer
+ &info_pre, // meta-operation pre
&info_operation_pre,
&info_load,
&info_search,
@@ -746,6 +1395,8 @@ namespace build2
nullptr,
nullptr,
nullptr,
+ nullptr,
+ nullptr,
nullptr
};
@@ -772,6 +1423,8 @@ namespace build2
nullptr,
nullptr,
nullptr,
+ nullptr,
+ nullptr,
nullptr
};
@@ -788,6 +1441,8 @@ namespace build2
nullptr,
nullptr,
nullptr,
+ nullptr,
+ nullptr,
nullptr
};
}
diff --git a/libbuild2/operation.hxx b/libbuild2/operation.hxx
index 2f88e88..e8ff38a 100644
--- a/libbuild2/operation.hxx
+++ b/libbuild2/operation.hxx
@@ -121,16 +121,20 @@ namespace build2
// End of operation and meta-operation batches.
//
+ // Note: not called in case any of the earlier callbacks failed.
+ //
void (*operation_post) (context&, const values&, operation_id);
void (*meta_operation_post) (context&, const values&);
// Optional prerequisite exclusion override callback. See include() for
- // details. Note that it's not called for include_type::normal;
+ // details. Note that it's not called for include_type::normal without
+ // an operation-specific override.
//
include_type (*include) (action,
const target&,
const prerequisite_member&,
- include_type);
+ include_type,
+ lookup&);
};
// Built-in meta-operations.
@@ -145,41 +149,46 @@ namespace build2
// scope.
//
LIBBUILD2_SYMEXPORT void
- load (const values&,
- scope&,
- const path&,
- const dir_path&,
- const dir_path&,
- const location&);
+ perform_load (const values&,
+ scope&,
+ const path&,
+ const dir_path&,
+ const dir_path&,
+ const location&);
// Search and match the target. This is the default implementation
// that does just that and adds a pointer to the target to the list.
//
LIBBUILD2_SYMEXPORT void
- search (const values&,
- const scope&,
- const scope&,
- const path&,
- const target_key&,
- const location&,
- action_targets&);
+ perform_search (const values&,
+ const scope&,
+ const scope&,
+ const path&,
+ const target_key&,
+ const location&,
+ action_targets&);
LIBBUILD2_SYMEXPORT void
- match (const values&, action, action_targets&,
- uint16_t diag, bool prog);
+ perform_match (const values&, action, action_targets&,
+ uint16_t diag, bool prog);
// Execute the action on the list of targets. This is the default
// implementation that does just that while issuing appropriate
// diagnostics (unless quiet).
//
LIBBUILD2_SYMEXPORT void
- execute (const values&, action, const action_targets&,
- uint16_t diag, bool prog);
+ perform_execute (const values&, action, const action_targets&,
+ uint16_t diag, bool prog);
LIBBUILD2_SYMEXPORT extern const meta_operation_info mo_noop;
LIBBUILD2_SYMEXPORT extern const meta_operation_info mo_perform;
LIBBUILD2_SYMEXPORT extern const meta_operation_info mo_info;
+ // Return true if params does not contain no_subprojects.
+ //
+ LIBBUILD2_SYMEXPORT bool
+ info_subprojects (const values& params);
+
// Operation info.
//
// NOTE: keep POD-like to ensure can be constant-initialized in order to
@@ -216,14 +225,22 @@ namespace build2
//
const size_t concurrency;
- // The first argument in all the callbacks is the operation parameters.
+ // The values argument in the callbacks is the operation parameters. If
+ // the operation expects parameters, then it should have a non-NULL
+ // operation_pre() callback. Failing that, any parameters will be diagnosed
+ // as unexpected.
//
- // If the operation expects parameters, then it should have a non-NULL
- // pre(). Failed that, any parameters will be diagnosed as unexpected.
+ // Note also that if the specified operation has an outer operation (for
+ // example, update-for-install), then the parameters belong to the outer
+ // operation (install in this example). This is done to be consistent with
+ // the case where update is performed as a pre-operation of install.
- // If the returned operation_id's are not 0, then they are injected
- // as pre/post operations for this operation. Can be NULL if unused.
- // The returned operation_id shall not be default_id.
+ // Pre/post operations for this operation. Note that these callbacks are
+ // called before this operation becomes current.
+ //
+ // If the operation_ids returned by the pre/post_operation() callbacks are
+ // not 0, then they are injected as pre/post operations for this operation.
+ // Can be NULL if unused. The returned operation_id shall not be default_id.
//
operation_id (*pre_operation) (
context&, const values&, meta_operation_id, const location&);
@@ -231,6 +248,16 @@ namespace build2
operation_id (*post_operation) (
context&, const values&, meta_operation_id);
+ // Called immediately after/before this operation becomes/ceases to be
+ // the current operation for the specified context. Can be used to
+ // initialize/finalize operation-specific data (context::current_*_odata).
+ // Can be NULL if unused.
+ //
+ void (*operation_pre) (
+ context&, const values&, bool inner, const location&);
+ void (*operation_post) (
+ context&, const values&, bool inner);
+
// Operation-specific ad hoc rule callbacks. Essentially, if not NULL,
// then every ad hoc rule match and apply call for this operation is
// proxied through these functions.
@@ -305,35 +332,36 @@ namespace build2
using operation_table = butl::string_table<operation_id>;
- // These are "sparse" in the sense that we may have "holes" that
- // are represented as NULL pointers. Also, lookup out of bounds
- // is treated as a hole.
+ // This is a "sparse" vector in the sense that we may have "holes" that are
+ // represented as default-initialized empty instances (for example, NULL if
+ // T is a pointer). Also, lookup out of bounds is treated as a hole.
//
- template <typename T>
+ template <typename T, size_t N>
struct sparse_vector
{
- using base_type = vector<T*>;
+ using base_type = small_vector<T, N>;
using size_type = typename base_type::size_type;
void
- insert (size_type i, T& x)
+ insert (size_type i, T x)
{
size_type n (v_.size ());
if (i < n)
- v_[i] = &x;
+ v_[i] = x;
else
{
if (n != i)
- v_.resize (i, nullptr); // Add holes.
- v_.push_back (&x);
+ v_.resize (i, T ()); // Add holes.
+
+ v_.push_back (move (x));
}
}
- T*
+ T
operator[] (size_type i) const
{
- return i < v_.size () ? v_[i] : nullptr;
+ return i < v_.size () ? v_[i] : T ();
}
bool
@@ -348,8 +376,28 @@ namespace build2
base_type v_;
};
- using meta_operations = sparse_vector<const meta_operation_info>;
- using operations = sparse_vector<const operation_info>;
+ // For each operation we keep both a pointer to its description and a
+ // pointer to its operation variable (see var_include), which may belong to
+ // the project-private variable pool.
+ //
+ struct project_operation_info
+ {
+ const operation_info* info = nullptr;
+ const variable* ovar = nullptr; // Operation variable.
+
+ // Allow treating it as pointer to operation_info in most contexts.
+ //
+ operator const operation_info*() const {return info;}
+ bool operator== (nullptr_t) {return info == nullptr;}
+ bool operator!= (nullptr_t) {return info != nullptr;}
+
+ project_operation_info (const operation_info* i = nullptr, // VC14
+ const variable* v = nullptr)
+ : info (i), ovar (v) {}
+ };
+
+ using meta_operations = sparse_vector<const meta_operation_info*, 8>;
+ using operations = sparse_vector<project_operation_info, 10>;
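A small sketch (not part of the patch) of the value-based hole semantics, using the declarations above (plus <cassert> for the checks):

  meta_operations mos;           // sparse_vector<const meta_operation_info*, 8>
  mos.insert (3, &mo_perform);   // Slots 0-2 become NULL holes.

  assert (mos[1] == nullptr);    // Hole.
  assert (mos[3] == &mo_perform);
  assert (mos[100] == nullptr);  // Lookup out of bounds is also a hole.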
}
namespace butl
diff --git a/libbuild2/options-types.hxx b/libbuild2/options-types.hxx
new file mode 100644
index 0000000..5c224a7
--- /dev/null
+++ b/libbuild2/options-types.hxx
@@ -0,0 +1,16 @@
+// file : libbuild2/options-types.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef LIBBUILD2_OPTIONS_TYPES_HXX
+#define LIBBUILD2_OPTIONS_TYPES_HXX
+
+namespace build2
+{
+ enum class structured_result_format
+ {
+ lines,
+ json
+ };
+}
+
+#endif // LIBBUILD2_OPTIONS_TYPES_HXX
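A short sketch (not part of the patch); the enum presumably backs a structured result option parsed elsewhere, so the initialization below is purely illustrative:

  structured_result_format f (structured_result_format::lines);

  switch (f)
  {
  case structured_result_format::lines: /* One result per line. */ break;
  case structured_result_format::json:  /* Serialize as JSON.   */ break;
  }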
diff --git a/libbuild2/parser.cxx b/libbuild2/parser.cxx
index 9f69117..5321cd5 100644
--- a/libbuild2/parser.cxx
+++ b/libbuild2/parser.cxx
@@ -24,6 +24,8 @@
#include <libbuild2/adhoc-rule-regex-pattern.hxx>
+#include <libbuild2/dist/module.hxx> // module
+
#include <libbuild2/config/utility.hxx> // lookup_config
using namespace std;
@@ -42,7 +44,10 @@ namespace build2
{
o << '=';
names storage;
- to_stream (o, reverse (a.value, storage), true /* quote */, '@');
+ to_stream (o,
+ reverse (a.value, storage, true /* reduce */),
+ quote_mode::normal,
+ '@');
}
return o;
@@ -57,27 +62,7 @@ namespace build2
enter_scope (parser& p, dir_path&& d)
: p_ (&p), r_ (p.root_), s_ (p.scope_), b_ (p.pbase_)
{
- // Try hard not to call normalize(). Most of the time we will go just
- // one level deeper.
- //
- bool n (true);
-
- if (d.relative ())
- {
- // Relative scopes are opened relative to out, not src.
- //
- if (d.simple () && !d.current () && !d.parent ())
- {
- d = dir_path (p.scope_->out_path ()) /= d.string ();
- n = false;
- }
- else
- d = p.scope_->out_path () / d;
- }
-
- if (n)
- d.normalize ();
-
+ complete_normalize (*p.scope_, d);
e_ = p.switch_scope (d);
}
@@ -103,8 +88,8 @@ namespace build2
// Note: move-assignable to empty only.
//
- enter_scope (enter_scope&& x) {*this = move (x);}
- enter_scope& operator= (enter_scope&& x)
+ enter_scope (enter_scope&& x) noexcept {*this = move (x);}
+ enter_scope& operator= (enter_scope&& x) noexcept
{
if (this != &x)
{
@@ -121,6 +106,31 @@ namespace build2
enter_scope (const enter_scope&) = delete;
enter_scope& operator= (const enter_scope&) = delete;
+ static void
+ complete_normalize (scope& s, dir_path& d)
+ {
+ // Try hard not to call normalize(). Most of the time we will go just
+ // one level deeper.
+ //
+ bool n (true);
+
+ if (d.relative ())
+ {
+ // Relative scopes are opened relative to out, not src.
+ //
+ if (d.simple () && !d.current () && !d.parent ())
+ {
+ d = dir_path (s.out_path ()) /= d.string ();
+ n = false;
+ }
+ else
+ d = s.out_path () / d;
+ }
+
+ if (n)
+ d.normalize ();
+ }
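For illustration (not part of the patch), assuming a scope whose out_path() is /tmp/out, the cases handled above work out as follows:

  //   d == "foo"    -> /tmp/out/foo  (simple: appended as is, no normalize())
  //   d == "a/../b" -> /tmp/out/b    (relative but not simple: normalized)
  //   d == "/abs/d" -> /abs/d        (absolute: only normalized)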
+
private:
parser* p_;
scope* r_;
@@ -162,7 +172,11 @@ namespace build2
tracer& tr)
{
auto r (p.scope_->find_target_type (n, o, loc));
- return p.ctx.targets.insert (
+
+ if (r.first.factory == nullptr)
+ p.fail (loc) << "abstract target type " << r.first.name << "{}";
+
+ return p.ctx->targets.insert (
r.first, // target type
move (n.dir),
move (o.dir),
@@ -182,12 +196,16 @@ namespace build2
tracer& tr)
{
auto r (p.scope_->find_target_type (n, o, loc));
- return p.ctx.targets.find (r.first, // target type
- n.dir,
- o.dir,
- n.value,
- r.second, // extension
- tr);
+
+ if (r.first.factory == nullptr)
+ p.fail (loc) << "abstract target type " << r.first.name << "{}";
+
+ return p.ctx->targets.find (r.first, // target type
+ n.dir,
+ o.dir,
+ n.value,
+ r.second, // extension
+ tr);
}
~enter_target ()
@@ -198,8 +216,8 @@ namespace build2
// Note: move-assignable to empty only.
//
- enter_target (enter_target&& x) {*this = move (x);}
- enter_target& operator= (enter_target&& x) {
+ enter_target (enter_target&& x) noexcept {*this = move (x);}
+ enter_target& operator= (enter_target&& x) noexcept {
p_ = x.p_; t_ = x.t_; x.p_ = nullptr; return *this;}
enter_target (const enter_target&) = delete;
@@ -230,8 +248,8 @@ namespace build2
// Note: move-assignable to empty only.
//
- enter_prerequisite (enter_prerequisite&& x) {*this = move (x);}
- enter_prerequisite& operator= (enter_prerequisite&& x) {
+ enter_prerequisite (enter_prerequisite&& x) noexcept {*this = move (x);}
+ enter_prerequisite& operator= (enter_prerequisite&& x) noexcept {
p_ = x.p_; r_ = x.r_; x.p_ = nullptr; return *this;}
enter_prerequisite (const enter_prerequisite&) = delete;
@@ -247,6 +265,7 @@ namespace build2
{
pre_parse_ = false;
attributes_.clear ();
+ condition_ = nullopt;
default_target_ = nullptr;
peeked_ = false;
replay_ = replay::stop;
@@ -259,10 +278,11 @@ namespace build2
scope* root,
scope& base,
target* tgt,
- prerequisite* prq)
+ prerequisite* prq,
+ bool enter)
{
lexer l (is, in);
- parse_buildfile (l, root, base, tgt, prq);
+ parse_buildfile (l, root, base, tgt, prq, enter);
}
void parser::
@@ -270,7 +290,8 @@ namespace build2
scope* root,
scope& base,
target* tgt,
- prerequisite* prq)
+ prerequisite* prq,
+ bool enter)
{
path_ = &l.name ();
lexer_ = &l;
@@ -289,9 +310,9 @@ namespace build2
? auto_project_env (*root_)
: auto_project_env ());
- if (path_->path != nullptr)
- enter_buildfile (*path_->path); // Note: needs scope_.
-
+ const buildfile* bf (enter && path_->path != nullptr
+ ? &enter_buildfile<buildfile> (*path_->path)
+ : nullptr);
token t;
type tt;
next (t, tt);
@@ -303,13 +324,34 @@ namespace build2
else
{
parse_clause (t, tt);
- process_default_target (t);
+
+ if (stage_ != stage::boot && stage_ != stage::root)
+ process_default_target (t, bf);
}
if (tt != type::eos)
fail (t) << "unexpected " << t;
}
+ names parser::
+ parse_export_stub (istream& is, const path_name& name,
+ const scope& rs, scope& gs, scope& ts)
+ {
+ // Enter the export stub manually with correct out.
+ //
+ if (name.path != nullptr)
+ {
+ dir_path out (!rs.out_eq_src ()
+ ? out_src (name.path->directory (), rs)
+ : dir_path ());
+
+ enter_buildfile<buildfile> (*name.path, move (out));
+ }
+
+ parse_buildfile (is, name, &gs, ts, nullptr, nullptr, false /* enter */);
+ return move (export_value);
+ }
+
token parser::
parse_variable (lexer& l, scope& s, const variable& var, type kind)
{
@@ -355,6 +397,81 @@ namespace build2
return make_pair (move (lhs), move (t));
}
+ names parser::
+ parse_names (lexer& l,
+ const dir_path* b,
+ pattern_mode pmode,
+ const char* what,
+ const string* separators)
+ {
+ path_ = &l.name ();
+ lexer_ = &l;
+
+ root_ = nullptr;
+ scope_ = nullptr;
+ target_ = nullptr;
+ prerequisite_ = nullptr;
+
+ pbase_ = b;
+
+ token t;
+ type tt;
+
+ mode (lexer_mode::value, '@');
+ next (t, tt);
+
+ names r (parse_names (t, tt, pmode, what, separators));
+
+ if (tt != type::eos)
+ fail (t) << "unexpected " << t;
+
+ return r;
+ }
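A rough usage sketch (not part of the patch); the lexer setup mirrors parse_buildfile() above, while the parser object p, the pattern_mode value, and the reliance on defaulted arguments are assumptions:

  // istringstream is ("foo/ lib{bar}");
  // path_name in ("<names>");
  // lexer l (is, in);
  //
  // names ns (p.parse_names (l, nullptr /* base */, pattern_mode::preserve));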
+
+ value parser::
+ parse_eval (lexer& l, scope& rs, scope& bs, pattern_mode pmode)
+ {
+ path_ = &l.name ();
+ lexer_ = &l;
+
+ root_ = &rs;
+ scope_ = &bs;
+ target_ = nullptr;
+ prerequisite_ = nullptr;
+
+ pbase_ = scope_->src_path_;
+
+ // Note that root_ may not be a project root.
+ //
+ auto_project_env penv (
+ stage_ != stage::boot && root_ != nullptr && root_->root_extra != nullptr
+ ? auto_project_env (*root_)
+ : auto_project_env ());
+
+ token t;
+ type tt;
+ next (t, tt);
+
+ if (tt != type::lparen)
+ fail (t) << "expected '(' instead of " << t;
+
+ location loc (get_location (t));
+ mode (lexer_mode::eval, '@');
+ next_with_attributes (t, tt);
+
+ values vs (parse_eval (t, tt, pmode));
+
+ if (next (t, tt) != type::eos)
+ fail (t) << "unexpected " << t;
+
+ switch (vs.size ())
+ {
+ case 0: return value (names ());
+ case 1: return move (vs[0]);
+ default: fail (loc) << "expected single value" << endf;
+ }
+ }
+
bool parser::
parse_clause (token& t, type& tt, bool one)
{
@@ -500,6 +617,12 @@ namespace build2
{
f = &parser::parse_config_environment;
}
+ else if (n == "recipe")
+ {
+ // Valid only after recipe header (%).
+ //
+ fail (t) << n << " directive without % recipe header";
+ }
if (f != nullptr)
{
@@ -516,9 +639,39 @@ namespace build2
location nloc (get_location (t));
names ns;
- if (tt != type::labrace)
+ // We have to parse names in chunks to detect invalid cases of the
+ // group{foo}<...> syntax.
+ //
+ // Consider (1):
+ //
+ // x =
+ // group{foo} $x<...>:
+ //
+ // And (2):
+ //
+ // x = group{foo} group{bar}
+ // $x<...>:
+ //
+ // As well as (3):
+ //
+ // <...><...>:
+ //
+ struct chunk
{
- ns = parse_names (t, tt, pattern_mode::preserve);
+ size_t pos; // Index in ns of the beginning of the last chunk.
+ location loc; // Position of the beginning of the last chunk.
+ };
+ optional<chunk> ns_last;
+
+ bool labrace_first (tt == type::labrace);
+ if (!labrace_first)
+ {
+ do
+ {
+ ns_last = chunk {ns.size (), get_location (t)};
+ parse_names (t, tt, ns, pattern_mode::preserve, true /* chunk */);
+ }
+ while (start_names (tt));
// Allow things like function calls that don't result in anything.
//
@@ -534,44 +687,87 @@ namespace build2
}
}
- // Handle ad hoc target group specification (<...>).
+ // Handle target group specification (<...>).
//
// We keep an "optional" (empty) vector of names parallel to ns that
- // contains the ad hoc group members.
+ // contains the group members. Note that when we "catch" gns up to ns,
+ // we populate it with ad hoc (as opposed to explicit) groups with no
+ // members.
//
- adhoc_names ans;
+ group_names gns;
if (tt == type::labrace)
{
- while (tt == type::labrace)
+ for (; tt == type::labrace; labrace_first = false)
{
- // Parse target names inside < >.
+ // Detect explicit group (group{foo}<...>).
+ //
+ // Note that `<` as the first thing on the line is not separated, hence
+ // the labrace_first complication.
+ //
+ bool expl (!t.separated && !labrace_first);
+ if (expl)
+ {
+ // Note: (N) refers to the example in the above comment.
+ //
+ if (!ns_last /* (3) */ || ns_last->pos == ns.size () /* (1) */)
+ {
+ fail (t) << "group name or whitespace expected before '<'";
+ }
+ else
+ {
+ size_t n (ns.size () - ns_last->pos);
+
+ // Note: could be a pair.
+ //
+ if ((n > 2 || (n == 2 && !ns[ns_last->pos].pair)) /* (2) */)
+ {
+ fail (t) << "single group name or whitespace expected before "
+ << "'<' instead of '"
+ << names_view (ns.data () + ns_last->pos, n) << "'";
+ }
+ }
+ }
+
+ // Parse target names inside <>.
//
// We "reserve" the right to have attributes inside <> though what
// exactly that would mean is unclear. One potentially useful
- // semantics would be the ability to specify attributes for ad hoc
- // members though the fact that the primary target is listed first
- // would make it rather unintuitive. Maybe attributes that change
- // the group semantics itself?
+ // semantics would be the ability to specify attributes for group
+ // members though the fact that the primary target for ad hoc groups
+ // is listed first would make it rather unintuitive. Maybe
+ // attributes that change the group semantics itself?
//
next_with_attributes (t, tt);
auto at (attributes_push (t, tt));
if (at.first)
- fail (at.second) << "attributes before ad hoc target";
+ fail (at.second) << "attributes before group member";
else
attributes_pop ();
- // Allow empty case (<>).
+ // For explicit groups, the group target is already in ns and all
+ // the members should go straight to gns.
//
- if (tt != type::rabrace)
+ // For ad hoc groups, the first name (or a pair) is the primary
+ // target which we need to keep in ns. The rest, if any, are ad
+ // hoc members that we should move to gns.
+ //
+ if (expl)
+ {
+ gns.resize (ns.size ()); // Catch up with the names vector.
+ group_names_loc& g (gns.back ());
+ g.expl = true;
+ g.group_loc = move (ns_last->loc);
+ g.member_loc = get_location (t); // Start of members.
+
+ if (tt != type::rabrace) // Handle empty case (<>)
+ parse_names (t, tt, g.ns, pattern_mode::preserve);
+ }
+ else if (tt != type::rabrace) // Allow and ignore empty case (<>).
{
- location aloc (get_location (t));
+ location mloc (get_location (t)); // Start of members.
- // The first name (or a pair) is the primary target which we need
- // to keep in ns. The rest, if any, are ad hoc members that we
- // should move to ans.
- //
size_t m (ns.size ());
parse_names (t, tt, ns, pattern_mode::preserve);
size_t n (ns.size ());
@@ -588,11 +784,10 @@ namespace build2
{
n -= m; // Number of names in ns we should end up with.
- ans.resize (n); // Catch up with the names vector.
- adhoc_names_loc& a (ans.back ());
-
- a.loc = move (aloc);
- a.ns.insert (a.ns.end (),
+ gns.resize (n); // Catch up with the names vector.
+ group_names_loc& g (gns.back ());
+ g.group_loc = g.member_loc = move (mloc);
+ g.ns.insert (g.ns.end (),
make_move_iterator (ns.begin () + n),
make_move_iterator (ns.end ()));
ns.resize (n);
@@ -606,12 +801,16 @@ namespace build2
// Parse the next chunk of target names after >, if any.
//
next (t, tt);
- if (start_names (tt))
- parse_names (t, tt, ns, pattern_mode::preserve);
+ ns_last = nullopt; // To detect <...><...>.
+ while (start_names (tt))
+ {
+ ns_last = chunk {ns.size (), get_location (t)};
+ parse_names (t, tt, ns, pattern_mode::preserve, true /* chunk */);
+ }
}
- if (!ans.empty ())
- ans.resize (ns.size ()); // Catch up with the final chunk.
+ if (!gns.empty ())
+ gns.resize (ns.size ()); // Catch up with the final chunk.
if (tt != type::colon)
fail (t) << "expected ':' instead of " << t;
@@ -630,10 +829,7 @@ namespace build2
if (ns.empty ())
fail (t) << "expected target before ':'";
- if (at.first)
- fail (at.second) << "attributes before target";
- else
- attributes_pop ();
+ attributes as (attributes_pop ());
// Call the specified parsing function (variable value/block) for
// one/each pattern/target. We handle multiple targets by replaying
@@ -642,10 +838,11 @@ namespace build2
// evaluated. The function signature is:
//
// void (token& t, type& tt,
+ // optional<bool> member, // true -- explicit, false -- ad hoc
// optional<pattern_type>, const target_type* pat_tt, string pat,
// const location& pat_loc)
//
- // Note that the target and its ad hoc members are inserted implied
+ // Note that the target and its group members are inserted implied
// but this flag can be cleared and default_target logic applied if
// appropriate.
//
@@ -735,23 +932,31 @@ namespace build2
// Resolve target type. If none is specified, then it's file{}.
//
+ // Note: abstract target type is ok here.
+ //
const target_type* ttype (n.untyped ()
? &file::static_type
: scope_->find_target_type (n.type));
if (ttype == nullptr)
- fail (nloc) << "unknown target type " << n.type;
+ fail (nloc) << "unknown target type " << n.type <<
+ info << "perhaps the module that defines this target type is "
+ << "not loaded by project " << *scope_->root_scope ();
- f (t, tt, n.pattern, ttype, move (n.value), nloc);
+ f (t, tt, nullopt, n.pattern, ttype, move (n.value), nloc);
};
auto for_each = [this, &trace, &for_one_pat,
- &t, &tt, &ns, &nloc, &ans] (auto&& f)
+ &t, &tt, &as, &ns, &nloc, &gns] (auto&& f)
{
+ // We need replay if we have multiple targets or group members.
+ //
// Note: watch out for an out-qualified single target (two names).
//
replay_guard rg (*this,
- ns.size () > 2 || (ns.size () == 2 && !ns[0].pair));
+ ns.size () > 2 ||
+ (ns.size () == 2 && !ns[0].pair) ||
+ !gns.empty ());
for (size_t i (0), e (ns.size ()); i != e; )
{
@@ -765,11 +970,15 @@ namespace build2
//
if (n.pattern)
{
+ if (!as.empty ())
+ fail (as.loc) << "attributes before target type/pattern";
+
if (n.pair)
fail (nloc) << "out-qualified target type/pattern";
- if (!ans.empty () && !ans[i].ns.empty ())
- fail (ans[i].loc) << "ad hoc member in target type/pattern";
+ if (!gns.empty () && !gns[i].ns.empty ())
+ fail (gns[i].member_loc)
+ << "group member in target type/pattern";
if (*n.pattern == pattern_type::regex_substitution)
fail (nloc) << "regex substitution " << n << " without "
@@ -779,24 +988,47 @@ namespace build2
}
else
{
- name o (n.pair ? move (ns[++i]) : name ());
- enter_target tg (*this,
- move (n),
- move (o),
- true /* implied */,
- nloc,
- trace);
-
- // Enter ad hoc members.
- //
- if (!ans.empty ())
+ bool expl;
+ vector<reference_wrapper<target>> gms;
{
- // Note: index after the pair increment.
+ name o (n.pair ? move (ns[++i]) : name ());
+ enter_target tg (*this,
+ move (n),
+ move (o),
+ true /* implied */,
+ nloc,
+ trace);
+
+ if (!as.empty ())
+ apply_target_attributes (*target_, as);
+
+ // Enter group members.
//
- enter_adhoc_members (move (ans[i]), true /* implied */);
+ if (!gns.empty ())
+ {
+ // Note: index after the pair increment.
+ //
+ group_names_loc& g (gns[i]);
+ expl = g.expl;
+
+ if (expl && !target_->is_a<group> ())
+ fail (g.group_loc) << *target_ << " is not a group target";
+
+ gms = expl
+ ? enter_explicit_members (move (g), true /* implied */)
+ : enter_adhoc_members (move (g), true /* implied */);
+ }
+
+ f (t, tt, nullopt, nullopt, nullptr, string (), location ());
}
- f (t, tt, nullopt, nullptr, string (), location ());
+ for (target& gm: gms)
+ {
+ rg.play (); // Replay.
+
+ enter_target tg (*this, gm);
+ f (t, tt, expl, nullopt, nullptr, string (), location ());
+ }
}
if (++i != e)
@@ -850,12 +1082,15 @@ namespace build2
ploc = get_location (t);
pns = parse_names (t, tt, pattern_mode::preserve);
- // Target-specific variable assignment.
+ // Target type/pattern-specific variable assignment.
//
if (tt == type::assign || tt == type::prepend || tt == type::append)
{
- if (!ans.empty ())
- fail (ans[0].loc) << "ad hoc member in target type/pattern";
+ // Note: ns contains single target name.
+ //
+ if (!gns.empty ())
+ fail (gns[0].member_loc)
+ << "group member in target type/pattern";
// Note: see the same code below if changing anything here.
//
@@ -874,6 +1109,7 @@ namespace build2
for_one_pat (
[this, &var, akind, &aloc] (
token& t, type& tt,
+ optional<bool>,
optional<pattern_type> pt, const target_type* ptt,
string pat, const location& ploc)
{
@@ -886,6 +1122,10 @@ namespace build2
nloc);
next_after_newline (t, tt);
+
+ if (!as.empty ())
+ fail (as.loc) << "attributes before target type/pattern";
+
continue; // Just a target type/pattern-specific var assignment.
}
@@ -915,6 +1155,7 @@ namespace build2
for_one_pat (
[this] (
token& t, type& tt,
+ optional<bool>,
optional<pattern_type> pt, const target_type* ptt,
string pat, const location& ploc)
{
@@ -934,8 +1175,14 @@ namespace build2
if (pns.empty () &&
tt != type::percent && tt != type::multi_lcbrace)
{
- if (!ans.empty ())
- fail (ans[0].loc) << "ad hoc member in target type/pattern";
+            // Note: ns contains a single target name.
+ //
+ if (!gns.empty ())
+ fail (gns[0].member_loc)
+ << "group member in target type/pattern";
+
+ if (!as.empty ())
+ fail (as.loc) << "attributes before target type/pattern";
continue;
}
@@ -943,6 +1190,38 @@ namespace build2
// Ok, this is an ad hoc pattern rule.
//
+ // First process the attributes.
+ //
+ string rn;
+ {
+ const location& l (as.loc);
+
+ for (auto& a: as)
+ {
+ const string& n (a.name);
+ value& v (a.value);
+
+ // rule_name=
+ //
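+          // For example, in a buildfile (hypothetical; the patterns and
+          // recipe are elided):
+          //
+          //   [rule_name=hello] <target-pattern>: <prereq-pattern>
+          //   {{
+          //     ...
+          //   }}
+          //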
+ if (n == "rule_name")
+ {
+ try
+ {
+ rn = convert<string> (move (v));
+
+ if (rn.empty ())
+ throw invalid_argument ("empty name");
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (l) << "invalid " << n << " attribute value: " << e;
+ }
+ }
+ else
+ fail (l) << "unknown ad hoc pattern rule attribute " << a;
+ }
+ }
+
// What should we do if we have neither prerequisites nor recipes?
// While such a declaration doesn't make much sense, it can happen,
// for example, with an empty variable expansion:
@@ -1004,22 +1283,33 @@ namespace build2
check_pattern (n, nloc);
- // Verify all the ad hoc members are patterns or substitutions and
- // of the correct type.
+          // If we have group members, verify all the members are patterns or
+          // substitutions (ad hoc) or just substitutions (explicit) and of
+          // the correct pattern type. A rule for an explicit group that
+          // wishes to match based on some of its members feels far-fetched.
+ //
+ // For explicit groups the use-case is to inject static members
+ // which could otherwise be tedious to specify for each group.
//
- names ns (ans.empty () ? names () : move (ans[0].ns));
- const location& aloc (ans.empty () ? location () : ans[0].loc);
+ const location& mloc (gns.empty () ? location () : gns[0].member_loc);
+ names ns (gns.empty () ? names () : move (gns[0].ns));
+ bool expl (gns.empty () ? false : gns[0].expl);
for (name& n: ns)
{
if (!n.pattern || !(*n.pattern == pt || (st && *n.pattern == *st)))
{
- fail (aloc) << "expected " << pn << " pattern or substitution "
+ fail (mloc) << "expected " << pn << " pattern or substitution "
<< "instead of " << n;
}
if (*n.pattern != pattern_type::regex_substitution)
- check_pattern (n, aloc);
+ {
+ if (expl)
+ fail (mloc) << "explicit group member pattern " << n;
+
+ check_pattern (n, mloc);
+ }
}
// The same for prerequisites except here we can have non-patterns.
@@ -1039,14 +1329,18 @@ namespace build2
}
}
- // Derive the rule name. It must be unique in this scope.
+ // Derive the rule name unless specified explicitly. It must be
+ // unique in this scope.
//
// It would have been nice to include the location but unless we
// include the absolute path to the buildfile (which would be
    // unwieldy), it could be ambiguous.
//
- string rn ("<ad hoc pattern rule #" +
- to_string (scope_->adhoc_rules.size () + 1) + '>');
+ // NOTE: we rely on the <...> format in dump.
+ //
+ if (rn.empty ())
+ rn = "<ad hoc pattern rule #" +
+ to_string (scope_->adhoc_rules.size () + 1) + '>';
auto& ars (scope_->adhoc_rules);
@@ -1059,7 +1353,9 @@ namespace build2
const target_type* ttype (nullptr);
if (i != ars.end ())
{
- // @@ TODO: append ad hoc members, prereqs.
+ // @@ TODO: append ad hoc members, prereqs (we now have
+ // [rule_name=] which we can use to reference the same
+ // rule).
//
ttype = &(*i)->type;
assert (false);
@@ -1073,7 +1369,15 @@ namespace build2
: scope_->find_target_type (n.type);
if (ttype == nullptr)
- fail (nloc) << "unknown target type " << n.type;
+ fail (nloc) << "unknown target type " << n.type <<
+ info << "perhaps the module that defines this target type is "
+ << "not loaded by project " << *scope_->root_scope ();
+
+ if (!gns.empty ())
+ {
+ if (ttype->is_a<group> () != expl)
+ fail (nloc) << "group type and target type mismatch";
+ }
unique_ptr<adhoc_rule_pattern> rp;
switch (pt)
@@ -1086,7 +1390,7 @@ namespace build2
rp.reset (new adhoc_rule_regex_pattern (
*scope_, rn, *ttype,
move (n), nloc,
- move (ns), aloc,
+ move (ns), mloc,
move (pns), ploc));
break;
case pattern_type::regex_substitution:
@@ -1109,8 +1413,13 @@ namespace build2
for (shared_ptr<adhoc_rule>& pr: recipes)
{
- pr->pattern = &rp; // Connect recipe to pattern.
- rp.rules.push_back (move (pr));
+ // Can be NULL if the recipe is disabled with a condition.
+ //
+ if (pr != nullptr)
+ {
+ pr->pattern = &rp; // Connect recipe to pattern.
+ rp.rules.push_back (move (pr));
+ }
}
// Register this adhoc rule for all its actions.
@@ -1121,7 +1430,7 @@ namespace build2
for (action a: r.actions)
{
- // This covers both duplicate recipe actions withing the rule
+ // This covers both duplicate recipe actions within the rule
// pattern (similar to parse_recipe()) as well as conflicts
// with other rules (ad hoc or not).
//
@@ -1157,20 +1466,38 @@ namespace build2
// sources into the distribution. Unless there is an explicit
// recipe for dist.
//
+ // And the same for the configure meta-operation to, for
+ // example, make sure a hinted ad hoc rule matches. @@ Hm,
+ // maybe we fixed this with action-specific hints? But the
+ // injection part above may still apply. BTW, this is also
+ // required for see-through groups in order to resolve their
+ // member.
+ //
+ // Note also that the equivalent semantics for ad hoc recipes
+ // is provided by match_adhoc_recipe().
+ //
if (a.meta_operation () == perform_id)
{
- action da (dist_id, a.operation ());
-
- for (shared_ptr<adhoc_rule>& pr: rp.rules)
+ auto reg = [this, ttype, &rp, &r] (action ea)
+ {
+ for (shared_ptr<adhoc_rule>& pr: rp.rules)
for (action a: pr->actions)
- if (da == a)
- goto skip;
+ if (ea == a)
+ return;
- scope_->rules.insert (da, *ttype, rp.rule_name, r);
+ scope_->rules.insert (ea, *ttype, rp.rule_name, r);
+ };
- skip:
- ;
+ reg (action (dist_id, a.operation ()));
+ reg (action (configure_id, a.operation ()));
}
+
+ // @@ TODO: if this rule does dynamic member discovery of a
+ // see-through target group, then we may also need to
+ // register update for other meta-operations (see, for
+ // example, wildcard update registration in the cli
+ // module). BTW, we can now detect such a target via
+ // its target type flags.
}
}
}
@@ -1214,6 +1541,7 @@ namespace build2
st = token (t), // Save start token (will be gone on replay).
recipes = small_vector<shared_ptr<adhoc_rule>, 1> ()]
(token& t, type& tt,
+ optional<bool> gm, // true -- explicit, false -- ad hoc
optional<pattern_type> pt, const target_type* ptt, string pat,
const location& ploc) mutable
{
@@ -1227,7 +1555,14 @@ namespace build2
//
next (t, tt); // Newline.
next (t, tt); // First token inside the variable block.
- parse_variable_block (t, tt, pt, ptt, move (pat), ploc);
+
+ // For explicit groups we only assign variables on the group
+ // omitting the members.
+ //
+ if (!gm || !*gm)
+ parse_variable_block (t, tt, pt, ptt, move (pat), ploc);
+ else
+ skip_block (t, tt);
if (tt != type::rcbrace)
fail (t) << "expected '}' instead of " << t;
@@ -1243,6 +1578,16 @@ namespace build2
else
rt = st;
+ // If this is a group member then we know we are replaying and
+ // can skip the recipe.
+ //
+ if (gm)
+ {
+ replay_skip ();
+ next (t, tt);
+ return;
+ }
+
if (pt)
fail (rt) << "unexpected recipe after target type/pattern" <<
info << "ad hoc pattern rule may not be combined with other "
@@ -1263,7 +1608,7 @@ namespace build2
// Note also that we treat this as an explicit dependency
// declaration (i.e., not implied).
//
- enter_targets (move (ns), nloc, move (ans), 0);
+ enter_targets (move (ns), nloc, move (gns), 0, as);
}
continue;
@@ -1278,7 +1623,8 @@ namespace build2
if (!start_names (tt))
fail (t) << "unexpected " << t;
- // @@ PAT: currently we pattern-expand target-specific var names.
+ // @@ PAT: currently we pattern-expand target-specific var names (see
+ // also parse_import()).
//
const location ploc (get_location (t));
names pns (parse_names (t, tt, pattern_mode::expand));
@@ -1313,6 +1659,7 @@ namespace build2
for_each (
[this, &var, akind, &aloc] (
token& t, type& tt,
+ optional<bool> gm,
optional<pattern_type> pt, const target_type* ptt, string pat,
const location& ploc)
{
@@ -1321,7 +1668,18 @@ namespace build2
*pt, *ptt, move (pat), ploc,
var, akind, aloc);
else
- parse_variable (t, tt, var, akind);
+ {
+ // Skip explicit group members (see the block case above for
+ // background).
+ //
+ if (!gm || !*gm)
+ parse_variable (t, tt, var, akind);
+ else
+ {
+ next (t, tt);
+ skip_line (t, tt);
+ }
+ }
});
next_after_newline (t, tt);
@@ -1339,8 +1697,9 @@ namespace build2
parse_dependency (t, tt,
move (ns), nloc,
- move (ans),
- move (pns), ploc);
+ move (gns),
+ move (pns), ploc,
+ as);
}
continue;
@@ -1564,7 +1923,7 @@ namespace build2
// Parse a recipe chain.
//
// % [<attrs>] [<buildspec>]
- // [if|switch ...]
+ // [if|if!|switch|recipe ...]
// {{ [<lang> ...]
// ...
// }}
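+    //
+    // For example, a minimal buildscript recipe chain (illustrative; based
+    // on the grammar above):
+    //
+    //   % update
+    //   {{
+    //     cp $path($<) $path($>)
+    //   }}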
@@ -1583,10 +1942,27 @@ namespace build2
//
if (target_ != nullptr)
{
+ // @@ What if some members are added later?
+ //
+          // @@ Also, what happens if it is redeclared as a real dependency,
+          //    do we upgrade the members?
+ //
if (target_->decl != target_decl::real)
{
- for (target* m (target_); m != nullptr; m = m->adhoc_member)
- m->decl = target_decl::real;
+ target_->decl = target_decl::real;
+
+ if (group* g = target_->is_a<group> ())
+ {
+ for (const target& m: g->static_members)
+ const_cast<target&> (m).decl = target_decl::real; // During load.
+ }
+ else
+ {
+ for (target* m (target_->adhoc_member);
+ m != nullptr;
+ m = m->adhoc_member)
+ m->decl = target_decl::real;
+ }
if (default_target_ == nullptr)
default_target_ = target_;
@@ -1599,7 +1975,15 @@ namespace build2
t = start; tt = t.type;
for (size_t i (0); tt == type::percent || tt == type::multi_lcbrace; ++i)
{
- recipes.push_back (nullptr); // For missing else/default (see below).
+ // For missing else/default (see below).
+ //
+ // Note that it may remain NULL if we have, say, an if-condition that
+ // evaluates to false and no else. While it may be tempting to get rid
+ // of such "holes", it's not easy due to the replay semantics (see the
+ // target_ != nullptr block below). So we expect the caller to be
+ // prepared to handle this.
+ //
+ recipes.push_back (nullptr);
attributes as;
buildspec bs;
@@ -1616,7 +2000,131 @@ namespace build2
attributes& as;
buildspec& bs;
const location& bsloc;
- } d {ttype, name, recipes, first, clean, i, as, bs, bsloc};
+ function<void (string&&)> parse_trailer;
+ } d {ttype, name, recipes, first, clean, i, as, bs, bsloc, {}};
+
+ d.parse_trailer = [this, &d] (string&& text)
+ {
+ if (d.first)
+ {
+ adhoc_rule& ar (*d.recipes.back ());
+
+ // Translate each buildspec entry into action and add it to the
+ // recipe entry.
+ //
+ const location& l (d.bsloc);
+
+ for (metaopspec& m: d.bs)
+ {
+ meta_operation_id mi (ctx->meta_operation_table.find (m.name));
+
+ if (mi == 0)
+ fail (l) << "unknown meta-operation " << m.name;
+
+ const meta_operation_info* mf (
+ root_->root_extra->meta_operations[mi]);
+
+ if (mf == nullptr)
+ fail (l) << "project " << *root_ << " does not support meta-"
+ << "operation " << ctx->meta_operation_table[mi].name;
+
+ for (opspec& o: m)
+ {
+ operation_id oi;
+ if (o.name.empty ())
+ {
+ if (mf->operation_pre == nullptr)
+ oi = update_id;
+ else
+ // Calling operation_pre() to translate doesn't feel
+ // appropriate here.
+ //
+ fail (l) << "default operation in recipe action" << endf;
+ }
+ else
+ oi = ctx->operation_table.find (o.name);
+
+ if (oi == 0)
+ fail (l) << "unknown operation " << o.name;
+
+ const operation_info* of (root_->root_extra->operations[oi]);
+
+ if (of == nullptr)
+ fail (l) << "project " << *root_ << " does not support "
+ << "operation " << ctx->operation_table[oi];
+
+ // Note: for now always inner (see match_rule_impl() for
+ // details).
+ //
+ action a (mi, oi);
+
+ // Check for duplicates (local).
+ //
+ if (find_if (
+ d.recipes.begin (), d.recipes.end (),
+ [a] (const shared_ptr<adhoc_rule>& r)
+ {
+ auto& as (r->actions);
+ return find (as.begin (), as.end (), a) != as.end ();
+ }) != d.recipes.end ())
+ {
+ fail (l) << "duplicate " << mf->name << '(' << of->name
+ << ") recipe";
+ }
+
+ ar.actions.push_back (a);
+ }
+ }
+
+ // Set the recipe text.
+ //
+ if (ar.recipe_text (
+ *scope_,
+ d.ttype != nullptr ? *d.ttype : target_->type (),
+ move (text),
+ d.as))
+ d.clean = true;
+
+ // Verify we have no unhandled attributes.
+ //
+ for (attribute& a: d.as)
+ fail (d.as.loc) << "unknown recipe attribute " << a << endf;
+ }
+
+ // Copy the recipe over to the target verifying there are no
+ // duplicates (global).
+ //
+ if (target_ != nullptr)
+ {
+ const shared_ptr<adhoc_rule>& r (d.recipes[d.i]);
+
+ for (const shared_ptr<adhoc_rule>& er: target_->adhoc_recipes)
+ {
+ auto& as (er->actions);
+
+ for (action a: r->actions)
+ {
+ if (find (as.begin (), as.end (), a) != as.end ())
+ {
+ const meta_operation_info* mf (
+ root_->root_extra->meta_operations[a.meta_operation ()]);
+
+ const operation_info* of (
+ root_->root_extra->operations[a.operation ()]);
+
+ fail (d.bsloc)
+ << "duplicate " << mf->name << '(' << of->name
+ << ") recipe for target " << *target_;
+ }
+ }
+ }
+
+ target_->adhoc_recipes.push_back (r);
+
+ // Note that "registration" of configure_* and dist_* actions
+ // (similar to ad hoc rules) is provided by match_adhoc_recipe().
+ }
+ };
// Note that this function must be called at most once per iteration.
//
@@ -1654,8 +2162,12 @@ namespace build2
//
location loc (get_location (st));
+        // @@ We could add an attribute (name= or recipe_name=) to allow
+        //    the user to specify a friendly name for diagnostics, similar
+        //    to rule_name.
+
shared_ptr<adhoc_rule> ar;
- if (!lang)
+ if (!lang || icasecmp (*lang, "buildscript") == 0)
{
// Buildscript
//
@@ -1751,129 +2263,200 @@ namespace build2
}
if (!skip)
- {
- if (d.first)
- {
- adhoc_rule& ar (*d.recipes.back ());
+ d.parse_trailer (move (t.value));
- // Translate each buildspec entry into action and add it to the
- // recipe entry.
- //
- const location& l (d.bsloc);
-
- for (metaopspec& m: d.bs)
- {
- meta_operation_id mi (ctx.meta_operation_table.find (m.name));
-
- if (mi == 0)
- fail (l) << "unknown meta-operation " << m.name;
+ next (t, tt);
+ assert (tt == type::multi_rcbrace);
- const meta_operation_info* mf (
- root_->root_extra->meta_operations[mi]);
+ next (t, tt); // Newline.
+ next_after_newline (t, tt, token (t)); // Should be on its own line.
+ };
- if (mf == nullptr)
- fail (l) << "project " << *root_ << " does not support meta-"
- << "operation " << ctx.meta_operation_table[mi].name;
+ auto parse_recipe_directive = [this, &d] (token& t, type& tt,
+ const string&)
+ {
+ // Parse recipe directive:
+ //
+ // recipe <lang> <file>
+ //
+ // Note that here <lang> is not optional.
+ //
+ // @@ We could guess <lang> from the extension.
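+        //
+        // For example (the recipe file name is illustrative):
+        //
+        //   recipe buildscript hello.buildscript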
- for (opspec& o: m)
- {
- operation_id oi;
- if (o.name.empty ())
- {
- if (mf->operation_pre == nullptr)
- oi = update_id;
- else
- // Calling operation_pre() to translate doesn't feel
- // appropriate here.
- //
- fail (l) << "default operation in recipe action" << endf;
- }
- else
- oi = ctx.operation_table.find (o.name);
+ // Use value mode to minimize the number of special characters.
+ //
+ mode (lexer_mode::value, '@');
- if (oi == 0)
- fail (l) << "unknown operation " << o.name;
+ // Parse <lang>.
+ //
+ if (next (t, tt) != type::word)
+ fail (t) << "expected recipe language instead of " << t;
- const operation_info* of (root_->root_extra->operations[oi]);
+ location lloc (get_location (t));
+ string lang (t.value);
+ next (t, tt);
- if (of == nullptr)
- fail (l) << "project " << *root_ << " does not support "
- << "operation " << ctx.operation_table[oi];
+ // Parse <file> as names to get variable expansion, etc.
+ //
+ location nloc (get_location (t));
+ names ns (parse_names (t, tt, pattern_mode::ignore, "file name"));
- // Note: for now always inner (see match_rule() for details).
- //
- action a (mi, oi);
+ path file;
+ try
+ {
+ file = convert<path> (move (ns));
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (nloc) << "invalid recipe file path: " << e;
+ }
- // Check for duplicates (local).
- //
- if (find_if (
- d.recipes.begin (), d.recipes.end (),
- [a] (const shared_ptr<adhoc_rule>& r)
- {
- auto& as (r->actions);
- return find (as.begin (), as.end (), a) != as.end ();
- }) != d.recipes.end ())
- {
- fail (l) << "duplicate " << mf->name << '(' << of->name
- << ") recipe";
- }
+ string text;
+ if (d.first)
+ {
+ // Source relative to the buildfile rather than src scope. In
+ // particular, this make sourcing from exported buildfiles work.
+        // particular, this makes sourcing from exported buildfiles work.
+ if (file.relative () && path_->path != nullptr)
+ {
+ // Note: all sourced/included/imported paths are absolute and
+ // normalized.
+ //
+ file = path_->path->directory () / file;
+ }
- ar.actions.push_back (a);
- }
- }
+ file.normalize ();
- // Set the recipe text.
- //
- if (ar.recipe_text (
- *scope_,
- d.ttype != nullptr ? *d.ttype : target_->type (),
- move (t.value),
- d.as))
- d.clean = true;
-
- // Verify we have no unhandled attributes.
- //
- for (attribute& a: d.as)
- fail (d.as.loc) << "unknown recipe attribute " << a << endf;
+ try
+ {
+ ifdstream ifs (file);
+ text = ifs.read_text ();
+ }
+ catch (const io_error& e)
+ {
+ fail (nloc) << "unable to read recipe file " << file << ": " << e;
}
- // Copy the recipe over to the target verifying there are no
- // duplicates (global).
- //
- if (target_ != nullptr)
+ shared_ptr<adhoc_rule> ar;
{
- const shared_ptr<adhoc_rule>& r (d.recipes[d.i]);
+ // This is expected to be the location of the opening multi-curly
+ // with the recipe body starting from the following line. So we
+ // need to fudge the line number a bit.
+ //
+ location loc (file, 0, 1);
- for (const shared_ptr<adhoc_rule>& er: target_->adhoc_recipes)
+ if (icasecmp (lang, "buildscript") == 0)
{
- auto& as (er->actions);
+ // Buildscript
+ //
+ ar.reset (
+ new adhoc_buildscript_rule (
+ d.name.empty () ? "<ad hoc buildscript recipe>" : d.name,
+ loc,
+ 2)); // Use `{{` and `}}` for dump.
- for (action a: r->actions)
+ // Enter as buildfile-like so that it gets automatically
+ // distributed. Note: must be consistent with build/export/
+ // handling in process_default_target().
+ //
+ enter_buildfile<buildscript> (file);
+ }
+ else if (icasecmp (lang, "c++") == 0)
+ {
+ // C++
+ //
+ // We expect to find a C++ comment line with version and
+ // optional fragment separator before the first non-comment,
+ // non-blank line:
+ //
+ // // c++ <ver> [<sep>]
+ //
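+            // For example (the separator value is illustrative):
+            //
+            //   // c++ 1 --
+            //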
+ string s;
+ location sloc (file, 1, 1);
{
- if (find (as.begin (), as.end (), a) != as.end ())
+ // Note: observe blank lines for accurate line count.
+ //
+ size_t b (0), e (0);
+ for (size_t m (0), n (text.size ());
+ next_word (text, n, b, e, m, '\n', '\r'), b != n;
+ sloc.line++)
{
- const meta_operation_info* mf (
- root_->root_extra->meta_operations[a.meta_operation ()]);
+ s.assign (text, b, e - b);
- const operation_info* of (
- root_->root_extra->operations[a.operation ()]);
+ if (!trim (s).empty ())
+ {
+ if (icasecmp (s, "// c++ ", 7) == 0)
+ break;
- fail (d.bsloc)
- << "duplicate " << mf->name << '(' << of->name
- << ") recipe for target " << *target_;
+ if (s[0] != '/' || s[1] != '/')
+ {
+ b = e;
+ break;
+ }
+ }
}
+
+ if (b == e)
+ fail (sloc) << "no '// c++ <version> [<separator>]' line";
}
- }
- target_->adhoc_recipes.push_back (r);
+ uint64_t ver;
+ optional<string> sep;
+ {
+ size_t b (7), e (7);
+ if (next_word (s, b, e, ' ', '\t') == 0)
+ fail (sloc) << "missing c++ recipe version" << endf;
+
+ try
+ {
+ ver = convert<uint64_t> (build2::name (string (s, b, e - b)));
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (sloc) << "invalid c++ recipe version: " << e << endf;
+ }
+
+ if (next_word (s, b, e, ' ', '\t') != 0)
+ {
+ sep = string (s, b, e - b);
+
+ if (next_word (s, b, e, ' ', '\t') != 0)
+ fail (sloc) << "junk after fragment separator";
+ }
+ }
+
+ ar.reset (
+ new adhoc_cxx_rule (
+ d.name.empty () ? "<ad hoc c++ recipe>" : d.name,
+ loc,
+ 2, // Use `{{` and `}}` for dump.
+ ver,
+ move (sep)));
+
+ // Enter as buildfile-like so that it gets automatically
+ // distributed. Note: must be consistent with build/export/
+ // handling in process_default_target().
+ //
+ // While ideally we would want to use the cxx{} target type,
+            // it's defined in a separate build system module (which may not
+ // even be loaded by this project, so even runtime lookup won't
+ // work). So we use file{} instead.
+ //
+ enter_buildfile<build2::file> (file);
+ }
+ else
+ fail (lloc) << "unknown recipe language '" << lang << "'";
}
+
+ assert (d.recipes[d.i] == nullptr);
+ d.recipes[d.i] = move (ar);
}
+ else
+ assert (d.recipes[d.i] != nullptr);
- next (t, tt);
- assert (tt == type::multi_rcbrace);
+ d.parse_trailer (move (text));
- next (t, tt); // Newline.
- next_after_newline (t, tt, token (t)); // Should be on its own line.
+ next_after_newline (t, tt);
};
bsloc = get_location (t); // Fallback location.
@@ -1893,8 +2476,7 @@ namespace build2
//
// TODO: handle and erase common attributes if/when we have any.
//
- as = move (attributes_top ());
- attributes_pop ();
+ as = attributes_pop ();
// Handle the buildspec.
//
@@ -1934,7 +2516,7 @@ namespace build2
expire_mode ();
next_after_newline (t, tt, "recipe action");
- // See if this is if-else or switch.
+ // See if this is if-else/switch or `recipe`.
//
// We want the keyword test similar to parse_clause() but we cannot do
// it if replaying. So we skip it with understanding that if it's not
@@ -1950,14 +2532,21 @@ namespace build2
// handy if we want to provide a custom recipe but only on certain
// platforms or some such).
- if (n == "if")
+ if (n == "if" || n == "if!")
{
- parse_if_else (t, tt, true /* multi */, parse_block);
+ parse_if_else (t, tt, true /* multi */,
+ parse_block, parse_recipe_directive);
continue;
}
else if (n == "switch")
{
- parse_switch (t, tt, true /* multi */, parse_block);
+ parse_switch (t, tt, true /* multi */,
+ parse_block, parse_recipe_directive);
+ continue;
+ }
+ else if (n == "recipe")
+ {
+ parse_recipe_directive (t, tt, "" /* kind */);
continue;
}
@@ -1965,7 +2554,7 @@ namespace build2
}
if (tt != type::multi_lcbrace)
- fail (t) << "expected recipe block instead of " << t;
+ fail (t) << "expected recipe block or 'recipe' instead of " << t;
// Fall through.
}
@@ -2010,13 +2599,97 @@ namespace build2
}
}
- void parser::
- enter_adhoc_members (adhoc_names_loc&& ans, bool implied)
+ vector<reference_wrapper<target>> parser::
+ enter_explicit_members (group_names_loc&& gns, bool implied)
+ {
+ tracer trace ("parser::enter_explicit_members", &path_);
+
+ names& ns (gns.ns);
+ const location& loc (gns.member_loc);
+
+ vector<reference_wrapper<target>> r;
+ r.reserve (ns.size ());
+
+ group& g (target_->as<group> ());
+ auto& ms (g.static_members);
+
+ for (size_t i (0); i != ns.size (); ++i)
+ {
+ name&& n (move (ns[i]));
+ name&& o (n.pair ? move (ns[++i]) : name ());
+
+ if (n.qualified ())
+ fail (loc) << "project name in target " << n;
+
+ // We derive the path unless the target name ends with the '...' escape
+ // which here we treat as the "let the rule derive the path" indicator
+ // (see target::split_name() for details). This will only be useful for
+ // referring to group members that are managed by the group's matching
+ // rule. Note also that omitting '...' for such a member could be used
+ // to override the file name, provided the rule checks if the path has
+ // already been derived before doing it itself.
+ //
+ // @@ What can the ad hoc recipe/rule do differently here? Maybe get
+ // path from dynamic targets? Maybe we will have custom path
+ // derivation support in buildscript in the future?
+ //
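+      // For example, a member declared as file{output...} (the name is
+      // illustrative) leaves it to the matching rule to derive the path.
+      //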
+ bool escaped;
+ {
+ const string& v (n.value);
+ size_t p (v.size ());
+
+ escaped = (p > 3 &&
+ v[--p] == '.' && v[--p] == '.' && v[--p] == '.' &&
+ v[--p] != '.');
+ }
+
+ target& m (enter_target::insert_target (*this,
+ move (n), move (o),
+ implied,
+ loc, trace));
+
+ if (g == m)
+ fail (loc) << "explicit group member " << m << " is group itself";
+
+ // Add as static member skipping duplicates.
+ //
+ if (find (ms.begin (), ms.end (), m) == ms.end ())
+ {
+ if (m.group == nullptr)
+ m.group = &g;
+ else if (m.group != &g)
+ fail (loc) << g << " group member " << m << " already belongs to "
+ << "group " << *m.group;
+
+ ms.push_back (m);
+ }
+
+ if (!escaped)
+ {
+ if (file* ft = m.is_a<file> ())
+ ft->derive_path ();
+ }
+
+ r.push_back (m);
+ }
+
+ return r;
+ }
+
+ vector<reference_wrapper<target>> parser::
+ enter_adhoc_members (group_names_loc&& gns, bool implied)
{
tracer trace ("parser::enter_adhoc_members", &path_);
- names& ns (ans.ns);
- const location& loc (ans.loc);
+ names& ns (gns.ns);
+ const location& loc (gns.member_loc);
+
+ if (target_->is_a<group> ())
+ fail (loc) << "ad hoc group primary member " << *target_
+ << " is explicit group";
+
+ vector<reference_wrapper<target>> r;
+ r.reserve (ns.size ());
for (size_t i (0); i != ns.size (); ++i)
{
@@ -2044,14 +2717,16 @@ namespace build2
v[--p] != '.');
}
- target& at (
- enter_target::insert_target (*this,
- move (n), move (o),
- implied,
- loc, trace));
+ target& m (enter_target::insert_target (*this,
+ move (n), move (o),
+ implied,
+ loc, trace));
+
+ if (target_ == &m)
+ fail (loc) << "ad hoc group member " << m << " is primary target";
- if (target_ == &at)
- fail (loc) << "ad hoc group member " << at << " is primary target";
+ if (m.is_a<group> ())
+ fail (loc) << "ad hoc group member " << m << " is explicit group";
// Add as an ad hoc member at the end of the chain skipping duplicates.
//
@@ -2059,7 +2734,7 @@ namespace build2
const_ptr<target>* mp (&target_->adhoc_member);
for (; *mp != nullptr; mp = &(*mp)->adhoc_member)
{
- if (*mp == &at)
+ if (*mp == &m)
{
mp = nullptr;
break;
@@ -2068,30 +2743,41 @@ namespace build2
if (mp != nullptr)
{
- *mp = &at;
- at.group = target_;
+ if (m.group == nullptr)
+ m.group = target_;
+ else if (m.group != target_)
+ fail (loc) << *target_ << " ad hoc group member " << m
+ << " already belongs to group " << *m.group;
+ *mp = &m;
}
}
if (!escaped)
{
- if (file* ft = at.is_a<file> ())
+ if (file* ft = m.is_a<file> ())
ft->derive_path ();
}
+
+ r.push_back (m);
}
+
+ return r;
}
- small_vector<reference_wrapper<target>, 1> parser::
+ small_vector<pair<reference_wrapper<target>,
+ vector<reference_wrapper<target>>>, 1> parser::
enter_targets (names&& tns, const location& tloc, // Target names.
- adhoc_names&& ans, // Ad hoc target names.
- size_t prereq_size)
+ group_names&& gns, // Group member names.
+ size_t prereq_size,
+ const attributes& tas) // Target attributes.
{
- // Enter all the targets (normally we will have just one) and their ad hoc
- // groups.
+ // Enter all the targets (normally we will have just one) and their group
+ // members.
//
tracer trace ("parser::enter_targets", &path_);
- small_vector<reference_wrapper<target>, 1> tgs;
+ small_vector<pair<reference_wrapper<target>,
+ vector<reference_wrapper<target>>>, 1> tgs;
for (size_t i (0); i != tns.size (); ++i)
{
@@ -2113,13 +2799,24 @@ namespace build2
false /* implied */,
tloc, trace);
- // Enter ad hoc members.
+ if (!tas.empty ())
+ apply_target_attributes (*target_, tas);
+
+ // Enter group members.
//
- if (!ans.empty ())
+ vector<reference_wrapper<target>> gms;
+ if (!gns.empty ())
{
// Note: index after the pair increment.
//
- enter_adhoc_members (move (ans[i]), false /* implied */);
+ group_names_loc& g (gns[i]);
+
+ if (g.expl && !target_->is_a<group> ())
+ fail (g.group_loc) << *target_ << " is not group target";
+
+ gms = g.expl
+ ? enter_explicit_members (move (g), false /* implied */)
+ : enter_adhoc_members (move (g), false /* implied */);
}
if (default_target_ == nullptr)
@@ -2127,17 +2824,97 @@ namespace build2
target_->prerequisites_state_.store (2, memory_order_relaxed);
target_->prerequisites_.reserve (prereq_size);
- tgs.push_back (*target_);
+ tgs.emplace_back (*target_, move (gms));
}
return tgs;
}
void parser::
+ apply_target_attributes (target& t, const attributes& as)
+ {
+ const location& l (as.loc);
+
+ for (auto& a: as)
+ {
+ const string& n (a.name);
+ const value& v (a.value);
+
+ // rule_hint=
+ // liba@rule_hint=
+ //
+ size_t p (string::npos);
+ if (n == "rule_hint" ||
+ ((p = n.find ('@')) != string::npos &&
+ n.compare (p + 1, string::npos, "rule_hint") == 0))
+ {
+ // Resolve target type, if specified.
+ //
+ const target_type* tt (nullptr);
+ if (p != string::npos)
+ {
+ string t (n, 0, p);
+ tt = scope_->find_target_type (t);
+
+ if (tt == nullptr)
+ fail (l) << "unknown target type " << t << " in rule_hint "
+ << "attribute";
+ }
+
+ // The rule hint value is vector<pair<optional<string>, string>> where
+ // the first half is the operation and the second half is the hint.
+ // Absent operation is used as a fallback for update/clean.
+ //
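+      // For example, in a buildfile (the common unqualified form;
+      // illustrative):
+      //
+      //   [rule_hint=cxx] exe{hello}: c{hello}
+      //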
+ const names& ns (v.as<names> ());
+
+ for (auto i (ns.begin ()); i != ns.end (); ++i)
+ {
+ operation_id oi (default_id);
+ if (i->pair)
+ {
+ const name& n (*i++);
+
+ if (!n.simple ())
+ fail (l) << "expected operation name instead of " << n
+ << " in rule_hint attribute";
+
+ const string& v (n.value);
+
+ if (!v.empty ())
+ {
+ oi = ctx->operation_table.find (v);
+
+ if (oi == 0)
+ fail (l) << "unknown operation " << v << " in rule_hint "
+ << "attribute";
+
+ if (root_->root_extra->operations[oi] == nullptr)
+ fail (l) << "project " << *root_ << " does not support "
+ << "operation " << ctx->operation_table[oi]
+ << " specified in rule_hint attribute";
+ }
+ }
+
+ const name& n (*i);
+
+ if (!n.simple () || n.empty ())
+ fail (l) << "expected hint instead of " << n << " in rule_hint "
+ << "attribute";
+
+ t.rule_hints.insert (tt, oi, n.value);
+ }
+ }
+ else
+ fail (l) << "unknown target attribute " << a;
+ }
+ }
+
+ void parser::
parse_dependency (token& t, token_type& tt,
names&& tns, const location& tloc, // Target names.
- adhoc_names&& ans, // Ad hoc target names.
- names&& pns, const location& ploc) // Prereq names.
+ group_names&& gns, // Group member names.
+ names&& pns, const location& ploc, // Prereq names.
+ const attributes& tas) // Target attributes.
{
// Parse a dependency chain and/or a target/prerequisite-specific variable
// assignment/block and/or recipe block(s).
@@ -2147,33 +2924,92 @@ namespace build2
//
tracer trace ("parser::parse_dependency", &path_);
+ // Diagnose conditional prerequisites. Note that we want to diagnose this
+ // even if pns is empty (think empty variable expansion; the literal "no
+ // prerequisites" case is handled elsewhere).
+ //
+ // @@ TMP For now we only do it during the dist meta-operation. In the
+ // future we should tighten this to any meta-operation provided
+ // the dist module is loaded.
+ //
+ // @@ TMP For now it's a warning because we have dependencies like
+ // cli.cxx{foo}: cli{foo} which are not currently possible to
+ // rewrite (cli.cxx{} is not always registered).
+ //
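+  // For example, a declaration along these lines (the condition is
+  // illustrative) would be flagged:
+  //
+  //   if ($cxx.target.class == 'windows')
+  //     exe{foo}: cxx{foo-win32}
+  //
+  // With the suggested rewrite being:
+  //
+  //   exe{foo}: cxx{foo-win32}: include = ($cxx.target.class == 'windows')
+  //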
+ if (condition_ &&
+ ctx->current_mif != nullptr &&
+ ctx->current_mif->id == dist_id)
+ {
+ // Only issue the warning for the projects being distributed. In
+ // particular, this makes sure we don't complain about imported
+ // projects. Note: use amalgamation to cover bundled subprojects.
+ //
+ auto* dm (root_->bundle_scope ()->find_module<dist::module> (
+ dist::module::name));
+
+ if (dm != nullptr && dm->distributed)
+ {
+ warn (tloc) << "conditional dependency declaration may result in "
+ << "incomplete distribution" <<
+ info (ploc) << "prerequisite declared here" <<
+ info (*condition_) << "conditional buildfile fragment starts here" <<
+ info << "instead use 'include' prerequisite-specific variable to "
+ << "conditionally include prerequisites" <<
+ info << "for example: "
+ << "<target>: <prerequisite>: include = (<condition>)" <<
+ info << "for details, see https://github.com/build2/HOWTO/blob/"
+ << "master/entries/keep-build-graph-config-independent.md";
+ }
+ }
+
// First enter all the targets.
//
- small_vector<reference_wrapper<target>, 1> tgs (
- enter_targets (move (tns), tloc, move (ans), pns.size ()));
+ small_vector<pair<reference_wrapper<target>,
+ vector<reference_wrapper<target>>>, 1>
+ tgs (enter_targets (move (tns), tloc, move (gns), pns.size (), tas));
// Now enter each prerequisite into each target.
//
- for (name& pn: pns)
+ for (auto i (pns.begin ()); i != pns.end (); ++i)
{
// We cannot reuse the names if we (potentially) may need to pass them
// as targets in case of a chain (see below).
//
- name n (tt != type::colon ? move (pn) : pn);
+ name n (tt != type::colon ? move (*i) : *i);
// See also scope::find_prerequisite_key().
//
auto rp (scope_->find_target_type (n, ploc));
- const target_type* tt (rp.first);
+ const target_type* t (rp.first);
optional<string>& e (rp.second);
- if (tt == nullptr)
- fail (ploc) << "unknown target type " << n.type;
+ if (t == nullptr)
+ {
+ if (n.proj)
+ {
+ // If the target type is unknown then no phase 2 import (like
+ // rule-specific search) can possibly succeed so we can fail now and
+ // with a more accurate reason. See import2(names) for background.
+ //
+ diag_record dr;
+ dr << fail (ploc) << "unable to import target " << n;
+ import_suggest (dr, *n.proj, nullptr, string (), false);
+ }
+ else
+ {
+ fail (ploc) << "unknown target type " << n.type <<
+ info << "perhaps the module that defines this target type is "
+ << "not loaded by project " << *scope_->root_scope ();
+ }
+ }
+
+ if (t->factory == nullptr)
+ fail (ploc) << "abstract target type " << t->name << "{}";
// Current dir collapses to an empty one.
//
if (!n.dir.empty ())
- n.dir.normalize (false, true);
+ n.dir.normalize (false /* actual */, true);
// @@ OUT: for now we assume the prerequisite's out is undetermined. The
// only way to specify an src prerequisite will be with the explicit
@@ -2184,10 +3020,47 @@ namespace build2
    // a special indicator. Also, one can easily and naturally suppress any
// searches by specifying the absolute path.
//
+ name o;
+ if (n.pair)
+ {
+ assert (n.pair == '@');
+
+ ++i;
+ o = tt != type::colon ? move (*i) : *i;
+
+ if (!o.directory ())
+ fail (ploc) << "expected directory after '@'";
+
+ o.dir.normalize (); // Note: don't collapse current to empty.
+
+ // Make sure out and src are parallel unless both were specified as
+ // absolute. We make an exception for this case because out may be
+ // used to "tag" imported targets (see cc::search_library()). So it's
+ // sort of the "I know what I am doing" escape hatch (it would have
+ // been even better to verify such a target is outside any project
+ // but that won't be cheap).
+ //
+ // For now we require that both are either relative or absolute.
+ //
+ // See similar code for targets in scope::find_target_type().
+ //
+ if (n.dir.absolute () && o.dir.absolute ())
+ ;
+ else if (n.dir.empty () && o.dir.current ())
+ ;
+ else if (o.dir.relative () &&
+ n.dir.relative () &&
+ o.dir == n.dir)
+ ;
+ else
+ fail (ploc) << "prerequisite output directory " << o.dir
+ << " must be parallel to source directory " << n.dir;
+ }
+
prerequisite p (move (n.proj),
- *tt,
+ *t,
move (n.dir),
- dir_path (),
+ move (o.dir),
move (n.value),
move (e),
*scope_);
@@ -2196,7 +3069,7 @@ namespace build2
{
// Move last prerequisite (which will normally be the only one).
//
- target& t (*i);
+ target& t (i->first);
t.prerequisites_.push_back (++i == e
? move (p)
: prerequisite (p, memory_order_relaxed));
@@ -2209,20 +3082,42 @@ namespace build2
//
// We handle multiple targets and/or prerequisites by replaying the tokens
// (see the target-specific case comments for details). The function
- // signature is:
+ // signature for for_each_t (see for_each on the gm argument semantics):
+ //
+ // void (token& t, type& tt, optional<bool> gm)
+ //
+ // And for for_each_p:
//
// void (token& t, type& tt)
//
auto for_each_t = [this, &t, &tt, &tgs] (auto&& f)
{
- replay_guard rg (*this, tgs.size () > 1);
+ // We need replay if we have multiple targets or group members.
+ //
+ replay_guard rg (*this, tgs.size () > 1 || !tgs[0].second.empty ());
for (auto ti (tgs.begin ()), te (tgs.end ()); ti != te; )
{
- target& tg (*ti);
- enter_target tgg (*this, tg);
+ target& tg (ti->first);
+ const vector<reference_wrapper<target>>& gms (ti->second);
+
+ {
+ enter_target g (*this, tg);
+ f (t, tt, nullopt);
+ }
+
+ if (!gms.empty ())
+ {
+ bool expl (tg.is_a<group> ());
- f (t, tt);
+ for (target& gm: gms)
+ {
+ rg.play (); // Replay.
+
+ enter_target g (*this, gm);
+ f (t, tt, expl);
+ }
+ }
if (++ti != te)
rg.play (); // Replay.
@@ -2235,8 +3130,8 @@ namespace build2
for (auto ti (tgs.begin ()), te (tgs.end ()); ti != te; )
{
- target& tg (*ti);
- enter_target tgg (*this, tg);
+ target& tg (ti->first);
+ enter_target g (*this, tg);
for (size_t pn (tg.prerequisites_.size ()), pi (pn - pns.size ());
pi != pn; )
@@ -2279,7 +3174,7 @@ namespace build2
this,
st = token (t), // Save start token (will be gone on replay).
recipes = small_vector<shared_ptr<adhoc_rule>, 1> ()]
- (token& t, type& tt) mutable
+ (token& t, type& tt, optional<bool> gm) mutable
{
token rt; // Recipe start token.
@@ -2289,7 +3184,14 @@ namespace build2
{
next (t, tt); // Newline.
next (t, tt); // First token inside the variable block.
- parse_variable_block (t, tt);
+
+ // Skip explicit group members (see the block case above for
+ // background).
+ //
+ if (!gm || !*gm)
+ parse_variable_block (t, tt);
+ else
+ skip_block (t, tt);
if (tt != type::rcbrace)
fail (t) << "expected '}' instead of " << t;
@@ -2305,6 +3207,16 @@ namespace build2
else
rt = st;
+ // If this is a group member then we know we are replaying and can
+ // skip the recipe.
+ //
+ if (gm)
+ {
+ replay_skip ();
+ next (t, tt);
+ return;
+ }
+
parse_recipe (t, tt, rt, recipes);
};
@@ -2314,21 +3226,6 @@ namespace build2
return;
}
- // What should we do if there are no prerequisites (for example, because
- // of an empty wildcard result)? We can fail or we can ignore. In most
- // cases, however, this is probably an error (for example, forgetting to
- // checkout a git submodule) so let's not confuse the user and fail (one
- // can always handle the optional prerequisites case with a variable and
- // an if).
- //
- if (pns.empty ())
- fail (ploc) << "no prerequisites in dependency chain or prerequisite-"
- << "specific variable assignment";
-
- next_with_attributes (t, tt); // Recognize attributes after `:`.
-
- auto at (attributes_push (t, tt));
-
// If we are here, then this can be one of three things:
//
  // 1. A prerequisite-specific variable block:
@@ -2342,10 +3239,37 @@ namespace build2
//
// foo: bar: x = y
//
- // 3. A further dependency chain :
+ // 3. A further dependency chain:
//
// foo: bar: baz ...
//
+ // What should we do if there are no prerequisites, for example, because
+ // of an empty wildcard result or empty variable expansion? We can fail or
+ // we can ignore. In most cases, however, this is probably an error (for
+ // example, forgetting to checkout a git submodule) so let's not confuse
+ // the user and fail (one can always handle the optional prerequisites
+ // case with a variable and an if).
+ //
+ // On the other hand, we allow just empty prerequisites (which is also the
+ // more common case by far) and so it's strange that we don't allow the
+ // same with, say, `include = false`:
+ //
+ // exe{foo}: cxx{$empty} # Ok.
+ // exe{foo}: cxx{$empty}: include = false # Not Ok?
+ //
+ // So let's ignore in the first two cases (variable block and assignment)
+ // for consistency. The dependency chain is iffy both conceptually and
+ // implementation-wise (it could be followed by a variable block). So
+ // let's keep it an error for now.
+ //
+ // Note that the syntactically-empty prerequisite list is still an error:
+ //
+ // exe{foo}: : include = false # Error.
+ //
+ next_with_attributes (t, tt); // Recognize attributes after `:`.
+
+ auto at (attributes_push (t, tt));
+
if (tt == type::newline || tt == type::eos)
{
attributes_pop (); // Must be none since can't be standalone.
@@ -2360,15 +3284,22 @@ namespace build2
// Parse the block for each prerequisites of each target.
//
- for_each_p ([this] (token& t, token_type& tt)
- {
- next (t, tt); // First token inside the block.
+ if (!pns.empty ())
+ for_each_p ([this] (token& t, token_type& tt)
+ {
+ next (t, tt); // First token inside the block.
- parse_variable_block (t, tt);
+ parse_variable_block (t, tt);
- if (tt != type::rcbrace)
- fail (t) << "expected '}' instead of " << t;
- });
+ if (tt != type::rcbrace)
+ fail (t) << "expected '}' instead of " << t;
+ });
+ else
+ {
+ skip_block (t, tt);
+ if (tt != type::rcbrace)
+ fail (t) << "expected '}' instead of " << t;
+ }
next (t, tt); // Presumably newline after '}'.
next_after_newline (t, tt, '}'); // Should be on its own line.
@@ -2391,10 +3322,13 @@ namespace build2
// Parse the assignment for each prerequisites of each target.
//
- for_each_p ([this, &var, at] (token& t, token_type& tt)
- {
- parse_variable (t, tt, var, at);
- });
+ if (!pns.empty ())
+ for_each_p ([this, &var, at] (token& t, token_type& tt)
+ {
+ parse_variable (t, tt, var, at);
+ });
+ else
+ skip_line (t, tt);
next_after_newline (t, tt);
@@ -2413,6 +3347,13 @@ namespace build2
//
else
{
+ if (pns.empty ())
+ fail (ploc) << "no prerequisites in dependency chain";
+
+ // @@ This is actually ambiguous: prerequisite or target attributes
+ // (or both or neither)? Perhaps this should be prerequisites for
+    //    the same reason as below (these are prerequisites first).
+ //
if (at.first)
fail (at.second) << "attributes before prerequisites";
else
@@ -2424,30 +3365,35 @@ namespace build2
// we just say that the dependency chain is equivalent to specifying
// each dependency separately.
//
- // Also note that supporting ad hoc target group specification in
- // chains will be complicated. For example, what if prerequisites that
- // have ad hoc targets don't end up being chained? Do we just silently
- // drop them? Also, these are prerequsites first that happened to be
- // reused as target names so perhaps it is the right thing not to
- // support, conceptually.
+ // Also note that supporting target group specification in chains will
+ // be complicated. For example, what if prerequisites that have group
+ // members don't end up being chained? Do we just silently drop them?
+      // Also, these are prerequisites first that happened to be reused as
+ // target names so perhaps it is the right thing not to support,
+ // conceptually.
//
parse_dependency (t, tt,
move (pns), ploc,
- {} /* ad hoc target name */,
- move (ns), loc);
+ {} /* group names */,
+ move (ns), loc,
+ attributes () /* target attributes */);
}
}
}
void parser::
- source (istream& is, const path_name& in, const location& loc, bool deft)
+ source_buildfile (istream& is,
+ const path_name& in,
+ const location& loc,
+ optional<bool> deft)
{
- tracer trace ("parser::source", &path_);
+ tracer trace ("parser::source_buildfile", &path_);
l5 ([&]{trace (loc) << "entering " << in;});
- if (in.path != nullptr)
- enter_buildfile (*in.path);
+ const buildfile* bf (in.path != nullptr
+ ? &enter_buildfile<buildfile> (*in.path)
+ : nullptr);
const path_name* op (path_);
path_ = &in;
@@ -2457,11 +3403,11 @@ namespace build2
lexer_ = &l;
target* odt;
- if (deft)
- {
+ if (!deft || *deft)
odt = default_target_;
+
+ if (deft && *deft)
default_target_ = nullptr;
- }
token t;
type tt;
@@ -2471,12 +3417,15 @@ namespace build2
if (tt != type::eos)
fail (t) << "unexpected " << t;
- if (deft)
+ if (deft && *deft)
{
- process_default_target (t);
- default_target_ = odt;
+ if (stage_ != stage::boot && stage_ != stage::root)
+ process_default_target (t, bf);
}
+ if (!deft || *deft)
+ default_target_ = odt;
+
lexer_ = ol;
path_ = op;
@@ -2486,11 +3435,35 @@ namespace build2
void parser::
parse_source (token& t, type& tt)
{
+ // source [<attrs>] <path>+
+ //
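+    // For example (the buildfile name is illustrative):
+    //
+    //   source [no_default_target] extras.build
+    //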
+
// The rest should be a list of buildfiles. Parse them as names in the
- // value mode to get variable expansion and directory prefixes.
+ // value mode to get variable expansion and directory prefixes. Also
+ // handle optional attributes.
//
mode (lexer_mode::value, '@');
- next (t, tt);
+ next_with_attributes (t, tt);
+ attributes_push (t, tt);
+
+ bool nodt (false); // Source buildfile without default target semantics.
+ {
+ attributes as (attributes_pop ());
+ const location& l (as.loc);
+
+ for (const attribute& a: as)
+ {
+ const string& n (a.name);
+
+ if (n == "no_default_target")
+ {
+ nodt = true;
+ }
+ else
+ fail (l) << "unknown source directive attribute " << a;
+ }
+ }
+
const location l (get_location (t));
names ns (tt != type::newline && tt != type::eos
? parse_names (t, tt, pattern_mode::expand, "path", nullptr)
@@ -2517,10 +3490,10 @@ namespace build2
try
{
ifdstream ifs (p);
- source (ifs,
- path_name (p),
- get_location (t),
- false /* default_target */);
+ source_buildfile (ifs,
+ path_name (p),
+ get_location (t),
+ nodt ? optional<bool> {} : false);
}
catch (const io_error& e)
{
@@ -2534,6 +3507,9 @@ namespace build2
void parser::
parse_include (token& t, type& tt)
{
+ // include <path>+
+ //
+
tracer trace ("parser::parse_include", &path_);
if (stage_ == stage::boot)
@@ -2642,19 +3618,35 @@ namespace build2
l6 ([&]{trace (l) << "absolute path " << p;});
- if (!root_->buildfiles.insert (p).second) // Note: may be "new" root.
+ // Note: may be "new" root.
+ //
+ if (!root_->root_extra->insert_buildfile (p))
{
l5 ([&]{trace (l) << "skipping already included " << p;});
continue;
}
+ // Note: see a variant of this in parse_import().
+ //
+ // Clear/restore if/switch location.
+ //
+ // We do it here but not in parse_source since the included buildfile is
+ // in a sense expected to be a standalone entity (think a file included
+ // from an export stub).
+ //
+ auto g = make_guard ([this, old = condition_] () mutable
+ {
+ condition_ = old;
+ });
+ condition_ = nullopt;
+
try
{
ifdstream ifs (p);
- source (ifs,
- path_name (p),
- get_location (t),
- true /* default_target */);
+ source_buildfile (ifs,
+ path_name (p),
+ get_location (t),
+ true /* default_target */);
}
catch (const io_error& e)
{
@@ -2706,13 +3698,16 @@ namespace build2
[] (const string& s) {return s.c_str ();});
cargs.push_back (nullptr);
+ // Note: we are in the serial load phase and so no diagnostics buffering
+ // is needed.
+ //
process pr (run_start (3 /* verbosity */,
cargs,
0 /* stdin */,
-1 /* stdout */,
- true /* error */,
- dir_path () /* cwd */,
+ 2 /* stderr */,
nullptr /* env */,
+ dir_path () /* cwd */,
l));
try
{
@@ -2734,10 +3729,10 @@ namespace build2
dr << info (l) << "while parsing " << args[0] << " output";
});
- source (is,
- path_name ("<stdout>"),
- l,
- false /* default_target */);
+ source_buildfile (is,
+ path_name ("<stdout>"),
+ l,
+ false /* default_target */);
}
is.close (); // Detect errors.
@@ -2751,7 +3746,7 @@ namespace build2
// caused by that and let run_finish() deal with it.
}
- run_finish (cargs, pr, l);
+ run_finish (cargs, pr, 2 /* verbosity */, false /* omit_normal */, l);
next_after_newline (t, tt);
}
@@ -2797,14 +3792,16 @@ namespace build2
// which case it will be duplicating them in its root.build file). So
// for now we allow this trusting the user knows what they are doing.
//
- string proj;
- {
- const project_name& n (named_project (*root_));
-
- if (!n.empty ())
- proj = n.variable ();
- }
-
+ // There is another special case: a buildfile imported from another
+ // project. In this case we also allow <project> to be the imported
+ // project name in addition to importing. The thinking here is that an
+ // imported buildfile is in a sense like a module (may provide rules which
+ // may require configuration, etc) and should be able to use its own
+ // project name (which is often the corresponding tool name) in the
+ // configuration variables, just like modules. In this case we use the
+    // imported project name as the reporting module name (but this can be
+    // overridden with the config.report.module attribute).
+ //
const location loc (get_location (t));
// We are now in the normal lexing mode and we let the lexer handle `?=`.
@@ -2812,18 +3809,28 @@ namespace build2
next_with_attributes (t, tt);
// Get variable attributes, if any, and deal with the special config.*
- // attributes. Since currently they can only appear in the config
- // directive, we handle them in an ad hoc manner.
+ // attributes as well as null. Since currently they can only appear in the
+ // config directive, we handle them in an ad hoc manner.
//
attributes_push (t, tt);
attributes& as (attributes_top ());
+ bool nullable (false);
optional<string> report;
string report_var;
+ // Reporting module name. Empty means the config module reporting
+ // project's own configuration.
+ //
+ project_name report_module;
+
for (auto i (as.begin ()); i != as.end (); )
{
- if (i->name == "config.report")
+ if (i->name == "null")
+ {
+ nullable = true;
+ }
+ else if (i->name == "config.report")
{
try
{
@@ -2835,7 +3842,7 @@ namespace build2
report = move (v);
else
throw invalid_argument (
- "expected 'false' or format name instead of '" + v + "'");
+ "expected 'false' or format name instead of '" + v + '\'');
}
catch (const invalid_argument& e)
{
@@ -2847,6 +3854,23 @@ namespace build2
try
{
report_var = convert<string> (move (i->value));
+
+ if (!report)
+ report = string ("true");
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (as.loc) << "invalid " << i->name << " attribute value: " << e;
+ }
+ }
+ else if (i->name == "config.report.module")
+ {
+ try
+ {
+ report_module = convert<project_name> (move (i->value));
+
+ if (!report)
+ report = string ("true");
}
catch (const invalid_argument& e)
{
@@ -2870,14 +3894,18 @@ namespace build2
// As a way to print custom (discovered, computed, etc) configuration
// information we allow specifying a non config.* variable provided it is
- // explicitly marked with the config.report attribute.
+ // explicitly marked with the config.report attribute (or another
+ // attribute that implies it).
//
bool new_val (false);
+ string org_var; // Original variable if config.report.variable specified.
+
+ const variable* var (nullptr); // config.* variable.
lookup l;
if (report && *report != "false" && !config)
{
- if (!as.empty ())
+ if (!as.empty () || nullable)
fail (as.loc) << "unexpected attributes for report-only variable";
attributes_pop ();
@@ -2891,7 +3919,14 @@ namespace build2
// philosophical question. In either case it doesn't seem useful for it
// to unconditionally force reporting at level 2.
//
- report_var = move (name);
+ if (!report_var.empty ())
+ {
+ // For example, config [config.report.variable=multi] multi_database
+ //
+ org_var = move (name);
+ }
+ else
+ report_var = move (name);
next (t, tt); // We shouldn't have the default value part.
}
@@ -2904,41 +3939,133 @@ namespace build2
// config prefix and the project substring.
//
{
- diag_record dr;
+ string proj;
+ {
+ const project_name& n (named_project (*root_));
- if (!config)
- dr << fail (t) << "configuration variable '" << name
- << "' does not start with 'config.'";
+ if (!n.empty ())
+ proj = n.variable ();
+ }
- if (!proj.empty ())
+ diag_record dr;
+ do // Breakout loop.
{
- size_t p (name.find ('.' + proj));
+ if (!config)
+ {
+ dr << fail (t) << "configuration variable '" << name
+ << "' does not start with 'config.'";
+ break;
+ }
- if (p == string::npos ||
- ((p += proj.size () + 1) != name.size () && // config.<proj>
- name[p] != '.')) // config.<proj>.
+ auto match = [&name] (const string& proj)
{
+ size_t p (name.find ('.' + proj));
+ return (p != string::npos &&
+ ((p += proj.size () + 1) == name.size () || // config.<proj>
+ name[p] == '.')); // config.<proj>.
+ };
+
+ if (!proj.empty () && match (proj))
+ break;
+
+ // See if this buildfile belongs to a different project. If so, use
+ // the project name as the reporting module name.
+ //
+ if (path_->path != nullptr)
+ {
+ // Note: all sourced/included/imported paths are absolute and
+ // normalized.
+ //
+ const path& f (*path_->path);
+ dir_path d (f.directory ());
+
+ auto p (ctx->scopes.find (d)); // Note: never empty.
+ if (*p.first != &ctx->global_scope)
+ {
+ // The buildfile will most likely be in src which means we may
+ // end up with multiple scopes (see scope_map for background).
+ // First check if one of them is us. If not, then we can extract
+ // the project name from any one of them.
+ //
+ const scope& bs (**p.first); // Save.
+
+ for (; p.first != p.second; ++p.first)
+ {
+ if (root_ == (*p.first)->root_scope ())
+ break;
+ }
+
+ if (p.first == p.second)
+ {
+ // Note: we expect the project itself to be named.
+ //
+ const project_name& n (project (*bs.root_scope ()));
+
+ if (!n.empty ())
+ {
+ // If the buildfile comes from a different project, then
+ // it's more likely to use the imported project's config
+ // variables. So replace proj with that for diagnostics
+ // below.
+ //
+ proj = n.variable ();
+
+ if (*report != "false" && verb >= 2)
+ report_module = n;
+ }
+ }
+ }
+ else
+ {
+ // If the buildfile is not in any project, then it could be
+ // installed.
+ //
+ // Per import2_buildfile(), exported buildfiles are installed
+ // into $install.buildfile/<proj>/....
+ //
+ const dir_path& id (build_install_buildfile);
+
+ if (!id.empty () && d.sub (id))
+ {
+ dir_path l (d.leaf (id));
+ if (!l.empty ())
+ {
+ project_name n (*l.begin ());
+ proj = n.variable ();
+
+ if (*report != "false" && verb >= 2)
+ report_module = move (n);
+ }
+ }
+ }
+ }
+
+ if (!proj.empty () && match (proj))
+ break;
+
+ // Note: only if proj not empty (see above).
+ //
+ if (!proj.empty ())
dr << fail (t) << "configuration variable '" << name
<< "' does not include project name";
- }
}
+ while (false);
if (!dr.empty ())
dr << info << "expected variable name in the 'config[.**]."
<< (proj.empty () ? "<project>" : proj.c_str ()) << ".**' form";
}
- const variable& var (
- parse_variable_name (move (name), get_location (t)));
- apply_variable_attributes (var);
+ var = &parse_variable_name (move (name), get_location (t));
+ apply_variable_attributes (*var);
// Note that even though we are relying on the config.** variable
// pattern to set global visibility, let's make sure as a sanity check.
//
- if (var.visibility != variable_visibility::global)
+ if (var->visibility != variable_visibility::global)
{
- fail (t) << "configuration variable " << var << " has "
- << var.visibility << " visibility";
+ fail (t) << "configuration variable " << *var << " has "
+ << var->visibility << " visibility";
}
// See if we have the default value part.
@@ -2960,15 +4087,15 @@ namespace build2
//
bool dev;
{
- size_t p (var.name.rfind ('.'));
- dev = p != 6 && var.name.compare (p + 1, string::npos, "develop") == 0;
+ size_t p (var->name.rfind ('.'));
+ dev = p != 6 && var->name.compare (p + 1, string::npos, "develop") == 0;
}
uint64_t sflags (0);
if (dev)
{
- if (var.type != &value_traits<bool>::value_type)
- fail (loc) << var << " variable must be of type bool";
+ if (var->type != &value_traits<bool>::value_type)
+ fail (loc) << *var << " variable must be of type bool";
// This is quite messy: below we don't always parse the value (plus it
// may be computed) so here we just peek at the next token. But we
@@ -2977,7 +4104,10 @@ namespace build2
if (!def_val ||
peek (lexer_mode::value, '@') != type::word ||
peeked ().value != "false")
- fail (loc) << var << " variable default value must be literal false";
+ fail (loc) << *var << " variable default value must be literal false";
+
+ if (nullable)
+ fail (loc) << *var << " variable must not be nullable";
sflags |= config::save_false_omitted;
}
@@ -2986,7 +4116,7 @@ namespace build2
// in order to mark it as saved. We also have to do this to get the new
// value status.
//
- l = config::lookup_config (new_val, *root_, var, sflags);
+ l = config::lookup_config (new_val, *root_, *var, sflags);
// Handle the default value.
//
@@ -2997,58 +4127,127 @@ namespace build2
// all.
//
if (l.defined ())
+ {
+ // Peek at the attributes to detect whether the value is NULL.
+ //
+ if (!dev && !nullable)
+ {
+ // Essentially a prefix of parse_variable_value().
+ //
+ mode (lexer_mode::value, '@');
+ next_with_attributes (t, tt);
+ attributes_push (t, tt, true);
+ for (const attribute& a: attributes_pop ())
+ {
+ if (a.name == "null")
+ {
+ nullable = true;
+ break;
+ }
+ }
+ }
+
skip_line (t, tt);
+ }
else
{
value lhs, rhs (parse_variable_value (t, tt, !dev /* mode */));
- apply_value_attributes (&var, lhs, move (rhs), type::assign);
- l = config::lookup_config (new_val, *root_, var, move (lhs), sflags);
+ apply_value_attributes (var, lhs, move (rhs), type::assign);
+
+ if (!nullable)
+ nullable = lhs.null;
+
+ l = config::lookup_config (new_val, *root_, *var, move (lhs), sflags);
}
}
+
+ // If the variable is not nullable, verify the value is not NULL.
+ //
+ // Note that undefined is not the same as NULL (if it is undefined, we
+ // should either see the default value or if there is no default value,
+ // then the user is expected to handle the undefined case).
+ //
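+ // As a buildfile-level illustration (variable names are hypothetical),
+ // compare:
+ //
+ //   config [string]       config.libhello.greeting ?= 'Hello'  # Non-nullable.
+ //   config [string, null] config.libhello.fallback ?= [null]   # Nullable.
+ //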
+ if (!nullable && l.defined () && l->null)
+ fail (loc) << "null value in non-nullable variable " << *var;
}
// We will be printing the report at either level 2 (-v) or 3 (-V)
- // depending on the final value of config_report_new.
+ // depending on the final value of config_report::new_value.
//
- // Note that for the config_report_new calculation we only incorporate
- // variables that we are actually reporting.
+ // Note that for the config_report::new_value calculation we only
+ // incorporate variables that we are actually reporting.
//
if (*report != "false" && verb >= 2)
{
+ // Find existing or insert new config_report entry for this module.
+ //
+ auto i (find_if (config_reports.begin (),
+ config_reports.end (),
+ [&report_module] (const config_report& r)
+ {
+ return r.module == report_module;
+ }));
+
+ if (i == config_reports.end ())
+ {
+ config_reports.push_back (
+ config_report {move (report_module), {}, false});
+ i = config_reports.end () - 1;
+ }
+
+ auto& report_values (i->values);
+ bool& report_new_value (i->new_value);
+
// We don't want to lookup the report variable value here since it's
// most likely not set yet.
//
if (!report_var.empty ())
{
+ if (org_var.empty () && var != nullptr)
+ org_var = var->name;
+
// In a somewhat hackish way we pass the variable in an undefined
// lookup.
//
+ // Note: consistent with parse_variable_name() wrt overridability.
+ //
l = lookup ();
l.var = &root_->var_pool ().insert (
- move (report_var), true /* overridable */);
+ move (report_var),
+ report_var.find ('.') != string::npos /* overridable */);
}
if (l.var != nullptr)
{
- auto r (make_pair (l, move (*report)));
-
// If we have a duplicate, update it (it could be useful to have
// multiple config directives to "probe" the value before calculating
// the default; see lookup_config() for details).
//
- auto i (find_if (config_report.begin (),
- config_report.end (),
- [&l] (const pair<lookup, string>& p)
+ // Since the original variable is what the user will see in the
+ // report, we prefer that as a key.
+ //
+ auto i (find_if (report_values.begin (),
+ report_values.end (),
+ [&org_var, &l] (const config_report::value& v)
{
- return p.first.var == l.var;
+ return (v.org.empty () && org_var.empty ()
+ ? v.val.var == l.var
+ : (v.org.empty ()
+ ? v.val.var->name == org_var
+ : v.org == l.var->name));
}));
- if (i == config_report.end ())
- config_report.push_back (move (r));
+ if (i == report_values.end ())
+ report_values.push_back (
+ config_report::value {l, move (*report), move (org_var)});
else
- *i = move (r);
+ {
+ i->val = l;
+ i->fmt = move (*report);
+ if (i->org.empty ()) i->org = move (org_var);
+ }
- config_report_new = config_report_new || new_val;
+ report_new_value = report_new_value || new_val;
}
}
@@ -3105,118 +4304,306 @@ namespace build2
if (stage_ == stage::boot)
fail (t) << "import during bootstrap";
- // General import format:
+ // General import form:
//
 // import[?!] [<attrs>] <var> = [<attrs>] (<target>|<project>%<target>)+
//
+ // Special form for importing buildfiles:
+ //
+ // import[?!] [<attrs>] (<target>|<project>%<target>)+
+ //
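+ // For example (project and target names are hypothetical):
+ //
+ //   import libs = libhello%lib{hello}    # First form.
+ //   import libhello%buildfile{common}    # Second form.
+ //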
bool opt (t.value.back () == '?');
- bool ph2 (opt || t.value.back () == '!');
+ optional<string> ph2 (opt || t.value.back () == '!'
+ ? optional<string> (string ())
+ : nullopt);
// We are now in the normal lexing mode and we let the lexer handle `=`.
//
next_with_attributes (t, tt);
- // Get variable attributes, if any, and deal with the special metadata
- // attribute. Since currently it can only appear in the import directive,
- // we handle it in an ad hoc manner.
+ // Get variable (or value, in the second form) attributes, if any, and
+ // deal with the special metadata and rule_hint attributes. Since
+ // currently they can only appear in the import directive, we handle them
+ // in an ad hoc manner.
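+ //
+ // For instance (names are hypothetical; the once and no_default_target
+ // attributes are handled below):
+ //
+ //   import! [metadata] cli = cli%exe{cli}
+ //   import [once, no_default_target] libhello%buildfile{functions}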
//
attributes_push (t, tt);
- attributes& as (attributes_top ());
- bool meta (false);
- for (auto i (as.begin ()); i != as.end (); )
+ bool meta (false); // Import with metadata.
+ bool once (false); // Import buildfile once.
+ bool nodt (false); // Import buildfile without default target semantics.
{
- if (i->name == "metadata")
- {
- if (!ph2)
- fail (as.loc) << "loading metadata requires immediate import" <<
- info << "consider using the import! directive instead";
+ attributes& as (attributes_top ());
+ const location& l (as.loc);
- meta = true;
- }
- else
+ for (auto i (as.begin ()); i != as.end (); )
{
- ++i;
- continue;
- }
+ const string& n (i->name);
+ value& v (i->value);
- i = as.erase (i);
- }
+ if (n == "metadata")
+ {
+ if (!ph2)
+ fail (l) << "loading metadata requires immediate import" <<
+ info << "consider using the import! directive instead";
- if (tt != type::word)
- fail (t) << "expected variable name instead of " << t;
+ meta = true;
+ }
+ else if (n == "no_default_target")
+ {
+ nodt = true;
+ }
+ else if (n == "once")
+ {
+ once = true;
+ }
+ else if (n == "rule_hint")
+ {
+ if (!ph2)
+ fail (l) << "rule hint can only be used with immediate import" <<
+ info << "consider using the import! directive instead";
- const variable& var (
- parse_variable_name (move (t.value), get_location (t)));
- apply_variable_attributes (var);
+ // Here we only allow a single name.
+ //
+ try
+ {
+ ph2 = convert<string> (move (v));
- if (var.visibility > variable_visibility::scope)
- {
- fail (t) << "variable " << var << " has " << var.visibility
- << " visibility but is assigned in import";
+ if (ph2->empty ())
+ throw invalid_argument ("empty name");
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (l) << "invalid " << n << " attribute value: " << e;
+ }
+ }
+ else
+ {
+ ++i;
+ continue;
+ }
+
+ i = as.erase (i);
+ }
}
- // Next should come the assignment operator. Note that we don't support
+ // Note that before supporting the second form (without <var>) we used to
+ // parse the value after assignment in the value mode. However, we don't
+ // really need to since what we should have is a bunch of target names.
+ // In other words, whatever the value mode does not treat as special
+ // compared to the normal mode (like `:`) would be illegal here.
+ //
+ // Note that we expand patterns for the ad hoc import case:
+ //
+ // import sub = */
+ //
+ // @@ PAT: the only issue here is that we currently pattern-expand var
+ //    name (same issue as with target-specific var names).
+ //
+ if (!start_names (tt))
+ fail (t) << "expected variable name or buildfile target instead of " << t;
+
+ location loc (get_location (t));
+ names ns (parse_names (t, tt, pattern_mode::expand));
+
+ // Next could come the assignment operator. Note that we don't support
// default assignment (?=) yet (could make sense when attempting to import
// alternatives or some such).
//
- next (t, tt);
+ type atype;
+ const variable* var (nullptr);
+ if (tt == type::assign || tt == type::append || tt == type::prepend)
+ {
+ var = &parse_variable_name (move (ns), loc);
+ apply_variable_attributes (*var);
+
+ if (var->visibility > variable_visibility::scope)
+ {
+ fail (loc) << "variable " << *var << " has " << var->visibility
+ << " visibility but is assigned in import";
+ }
- if (tt != type::assign && tt != type::append && tt != type::prepend)
- fail (t) << "expected variable assignment instead of " << t;
+ atype = tt;
+ next_with_attributes (t, tt);
+ attributes_push (t, tt, true /* standalone */);
- type atype (tt);
- value& val (atype == type::assign
- ? scope_->assign (var)
- : scope_->append (var));
+ if (!start_names (tt))
+ fail (t) << "expected target to import instead of " << t;
- // The rest should be a list of targets. Parse them similar to a value on
- // the RHS of an assignment (attributes, etc).
- //
- // Note that we expant patterns for the ad hoc import case:
- //
- // import sub = */
+ loc = get_location (t);
+ ns = parse_names (t, tt, pattern_mode::expand);
+ }
+ else if (tt == type::default_assign)
+ fail (t) << "default assignment not yet supported";
+
+ // If there are any value attributes, roundtrip the names through the
+ // value applying the attributes.
//
- mode (lexer_mode::value, '@');
- next_with_attributes (t, tt);
+ if (!attributes_top ().empty ())
+ {
+ value lhs, rhs (move (ns));
+ apply_value_attributes (nullptr, lhs, move (rhs), type::assign);
- if (tt == type::newline || tt == type::eos)
- fail (t) << "expected target to import instead of " << t;
+ if (!lhs)
+ fail (loc) << "expected target to import instead of null value";
- const location loc (get_location (t));
+ untypify (lhs, true /* reduce */);
+ ns = move (lhs.as<names> ());
+ }
+ else
+ attributes_pop ();
- if (value v = parse_value_with_attributes (t, tt, pattern_mode::expand))
+ value* val (var != nullptr ?
+ &(atype == type::assign
+ ? scope_->assign (*var)
+ : scope_->append (*var))
+ : nullptr);
+
+ for (name& n: ns)
{
- names storage;
- for (name& n: reverse (v, storage))
+ // @@ Could this be an out-qualified ad hoc import? Yes, see comment
+ // about buildfile import in import_load().
+ //
+ if (n.pair)
+ fail (loc) << "unexpected pair in import";
+
+ // See if we are importing a buildfile target. Such an import is always
+ // immediate.
+ //
+ bool bf (n.type == "buildfile");
+ if (bf)
{
- // @@ Could this be an out-qualified ad hoc import?
- //
- if (n.pair)
- fail (loc) << "unexpected pair in import";
+ if (meta)
+ fail (loc) << "metadata requested for buildfile target " << n;
- // import() will check the name, if required.
- //
- names r (import (*scope_, move (n), ph2, opt, meta, loc).first);
+ if (var != nullptr)
+ {
+ if (once)
+ fail (loc) << "once importation requested with variable assignment";
+
+ if (nodt)
+ fail (loc) << "no_default_target importation requested with "
+ << "variable assignment";
+ }
+
+ if (ph2 && !ph2->empty ())
+ fail (loc) << "rule hint specified for buildfile target " << n;
+ }
+ else
+ {
+ if (once)
+ fail (loc) << "once importation requested for target " << n;
+
+ if (nodt)
+ fail (loc) << "no_default_target importation requested for target "
+ << n;
+
+ if (var == nullptr)
+ fail (loc) << "variable assignment required to import target " << n;
+ }
+ // import() will check the name, if required.
+ //
+ import_result<scope> ir (
+ import (*scope_,
+ move (n),
+ ph2 ? ph2 : bf ? optional<string> (string ()) : nullopt,
+ opt,
+ meta,
+ loc));
+
+ names& r (ir.name);
+
+ if (val != nullptr)
+ {
if (r.empty ()) // Optional not found.
{
if (atype == type::assign)
- val = nullptr;
+ *val = nullptr;
}
else
{
- if (atype == type::assign)
- val.assign (move (r), &var);
- else if (atype == type::prepend)
- val.prepend (move (r), &var);
- else
- val.append (move (r), &var);
+ // Import (more precisely, alias) the target type into this project
+ // if not known.
+ //
+ // Note that if the result is ignored (val is NULL), then it's fair
+ // to assume this is not necessary.
+ //
+ if (const scope* iroot = ir.target)
+ {
+ const name& n (r.front ());
+ if (n.typed ())
+ import_target_type (*root_, *iroot, n.type, loc);
+ }
+
+ if (atype == type::assign) val->assign (move (r), var);
+ else if (atype == type::prepend) val->prepend (move (r), var);
+ else val->append (move (r), var);
}
if (atype == type::assign)
atype = type::append; // Append subsequent values.
}
+ else
+ {
+ assert (bf);
+
+ if (r.empty ()) // Optional not found.
+ {
+ assert (opt);
+ continue;
+ }
+
+ // Note: see also import_buildfile().
+ //
+ assert (r.size () == 1); // See import_load() for details.
+ name& n (r.front ());
+ path p (n.dir / n.value); // Should already include extension.
+
+ // Note: similar to parse_include().
+ //
+ // Nuance: we insert this buildfile even with once=false in case it
+ // gets imported with once=true from another place.
+ //
+ if (!root_->root_extra->insert_buildfile (p) && once)
+ {
+ l5 ([&]{trace (loc) << "skipping already imported " << p;});
+ continue;
+ }
+
+ // Clear/restore if/switch location.
+ //
+ auto g = make_guard ([this, old = condition_] () mutable
+ {
+ condition_ = old;
+ });
+ condition_ = nullopt;
+
+ try
+ {
+ ifdstream ifs (p);
+
+ auto df = make_diag_frame (
+ [this, &p, &loc] (const diag_record& dr)
+ {
+ dr << info (loc) << p << " imported from here";
+ });
+
+ // @@ Do we want to enter this buildfile? What's the harm (one
+ //    benefit is that it will be in dump). But we currently don't
+ //    out-qualify them, though it feels like there is nothing fatal
+ // in that, just inaccurate.
+ //
+ source_buildfile (ifs,
+ path_name (p),
+ loc,
+ nodt ? optional<bool> {} : false);
+ }
+ catch (const io_error& e)
+ {
+ fail (loc) << "unable to read imported buildfile " << p << ": " << e;
+ }
+ }
}
next_after_newline (t, tt);
@@ -3258,7 +4645,12 @@ namespace build2
fail (l) << "null value in export";
if (val.type != nullptr)
- untypify (val);
+ {
+ // While it feels far-fetched, let's preserve empty typed values in the
+ // result.
+ //
+ untypify (val, false /* reduce */);
+ }
export_value = move (val).as<names> ();
@@ -3298,6 +4690,9 @@ namespace build2
n = move (i->value);
+ if (n[0] == '_')
+ fail (l) << "module name '" << n << "' starts with underscore";
+
if (i->pair)
try
{
@@ -3342,41 +4737,160 @@ namespace build2
void parser::
parse_define (token& t, type& tt)
{
- // define <derived>: <base>
+ // define [<attrs>] <derived>: <base>
+ // define <alias> = <scope>/<type>
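+ //
+ // For example (target type, scope, and variable names are hypothetical):
+ //
+ //   define [see_through] thrifts: group
+ //   define thrift = $thrift_root/thrift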
//
// See tests/define.
//
- if (next (t, tt) != type::word)
- fail (t) << "expected name instead of " << t << " in target type "
- << "definition";
+ next_with_attributes (t, tt);
- string dn (move (t.value));
- const location dnl (get_location (t));
+ attributes_push (t, tt);
+ attributes as (attributes_pop ());
- if (next (t, tt) != type::colon)
- fail (t) << "expected ':' instead of " << t << " in target type "
+ if (tt != type::word)
+ fail (t) << "expected name instead of " << t << " in target type "
<< "definition";
+ string n (move (t.value));
+ const location nl (get_location (t));
+
next (t, tt);
- if (tt == type::word)
+ if (tt == type::colon)
{
+ // Handle attributes.
+ //
+ target_type::flag fs (target_type::flag::none);
+ {
+ const location& l (as.loc);
+
+ for (attribute& a: as)
+ {
+ const string& n (a.name);
+ value& v (a.value);
+
+ if (n == "see_through") fs |= target_type::flag::see_through;
+ else if (n == "member_hint") fs |= target_type::flag::member_hint;
+ else
+ fail (l) << "unknown target type definition attribute " << n;
+
+ if (!v.null)
+ fail (l) << "unexpected value in attribute " << n;
+ }
+ }
+
+ if (next (t, tt) != type::word)
+ fail (t) << "expected name instead of " << t << " in target type "
+ << "definition";
+
// Target.
//
const string& bn (t.value);
const target_type* bt (scope_->find_target_type (bn));
if (bt == nullptr)
- fail (t) << "unknown target type " << bn;
+ fail (t) << "unknown target type " << bn <<
+ info << "perhaps the module that defines this target type is "
+ << "not loaded by project " << *scope_->root_scope ();
- if (!root_->derive_target_type (move (dn), *bt).second)
- fail (dnl) << "target type " << dn << " already defined in this "
- << "project";
+ // The derive_target_type() call below does not produce a non-abstract
+ // type if passed an abstract base. So we ban this for now (it's unclear
+ // why someone would want to do this).
+ //
+ if (bt->factory == nullptr)
+ fail (t) << "abstract base target type " << bt->name << "{}";
+
+ // Note that the group{foo}<...> syntax is only recognized for group-
+ // based targets and ad hoc buildscript recipes/rules only match group.
+ // (We may want to relax this for member_hint in the future since it's
+ // currently also used on non-mtime-based targets, though what exactly
+ // we will do in ad hoc recipes/rules in this case is fuzzy).
+ //
+ if ((fs & target_type::flag::group) == target_type::flag::group &&
+ !bt->is_a<group> ())
+ fail (t) << "base target type " << bn << " must be group for "
+ << "group-related attribute";
+
+ if (!root_->derive_target_type (move (n), *bt, fs).second)
+ fail (nl) << "target type " << n << " already defined in project "
+ << *root_;
next (t, tt); // Get newline.
}
+ else if (tt == type::assign)
+ {
+ if (!as.empty ())
+ fail (as.loc) << "unexpected target type alias attribute";
+
+ // The rest should be a path-like target type. Parse it as names in
+ // the value mode to get variable expansion, etc.
+ //
+ mode (lexer_mode::value, '@');
+ next (t, tt);
+ const location tl (get_location (t));
+ names ns (
+ parse_names (t, tt, pattern_mode::ignore, "target type", nullptr));
+
+ name* tn (nullptr);
+ if (ns.size () == 1)
+ {
+ tn = &ns.front ();
+
+ if (tn->file ())
+ {
+ try
+ {
+ tn->canonicalize ();
+
+ if (tn->dir.absolute ())
+ tn->dir.normalize ();
+ else
+ tn = nullptr;
+ }
+ catch (const invalid_path&) {tn = nullptr;}
+ catch (const invalid_argument&) {tn = nullptr;}
+ }
+ else
+ tn = nullptr;
+ }
+
+ if (tn == nullptr)
+ fail (tl) << "expected scope-qualified target type instead of " << ns;
+
+ // If we got here, then tn->dir is the scope and tn->value is the target
+ // type.
+ //
+ // NOTE: see similar code in import_target_type().
+ //
+ const target_type* tt (nullptr);
+ if (const scope* rs = ctx->scopes.find_out (tn->dir).root_scope ())
+ {
+ tt = rs->find_target_type (tn->value);
+
+ if (tt == nullptr)
+ fail (tl) << "unknown target type " << tn->value << " in scope "
+ << *rs;
+ }
+ else
+ fail (tl) << "unknown project scope " << tn->dir << " in scope"
+ << "-qualified target type" <<
+ info << "did you forget to import the corresponding project?";
+
+ if (n != tn->value)
+ fail (nl) << "alias target type name " << n << " does not match "
+ << tn->value;
+
+ // Note that this is potentially a shallow reference to a user-derived
+ // target type. Seeing that we only ever destroy the entire graph, this
+ // should be ok.
+ //
+ auto p (root_->root_extra->target_types.insert (*tt));
+
+ if (!p.second && &p.first.get () != tt)
+ fail (nl) << "target type " << n << " already defined in this project";
+ }
else
- fail (t) << "expected name instead of " << t << " in target type "
+ fail (t) << "expected ':' or '=' instead of " << t << " in target type "
<< "definition";
next_after_newline (t, tt);
@@ -3385,19 +4899,28 @@ namespace build2
void parser::
parse_if_else (token& t, type& tt)
{
+ auto g = make_guard ([this, old = condition_] () mutable
+ {
+ condition_ = old;
+ });
+ condition_ = get_location (t);
+
parse_if_else (t, tt,
false /* multi */,
[this] (token& t, type& tt, bool s, const string& k)
{
return parse_clause_block (t, tt, s, k);
- });
+ },
+ {});
}
void parser::
parse_if_else (token& t, type& tt,
bool multi,
const function<void (
- token&, type&, bool, const string&)>& parse_block)
+ token&, type&, bool, const string&)>& parse_block,
+ const function<void (
+ token&, token_type&, const string&)>& parse_recipe_directive)
{
// Handle the whole if-else chain. See tests/if-else.
//
@@ -3422,7 +4945,7 @@ namespace build2
// is not an option. So let's skip it.
//
if (taken)
- skip_line (t, tt);
+ skip_line (t, tt); // Skip expression.
else
{
if (tt == type::newline || tt == type::eos)
@@ -3482,31 +5005,65 @@ namespace build2
parse_block (t, tt, !take, k);
taken = taken || take;
}
- else if (!multi) // No lines in multi-curly if-else.
+ else
{
- if (take)
+ // The only valid line in multi-curly if-else is `recipe`.
+ //
+ if (multi)
{
- if (!parse_clause (t, tt, true))
- fail (t) << "expected " << k << "-line instead of " << t;
+ // Note that we cannot do the keyword test if we are replaying. So
+ // we skip it with the understanding that if it's not a keyword,
+ // then we wouldn't have gotten here on the replay.
+ //
+ if (tt == type::word &&
+ (replay_ == replay::play || keyword (t)) &&
+ t.value == "recipe")
+ {
+ if (take)
+ {
+ parse_recipe_directive (t, tt, k);
+ taken = true;
+ }
+ else
+ {
+ skip_line (t, tt);
- taken = true;
+ if (tt == type::newline)
+ next (t, tt);
+ }
+ }
+ else
+ fail (t) << "expected " << k << "-block or 'recipe' instead of "
+ << t;
}
else
{
- skip_line (t, tt);
+ if (tt == type::multi_lcbrace)
+ fail (t) << "expected " << k << "-line instead of " << t <<
+ info << "did you forget to specify % recipe header?";
- if (tt == type::newline)
- next (t, tt);
+ if (take)
+ {
+ if (!parse_clause (t, tt, true))
+ fail (t) << "expected " << k << "-line instead of " << t;
+
+ taken = true;
+ }
+ else
+ {
+ skip_line (t, tt);
+
+ if (tt == type::newline)
+ next (t, tt);
+ }
}
}
- else
- fail (t) << "expected " << k << "-block instead of " << t;
// See if we have another el* keyword.
//
// Note that we cannot do the keyword test if we are replaying. So we
 // skip it with the understanding that if it's not a keyword, then we
- // wouldn't have gotten here on the reply (see parse_recipe() for
+ // wouldn't have gotten here on the replay (see parse_recipe() for
// details).
//
if (k != "else" &&
@@ -3526,19 +5083,28 @@ namespace build2
void parser::
parse_switch (token& t, type& tt)
{
+ auto g = make_guard ([this, old = condition_] () mutable
+ {
+ condition_ = old;
+ });
+ condition_ = get_location (t);
+
parse_switch (t, tt,
false /* multi */,
[this] (token& t, type& tt, bool s, const string& k)
{
return parse_clause_block (t, tt, s, k);
- });
+ },
+ {});
}
void parser::
parse_switch (token& t, type& tt,
bool multi,
const function<void (
- token&, type&, bool, const string&)>& parse_block)
+ token&, type&, bool, const string&)>& parse_block,
+ const function<void (
+ token&, token_type&, const string&)>& parse_recipe_directive)
{
// switch <value> [: <func> [<arg>]] [, <value>...]
// {
@@ -3633,7 +5199,7 @@ namespace build2
{
// Note that we cannot do the keyword test if we are replaying. So we
 // skip it with the understanding that if it's not a keyword, then we
- // wouldn't have gotten here on the reply (see parse_recipe() for
+ // wouldn't have gotten here on the replay (see parse_recipe() for
// details). Note that this appears to mean that replay cannot be used
// if we allow lines, only blocks. Consider:
//
@@ -3736,7 +5302,7 @@ namespace build2
if (!e.arg.empty ())
args.push_back (value (e.arg));
- value r (ctx.functions.call (scope_, *e.func, args, l));
+ value r (ctx->functions.call (scope_, *e.func, args, l));
// We support two types of functions: matchers and extractors:
// a matcher returns a statically-typed bool value while an
@@ -3839,25 +5405,49 @@ namespace build2
parse_block (t, tt, !take, k);
taken = taken || take;
}
- else if (!multi) // No lines in multi-curly if-else.
+ else
{
- if (take)
+ if (multi)
{
- if (!parse_clause (t, tt, true))
- fail (t) << "expected " << k << "-line instead of " << t;
+ if (tt == type::word &&
+ (replay_ == replay::play || keyword (t)) &&
+ t.value == "recipe")
+ {
+ if (take)
+ {
+ parse_recipe_directive (t, tt, k);
+ taken = true;
+ }
+ else
+ {
+ skip_line (t, tt);
- taken = true;
+ if (tt == type::newline)
+ next (t, tt);
+ }
+ }
+ else
+ fail (t) << "expected " << k << "-block or 'recipe' instead of "
+ << t;
}
else
{
- skip_line (t, tt);
+ if (take)
+ {
+ if (!parse_clause (t, tt, true))
+ fail (t) << "expected " << k << "-line instead of " << t;
- if (tt == type::newline)
- next (t, tt);
+ taken = true;
+ }
+ else
+ {
+ skip_line (t, tt);
+
+ if (tt == type::newline)
+ next (t, tt);
+ }
}
}
- else
- fail (t) << "expected " << k << "-block instead of " << t;
}
if (tt != type::rcbrace)
@@ -3870,10 +5460,10 @@ namespace build2
void parser::
parse_for (token& t, type& tt)
{
- // for <varname>: <value>
+ // for [<var-attrs>] <varname> [<elem-attrs>]: [<val-attrs>] <value>
// <line>
//
- // for <varname>: <value>
+ // for [<var-attrs>] <varname> [<elem-attrs>]: [<val-attrs>] <value>
// {
// <block>
// }
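+ //
+ // For example (variable and value are hypothetical), with the element
+ // attributes applied to each iteration's value:
+ //
+ //   for f [path]: $files
+ //     print $f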
@@ -3884,13 +5474,12 @@ namespace build2
next_with_attributes (t, tt);
attributes_push (t, tt);
- // @@ PAT: currently we pattern-expand for var.
+ // Enable list element attributes.
//
- const location vloc (get_location (t));
- names vns (parse_names (t, tt, pattern_mode::expand));
+ enable_attributes ();
- if (tt != type::colon)
- fail (t) << "expected ':' instead of " << t << " after variable name";
+ const location vloc (get_location (t));
+ names vns (parse_names (t, tt, pattern_mode::preserve));
const variable& var (parse_variable_name (move (vns), vloc));
apply_variable_attributes (var);
@@ -3901,6 +5490,17 @@ namespace build2
<< " visibility but is assigned in for-loop";
}
+ // Parse the list element attributes, if present.
+ //
+ attributes_push (t, tt);
+
+ if (tt != type::colon)
+ fail (t) << "expected ':' instead of " << t << " after variable name";
+
+ // Save element attributes so that we can inject them on each iteration.
+ //
+ attributes val_attrs (attributes_pop ());
+
// Now the value (list of names) to iterate over. Parse it similar to a
// value on the RHS of an assignment (expansion, attributes).
//
@@ -3909,15 +5509,24 @@ namespace build2
value val (parse_value_with_attributes (t, tt, pattern_mode::expand));
- // If this value is a vector, then save its element type so that we
+ // If the value type provides a custom iterate function, then use that (see
+ // value_type::iterate for details).
+ //
+ auto iterate (val.type != nullptr ? val.type->iterate : nullptr);
+
+ // If this value is a container, then save its element type so that we
// can typify each element below.
//
const value_type* etype (nullptr);
- if (val && val.type != nullptr)
+ if (!iterate && val && val.type != nullptr)
{
etype = val.type->element_type;
- untypify (val);
+
+ // Note that here we don't want to be reducing empty simple values to
+ // empty lists.
+ //
+ untypify (val, false /* reduce */);
}
if (tt != type::newline)
@@ -3965,32 +5574,50 @@ namespace build2
// Iterate.
//
- value& v (scope_->assign (var)); // Assign even if no iterations.
+ value& lhs (scope_->assign (var)); // Assign even if no iterations.
if (!val)
return;
- names& ns (val.as<names> ());
-
- if (ns.empty ())
- return;
+ names* ns (nullptr);
+ if (!iterate)
+ {
+ ns = &val.as<names> ();
+ if (ns->empty ())
+ return;
+ }
istringstream is (move (body));
- for (auto i (ns.begin ()), e (ns.end ());; )
+ struct data
+ {
+ const variable& var;
+ const attributes& val_attrs;
+ uint64_t line;
+ bool block;
+ value& lhs;
+ istringstream& is;
+
+ } d {var, val_attrs, line, block, lhs, is};
+
+ function<void (value&&, bool first)> iteration =
+ [this, &d] (value&& v, bool first)
{
- // Set the variable value.
+ // Rewind the stream.
+ //
+ if (!first)
+ {
+ d.is.clear ();
+ d.is.seekg (0);
+ }
+
+ // Inject element attributes.
//
- bool pair (i->pair);
- names n;
- n.push_back (move (*i));
- if (pair) n.push_back (move (*++i));
- v = value (move (n));
+ attributes_.push_back (d.val_attrs);
- if (etype != nullptr)
- typify (v, *etype, &var);
+ apply_value_attributes (&d.var, d.lhs, move (v), type::assign);
- lexer l (is, *path_, line);
+ lexer l (d.is, *path_, d.line);
lexer* ol (lexer_);
lexer_ = &l;
@@ -3998,7 +5625,7 @@ namespace build2
type tt;
next (t, tt);
- if (block)
+ if (d.block)
{
next (t, tt); // {
next (t, tt); // <newline>
@@ -4006,20 +5633,33 @@ namespace build2
parse_clause (t, tt);
- if (tt != (block ? type::rcbrace : type::eos))
- fail (t) << "expected name " << (block ? "or '}' " : "")
+ if (tt != (d.block ? type::rcbrace : type::eos))
+ fail (t) << "expected name " << (d.block ? "or '}' " : "")
<< "instead of " << t;
lexer_ = ol;
+ };
- if (++i == e)
- break;
+ if (!iterate)
+ {
+ for (auto b (ns->begin ()), i (b), e (ns->end ()); i != e; ++i)
+ {
+ // Set the variable value.
+ //
+ bool pair (i->pair);
+ names n;
+ n.push_back (move (*i));
+ if (pair) n.push_back (move (*++i));
+ value v (move (n));
- // Rewind the stream.
- //
- is.clear ();
- is.seekg (0);
+ if (etype != nullptr)
+ typify (v, *etype, &var);
+
+ iteration (move (v), i == b);
+ }
}
+ else
+ iterate (val, iteration);
}
void parser::
@@ -4092,7 +5732,7 @@ namespace build2
if (value v = parse_value_with_attributes (t, tt, pattern_mode::expand))
{
names storage;
- cout << reverse (v, storage) << endl;
+ cout << reverse (v, storage, true /* reduce */) << endl;
}
else
cout << "[null]" << endl;
@@ -4125,7 +5765,7 @@ namespace build2
if (value v = parse_value_with_attributes (t, tt, pattern_mode::expand))
{
names storage;
- dr << reverse (v, storage);
+ dr << reverse (v, storage, true /* reduce */);
}
if (tt != type::eos)
@@ -4155,8 +5795,10 @@ namespace build2
if (ns.empty ())
{
+ // Indent two spaces.
+ //
if (scope_ != nullptr)
- dump (*scope_, " "); // Indent two spaces.
+ dump (scope_, nullopt /* action */, dump_format::buildfile, " ");
else
os << " <no current scope>" << endl;
}
@@ -4174,8 +5816,10 @@ namespace build2
const target* t (enter_target::find_target (*this, n, o, l, trace));
+ // Indent two spaces.
+ //
if (t != nullptr)
- dump (*t, " "); // Indent two spaces.
+ dump (t, nullopt /* action */, dump_format::buildfile, " ");
else
{
os << " <no target " << n;
@@ -4197,10 +5841,12 @@ namespace build2
{
// Enter a variable name for assignment (as opposed to lookup).
+ // If the variable is qualified (and thus public), make it overridable.
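+ //
+ // For example (names are hypothetical), `libhello.fancy = true` enters an
+ // overridable variable while plain `fancy = true` does not.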
+ //
// Note that the overridability can still be restricted (e.g., by a module
// that enters this variable or by a pattern).
//
- bool ovr (true);
+ bool ovr (on.find ('.') != string::npos);
auto r (scope_->var_pool ().insert (move (on), nullptr, nullptr, &ovr));
if (!r.second)
@@ -4234,9 +5880,13 @@ namespace build2
{
// Parse and enter a variable name for assignment (as opposed to lookup).
- // The list should contain a single, simple name.
+ // The list should contain a single, simple name. Go an extra mile to
+ // issue less confusing diagnostics.
//
- if (ns.size () != 1 || ns[0].pattern || !ns[0].simple () || ns[0].empty ())
+ size_t n (ns.size ());
+ if (n == 0 || (n == 1 && ns[0].empty ()))
+ fail (l) << "empty variable name";
+ else if (n != 1 || ns[0].pattern || !ns[0].simple ())
fail (l) << "expected variable name instead of " << ns;
return parse_variable_name (move (ns[0].value), l);
@@ -4292,7 +5942,7 @@ namespace build2
// Note that the pattern is preserved if insert fails with regex_error.
//
p = scope_->target_vars[ptt].insert (pt, move (pat)).insert (
- var, kind == type::assign);
+ var, kind == type::assign, false /* reset_extra */);
}
catch (const regex_error& e)
{
@@ -4306,7 +5956,12 @@ namespace build2
// We store prepend/append values untyped (similar to overrides).
//
if (rhs.type != nullptr && kind != type::assign)
- untypify (rhs);
+ {
+ // Our heuristic for prepend/append of a typed value is to preserve
+ // empty (see apply_value_attributes() for details) so do not reduce.
+ //
+ untypify (rhs, false /* reduce */);
+ }
if (p.second)
{
@@ -4393,32 +6048,119 @@ namespace build2
: value (names ());
}
- static const value_type*
- map_type (const string& n)
+ const value_type* parser::
+ find_value_type (const scope*, const string& n)
{
- auto ptr = [] (const value_type& vt) {return &vt;};
-
- return
- n == "bool" ? ptr (value_traits<bool>::value_type) :
- n == "int64" ? ptr (value_traits<int64_t>::value_type) :
- n == "uint64" ? ptr (value_traits<uint64_t>::value_type) :
- n == "string" ? ptr (value_traits<string>::value_type) :
- n == "path" ? ptr (value_traits<path>::value_type) :
- n == "dir_path" ? ptr (value_traits<dir_path>::value_type) :
- n == "abs_dir_path" ? ptr (value_traits<abs_dir_path>::value_type) :
- n == "name" ? ptr (value_traits<name>::value_type) :
- n == "name_pair" ? ptr (value_traits<name_pair>::value_type) :
- n == "target_triplet" ? ptr (value_traits<target_triplet>::value_type) :
- n == "project_name" ? ptr (value_traits<project_name>::value_type) :
-
- n == "int64s" ? ptr (value_traits<int64s>::value_type) :
- n == "uint64s" ? ptr (value_traits<uint64s>::value_type) :
- n == "strings" ? ptr (value_traits<strings>::value_type) :
- n == "paths" ? ptr (value_traits<paths>::value_type) :
- n == "dir_paths" ? ptr (value_traits<dir_paths>::value_type) :
- n == "names" ? ptr (value_traits<vector<name>>::value_type) :
-
- nullptr;
+ switch (n[0])
+ {
+ case 'a':
+ {
+ if (n == "abs_dir_path") return &value_traits<abs_dir_path>::value_type;
+ break;
+ }
+ case 'b':
+ {
+ if (n == "bool") return &value_traits<bool>::value_type;
+ break;
+ }
+ case 'c':
+ {
+ if (n == "cmdline") return &value_traits<cmdline>::value_type;
+ break;
+ }
+ case 'd':
+ {
+ if (n.compare (0, 8, "dir_path") == 0)
+ {
+ if (n[8] == '\0') return &value_traits<dir_path>::value_type;
+ if (n[8] == 's' &&
+ n[9] == '\0') return &value_traits<dir_paths>::value_type;
+ }
+ break;
+ }
+ case 'i':
+ {
+ if (n.compare (0, 5, "int64") == 0)
+ {
+ if (n[5] == '\0') return &value_traits<int64_t>::value_type;
+ if (n[5] == 's' &&
+ n[6] == '\0') return &value_traits<int64s>::value_type;
+ }
+ break;
+ }
+ case 'j':
+ {
+ if (n.compare (0, 4, "json") == 0)
+ {
+ if (n[4] == '\0') return &value_traits<json_value>::value_type;
+ if (n == "json_array") return &value_traits<json_array>::value_type;
+ if (n == "json_object")
+ return &value_traits<json_object>::value_type;
+ if (n == "json_set")
+ return &value_traits<set<json_value>>::value_type;
+ if (n == "json_map")
+ return &value_traits<map<json_value, json_value>>::value_type;
+ }
+ break;
+ }
+ case 'n':
+ {
+ if (n.compare (0, 4, "name") == 0)
+ {
+ if (n[4] == '\0') return &value_traits<name>::value_type;
+ if (n[4] == 's' &&
+ n[5] == '\0') return &value_traits<vector<name>>::value_type;
+ if (n == "name_pair") return &value_traits<name_pair>::value_type;
+ }
+ break;
+ }
+
+ case 'p':
+ {
+ if (n.compare (0, 4, "path") == 0)
+ {
+ if (n[4] == '\0') return &value_traits<path>::value_type;
+ if (n[4] == 's' &&
+ n[5] == '\0') return &value_traits<paths>::value_type;
+ }
+ else if (n == "project_name")
+ return &value_traits<project_name>::value_type;
+ break;
+ }
+ case 's':
+ {
+ if (n.compare (0, 6, "string") == 0)
+ {
+ if (n[6] == '\0') return &value_traits<string>::value_type;
+ if (n[6] == 's' &&
+ n[7] == '\0') return &value_traits<strings>::value_type;
+ if (n == "string_set") return &value_traits<set<string>>::value_type;
+ if (n == "string_map")
+ return &value_traits<map<string,string>>::value_type;
+ }
+ break;
+ }
+ case 't':
+ {
+ if (n == "target_triplet")
+ return &value_traits<target_triplet>::value_type;
+ break;
+ }
+ case 'u':
+ {
+ if (n.compare (0, 6, "uint64") == 0)
+ {
+ if (n[6] == '\0') return &value_traits<uint64_t>::value_type;
+ if (n[6] == 's' &&
+ n[7] == '\0') return &value_traits<uint64s>::value_type;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ return nullptr;
}
void parser::
@@ -4440,19 +6182,62 @@ namespace build2
string& n (a.name);
value& v (a.value);
- if (const value_type* t = map_type (n))
+ if (n == "visibility")
+ {
+ try
+ {
+ string s (convert<string> (move (v)));
+
+ variable_visibility r;
+ if (s == "global") r = variable_visibility::global;
+ else if (s == "project") r = variable_visibility::project;
+ else if (s == "scope") r = variable_visibility::scope;
+ else if (s == "target") r = variable_visibility::target;
+ else if (s == "prerequisite") r = variable_visibility::prereq;
+ else throw invalid_argument ("unknown visibility name");
+
+ if (vis && r != *vis)
+ fail (l) << "conflicting variable visibilities: " << s << ", "
+ << *vis;
+
+ vis = r;
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (l) << "invalid " << n << " attribute value: " << e;
+ }
+ }
+ else if (n == "overridable")
+ {
+ try
+ {
+ // Treat absent value (represented as NULL) as true.
+ //
+ bool r (v.null || convert<bool> (move (v)));
+
+ if (ovr && r != *ovr)
+ fail (l) << "conflicting variable overridabilities";
+
+ ovr = r;
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (l) << "invalid " << n << " attribute value: " << e;
+ }
+ }
+ else if (const value_type* t = find_value_type (root_, n))
{
+ if (!v.null)
+ fail (l) << "unexpected value in attribute " << a;
+
if (type != nullptr && t != type)
- fail (l) << "multiple variable types: " << n << ", " << type->name;
+ fail (l) << "conflicting variable types: " << n << ", "
+ << type->name;
type = t;
- // Fall through.
}
else
fail (l) << "unknown variable attribute " << a;
-
- if (!v.null)
- fail (l) << "unexpected value in attribute " << a;
}
if (type != nullptr && var.type != nullptr)
@@ -4464,15 +6249,33 @@ namespace build2
<< var.type->name << " to " << type->name;
}
- //@@ TODO: the same checks for vis and ovr (when we have the corresponding
- // attributes).
+ if (vis)
+ {
+ // Note that this logic naturally makes sure that a project-private
+ // variable doesn't have global visibility (since it would have been
+ // entered with the project visibility).
+ //
+ if (var.visibility == *vis)
+ vis = nullopt;
+ else if (var.visibility > *vis) // See variable_pool::update().
+ fail (l) << "changing variable " << var << " visibility from "
+ << var.visibility << " to " << *vis;
+ }
- if (type || vis || ovr)
- ctx.var_pool.update (const_cast<variable&> (var),
- type,
- vis ? &*vis : nullptr,
- ovr ? &*ovr : nullptr);
+ if (ovr)
+ {
+ // Note that the overridability incompatibilities are diagnosed by
+ // update(). So we just need to diagnose the project-private case.
+ //
+ if (*ovr && var.owner != &ctx->var_pool)
+ fail (l) << "private variable " << var << " cannot be overridable";
+ }
+ if (type || vis || ovr)
+ var.owner->update (const_cast<variable&> (var),
+ type,
+ vis ? &*vis : nullptr,
+ ovr ? &*ovr : nullptr);
}
void parser::
@@ -4482,7 +6285,7 @@ namespace build2
type kind)
{
attributes as (attributes_pop ());
- const location& l (as.loc);
+ const location& l (as.loc); // This points to value if no attributes.
// Essentially this is an attribute-augmented assign/append/prepend.
//
@@ -4496,16 +6299,18 @@ namespace build2
if (n == "null")
{
+ // @@ Looks like here we assume representationally empty?
+ //
if (rhs && !rhs.empty ()) // Note: null means we had an expansion.
fail (l) << "value with null attribute";
null = true;
// Fall through.
}
- else if (const value_type* t = map_type (n))
+ else if (const value_type* t = find_value_type (root_, n))
{
if (type != nullptr && t != type)
- fail (l) << "multiple value types: " << n << ", " << type->name;
+ fail (l) << "conflicting value types: " << n << ", " << type->name;
type = t;
// Fall through.
@@ -4553,6 +6358,13 @@ namespace build2
bool rhs_type (false);
if (rhs.type != nullptr)
{
+ // Our heuristic is to not reduce typed RHS empty simple values for
+ // prepend/append and additionally for assign provided LHS is a
+ // container.
+ //
+ bool reduce (kind == type::assign &&
+ (type == nullptr || !type->container));
+
// Only consider RHS type if there is no explicit or variable type.
//
if (type == nullptr)
@@ -4563,7 +6375,7 @@ namespace build2
// Reduce this to the untyped value case for simplicity.
//
- untypify (rhs);
+ untypify (rhs, reduce);
}
if (kind == type::assign)
@@ -4592,6 +6404,17 @@ namespace build2
}
else
{
+ auto df = make_diag_frame (
+ [this, var, &l](const diag_record& dr)
+ {
+ if (!l.empty ())
+ {
+ dr << info (l);
+ if (var != nullptr) dr << "variable " << var->name << ' ';
+ dr << "value is assigned here";
+ }
+ });
+
if (kind == type::assign)
{
if (rhs)
@@ -4946,17 +6769,38 @@ namespace build2
if (pre_parse_)
return v; // Empty.
- if (v.type != nullptr || !v || v.as<names> ().size () != 1)
- fail (l) << "expected target before ':'";
-
+ // We used to return this as a <target>:<name> pair but that meant we
+ // could not handle an out-qualified target (which is represented as
+ // <target>@<out> pair). As somewhat of a hack, we deal with this by
+ // changing the order of the name and target to be <name>:<target> with
+ // the qualified case becoming a "triple pair" <name>:<target>@<out>.
+ //
+ // @@ This is actually not great since it's possible to observe such a
+ //    triple pair, for example with `print (file{x}@./:y)`.
+ //
if (n.type != nullptr || !n || n.as<names> ().size () != 1 ||
n.as<names> ()[0].pattern)
fail (nl) << "expected variable name after ':'";
- names& ns (v.as<names> ());
+ names& ns (n.as<names> ());
ns.back ().pair = ':';
- ns.push_back (move (n.as<names> ().back ()));
- return v;
+
+ if (v.type == nullptr && v)
+ {
+ names& ts (v.as<names> ());
+
+ size_t s (ts.size ());
+ if (s == 1 || (s == 2 && ts.front ().pair == '@'))
+ {
+ ns.push_back (move (ts.front ()));
+ if (s == 2)
+ ns.push_back (move (ts.back ()));
+
+ return n;
+ }
+ }
+
+ fail (l) << "expected target before ':'" << endf;
}
else
{
@@ -5025,8 +6869,13 @@ namespace build2
}
pair<bool, location> parser::
- attributes_push (token& t, type& tt, bool standalone)
+ attributes_push (token& t, type& tt, bool standalone, bool next_token)
{
+ // To make sure that the attributes are not standalone we need to read the
+ // token which follows ']'.
+ //
+ assert (standalone || next_token);
+
location l (get_location (t));
bool has (tt == type::lsbrace);
@@ -5049,6 +6898,10 @@ namespace build2
// Parse the attribute name with expansion (we rely on this in some
// old and hairy tests).
//
+ // Note that the attributes lexer mode does not recognize `{}@` as
+ // special and we rely on that in the rule hint attributes
+ // (libs@rule_hint=cxx).
+ //
const location l (get_location (t));
names ns (
@@ -5090,32 +6943,33 @@ namespace build2
}
while (tt != type::rsbrace);
}
+ else
+ has = false; // `[]` doesn't count.
if (tt != type::rsbrace)
fail (t) << "expected ']' instead of " << t;
- next (t, tt);
-
- if (tt == type::newline || tt == type::eos)
+ if (next_token)
{
- if (!standalone)
- fail (t) << "standalone attributes";
+ next (t, tt);
+
+ if (tt == type::newline || tt == type::eos)
+ {
+ if (!standalone)
+ fail (t) << "standalone attributes";
+ }
+ //
+ // Verify that the attributes are separated from the following word or
+ // "word-producing" token.
+ //
+ else if (!t.separated && (tt == type::word ||
+ tt == type::dollar ||
+ tt == type::lparen ||
+ tt == type::lcbrace))
+ fail (t) << "whitespace required after attributes" <<
+ info (l) << "use the '\\[' escape sequence if this is a wildcard "
+ << "pattern";
}
- //
- // We require attributes to be separated from the following word or
- // "word-producing" tokens (`$` for variable expansions/function calls,
- // `(` for eval contexts, and `{` for name generation) to reduce the
- // possibility of confusing them with wildcard patterns. Consider:
- //
- // ./: [abc]-foo.txt
- //
- else if (!t.separated && (tt == type::word ||
- tt == type::dollar ||
- tt == type::lparen ||
- tt == type::lcbrace))
- fail (t) << "whitespace required after attributes" <<
- info (l) << "use the '\\[' escape sequence if this is a wildcard "
- << "pattern";
return make_pair (has, l);
}
@@ -5350,9 +7204,11 @@ namespace build2
// May throw invalid_path.
//
auto include_pattern =
- [&r, &append, &include_match, sp, &l, this] (string&& p,
- optional<string>&& e,
- bool a)
+ [this,
+ &append, &include_match,
+ &r, sp, &l, &dir] (string&& p,
+ optional<string>&& e,
+ bool a)
{
// If we don't already have any matches and our pattern doesn't contain
// multiple recursive wildcards, then the result will be unique and we
@@ -5399,14 +7255,62 @@ namespace build2
// multiple entries for each pattern.
//
if (!interm)
- d.appf (move (m).representation (), optional<string> (d.e));
+ {
+ // If the extension is empty (meaning there should be no extension,
+ // for example hxx{Q*.}), skip entries with extensions.
+ //
+ if (!d.e || !d.e->empty () || m.extension_cstring () == nullptr)
+ d.appf (move (m).representation (), optional<string> (d.e));
+ }
return true;
};
+ const function<bool (const dir_entry&)> dangling (
+ [&dir] (const dir_entry& de)
+ {
+ bool sl (de.ltype () == entry_type::symlink);
+
+ const path& n (de.path ());
+
+ // One case where this turned out to be not worth it in practice
+ // (too much noise) is backlinks to executables (and the
+ // associated DLL assemblies for Windows). So we now have this
+ // heuristic that if this looks like an executable (or DLL for
+ // Windows), then we omit the warning. On POSIX, where executables
+ // don't have extensions, we will consider it an executable only if
+ // we are not looking for directories (which also normally don't
+ // have extension).
+ //
+ // @@ PEDANTIC: re-enable if --pedantic.
+ //
+ if (sl)
+ {
+ string e (n.extension ());
+
+ if ((e.empty () && !dir) ||
+ path_traits::compare (e, "exe") == 0 ||
+ path_traits::compare (e, "dll") == 0 ||
+ path_traits::compare (e, "pdb") == 0 || // .{exe,dll}.pdb
+ (path_traits::compare (e, "dlls") == 0 && // .exe.dlls assembly
+ path_traits::compare (n.base ().extension (), "exe") == 0))
+ return true;
+ }
+
+ warn << "skipping "
+ << (sl ? "dangling symlink" : "inaccessible entry")
+ << ' ' << de.base () / n;
+
+ return true;
+ });
+
try
{
- path_search (path (move (p)), process, *sp);
+ path_search (path (move (p)),
+ process,
+ *sp,
+ path_match_flags::follow_symlinks,
+ dangling);
}
catch (const system_error& e)
{
@@ -5574,6 +7478,7 @@ namespace build2
if ((n.pair & 0x02) != 0)
{
e = move (n.type);
+ n.type.clear ();
// Remove non-empty extension from the name (it got to be there, see
// above).
@@ -5851,9 +7756,35 @@ namespace build2
bool concat_quoted_first (false);
name concat_data;
- auto concat_typed = [this, &vnull, &vtype,
- &concat, &concat_data] (value&& rhs,
- const location& loc)
+ auto concat_diag_multiple = [this] (const location& loc,
+ const char* what_expansion)
+ {
+ diag_record dr (fail (loc));
+
+ dr << "concatenating " << what_expansion << " contains multiple values";
+
+ // See if this looks like a subscript without an evaluation context and
+ // help the user out.
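+ //
+ // For example (hypothetical), `$x[1]` where $x expands to multiple
+ // values would end up here; wrapping it as `($x[1])` makes the
+ // subscript take effect.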
+ //
+ if (mode () != lexer_mode::eval)
+ {
+ const token& t (peeked ()); // Should be peeked at.
+
+ if (t.type == type::word &&
+ t.qtype == quote_type::unquoted &&
+ t.value[0] == '[')
+ {
+ dr << info << "wrap it in (...) evaluation context if this "
+ << "is value subscript";
+ }
+ }
+ };
+
+ auto concat_typed = [this, what, &vnull, &vtype,
+ &concat, &concat_data,
+ &concat_diag_multiple] (value&& rhs,
+ const location& loc,
+ const char* what_expansion)
{
// If we have no LHS yet, then simply copy value/type.
//
@@ -5870,6 +7801,10 @@ namespace build2
// RHS.
//
+ // Note that if RHS contains multiple values then we expect the result
+ // to be a single value somehow or, more likely, there to be no
+ // suitable $builtin.concat() overload.
+ //
a.push_back (move (rhs));
const char* l ((a[0].type != nullptr ? a[0].type->name : "<untyped>"));
@@ -5886,7 +7821,10 @@ namespace build2
dr << info << "use quoting to force untyped concatenation";
});
- p = ctx.functions.try_call (
+ if (ctx == nullptr)
+ fail << "literal " << what << " expected";
+
+ p = ctx->functions.try_call (
scope_, "builtin.concat", vector_view<value> (a), loc);
}
@@ -5908,18 +7846,22 @@ namespace build2
if (!vnull)
{
if (vtype != nullptr)
- untypify (rhs);
+ untypify (rhs, true /* reduce */);
names& d (rhs.as<names> ());
- // If the value is empty, then untypify() will (typically; no pun
- // intended) represent it as an empty sequence of names rather than
- // a sequence of one empty name. This is usually what we need (see
- // simple_reverse() for details) but not in this case.
+ // If the value is empty, then we asked untypify() to reduce it to
+ // an empty sequence of names rather than a sequence of one empty
+ // name.
//
- if (!d.empty ())
+ if (size_t n = d.size ())
{
- assert (d.size () == 1); // Must be a single value.
+ if (n != 1)
+ {
+ assert (what_expansion != nullptr);
+ concat_diag_multiple (loc, what_expansion);
+ }
+
concat_data = move (d[0]);
}
}
@@ -6026,6 +7968,8 @@ namespace build2
// continue accumulating or inject. We inject if the next token is not a
// word, var expansion, or eval context or if it is separated.
//
+ optional<pair<const value_type*, name>> path_concat; // Backup.
+
if (concat && last_concat ())
{
// Concatenation does not affect the tokens we get, only what we do
@@ -6065,6 +8009,13 @@ namespace build2
// dir/{$str}
// file{$str}
//
+ // And yet another exception: if the type is path or dir_path and the
+ // pattern mode is not ignore, then we will inject to try our luck in
+ // interpreting the concatenation result as a path pattern. This makes
+ // sure patterns like `$src_base/*.txt` work, naturally. Failing that,
+ // we will handle this concatenation as we do for other types (via the
+ // path_concat backup).
+ //
// A concatenation cannot produce value/NULL.
//
@@ -6076,12 +8027,14 @@ namespace build2
bool e1 (tt == type::lcbrace && !peeked ().separated);
bool e2 (pp || dp != nullptr || tp != nullptr);
+ const value_type* pt (&value_traits<path>::value_type);
+ const value_type* dt (&value_traits<dir_path>::value_type);
+
if (e1 || e2)
{
- if (vtype == &value_traits<path>::value_type ||
- vtype == &value_traits<string>::value_type)
+ if (vtype == pt || vtype == &value_traits<string>::value_type)
; // Representation is already in concat_data.value.
- else if (vtype == &value_traits<dir_path>::value_type)
+ else if (vtype == dt)
concat_data.value = move (concat_data.dir).representation ();
else
{
@@ -6096,6 +8049,20 @@ namespace build2
vtype = nullptr;
// Fall through to injection.
}
+ else if (pmode != pattern_mode::ignore &&
+ (vtype == pt || vtype == dt))
+ {
+ path_concat = make_pair (vtype, concat_data);
+
+ // Note: for path the representation is already in
+ // concat_data.value.
+ //
+ if (vtype == dt)
+ concat_data.value = move (concat_data.dir).representation ();
+
+ vtype = nullptr;
+ // Fall through to injection.
+ }
else
{
// This is either a simple name (untyped concatenation; in which
@@ -6189,7 +8156,7 @@ namespace build2
//
names ns;
ns.push_back (name (move (val)));
- concat_typed (value (move (ns)), get_location (t));
+ concat_typed (value (move (ns)), get_location (t), nullptr);
}
else
{
@@ -6291,6 +8258,8 @@ namespace build2
if (ttp == nullptr)
ppat = pinc = false;
+ else if (ttp->factory == nullptr)
+ fail (loc) << "abstract target type " << ttp->name << "{}";
}
}
@@ -6356,7 +8325,7 @@ namespace build2
// See if this is a pattern, path or regex.
//
// A path pattern either contains an unquoted wildcard character or,
- // in the curly context, start with unquoted/unescaped `+`.
+ // in the curly context, starts with unquoted/unescaped `+`.
//
// A regex pattern starts with unquoted/unescaped `~` followed by a
// non-alphanumeric delimiter and has the following form:
@@ -6434,7 +8403,7 @@ namespace build2
// Note that we have to check for regex patterns first since
// they may also be detected as path patterns.
//
- if (!quoted_first && regex_pattern ())
+ if (!quoted_first && !path_concat && regex_pattern ())
{
// Note: we may decide to support regex-based name generation
// some day (though a substitution won't make sense here).
@@ -6452,6 +8421,9 @@ namespace build2
? scope_->find_target_type (*tp)
: nullptr);
+ if (ttp != nullptr && ttp->factory == nullptr)
+ fail (loc) << "abstract target type " << ttp->name << "{}";
+
if (tp == nullptr || ttp != nullptr)
{
if (pmode == pattern_mode::detect)
@@ -6502,7 +8474,7 @@ namespace build2
// there isn't any good reason to; see also to_stream(name) for
// the corresponding serialization logic).
//
- if (!quoted_first && regex_pattern ())
+ if (!quoted_first && !path_concat && regex_pattern ())
{
const char* w;
if (val[0] == '~')
@@ -6560,6 +8532,24 @@ namespace build2
}
}
+ // If this is a concatenation of the path or dir_path type and it is
+ // not a pattern, then handle it in the same way as concatenations of
+ // other types (see above).
+ //
+ if (path_concat && !pat)
+ {
+ ns.push_back (move (path_concat->second));
+
+ // Restore the type information if that's the only name.
+ //
+ if (start == ns.size () && last_token ())
+ vtype = path_concat->first;
+
+ // Restart the loop.
+ //
+ continue;
+ }
+
// If we are a second half of a pair, add another first half
// unless this is the first instance.
//
@@ -6614,6 +8604,9 @@ namespace build2
//
if (tt == type::dollar || tt == type::lparen)
{
+ if (ctx == nullptr)
+ fail << "literal " << what << " expected";
+
// These cases are pretty similar in that in both we quickly end up
// with a list of names that we need to splice into the result.
//
@@ -6635,11 +8628,15 @@ namespace build2
// token is a paren or a word, we turn it on and switch to the eval
// mode if what we get next is a paren.
//
- // Also sniff out the special variables string from mode data for
- // the ad hoc $() handling below.
- //
mode (lexer_mode::variable);
+ // Sniff out the special variables string from mode data and use
+ // that to recognize special variables in the ad hoc $() handling
+ // below.
+ //
+ // Note: must be done before calling next() which may expire the
+ // mode.
+ //
auto special = [s = reinterpret_cast<const char*> (mode_data ())]
(const token& t) -> char
{
@@ -6678,156 +8675,202 @@ namespace build2
next (t, tt);
loc = get_location (t);
- name qual;
- string name;
-
- if (t.separated)
- ; // Leave the name empty to fail below.
- else if (tt == type::word)
+ if (tt == type::escape)
{
- name = move (t.value);
+ // For now we only support all the simple C/C++ escape sequences
+ // plus \0 (which in C/C++ is an octal escape sequence). See the
+ // lexer part for details.
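+ //
+ // For example (assuming the usual buildfile escape expansion syntax),
+ // $\n expands to a newline character here.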
+ //
+ // Note: cannot be subscripted.
+ //
+ if (!pre_parse_)
+ {
+ string s;
+ switch (char c = t.value[0])
+ {
+ case '\'':
+ case '"':
+ case '?':
+ case '\\': s = c; break;
+ case '0': s = '\0'; break;
+ case 'a': s = '\a'; break;
+ case 'b': s = '\b'; break;
+ case 'f': s = '\f'; break;
+ case 'n': s = '\n'; break;
+ case 'r': s = '\r'; break;
+ case 't': s = '\t'; break;
+ case 'v': s = '\v'; break;
+ default:
+ assert (false);
+ }
+
+ result_data = name (move (s));
+ what = "escape sequence expansion";
+ }
+
+ tt = peek ();
}
- else if (tt == type::lparen)
+ else
{
- expire_mode ();
- mode (lexer_mode::eval, '@');
- next_with_attributes (t, tt);
+ names qual;
+ string name;
- // Handle the $(x) case ad hoc. We do it this way in order to get
- // the variable name even during pre-parse. It should also be
- // faster.
- //
- char c;
- if ((tt == type::word
- ? path_traits::rfind_separator (t.value) == string::npos
- : (c = special (t))) &&
- peek () == type::rparen)
+ if (t.separated)
+ ; // Leave the name empty to fail below.
+ else if (tt == type::word)
{
- name = (tt == type::word ? move (t.value) : string (1, c));
- next (t, tt); // Get `)`.
+ name = move (t.value);
}
- else
+ else if (tt == type::lparen)
{
- using name_type = build2::name;
+ expire_mode ();
+ mode (lexer_mode::eval, '@');
+ next_with_attributes (t, tt);
- //@@ OUT will parse @-pair and do well?
+ // Handle the $(x) case ad hoc. We do it this way in order to
+ // get the variable name even during pre-parse. It should also
+ // be faster.
//
- values vs (parse_eval (t, tt, pmode));
-
- if (!pre_parse_)
+ char c ('\0');
+ if ((tt == type::word
+ ? path_traits::rfind_separator (t.value) == string::npos
+ : (c = special (t))) &&
+ peek () == type::rparen)
{
- if (vs.size () != 1)
- fail (loc) << "expected single variable/function name";
+ name = (tt == type::word ? move (t.value) : string (1, c));
+ next (t, tt); // Get `)`.
+ }
+ else
+ {
+ using name_type = build2::name;
- value& v (vs[0]);
+ values vs (parse_eval (t, tt, pmode));
- if (!v)
- fail (loc) << "null variable/function name";
+ if (!pre_parse_)
+ {
+ if (vs.size () != 1)
+ fail (loc) << "expected single variable/function name";
- names storage;
- vector_view<name_type> ns (reverse (v, storage)); // Movable.
- size_t n (ns.size ());
+ value& v (vs[0]);
- // We cannot handle scope-qualification in the eval context as
- // we do for target-qualification (see eval-qual) since then
- // we would be treating all paths as qualified variables. So
- // we have to do it here.
- //
- if (n == 2 && ns[0].pair == ':') // $(foo: x)
- {
- qual = move (ns[0]);
+ if (!v)
+ fail (loc) << "null variable/function name";
- if (qual.empty ())
- fail (loc) << "empty variable/function qualification";
- }
- else if (n == 2 && ns[0].directory ()) // $(foo/ x)
- {
- qual = move (ns[0]);
- qual.pair = '/';
- }
- else if (n > 1)
- fail (loc) << "expected variable/function name instead of '"
- << ns << "'";
+ names storage;
+ vector_view<name_type> ns (
+ reverse (v, storage, true /* reduce */)); // Movable.
+ size_t n (ns.size ());
- // Note: checked for empty below.
- //
- if (!ns[n - 1].simple ())
- fail (loc) << "expected variable/function name instead of '"
- << ns[n - 1] << "'";
+ // We cannot handle scope-qualification in the eval context
+ // as we do for target-qualification (see eval-qual) since
+ // then we would be treating all paths as qualified
+ // variables. So we have to do it here.
+ //
+ if (n >= 2 && ns[0].pair == ':') // $(foo: x)
+ {
+ // Note: name is first (see eval for details).
+ //
+ qual.push_back (move (ns[1]));
- size_t p;
- if (n == 1 && // $(foo/x)
- (p = path_traits::rfind_separator (ns[0].value)) !=
- string::npos)
- {
- // Note that p cannot point to the last character since then
- // it would have been a directory, not a simple name.
+ if (qual.back ().empty ())
+ fail (loc) << "empty variable/function qualification";
+
+ if (n > 2)
+ qual.push_back (move (ns[2]));
+
+ // Move name to the last position (see below).
+ //
+ swap (ns[0], ns[n - 1]);
+ }
+ else if (n == 2 && ns[0].directory ()) // $(foo/ x)
+ {
+ qual.push_back (move (ns[0]));
+ qual.back ().pair = '/';
+ }
+ else if (n > 1)
+ fail (loc) << "expected variable/function name instead of '"
+ << ns << "'";
+
+ // Note: checked for empty below.
//
- string& s (ns[0].value);
+ if (!ns[n - 1].simple ())
+ fail (loc) << "expected variable/function name instead of '"
+ << ns[n - 1] << "'";
- name = string (s, p + 1);
- s.resize (p + 1);
- qual = name_type (dir_path (move (s)));
- qual.pair = '/';
+ size_t p;
+ if (n == 1 && // $(foo/x)
+ (p = path_traits::rfind_separator (ns[0].value)) !=
+ string::npos)
+ {
+ // Note that p cannot point to the last character since
+ // then it would have been a directory, not a simple name.
+ //
+ string& s (ns[0].value);
+
+ name = string (s, p + 1);
+ s.resize (p + 1);
+ qual.push_back (name_type (dir_path (move (s))));
+ qual.back ().pair = '/';
+ }
+ else
+ name = move (ns[n - 1].value);
}
- else
- name = move (ns[n - 1].value);
}
}
- }
- else
- fail (t) << "expected variable/function name instead of " << t;
-
- if (!pre_parse_ && name.empty ())
- fail (loc) << "empty variable/function name";
-
- // Figure out whether this is a variable expansion with potential
- // subscript or a function call.
- //
- if (sub) enable_subscript ();
- tt = peek ();
+ else
+ fail (t) << "expected variable/function name instead of " << t;
- // Note that we require function call opening paren to be
- // unseparated; consider: $x ($x == 'foo' ? 'FOO' : 'BAR').
- //
- if (tt == type::lparen && !peeked ().separated)
- {
- // Function call.
- //
- next (t, tt); // Get '('.
- mode (lexer_mode::eval, '@');
- next_with_attributes (t, tt);
+ if (!pre_parse_ && name.empty ())
+ fail (loc) << "empty variable/function name";
- // @@ Should we use (target/scope) qualification (of name) as the
- // context in which to call the function? Hm, interesting...
+ // Figure out whether this is a variable expansion with potential
+ // subscript or a function call.
//
- values args (parse_eval (t, tt, pmode));
-
if (sub) enable_subscript ();
tt = peek ();
- // Note that we "move" args to call().
+ // Note that we require function call opening paren to be
+ // unseparated; consider: $x ($x == 'foo' ? 'FOO' : 'BAR').
//
- if (!pre_parse_)
+ if (tt == type::lparen && !peeked ().separated)
{
- result_data = ctx.functions.call (scope_, name, args, loc);
- what = "function call";
+ // Function call.
+ //
+ next (t, tt); // Get '('.
+ mode (lexer_mode::eval, '@');
+ next_with_attributes (t, tt);
+
+ // @@ Should we use (target/scope) qualification (of name) as
+ // the context in which to call the function? Hm, interesting...
+ //
+ values args (parse_eval (t, tt, pmode));
+
+ if (sub) enable_subscript ();
+ tt = peek ();
+
+ // Note that we "move" args to call().
+ //
+ if (!pre_parse_)
+ {
+ result_data = ctx->functions.call (scope_, name, args, loc);
+ what = "function call";
+ }
+ else
+ lookup_function (move (name), loc);
}
else
- lookup_function (move (name), loc);
- }
- else
- {
- // Variable expansion.
- //
- lookup l (lookup_variable (move (qual), move (name), loc));
-
- if (!pre_parse_)
{
- if (l.defined ())
- result = l.value; // Otherwise leave as NULL result_data.
+ // Variable expansion.
+ //
+ lookup l (lookup_variable (move (qual), move (name), loc));
+
+ if (!pre_parse_)
+ {
+ if (l.defined ())
+ result = l.value; // Otherwise leave as NULL result_data.
- what = "variable expansion";
+ what = "variable expansion";
+ }
}
}
}
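
The escape handling added above is small enough to restate standalone. A minimal sketch of the same mapping (the helper name and the assert-based driver are illustrative, not part of the parser):

    // Maps the supported escape letters to their character values, mirroring
    // the switch above; anything else is rejected.
    //
    #include <cassert>
    #include <stdexcept>

    static char
    unescape_char (char c)
    {
      switch (c)
      {
      case '\'':
      case '"':
      case '?':
      case '\\': return c;
      case '0': return '\0';
      case 'a': return '\a';
      case 'b': return '\b';
      case 'f': return '\f';
      case 'n': return '\n';
      case 'r': return '\r';
      case 't': return '\t';
      case 'v': return '\v';
      default: throw std::invalid_argument ("unsupported escape sequence");
      }
    }

    int
    main ()
    {
      assert (unescape_char ('n') == '\n');
      assert (unescape_char ('0') == '\0');
    }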
@@ -6859,85 +8902,132 @@ namespace build2
// Handle value subscript.
//
- if (tt == type::lsbrace)
+ if (mode () == lexer_mode::eval) // Note: not if(sub)!
{
- location bl (get_location (t));
- next (t, tt); // `[`
- mode (lexer_mode::subscript, '\0' /* pair */);
- next (t, tt);
-
- location l (get_location (t));
- value v (
- tt != type::rsbrace
- ? parse_value (t, tt, pattern_mode::ignore, "value subscript")
- : value (names ()));
-
- if (tt != type::rsbrace)
+ while (tt == type::lsbrace)
{
- // Note: wildcard pattern should have `]` as well so no escaping
- // suggestion.
- //
- fail (t) << "expected ']' instead of " << t;
- }
+ location bl (get_location (t));
+ next (t, tt); // `[`
+ mode (lexer_mode::subscript, '\0' /* pair */);
+ next (t, tt);
- if (!pre_parse_)
- {
- uint64_t j;
- try
- {
- j = convert<uint64_t> (move (v));
- }
- catch (const invalid_argument& e)
+ location l (get_location (t));
+ value v (
+ tt != type::rsbrace
+ ? parse_value (t, tt, pattern_mode::ignore, "value subscript")
+ : value (names ()));
+
+ if (tt != type::rsbrace)
{
- fail (l) << "invalid value subscript: " << e <<
- info (bl) << "use the '\\[' escape sequence if this is a "
- << "wildcard pattern" << endf;
+ // Note: wildcard pattern should have `]` as well so no escaping
+ // suggestion.
+ //
+ fail (t) << "expected ']' instead of " << t;
}
- // Similar to expanding an undefined variable, we return NULL if
- // the index is out of bounds.
- //
- // Note that result may or may not point to result_data.
- //
- if (result->null)
- result_data = value ();
- else if (result->type == nullptr)
+ if (!pre_parse_)
{
- const names& ns (result->as<names> ());
-
- // Pair-aware subscript.
+ // For type-specific subscript implementations we pass the
+ // subscript value as is.
//
- names r;
- for (auto i (ns.begin ()); i != ns.end (); ++i, --j)
+ if (auto f = (result->type != nullptr
+ ? result->type->subscript
+ : nullptr))
{
- if (j == 0)
+ result_data = f (*result, &result_data, move (v), l, bl);
+ }
+ else
+ {
+ uint64_t j;
+ try
{
- r.push_back (*i);
- if (i->pair)
- r.push_back (*++i);
- break;
+ j = convert<uint64_t> (move (v));
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (l) << "invalid value subscript: " << e <<
+ info (bl) << "use the '\\[' escape sequence if this is a "
+ << "wildcard pattern" << endf;
+ }
+
+ // Similar to expanding an undefined variable, we return NULL
+ // if the index is out of bounds.
+ //
+ // Note that result may or may not point to result_data.
+ //
+ if (result->null)
+ result_data = value ();
+ else if (result->type == nullptr)
+ {
+ const names& ns (result->as<names> ());
+
+ // Pair-aware subscript.
+ //
+ names r;
+ for (auto i (ns.begin ()); i != ns.end (); ++i, --j)
+ {
+ if (j == 0)
+ {
+ r.push_back (*i);
+ if (i->pair)
+ r.push_back (*++i);
+ break;
+ }
+
+ if (i->pair)
+ ++i;
+ }
+
+ result_data = r.empty () ? value () : value (move (r));
}
+ else
+ {
+ // Similar logic to parse_for().
+ //
+ const value_type* etype (result->type->element_type);
+
+ value val (result == &result_data
+ ? value (move (result_data))
+ : value (*result));
+
+ untypify (val, false /* reduce */);
- if (i->pair)
- ++i;
+ names& ns (val.as<names> ());
+
+ // Pair-aware subscript.
+ //
+ names r;
+ for (auto i (ns.begin ()); i != ns.end (); ++i, --j)
+ {
+ bool p (i->pair);
+
+ if (j == 0)
+ {
+ r.push_back (move (*i));
+ if (p)
+ r.push_back (move (*++i));
+ break;
+ }
+
+ if (p)
+ ++i;
+ }
+
+ result_data = r.empty () ? value () : value (move (r));
+
+ if (etype != nullptr)
+ typify (result_data, *etype, nullptr /* var */);
+ }
}
- result_data = r.empty () ? value () : value (move (r));
- }
- else
- {
- // @@ TODO: we would want to return a value with element type.
- //
- //result_data = ...
- fail (l) << "typed value subscript not yet supported" <<
- info (bl) << "use the '\\[' escape sequence if this is a "
- << "wildcard pattern";
+ result = &result_data;
}
- result = &result_data;
+ // See if we have chained subscript.
+ //
+ enable_subscript ();
+ tt = peek ();
}
-
- tt = peek ();
}
if (pre_parse_)
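
The pair-aware subscript loops added above treat a name and its pair partner as one logical element. A standalone sketch of that indexing, assuming a simplified element type (build2's name is richer):

    // Minimal sketch (assumed simplified type): subscripting a list where an
    // element may be the first half of a pair, in which case its partner
    // travels with it and the two count as one logical element.
    //
    #include <cassert>
    #include <cstdint>
    #include <string>
    #include <vector>

    struct item
    {
      std::string value;
      bool pair; // True if the next element is this one's pair partner.
    };

    static std::vector<item>
    subscript (const std::vector<item>& ns, std::uint64_t j)
    {
      std::vector<item> r;

      for (auto i (ns.begin ()); i != ns.end (); ++i, --j)
      {
        if (j == 0)
        {
          r.push_back (*i);
          if (i->pair)
            r.push_back (*++i);
          break;
        }

        if (i->pair)
          ++i; // Skip the partner; the pair counted as one element.
      }

      return r; // Empty if the index is out of bounds (like the NULL result).
    }

    int
    main ()
    {
      std::vector<item> ns {{"key", true}, {"val", false}, {"plain", false}};

      assert (subscript (ns, 0).size () == 2);               // The key@val pair.
      assert (subscript (ns, 1).front ().value == "plain");
      assert (subscript (ns, 2).empty ());                   // Out of bounds.
    }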
@@ -6981,7 +9071,8 @@ namespace build2
// then it should not be overloaded for a type). In a quoted
// context we use $string() which returns a "canonical
// representation" (e.g., a directory path without a trailing
- // slash).
+ // slash). Note: looks like we use typed $concat() now in the
+ // unquoted context.
//
if (result->type != nullptr && quoted)
{
@@ -7004,7 +9095,10 @@ namespace build2
dr << info (loc) << "while converting " << t << " to string";
});
- p = ctx.functions.try_call (
+ if (ctx == nullptr)
+ fail << "literal " << what << " expected";
+
+ p = ctx->functions.try_call (
scope_, "string", vector_view<value> (&result_data, 1), loc);
}
@@ -7012,7 +9106,11 @@ namespace build2
fail (loc) << "no string conversion for " << t;
result_data = move (p.first);
- untypify (result_data); // Convert to untyped simple name.
+
+ // Convert to untyped simple name reducing empty string to empty
+ // names as an optimization.
+ //
+ untypify (result_data, true /* reduce */);
}
if ((concat && vtype != nullptr) || // LHS typed.
@@ -7021,13 +9119,13 @@ namespace build2
if (result != &result_data) // Same reason as above.
result = &(result_data = *result);
- concat_typed (move (result_data), loc);
+ concat_typed (move (result_data), loc, what);
}
//
// Untyped concatenation. Note that if RHS is NULL/empty, we still
// set the concat flag.
//
- else if (!result->null && !result->empty ())
+ else if (!result->null)
{
// This can only be an untyped value.
//
@@ -7035,34 +9133,36 @@ namespace build2
//
const names& lv (cast<names> (*result));
- // This should be a simple value or a simple directory.
- //
- if (lv.size () > 1)
- fail (loc) << "concatenating " << what << " contains multiple "
- << "values";
+ if (size_t s = lv.size ())
+ {
+ // This should be a simple value or a simple directory.
+ //
+ if (s > 1)
+ concat_diag_multiple (loc, what);
- const name& n (lv[0]);
+ const name& n (lv[0]);
- if (n.qualified ())
- fail (loc) << "concatenating " << what << " contains project "
- << "name";
+ if (n.qualified ())
+ fail (loc) << "concatenating " << what << " contains project "
+ << "name";
- if (n.typed ())
- fail (loc) << "concatenating " << what << " contains type";
+ if (n.typed ())
+ fail (loc) << "concatenating " << what << " contains target type";
- if (!n.dir.empty ())
- {
- if (!n.value.empty ())
- fail (loc) << "concatenating " << what << " contains "
- << "directory";
+ if (!n.dir.empty ())
+ {
+ if (!n.value.empty ())
+ fail (loc) << "concatenating " << what << " contains "
+ << "directory";
- // Note that here we cannot assume what's in dir is really a
- // path (think s/foo/bar/) so we have to reverse it exactly.
- //
- concat_data.value += n.dir.representation ();
+ // Note that here we cannot assume what's in dir is really a
+ // path (think s/foo/bar/) so we have to reverse it exactly.
+ //
+ concat_data.value += n.dir.representation ();
+ }
+ else
+ concat_data.value += n.value;
}
- else
- concat_data.value += n.value;
}
// The same little hack as in the word case ($empty+foo).
@@ -7088,16 +9188,27 @@ namespace build2
// Nothing else to do here if the result is NULL or empty.
//
- if (result->null || result->empty ())
- continue;
-
- // @@ Could move if nv is result_data; see untypify().
+ // Note that we cannot use value::empty() here since we are
+ // interested in representationally empty.
//
- names nv_storage;
- names_view nv (reverse (*result, nv_storage));
+ if (!result->null)
+ {
+ // @@ Could move if nv is result_data; see untypify().
+ //
+ // Nuance: we should only be reducing empty simple value to empty
+ // list if we are not the second half of a pair.
+ //
+ bool pair (!ns.empty () && ns.back ().pair);
+
+ names nv_storage;
+ names_view nv (reverse (*result, nv_storage, !pair /* reduce */));
- count = splice_names (
- loc, nv, move (nv_storage), ns, what, pairn, pp, dp, tp);
+ if (!nv.empty ())
+ {
+ count = splice_names (
+ loc, nv, move (nv_storage), ns, what, pairn, pp, dp, tp);
+ }
+ }
}
continue;
@@ -7332,14 +9443,16 @@ namespace build2
buildspec parser::
parse_buildspec (istream& is, const path_name& in)
{
- // We do "effective escaping" and only for ['"\$(] (basically what's
- // necessary inside a double-quoted literal plus the single quote).
+ // We do "effective escaping" of the special `'"\$(` characters (basically
+ // what's escapable inside a double-quoted literal plus the single quote;
+ // note, however, that we exclude line continuations and `)` since they
+ // would make directory paths on Windows unusable).
//
path_ = &in;
lexer l (is, *path_, 1 /* line */, "\'\"\\$(");
lexer_ = &l;
- root_ = &ctx.global_scope.rw ();
+ root_ = &ctx->global_scope.rw ();
scope_ = root_;
target_ = nullptr;
prerequisite_ = nullptr;
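
To see what restricting "effective escaping" to this character set buys on Windows, here is a hypothetical helper that only honors a backslash in front of the listed characters and keeps it literal otherwise (illustrative only, not the lexer's implementation):

    // A backslash only escapes a character from the given set; in front of
    // anything else it is kept literally, so C:\build\dir survives unmangled.
    //
    #include <cassert>
    #include <cstring>
    #include <string>

    static std::string
    effective_unescape (const std::string& s, const char* special)
    {
      std::string r;

      for (std::string::size_type i (0); i != s.size (); ++i)
      {
        if (s[i] == '\\'       &&
            i + 1 != s.size () &&
            std::strchr (special, s[i + 1]) != nullptr)
          r += s[++i]; // Escaped special character.
        else
          r += s[i];   // Everything else, including stray backslashes.
      }

      return r;
    }

    int
    main ()
    {
      const char* special ("'\"\\$(");

      assert (effective_unescape ("\\$(x)", special) == "$(x)");
      assert (effective_unescape ("C:\\build\\dir", special) == "C:\\build\\dir");
    }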
@@ -7574,8 +9687,11 @@ namespace build2
}
lookup parser::
- lookup_variable (name&& qual, string&& name, const location& loc)
+ lookup_variable (names&& qual, string&& name, const location& loc)
{
+ // Note that this function can be called during execute (for example, from
+ // scripts). In particular, this means we cannot use enter_{scope,target}.
+
if (pre_parse_)
return lookup ();
@@ -7587,9 +9703,6 @@ namespace build2
// If we are qualified, it can be a scope or a target.
//
- enter_scope sg;
- enter_target tg;
-
if (qual.empty ())
{
s = scope_;
@@ -7598,36 +9711,70 @@ namespace build2
}
else
{
- switch (qual.pair)
+ // What should we do if we cannot find the qualification (scope or
+ // target)? We can "fall through" to an outer scope (there is always the
+ // global scope backstop), we can return NULL straight away, or we can
+ // fail. It feels like in most cases unknown scope or target is a
+ // mistake and doing anything other than failing is just making things
+ // harder to debug.
+ //
+ switch (qual.front ().pair)
{
case '/':
{
- assert (qual.directory ());
- sg = enter_scope (*this, move (qual.dir));
- s = scope_;
+ assert (qual.front ().directory ());
+
+ dir_path& d (qual.front ().dir);
+ enter_scope::complete_normalize (*scope_, d);
+
+ s = &ctx->scopes.find_out (d);
+
+ if (s->out_path () != d)
+ fail (loc) << "unknown scope " << d << " in scope-qualified "
+ << "variable " << name << " expansion" <<
+ info << "did you forget to include the corresponding buildfile?";
+
break;
}
- case ':':
+ default:
{
- qual.pair = '\0';
+ build2::name n (move (qual.front ())), o;
+
+ if (n.pair)
+ o = move (qual.back ());
+
+ t = enter_target::find_target (*this, n, o, loc, trace);
+
+ if (t == nullptr || !operator>= (t->decl, target_decl::implied)) // VC14
+ {
+ diag_record dr (fail (loc));
+
+ dr << "unknown target " << n;
+
+ if (n.pair && !o.dir.empty ())
+ dr << '@' << o.dir;
- // @@ OUT TODO
+ dr << " in target-qualified variable " << name << " expansion";
+ }
+
+ // Use the target's var_pool for good measure.
//
- tg = enter_target (
- *this, move (qual), build2::name (), true, loc, trace);
- t = target_;
+ s = &t->base_scope ();
+
break;
}
- default: assert (false);
}
}
// Lookup.
//
- if (const variable* pvar = scope_->var_pool ().find (name))
+ if (const variable* pvar =
+ (s != nullptr ? s : scope_)->var_pool ().find (name))
{
auto& var (*pvar);
+ // Note: the order of the following blocks is important.
+
if (p != nullptr)
{
// The lookup depth is a bit of a hack but should be harmless since
@@ -7714,62 +9861,213 @@ namespace build2
return r;
}
+ // file.cxx
+ //
+ extern const dir_path std_export_dir;
+ extern const dir_path alt_export_dir;
+
void parser::
- process_default_target (token& t)
+ process_default_target (token& t, const buildfile* bf)
{
tracer trace ("parser::process_default_target", &path_);
// The logic is as follows: if we have an explicit current directory
- // target, then that's the default target. Otherwise, we take the
- // first target and use it as a prerequisite to create an implicit
- // current directory target, effectively making it the default
- // target via an alias. If there are no targets in this buildfile,
- // then we don't do anything.
+ // target, then that's the default target. Otherwise, we take the first
+ // target and use it as a prerequisite to create an implicit current
+ // directory target, effectively making it the default target via an
+ // alias. If this is a project root buildfile, then also add exported
+ // buildfiles. And if there are no targets in this buildfile, then we
+ // don't do anything (reasonably assuming it's not root).
//
if (default_target_ == nullptr) // No targets in this buildfile.
return;
- target& dt (*default_target_);
-
target* ct (
- const_cast<target*> ( // Ok (serial execution).
- ctx.targets.find (dir::static_type, // Explicit current dir target.
- scope_->out_path (),
- dir_path (), // Out tree target.
- string (),
- nullopt,
- trace)));
-
- if (ct == nullptr)
- {
- l5 ([&]{trace (t) << "creating current directory alias for " << dt;});
-
- // While this target is not explicitly mentioned in the buildfile, we
- // say that we behave as if it were. Thus not implied.
- //
- ct = &ctx.targets.insert (dir::static_type,
- scope_->out_path (),
- dir_path (),
- string (),
- nullopt,
- target_decl::real,
- trace).first;
- // Fall through.
- }
- else if (ct->decl != target_decl::real)
+ const_cast<target*> ( // Ok (serial execution).
+ ctx->targets.find (dir::static_type, // Explicit current dir target.
+ scope_->out_path (),
+ dir_path (), // Out tree target.
+ string (),
+ nullopt,
+ trace)));
+
+ if (ct != nullptr && ct->decl == target_decl::real)
+ ; // Existing and not implied.
+ else
{
- ct->decl = target_decl::real;
- // Fall through.
+ target& dt (*default_target_);
+
+ if (ct == nullptr)
+ {
+ l5 ([&]{trace (t) << "creating current directory alias for " << dt;});
+
+ // While this target is not explicitly mentioned in the buildfile, we
+ // say that we behave as if it were. Thus not implied.
+ //
+ ct = &ctx->targets.insert (dir::static_type,
+ scope_->out_path (),
+ dir_path (),
+ string (),
+ nullopt,
+ target_decl::real,
+ trace).first;
+ }
+ else
+ ct->decl = target_decl::real;
+
+ ct->prerequisites_state_.store (2, memory_order_relaxed);
+ ct->prerequisites_.push_back (prerequisite (dt));
}
- else
- return; // Existing and not implied.
- ct->prerequisites_state_.store (2, memory_order_relaxed);
- ct->prerequisites_.emplace_back (prerequisite (dt));
+ // See if this is a root buildfile and not in a simple project.
+ //
+ if (bf != nullptr &&
+ root_ != nullptr &&
+ root_->root_extra != nullptr &&
+ root_->root_extra->loaded &&
+ *root_->root_extra->project != nullptr &&
+ bf->dir == root_->src_path () &&
+ bf->name == root_->root_extra->buildfile_file.string ())
+ {
+ // See if we have any exported buildfiles.
+ //
+ const dir_path& export_dir (
+ root_->root_extra->altn ? alt_export_dir : std_export_dir);
+
+ dir_path d (root_->src_path () / export_dir);
+ if (exists (d))
+ {
+ // Make sure prerequisites are set.
+ //
+ ct->prerequisites_state_.store (2, memory_order_relaxed);
+
+ const string& build_ext (root_->root_extra->build_ext);
+
+ // Return true if entered any exported buildfiles.
+ //
+ // Note: recursive lambda.
+ //
+ auto iterate = [this, &trace,
+ ct, &build_ext] (const dir_path& d,
+ const auto& iterate) -> bool
+ {
+ bool r (false);
+
+ try
+ {
+ for (const dir_entry& e:
+ dir_iterator (d, dir_iterator::detect_dangling))
+ {
+ switch (e.type ())
+ {
+ case entry_type::directory:
+ {
+ r = iterate (d / path_cast<dir_path> (e.path ()), iterate) || r;
+ break;
+ }
+ case entry_type::regular:
+ {
+ const path& n (e.path ());
+
+ // Besides the buildfile also export buildscript and C++ files
+ // that are used to provide recipe implementations (see
+ // parse_recipe() for details).
+ //
+ string e (n.extension ());
+ if (const target_type* tt = (
+ e == build_ext ? &buildfile::static_type :
+ e == "buildscript" ? &buildscript::static_type :
+ e == "cxx" ||
+ e == "cpp" ||
+ e == "cc" ? &file::static_type : nullptr))
+ {
+ // Enter as if found by search_existing_file(). Note that
+ // entering it as real would cause file_rule not to match
+ // for clean.
+ //
+ // Note that these targets may already be entered (for
+ // example, if already imported).
+ //
+ const target& bf (
+ ctx->targets.insert (*tt,
+ d,
+ (root_->out_eq_src ()
+ ? dir_path ()
+ : out_src (d, *root_)),
+ n.base ().string (),
+ move (e),
+ target_decl::prereq_file,
+ trace).first);
+
+ ct->prerequisites_.push_back (prerequisite (bf));
+ r = true;
+ }
+
+ break;
+ }
+ case entry_type::unknown:
+ {
+ bool sl (e.ltype () == entry_type::symlink);
+
+ fail << (sl ? "dangling symlink" : "inaccessible entry")
+ << ' ' << d / e.path ();
+
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to iterate over " << d << ": " << e;
+ }
+
+ return r;
+ };
+
+ if (iterate (d, iterate))
+ {
+ // Arrange for the exported buildfiles to be installed, recreating
+ // subdirectories inside export/. Essentially, we are arranging for
+ // this:
+ //
+ // build/export/file{*}:
+ // {
+ // install = buildfile/
+ // install.subdirs = true
+ // }
+ //
+ if (cast_false<bool> (root_->vars["install.loaded"]))
+ {
+ enter_scope es (*this, dir_path (export_dir));
+ auto& vars (scope_->target_vars[file::static_type]["*"]);
+
+ // @@ TODO: get cached variables from the module once we have one.
+ //
+ {
+ auto r (vars.insert (*root_->var_pool ().find ("install")));
+
+ if (r.second) // Already set by the user?
+ r.first = path_cast<path> (dir_path ("buildfile"));
+ }
+
+ {
+ auto r (vars.insert (
+ *root_->var_pool (true).find ("install.subdirs")));
+ if (r.second)
+ r.first = true;
+ }
+ }
+ }
+ }
+ }
}
- void parser::
- enter_buildfile (const path& p)
+ template <typename T>
+ const T& parser::
+ enter_buildfile (const path& p, optional<dir_path> out)
{
tracer trace ("parser::enter_buildfile", &path_);
@@ -7777,17 +10075,20 @@ namespace build2
// Figure out if we need out.
//
- dir_path out;
- if (scope_->src_path_ != nullptr &&
- scope_->src_path () != scope_->out_path () &&
- d.sub (scope_->src_path ()))
+ dir_path o;
+ if (out)
+ o = move (*out);
+ else if (root_ != nullptr &&
+ root_->src_path_ != nullptr &&
+ !root_->out_eq_src () &&
+ d.sub (*root_->src_path_))
{
- out = out_src (d, *root_);
+ o = out_src (d, *root_);
}
- ctx.targets.insert<buildfile> (
+ return ctx->targets.insert<T> (
move (d),
- move (out),
+ move (o),
p.leaf ().base ().string (),
p.extension (), // Always specified.
trace);
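
The exported-buildfiles walk above recurses by passing the lambda to itself (a generic lambda cannot refer to itself by name). A minimal standalone sketch of the same idiom:

    // The lambda receives itself as its last argument and calls through it,
    // which is how the directory iteration above recurses into subdirectories.
    //
    #include <cassert>
    #include <cstdint>

    int
    main ()
    {
      auto fact = [] (std::uint64_t n, const auto& fact) -> std::uint64_t
      {
        return n < 2 ? 1 : n * fact (n - 1, fact);
      };

      assert (fact (5, fact) == 120);
    }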
diff --git a/libbuild2/parser.hxx b/libbuild2/parser.hxx
index 0390e26..3e1d0a0 100644
--- a/libbuild2/parser.hxx
+++ b/libbuild2/parser.hxx
@@ -4,6 +4,10 @@
#ifndef LIBBUILD2_PARSER_HXX
#define LIBBUILD2_PARSER_HXX
+#include <exception> // uncaught_exception[s]()
+
+#include <libbutl/ft/exception.hxx> // uncaught_exceptions
+
#include <libbuild2/types.hxx>
#include <libbuild2/forward.hxx>
#include <libbuild2/utility.hxx>
@@ -44,9 +48,19 @@ namespace build2
explicit
parser (context& c, stage s = stage::rest)
: fail ("error", &path_), info ("info", &path_),
- ctx (c),
+ ctx (&c),
stage_ (s) {}
+ // Pattern expansion mode.
+ //
+ enum class pattern_mode
+ {
+ ignore, // Treat as literals.
+ preserve, // Preserve as name pattern.
+ expand, // Expand to non-pattern names.
+ detect // Implementation detail mode (see code for more information).
+ };
+
// Issue diagnostics and throw failed in case of an error.
//
void
@@ -55,14 +69,20 @@ namespace build2
scope* root,
scope& base,
target* = nullptr,
- prerequisite* = nullptr);
+ prerequisite* = nullptr,
+ bool enter_buildfile = true);
void
parse_buildfile (lexer&,
scope* root,
scope& base,
target* = nullptr,
- prerequisite* = nullptr);
+ prerequisite* = nullptr,
+ bool enter_buildfile = true);
+
+ names
+ parse_export_stub (istream& is, const path_name& name,
+ const scope& rs, scope& gs, scope& ts);
buildspec
parse_buildspec (istream&, const path_name&);
@@ -73,12 +93,10 @@ namespace build2
pair<value, token>
parse_variable_value (lexer&, scope&, const dir_path*, const variable&);
- names
- parse_export_stub (istream& is, const path_name& name, scope& r, scope& b)
- {
- parse_buildfile (is, name, &r, b);
- return move (export_value);
- }
+ // Parse an evaluation context (`(...)`).
+ //
+ value
+ parse_eval (lexer&, scope& rs, scope& bs, pattern_mode);
// The above functions may be called again on the same parser instance
// after a reset.
@@ -86,6 +104,25 @@ namespace build2
void
reset ();
+ // Special, context-less mode that can only be used to parse literal
+ // names.
+ //
+ public:
+ static const string name_separators;
+
+ explicit
+ parser (context* c)
+ : fail ("error", &path_), info ("info", &path_),
+ ctx (c),
+ stage_ (stage::rest) {}
+
+ names
+ parse_names (lexer&,
+ const dir_path* base,
+ pattern_mode pmode,
+ const char* what = "name",
+ const string* separators = &name_separators);
+
// Ad hoc parsing results for some cases.
//
// Note that these are not touched by reset().
@@ -97,24 +134,35 @@ namespace build2
// config directive result.
//
- vector<pair<lookup, string>> config_report; // Config value and format.
- bool config_report_new = false; // One of values is new.
+ struct config_report
+ {
+ struct value
+ {
+ lookup val; // Value.
+ string fmt; // Format.
+ string org; // Original variable if config.report.variable.
+ };
+
+ project_name module; // Reporting module name.
+ vector<value> values;
+ bool new_value; // One of values is new.
+ };
+ small_vector<config_report, 1> config_reports;
+
+ // Misc utilities.
+ //
+ public:
+ // Return the value type corresponding to the type name or NULL if the
+ // type name is unknown. Pass project's root scope if known.
+ //
+ static const value_type*
+ find_value_type (const scope* rs, const string& name);
// Recursive descent parser.
//
protected:
using pattern_type = name::pattern_type;
- // Pattern expansion mode.
- //
- enum class pattern_mode
- {
- ignore, // Treat as literals.
- preserve, // Preserve as name pattern.
- expand, // Expand to non-pattern names.
- detect // Implementation detail mode (see code for more information).
- };
-
// If one is true then parse a single (logical) line (logical means it
// can actually be several lines, e.g., an if-block). Return false if
// nothing has been parsed (i.e., we are still on the same token).
@@ -142,27 +190,41 @@ namespace build2
const target_type* = nullptr,
const string& = {});
- // Ad hoc target names inside < ... >.
+ // Group target names inside < ... >.
//
- struct adhoc_names_loc
+ struct group_names_loc
{
+ bool expl = false; // True -- explicit group, false -- ad hoc.
+ location group_loc; // Group/primary target location.
+ location member_loc; // Members location.
names ns;
- location loc;
};
- using adhoc_names = small_vector<adhoc_names_loc, 1>;
+ using group_names = small_vector<group_names_loc, 1>;
- void
- enter_adhoc_members (adhoc_names_loc&&, bool);
+ vector<reference_wrapper<target>>
+ enter_explicit_members (group_names_loc&&, bool);
+
+ vector<reference_wrapper<target>>
+ enter_adhoc_members (group_names_loc&&, bool);
+
+ small_vector<pair<reference_wrapper<target>, // Target.
+ vector<reference_wrapper<target>>>, // Ad hoc members.
+ 1>
+ enter_targets (names&&, const location&,
+ group_names&&,
+ size_t,
+ const attributes&);
- small_vector<reference_wrapper<target>, 1>
- enter_targets (names&&, const location&, adhoc_names&&, size_t);
+ void
+ apply_target_attributes (target&, const attributes&);
void
parse_dependency (token&, token_type&,
names&&, const location&,
- adhoc_names&&,
- names&&, const location&);
+ group_names&&,
+ names&&, const location&,
+ const attributes&);
void
parse_assert (token&, token_type&);
@@ -210,7 +272,9 @@ namespace build2
parse_if_else (token&, token_type&,
bool,
const function<void (
- token&, token_type&, bool, const string&)>&);
+ token&, token_type&, bool, const string&)>&,
+ const function<void (
+ token&, token_type&, const string&)>&);
void
parse_switch (token&, token_type&);
@@ -219,7 +283,9 @@ namespace build2
parse_switch (token&, token_type&,
bool,
const function<void (
- token&, token_type&, bool, const string&)>&);
+ token&, token_type&, bool, const string&)>&,
+ const function<void (
+ token&, token_type&, const string&)>&);
void
parse_for (token&, token_type&);
@@ -297,15 +363,25 @@ namespace build2
// Push a new entry into the attributes_ stack. If the next token is `[`
// then parse the attribute sequence until ']' storing the result in the
- // new stack entry. Then get the next token and, if standalone is false,
- // verify it is not newline/eos (i.e., there is something after it).
- // Return the indication of whether we have seen `[` (even if it's the
- // `[]` empty list) and its location.
+ // new stack entry. Then, if next_token is true, get the next token and,
+ // if standalone is false, verify it is not newline/eos (i.e., there is
+ // something after it). If the next token is read and it is a word or a
+ // "word-producing" token (`$` for variable expansions/function calls, `(`
+ // for eval contexts, and `{` for name generation), then verify that it is
+ // separated to reduce the possibility of confusing it with a wildcard
+ // pattern. Consider:
+ //
+ // ./: [abc]-foo.txt
+ //
+ // Return the indication of whether we have seen any attributes (note that
+ // the `[]` empty list does not count) and the location of `[`.
//
// Note that during pre-parsing nothing is pushed into the stack.
//
pair<bool, location>
- attributes_push (token&, token_type&, bool standalone = false);
+ attributes_push (token&, token_type&,
+ bool standalone = false,
+ bool next_token = true);
attributes
attributes_pop ()
@@ -319,15 +395,21 @@ namespace build2
attributes&
attributes_top () {return attributes_.back ();}
- // Source a stream optionnaly performing the default target processing.
- // If the specified path name has a real path, then also enter it as a
- // buildfile.
+ // Source a buildfile as a stream optionally performing the default target
+ // processing. If the specified path name has a real path, then also enter
+ // it as a buildfile.
+ //
+ // If default_target is nullopt, then disable the default target semantics
+ // as when loading bootstrap.build or root.build. If it is false, then
+ // continue with the existing default_target value. If it is true, then
+ // start with a new default_target and call process_default_target() at
+ // the end.
//
void
- source (istream&,
- const path_name&,
- const location&,
- bool default_target);
+ source_buildfile (istream&,
+ const path_name&,
+ const location&,
+ optional<bool> default_target);
// The what argument is used in diagnostics (e.g., "expected <what>
// instead of ...".
@@ -337,9 +419,6 @@ namespace build2
// project separator. Note that even if it is NULL, the result may still
// contain non-simple names due to variable expansions.
//
-
- static const string name_separators;
-
names
parse_names (token& t, token_type& tt,
pattern_mode pmode,
@@ -362,14 +441,7 @@ namespace build2
const string* separators = &name_separators)
{
names ns;
- parse_names (t, tt,
- ns,
- pmode,
- chunk,
- what,
- separators,
- 0,
- nullopt, nullptr, nullptr);
+ parse_names (t, tt, ns, pmode, chunk, what, separators);
return ns;
}
@@ -391,14 +463,7 @@ namespace build2
bool chunk = false)
{
names ns;
- auto r (parse_names (t, tt,
- ns,
- pmode,
- chunk,
- what,
- separators,
- 0,
- nullopt, nullptr, nullptr));
+ auto r (parse_names (t, tt, ns, pmode, chunk, what, separators));
value v (r.type); // Potentially typed NULL value.
@@ -518,8 +583,12 @@ namespace build2
// Customization hooks.
//
protected:
- // If qual is not empty, then its pair member should indicate the kind
- // of qualification: ':' -- target, '/' -- scope.
+ // If qual is not empty, then first element's pair member indicates the
+ // kind of qualification:
+ //
+ // '\0' -- target
+ // '@' -- out-qualified target
+ // '/' -- scope
//
// Note that this function is called even during pre-parse with the result
// unused. In this case a valid name will only be provided for variables
@@ -527,8 +596,12 @@ namespace build2
// example, $($x ? X : Y)) it will be empty (along with qual, which can
// only be non-empty for a computed variable).
//
+ // Note also that this function is (currently) not called by some lookup-
+ // like functions ($defined(), $config.origin()). But we should be careful
+ // if/when extending this and audit all the existing use-cases.
+ //
virtual lookup
- lookup_variable (name&& qual, string&& name, const location&);
+ lookup_variable (names&& qual, string&& name, const location&);
// This function is only called during pre-parse and is the continuation
// of the similar logic in lookup_variable() above (including the fact
@@ -553,12 +626,15 @@ namespace build2
switch_scope (const dir_path& out_base);
void
- process_default_target (token&);
+ process_default_target (token&, const buildfile*);
- // Enter buildfile as a target.
+ private:
+ // Enter buildfile or buildfile-like file (e.g., a recipe file) as a
+ // target.
//
- void
- enter_buildfile (const path&);
+ template <typename T>
+ const T&
+ enter_buildfile (const path&, optional<dir_path> out = nullopt);
// Lexer.
//
@@ -644,15 +720,24 @@ namespace build2
replay_data_[replay_i_].mode == m);
}
+ // In the replay mode return the lexing mode of the token returned by the
+ // subsequent next() or peek() call.
+ //
lexer_mode
mode () const
{
if (replay_ != replay::play)
+ {
return lexer_->mode ();
+ }
else
{
- assert (replay_i_ != replay_data_.size ());
- return replay_data_[replay_i_].mode;
+ assert (!peeked_ || replay_i_ != 0);
+
+ size_t i (!peeked_ ? replay_i_ : replay_i_ - 1);
+ assert (i != replay_data_.size ());
+
+ return replay_data_[i].mode;
}
}
@@ -707,6 +792,16 @@ namespace build2
}
void
+ replay_pop ()
+ {
+ assert (replay_ == replay::save);
+
+ assert (!peeked_ && !replay_data_.empty ());
+
+ replay_data_.pop_back ();
+ }
+
+ void
replay_play ()
{
assert ((replay_ == replay::save && !replay_data_.empty ()) ||
@@ -722,10 +817,21 @@ namespace build2
}
void
- replay_stop ()
+ replay_skip ()
{
+ assert (replay_ == replay::play);
+
assert (!peeked_);
+ replay_i_ = replay_data_.size () - 1;
+ }
+
+ void
+ replay_stop (bool verify = true)
+ {
+ if (verify)
+ assert (!peeked_);
+
if (replay_ == replay::play)
path_ = replay_path_; // Restore old path.
@@ -752,10 +858,23 @@ namespace build2
~replay_guard ()
{
if (p_ != nullptr)
- p_->replay_stop ();
+ p_->replay_stop (!uncaught_exception ());
}
private:
+ // C++17 deprecated uncaught_exception() so use uncaught_exceptions() if
+ // available.
+ //
+ static bool
+ uncaught_exception ()
+ {
+#ifdef __cpp_lib_uncaught_exceptions
+ return std::uncaught_exceptions () != 0;
+#else
+ return std::uncaught_exception ();
+#endif
+ }
+
parser* p_;
};
@@ -825,7 +944,7 @@ namespace build2
// NOTE: remember to update reset() if adding anything here.
//
protected:
- context& ctx;
+ context* ctx;
stage stage_;
bool pre_parse_ = false;
@@ -842,6 +961,13 @@ namespace build2
small_vector<attributes, 2> attributes_;
+ // Innermost if/switch (but excluding recipes).
+ //
+ // Note also that this is cleared/restored when crossing the include
+ // (but not source) boundary.
+ //
+ optional<location> condition_;
+
target* default_target_ = nullptr;
replay_token peek_;
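
The replay_guard destructor above relaxes its sanity check while unwinding, using uncaught_exceptions() where available. A generic sketch of that guard pattern (the class name here is made up):

    // A guard that skips its verification when the scope is being unwound by
    // an exception, using the same feature-test dance as above.
    //
    #include <cassert>
    #include <exception>

    struct verify_guard
    {
      ~verify_guard ()
      {
        if (!unwinding ())
          assert (done); // Only verify on the normal path.
      }

      static bool
      unwinding ()
      {
    #ifdef __cpp_lib_uncaught_exceptions
        return std::uncaught_exceptions () != 0;
    #else
        return std::uncaught_exception ();
    #endif
      }

      bool done = false;
    };

    int
    main ()
    {
      try
      {
        verify_guard g; // done stays false.
        throw 0;        // Unwinding: the destructor stays quiet.
      }
      catch (int) {}

      verify_guard g;
      g.done = true;    // Normal path: the destructor verifies.
    }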
diff --git a/libbuild2/prerequisite.cxx b/libbuild2/prerequisite.cxx
index cc41708..bb77c9e 100644
--- a/libbuild2/prerequisite.cxx
+++ b/libbuild2/prerequisite.cxx
@@ -54,16 +54,16 @@ namespace build2
}
prerequisite::
- prerequisite (const target_type& t)
+ prerequisite (const target_type& t, bool locked)
: proj (nullopt),
type (t.type ()),
dir (t.dir),
out (t.out), // @@ If it's empty, then we treat as undetermined?
name (t.name),
- ext (to_ext (t.ext ())),
+ ext (to_ext (locked ? t.ext_locked () : t.ext ())),
scope (t.base_scope ()),
target (&t),
- vars (t.ctx, false /* global */)
+ vars (*this, false /* shared */)
{
}
diff --git a/libbuild2/prerequisite.hxx b/libbuild2/prerequisite.hxx
index 476ed9d..9b9cccf 100644
--- a/libbuild2/prerequisite.hxx
+++ b/libbuild2/prerequisite.hxx
@@ -29,7 +29,9 @@ namespace build2
using target_type_type = build2::target_type;
// Note that unlike targets, for prerequisites an empty out directory
- // means undetermined rather than being definitely in the out tree.
+ // means undetermined rather than being definitely in the out tree (but
+ // maybe we should make this explicit via optional<>; see the from-target
+ // constructor).
//
// It might seem natural to keep the reference to the owner target instead
// of to the scope. But that's not the semantics that we have, consider:
@@ -61,6 +63,8 @@ namespace build2
// Note that the lookup is often ad hoc (see bin.whole as an example).
// But see also parser::lookup_variable() if adding something here.
//
+ // @@ PERF: redo as vector so can make move constructor noexcept.
+ //
public:
variable_map vars;
@@ -91,12 +95,28 @@ namespace build2
name (move (n)),
ext (move (e)),
scope (s),
- vars (s.ctx, false /* global */) {}
+ vars (*this, false /* shared */) {}
- // Make a prerequisite from a target.
+ prerequisite (const target_type_type& t,
+ dir_path d,
+ dir_path o,
+ string n,
+ optional<string> e,
+ const scope_type& s)
+ : type (t),
+ dir (move (d)),
+ out (move (o)),
+ name (move (n)),
+ ext (move (e)),
+ scope (s),
+ vars (*this, false /* shared */) {}
+
+ // Make a prerequisite from a target. If the second argument is true,
+ // assume the targets mutex is locked (see ext_locked()/key_locked()
+ // for background).
//
explicit
- prerequisite (const target_type&);
+ prerequisite (const target_type&, bool locked = false);
// Note that the returned key "tracks" the prerequisite; that is, any
// updates to the prerequisite's members will be reflected in the key.
@@ -136,7 +156,10 @@ namespace build2
is_a (const target_type_type& tt) const {return type.is_a (tt);}
public:
- prerequisite (prerequisite&& x)
+ // Note that we have the noexcept specification even though vars
+ // (std::map) could potentially throw.
+ //
+ prerequisite (prerequisite&& x) noexcept
: proj (move (x.proj)),
type (x.type),
dir (move (x.dir)),
@@ -145,7 +168,8 @@ namespace build2
ext (move (x.ext)),
scope (x.scope),
target (x.target.load (memory_order_relaxed)),
- vars (move (x.vars)) {}
+ vars (move (x.vars), *this, false /* shared */)
+ {}
prerequisite (const prerequisite& x, memory_order o = memory_order_consume)
: proj (x.proj),
@@ -156,7 +180,7 @@ namespace build2
ext (x.ext),
scope (x.scope),
target (x.target.load (o)),
- vars (x.vars) {}
+ vars (x.vars, *this, false /* shared */) {}
};
inline ostream&
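
The noexcept on the prerequisite move constructor matters because prerequisites end up in containers such as vector, and reallocation only moves elements whose move constructor cannot throw. A minimal illustration of that rule via std::move_if_noexcept (the struct names are made up):

    // std::move_if_noexcept picks the copy constructor when the move
    // constructor may throw (and a copy constructor exists), which is what
    // vector reallocation relies on.
    //
    #include <type_traits>
    #include <utility>

    struct throwing_move
    {
      throwing_move () = default;
      throwing_move (throwing_move&&);            // Not noexcept.
      throwing_move (const throwing_move&);
    };

    struct nothrow_move
    {
      nothrow_move () = default;
      nothrow_move (nothrow_move&&) noexcept;
      nothrow_move (const nothrow_move&);
    };

    int
    main ()
    {
      throwing_move a;
      nothrow_move b;

      static_assert (
        std::is_same<decltype (std::move_if_noexcept (a)),
                     const throwing_move&>::value,
        "falls back to copying");

      static_assert (
        std::is_same<decltype (std::move_if_noexcept (b)),
                     nothrow_move&&>::value,
        "moves");
    }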
diff --git a/libbuild2/recipe.cxx b/libbuild2/recipe.cxx
index 3720059..87d37e7 100644
--- a/libbuild2/recipe.cxx
+++ b/libbuild2/recipe.cxx
@@ -7,8 +7,9 @@
namespace build2
{
- const recipe empty_recipe;
- const recipe noop_recipe (&noop_action);
- const recipe default_recipe (&default_action);
- const recipe group_recipe (&group_action);
+ recipe_function* const empty_recipe = nullptr;
+ recipe_function* const noop_recipe = &noop_action;
+ recipe_function* const default_recipe = &default_action;
+ recipe_function* const group_recipe = &group_action;
+ recipe_function* const inner_recipe = &execute_inner;
}
diff --git a/libbuild2/recipe.hxx b/libbuild2/recipe.hxx
index 508c059..97261f5 100644
--- a/libbuild2/recipe.hxx
+++ b/libbuild2/recipe.hxx
@@ -27,13 +27,14 @@ namespace build2
// and while the prerequisite will be re-examined via another dependency,
// this target is done).
//
- // Note that max size for the "small capture optimization" in std::function
- // ranges (in pointer sizes) from 0 (GCC prior to 5) to 2 (GCC 5) to 6 (VC
- // 14.2). With the size ranging (in bytes for 64-bit target) from 32 (GCC)
- // to 64 (VC).
+ // Note that max size for the "small size optimization" in std::function
+ // (which is what move_only_function_ex is based on) ranges (in pointer
+ // sizes) from 0 (GCC libstdc++ prior to 5) to 2 (GCC 5 and later) to 3
+ // (Clang libc++) to 6 (VC 14.3). With the size ranging (in bytes for 64-bit
+ // target) from 32 (GCC) to 64 (VC).
//
using recipe_function = target_state (action, const target&);
- using recipe = function<recipe_function>;
+ using recipe = move_only_function_ex<recipe_function>;
// Commonly-used recipes.
//
@@ -44,10 +45,11 @@ namespace build2
// <libbuild2/algorithm.hxx> for details). The group recipe calls the
// group's recipe.
//
- LIBBUILD2_SYMEXPORT extern const recipe empty_recipe;
- LIBBUILD2_SYMEXPORT extern const recipe noop_recipe;
- LIBBUILD2_SYMEXPORT extern const recipe default_recipe;
- LIBBUILD2_SYMEXPORT extern const recipe group_recipe;
+ LIBBUILD2_SYMEXPORT extern recipe_function* const empty_recipe;
+ LIBBUILD2_SYMEXPORT extern recipe_function* const noop_recipe;
+ LIBBUILD2_SYMEXPORT extern recipe_function* const default_recipe;
+ LIBBUILD2_SYMEXPORT extern recipe_function* const group_recipe;
+ LIBBUILD2_SYMEXPORT extern recipe_function* const inner_recipe;
}
#endif // LIBBUILD2_RECIPE_HXX
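
The switch from std::function to a move-only wrapper matters because recipes commonly capture move-only state. The analogy below uses C++23's std::move_only_function (build2's move_only_function_ex is its own implementation, so this is an assumption-laden stand-in):

    // A copyable std::function cannot hold a lambda that captures a move-only
    // object, while a move-only function wrapper can.
    //
    #include <functional>
    #include <memory>
    #include <utility>

    using recipe_like = std::move_only_function<int ()>;

    int
    main ()
    {
      auto data (std::make_unique<int> (42));

      // std::function<int ()> f ([d = std::move (data)] () { return *d; });
      //                          // ^ would not compile: the closure is not
      //                          //   copyable.

      recipe_like r ([d = std::move (data)] () { return *d; });
      return r () == 42 ? 0 : 1;
    }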
diff --git a/libbuild2/rule-map.hxx b/libbuild2/rule-map.hxx
index 20895f3..8f6f59f 100644
--- a/libbuild2/rule-map.hxx
+++ b/libbuild2/rule-map.hxx
@@ -14,15 +14,43 @@
namespace build2
{
- using hint_rule_map =
- butl::prefix_map<string, reference_wrapper<const rule>, '.'>;
+ // A rule name is used both for diagnostics and to match rule hints
+ // (see rule_hints). A rule hint is a potentially partial rule name.
+ //
+ // The recommended rule naming scheme is to start with the module name, for
+ // example: cxx.compile, cxx.link. This way a rule hint can be just the
+ // module name, for example [rule_hint=cxx]. If a module can only possibly
+ // have a single rule, then the rule name can be just the module name (e.g.,
+ // `in`; though make doubly sure there is unlikely to be a need for another
+ // rule, for example, for documentation generation, in the future).
+ //
+ // The two common choices for the second component of a rule name are an
+ // action (e.g., cxx.compile, cxx.link) or a target type (e.g.,
+ // bin.def, bin.lib). The latter is a good choice when the action is
+ // inherent to the target type (e.g., "generate def file", "see through lib
+ // group"). Also note that a rule for compensating operations (e.g.,
+ // update/clean, install/uninstall) is customarily registered with the same
+ // name.
+ //
+ struct name_rule_map: butl::prefix_map<string,
+ reference_wrapper<const rule>,
+ '.'>
+ {
+ // Return true if the rule name matches a rule hint.
+ //
+ static bool
+ sub (const string& hint, const string& name)
+ {
+ return compare_type ('.').prefix (hint, name);
+ }
+ };
- using target_type_rule_map = map<const target_type*, hint_rule_map>;
+ using target_type_rule_map = map<const target_type*, name_rule_map>;
// This is an "indexed map" with operation_id being the index. Entry
// with id 0 is a wildcard.
//
- // Note that while we may resize some vectors during non-serial load, this
+ // Note that while we may resize some vectors during non-initial load, this
// is MT-safe since we never cache any references to their elements.
//
class operation_rule_map
@@ -33,7 +61,7 @@ namespace build2
bool
insert (operation_id oid,
const target_type& tt,
- string hint,
+ string name,
const rule& r)
{
// 3 is the number of builtin operations.
@@ -41,7 +69,7 @@ namespace build2
if (oid >= map_.size ())
map_.resize ((oid < 3 ? 3 : oid) + 1);
- return map_[oid][&tt].emplace (move (hint), r).second;
+ return map_[oid][&tt].emplace (move (name), r).second;
}
// Return NULL if not found.
@@ -78,17 +106,17 @@ namespace build2
bool
insert (action_id a,
const target_type& tt,
- string hint,
+ string name,
const rule& r)
{
- return insert (a >> 4, a & 0x0F, tt, move (hint), r);
+ return insert (a >> 4, a & 0x0F, tt, move (name), r);
}
template <typename T>
bool
- insert (action_id a, string hint, const rule& r)
+ insert (action_id a, string name, const rule& r)
{
- return insert (a, T::static_type, move (hint), r);
+ return insert (a, T::static_type, move (name), r);
}
// 0 oid is a wildcard.
@@ -97,17 +125,17 @@ namespace build2
insert (meta_operation_id mid,
operation_id oid,
const target_type& tt,
- string hint,
+ string name,
const rule& r)
{
if (mid_ == mid)
- return map_.insert (oid, tt, move (hint), r);
+ return map_.insert (oid, tt, move (name), r);
else
{
if (next_ == nullptr)
next_.reset (new rule_map (mid));
- return next_->insert (mid, oid, tt, move (hint), r);
+ return next_->insert (mid, oid, tt, move (name), r);
}
}
@@ -115,10 +143,10 @@ namespace build2
bool
insert (meta_operation_id mid,
operation_id oid,
- string hint,
+ string name,
const rule& r)
{
- return insert (mid, oid, T::static_type, move (hint), r);
+ return insert (mid, oid, T::static_type, move (name), r);
}
// Return NULL if not found.
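
The hint matching described above boils down to a dot-separated prefix comparison: the hint cxx matches cxx.compile, but cx does not. A plain-string restatement of that semantics (illustrative only; the real code goes through butl::prefix_map):

    // A hint matches a rule name if it is equal to it or is a '.'-separated
    // prefix of it.
    //
    #include <cassert>
    #include <string>

    static bool
    hint_matches (const std::string& hint, const std::string& name)
    {
      if (hint.size () > name.size ())
        return false;

      if (name.compare (0, hint.size (), hint) != 0)
        return false;

      // Either an exact match or the next character is the separator.
      //
      return name.size () == hint.size () || name[hint.size ()] == '.';
    }

    int
    main ()
    {
      assert ( hint_matches ("cxx",         "cxx.compile"));
      assert ( hint_matches ("cxx.compile", "cxx.compile"));
      assert (!hint_matches ("cx",          "cxx.compile"));
      assert (!hint_matches ("cxx.link",    "cxx.compile"));
    }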
diff --git a/libbuild2/rule.cxx b/libbuild2/rule.cxx
index 6dad685..dc1c96c 100644
--- a/libbuild2/rule.cxx
+++ b/libbuild2/rule.cxx
@@ -15,19 +15,64 @@ using namespace butl;
namespace build2
{
- // rule (vtable)
+ // rule
//
rule::
~rule ()
{
}
+ void rule::
+ apply_posthoc (action, target&, match_extra&) const
+ {
+ }
+
+ void rule::
+ reapply (action, target&, match_extra&) const
+ {
+ // Unless the rule overrode cur_options, this function should never get
+ // called. And if it did, then it should override this function.
+ //
+ assert (false);
+ }
+
+ const target* rule::
+ import (const prerequisite_key&,
+ const optional<string>&,
+ const location&) const
+ {
+ return nullptr;
+ }
+
+ const rule_match*
+ match_adhoc_recipe (action, target&, match_extra&); // algorithm.cxx
+
+ bool rule::
+ sub_match (const string& n, operation_id o,
+ action a, target& t, match_extra& me) const
+ {
+ // First check for an ad hoc recipe (see match_rule_impl() for details).
+ //
+ if (!t.adhoc_recipes.empty ())
+ {
+ // Use scratch match_extra since if there is no recipe, then we don't
+ // want to keep any changes and if there is, then we want it discarded.
+ //
+ match_extra s (true /* locked */); // Not called from adhoc_rule::match().
+ if (match_adhoc_recipe (action (a.meta_operation (), o), t, s) != nullptr)
+ return false;
+ }
+
+ const string& h (t.find_hint (o));
+ return name_rule_map::sub (h, n) && match (a, t, h, me);
+ }
+
// simple_rule
//
bool simple_rule::
- match (action a, target& t, const string& h, match_extra&) const
+ match (action a, target& t, const string&, match_extra&) const
{
- return match (a, t, h);
+ return match (a, t);
}
recipe simple_rule::
@@ -36,6 +81,20 @@ namespace build2
return apply (a, t);
}
+ bool simple_rule::
+ sub_match (const string& n, operation_id o,
+ action a, target& t) const
+ {
+ if (!t.adhoc_recipes.empty ())
+ {
+ match_extra s (true /* locked */); // Not called from adhoc_rule::match().
+ if (match_adhoc_recipe (action (a.meta_operation (), o), t, s) != nullptr)
+ return false;
+ }
+
+ return name_rule_map::sub (t.find_hint (o), n) && match (a, t);
+ }
+
// file_rule
//
// Note that this rule is special. It is the last, fallback rule. If
@@ -46,20 +105,26 @@ namespace build2
// use it as a guide to implement your own, normal, rules.
//
bool file_rule::
- match (action a, target& t, const string&) const
+ match (action a, target& t, const string&, match_extra&) const
{
tracer trace ("file_rule::match");
+ if (match_type_ && !t.is_a<mtime_target> ())
+ return false;
+
// While strictly speaking we should check for the file's existence
// for every action (because that's the condition for us matching),
// for some actions this is clearly a waste. Say, perform_clean: we
// are not doing anything for this action so not checking if the file
// exists seems harmless.
//
+ // But we also don't want to match real targets and then not clean their
+ // output files.
+ //
switch (a)
{
case perform_clean_id:
- return true;
+ return t.decl != target_decl::real;
default:
{
// While normally we shouldn't do any of this in match(), no other
@@ -121,7 +186,7 @@ namespace build2
}
recipe file_rule::
- apply (action a, target& t) const
+ apply (action a, target& t, match_extra&) const
{
// Update triggers the update of this target's prerequisites so it would
// seem natural that we should also trigger their cleanup. However, this
@@ -153,12 +218,12 @@ namespace build2
}
const file_rule file_rule::instance;
- const rule_match file_rule::rule_match ("file", file_rule::instance);
+ const rule_match file_rule::rule_match ("build.file", file_rule::instance);
// alias_rule
//
bool alias_rule::
- match (action, target&, const string&) const
+ match (action, target&) const
{
return true;
}
@@ -169,9 +234,25 @@ namespace build2
// Inject dependency on our directory (note: not parent) so that it is
// automatically created on update and removed on clean.
//
- inject_fsdir (a, t, false);
+ inject_fsdir (a, t, true, true, false);
- match_prerequisites (a, t);
+ // Handle the alias match-only level.
+ //
+ match_search ms;
+ if (t.ctx.match_only && *t.ctx.match_only == match_only_level::alias)
+ {
+ ms = [] (action,
+ const target& t,
+ const prerequisite& p,
+ include_type i)
+ {
+ return prerequisite_target (
+ p.is_a<alias> () ? &search (t, p) : nullptr,
+ i);
+ };
+ }
+
+ match_prerequisites (a, t, ms);
return default_recipe;
}
@@ -180,7 +261,7 @@ namespace build2
// fsdir_rule
//
bool fsdir_rule::
- match (action, target&, const string&) const
+ match (action, target&) const
{
return true;
}
@@ -214,7 +295,7 @@ namespace build2
if (verb >= 2)
text << "mkdir " << d;
else if (verb && t.ctx.current_diag_noise)
- text << "mkdir " << t;
+ print_diag ("mkdir", t);
};
// Note: ignoring the dry_run flag.
@@ -271,16 +352,19 @@ namespace build2
}
void fsdir_rule::
- perform_update_direct (action a, const target& t)
+ perform_update_direct (action a, const fsdir& t)
{
+ assert (t.ctx.phase == run_phase::match);
+
// First create the parent directory. If present, it is always first.
//
- const target* p (t.prerequisite_targets[a].empty ()
- ? nullptr
- : t.prerequisite_targets[a][0]);
-
- if (p != nullptr && p->is_a<fsdir> ())
- perform_update_direct (a, *p);
+ if (const target* p = (t.prerequisite_targets[a].empty ()
+ ? nullptr
+ : t.prerequisite_targets[a][0]))
+ {
+ if (const fsdir* fp = p->is_a<fsdir> ())
+ perform_update_direct (a, *fp);
+ }
// The same code as in perform_update() above.
//
@@ -299,6 +383,8 @@ namespace build2
// Don't fail if we couldn't remove the directory because it is not empty
// (or is current working directory). In this case rmdir() will issue a
// warning when appropriate.
+
+ // The same code as in perform_clean_direct() below.
//
target_state ts (rmdir (t.dir, t, t.ctx.current_diag_noise ? 1 : 2)
? target_state::changed
@@ -310,12 +396,41 @@ namespace build2
return ts;
}
+ void fsdir_rule::
+ perform_clean_direct (action a, const fsdir& t)
+ {
+ assert (t.ctx.phase == run_phase::match);
+
+ // The same code as in perform_clean() above.
+ //
+ // Except that if there are other dependents of this fsdir{} then this will
+ // likely be a noop (because the directory won't be empty) and it makes
+ // sense to just defer cleaning to such other dependents. See
+ // clean_during_match() for background. This is similar to the logic in
+ // unmatch::safe.
+ //
+ if (t[a].dependents.load (memory_order_relaxed) == 0)
+ {
+ rmdir (t.dir, t, t.ctx.current_diag_noise ? 1 : 2);
+
+ // Then clean the parent directory. If present, it is always first.
+ //
+ if (const target* p = (t.prerequisite_targets[a].empty ()
+ ? nullptr
+ : t.prerequisite_targets[a][0]))
+ {
+ if (const fsdir* fp = p->is_a<fsdir> ())
+ perform_clean_direct (a, *fp);
+ }
+ }
+ }
+
const fsdir_rule fsdir_rule::instance;
// noop_rule
//
bool noop_rule::
- match (action, target&, const string&) const
+ match (action, target&) const
{
return true;
}
@@ -339,8 +454,9 @@ namespace build2
}
bool adhoc_rule::
- match (action a, target& t, const string& h, match_extra& me) const
+ match (action a, target& xt, const string& h, match_extra& me) const
{
+ const target& t (xt);
return pattern == nullptr || pattern->match (a, t, h, me);
}
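
sub_match() above lets a rule agree to match only if a sibling rule would match another operation (install only if update, for example). The sketch below shows just that control flow with hypothetical stand-in types, not build2's actual interfaces:

    // Hypothetical stand-ins: the install-side rule delegates the decision
    // for the update operation to the sibling rule before agreeing to match.
    //
    #include <cassert>

    enum class operation { update, install };

    struct target { bool updatable; };

    struct update_rule
    {
      bool
      match (const target& t) const { return t.updatable; }
    };

    struct install_rule
    {
      bool
      match (const target& t, operation o) const
      {
        // Mirrors the intent of rule::sub_match(): only handle install if the
        // update-side rule would also match this target.
        //
        return o == operation::install && update.match (t);
      }

      update_rule update;
    };

    int
    main ()
    {
      install_rule r;
      assert ( r.match (target {true},  operation::install));
      assert (!r.match (target {false}, operation::install));
    }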
diff --git a/libbuild2/rule.hxx b/libbuild2/rule.hxx
index 364e3ff..eceb6ad 100644
--- a/libbuild2/rule.hxx
+++ b/libbuild2/rule.hxx
@@ -22,13 +22,25 @@ namespace build2
// you need to modify some state (e.g., counters or some such), then make
// sure things are MT-safe.
//
- // Note: match() is only called once but may not be followed by apply().
+ // Note: match() could be called multiple times (so should be idempotent)
+ // and it may not be followed by apply().
+ //
+ // The hint argument is the rule hint, if any, that was used to select this
+ // rule. While normally not factored into the match decision, a rule may
+ // "try harder" if a hint was specified (see cc::link_rule for an example).
//
// The match_extra argument (the type is defined in target.hxx) is used to
// pass additional information that is only needed by some rule
// implementations. It is also a way for us to later pass more information
// without breaking source compatibility.
//
+ // A rule may adjust post hoc prerequisites by overriding apply_posthoc().
+ // See match_extra::posthoc_prerequisite_targets for background and details.
+ //
+ // A rule may support match options and if such a rule is rematched with
+ // different options, then reapply() is called. See
+ // match_extra::{cur,new}_options for background and details.
+ //
struct match_extra;
class LIBBUILD2_SYMEXPORT rule
@@ -40,6 +52,12 @@ namespace build2
virtual recipe
apply (action, target&, match_extra&) const = 0;
+ virtual void
+ apply_posthoc (action, target&, match_extra&) const;
+
+ virtual void
+ reapply (action, target&, match_extra&) const;
+
rule () = default;
virtual
@@ -47,15 +65,45 @@ namespace build2
rule (const rule&) = delete;
rule& operator= (const rule&) = delete;
+
+ // Resolve a project-qualified target in a rule-specific manner.
+ //
+ // This is optional functionality that may be provided by some rules to
+ // facilitate immediate importation of certain target types. See the
+ // import machinery for details. The default implementation always returns
+ // NULL.
+ //
+ // Note that if this function returns a target, it should have the
+ // extension assigned so that as_name() returns a stable name.
+ //
+ virtual const target*
+ import (const prerequisite_key&,
+ const optional<string>& metadata,
+ const location&) const;
+
+ // Sometimes we want to match only if another rule of ours would match
+ // another operation. For example, we would want our install rule to match
+ // only if our update rule also matches.
+ //
+ // Arranging this, however, is not a simple matter of calling the other
+ // rule's match(): we also have to take into account ad hoc recipes and
+ // rule hints for that operation. This helper performs all the necessary
+ // checks. Note: should only be called from match() (see
+ // target::find_hint() for details). Note also that ad hoc recipes are
+ // checked for hint_op, not action's operation.
+ //
+ bool
+ sub_match (const string& rule_name, operation_id hint_op,
+ action, target&, match_extra&) const;
};
- // Simplified interface for rules that don't care about the extras.
+ // Simplified interface for rules that don't care about the hint or extras.
//
class LIBBUILD2_SYMEXPORT simple_rule: public rule
{
public:
virtual bool
- match (action, target&, const string& hint) const = 0;
+ match (action, target&) const = 0;
virtual recipe
apply (action, target&) const = 0;
@@ -65,31 +113,52 @@ namespace build2
virtual recipe
apply (action, target&, match_extra&) const override;
+
+ // The simplified version of sub_match() above.
+ //
+ // Note that it calls the simplified match() directly rather than going
+ // through the original.
+ //
+ bool
+ sub_match (const string& rule_name, operation_id hint_op,
+ action, target&) const;
};
// Fallback rule that only matches if the file exists. It will also match
// an mtime_target provided it has a set timestamp.
//
- class LIBBUILD2_SYMEXPORT file_rule: public simple_rule
+ // Note: this rule is "hot" because it matches every static source file and
+ // so we don't use simple_rule to avoid two extra virtual calls.
+ //
+ class LIBBUILD2_SYMEXPORT file_rule: public rule
{
public:
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&, const string&, match_extra&) const override;
virtual recipe
- apply (action, target&) const override;
+ apply (action, target&, match_extra&) const override;
- file_rule () {}
+ // While this rule expects an mtime_target-based target, sometimes it's
+ // necessary to register it for something less specific (normally target)
+ // in order to achieve the desired rule matching priority (see the dist
+ // and config modules for an example). For such cases this rule can be
+ // instructed to check the type and only match if it's mtime_target-based.
+ //
+ file_rule (bool match_type = false): match_type_ (match_type) {}
- static const file_rule instance;
+ static const file_rule instance; // Note: does not match the target type.
static const build2::rule_match rule_match;
+
+ private:
+ bool match_type_;
};
class LIBBUILD2_SYMEXPORT alias_rule: public simple_rule
{
public:
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
virtual recipe
apply (action, target&) const override;
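A minimal sketch of the match_type mode described above: registering a type-checking file_rule instance for the base target{} type so that it only actually matches mtime_target-based targets (the registration point and the "file" rule name are illustrative; the constructor and scope::insert_rule() are as declared in this diff):

  #include <libbuild2/rule.hxx>
  #include <libbuild2/scope.hxx>
  #include <libbuild2/action.hxx>  // perform_update_id
  #include <libbuild2/target.hxx>

  namespace build2
  {
    static const file_rule file_rule_ (true /* match_type */);

    static void
    register_rules (scope& rs) // For example, from a module's init().
    {
      // Registered for target{} to get the desired priority, but will only
      // match mtime_target-based targets thanks to match_type.
      //
      rs.insert_rule<target> (perform_update_id, "file", file_rule_);
    }
  }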
@@ -105,7 +174,7 @@ namespace build2
{
public:
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
virtual recipe
apply (action, target&) const override;
@@ -120,7 +189,10 @@ namespace build2
// of fsdir{} without the overhead of switching to the execute phase.
//
static void
- perform_update_direct (action, const target&);
+ perform_update_direct (action, const fsdir&);
+
+ static void
+ perform_clean_direct (action, const fsdir&);
fsdir_rule () {}
static const fsdir_rule instance;
@@ -132,7 +204,7 @@ namespace build2
{
public:
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
virtual recipe
apply (action, target&) const override;
@@ -145,9 +217,9 @@ namespace build2
//
// Used for both ad hoc pattern rules and ad hoc recipes. For recipes, it's
// essentially a rule of one case. Note that when used as part of a pattern,
- // the implementation cannot use the match_extra::buffer nor the target
- // auxiliary data storage until the pattern's apply_*() calls have been
- // made.
+ // the implementation cannot use the match_extra::data() facility nor the
+ // target auxiliary data storage until the pattern's apply_*() calls have
+ // been made.
//
// Note also that when used as part of a pattern, the rule is also registered
// for the dist meta-operation (unless there is an explicit recipe for dist)
@@ -205,6 +277,16 @@ namespace build2
// The default implementation forwards to the pattern's match() if there
// is a pattern and returns true otherwise.
//
+ // Note also that in case of a member of a group-based target, match() is
+ // called on the group while apply() on the member (see match_rule_impl()
+ // in algorithm.cxx for details). This means that match() may be called
+ // without having the target locked and as a result match() should (unless
+ // known to only match a non-group) treat the target as const and only
+ // rely on immutable information (type, name, etc) since the group could
+ // be matched concurrently. This case can be detected by examining
+ // match_extra::locked (see adhoc_rule_regex_pattern::match() for a
+ // use-case).
+ //
virtual bool
match (action, target&, const string&, match_extra&) const override;
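A sketch of how a custom ad hoc rule's match() might honor the locking caveat above (my_adhoc_rule and the file-type check are illustrative; match_extra::locked is as described in the comment):

  bool my_adhoc_rule::
  match (action a, target& t, const string& hint, match_extra& me) const
  {
    // If the target is not locked, we may be looking at a group that is
    // being matched concurrently: only rely on immutable information.
    //
    if (!me.locked)
      return t.is_a<file> () != nullptr;

    // Locked: safe to examine mutable state, auxiliary data, etc.
    //
    return adhoc_rule::match (a, t, hint, me); // Default pattern-based logic.
  }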
@@ -219,8 +301,8 @@ namespace build2
// Implementation details.
//
public:
- // The name in rule_match is used as a hint and as a name in diagnostics.
- // The former does not apply to ad hoc recipes (but does apply to ad hoc
+ // The name in rule_match is used to match hints and in diagnostics. The
+ // former does not apply to ad hoc recipes (but does apply to ad hoc
// rules).
//
const build2::rule_match rule_match;
@@ -271,14 +353,29 @@ namespace build2
~adhoc_rule_pattern ();
public:
+ // Note: the adhoc_rule::match() restrictions apply here as well.
+ //
virtual bool
- match (action, target&, const string&, match_extra&) const = 0;
+ match (action, const target&, const string&, match_extra&) const = 0;
+ // Append additional group members. Note that this function should handle
+ // both ad hoc and explicit groups.
+ //
virtual void
- apply_adhoc_members (action, target&, match_extra&) const = 0;
-
+ apply_group_members (action, target&,
+ const scope& base,
+ match_extra&) const = 0;
+
+ // The implementation should append pattern prerequisites to
+ // t.prerequisite_targets[a] but not match. It should set bit 2 in
+ // prerequisite_target::include to indicate update=match and bit 3
+ // to indicate update=unmatch. It should also avoid adding duplicate
+ // fsdir{} similar to the search_prerequisite*() functions.
+ //
virtual void
- apply_prerequisites (action, target&, match_extra&) const = 0;
+ apply_prerequisites (action, target&,
+ const scope& base,
+ match_extra&) const = 0;
// Dump support.
//
diff --git a/libbuild2/scheduler.cxx b/libbuild2/scheduler.cxx
index bdd703d..e3fbcc1 100644
--- a/libbuild2/scheduler.cxx
+++ b/libbuild2/scheduler.cxx
@@ -5,8 +5,11 @@
#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__APPLE__)
# include <pthread.h>
-# ifdef __FreeBSD__
+# if defined(__FreeBSD__)
# include <pthread_np.h> // pthread_attr_get_np() (in <pthread.h> on NetBSD)
+# elif defined(__OpenBSD__)
+# include <sys/signal.h>
+# include <pthread_np.h> // pthread_stackseg_np()
# endif
#endif
@@ -90,12 +93,11 @@ namespace build2
}
void scheduler::
- deactivate (bool external)
+ deactivate_impl (bool external, lock&& rl)
{
- if (max_active_ == 1) // Serial execution.
- return;
+ // Note: assume non-serial execution.
- lock l (mutex_);
+ lock l (move (rl)); // Make sure unlocked on exception.
active_--;
waiting_++;
@@ -128,11 +130,10 @@ namespace build2
}
}
- void scheduler::
- activate (bool external, bool collision)
+ scheduler::lock scheduler::
+ activate_impl (bool external, bool collision)
{
- if (max_active_ == 1) // Serial execution.
- return;
+ // Note: assume non-serial execution.
lock l (mutex_);
@@ -157,6 +158,8 @@ namespace build2
if (shutdown_)
throw_generic_error (ECANCELED);
+
+ return l;
}
void scheduler::
@@ -204,7 +207,10 @@ namespace build2
deallocate (size_t n)
{
if (max_active_ == 1) // Serial execution.
+ {
+ assert (n == 0);
return;
+ }
lock l (mutex_);
active_ -= n;
@@ -213,13 +219,15 @@ namespace build2
size_t scheduler::
suspend (size_t start_count, const atomic_count& task_count)
{
+ assert (max_active_ != 1); // Suspend during serial execution?
+
wait_slot& s (
wait_queue_[
hash<const atomic_count*> () (&task_count) % wait_queue_size_]);
// This thread is no longer active.
//
- deactivate (false /* external */);
+ deactivate_impl (false /* external */, lock (mutex_));
// Note that the task count is checked while holding the lock. We also
// have to notify while holding the lock (see resume()). The aim here
@@ -256,7 +264,7 @@ namespace build2
// This thread is no longer waiting.
//
- activate (false /* external */, collision);
+ activate_impl (false /* external */, collision);
return tc;
}
@@ -362,8 +370,14 @@ namespace build2
size_t init_active,
size_t max_threads,
size_t queue_depth,
- optional<size_t> max_stack)
+ optional<size_t> max_stack,
+ size_t orig_max_active)
{
+ if (orig_max_active == 0)
+ orig_max_active = max_active;
+ else
+ assert (max_active <= orig_max_active);
+
// Lock the mutex to make sure our changes are visible in (other) active
// threads.
//
@@ -375,16 +389,18 @@ namespace build2
// were asked to run serially.
//
if (max_threads == 0)
- max_threads = (max_active == 1 ? 1 :
- sizeof (void*) < 8 ? 8 : 32) * max_active;
+ max_threads = (orig_max_active == 1
+ ? 1
+ : (sizeof (void*) < 8 ? 8 : 32) * orig_max_active);
assert (shutdown_ &&
init_active != 0 &&
init_active <= max_active &&
- max_active <= max_threads);
+ orig_max_active <= max_threads);
active_ = init_active_ = init_active;
- max_active_ = orig_max_active_ = max_active;
+ max_active_ = max_active;
+ orig_max_active_ = orig_max_active;
max_threads_ = max_threads;
// This value should be proportional to the amount of hardware concurrency
@@ -398,7 +414,7 @@ namespace build2
//
task_queue_depth_ = queue_depth != 0
? queue_depth
- : max_active * 8;
+ : orig_max_active_ * 8;
queued_task_count_.store (0, memory_order_relaxed);
@@ -421,6 +437,8 @@ namespace build2
shutdown_ = false;
+ // Delay thread startup if serial.
+ //
if (max_active_ != 1)
dead_thread_ = thread (deadlock_monitor, this);
}
@@ -429,7 +447,7 @@ namespace build2
tune (size_t max_active)
{
// Note that if we tune a parallel scheduler to run serially, we will
- // still have the deadlock monitoring thread running.
+ // still have the deadlock monitoring thread loitering around.
// With multiple initial active threads we will need to make changes to
// max_active_ visible to other threads and which we currently say can be
@@ -451,6 +469,11 @@ namespace build2
lock l (wait_idle ());
swap (max_active_, max_active);
+
+ // Start the deadlock thread if its startup was delayed.
+ //
+ if (max_active_ != 1 && !dead_thread_.joinable ())
+ dead_thread_ = thread (deadlock_monitor, this);
}
return max_active == orig_max_active_ ? 0 : max_active;
@@ -519,7 +542,7 @@ namespace build2
// Wait for the deadlock monitor (the only remaining thread).
//
- if (orig_max_active_ != 1) // See tune() for why not max_active_.
+ if (dead_thread_.joinable ())
{
l.unlock ();
dead_condv_.notify_one ();
@@ -835,6 +858,15 @@ namespace build2
if (r != 0)
throw_system_error (r);
+#elif defined(__OpenBSD__)
+ stack_t s;
+ int r (pthread_stackseg_np (pthread_self (), &s));
+
+ if (r != 0)
+ throw_system_error (r);
+
+ stack_size = s.ss_size;
+
#else // defined(__APPLE__)
stack_size = pthread_get_stacksize_np (pthread_self ());
#endif
diff --git a/libbuild2/scheduler.hxx b/libbuild2/scheduler.hxx
index dcde79b..3cc206e 100644
--- a/libbuild2/scheduler.hxx
+++ b/libbuild2/scheduler.hxx
@@ -5,11 +5,10 @@
#define LIBBUILD2_SCHEDULER_HXX
#include <list>
-#include <mutex>
#include <tuple>
#include <atomic>
-#include <type_traits> // aligned_storage, etc
-#include <condition_variable>
+#include <cstddef> // max_align_t
+#include <type_traits> // decay, etc
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
@@ -193,13 +192,15 @@ namespace build2
//
// The external flag indicates whether the wait is for an event external
// to the scheduler, that is, triggered by something other than one of the
- // threads managed by the scheduler.
+ // threads managed by the scheduler. This is used to suspend deadlock
+ // detection (which is progress-based and which cannot be measured for
+ // external events).
//
void
deactivate (bool external);
void
- activate (bool external, bool = false);
+ activate (bool external);
// Sleep for the specified duration, deactivating the thread before going
// to sleep and re-activating it after waking up (which means this
@@ -218,7 +219,7 @@ namespace build2
// Allocate additional active thread count to the current active thread,
// for example, to be "passed" to an external program:
//
- // scheduler::alloc_guard ag (ctx.sched, ctx.sched.max_active () / 2);
+ // scheduler::alloc_guard ag (*ctx.sched, ctx.sched->max_active () / 2);
// args.push_back ("-flto=" + to_string (1 + ag.n));
// run (args);
// ag.deallocate ();
@@ -243,14 +244,38 @@ namespace build2
void
deallocate (size_t);
+ // Similar to allocate() but reserve all the available threads, blocking
+ // until this becomes possible. Call unlock() on the specified lock before
+ // deactivating and lock() after activating (can be used to unlock the
+ // phase). Typical usage:
+ //
+ // scheduler::alloc_guard ag (*ctx.sched,
+ // phase_unlock (ctx, true /* delay */));
+ //
+ // Or, without unlocking the phase:
+ //
+ // scheduler::alloc_guard ag (*ctx.sched, phase_unlock (nullptr));
+ //
+ template <typename L>
+ size_t
+ serialize (L& lock);
+
struct alloc_guard
{
size_t n;
alloc_guard (): n (0), s_ (nullptr) {}
alloc_guard (scheduler& s, size_t m): n (s.allocate (m)), s_ (&s) {}
- alloc_guard (alloc_guard&& x): n (x.n), s_ (x.s_) {x.s_ = nullptr;}
- alloc_guard& operator= (alloc_guard&& x)
+
+ template <typename L,
+ typename std::enable_if<!std::is_integral<L>::value, int>::type = 0>
+ alloc_guard (scheduler& s, L&& l): n (s.serialize (l)), s_ (&s) {}
+
+ alloc_guard (alloc_guard&& x) noexcept
+ : n (x.n), s_ (x.s_) {x.s_ = nullptr;}
+
+ alloc_guard&
+ operator= (alloc_guard&& x) noexcept
{
if (&x != this)
{
@@ -301,14 +326,25 @@ namespace build2
// If the maximum threads or task queue depth arguments are unspecified,
// then appropriate defaults are used.
//
+ // Passing non-zero orig_max_active (normally the real max active) allows
+ // starting up a pre-tuned scheduler. In particular, starting a scheduler
+ // pre-tuned to serial is relatively cheap since starting the deadlock
+ // detection thread is delayed until the scheduler is re-tuned.
+ //
explicit
scheduler (size_t max_active,
size_t init_active = 1,
size_t max_threads = 0,
size_t queue_depth = 0,
- optional<size_t> max_stack = nullopt)
+ optional<size_t> max_stack = nullopt,
+ size_t orig_max_active = 0)
{
- startup (max_active, init_active, max_threads, queue_depth, max_stack);
+ startup (max_active,
+ init_active,
+ max_threads,
+ queue_depth,
+ max_stack,
+ orig_max_active);
}
// Start the scheduler.
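A sketch of the pre-tuned startup described above: begin serially (for loading and configuring) with the deadlock monitor delayed, then re-tune to the real concurrency (the serial-then-parallel usage pattern is an assumption; the constructor, tune(), and shutdown() are from this header):

  #include <libbuild2/scheduler.hxx>

  int main ()
  {
    using namespace build2;

    size_t hw (scheduler::hardware_concurrency ());

    scheduler s (1  /* max_active (serial for now) */,
                 1  /* init_active */,
                 0, 0, nullopt,
                 hw /* orig_max_active (pre-tuned) */);

    // ... serial work: no deadlock monitor thread yet ...

    s.tune (hw); // Go parallel; this also starts the deadlock monitor.

    // ... parallel work ...

    s.shutdown ();
  }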
@@ -318,7 +354,8 @@ namespace build2
size_t init_active = 1,
size_t max_threads = 0,
size_t queue_depth = 0,
- optional<size_t> max_stack = nullopt);
+ optional<size_t> max_stack = nullopt,
+ size_t orig_max_active = 0);
// Return true if the scheduler was started up.
//
@@ -343,12 +380,19 @@ namespace build2
size_t
tune (size_t max_active);
+ bool
+ tuned () const {return max_active_ != orig_max_active_;}
+
struct tune_guard
{
tune_guard (): s_ (nullptr), o_ (0) {}
tune_guard (scheduler& s, size_t ma): s_ (&s), o_ (s_->tune (ma)) {}
- tune_guard (tune_guard&& x): s_ (x.s_), o_ (x.o_) {x.s_ = nullptr;}
- tune_guard& operator= (tune_guard&& x)
+
+ tune_guard (tune_guard&& x) noexcept
+ : s_ (x.s_), o_ (x.o_) {x.s_ = nullptr;}
+
+ tune_guard&
+ operator= (tune_guard&& x) noexcept
{
if (&x != this)
{
@@ -416,8 +460,8 @@ namespace build2
{
explicit
monitor_guard (scheduler* s = nullptr): s_ (s) {}
- monitor_guard (monitor_guard&& x): s_ (x.s_) {x.s_ = nullptr;}
- monitor_guard& operator= (monitor_guard&& x)
+ monitor_guard (monitor_guard&& x) noexcept: s_ (x.s_) {x.s_ = nullptr;}
+ monitor_guard& operator= (monitor_guard&& x) noexcept
{
if (&x != this)
{
@@ -480,7 +524,7 @@ namespace build2
static size_t
hardware_concurrency ()
{
- return std::thread::hardware_concurrency ();
+ return build2::thread::hardware_concurrency ();
}
// Return a prime number that can be used as a lock shard size that's
@@ -497,7 +541,7 @@ namespace build2
// to become idle. Return the lock over the scheduler mutex. Normally you
// don't need to call this function directly.
//
- using lock = std::unique_lock<std::mutex>;
+ using lock = build2::mlock;
lock
wait_idle ();
@@ -533,8 +577,8 @@ namespace build2
atomic_count* task_count;
size_t start_count;
- func_type func;
args_type args;
+ func_type func;
template <size_t... i>
void
@@ -559,7 +603,7 @@ namespace build2
size_t monitor_init_; // Initial count.
function<size_t (size_t)> monitor_func_;
- std::mutex mutex_;
+ build2::mutex mutex_;
bool shutdown_ = true; // Shutdown flag.
optional<size_t> max_stack_;
@@ -599,8 +643,8 @@ namespace build2
//
size_t orig_max_active_ = 0;
- std::condition_variable idle_condv_; // Idle helpers queue.
- std::condition_variable ready_condv_; // Ready masters queue.
+ build2::condition_variable idle_condv_; // Idle helpers queue.
+ build2::condition_variable ready_condv_; // Ready masters queue.
// Statistics counters.
//
@@ -619,8 +663,8 @@ namespace build2
// Deadlock detection.
//
- std::thread dead_thread_;
- std::condition_variable dead_condv_;
+ build2::thread dead_thread_;
+ build2::condition_variable dead_condv_;
static void*
deadlock_monitor (void*);
@@ -641,8 +685,8 @@ namespace build2
//
struct wait_slot
{
- std::mutex mutex;
- std::condition_variable condv;
+ build2::mutex mutex;
+ build2::condition_variable condv;
size_t waiters = 0;
const atomic_count* task_count;
bool shutdown = true;
@@ -663,7 +707,11 @@ namespace build2
//
struct task_data
{
- std::aligned_storage<sizeof (void*) * 8>::type data;
+ static const size_t data_size = (sizeof (void*) == 4
+ ? sizeof (void*) * 16
+ : sizeof (void*) * 8);
+
+ alignas (std::max_align_t) unsigned char data[data_size];
void (*thunk) (scheduler&, lock&, void*);
};
@@ -714,7 +762,7 @@ namespace build2
struct task_queue: task_queue_data
{
- std::mutex mutex;
+ build2::mutex mutex;
bool shutdown = false;
size_t stat_full = 0; // Number of times push() returned NULL.
@@ -913,6 +961,12 @@ namespace build2
private:
optional<size_t>
wait_impl (size_t, const atomic_count&, work_queue);
+
+ void
+ deactivate_impl (bool, lock&&);
+
+ lock
+ activate_impl (bool, bool);
};
}
diff --git a/libbuild2/scheduler.ixx b/libbuild2/scheduler.ixx
index 96eaee1..f46d035 100644
--- a/libbuild2/scheduler.ixx
+++ b/libbuild2/scheduler.ixx
@@ -44,6 +44,20 @@ namespace build2
return suspend (start_count, task_count);
}
+ inline void scheduler::
+ deactivate (bool external)
+ {
+ if (max_active_ != 1) // Serial execution.
+ deactivate_impl (external, lock (mutex_));
+ }
+
+ inline void scheduler::
+ activate (bool external)
+ {
+ if (max_active_ != 1) // Serial execution.
+ activate_impl (external, false /* collision */);
+ }
+
inline scheduler::queue_mark::
queue_mark (scheduler& s)
: tq_ (s.queue ())
diff --git a/libbuild2/scheduler.test.cxx b/libbuild2/scheduler.test.cxx
index b29c932..2ef8d5c 100644
--- a/libbuild2/scheduler.test.cxx
+++ b/libbuild2/scheduler.test.cxx
@@ -2,7 +2,6 @@
// license : MIT; see accompanying LICENSE file
#include <chrono>
-#include <thread>
#include <iostream>
diff --git a/libbuild2/scheduler.txx b/libbuild2/scheduler.txx
index 5c6b339..87c9384 100644
--- a/libbuild2/scheduler.txx
+++ b/libbuild2/scheduler.txx
@@ -64,8 +64,8 @@ namespace build2
new (&td->data) task {
&task_count,
start_count,
- decay_copy (forward<F> (f)),
- typename task::args_type (decay_copy (forward<A> (a))...)};
+ typename task::args_type (decay_copy (forward<A> (a))...),
+ decay_copy (forward<F> (f))};
td->thunk = &task_thunk<F, A...>;
@@ -137,4 +137,42 @@ namespace build2
if (tc.fetch_sub (1, memory_order_release) - 1 <= t.start_count)
s.resume (tc); // Resume waiters, if any.
}
+
+ template <typename L>
+ size_t scheduler::
+ serialize (L& el)
+ {
+ if (max_active_ == 1) // Serial execution.
+ return 0;
+
+ lock l (mutex_);
+
+ if (active_ == 1)
+ active_ = max_active_;
+ else
+ {
+ // Wait until we are the only active thread.
+ //
+ el.unlock ();
+
+ while (active_ != 1)
+ {
+ // While it would have been more efficient to implement this via the
+ // condition variable notifications, that logic is already twisted
+ // enough (and took a considerable time to debug). So for now we keep
+ // it simple and just sleep and re-check. Make the sleep external so as
+ // not to trip up the deadlock detection.
+ //
+ deactivate_impl (true /* external */, move (l));
+ active_sleep (std::chrono::milliseconds (10));
+ l = activate_impl (true /* external */, false /* collision */);
+ }
+
+ active_ = max_active_;
+ l.unlock (); // Important: unlock before attempting to relock external!
+ el.lock ();
+ }
+
+ return max_active_ - 1;
+ }
}
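The serialize() implementation above is what the new alloc_guard constructor ends up calling. A sketch of using it without the phase machinery (the nop_lock type is a purely illustrative stand-in for the phase_unlock object shown in the scheduler.hxx comment):

  #include <libbuild2/scheduler.hxx>

  struct nop_lock
  {
    void lock () {}   // Nothing to re-acquire.
    void unlock () {} // Nothing to release.
  };

  static void
  run_serial_step (build2::scheduler& s)
  {
    nop_lock nl;
    build2::scheduler::alloc_guard ag (s, nl); // Calls s.serialize (nl).

    // At this point we are the only active thread, for example, to run a
    // memory-hungry external tool.

    ag.deallocate (); // Return the reserved count to the scheduler.
  }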
diff --git a/libbuild2/scope.cxx b/libbuild2/scope.cxx
index 93f21db..23781a8 100644
--- a/libbuild2/scope.cxx
+++ b/libbuild2/scope.cxx
@@ -23,7 +23,7 @@ namespace build2
? empty_project_name
: i->first);
- os << (i != b ? " " : "") << n << '@' << i->second;
+ os << (i != b ? " " : "") << n << '@' << i->second.string ();
}
return os;
@@ -32,8 +32,8 @@ namespace build2
// scope
//
scope::
- scope (context& c, bool global)
- : ctx (c), vars (c, global), target_vars (c, global)
+ scope (context& c, bool shared)
+ : ctx (c), vars (*this, shared), target_vars (c, shared)
{
}
@@ -685,6 +685,8 @@ namespace build2
pair<const target_type*, optional<string>> scope::
find_target_type (name& n, const location& loc, const target_type* tt) const
{
+ // NOTE: see also functions-name.cxx:filter() if changing anything here.
+
optional<string> ext;
string& v (n.value);
@@ -790,9 +792,11 @@ namespace build2
}
pair<const target_type&, optional<string>> scope::
- find_target_type (name& n, name& o, const location& loc) const
+ find_target_type (name& n, name& o,
+ const location& loc,
+ const target_type* tt) const
{
- auto r (find_target_type (n, loc));
+ auto r (find_target_type (n, loc, tt));
if (r.first == nullptr)
fail (loc) << "unknown target type " << n.type << " in " << n;
@@ -806,26 +810,68 @@ namespace build2
fail (loc) << "expected directory after '@'";
}
- dir_path& d (n.dir);
+ dir_path& dir (n.dir);
const dir_path& sd (src_path ());
const dir_path& od (out_path ());
- if (d.empty ())
- d = src ? sd : od; // Already dormalized.
+ bool nabs (false);
+
+ if (dir.empty ())
+ dir = src ? sd : od; // Already normalized.
else
{
- if (d.relative ())
- d = (src ? sd : od) / d;
+ if (dir.relative ())
+ dir = (src ? sd : od) / dir;
+ else if (src)
+ nabs = true;
- d.normalize ();
+ dir.normalize ();
}
dir_path out;
- if (src && sd != od) // If in-source build, then out must be empty.
+ if (src)
{
- out = o.dir.relative () ? od / o.dir : move (o.dir);
+ bool oabs (o.dir.absolute ());
+
+ out = oabs ? move (o.dir) : od / o.dir;
out.normalize ();
+
+ // Make sure out and src are parallel unless both were specified as
+ // absolute. We make an exception for this case because out may be used
+ // to "tag" imported targets (see cc::search_library()). So it's sort of
+ // the "I know what I am doing" escape hatch (it would have been even
+ // better to verify such a target is outside any project but that won't
+ // be cheap).
+ //
+ // See similar code for prerequisites in parser::parse_dependency().
+ //
+ if (nabs && oabs)
+ ;
+ else if (root_->out_eq_src ()
+ ? out == dir
+ //
+ // @@ PERF: could just compare leafs in place.
+ //
+ : (out.sub (root_->out_path ()) &&
+ dir.sub (root_->src_path ()) &&
+ out.leaf (root_->out_path ()) == dir.leaf (root_->src_path ())))
+ ;
+ else
+ // @@ TMP change warn to fail after 0.16.0 release.
+ //
+ warn (loc) << "target output directory " << out
+ << " must be parallel to source directory " << dir;
+
+ // If this target is in this project, then out must be empty if this is
+ // an in-source build. We assume that if either src or out is relative,
+ // then it belongs to this project.
+ //
+ if (root_->out_eq_src ())
+ {
+ if (!nabs || !oabs || out.sub (root_->out_path ()))
+ out.clear ();
+ }
}
o.dir = move (out); // Result.
@@ -834,14 +880,16 @@ namespace build2
}
target_key scope::
- find_target_key (names& ns, const location& loc) const
+ find_target_key (names& ns,
+ const location& loc,
+ const target_type* tt) const
{
if (size_t n = ns.size ())
{
if (n == (ns[0].pair ? 2 : 1))
{
name dummy;
- return find_target_key (ns[0], n == 1 ? dummy : ns[1], loc);
+ return find_target_key (ns[0], n == 1 ? dummy : ns[1], loc, tt);
}
}
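A sketch of resolving names through the extended find_target_key() above (the untyped "utility" name is illustrative and file::static_type is passed as the new optional target type argument, assumed here to act as the default for untyped names):

  #include <libbuild2/scope.hxx>
  #include <libbuild2/target.hxx>
  #include <libbuild2/diagnostics.hxx>

  static void
  resolve (const build2::scope& bs, const build2::location& loc)
  {
    using namespace build2;

    names ns {name ("utility")}; // As if parsed from user input.

    target_key tk (bs.find_target_key (ns, loc, &file::static_type));

    text << tk; // For example, for diagnostics.
  }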
@@ -849,9 +897,11 @@ namespace build2
}
pair<const target_type&, optional<string>> scope::
- find_prerequisite_type (name& n, name& o, const location& loc) const
+ find_prerequisite_type (name& n, name& o,
+ const location& loc,
+ const target_type* tt) const
{
- auto r (find_target_type (n, loc));
+ auto r (find_target_type (n, loc, tt));
if (r.first == nullptr)
fail (loc) << "unknown target type " << n.type << " in " << n;
@@ -875,14 +925,16 @@ namespace build2
}
prerequisite_key scope::
- find_prerequisite_key (names& ns, const location& loc) const
+ find_prerequisite_key (names& ns,
+ const location& loc,
+ const target_type* tt) const
{
if (size_t n = ns.size ())
{
if (n == (ns[0].pair ? 2 : 1))
{
name dummy;
- return find_prerequisite_key (ns[0], n == 1 ? dummy : ns[1], loc);
+ return find_prerequisite_key (ns[0], n == 1 ? dummy : ns[1], loc, tt);
}
}
@@ -910,7 +962,9 @@ namespace build2
}
pair<reference_wrapper<const target_type>, bool> scope::
- derive_target_type (const string& name, const target_type& base)
+ derive_target_type (const string& name,
+ const target_type& base,
+ target_type::flag flags)
{
assert (root_scope () == this);
@@ -928,10 +982,20 @@ namespace build2
//
// Currently, if we define myfile{}: file{}, then myfile{foo} and
// myfile{foo.x} are the same target.
+
+ // Note: copies flags.
//
- unique_ptr<target_type> dt (new target_type (base));
- dt->base = &base;
- dt->factory = &derived_tt_factory;
+ unique_ptr<target_type> dt (
+ new target_type {
+ nullptr, // Will be patched in by insert() below.
+ &base,
+ &derived_tt_factory,
+ base.fixed_extension,
+ base.default_extension,
+ base.pattern,
+ base.print,
+ base.search,
+ base.flags | flags});
#if 0
// @@ We should probably inherit the fixed extension unless overridden with
@@ -1010,8 +1074,17 @@ namespace build2
derive_target_type (const target_type& et)
{
assert (root_scope () == this);
- unique_ptr<target_type> dt (new target_type (et));
- dt->factory = &derived_tt_factory;
+ unique_ptr<target_type> dt (
+ new target_type {
+ nullptr, // Will be patched in by insert() below.
+ et.base,
+ &derived_tt_factory,
+ et.fixed_extension,
+ et.default_extension,
+ et.pattern,
+ et.print,
+ et.search,
+ et.flags});
return root_extra->target_types.insert (et.name, move (dt)).first;
}
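A sketch of deriving a project-local target type through the updated interface (the genfile{} name is made up; the signature is as declared in scope.hxx in this diff, with the flags argument left at its default):

  #include <libbuild2/scope.hxx>
  #include <libbuild2/target.hxx>
  #include <libbuild2/diagnostics.hxx>

  static void
  register_types (build2::scope& rs) // Must be the project's root scope.
  {
    using namespace build2;

    auto dr (rs.derive_target_type ("genfile", file::static_type));

    const target_type& tt (dr.first);

    if (dr.second) // False if it had already been derived.
      text << "derived target type " << tt.name;
  }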
@@ -1028,7 +1101,7 @@ namespace build2
if (er.first->second.front () == nullptr)
{
- er.first->second.front () = new scope (ctx, true /* global */);
+ er.first->second.front () = new scope (ctx, true /* shared */);
er.second = true;
}
@@ -1132,8 +1205,8 @@ namespace build2
}
auto scope_map::
- find (const dir_path& k) const -> pair<scopes::const_iterator,
- scopes::const_iterator>
+ find (const dir_path& k, bool sno) const -> pair<scopes::const_iterator,
+ scopes::const_iterator>
{
assert (k.normalized (false));
auto i (map_.find_sup (k));
@@ -1142,9 +1215,9 @@ namespace build2
auto b (i->second.begin ());
auto e (i->second.end ());
- // Skip NULL first element.
+ // Skip NULL first element if requested.
//
- if (*b == nullptr)
+ if (sno && *b == nullptr)
++b;
assert (b != e);
diff --git a/libbuild2/scope.hxx b/libbuild2/scope.hxx
index f82db72..09d61e9 100644
--- a/libbuild2/scope.hxx
+++ b/libbuild2/scope.hxx
@@ -4,8 +4,6 @@
#ifndef LIBBUILD2_SCOPE_HXX
#define LIBBUILD2_SCOPE_HXX
-#include <unordered_set>
-
#include <libbuild2/types.hxx>
#include <libbuild2/forward.hxx>
#include <libbuild2/utility.hxx>
@@ -28,8 +26,12 @@ namespace build2
using subprojects = map<project_name, dir_path>;
+ // Print as name@dir sequence.
+ //
+ // Note: trailing slash is not printed for the directory path.
+ //
LIBBUILD2_SYMEXPORT ostream&
- operator<< (ostream&, const subprojects&); // Print as name@dir sequence.
+ operator<< (ostream&, const subprojects&);
class LIBBUILD2_SYMEXPORT scope
{
@@ -99,8 +101,8 @@ namespace build2
scope& global_scope () {return const_cast<scope&> (ctx.global_scope);}
const scope& global_scope () const {return ctx.global_scope;}
- // Return true if the specified root scope is a sub-scope of this root
- // scope. Note that both scopes must be root.
+ // Return true if the specified root scope is a sub-scope of (but not the
+ // same as) this root scope. Note that both scopes must be root.
//
bool
sub_root (const scope&) const;
@@ -132,7 +134,7 @@ namespace build2
lookup_type
operator[] (const string& name) const
{
- const variable* var (ctx.var_pool.find (name));
+ const variable* var (var_pool ().find (name));
return var != nullptr ? operator[] (*var) : lookup_type ();
}
@@ -298,13 +300,6 @@ namespace build2
//
variable_type_map target_vars;
- // Set of buildfiles already loaded for this scope. The included
- // buildfiles are checked against the project's root scope while
- // imported -- against the global scope (global_scope).
- //
- public:
- std::unordered_set<path> buildfiles;
-
// Target types.
//
// Note that target types are project-wide (even if the module that
@@ -325,7 +320,7 @@ namespace build2
const target_type&
insert_target_type (const target_type& tt)
{
- return root_extra->target_types.insert (tt);
+ return root_extra->target_types.insert (tt).first;
}
template <typename T>
@@ -363,40 +358,56 @@ namespace build2
// the out directory.
//
pair<const target_type&, optional<string>>
- find_target_type (name&, name&, const location&) const;
+ find_target_type (name&, name&,
+ const location&,
+ const target_type* = nullptr) const;
// As above, but return the result as a target key (with its members
// shallow-pointing to processed parts in the two names).
//
target_key
- find_target_key (name&, name&, const location&) const;
+ find_target_key (name&, name&,
+ const location&,
+ const target_type* = nullptr) const;
// As above, but the names are passed as a vector. Issue appropriate
// diagnostics if the wrong number of names is passed.
//
target_key
- find_target_key (names&, const location&) const;
+ find_target_key (names&,
+ const location&,
+ const target_type* = nullptr) const;
// Similar to the find_target_type() but does not complete relative
// directories.
//
pair<const target_type&, optional<string>>
- find_prerequisite_type (name&, name&, const location&) const;
+ find_prerequisite_type (name&, name&,
+ const location&,
+ const target_type* = nullptr) const;
// As above, but return a prerequisite key.
//
prerequisite_key
- find_prerequisite_key (name&, name&, const location&) const;
+ find_prerequisite_key (name&, name&,
+ const location&,
+ const target_type* = nullptr) const;
prerequisite_key
- find_prerequisite_key (names&, const location&) const;
+ find_prerequisite_key (names&,
+ const location&,
+ const target_type* = nullptr) const;
// Dynamically derive a new target type from an existing one. Return the
// reference to the target type and an indicator of whether it was
// actually created.
//
+ // Note: the flags are OR'ed to the base's flags.
+ //
pair<reference_wrapper<const target_type>, bool>
- derive_target_type (const string& name, const target_type& base);
+ derive_target_type (const string& name,
+ const target_type& base,
+ target_type::flag flags = target_type::flag::none);
template <typename T>
pair<reference_wrapper<const target_type>, bool>
@@ -418,19 +429,20 @@ namespace build2
template <typename T>
void
- insert_rule (action_id a, string hint, const rule& r)
+ insert_rule (action_id a, string name, const rule& r)
{
- rules.insert<T> (a, move (hint), r);
+ rules.insert<T> (a, move (name), r);
}
+ // 0 meta-operation id is treated as an (emulated) wildcard.
+ //
+ // Emulated means that we just iterate over all the meta-operations known
+ // to this project (and they should all be known at this point) and
+ // register the rule for each of them.
+ //
template <typename T>
void
- insert_rule (meta_operation_id mid, operation_id oid,
- string hint,
- const rule& r)
- {
- rules.insert<T> (mid, oid, move (hint), r);
- }
+ insert_rule (meta_operation_id, operation_id, string name, const rule&);
// Operation callbacks.
//
@@ -481,9 +493,10 @@ namespace build2
// is not yet determined (happens at the end of bootstrap_src()). NULL
// means there are no subprojects.
//
- optional<const build2::subprojects*> subprojects;
+ optional<build2::subprojects*> subprojects;
- bool altn; // True if using alternative build file/directory naming.
+ bool altn; // True if using alternative build file/directory naming.
+ bool loaded; // True if already loaded (load_root()).
// Build file/directory naming scheme used by this project.
//
@@ -502,14 +515,40 @@ namespace build2
const path& src_root_file; // build[2]/bootstrap/src-root.build[2]
const path& out_root_file; // build[2]/bootstrap/out-root.build[2]
+ // Project-private variable pool.
+ //
+ // Note: see scope::var_pool_ and use scope::var_pool().
+ //
+ variable_pool var_pool;
+
// Meta/operations supported by this project.
//
build2::meta_operations meta_operations;
build2::operations operations;
- // Modules loaded by this project.
+ // Modules imported/loaded by this project.
+ //
+ module_import_map imported_modules;
+ module_state_map loaded_modules;
+
+ // Buildfiles already loaded for this project.
//
- module_map modules;
+ // We don't expect too many of them per project so let's use a vector
+ // with linear search.
+ //
+ paths buildfiles;
+
+ bool
+ insert_buildfile (const path& f)
+ {
+ bool r (find (buildfiles.begin (),
+ buildfiles.end (),
+ f) == buildfiles.end ());
+ if (r)
+ buildfiles.push_back (f);
+
+ return r;
+ }
// Variable override cache.
//
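A sketch of how a loader might use insert_buildfile() above (rs/bs and the load_buildfile() helper are hypothetical; only insert_buildfile() itself comes from this diff):

  #include <libbuild2/scope.hxx>

  // Hypothetical loading entry point (stand-in for the real parser call).
  //
  void
  load_buildfile (build2::scope& rs, build2::scope& bs, const build2::path& bf);

  static void
  load_once (build2::scope& rs, build2::scope& bs, const build2::path& bf)
  {
    // bf is assumed absolute and normalized, matching what is stored in
    // root_extra->buildfiles.
    //
    if (rs.root_extra->insert_buildfile (bf))
      load_buildfile (rs, bs, bf);

    // Otherwise: already loaded for this project, nothing to do.
  }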
@@ -537,33 +576,53 @@ namespace build2
// when, for example, caching environment-sensitive information.
//
string environment_checksum;
+
+ root_extra_type (scope&, bool altn); // file.cxx
};
unique_ptr<root_extra_type> root_extra;
+ // The last argument is the operation variable (see var_include) or NULL
+ // if not used.
+ //
void
- insert_operation (operation_id id, const operation_info& in)
+ insert_operation (operation_id id,
+ const operation_info& in,
+ const variable* ovar)
{
- root_extra->operations.insert (id, in);
+ // The operation variable should have prerequisite or target visibility.
+ //
+ assert (ovar == nullptr ||
+ (ovar->visibility == variable_visibility::prereq ||
+ ovar->visibility == variable_visibility::target));
+
+ root_extra->operations.insert (id, project_operation_info {&in, ovar});
}
void
insert_meta_operation (meta_operation_id id, const meta_operation_info& in)
{
- root_extra->meta_operations.insert (id, in);
+ root_extra->meta_operations.insert (id, &in);
}
bool
find_module (const string& name) const
{
- return root_extra->modules.find_module<module> (name) != nullptr;
+ return root_extra->loaded_modules.find_module<module> (name) != nullptr;
}
template <typename T>
T*
+ find_module (const string& name)
+ {
+ return root_extra->loaded_modules.find_module<T> (name);
+ }
+
+ template <typename T>
+ const T*
find_module (const string& name) const
{
- return root_extra->modules.find_module<T> (name);
+ return root_extra->loaded_modules.find_module<T> (name);
}
public:
@@ -576,10 +635,29 @@ namespace build2
return const_cast<scope&> (*this);
}
+ // Return the project-private variable pool (which is chained to the
+ // public pool) unless pub is true, in which case return the public pool.
+ //
+ // You would normally go for the public pool directly as an optimization
+ // (for example, in the module's init()) if you know all your variables
+ // are qualified and thus public.
+ //
variable_pool&
- var_pool ()
+ var_pool (bool pub = false)
{
- return ctx.var_pool.rw (*this);
+ return (pub ? ctx.var_pool :
+ var_pool_ != nullptr ? *var_pool_ :
+ root_ != nullptr ? *root_->var_pool_ :
+ ctx.var_pool).rw (*this);
+ }
+
+ const variable_pool&
+ var_pool (bool pub = false) const
+ {
+ return (pub ? ctx.var_pool :
+ var_pool_ != nullptr ? *var_pool_ :
+ root_ != nullptr ? *root_->var_pool_ :
+ ctx.var_pool);
}
private:
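A sketch of choosing between the public and project-private pools via the var_pool() accessor above (the variable names are made up; variable_pool::insert<T>() is the usual way to enter variables):

  #include <libbuild2/scope.hxx>
  #include <libbuild2/variable.hxx>

  static void
  init_vars (build2::scope& rs) // Project root scope, e.g., in a module init().
  {
    using namespace build2;

    // Qualified configuration variable: go straight to the public pool.
    //
    variable_pool& pub (rs.var_pool (true /* public */));
    const variable& enabled (pub.insert<bool> ("config.mymod.enabled"));

    // Module-internal variable: keep it in the project-private pool.
    //
    variable_pool& prv (rs.var_pool ());
    const variable& scratch (prv.insert<string> ("mymod.scratch"));

    // ... use enabled/scratch to set defaults, etc.
  }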
@@ -587,13 +665,13 @@ namespace build2
friend class scope_map;
friend class temp_scope;
- // These two from <libbuild2/file.hxx> set strong_.
+ // These from <libbuild2/file.hxx> set strong_.
//
- friend LIBBUILD2_SYMEXPORT void create_bootstrap_outer (scope&);
+ friend LIBBUILD2_SYMEXPORT void create_bootstrap_outer (scope&, bool);
friend LIBBUILD2_SYMEXPORT scope& create_bootstrap_inner (scope&,
const dir_path&);
- scope (context&, bool global);
+ scope (context&, bool shared);
~scope ();
// Return true if this root scope can be amalgamated.
@@ -608,6 +686,8 @@ namespace build2
scope* root_;
scope* strong_ = nullptr; // Only set on root scopes.
// NULL means no strong amalgamation.
+
+ variable_pool* var_pool_ = nullptr; // For temp_scope override.
};
inline bool
@@ -675,24 +755,28 @@ namespace build2
// Temporary scope. The idea is to be able to create a temporary scope in
// order not to change the variables in the current scope. Such a scope is
- // not entered in to the scope map. As a result it can only be used as a
- // temporary set of variables. In particular, defining targets directly in
- // such a scope will surely end up badly. Defining any nested scopes will be
- // as if defining such a scope in the parent (since path() returns parent's
- // path).
+ // not entered into the scope map and its parent is the global scope. As a
+ // result it can only be used as a temporary set of variables. In
+ // particular, defining targets directly in such a scope will surely end up
+ // badly.
//
class temp_scope: public scope
{
public:
- temp_scope (scope& p)
- : scope (p.ctx, false /* global */)
+ temp_scope (scope& gs)
+ : scope (gs.ctx, false /* shared */),
+ var_pool_ (nullptr /* shared */, &gs.ctx.var_pool.rw (gs), nullptr)
{
- out_path_ = p.out_path_;
- src_path_ = p.src_path_;
- parent_ = &p;
- root_ = p.root_;
- // No need to copy strong_ since we are never root scope.
+ // Note that making this scope its own root is a bad idea.
+ //
+ root_ = nullptr;
+ parent_ = &gs;
+ out_path_ = gs.out_path_;
+ scope::var_pool_ = &var_pool_;
}
+
+ private:
+ variable_pool var_pool_;
};
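A sketch of the intended temp_scope usage: staging a variable value without touching any real scope (the mymod.scratch variable is made up; assigning via scope::assign() is assumed to follow the usual value assignment semantics):

  #include <libbuild2/scope.hxx>
  #include <libbuild2/context.hxx>
  #include <libbuild2/variable.hxx>

  static void
  stage (build2::context& ctx)
  {
    using namespace build2;

    scope& gs (ctx.global_scope.rw ());
    const variable& var (gs.var_pool (true).insert<string> ("mymod.scratch"));

    // Assignments here affect neither the global scope nor any other scope.
    //
    temp_scope ts (gs);
    ts.assign (var) = string ("value");
  }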
// Scope map. Protected by the phase mutex.
@@ -709,6 +793,8 @@ namespace build2
// The first element, if not NULL, is for the "owning" out path. The rest
// of the elements are for the src path shallow references.
//
+ // Note that the global scope is in the first element.
+ //
struct scopes: small_vector<scope*, 3>
{
scopes () = default;
@@ -748,6 +834,10 @@ namespace build2
// Find all the scopes that encompass this path (out or src).
//
+ // If skip_null_out is false, then the first element always corresponds to
+ // the out scope and is NULL if there is none (see struct scopes above for
+ // details).
+ //
// Note that the returned range will never be empty (there is always the
// global scope).
//
@@ -756,7 +846,7 @@ namespace build2
// single invocation. How can we pick the scope that is "ours", for some
// definition of "ours"?
//
- // The current think is that a project can be "associated" with other
+ // The current thinking is that a project can be "associated" with other
// projects: its sub-projects and imported projects (it doesn't feel like
// its super-projects should be in this set, but maybe). And "ours" could
// mean belonging to one of the associated projects. This feels correct
@@ -780,7 +870,7 @@ namespace build2
// "island append" restriction we have on loading additional buildfile.
//
LIBBUILD2_SYMEXPORT pair<scopes::const_iterator, scopes::const_iterator>
- find (const dir_path&) const;
+ find (const dir_path&, bool skip_null_out = true) const;
const_iterator begin () const {return map_.begin ();}
const_iterator end () const {return map_.end ();}
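A sketch of iterating the result of find() with skip_null_out disabled (the directory argument and the diagnostics are illustrative):

  #include <libbuild2/scope.hxx>
  #include <libbuild2/context.hxx>
  #include <libbuild2/diagnostics.hxx>

  static void
  dump_scopes (const build2::context& ctx, const build2::dir_path& d)
  {
    using namespace build2;

    // d is assumed absolute and normalized. The first element is the out
    // scope (or NULL if there is none), followed by src shallow references.
    //
    auto r (ctx.scopes.find (d, false /* skip_null_out */));

    for (auto i (r.first); i != r.second; ++i)
    {
      if (const scope* s = *i)
        text << s->out_path ();
    }
  }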
diff --git a/libbuild2/scope.ixx b/libbuild2/scope.ixx
index e123e4a..5975c76 100644
--- a/libbuild2/scope.ixx
+++ b/libbuild2/scope.ixx
@@ -146,9 +146,11 @@ namespace build2
}
inline target_key scope::
- find_target_key (name& n, name& o, const location& loc) const
+ find_target_key (name& n, name& o,
+ const location& loc,
+ const target_type* tt) const
{
- auto p (find_target_type (n, o, loc));
+ auto p (find_target_type (n, o, loc, tt));
return target_key {
&p.first,
&n.dir,
@@ -158,9 +160,11 @@ namespace build2
}
inline prerequisite_key scope::
- find_prerequisite_key (name& n, name& o, const location& loc) const
+ find_prerequisite_key (name& n, name& o,
+ const location& loc,
+ const target_type* tt) const
{
- auto p (find_prerequisite_type (n, o, loc));
+ auto p (find_prerequisite_type (n, o, loc, tt));
return prerequisite_key {
n.proj,
{
@@ -173,6 +177,37 @@ namespace build2
this};
}
+ template <typename T>
+ inline void scope::
+ insert_rule (meta_operation_id mid, operation_id oid,
+ string name,
+ const rule& r)
+ {
+ if (mid != 0)
+ rules.insert<T> (mid, oid, move (name), r);
+ else
+ {
+ auto& ms (root_scope ()->root_extra->meta_operations);
+
+ for (size_t i (1), n (ms.size ()); i != n; ++i)
+ {
+ if (ms[i] != nullptr)
+ {
+ // Skip a few well-known meta-operations that cannot possibly
+ // trigger a rule match.
+ //
+ mid = static_cast<meta_operation_id> (i);
+
+ if (mid != noop_id &&
+ mid != info_id &&
+ mid != create_id &&
+ mid != disfigure_id)
+ rules.insert<T> (mid, oid, name, r);
+ }
+ }
+ }
+ }
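A sketch of registering a rule using the emulated wildcard meta-operation id implemented above (the rule instance, its "my" name, and the file{} target type are illustrative):

  #include <libbuild2/scope.hxx>
  #include <libbuild2/rule.hxx>
  #include <libbuild2/action.hxx>  // update_id
  #include <libbuild2/target.hxx>

  static void
  register_rule (build2::scope& rs, const build2::rule& my_rule)
  {
    using namespace build2;

    // Register for the update operation in every meta-operation known to
    // this project (0 is the emulated wildcard).
    //
    rs.insert_rule<file> (0 /* all meta-operations */, update_id, "my", my_rule);
  }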
+
inline dir_path
src_out (const dir_path& out, const scope& r)
{
diff --git a/libbuild2/script/builtin-options.cxx b/libbuild2/script/builtin-options.cxx
index 56e7f24..b71b9d3 100644
--- a/libbuild2/script/builtin-options.cxx
+++ b/libbuild2/script/builtin-options.cxx
@@ -18,215 +18,14 @@
#include <utility>
#include <ostream>
#include <sstream>
+#include <cstring>
namespace build2
{
- namespace script
+ namespace build
{
namespace cli
{
- // unknown_option
- //
- unknown_option::
- ~unknown_option () throw ()
- {
- }
-
- void unknown_option::
- print (::std::ostream& os) const
- {
- os << "unknown option '" << option ().c_str () << "'";
- }
-
- const char* unknown_option::
- what () const throw ()
- {
- return "unknown option";
- }
-
- // unknown_argument
- //
- unknown_argument::
- ~unknown_argument () throw ()
- {
- }
-
- void unknown_argument::
- print (::std::ostream& os) const
- {
- os << "unknown argument '" << argument ().c_str () << "'";
- }
-
- const char* unknown_argument::
- what () const throw ()
- {
- return "unknown argument";
- }
-
- // missing_value
- //
- missing_value::
- ~missing_value () throw ()
- {
- }
-
- void missing_value::
- print (::std::ostream& os) const
- {
- os << "missing value for option '" << option ().c_str () << "'";
- }
-
- const char* missing_value::
- what () const throw ()
- {
- return "missing option value";
- }
-
- // invalid_value
- //
- invalid_value::
- ~invalid_value () throw ()
- {
- }
-
- void invalid_value::
- print (::std::ostream& os) const
- {
- os << "invalid value '" << value ().c_str () << "' for option '"
- << option ().c_str () << "'";
-
- if (!message ().empty ())
- os << ": " << message ().c_str ();
- }
-
- const char* invalid_value::
- what () const throw ()
- {
- return "invalid option value";
- }
-
- // eos_reached
- //
- void eos_reached::
- print (::std::ostream& os) const
- {
- os << what ();
- }
-
- const char* eos_reached::
- what () const throw ()
- {
- return "end of argument stream reached";
- }
-
- // scanner
- //
- scanner::
- ~scanner ()
- {
- }
-
- // argv_scanner
- //
- bool argv_scanner::
- more ()
- {
- return i_ < argc_;
- }
-
- const char* argv_scanner::
- peek ()
- {
- if (i_ < argc_)
- return argv_[i_];
- else
- throw eos_reached ();
- }
-
- const char* argv_scanner::
- next ()
- {
- if (i_ < argc_)
- {
- const char* r (argv_[i_]);
-
- if (erase_)
- {
- for (int i (i_ + 1); i < argc_; ++i)
- argv_[i - 1] = argv_[i];
-
- --argc_;
- argv_[argc_] = 0;
- }
- else
- ++i_;
-
- ++start_position_;
- return r;
- }
- else
- throw eos_reached ();
- }
-
- void argv_scanner::
- skip ()
- {
- if (i_ < argc_)
- {
- ++i_;
- ++start_position_;
- }
- else
- throw eos_reached ();
- }
-
- std::size_t argv_scanner::
- position ()
- {
- return start_position_;
- }
-
- // vector_scanner
- //
- bool vector_scanner::
- more ()
- {
- return i_ < v_.size ();
- }
-
- const char* vector_scanner::
- peek ()
- {
- if (i_ < v_.size ())
- return v_[i_].c_str ();
- else
- throw eos_reached ();
- }
-
- const char* vector_scanner::
- next ()
- {
- if (i_ < v_.size ())
- return v_[i_++].c_str ();
- else
- throw eos_reached ();
- }
-
- void vector_scanner::
- skip ()
- {
- if (i_ < v_.size ())
- ++i_;
- else
- throw eos_reached ();
- }
-
- std::size_t vector_scanner::
- position ()
- {
- return start_position_ + i_;
- }
-
template <typename X>
struct parser
{
@@ -254,10 +53,31 @@ namespace build2
struct parser<bool>
{
static void
- parse (bool& x, scanner& s)
+ parse (bool& x, bool& xs, scanner& s)
{
- s.next ();
- x = true;
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ const char* v (s.next ());
+
+ if (std::strcmp (v, "1") == 0 ||
+ std::strcmp (v, "true") == 0 ||
+ std::strcmp (v, "TRUE") == 0 ||
+ std::strcmp (v, "True") == 0)
+ x = true;
+ else if (std::strcmp (v, "0") == 0 ||
+ std::strcmp (v, "false") == 0 ||
+ std::strcmp (v, "FALSE") == 0 ||
+ std::strcmp (v, "False") == 0)
+ x = false;
+ else
+ throw invalid_value (o, v);
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
}
};
@@ -367,6 +187,56 @@ namespace build2
}
};
+ template <typename K, typename V, typename C>
+ struct parser<std::multimap<K, V, C> >
+ {
+ static void
+ parse (std::multimap<K, V, C>& m, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ std::size_t pos (s.position ());
+ std::string ov (s.next ());
+ std::string::size_type p = ov.find ('=');
+
+ K k = K ();
+ V v = V ();
+ std::string kstr (ov, 0, p);
+ std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (o),
+ 0
+ };
+
+ bool dummy;
+ if (!kstr.empty ())
+ {
+ av[1] = const_cast<char*> (kstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<K>::parse (k, dummy, s);
+ }
+
+ if (!vstr.empty ())
+ {
+ av[1] = const_cast<char*> (vstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<V>::parse (v, dummy, s);
+ }
+
+ m.insert (typename std::multimap<K, V, C>::value_type (k, v));
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
template <typename X, typename T, T X::*M>
void
thunk (X& x, scanner& s)
@@ -374,6 +244,14 @@ namespace build2
parser<T>::parse (x.*M, s);
}
+ template <typename X, bool X::*M>
+ void
+ thunk (X& x, scanner& s)
+ {
+ s.next ();
+ x.*M = true;
+ }
+
template <typename X, typename T, T X::*M, bool X::*S>
void
thunk (X& x, scanner& s)
@@ -385,7 +263,6 @@ namespace build2
}
#include <map>
-#include <cstring>
namespace build2
{
@@ -406,13 +283,13 @@ namespace build2
set_options (int& argc,
char** argv,
bool erase,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: exact_ (),
newline_ (),
whitespace_ ()
{
- ::build2::script::cli::argv_scanner s (argc, argv, erase);
+ ::build2::build::cli::argv_scanner s (argc, argv, erase);
_parse (s, opt, arg);
}
@@ -421,13 +298,13 @@ namespace build2
int& argc,
char** argv,
bool erase,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: exact_ (),
newline_ (),
whitespace_ ()
{
- ::build2::script::cli::argv_scanner s (start, argc, argv, erase);
+ ::build2::build::cli::argv_scanner s (start, argc, argv, erase);
_parse (s, opt, arg);
}
@@ -436,13 +313,13 @@ namespace build2
char** argv,
int& end,
bool erase,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: exact_ (),
newline_ (),
whitespace_ ()
{
- ::build2::script::cli::argv_scanner s (argc, argv, erase);
+ ::build2::build::cli::argv_scanner s (argc, argv, erase);
_parse (s, opt, arg);
end = s.end ();
}
@@ -453,21 +330,21 @@ namespace build2
char** argv,
int& end,
bool erase,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: exact_ (),
newline_ (),
whitespace_ ()
{
- ::build2::script::cli::argv_scanner s (start, argc, argv, erase);
+ ::build2::build::cli::argv_scanner s (start, argc, argv, erase);
_parse (s, opt, arg);
end = s.end ();
}
set_options::
- set_options (::build2::script::cli::scanner& s,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ set_options (::build2::build::cli::scanner& s,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: exact_ (),
newline_ (),
whitespace_ ()
@@ -476,7 +353,7 @@ namespace build2
}
typedef
- std::map<std::string, void (*) (set_options&, ::build2::script::cli::scanner&)>
+ std::map<std::string, void (*) (set_options&, ::build2::build::cli::scanner&)>
_cli_set_options_map;
static _cli_set_options_map _cli_set_options_map_;
@@ -486,24 +363,24 @@ namespace build2
_cli_set_options_map_init ()
{
_cli_set_options_map_["--exact"] =
- &::build2::script::cli::thunk< set_options, bool, &set_options::exact_ >;
+ &::build2::build::cli::thunk< set_options, &set_options::exact_ >;
_cli_set_options_map_["-e"] =
- &::build2::script::cli::thunk< set_options, bool, &set_options::exact_ >;
+ &::build2::build::cli::thunk< set_options, &set_options::exact_ >;
_cli_set_options_map_["--newline"] =
- &::build2::script::cli::thunk< set_options, bool, &set_options::newline_ >;
+ &::build2::build::cli::thunk< set_options, &set_options::newline_ >;
_cli_set_options_map_["-n"] =
- &::build2::script::cli::thunk< set_options, bool, &set_options::newline_ >;
+ &::build2::build::cli::thunk< set_options, &set_options::newline_ >;
_cli_set_options_map_["--whitespace"] =
- &::build2::script::cli::thunk< set_options, bool, &set_options::whitespace_ >;
+ &::build2::build::cli::thunk< set_options, &set_options::whitespace_ >;
_cli_set_options_map_["-w"] =
- &::build2::script::cli::thunk< set_options, bool, &set_options::whitespace_ >;
+ &::build2::build::cli::thunk< set_options, &set_options::whitespace_ >;
}
};
static _cli_set_options_map_init _cli_set_options_map_init_;
bool set_options::
- _parse (const char* o, ::build2::script::cli::scanner& s)
+ _parse (const char* o, ::build2::build::cli::scanner& s)
{
_cli_set_options_map::const_iterator i (_cli_set_options_map_.find (o));
@@ -517,13 +394,13 @@ namespace build2
}
bool set_options::
- _parse (::build2::script::cli::scanner& s,
- ::build2::script::cli::unknown_mode opt_mode,
- ::build2::script::cli::unknown_mode arg_mode)
+ _parse (::build2::build::cli::scanner& s,
+ ::build2::build::cli::unknown_mode opt_mode,
+ ::build2::build::cli::unknown_mode arg_mode)
{
// Can't skip combined flags (--no-combined-flags).
//
- assert (opt_mode != ::build2::script::cli::unknown_mode::skip);
+ assert (opt_mode != ::build2::build::cli::unknown_mode::skip);
bool r = false;
bool opt = true;
@@ -565,14 +442,14 @@ namespace build2
const_cast<char*> (v)
};
- ::build2::script::cli::argv_scanner ns (0, ac, av);
+ ::build2::build::cli::argv_scanner ns (0, ac, av);
if (_parse (co.c_str (), ns))
{
// Parsed the option but not its value?
//
if (ns.end () != 2)
- throw ::build2::script::cli::invalid_value (co, v);
+ throw ::build2::build::cli::invalid_value (co, v);
s.next ();
r = true;
@@ -613,7 +490,7 @@ namespace build2
cf
};
- ::build2::script::cli::argv_scanner ns (0, ac, av);
+ ::build2::build::cli::argv_scanner ns (0, ac, av);
if (!_parse (cf, ns))
break;
@@ -638,19 +515,19 @@ namespace build2
switch (opt_mode)
{
- case ::build2::script::cli::unknown_mode::skip:
+ case ::build2::build::cli::unknown_mode::skip:
{
s.skip ();
r = true;
continue;
}
- case ::build2::script::cli::unknown_mode::stop:
+ case ::build2::build::cli::unknown_mode::stop:
{
break;
}
- case ::build2::script::cli::unknown_mode::fail:
+ case ::build2::build::cli::unknown_mode::fail:
{
- throw ::build2::script::cli::unknown_option (o);
+ throw ::build2::build::cli::unknown_option (o);
}
}
@@ -660,19 +537,19 @@ namespace build2
switch (arg_mode)
{
- case ::build2::script::cli::unknown_mode::skip:
+ case ::build2::build::cli::unknown_mode::skip:
{
s.skip ();
r = true;
continue;
}
- case ::build2::script::cli::unknown_mode::stop:
+ case ::build2::build::cli::unknown_mode::stop:
{
break;
}
- case ::build2::script::cli::unknown_mode::fail:
+ case ::build2::build::cli::unknown_mode::fail:
{
- throw ::build2::script::cli::unknown_argument (o);
+ throw ::build2::build::cli::unknown_argument (o);
}
}
@@ -695,11 +572,11 @@ namespace build2
timeout_options (int& argc,
char** argv,
bool erase,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: success_ ()
{
- ::build2::script::cli::argv_scanner s (argc, argv, erase);
+ ::build2::build::cli::argv_scanner s (argc, argv, erase);
_parse (s, opt, arg);
}
@@ -708,11 +585,11 @@ namespace build2
int& argc,
char** argv,
bool erase,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: success_ ()
{
- ::build2::script::cli::argv_scanner s (start, argc, argv, erase);
+ ::build2::build::cli::argv_scanner s (start, argc, argv, erase);
_parse (s, opt, arg);
}
@@ -721,11 +598,11 @@ namespace build2
char** argv,
int& end,
bool erase,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: success_ ()
{
- ::build2::script::cli::argv_scanner s (argc, argv, erase);
+ ::build2::build::cli::argv_scanner s (argc, argv, erase);
_parse (s, opt, arg);
end = s.end ();
}
@@ -736,26 +613,26 @@ namespace build2
char** argv,
int& end,
bool erase,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: success_ ()
{
- ::build2::script::cli::argv_scanner s (start, argc, argv, erase);
+ ::build2::build::cli::argv_scanner s (start, argc, argv, erase);
_parse (s, opt, arg);
end = s.end ();
}
timeout_options::
- timeout_options (::build2::script::cli::scanner& s,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ timeout_options (::build2::build::cli::scanner& s,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: success_ ()
{
_parse (s, opt, arg);
}
typedef
- std::map<std::string, void (*) (timeout_options&, ::build2::script::cli::scanner&)>
+ std::map<std::string, void (*) (timeout_options&, ::build2::build::cli::scanner&)>
_cli_timeout_options_map;
static _cli_timeout_options_map _cli_timeout_options_map_;
@@ -765,16 +642,16 @@ namespace build2
_cli_timeout_options_map_init ()
{
_cli_timeout_options_map_["--success"] =
- &::build2::script::cli::thunk< timeout_options, bool, &timeout_options::success_ >;
+ &::build2::build::cli::thunk< timeout_options, &timeout_options::success_ >;
_cli_timeout_options_map_["-s"] =
- &::build2::script::cli::thunk< timeout_options, bool, &timeout_options::success_ >;
+ &::build2::build::cli::thunk< timeout_options, &timeout_options::success_ >;
}
};
static _cli_timeout_options_map_init _cli_timeout_options_map_init_;
bool timeout_options::
- _parse (const char* o, ::build2::script::cli::scanner& s)
+ _parse (const char* o, ::build2::build::cli::scanner& s)
{
_cli_timeout_options_map::const_iterator i (_cli_timeout_options_map_.find (o));
@@ -788,13 +665,13 @@ namespace build2
}
bool timeout_options::
- _parse (::build2::script::cli::scanner& s,
- ::build2::script::cli::unknown_mode opt_mode,
- ::build2::script::cli::unknown_mode arg_mode)
+ _parse (::build2::build::cli::scanner& s,
+ ::build2::build::cli::unknown_mode opt_mode,
+ ::build2::build::cli::unknown_mode arg_mode)
{
// Can't skip combined flags (--no-combined-flags).
//
- assert (opt_mode != ::build2::script::cli::unknown_mode::skip);
+ assert (opt_mode != ::build2::build::cli::unknown_mode::skip);
bool r = false;
bool opt = true;
@@ -836,14 +713,14 @@ namespace build2
const_cast<char*> (v)
};
- ::build2::script::cli::argv_scanner ns (0, ac, av);
+ ::build2::build::cli::argv_scanner ns (0, ac, av);
if (_parse (co.c_str (), ns))
{
// Parsed the option but not its value?
//
if (ns.end () != 2)
- throw ::build2::script::cli::invalid_value (co, v);
+ throw ::build2::build::cli::invalid_value (co, v);
s.next ();
r = true;
@@ -884,7 +761,7 @@ namespace build2
cf
};
- ::build2::script::cli::argv_scanner ns (0, ac, av);
+ ::build2::build::cli::argv_scanner ns (0, ac, av);
if (!_parse (cf, ns))
break;
@@ -909,19 +786,19 @@ namespace build2
switch (opt_mode)
{
- case ::build2::script::cli::unknown_mode::skip:
+ case ::build2::build::cli::unknown_mode::skip:
{
s.skip ();
r = true;
continue;
}
- case ::build2::script::cli::unknown_mode::stop:
+ case ::build2::build::cli::unknown_mode::stop:
{
break;
}
- case ::build2::script::cli::unknown_mode::fail:
+ case ::build2::build::cli::unknown_mode::fail:
{
- throw ::build2::script::cli::unknown_option (o);
+ throw ::build2::build::cli::unknown_option (o);
}
}
@@ -931,19 +808,19 @@ namespace build2
switch (arg_mode)
{
- case ::build2::script::cli::unknown_mode::skip:
+ case ::build2::build::cli::unknown_mode::skip:
{
s.skip ();
r = true;
continue;
}
- case ::build2::script::cli::unknown_mode::stop:
+ case ::build2::build::cli::unknown_mode::stop:
{
break;
}
- case ::build2::script::cli::unknown_mode::fail:
+ case ::build2::build::cli::unknown_mode::fail:
{
- throw ::build2::script::cli::unknown_argument (o);
+ throw ::build2::build::cli::unknown_argument (o);
}
}
@@ -969,14 +846,14 @@ namespace build2
export_options (int& argc,
char** argv,
bool erase,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: unset_ (),
unset_specified_ (false),
clear_ (),
clear_specified_ (false)
{
- ::build2::script::cli::argv_scanner s (argc, argv, erase);
+ ::build2::build::cli::argv_scanner s (argc, argv, erase);
_parse (s, opt, arg);
}
@@ -985,14 +862,14 @@ namespace build2
int& argc,
char** argv,
bool erase,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: unset_ (),
unset_specified_ (false),
clear_ (),
clear_specified_ (false)
{
- ::build2::script::cli::argv_scanner s (start, argc, argv, erase);
+ ::build2::build::cli::argv_scanner s (start, argc, argv, erase);
_parse (s, opt, arg);
}
@@ -1001,14 +878,14 @@ namespace build2
char** argv,
int& end,
bool erase,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: unset_ (),
unset_specified_ (false),
clear_ (),
clear_specified_ (false)
{
- ::build2::script::cli::argv_scanner s (argc, argv, erase);
+ ::build2::build::cli::argv_scanner s (argc, argv, erase);
_parse (s, opt, arg);
end = s.end ();
}
@@ -1019,22 +896,22 @@ namespace build2
char** argv,
int& end,
bool erase,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: unset_ (),
unset_specified_ (false),
clear_ (),
clear_specified_ (false)
{
- ::build2::script::cli::argv_scanner s (start, argc, argv, erase);
+ ::build2::build::cli::argv_scanner s (start, argc, argv, erase);
_parse (s, opt, arg);
end = s.end ();
}
export_options::
- export_options (::build2::script::cli::scanner& s,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ export_options (::build2::build::cli::scanner& s,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: unset_ (),
unset_specified_ (false),
clear_ (),
@@ -1044,7 +921,7 @@ namespace build2
}
typedef
- std::map<std::string, void (*) (export_options&, ::build2::script::cli::scanner&)>
+ std::map<std::string, void (*) (export_options&, ::build2::build::cli::scanner&)>
_cli_export_options_map;
static _cli_export_options_map _cli_export_options_map_;
@@ -1054,16 +931,16 @@ namespace build2
_cli_export_options_map_init ()
{
_cli_export_options_map_["--unset"] =
- &::build2::script::cli::thunk< export_options, vector<string>, &export_options::unset_,
+ &::build2::build::cli::thunk< export_options, vector<string>, &export_options::unset_,
&export_options::unset_specified_ >;
_cli_export_options_map_["-u"] =
- &::build2::script::cli::thunk< export_options, vector<string>, &export_options::unset_,
+ &::build2::build::cli::thunk< export_options, vector<string>, &export_options::unset_,
&export_options::unset_specified_ >;
_cli_export_options_map_["--clear"] =
- &::build2::script::cli::thunk< export_options, vector<string>, &export_options::clear_,
+ &::build2::build::cli::thunk< export_options, vector<string>, &export_options::clear_,
&export_options::clear_specified_ >;
_cli_export_options_map_["-c"] =
- &::build2::script::cli::thunk< export_options, vector<string>, &export_options::clear_,
+ &::build2::build::cli::thunk< export_options, vector<string>, &export_options::clear_,
&export_options::clear_specified_ >;
}
};
@@ -1071,7 +948,7 @@ namespace build2
static _cli_export_options_map_init _cli_export_options_map_init_;
bool export_options::
- _parse (const char* o, ::build2::script::cli::scanner& s)
+ _parse (const char* o, ::build2::build::cli::scanner& s)
{
_cli_export_options_map::const_iterator i (_cli_export_options_map_.find (o));
@@ -1085,13 +962,304 @@ namespace build2
}
bool export_options::
- _parse (::build2::script::cli::scanner& s,
- ::build2::script::cli::unknown_mode opt_mode,
- ::build2::script::cli::unknown_mode arg_mode)
+ _parse (::build2::build::cli::scanner& s,
+ ::build2::build::cli::unknown_mode opt_mode,
+ ::build2::build::cli::unknown_mode arg_mode)
+ {
+ // Can't skip combined flags (--no-combined-flags).
+ //
+ assert (opt_mode != ::build2::build::cli::unknown_mode::skip);
+
+ bool r = false;
+ bool opt = true;
+
+ while (s.more ())
+ {
+ const char* o = s.peek ();
+
+ if (std::strcmp (o, "--") == 0)
+ {
+ opt = false;
+ s.skip ();
+ r = true;
+ continue;
+ }
+
+ if (opt)
+ {
+ if (_parse (o, s))
+ {
+ r = true;
+ continue;
+ }
+
+ if (std::strncmp (o, "-", 1) == 0 && o[1] != '\0')
+ {
+ // Handle combined option values.
+ //
+ std::string co;
+ if (const char* v = std::strchr (o, '='))
+ {
+ co.assign (o, 0, v - o);
+ ++v;
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (co.c_str ()),
+ const_cast<char*> (v)
+ };
+
+ ::build2::build::cli::argv_scanner ns (0, ac, av);
+
+ if (_parse (co.c_str (), ns))
+ {
+ // Parsed the option but not its value?
+ //
+ if (ns.end () != 2)
+ throw ::build2::build::cli::invalid_value (co, v);
+
+ s.next ();
+ r = true;
+ continue;
+ }
+ else
+ {
+ // Set the unknown option and fall through.
+ //
+ o = co.c_str ();
+ }
+ }
+
+ // Handle combined flags.
+ //
+ char cf[3];
+ {
+ const char* p = o + 1;
+ for (; *p != '\0'; ++p)
+ {
+ if (!((*p >= 'a' && *p <= 'z') ||
+ (*p >= 'A' && *p <= 'Z') ||
+ (*p >= '0' && *p <= '9')))
+ break;
+ }
+
+ if (*p == '\0')
+ {
+ for (p = o + 1; *p != '\0'; ++p)
+ {
+ std::strcpy (cf, "-");
+ cf[1] = *p;
+ cf[2] = '\0';
+
+ int ac (1);
+ char* av[] =
+ {
+ cf
+ };
+
+ ::build2::build::cli::argv_scanner ns (0, ac, av);
+
+ if (!_parse (cf, ns))
+ break;
+ }
+
+ if (*p == '\0')
+ {
+ // All handled.
+ //
+ s.next ();
+ r = true;
+ continue;
+ }
+ else
+ {
+ // Set the unknown option and fall through.
+ //
+ o = cf;
+ }
+ }
+ }
+
+ switch (opt_mode)
+ {
+ case ::build2::build::cli::unknown_mode::skip:
+ {
+ s.skip ();
+ r = true;
+ continue;
+ }
+ case ::build2::build::cli::unknown_mode::stop:
+ {
+ break;
+ }
+ case ::build2::build::cli::unknown_mode::fail:
+ {
+ throw ::build2::build::cli::unknown_option (o);
+ }
+ }
+
+ break;
+ }
+ }
+
+ switch (arg_mode)
+ {
+ case ::build2::build::cli::unknown_mode::skip:
+ {
+ s.skip ();
+ r = true;
+ continue;
+ }
+ case ::build2::build::cli::unknown_mode::stop:
+ {
+ break;
+ }
+ case ::build2::build::cli::unknown_mode::fail:
+ {
+ throw ::build2::build::cli::unknown_argument (o);
+ }
+ }
+
+ break;
+ }
+
+ return r;
+ }
+
+ // for_options
+ //
+
+ for_options::
+ for_options ()
+ : exact_ (),
+ newline_ (),
+ whitespace_ ()
+ {
+ }
+
+ for_options::
+ for_options (int& argc,
+ char** argv,
+ bool erase,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ : exact_ (),
+ newline_ (),
+ whitespace_ ()
+ {
+ ::build2::build::cli::argv_scanner s (argc, argv, erase);
+ _parse (s, opt, arg);
+ }
+
+ for_options::
+ for_options (int start,
+ int& argc,
+ char** argv,
+ bool erase,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ : exact_ (),
+ newline_ (),
+ whitespace_ ()
+ {
+ ::build2::build::cli::argv_scanner s (start, argc, argv, erase);
+ _parse (s, opt, arg);
+ }
+
+ for_options::
+ for_options (int& argc,
+ char** argv,
+ int& end,
+ bool erase,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ : exact_ (),
+ newline_ (),
+ whitespace_ ()
+ {
+ ::build2::build::cli::argv_scanner s (argc, argv, erase);
+ _parse (s, opt, arg);
+ end = s.end ();
+ }
+
+ for_options::
+ for_options (int start,
+ int& argc,
+ char** argv,
+ int& end,
+ bool erase,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ : exact_ (),
+ newline_ (),
+ whitespace_ ()
+ {
+ ::build2::build::cli::argv_scanner s (start, argc, argv, erase);
+ _parse (s, opt, arg);
+ end = s.end ();
+ }
+
+ for_options::
+ for_options (::build2::build::cli::scanner& s,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ : exact_ (),
+ newline_ (),
+ whitespace_ ()
+ {
+ _parse (s, opt, arg);
+ }
+
+ typedef
+ std::map<std::string, void (*) (for_options&, ::build2::build::cli::scanner&)>
+ _cli_for_options_map;
+
+ static _cli_for_options_map _cli_for_options_map_;
+
+ struct _cli_for_options_map_init
+ {
+ _cli_for_options_map_init ()
+ {
+ _cli_for_options_map_["--exact"] =
+ &::build2::build::cli::thunk< for_options, &for_options::exact_ >;
+ _cli_for_options_map_["-e"] =
+ &::build2::build::cli::thunk< for_options, &for_options::exact_ >;
+ _cli_for_options_map_["--newline"] =
+ &::build2::build::cli::thunk< for_options, &for_options::newline_ >;
+ _cli_for_options_map_["-n"] =
+ &::build2::build::cli::thunk< for_options, &for_options::newline_ >;
+ _cli_for_options_map_["--whitespace"] =
+ &::build2::build::cli::thunk< for_options, &for_options::whitespace_ >;
+ _cli_for_options_map_["-w"] =
+ &::build2::build::cli::thunk< for_options, &for_options::whitespace_ >;
+ }
+ };
+
+ static _cli_for_options_map_init _cli_for_options_map_init_;
+
+ bool for_options::
+ _parse (const char* o, ::build2::build::cli::scanner& s)
+ {
+ _cli_for_options_map::const_iterator i (_cli_for_options_map_.find (o));
+
+ if (i != _cli_for_options_map_.end ())
+ {
+ (*(i->second)) (*this, s);
+ return true;
+ }
+
+ return false;
+ }
+
+ bool for_options::
+ _parse (::build2::build::cli::scanner& s,
+ ::build2::build::cli::unknown_mode opt_mode,
+ ::build2::build::cli::unknown_mode arg_mode)
{
// Can't skip combined flags (--no-combined-flags).
//
- assert (opt_mode != ::build2::script::cli::unknown_mode::skip);
+ assert (opt_mode != ::build2::build::cli::unknown_mode::skip);
bool r = false;
bool opt = true;
@@ -1133,14 +1301,14 @@ namespace build2
const_cast<char*> (v)
};
- ::build2::script::cli::argv_scanner ns (0, ac, av);
+ ::build2::build::cli::argv_scanner ns (0, ac, av);
if (_parse (co.c_str (), ns))
{
// Parsed the option but not its value?
//
if (ns.end () != 2)
- throw ::build2::script::cli::invalid_value (co, v);
+ throw ::build2::build::cli::invalid_value (co, v);
s.next ();
r = true;
@@ -1181,7 +1349,7 @@ namespace build2
cf
};
- ::build2::script::cli::argv_scanner ns (0, ac, av);
+ ::build2::build::cli::argv_scanner ns (0, ac, av);
if (!_parse (cf, ns))
break;
@@ -1206,19 +1374,19 @@ namespace build2
switch (opt_mode)
{
- case ::build2::script::cli::unknown_mode::skip:
+ case ::build2::build::cli::unknown_mode::skip:
{
s.skip ();
r = true;
continue;
}
- case ::build2::script::cli::unknown_mode::stop:
+ case ::build2::build::cli::unknown_mode::stop:
{
break;
}
- case ::build2::script::cli::unknown_mode::fail:
+ case ::build2::build::cli::unknown_mode::fail:
{
- throw ::build2::script::cli::unknown_option (o);
+ throw ::build2::build::cli::unknown_option (o);
}
}
@@ -1228,19 +1396,19 @@ namespace build2
switch (arg_mode)
{
- case ::build2::script::cli::unknown_mode::skip:
+ case ::build2::build::cli::unknown_mode::skip:
{
s.skip ();
r = true;
continue;
}
- case ::build2::script::cli::unknown_mode::stop:
+ case ::build2::build::cli::unknown_mode::stop:
{
break;
}
- case ::build2::script::cli::unknown_mode::fail:
+ case ::build2::build::cli::unknown_mode::fail:
{
- throw ::build2::script::cli::unknown_argument (o);
+ throw ::build2::build::cli::unknown_argument (o);
}
}
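
For illustration, the generated maps above dispatch each recognized option string to a thunk that sets the corresponding member of the options struct. Below is a minimal standalone sketch of that scheme with hypothetical names (the real thunks additionally take the cli scanner so that value options can consume their argument):

#include <cassert>
#include <map>
#include <string>

// Hypothetical options struct mirroring for_options' flag members.
struct opts
{
  bool exact = false;
  bool newline = false;
  bool whitespace = false;
};

// Thunk: set the member identified by the pointer-to-member M, analogous
// to ::build2::build::cli::thunk<for_options, &for_options::exact_>.
template <typename O, bool O::*M>
static void
thunk (O& o)
{
  o.*M = true;
}

int
main ()
{
  std::map<std::string, void (*) (opts&)> m;
  m["--exact"]      = &thunk<opts, &opts::exact>;
  m["-e"]           = &thunk<opts, &opts::exact>;
  m["--newline"]    = &thunk<opts, &opts::newline>;
  m["-n"]           = &thunk<opts, &opts::newline>;
  m["--whitespace"] = &thunk<opts, &opts::whitespace>;
  m["-w"]           = &thunk<opts, &opts::whitespace>;

  opts o;
  m.at ("-e") (o);
  m.at ("--newline") (o);
  assert (o.exact && o.newline && !o.whitespace);
}
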
diff --git a/libbuild2/script/builtin-options.hxx b/libbuild2/script/builtin-options.hxx
index d665279..9361d18 100644
--- a/libbuild2/script/builtin-options.hxx
+++ b/libbuild2/script/builtin-options.hxx
@@ -12,281 +12,7 @@
//
// End prologue.
-#include <vector>
-#include <iosfwd>
-#include <string>
-#include <cstddef>
-#include <exception>
-
-#ifndef CLI_POTENTIALLY_UNUSED
-# if defined(_MSC_VER) || defined(__xlC__)
-# define CLI_POTENTIALLY_UNUSED(x) (void*)&x
-# else
-# define CLI_POTENTIALLY_UNUSED(x) (void)x
-# endif
-#endif
-
-namespace build2
-{
- namespace script
- {
- namespace cli
- {
- class unknown_mode
- {
- public:
- enum value
- {
- skip,
- stop,
- fail
- };
-
- unknown_mode (value);
-
- operator value () const
- {
- return v_;
- }
-
- private:
- value v_;
- };
-
- // Exceptions.
- //
-
- class exception: public std::exception
- {
- public:
- virtual void
- print (::std::ostream&) const = 0;
- };
-
- ::std::ostream&
- operator<< (::std::ostream&, const exception&);
-
- class unknown_option: public exception
- {
- public:
- virtual
- ~unknown_option () throw ();
-
- unknown_option (const std::string& option);
-
- const std::string&
- option () const;
-
- virtual void
- print (::std::ostream&) const;
-
- virtual const char*
- what () const throw ();
-
- private:
- std::string option_;
- };
-
- class unknown_argument: public exception
- {
- public:
- virtual
- ~unknown_argument () throw ();
-
- unknown_argument (const std::string& argument);
-
- const std::string&
- argument () const;
-
- virtual void
- print (::std::ostream&) const;
-
- virtual const char*
- what () const throw ();
-
- private:
- std::string argument_;
- };
-
- class missing_value: public exception
- {
- public:
- virtual
- ~missing_value () throw ();
-
- missing_value (const std::string& option);
-
- const std::string&
- option () const;
-
- virtual void
- print (::std::ostream&) const;
-
- virtual const char*
- what () const throw ();
-
- private:
- std::string option_;
- };
-
- class invalid_value: public exception
- {
- public:
- virtual
- ~invalid_value () throw ();
-
- invalid_value (const std::string& option,
- const std::string& value,
- const std::string& message = std::string ());
-
- const std::string&
- option () const;
-
- const std::string&
- value () const;
-
- const std::string&
- message () const;
-
- virtual void
- print (::std::ostream&) const;
-
- virtual const char*
- what () const throw ();
-
- private:
- std::string option_;
- std::string value_;
- std::string message_;
- };
-
- class eos_reached: public exception
- {
- public:
- virtual void
- print (::std::ostream&) const;
-
- virtual const char*
- what () const throw ();
- };
-
- // Command line argument scanner interface.
- //
- // The values returned by next() are guaranteed to be valid
- // for the two previous arguments up until a call to a third
- // peek() or next().
- //
- // The position() function returns a monotonically-increasing
- // number which, if stored, can later be used to determine the
- // relative position of the argument returned by the following
- // call to next(). Note that if multiple scanners are used to
- // extract arguments from multiple sources, then the end
- // position of the previous scanner should be used as the
- // start position of the next.
- //
- class scanner
- {
- public:
- virtual
- ~scanner ();
-
- virtual bool
- more () = 0;
-
- virtual const char*
- peek () = 0;
-
- virtual const char*
- next () = 0;
-
- virtual void
- skip () = 0;
-
- virtual std::size_t
- position () = 0;
- };
-
- class argv_scanner: public scanner
- {
- public:
- argv_scanner (int& argc,
- char** argv,
- bool erase = false,
- std::size_t start_position = 0);
-
- argv_scanner (int start,
- int& argc,
- char** argv,
- bool erase = false,
- std::size_t start_position = 0);
-
- int
- end () const;
-
- virtual bool
- more ();
-
- virtual const char*
- peek ();
-
- virtual const char*
- next ();
-
- virtual void
- skip ();
-
- virtual std::size_t
- position ();
-
- protected:
- std::size_t start_position_;
- int i_;
- int& argc_;
- char** argv_;
- bool erase_;
- };
-
- class vector_scanner: public scanner
- {
- public:
- vector_scanner (const std::vector<std::string>&,
- std::size_t start = 0,
- std::size_t start_position = 0);
-
- std::size_t
- end () const;
-
- void
- reset (std::size_t start = 0, std::size_t start_position = 0);
-
- virtual bool
- more ();
-
- virtual const char*
- peek ();
-
- virtual const char*
- next ();
-
- virtual void
- skip ();
-
- virtual std::size_t
- position ();
-
- private:
- std::size_t start_position_;
- const std::vector<std::string>& v_;
- std::size_t i_;
- };
-
- template <typename X>
- struct parser;
- }
- }
-}
-
-#include <libbuild2/types.hxx>
+#include <libbuild2/common-options.hxx>
namespace build2
{
@@ -300,34 +26,34 @@ namespace build2
set_options (int& argc,
char** argv,
bool erase = false,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
set_options (int start,
int& argc,
char** argv,
bool erase = false,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
set_options (int& argc,
char** argv,
int& end,
bool erase = false,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
set_options (int start,
int& argc,
char** argv,
int& end,
bool erase = false,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
- set_options (::build2::script::cli::scanner&,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ set_options (::build2::build::cli::scanner&,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
// Option accessors and modifiers.
//
@@ -362,13 +88,13 @@ namespace build2
//
protected:
bool
- _parse (const char*, ::build2::script::cli::scanner&);
+ _parse (const char*, ::build2::build::cli::scanner&);
private:
bool
- _parse (::build2::script::cli::scanner&,
- ::build2::script::cli::unknown_mode option,
- ::build2::script::cli::unknown_mode argument);
+ _parse (::build2::build::cli::scanner&,
+ ::build2::build::cli::unknown_mode option,
+ ::build2::build::cli::unknown_mode argument);
public:
bool exact_;
@@ -384,34 +110,34 @@ namespace build2
timeout_options (int& argc,
char** argv,
bool erase = false,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
timeout_options (int start,
int& argc,
char** argv,
bool erase = false,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
timeout_options (int& argc,
char** argv,
int& end,
bool erase = false,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
timeout_options (int start,
int& argc,
char** argv,
int& end,
bool erase = false,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
- timeout_options (::build2::script::cli::scanner&,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ timeout_options (::build2::build::cli::scanner&,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
// Option accessors and modifiers.
//
@@ -428,13 +154,13 @@ namespace build2
//
protected:
bool
- _parse (const char*, ::build2::script::cli::scanner&);
+ _parse (const char*, ::build2::build::cli::scanner&);
private:
bool
- _parse (::build2::script::cli::scanner&,
- ::build2::script::cli::unknown_mode option,
- ::build2::script::cli::unknown_mode argument);
+ _parse (::build2::build::cli::scanner&,
+ ::build2::build::cli::unknown_mode option,
+ ::build2::build::cli::unknown_mode argument);
public:
bool success_;
@@ -448,34 +174,34 @@ namespace build2
export_options (int& argc,
char** argv,
bool erase = false,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
export_options (int start,
int& argc,
char** argv,
bool erase = false,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
export_options (int& argc,
char** argv,
int& end,
bool erase = false,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
export_options (int start,
int& argc,
char** argv,
int& end,
bool erase = false,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
- export_options (::build2::script::cli::scanner&,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ export_options (::build2::build::cli::scanner&,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
// Option accessors and modifiers.
//
@@ -513,13 +239,13 @@ namespace build2
//
protected:
bool
- _parse (const char*, ::build2::script::cli::scanner&);
+ _parse (const char*, ::build2::build::cli::scanner&);
private:
bool
- _parse (::build2::script::cli::scanner&,
- ::build2::script::cli::unknown_mode option,
- ::build2::script::cli::unknown_mode argument);
+ _parse (::build2::build::cli::scanner&,
+ ::build2::build::cli::unknown_mode option,
+ ::build2::build::cli::unknown_mode argument);
public:
vector<string> unset_;
@@ -527,6 +253,90 @@ namespace build2
vector<string> clear_;
bool clear_specified_;
};
+
+ class for_options
+ {
+ public:
+ for_options ();
+
+ for_options (int& argc,
+ char** argv,
+ bool erase = false,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ for_options (int start,
+ int& argc,
+ char** argv,
+ bool erase = false,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ for_options (int& argc,
+ char** argv,
+ int& end,
+ bool erase = false,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ for_options (int start,
+ int& argc,
+ char** argv,
+ int& end,
+ bool erase = false,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ for_options (::build2::build::cli::scanner&,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ // Option accessors and modifiers.
+ //
+ const bool&
+ exact () const;
+
+ bool&
+ exact ();
+
+ void
+ exact (const bool&);
+
+ const bool&
+ newline () const;
+
+ bool&
+ newline ();
+
+ void
+ newline (const bool&);
+
+ const bool&
+ whitespace () const;
+
+ bool&
+ whitespace ();
+
+ void
+ whitespace (const bool&);
+
+ // Implementation details.
+ //
+ protected:
+ bool
+ _parse (const char*, ::build2::build::cli::scanner&);
+
+ private:
+ bool
+ _parse (::build2::build::cli::scanner&,
+ ::build2::build::cli::unknown_mode option,
+ ::build2::build::cli::unknown_mode argument);
+
+ public:
+ bool exact_;
+ bool newline_;
+ bool whitespace_;
+ };
}
}
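
For illustration, a minimal sketch (not part of the commit) of driving the for_options class declared above through the cli scanner interface; it assumes the build::cli scanners come in via <libbuild2/common-options.hxx>, which this header now includes, and mirrors how the for pseudo-builtin's argument vector is parsed in parser.cxx further down:

#include <string>
#include <vector>

#include <libbuild2/script/builtin-options.hxx>

// Parse something like {"-n", "line"}: flags first, then the loop
// variable name, optionally followed by variable attributes.
//
static void
parse_for_args (const std::vector<std::string>& args)
{
  namespace cli = build2::build::cli;

  cli::vector_scanner scan (args);
  build2::script::for_options ops (scan); // Consumes -e/-n/-w.

  bool newline (ops.newline ());          // -n|--newline
  bool whitespace (ops.whitespace ());    // -w|--whitespace
  bool exact (ops.exact ());              // -e|--exact

  // With the default unknown_mode::stop for arguments, the first
  // non-option (the variable name) is left in the scanner.
  //
  std::string vname (scan.more () ? scan.next () : std::string ());

  (void) newline; (void) whitespace; (void) exact; (void) vname;
}
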
diff --git a/libbuild2/script/builtin-options.ixx b/libbuild2/script/builtin-options.ixx
index 8fef25a..575eb95 100644
--- a/libbuild2/script/builtin-options.ixx
+++ b/libbuild2/script/builtin-options.ixx
@@ -9,164 +9,6 @@
//
// End prologue.
-#include <cassert>
-
-namespace build2
-{
- namespace script
- {
- namespace cli
- {
- // unknown_mode
- //
- inline unknown_mode::
- unknown_mode (value v)
- : v_ (v)
- {
- }
-
- // exception
- //
- inline ::std::ostream&
- operator<< (::std::ostream& os, const exception& e)
- {
- e.print (os);
- return os;
- }
-
- // unknown_option
- //
- inline unknown_option::
- unknown_option (const std::string& option)
- : option_ (option)
- {
- }
-
- inline const std::string& unknown_option::
- option () const
- {
- return option_;
- }
-
- // unknown_argument
- //
- inline unknown_argument::
- unknown_argument (const std::string& argument)
- : argument_ (argument)
- {
- }
-
- inline const std::string& unknown_argument::
- argument () const
- {
- return argument_;
- }
-
- // missing_value
- //
- inline missing_value::
- missing_value (const std::string& option)
- : option_ (option)
- {
- }
-
- inline const std::string& missing_value::
- option () const
- {
- return option_;
- }
-
- // invalid_value
- //
- inline invalid_value::
- invalid_value (const std::string& option,
- const std::string& value,
- const std::string& message)
- : option_ (option),
- value_ (value),
- message_ (message)
- {
- }
-
- inline const std::string& invalid_value::
- option () const
- {
- return option_;
- }
-
- inline const std::string& invalid_value::
- value () const
- {
- return value_;
- }
-
- inline const std::string& invalid_value::
- message () const
- {
- return message_;
- }
-
- // argv_scanner
- //
- inline argv_scanner::
- argv_scanner (int& argc,
- char** argv,
- bool erase,
- std::size_t sp)
- : start_position_ (sp + 1),
- i_ (1),
- argc_ (argc),
- argv_ (argv),
- erase_ (erase)
- {
- }
-
- inline argv_scanner::
- argv_scanner (int start,
- int& argc,
- char** argv,
- bool erase,
- std::size_t sp)
- : start_position_ (sp + static_cast<std::size_t> (start)),
- i_ (start),
- argc_ (argc),
- argv_ (argv),
- erase_ (erase)
- {
- }
-
- inline int argv_scanner::
- end () const
- {
- return i_;
- }
-
- // vector_scanner
- //
- inline vector_scanner::
- vector_scanner (const std::vector<std::string>& v,
- std::size_t i,
- std::size_t sp)
- : start_position_ (sp), v_ (v), i_ (i)
- {
- }
-
- inline std::size_t vector_scanner::
- end () const
- {
- return i_;
- }
-
- inline void vector_scanner::
- reset (std::size_t i, std::size_t sp)
- {
- i_ = i;
- start_position_ = sp;
- }
- }
- }
-}
-
namespace build2
{
namespace script
@@ -311,6 +153,63 @@ namespace build2
{
this->clear_specified_ = x;
}
+
+ // for_options
+ //
+
+ inline const bool& for_options::
+ exact () const
+ {
+ return this->exact_;
+ }
+
+ inline bool& for_options::
+ exact ()
+ {
+ return this->exact_;
+ }
+
+ inline void for_options::
+ exact (const bool& x)
+ {
+ this->exact_ = x;
+ }
+
+ inline const bool& for_options::
+ newline () const
+ {
+ return this->newline_;
+ }
+
+ inline bool& for_options::
+ newline ()
+ {
+ return this->newline_;
+ }
+
+ inline void for_options::
+ newline (const bool& x)
+ {
+ this->newline_ = x;
+ }
+
+ inline const bool& for_options::
+ whitespace () const
+ {
+ return this->whitespace_;
+ }
+
+ inline bool& for_options::
+ whitespace ()
+ {
+ return this->whitespace_;
+ }
+
+ inline void for_options::
+ whitespace (const bool& x)
+ {
+ this->whitespace_ = x;
+ }
}
}
diff --git a/libbuild2/script/builtin.cli b/libbuild2/script/builtin.cli
index 1e3fb45..c993983 100644
--- a/libbuild2/script/builtin.cli
+++ b/libbuild2/script/builtin.cli
@@ -1,7 +1,7 @@
// file : libbuild2/script/builtin.cli
// license : MIT; see accompanying LICENSE file
-include <libbuild2/types.hxx>;
+include <libbuild2/common.cli>;
// Note that options in this file are undocumented because we generate neither
// the usage printing code nor man pages. Instead, they are documented in the
@@ -30,5 +30,12 @@ namespace build2
vector<string> --unset|-u;
vector<string> --clear|-c;
};
+
+ class for_options
+ {
+ bool --exact|-e;
+ bool --newline|-n;
+ bool --whitespace|-w;
+ };
}
}
diff --git a/libbuild2/script/lexer.cxx b/libbuild2/script/lexer.cxx
index 7577149..e13bbdb 100644
--- a/libbuild2/script/lexer.cxx
+++ b/libbuild2/script/lexer.cxx
@@ -24,10 +24,7 @@ namespace build2
bool q (true); // quotes
if (!esc)
- {
- assert (!state_.empty ());
- esc = state_.top ().escapes;
- }
+ esc = current_state ().escapes;
switch (m)
{
@@ -84,7 +81,7 @@ namespace build2
}
assert (ps == '\0');
- state_.push (
+ mode_impl (
state {m, data, nullopt, false, false, ps, s, n, q, *esc, s1, s2});
}
@@ -93,7 +90,7 @@ namespace build2
{
token r;
- switch (state_.top ().mode)
+ switch (mode ())
{
case lexer_mode::command_expansion:
case lexer_mode::here_line_single:
@@ -119,7 +116,7 @@ namespace build2
xchar c (get ());
uint64_t ln (c.line), cn (c.column);
- const state& st (state_.top ());
+ const state& st (current_state ());
lexer_mode m (st.mode);
auto make_token = [&sep, &m, ln, cn] (type t)
diff --git a/libbuild2/script/lexer.hxx b/libbuild2/script/lexer.hxx
index dbfdfcc..3cbcc03 100644
--- a/libbuild2/script/lexer.hxx
+++ b/libbuild2/script/lexer.hxx
@@ -112,6 +112,8 @@ namespace build2
const redirect_aliases_type& redirect_aliases;
protected:
+ using build2::lexer::mode; // Getter.
+
lexer (istream& is, const path_name& name, uint64_t line,
const char* escapes,
bool set_mode,
diff --git a/libbuild2/script/parser.cxx b/libbuild2/script/parser.cxx
index 7722002..84d2afc 100644
--- a/libbuild2/script/parser.cxx
+++ b/libbuild2/script/parser.cxx
@@ -3,9 +3,14 @@
#include <libbuild2/script/parser.hxx>
+#include <cstring> // strchr()
+#include <sstream>
+
#include <libbuild2/variable.hxx>
-#include <libbuild2/script/run.hxx> // exit
+
+#include <libbuild2/script/run.hxx> // exit, stream_reader
#include <libbuild2/script/lexer.hxx>
+#include <libbuild2/script/builtin-options.hxx>
using namespace std;
@@ -15,6 +20,33 @@ namespace build2
{
using type = token_type;
+ bool parser::
+ need_cmdline_relex (const string& s)
+ {
+ for (auto i (s.begin ()), e (s.end ()); i != e; ++i)
+ {
+ char c (*i);
+
+ if (c == '\\')
+ {
+ if (++i == e)
+ return false;
+
+ c = *i;
+
+ if (c == '\\' || c == '\'' || c == '\"')
+ return true;
+
+ // Fall through.
+ }
+
+ if (strchr ("|<>&\"'", c) != nullptr)
+ return true;
+ }
+
+ return false;
+ }
+
value parser::
parse_variable_line (token& t, type& tt)
{
@@ -111,18 +143,20 @@ namespace build2
return nullopt;
}
- pair<command_expr, parser::here_docs> parser::
+ parser::parse_command_expr_result parser::
parse_command_expr (token& t, type& tt,
- const redirect_aliases& ra)
+ const redirect_aliases& ra,
+ optional<token>&& program)
{
- // enter: first token of the command line
+ // enter: first (or second, if program) token of the command line
// leave: <newline> or unknown token
command_expr expr;
// OR-ed to an implied false for the first term.
//
- expr.push_back ({expr_operator::log_or, command_pipe ()});
+ if (!pre_parse_)
+ expr.push_back ({expr_operator::log_or, command_pipe ()});
command c; // Command being assembled.
@@ -189,8 +223,8 @@ namespace build2
// Add the next word to either one of the pending positions or to
// program arguments by default.
//
- auto add_word = [&c, &p, &mod, &check_regex_mod, this] (
- string&& w, const location& l)
+ auto add_word = [&c, &p, &mod, &check_regex_mod, this]
+ (string&& w, const location& l)
{
auto add_merge = [&l, this] (optional<redirect>& r,
const string& w,
@@ -668,11 +702,30 @@ namespace build2
const location ll (get_location (t)); // Line location.
// Keep parsing chunks of the command line until we see one of the
- // "terminators" (newline, exit status comparison, etc).
+ // "terminators" (newline or unknown/unexpected token).
//
location l (ll);
names ns; // Reuse to reduce allocations.
+ bool for_loop (false);
+
+ if (program)
+ {
+ assert (program->type == type::word);
+
+ // Note that here we skip all the parse_program() business since the
+ // program can only be one of the specially-recognized names.
+ //
+ if (program->value == "for")
+ for_loop = true;
+ else
+ assert (false); // Must be specially-recognized program.
+
+ // Save the program name and continue parsing as a command.
+ //
+ add_word (move (program->value), get_location (*program));
+ }
+
for (bool done (false); !done; l = get_location (t))
{
tt = ra.resolve (tt);
@@ -688,6 +741,9 @@ namespace build2
case type::equal:
case type::not_equal:
{
+ if (for_loop)
+ fail (l) << "for-loop exit code cannot be checked";
+
if (!pre_parse_)
check_pending (l);
@@ -718,30 +774,39 @@ namespace build2
}
case type::pipe:
+ if (for_loop)
+ fail (l) << "for-loop must be last command in a pipe";
+ // Fall through.
+
case type::log_or:
case type::log_and:
+ if (for_loop)
+ fail (l) << "command expression involving for-loop";
+ // Fall through.
- case type::in_pass:
- case type::out_pass:
+ case type::clean:
+ if (for_loop)
+ fail (l) << "cleanup in for-loop";
+ // Fall through.
- case type::in_null:
+ case type::out_pass:
case type::out_null:
-
case type::out_trace:
-
case type::out_merge:
-
- case type::in_str:
- case type::in_doc:
case type::out_str:
case type::out_doc:
-
- case type::in_file:
case type::out_file_cmp:
case type::out_file_ovr:
case type::out_file_app:
+ if (for_loop)
+ fail (l) << "output redirect in for-loop";
+ // Fall through.
- case type::clean:
+ case type::in_pass:
+ case type::in_null:
+ case type::in_str:
+ case type::in_doc:
+ case type::in_file:
{
if (pre_parse_)
{
@@ -939,6 +1004,42 @@ namespace build2
next (t, tt);
break;
}
+ case type::lsbrace:
+ {
+ // Recompose the attributes into a single command argument.
+ //
+ assert (!pre_parse_);
+
+ attributes_push (t, tt, true /* standalone */);
+
+ attributes as (attributes_pop ());
+ assert (!as.empty ());
+
+ ostringstream os;
+ names storage;
+ char c ('[');
+ for (const attribute& a: as)
+ {
+ os << c << a.name;
+
+ if (!a.value.null)
+ {
+ os << '=';
+
+ storage.clear ();
+ to_stream (os,
+ reverse (a.value, storage, true /* reduce */),
+ quote_mode::normal,
+ '@');
+ }
+
+ c = ',';
+ }
+ os << ']';
+
+ add_word (os.str (), l);
+ break;
+ }
default:
{
// Bail out if this is one of the unknown tokens.
@@ -1007,11 +1108,12 @@ namespace build2
hd.push_back (
here_doc {
{rd},
- move (end),
- (t.qtype == quote_type::unquoted ||
- t.qtype == quote_type::single),
- move (mod),
- r.intro, move (r.flags)});
+ move (end),
+ (t.qtype == quote_type::unquoted ||
+ t.qtype == quote_type::single),
+ move (mod),
+ r.intro,
+ move (r.flags)});
p = pending::none;
mod.clear ();
@@ -1024,16 +1126,34 @@ namespace build2
bool prog (p == pending::program_first ||
p == pending::program_next);
- // Check if this is the env pseudo-builtin.
+ // Check if this is the env pseudo-builtin or the for-loop.
//
bool env (false);
- if (prog && tt == type::word && t.value == "env")
+ if (prog && tt == type::word)
{
- parsed_env r (parse_env_builtin (t, tt));
- c.cwd = move (r.cwd);
- c.variables = move (r.variables);
- c.timeout = r.timeout;
- env = true;
+ if (t.value == "env")
+ {
+ parsed_env r (parse_env_builtin (t, tt));
+ c.cwd = move (r.cwd);
+ c.variables = move (r.variables);
+ c.timeout = r.timeout;
+ c.timeout_success = r.timeout_success;
+ env = true;
+ }
+ else if (t.value == "for")
+ {
+ if (expr.size () > 1)
+ fail (l) << "command expression involving for-loop";
+
+ for_loop = true;
+
+ // Save 'for' as a program name and continue parsing as a
+ // command.
+ //
+ add_word (move (t.value), l);
+ next (t, tt);
+ continue;
+ }
}
// Parse the next chunk as names to get expansion, etc. Note that
@@ -1092,16 +1212,17 @@ namespace build2
// Process what we got.
//
- // First see if this is a value that should not be re-lexed. The
- // long term plan is to only re-lex values of a special type
- // representing a canned command line.
+ // First see if this is a value that should not be re-lexed. We
+ // only re-lex values of the special `cmdline` type that
+ // represents a canned command line.
//
// Otherwise, determine whether anything inside was quoted (note
// that the current token is "next" and is not part of this).
//
- bool q (
- (pr.value && !relex_) ||
- (quoted () - (t.qtype != quote_type::unquoted ? 1 : 0)) != 0);
+ bool lex (
+ pr.value
+ ? pr.type != nullptr && pr.type->is_a<cmdline> ()
+ : (quoted () - (t.qtype != quote_type::unquoted ? 1 : 0)) == 0);
for (name& n: ns)
{
@@ -1115,7 +1236,7 @@ namespace build2
{
diag_record dr (fail (l));
dr << "invalid string value ";
- to_stream (dr.os, n, true /* quote */);
+ to_stream (dr.os, n, quote_mode::normal);
}
// If it is a quoted chunk, then we add the word as is.
@@ -1123,10 +1244,7 @@ namespace build2
// interesting characters (operators plus quotes/escapes),
// then no need to re-lex.
//
- // NOTE: update quoting (script.cxx:to_stream_q()) if adding
- // any new characters.
- //
- if (q || s.find_first_of ("|&<>\'\"\\") == string::npos)
+ if (!lex || !need_cmdline_relex (s))
add_word (move (s), l);
else
{
@@ -1216,9 +1334,16 @@ namespace build2
switch (tt)
{
case type::pipe:
+ if (for_loop)
+ fail (l) << "for-loop must be last command in a pipe";
+ // Fall through.
+
case type::log_or:
case type::log_and:
{
+ if (for_loop)
+ fail (l) << "command expression involving for-loop";
+
// Check that the previous command makes sense.
//
check_command (l, tt != type::pipe);
@@ -1238,30 +1363,11 @@ namespace build2
break;
}
- case type::in_pass:
- case type::out_pass:
-
- case type::in_null:
- case type::out_null:
-
- case type::out_trace:
-
- case type::out_merge:
-
- case type::in_str:
- case type::out_str:
-
- case type::in_file:
- case type::out_file_cmp:
- case type::out_file_ovr:
- case type::out_file_app:
- {
- parse_redirect (move (t), tt, l);
- break;
- }
-
case type::clean:
{
+ if (for_loop)
+ fail (l) << "cleanup in for-loop";
+
parse_clean (t);
break;
}
@@ -1272,6 +1378,27 @@ namespace build2
fail (l) << "here-document redirect in expansion";
break;
}
+
+ case type::out_pass:
+ case type::out_null:
+ case type::out_trace:
+ case type::out_merge:
+ case type::out_str:
+ case type::out_file_cmp:
+ case type::out_file_ovr:
+ case type::out_file_app:
+ if (for_loop)
+ fail (l) << "output redirect in for-loop";
+ // Fall through.
+
+ case type::in_pass:
+ case type::in_null:
+ case type::in_str:
+ case type::in_file:
+ {
+ parse_redirect (move (t), tt, l);
+ break;
+ }
}
}
@@ -1299,7 +1426,7 @@ namespace build2
expr.back ().pipe.push_back (move (c));
}
- return make_pair (move (expr), move (hd));
+ return parse_command_expr_result {move (expr), move (hd), for_loop};
}
parser::parsed_env parser::
@@ -1351,7 +1478,7 @@ namespace build2
{
diag_record dr (fail (l));
dr << "invalid string value ";
- to_stream (dr.os, n, true /* quote */);
+ to_stream (dr.os, n, quote_mode::normal);
}
}
@@ -1475,6 +1602,10 @@ namespace build2
{
r.timeout = chrono::seconds (*v);
}
+ else if (o == "-s" || o == "--timeout-success")
+ {
+ r.timeout_success = true;
+ }
else if (optional<dir_path> v = dir ("--cwd", "-c"))
{
r.cwd = move (*v);
@@ -1489,6 +1620,9 @@ namespace build2
break;
}
+ if (r.timeout_success && !r.timeout)
+ fail (l) << "env: -s|--timeout-success specified without -t|--timeout";
+
// Parse arguments (variable sets).
//
for (; i != e; ++i)
@@ -1537,7 +1671,7 @@ namespace build2
diag_record dr;
dr << fail (l) << "expected exit status instead of ";
- to_stream (dr.os, ns, true /* quote */);
+ to_stream (dr.os, ns, quote_mode::normal);
dr << info << "exit status is an unsigned integer less than 256";
}
@@ -1548,7 +1682,7 @@ namespace build2
void parser::
parse_here_documents (token& t, type& tt,
- pair<command_expr, here_docs>& p)
+ parse_command_expr_result& pr)
{
// enter: newline
// leave: newline
@@ -1556,7 +1690,7 @@ namespace build2
// Parse here-document fragments in the order they were mentioned on
// the command line.
//
- for (here_doc& h: p.second)
+ for (here_doc& h: pr.docs)
{
// Switch to the here-line mode which is like single/double-quoted
// string but recognized the newline as a separator.
@@ -1576,7 +1710,7 @@ namespace build2
{
auto i (h.redirects.cbegin ());
- command& c (p.first[i->expr].pipe[i->pipe]);
+ command& c (pr.expr[i->expr].pipe[i->pipe]);
optional<redirect>& r (i->fd == 0 ? c.in :
i->fd == 1 ? c.out :
@@ -1608,7 +1742,7 @@ namespace build2
//
for (++i; i != h.redirects.cend (); ++i)
{
- command& c (p.first[i->expr].pipe[i->pipe]);
+ command& c (pr.expr[i->expr].pipe[i->pipe]);
optional<redirect>& ir (i->fd == 0 ? c.in :
i->fd == 1 ? c.out :
@@ -2034,6 +2168,8 @@ namespace build2
else if (n == "elif") r = line_type::cmd_elif;
else if (n == "elif!") r = line_type::cmd_elifn;
else if (n == "else") r = line_type::cmd_else;
+ else if (n == "while") r = line_type::cmd_while;
+ else if (n == "for") r = line_type::cmd_for_stream;
else if (n == "end") r = line_type::cmd_end;
else
{
@@ -2064,8 +2200,9 @@ namespace build2
exec_lines (lines::const_iterator i, lines::const_iterator e,
const function<exec_set_function>& exec_set,
const function<exec_cmd_function>& exec_cmd,
- const function<exec_if_function>& exec_if,
- size_t& li,
+ const function<exec_cond_function>& exec_cond,
+ const function<exec_for_function>& exec_for,
+ const iteration_index* ii, size_t& li,
variable_pool* var_pool)
{
try
@@ -2089,6 +2226,73 @@ namespace build2
next (t, tt);
const location ll (get_location (t));
+ // If end is true, then find the flow control construct's end ('end'
+ // line). Otherwise, find the flow control construct's block end
+ // ('end', 'else', etc). If skip is true then increment the command
+ // line index.
+ //
+ auto fcend = [e, &li] (lines::const_iterator j,
+ bool end,
+ bool skip) -> lines::const_iterator
+ {
+ // We need to be aware of nested flow control constructs.
+ //
+ size_t n (0);
+
+ for (++j; j != e; ++j)
+ {
+ line_type lt (j->type);
+
+ if (lt == line_type::cmd_if ||
+ lt == line_type::cmd_ifn ||
+ lt == line_type::cmd_while ||
+ lt == line_type::cmd_for_stream ||
+ lt == line_type::cmd_for_args)
+ ++n;
+
+ // If we are nested then we just wait until we get back
+ // to the surface.
+ //
+ if (n == 0)
+ {
+ switch (lt)
+ {
+ case line_type::cmd_elif:
+ case line_type::cmd_elifn:
+ case line_type::cmd_else:
+ if (end) break;
+ // Fall through.
+ case line_type::cmd_end: return j;
+ default: break;
+ }
+ }
+
+ if (lt == line_type::cmd_end)
+ --n;
+
+ if (skip)
+ {
+ // Note that we don't count else, end, and 'for x: ...' as
+ // commands.
+ //
+ switch (lt)
+ {
+ case line_type::cmd:
+ case line_type::cmd_if:
+ case line_type::cmd_ifn:
+ case line_type::cmd_elif:
+ case line_type::cmd_elifn:
+ case line_type::cmd_for_stream:
+ case line_type::cmd_while: ++li; break;
+ default: break;
+ }
+ }
+ }
+
+ assert (false); // Missing end.
+ return e;
+ };
+
switch (lt)
{
case line_type::var:
@@ -2124,7 +2328,10 @@ namespace build2
single = true;
}
- exec_cmd (t, tt, li++, single, ll);
+ exec_cmd (t, tt,
+ ii, li++, single,
+ nullptr /* command_function */,
+ ll);
replay_stop ();
break;
@@ -2140,7 +2347,7 @@ namespace build2
bool take;
if (lt != line_type::cmd_else)
{
- take = exec_if (t, tt, li++, ll);
+ take = exec_cond (t, tt, ii, li++, ll);
if (lt == line_type::cmd_ifn || lt == line_type::cmd_elifn)
take = !take;
@@ -2153,97 +2360,383 @@ namespace build2
replay_stop ();
- // If end is true, then find the 'end' line. Otherwise, find
- // the next if-else line. If skip is true then increment the
- // command line index.
+ // If we are taking this branch then we need to parse all the
+ // lines until the next if-else line and then skip all the lines
+ // until the end (unless we are already at the end).
+ //
+ // Otherwise, we need to skip all the lines until the next
+ // if-else line and then continue parsing.
//
- auto next = [e, &li] (lines::const_iterator j,
- bool end,
- bool skip) -> lines::const_iterator
+ if (take)
+ {
+ // Find block end.
+ //
+ lines::const_iterator j (fcend (i, false, false));
+
+ if (!exec_lines (i + 1, j,
+ exec_set, exec_cmd, exec_cond, exec_for,
+ ii, li,
+ var_pool))
+ return false;
+
+ // Find construct end.
+ //
+ i = j->type == line_type::cmd_end ? j : fcend (j, true, true);
+ }
+ else
+ {
+ // Find block end.
+ //
+ i = fcend (i, false, true);
+
+ if (i->type != line_type::cmd_end)
+ --i; // Continue with this line (e.g., elif or else).
+ }
+
+ break;
+ }
+ case line_type::cmd_while:
+ {
+ // The while-loop construct end. Set on the first iteration.
+ //
+ lines::const_iterator we (e);
+
+ size_t wli (li);
+
+ for (iteration_index wi {1, ii};; wi.index++)
+ {
+ next (t, tt); // Skip to start of command.
+
+ bool exec (exec_cond (t, tt, &wi, li++, ll));
+
+ replay_stop ();
+
+ // If the condition evaluates to true, then we need to parse
+ // all the lines until the end line, prepare for the condition
+ // reevaluation, and re-iterate.
+ //
+ // Otherwise, we need to skip all the lines until the end
+ // line, bail out from the loop, and continue parsing.
+ //
+ if (exec)
{
- // We need to be aware of nested if-else chains.
+ // Find the construct end, if it is not found yet.
+ //
+ if (we == e)
+ we = fcend (i, true, false);
+
+ if (!exec_lines (i + 1, we,
+ exec_set, exec_cmd, exec_cond, exec_for,
+ &wi, li,
+ var_pool))
+ return false;
+
+ // Prepare for the condition reevaluation.
//
- size_t n (0);
+ replay_data (replay_tokens (ln.tokens));
+ next (t, tt);
+ li = wli;
+ }
+ else
+ {
+ // Position to the construct end, always incrementing the
+ // line index (skip is true).
+ //
+ i = fcend (i, true, true);
+ break; // Bail out from the while-loop.
+ }
+ }
+
+ break;
+ }
+ case line_type::cmd_for_stream:
+ {
+ // The for-loop construct end. Set on the first iteration.
+ //
+ lines::const_iterator fe (e);
- for (++j; j != e; ++j)
+ // Let's "wrap up" all the required data into the single object
+ // to rely on the "small function object" optimization.
+ //
+ struct loop_data
+ {
+ lines::const_iterator i;
+ lines::const_iterator e;
+ const function<exec_set_function>& exec_set;
+ const function<exec_cmd_function>& exec_cmd;
+ const function<exec_cond_function>& exec_cond;
+ const function<exec_for_function>& exec_for;
+ const iteration_index* ii;
+ size_t& li;
+ variable_pool* var_pool;
+ decltype (fcend)& fce;
+ lines::const_iterator& fe;
+ } ld {i, e,
+ exec_set, exec_cmd, exec_cond, exec_for,
+ ii, li,
+ var_pool,
+ fcend,
+ fe};
+
+ function<command_function> cf (
+ [&ld, this]
+ (environment& env,
+ const strings& args,
+ auto_fd in,
+ pipe_command* pipe,
+ const optional<deadline>& dl,
+ const location& ll)
+ {
+ namespace cli = build2::build::cli;
+
+ try
{
- line_type lt (j->type);
+ // Parse arguments.
+ //
+ cli::vector_scanner scan (args);
+ for_options ops (scan);
- if (lt == line_type::cmd_if || lt == line_type::cmd_ifn)
- ++n;
+ // Note: diagnostics consistent with the set builtin.
+ //
+ if (ops.whitespace () && ops.newline ())
+ fail (ll) << "for: both -n|--newline and "
+ << "-w|--whitespace specified";
- // If we are nested then we just wait until we get back
- // to the surface.
+ if (!scan.more ())
+ fail (ll) << "for: missing variable name";
+
+ string vname (scan.next ());
+ if (vname.empty ())
+ fail (ll) << "for: empty variable name";
+
+ // Detect patterns analogous to parse_variable_name() (so
+ // we diagnose `for x[string]`).
+ //
+ if (vname.find_first_of ("[*?") != string::npos)
+ fail (ll) << "for: expected variable name instead of "
+ << vname;
+
+ // Let's also diagnose the `... | for x:...` misuse which
+ // can probably be quite common.
//
- if (n == 0)
+ if (vname.find (':') != string::npos)
+ fail (ll) << "for: ':' after variable name";
+
+ string attrs;
+ if (scan.more ())
{
- switch (lt)
- {
- case line_type::cmd_elif:
- case line_type::cmd_elifn:
- case line_type::cmd_else:
- if (end) break;
- // Fall through.
- case line_type::cmd_end: return j;
- default: break;
- }
+ attrs = scan.next ();
+
+ if (attrs.empty ())
+ fail (ll) << "for: empty variable attributes";
+
+ if (scan.more ())
+ fail (ll) << "for: unexpected argument '"
+ << scan.next () << "'";
}
- if (lt == line_type::cmd_end)
- --n;
+ // Since the command pipe is parsed, we can stop
+ // replaying. Note that we should do this before calling
+ // exec_lines() for the loop body. Also note that we
+ // should increment the line index before that.
+ //
+ replay_stop ();
+
+ size_t fli (++ld.li);
+ iteration_index fi {1, ld.ii};
- if (skip)
+ // Let's "wrap up" all the required data into the single
+ // object to rely on the "small function object"
+ // optimization.
+ //
+ struct
{
- // Note that we don't count else and end as commands.
- //
- switch (lt)
+ loop_data& ld;
+ environment& env;
+ const string& vname;
+ const string& attrs;
+ const location& ll;
+ size_t fli;
+ iteration_index& fi;
+
+ } d {ld, env, vname, attrs, ll, fli, fi};
+
+ function<void (string&&)> f (
+ [&d, this] (string&& s)
{
- case line_type::cmd:
- case line_type::cmd_if:
- case line_type::cmd_ifn:
- case line_type::cmd_elif:
- case line_type::cmd_elifn: ++li; break;
- default: break;
- }
- }
+ loop_data& ld (d.ld);
+
+ ld.li = d.fli;
+
+ // Don't move from the variable name since it is used
+ // on each iteration.
+ //
+ d.env.set_variable (d.vname,
+ names {name (move (s))},
+ d.attrs,
+ d.ll);
+
+ // Find the construct end, if it is not found yet.
+ //
+ if (ld.fe == ld.e)
+ ld.fe = ld.fce (ld.i, true, false);
+
+ if (!exec_lines (ld.i + 1, ld.fe,
+ ld.exec_set,
+ ld.exec_cmd,
+ ld.exec_cond,
+ ld.exec_for,
+ &d.fi, ld.li,
+ ld.var_pool))
+ {
+ throw exit (true);
+ }
+
+ d.fi.index++;
+ });
+
+ read (move (in),
+ !ops.newline (), ops.newline (), ops.exact (),
+ f,
+ pipe,
+ dl,
+ ll,
+ "for");
+ }
+ catch (const cli::exception& e)
+ {
+ fail (ll) << "for: " << e;
}
+ });
- assert (false); // Missing end.
- return e;
- };
+ exec_cmd (t, tt, ii, li, false /* single */, cf, ll);
- // If we are taking this branch then we need to parse all the
- // lines until the next if-else line and then skip all the
- // lines until the end (unless next is already end).
+ // Position to construct end.
//
- // Otherwise, we need to skip all the lines until the next
- // if-else line and then continue parsing.
+ i = (fe != e ? fe : fcend (i, true, true));
+
+ break;
+ }
+ case line_type::cmd_for_args:
+ {
+ // Parse the variable name.
//
- if (take)
+ next (t, tt);
+
+ assert (tt == type::word && t.qtype == quote_type::unquoted);
+
+ string vn (move (t.value));
+
+ // Enter the variable into the pool if this is not done during
+ // the script parsing (see the var line type handling for
+ // details).
+ //
+ const variable* var (ln.var);
+
+ if (var == nullptr)
{
- // Next if-else.
- //
- lines::const_iterator j (next (i, false, false));
- if (!exec_lines (i + 1, j,
- exec_set, exec_cmd, exec_if,
- li,
- var_pool))
- return false;
+ assert (var_pool != nullptr);
- i = j->type == line_type::cmd_end ? j : next (j, true, true);
+ var = &var_pool->insert (move (vn));
}
- else
+
+ // Parse the potential element attributes and skip the colon.
+ //
+ next_with_attributes (t, tt);
+ attributes_push (t, tt);
+
+ assert (tt == type::colon);
+
+ // Save element attributes so that we can inject them on each
+ // iteration.
+ //
+ attributes val_attrs (attributes_pop ());
+
+ // Parse the value with the potential attributes.
+ //
+ // Note that we don't really need to change the mode since we
+ // are replaying the tokens.
+ //
+ value val;
+ apply_value_attributes (nullptr /* variable */,
+ val,
+ parse_variable_line (t, tt),
+ type::assign);
+
+ replay_stop ();
+
+ // If the value is not NULL then iterate over its elements,
+ // assigning them to the for-loop variable, and parsing all the
+ // construct lines afterwards. Then position to the end line of
+ // the construct and continue parsing.
+
+ // The for-loop construct end. Set on the first iteration.
+ //
+ lines::const_iterator fe (e);
+
+ if (val)
{
- i = next (i, false, true);
- if (i->type != line_type::cmd_end)
- --i; // Continue with this line (e.g., elif or else).
+ // If this value is a vector, then save its element type so
+ // that we can typify each element below.
+ //
+ const value_type* etype (nullptr);
+
+ if (val.type != nullptr)
+ {
+ etype = val.type->element_type;
+
+ // Note that here we don't want to be reducing empty simple
+ // values to empty lists.
+ //
+ untypify (val, false /* reduce */);
+ }
+
+ size_t fli (li);
+ iteration_index fi {1, ii};
+ names& ns (val.as<names> ());
+
+ for (auto ni (ns.begin ()), ne (ns.end ()); ni != ne; ++ni)
+ {
+ li = fli;
+
+ // Set the variable value.
+ //
+ bool pair (ni->pair);
+ names n;
+ n.push_back (move (*ni));
+ if (pair) n.push_back (move (*++ni));
+ value v (move (n)); // Untyped.
+
+ if (etype != nullptr)
+ typify (v, *etype, var);
+
+ exec_for (*var, move (v), val_attrs, ll);
+
+ // Find the construct end, if it is not found yet.
+ //
+ if (fe == e)
+ fe = fcend (i, true, false);
+
+ if (!exec_lines (i + 1, fe,
+ exec_set, exec_cmd, exec_cond, exec_for,
+ &fi, li,
+ var_pool))
+ return false;
+
+ fi.index++;
+ }
}
+ // Position to construct end.
+ //
+ i = (fe != e ? fe : fcend (i, true, true));
+
break;
}
case line_type::cmd_end:
{
assert (false);
+ break;
}
}
}
@@ -2278,7 +2771,7 @@ namespace build2
}
parser::parsed_doc::
- parsed_doc (parsed_doc&& d)
+ parsed_doc (parsed_doc&& d) noexcept
: re (d.re), end_line (d.end_line), end_column (d.end_column)
{
if (re)
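
To make the re-lexing criterion concrete, here is a standalone copy of the need_cmdline_relex() logic added above (illustration only, not part of the commit), with a few inputs showing which command-line words trigger re-lexing:

#include <cassert>
#include <cstring> // strchr()
#include <string>

// Same checks as parser::need_cmdline_relex() above: operators (|<>&),
// quotes ("') or the effective escape sequences \", \', \\.
//
static bool
need_cmdline_relex (const std::string& s)
{
  for (auto i (s.begin ()), e (s.end ()); i != e; ++i)
  {
    char c (*i);

    if (c == '\\')
    {
      if (++i == e)
        return false;

      c = *i;

      if (c == '\\' || c == '\'' || c == '\"')
        return true;

      // Fall through.
    }

    if (std::strchr ("|<>&\"'", c) != nullptr)
      return true;
  }

  return false;
}

int
main ()
{
  assert (!need_cmdline_relex ("echo"));      // Plain word: added as is.
  assert (!need_cmdline_relex ("a\\b"));      // Non-effective escape.
  assert (need_cmdline_relex ("a|b"));        // Pipe operator.
  assert (need_cmdline_relex ("out>file"));   // Redirect.
  assert (need_cmdline_relex ("say \"hi\"")); // Quotes.
  assert (need_cmdline_relex ("a\\\"b"));     // Effective escape: \".
}
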
diff --git a/libbuild2/script/parser.hxx b/libbuild2/script/parser.hxx
index 6e24d37..795ce4e 100644
--- a/libbuild2/script/parser.hxx
+++ b/libbuild2/script/parser.hxx
@@ -25,7 +25,7 @@ namespace build2
class parser: protected build2::parser
{
public:
- parser (context& c, bool relex): build2::parser (c), relex_ (relex) {}
+ parser (context& c): build2::parser (c) {}
// Helpers.
//
@@ -42,6 +42,15 @@ namespace build2
using build2::parser::apply_value_attributes;
+ // Return true if a command line element needs to be re-lexed.
+ //
+ // Specifically, it needs to be re-lexed if it contains any of the
+ // special characters (|<>&), quotes ("') or effective escape sequences
+ // (\", \', \\).
+ //
+ static bool
+ need_cmdline_relex (const string&);
+
// Commonly used parsing functions. Issue diagnostics and throw failed
// in case of an error.
//
@@ -88,15 +97,34 @@ namespace build2
};
using here_docs = vector<here_doc>;
- pair<command_expr, here_docs>
- parse_command_expr (token&, token_type&, const redirect_aliases&);
+ struct parse_command_expr_result
+ {
+ command_expr expr; // Single pipe for the for-loop.
+ here_docs docs;
+ bool for_loop = false;
+
+ parse_command_expr_result () = default;
+
+ parse_command_expr_result (command_expr&& e,
+ here_docs&& h,
+ bool f)
+ : expr (move (e)), docs (move (h)), for_loop (f) {}
+ };
+
+ // Pass the first special command program name (token_type::word) if it
+ // is already pre-parsed.
+ //
+ parse_command_expr_result
+ parse_command_expr (token&, token_type&,
+ const redirect_aliases&,
+ optional<token>&& program = nullopt);
command_exit
parse_command_exit (token&, token_type&);
void
parse_here_documents (token&, token_type&,
- pair<command_expr, here_docs>&);
+ parse_command_expr_result&);
struct parsed_doc
{
@@ -112,7 +140,7 @@ namespace build2
parsed_doc (string, uint64_t line, uint64_t column);
parsed_doc (regex_lines&&, uint64_t line, uint64_t column);
- parsed_doc (parsed_doc&&); // Note: move constuctible-only type.
+ parsed_doc (parsed_doc&&) noexcept; // Note: move constructible-only type.
~parsed_doc ();
};
@@ -126,18 +154,24 @@ namespace build2
// the first two tokens. Use the specified lexer mode to peek the second
// token.
//
+ // Always return the cmd_for_stream line type for the for-loop. Note
+ // that the for-loop form cannot be easily detected based on the first
+ // two tokens. Also note that the detection can be specific to the
+ // script implementation (custom lexing mode, special variables, etc).
+ //
line_type
pre_parse_line_start (token&, token_type&, lexer_mode);
// Parse the env pseudo-builtin arguments up to the program name. Return
- // the program execution timeout, CWD, the list of the variables that
- // should be unset ("name") and/or set ("name=value") in the command
- // environment, and the token/type that starts the program name. Note
- // that the variable unsets come first, if present.
+ // the program execution timeout and its success flag, CWD, the list of
+ // the variables that should be unset ("name") and/or set ("name=value")
+ // in the command environment, and the token/type that starts the
+ // program name. Note that the variable unsets come first, if present.
//
struct parsed_env
{
optional<duration> timeout;
+ bool timeout_success = false;
optional<dir_path> cwd;
environment_vars variables;
};
@@ -150,19 +184,26 @@ namespace build2
protected:
// Return false if the execution of the script should be terminated with
// the success status (e.g., as a result of encountering the exit
- // builtin). For unsuccessful termination the failed exception is thrown.
+ // builtin). For unsuccessful termination the failed exception is
+ // thrown.
//
using exec_set_function = void (const variable&,
token&, token_type&,
const location&);
using exec_cmd_function = void (token&, token_type&,
- size_t li,
+ const iteration_index*, size_t li,
bool single,
+ const function<command_function>&,
const location&);
- using exec_if_function = bool (token&, token_type&,
- size_t li,
+ using exec_cond_function = bool (token&, token_type&,
+ const iteration_index*, size_t li,
+ const location&);
+
+ using exec_for_function = void (const variable&,
+ value&&,
+ const attributes& value_attrs,
const location&);
// If a parser implementation doesn't pre-enter variables into a pool
@@ -174,8 +215,9 @@ namespace build2
exec_lines (lines::const_iterator b, lines::const_iterator e,
const function<exec_set_function>&,
const function<exec_cmd_function>&,
- const function<exec_if_function>&,
- size_t& li,
+ const function<exec_cond_function>&,
+ const function<exec_for_function>&,
+ const iteration_index*, size_t& li,
variable_pool* = nullptr);
// Customization hooks.
@@ -200,6 +242,13 @@ namespace build2
// something that requires re-lexing, for example `foo|bar`, which won't
// be easy to translate but which are handled by the parser.
//
+ // Note that the chunk could be of the special cmdline type in which
+ // case the names may need to be "preprocessed" (at least unquoted or
+ // potentially fully re-lexed) before being analyzed/consumed. Note also
+ // that in this case any names left unconsumed must remain of the
+ // cmdline type.
+ //
// During the pre-parsing phase the returned process path and names
// (that must still be parsed) are discarded. The main purpose of the
// call is to allow implementations to perform static script analysis,
@@ -229,7 +278,6 @@ namespace build2
size_t replay_quoted_;
protected:
- bool relex_;
lexer* lexer_ = nullptr;
};
}
diff --git a/libbuild2/script/regex.cxx b/libbuild2/script/regex.cxx
index 3f796b6..11ff8a1 100644
--- a/libbuild2/script/regex.cxx
+++ b/libbuild2/script/regex.cxx
@@ -75,15 +75,29 @@ namespace build2
string::traits_type::find (ex, 4, c) != nullptr)));
}
+ template <typename S>
+ static inline const char_string*
+ find_or_insert (line_pool& p, S&& s)
+ {
+ auto i (find (p.strings.begin (), p.strings.end (), s));
+ if (i == p.strings.end ())
+ {
+ p.strings.push_front (forward<S> (s));
+ i = p.strings.begin ();
+ }
+
+ return &*i;
+ }
+
line_char::
line_char (const char_string& s, line_pool& p)
- : line_char (&(*p.strings.emplace (s).first))
+ : line_char (find_or_insert (p, s))
{
}
line_char::
line_char (char_string&& s, line_pool& p)
- : line_char (&(*p.strings.emplace (move (s)).first))
+ : line_char (find_or_insert (p, move (s)))
{
}
diff --git a/libbuild2/script/regex.hxx b/libbuild2/script/regex.hxx
index e043c99..3c49b31 100644
--- a/libbuild2/script/regex.hxx
+++ b/libbuild2/script/regex.hxx
@@ -9,7 +9,6 @@
#include <locale>
#include <string> // basic_string
#include <type_traits> // make_unsigned, enable_if, is_*
-#include <unordered_set>
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
@@ -59,7 +58,12 @@ namespace build2
// Note that we assume the pool can be moved without invalidating
// pointers to any already pooled entities.
//
- std::unordered_set<char_string> strings;
+ // Note that we used to use unordered_set for strings but (1) there is
+ // no general expectation that we will have many identical strings and
+ // (2) the number of strings is not expected to be large. So that felt
+ // like overkill, and we now use a list with linear search.
+ //
+ std::list<char_string> strings;
std::list<char_regex> regexes;
};
@@ -267,8 +271,8 @@ namespace build2
template <typename T>
struct line_char_cmp
: public std::enable_if<std::is_integral<T>::value ||
- (std::is_enum<T>::value &&
- !std::is_same<T, char_flags>::value)> {};
+ (std::is_enum<T>::value &&
+ !std::is_same<T, char_flags>::value)> {};
template <typename T, typename = typename line_char_cmp<T>::type>
bool
@@ -466,10 +470,10 @@ namespace std
is (mask m, char_type c) const
{
return m ==
- (c.type () == line_type::special && c.special () >= 0 &&
- build2::digit (static_cast<char> (c.special ()))
- ? digit
- : 0);
+ (c.type () == line_type::special && c.special () >= 0 &&
+ build2::digit (static_cast<char> (c.special ()))
+ ? digit
+ : 0);
}
const char_type*
diff --git a/libbuild2/script/regex.test.cxx b/libbuild2/script/regex.test.cxx
index 9ec2432..6659d39 100644
--- a/libbuild2/script/regex.test.cxx
+++ b/libbuild2/script/regex.test.cxx
@@ -4,6 +4,9 @@
#include <regex>
#include <type_traits> // is_*
+#include <libbuild2/types.hxx>
+#include <libbuild2/utility.hxx>
+
#include <libbuild2/script/regex.hxx>
#undef NDEBUG
@@ -15,6 +18,8 @@ using namespace build2::script::regex;
int
main ()
{
+ build2::init_process ();
+
using lc = line_char;
using ls = line_string;
using lr = line_regex;
diff --git a/libbuild2/script/run.cxx b/libbuild2/script/run.cxx
index 12078c4..f8f98c1 100644
--- a/libbuild2/script/run.cxx
+++ b/libbuild2/script/run.cxx
@@ -9,7 +9,8 @@
# include <libbutl/win32-utility.hxx> // DBG_TERMINATE_PROCESS
#endif
-#include <ios> // streamsize
+#include <ios> // streamsize
+#include <cstring> // strchr()
#include <libbutl/regex.hxx>
#include <libbutl/builtin.hxx>
@@ -26,6 +27,8 @@
using namespace std;
using namespace butl;
+namespace cli = build2::build::cli;
+
namespace build2
{
namespace script
@@ -757,6 +760,31 @@ namespace build2
output_info (d, op);
}
+ // Note that a here-document regex without ':' modifier can never
+ // match an empty output since it always contains the trailing empty
+ // line-char. This can be confusing when, for example, testing a
+ // program that prints either some line or nothing with the following
+ // test:
+ //
+ // $* >>~%EOO%
+ // %(
+ // Hello, World!
+ // %)?
+ // EOO
+ //
+ // Note that the above line-regex contains 4 line-chars and will never
+ // match empty output.
+ //
+ // Thus, let's complete an empty output with an empty line-char for
+ // such a regex, so it may potentially match.
+ //
+ if (ls.empty () &&
+ rd.type == redirect_type::here_doc_regex &&
+ rd.modifiers ().find (':') == string::npos)
+ {
+ ls += line_char (string (), regex.pool);
+ }
+
// Match the output with the regex.
//
// Note that we don't distinguish between the line_regex and
@@ -782,7 +810,7 @@ namespace build2
// regex to file for troubleshooting regardless of whether we print
// the diagnostics or not. We, however, register it for cleanup in the
// later case (the expression may still succeed, we can be evaluating
- // the if condition, etc).
+ // the flow control construct condition, etc).
//
optional<path> rp;
if (env.temp_dir_keep)
@@ -944,96 +972,660 @@ namespace build2
: path (c.program.recall_string ());
}
- // Read out the stream content into a string. Throw io_error on the
- // underlying OS error.
+ // Read the stream content into a string, optionally splitting the input
+ // data at whitespaces or newlines, in which case return one, potentially
+ // incomplete, substring at a time (see the set builtin options for the
+ // splitting semantics). Throw io_error on the underlying OS error.
//
- // If the execution deadline is specified, then turn the stream into the
- // non-blocking mode reading its content in chunks and with a single
- // operation otherwise. If the specified deadline is reached while
- // reading the stream, then bail out for the successful deadline and
- // fail otherwise. Note that in the former case the result will be
- // incomplete, but we leave it to the caller to handle that.
+ // On POSIX expects the stream to be non-blocking and its exception mask
+ // to have at least badbit. On Windows can also handle a blocking stream.
//
// Note that on Windows we can only turn pipe file descriptors into the
- // non-blocking mode. Thus, we have no choice but to read from
- // descriptors of other types synchronously there. That implies that we
- // can potentially block indefinitely reading a file and missing the
- // deadline on Windows. Note though, that the user can normally rewrite
- // the command, for example, `set foo <<<file` with `cat file | set foo`
- // to avoid this problem.
+ // non-blocking mode. Thus, we have no choice but to read from descriptors
+ // of other types synchronously there. That implies that we can
+ // potentially block indefinitely reading a file and missing a deadline on
+ // Windows. Note though, that the user can normally rewrite the command,
+ // for example, `set foo <<<file` with `cat file | set foo` to avoid this
+ // problem.
//
- static string
- read (auto_fd in,
+ class stream_reader
+ {
+ public:
+ stream_reader (ifdstream&, bool whitespace, bool newline, bool exact);
+
+ // Read next substring. Return true if the substring has been read or
+ // false if it should be called again once the stream has more data to
+ // read. Also return true on eof (in which case no substring is read).
+ // The string must be empty on the first call. Throw ios::failure on the
+ // underlying OS error.
+ //
+ // Note that there could still be data to read in the stream's buffer
+ // (as opposed to file descriptor) after this function returns true and
+ // you should be careful not to block on fdselect() in this case. The
+ // recommended usage pattern is similar to that of
+ // butl::getline_non_blocking(). The only difference is that
+ // ifdstream::eof() needs to be used instead of butl::eof() since this
+ // function doesn't set failbit and only sets eofbit after the last
+ // substring is returned.
+ //
+ bool
+ next (string&);
+
+ private:
+ ifdstream& is_;
+ bool whitespace_;
+ bool newline_;
+ bool exact_;
+
+ bool empty_ = true; // Set to false after the first character is read.
+ };
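
The intended usage pattern is roughly as follows (a minimal sketch assuming fd is an auto_fd for a pipe's reading end, no deadline handling, and a hypothetical consume() callback):

    ifdstream is (move (fd), fdstream_mode::non_blocking, ifdstream::badbit);
    stream_reader sr (is, true /* whitespace */, false /* newline */, false /* exact */);

    fdselect_set fds {is.fd ()};
    string s;

    while (!is.eof ())
    {
      if (sr.next (s))        // Substring read or eof reached.
      {
        if (!is.eof ())
        {
          consume (move (s)); // Hypothetical consumer of the substring.
          s.clear ();
        }
      }
      else
        ifdselect (fds);      // Blocked: wait for more data before retrying.
    }

The read() function below follows this pattern but additionally watches the pipeline's buffered stderr streams and the execution deadline.
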
+
+ stream_reader::
+ stream_reader (ifdstream& is, bool ws, bool nl, bool ex)
+ : is_ (is),
+ whitespace_ (ws),
+ newline_ (nl),
+ exact_ (ex)
+ {
+ }
+
+ bool stream_reader::
+ next (string& ss)
+ {
#ifndef _WIN32
- bool,
+ assert ((is_.exceptions () & ifdstream::badbit) != 0 && !is_.blocking ());
#else
- bool pipe,
+ assert ((is_.exceptions () & ifdstream::badbit) != 0);
+#endif
+
+ fdstreambuf& sb (*static_cast<fdstreambuf*> (is_.rdbuf ()));
+
+ // Return the number of characters available in the stream buffer's get
+ // area, which can be:
+ //
+ // -1 -- EOF.
+ // 0 -- no data (blocked before encountering more data/EOF).
+ // >0 -- there is some data.
+ //
+ // Note that on Windows if the stream is blocking, then the lambda calls
+ // underflow() instead of returning 0.
+ //
+ // @@ Probably we can call underflow() only once per the next() call,
+ // emulating the 'no data' case. This will allow the caller to
+ // perform some housekeeping (reading other streams, checking for the
+ // deadline, etc). But let's keep it simple for now.
+ //
+ auto avail = [&sb] () -> streamsize
+ {
+ // Note that here we reasonably assume that any failure in in_avail()
+ // will lead to badbit and thus an exception (see showmanyc()).
+ //
+ streamsize r (sb.in_avail ());
+
+#ifdef _WIN32
+ if (r == 0 && sb.blocking ())
+ {
+ if (sb.underflow () == ifdstream::traits_type::eof ())
+ return -1;
+
+ r = sb.in_avail ();
+
+ assert (r != 0); // We wouldn't be here otherwise.
+ }
#endif
+
+ return r;
+ };
+
+ // Read until blocked (0), EOF (-1), or the delimiter is encountered.
+ //
+ streamsize s;
+ while ((s = avail ()) > 0)
+ {
+ if (empty_)
+ empty_ = false;
+
+ const char* p (sb.gptr ());
+ size_t n (sb.egptr () - p);
+
+ // We move p and bump by the number of consumed characters.
+ //
+ auto bump = [&sb, &p] () {sb.gbump (static_cast<int> (p - sb.gptr ()));};
+
+ if (whitespace_) // The whitespace mode.
+ {
+ const char* sep (" \n\r\t");
+
+ // Skip the whitespaces.
+ //
+ for (; n != 0 && strchr (sep, *p) != nullptr; ++p, --n) ;
+
+ // If there are any non-whitespace characters in the get area, then
+ // append them to the resulting substring until a whitespace
+ // character is encountered.
+ //
+ if (n != 0)
+ {
+ // Append the non-whitespace characters.
+ //
+ for (char c; n != 0 && strchr (sep, c = *p) == nullptr; ++p, --n)
+ ss += c;
+
+ // If a separator is encountered, then consume it, bump, and
+ // return the substring.
+ //
+ if (n != 0)
+ {
+ ++p; --n; // Consume the separator character.
+
+ bump ();
+ return true;
+ }
+
+ // Fall through.
+ }
+
+ bump (); // Bump and continue reading.
+ }
+ else // The newline or no-split mode.
+ {
+ // Note that we don't collapse multiple consecutive newlines.
+ //
+ // Note also that we always sanitize CRs, so in the no-split mode we
+ // need to loop rather than consume the whole get area at once.
+ //
+ while (n != 0)
+ {
+ // Append the characters until the newline character or the end of
+ // the get area is encountered.
+ //
+ char c;
+ for (; n != 0 && (c = *p) != '\n'; ++p, --n)
+ ss += c;
+
+ // If the newline character is encountered, then sanitize CRs and,
+ // in the newline mode, return the substring; otherwise continue
+ // parsing/reading.
+ //
+ if (n != 0)
+ {
+ // Strip the trailing CRs that can appear while, for example,
+ // cross-testing a Windows target or as part of msvcrt junk
+ // production (see above).
+ //
+ while (!ss.empty () && ss.back () == '\r')
+ ss.pop_back ();
+
+ assert (c == '\n');
+
+ ++p; --n; // Consume the newline character.
+
+ if (newline_)
+ {
+ bump ();
+ return true;
+ }
+
+ ss += c; // Append newline to the resulting string.
+
+ // Fall through.
+ }
+
+ bump (); // Bump and continue parsing/reading.
+ }
+ }
+ }
+
+ // Here s can be:
+ //
+ // -1 -- EOF.
+ // 0 -- blocked before encountering delimiter/EOF.
+ //
+ // Note: >0 (encountered the delimiter) case is handled in-place.
+ //
+ assert (s == -1 || s == 0);
+
+ if (s == -1)
+ {
+ // Return the last substring if it is not empty or it is the trailing
+ // "blank" in the exact mode. Otherwise, set eofbit for the stream
+ // indicating that we are done.
+ //
+ if (!ss.empty () || (exact_ && !empty_))
+ {
+ // Also, strip the trailing newline character, if present, in the
+ // no-split no-exact mode.
+ //
+ if (!ss.empty () && ss.back () == '\n' && // Trailing newline.
+ !newline_ && !whitespace_ && !exact_) // No-split no-exact mode.
+ {
+ ss.pop_back ();
+ }
+
+ exact_ = false; // Make sure we will set eofbit on the next call.
+ }
+ else
+ is_.setstate (ifdstream::eofbit);
+ }
+
+ return s == -1;
+ }
+
+ // Stack-allocated linked list of information about the running pipeline
+ // processes and builtins.
+ //
+ // Note: constructed incrementally.
+ //
+ struct pipe_command
+ {
+ // Initially NULL. Set to the address of the process or builtin object
+ // when it is created. Reset back to NULL when the respective
+ // process/builtin is executed and its exit status is collected (see
+ // complete_pipe() for details).
+ //
+ // We could probably use a union here, but let's keep it simple for now
+ // (at least one is NULL).
+ //
+ process* proc = nullptr;
+ builtin* bltn = nullptr;
+
+ const command& cmd;
+ const cstrings* args = nullptr;
+ const optional<deadline>& dl;
+
+ diag_buffer dbuf;
+
+ bool terminated = false; // True if this command has been terminated.
+
+ // True if this command has been terminated but we failed to read out
+ // its stdout and/or stderr streams in a reasonable timeframe (2
+ // seconds) after the termination.
+ //
+ // Note that this may happen if there is a still running child process
+ // of the terminated command which has inherited the parent's stdout and
+ // stderr file descriptors.
+ //
+ bool unread_stdout = false;
+ bool unread_stderr = false;
+
+ // Only for diagnostics.
+ //
+ const location& loc;
+ const path* isp = nullptr; // stdin cache.
+ const path* osp = nullptr; // stdout cache.
+ const path* esp = nullptr; // stderr cache.
+
+ pipe_command* prev; // NULL for the left-most command.
+ pipe_command* next; // Left-most command for the right-most command.
+
+ pipe_command (context& x,
+ const command& c,
+ const optional<deadline>& d,
+ const location& l,
+ pipe_command* p,
+ pipe_command* f)
+ : cmd (c), dl (d), dbuf (x), loc (l), prev (p), next (f) {}
+ };
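
To illustrate the prev/next semantics, for a pipeline `a | b | c` the nodes constructed left to right by run_pipe() below end up linked as follows (a sketch; the node names are hypothetical):

    a.prev == nullptr   a.next == &b
    b.prev == &a        b.next == &c
    c.prev == &b        c.next == &a   (the right-most wraps to the left-most)

This is what allows complete_pipe() to traverse the pipeline left to right, starting from the right-most command's next pointer.
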
+
+ // Wait for a process/builtin to complete until the deadline is reached
+ // and return the underlying wait function result (optional<something>).
+ //
+ template<typename P>
+ static auto
+ timed_wait (P& p, const timestamp& deadline) -> decltype(p.try_wait ())
+ {
+ timestamp now (system_clock::now ());
+ return deadline > now ? p.timed_wait (deadline - now) : p.try_wait ();
+ }
+
+ // Terminate the pipeline processes starting from the specified one and up
+ // to the leftmost one and then kill those which didn't terminate after 2
+ // seconds.
+ //
+ // After that, wait for the pipeline builtins' completion. Since their
+ // standard streams should no longer be written to or read from by any
+ // process, that shouldn't take long. If, however, they are unable to
+ // complete in 2 seconds, then some of them have probably gotten stuck
+ // while communicating with a slow filesystem device or similar, and
+ // since we currently have no way to terminate asynchronous builtins,
+ // we have no choice but to abort.
+ //
+ // Issue diagnostics and fail if something goes wrong, but still try to
+ // terminate/kill all the pipe processes.
+ //
+ static void
+ term_pipe (pipe_command* pc, tracer& trace)
+ {
+ auto prog = [] (pipe_command* c) {return cmd_path (c->cmd);};
+
+ // Terminate processes gracefully and set the terminate flag for the
+ // pipe commands.
+ //
+ diag_record dr;
+ for (pipe_command* c (pc); c != nullptr; c = c->prev)
+ {
+ if (process* p = c->proc)
+ try
+ {
+ l5 ([&]{trace (c->loc) << "terminating: " << c->cmd;});
+
+ p->term ();
+ }
+ catch (const process_error& e)
+ {
+ // If unable to terminate the process for any reason (the process is
+ // exiting on Windows, etc) then just ignore this, postponing the
+ // potential failure till the kill() call.
+ //
+ l5 ([&]{trace (c->loc) << "unable to terminate " << prog (c)
+ << ": " << e;});
+ }
+
+ c->terminated = true;
+ }
+
+ // Wait a bit for the processes to terminate and kill the remaining
+ // ones.
+ //
+ timestamp dl (system_clock::now () + chrono::seconds (2));
+
+ for (pipe_command* c (pc); c != nullptr; c = c->prev)
+ {
+ if (process* p = c->proc)
+ try
+ {
+ l5 ([&]{trace (c->loc) << "waiting: " << c->cmd;});
+
+ if (!timed_wait (*p, dl))
+ {
+ l5 ([&]{trace (c->loc) << "killing: " << c->cmd;});
+
+ p->kill ();
+ p->wait ();
+ }
+ }
+ catch (const process_error& e)
+ {
+ dr << fail (c->loc) << "unable to wait/kill " << prog (c) << ": "
+ << e;
+ }
+ }
+
+ // Wait a bit for the builtins to complete and abort if any remain
+ // running.
+ //
+ dl = system_clock::now () + chrono::seconds (2);
+
+ for (pipe_command* c (pc); c != nullptr; c = c->prev)
+ {
+ if (builtin* b = c->bltn)
+ try
+ {
+ l5 ([&]{trace (c->loc) << "waiting: " << c->cmd;});
+
+ if (!timed_wait (*b, dl))
+ {
+ error (c->loc) << prog (c) << " builtin hung, aborting";
+ terminate (false /* trace */);
+ }
+ }
+ catch (const system_error& e)
+ {
+ dr << fail (c->loc) << "unable to wait for " << prog (c) << ": "
+ << e;
+ }
+ }
+ }
+
+ void
+ read (auto_fd&& in,
+ bool whitespace, bool newline, bool exact,
+ const function<void (string&&)>& cf,
+ pipe_command* pipeline,
const optional<deadline>& dl,
- const command& deadline_cmd,
- const location& ll)
+ const location& ll,
+ const char* what)
{
- string r;
- ifdstream cin;
+ tracer trace ("script::stream_read");
+ // Note: stays blocking on Windows if the descriptor is not of the pipe
+ // type.
+ //
#ifndef _WIN32
- if (dl)
+ fdstream_mode m (fdstream_mode::non_blocking);
#else
- if (dl && pipe)
+ fdstream_mode m (pipeline != nullptr
+ ? fdstream_mode::non_blocking
+ : fdstream_mode::blocking);
#endif
+
+ ifdstream is (move (in), m, ifdstream::badbit);
+ stream_reader sr (is, whitespace, newline, exact);
+
+ fdselect_set fds;
+ for (pipe_command* c (pipeline); c != nullptr; c = c->prev)
+ {
+ diag_buffer& b (c->dbuf);
+
+ if (b.is.is_open ())
+ fds.emplace_back (b.is.fd (), c);
+ }
+
+ fds.emplace_back (is.fd ());
+ fdselect_state& ist (fds.back ());
+ size_t unread (fds.size ());
+
+ optional<timestamp> dlt (dl ? dl->value : optional<timestamp> ());
+
+ // If there are some left-hand side processes/builtins running, then
+ // terminate them and, if there are unread stdout/stderr file
+ // descriptors, then increase the deadline by another 2 seconds and
+ // return true. In this case term() should be called again upon
+ // reaching the timeout. Otherwise return false. If there are no
+ // left-hand side processes/builtins running, then fail straight away.
+ //
+ // Note that in the former case the further reading will be performed
+ // with the adjusted timeout. We assume that this timeout is normally
+ // sufficient to read out the buffered data written by the already
+ // terminated processes. If, however, that's not the case (see
+ // pipe_command for the possible reasons), then term() needs to be
+ // called for the second time and the reading should be interrupted
+ // afterwards.
+ //
+ auto term = [&dlt, pipeline, &fds, &ist, &is, &unread,
+ &trace, &ll, what, terminated = false] () mutable -> bool
{
- fdselect_set fds {in.get ()};
- cin.open (move (in), fdstream_mode::non_blocking);
+ // Can only be called if the deadline is specified.
+ //
+ assert (dlt);
- const timestamp& dlt (dl->value);
+ if (pipeline == nullptr)
+ fail (ll) << what << " terminated: execution timeout expired";
- for (char buf[4096];; )
+ if (!terminated)
{
- timestamp now (system_clock::now ());
+ // Terminate the pipeline and adjust the deadline.
+ //
- if (dlt <= now || ifdselect (fds, dlt - now) == 0)
+ // Note that if we are still reading the stream and it's a builtin
+ // stdout, then we need to close it before terminating the pipeline.
+ // Not doing so can result in blocking this builtin on the write
+ // operation and thus aborting the build2 process (see term_pipe()
+ // for details).
+ //
+ // Should we do the same for all the pipeline builtins' stderr
+ // streams? No, we don't need to, since the builtin diagnostics is
+ // assumed to always fit the pipe buffer (see libbutl/builtin.cxx
+ // for details).
+ // Thus, we will leave them open to fully read out the diagnostics.
+ //
+ if (ist.fd != nullfd && pipeline->bltn != nullptr)
{
- if (!dl->success)
- fail (ll) << cmd_path (deadline_cmd)
- << " terminated: execution timeout expired";
- else
- break;
+ try
+ {
+ is.close ();
+ }
+ catch (const io_error&)
+ {
+ // Not much we can do here.
+ }
+
+ ist.fd = nullfd;
+ --unread;
}
- streamsize n (cin.readsome (buf, sizeof (buf)));
+ term_pipe (pipeline, trace);
+ terminated = true;
+
+ if (unread != 0)
+ dlt = system_clock::now () + chrono::seconds (2);
- // Bail out if eos is reached.
+ return unread != 0;
+ }
+ else
+ {
+ // Set the unread_{stderr,stdout} flags to true for the commands
+ // whose streams are not fully read yet.
//
- if (n == 0)
- break;
- r.append (buf, n);
+ // Can only be called after the first call of term() which would
+ // throw failed if pipeline is NULL.
+ //
+ assert (pipeline != nullptr);
+
+ for (fdselect_state& s: fds)
+ {
+ if (s.fd != nullfd)
+ {
+ if (s.data != nullptr) // stderr.
+ {
+ pipe_command* c (static_cast<pipe_command*> (s.data));
+
+ c->unread_stderr = true;
+
+ // Let's also close the stderr stream so as not to confuse
+ // diag_buffer::close() with a stream that is not fully read
+ // (eof is not reached, etc).
+ //
+ try
+ {
+ c->dbuf.is.close ();
+ }
+ catch (const io_error&)
+ {
+ // Not much we can do here. Anyway the diagnostics will be
+ // issued by complete_pipe().
+ }
+ }
+ else // stdout.
+ pipeline->unread_stdout = true;
+ }
+ }
+
+ return false;
}
- }
- else
+ };
+
+ // Note that on Windows if the file descriptor is not a pipe, then
+ // ifdstream assumes the blocking mode for which ifdselect() would throw
+ // invalid_argument. Such a descriptor can, however, only appear for the
+ // first command in the pipeline and so fds will only contain the input
+ // stream's descriptor. That all means that this descriptor will be read
+ // out by a series of the stream_reader::next() calls which can only
+ // return true and thus no ifdselect() calls will ever be made.
+ //
+ string s;
+ while (unread != 0)
{
- cin.open (move (in));
- r = cin.read_text ();
- }
+ // Read any pending data from the input stream.
+ //
+ if (ist.fd != nullfd)
+ {
+ // Prior to reading let's check that the deadline, if specified,
+ // has not been reached. This way we handle the (hypothetical) case
+ // when we are continuously fed data without delays and thus can
+ // never get to ifdselect(), which watches for the deadline. Also,
+ // this check is the only way to bail out early on Windows for a
+ // blocking file descriptor.
+ //
+ if (dlt && *dlt <= system_clock::now ())
+ {
+ if (!term ())
+ break;
+ }
- cin.close ();
+ if (sr.next (s))
+ {
+ if (!is.eof ())
+ {
+ // Consume the substring.
+ //
+ cf (move (s));
+ s.clear ();
+ }
+ else
+ {
+ ist.fd = nullfd;
+ --unread;
+ }
- return r;
+ continue;
+ }
+ }
+
+ try
+ {
+ // Wait until the data appear in any of the streams. If a deadline
+ // is specified, then pass the timeout to fdselect().
+ //
+ if (dlt)
+ {
+ timestamp now (system_clock::now ());
+
+ if (*dlt <= now || ifdselect (fds, *dlt - now) == 0)
+ {
+ if (term ())
+ continue;
+ else
+ break;
+ }
+ }
+ else
+ ifdselect (fds);
+
+ // Read out the pending data from the stderr streams.
+ //
+ for (fdselect_state& s: fds)
+ {
+ if (s.ready &&
+ s.data != nullptr &&
+ !static_cast<pipe_command*> (s.data)->dbuf.read ())
+ {
+ s.fd = nullfd;
+ --unread;
+ }
+ }
+ }
+ catch (const io_error& e)
+ {
+ fail (ll) << "io error reading pipeline streams: " << e;
+ }
+ }
}
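
A typical call, mirroring what the set pseudo-builtin below does (a sketch; ifd, pipeline, dl, and ll are assumed to be in scope):

    names ns;

    read (move (ifd),
          true  /* whitespace */,
          false /* newline */,
          false /* exact */,
          [&ns] (string&& s) {ns.emplace_back (move (s));},
          pipeline,
          dl,
          ll,
          "set");
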
// The set pseudo-builtin: set variable from the stdin input.
//
- // set [-e|--exact] [(-n|--newline)|(-w|--whitespace)] [<attr>] <var>
+ // set [-e|--exact] [(-n|--newline)|(-w|--whitespace)] <var> [<attr>]
//
static void
set_builtin (environment& env,
const strings& args,
auto_fd in,
- bool pipe,
+ pipe_command* pipeline,
const optional<deadline>& dl,
- const command& deadline_cmd,
const location& ll)
{
+ tracer trace ("script::set_builtin");
+
try
{
// Parse arguments.
@@ -1047,105 +1639,41 @@ namespace build2
if (!scan.more ())
fail (ll) << "set: missing variable name";
- string a (scan.next ()); // Either attributes or variable name.
- const string* ats (!scan.more () ? nullptr : &a);
- string vname (!scan.more () ? move (a) : scan.next ());
-
- if (scan.more ())
- fail (ll) << "set: unexpected argument '" << scan.next () << "'";
-
- if (ats != nullptr && ats->empty ())
- fail (ll) << "set: empty variable attributes";
-
+ string vname (scan.next ());
if (vname.empty ())
fail (ll) << "set: empty variable name";
- // Read out the stream content into a string while keeping an eye on
- // the deadline.
+ // Detect patterns analogous to parser::parse_variable_name() (so we
+ // diagnose `set x[string]`).
//
- string s (read (move (in), pipe, dl, deadline_cmd, ll));
+ if (vname.find_first_of ("[*?") != string::npos)
+ fail (ll) << "set: expected variable name instead of " << vname;
- // Parse the stream content into the variable value.
- //
- names ns;
-
- if (!s.empty ())
+ string attrs;
+ if (scan.more ())
{
- if (ops.whitespace ()) // The whitespace mode.
- {
- // Note that we collapse multiple consecutive whitespaces.
- //
- for (size_t p (0); p != string::npos; )
- {
- // Skip the whitespaces.
- //
- const char* sep (" \n\r\t");
- size_t b (s.find_first_not_of (sep, p));
+ attrs = scan.next ();
- if (b != string::npos) // Word beginning.
- {
- size_t e (s.find_first_of (sep, b)); // Find the word end.
- ns.emplace_back (string (s, b, e != string::npos ? e - b : e));
-
- p = e;
- }
- else // Trailings whitespaces.
- {
- // Append the trailing "blank" after the trailing whitespaces
- // in the exact mode.
- //
- if (ops.exact ())
- ns.emplace_back (empty_string);
+ if (attrs.empty ())
+ fail (ll) << "set: empty variable attributes";
- // Bail out since the end of the string is reached.
- //
- break;
- }
- }
- }
- else // The newline or no-split mode.
- {
- // Note that we don't collapse multiple consecutive newlines.
- //
- // Note also that we always sanitize CRs so this loop is always
- // needed.
- //
- for (size_t p (0); p != string::npos; )
- {
- size_t e (s.find ('\n', p));
- string l (s, p, e != string::npos ? e - p : e);
-
- // Strip the trailing CRs that can appear while, for example,
- // cross-testing Windows target or as a part of msvcrt junk
- // production (see above).
- //
- while (!l.empty () && l.back () == '\r')
- l.pop_back ();
+ if (scan.more ())
+ fail (ll) << "set: unexpected argument '" << scan.next () << "'";
+ }
- // Append the line.
- //
- if (!l.empty () || // Non-empty.
- e != string::npos || // Empty, non-trailing.
- ops.exact ()) // Empty, trailing, in the exact mode.
- {
- if (ops.newline () || ns.empty ())
- ns.emplace_back (move (l));
- else
- {
- ns[0].value += '\n';
- ns[0].value += l;
- }
- }
+ // Parse the stream content into the variable value.
+ //
+ names ns;
- p = e != string::npos ? e + 1 : e;
- }
- }
- }
+ read (move (in),
+ ops.whitespace (), ops.newline (), ops.exact (),
+ [&ns] (string&& s) {ns.emplace_back (move (s));},
+ pipeline,
+ dl,
+ ll,
+ "set");
- env.set_variable (move (vname),
- move (ns),
- ats != nullptr ? *ats : empty_string,
- ll);
+ env.set_variable (move (vname), move (ns), attrs, ll);
}
catch (const io_error& e)
{
@@ -1172,51 +1700,16 @@ namespace build2
name);
}
- // Stack-allocated linked list of information about the running pipeline
- // processes and builtins.
- //
- struct pipe_command
- {
- // We could probably use a union here, but let's keep it simple for now
- // (one is NULL).
- //
- process* proc;
- builtin* bltn;
-
- // True if this command has been terminated.
- //
- bool terminated = false;
-
- // Only for diagnostics.
- //
- const command& cmd;
- const location& loc;
-
- pipe_command* prev; // NULL for the left-most command.
-
- pipe_command (process& p,
- const command& c,
- const location& l,
- pipe_command* v)
- : proc (&p), bltn (nullptr), cmd (c), loc (l), prev (v) {}
-
- pipe_command (builtin& b,
- const command& c,
- const location& l,
- pipe_command* v)
- : proc (nullptr), bltn (&b), cmd (c), loc (l), prev (v) {}
- };
-
static bool
run_pipe (environment& env,
command_pipe::const_iterator bc,
command_pipe::const_iterator ec,
auto_fd ifd,
- size_t ci, size_t li, const location& ll,
+ const iteration_index* ii, size_t li, size_t ci,
+ const location& ll,
bool diag,
- string* output,
+ const function<command_function>& cf, bool last_cmd,
optional<deadline> dl = nullopt,
- const command* dl_cmd = nullptr, // env -t <cmd>
pipe_command* prev_cmd = nullptr)
{
tracer trace ("script::run_pipe");
@@ -1225,8 +1718,10 @@ namespace build2
//
if (bc == ec)
{
- if (output != nullptr)
+ if (cf != nullptr)
{
+ assert (!last_cmd); // Otherwise we wouldn't be here.
+
// The pipeline can't be empty.
//
assert (ifd != nullfd && prev_cmd != nullptr);
@@ -1235,15 +1730,14 @@ namespace build2
try
{
- *output = read (move (ifd),
- true /* pipe */,
- dl,
- dl_cmd != nullptr ? *dl_cmd : c,
- ll);
+ cf (env, strings () /* arguments */,
+ move (ifd), prev_cmd,
+ dl,
+ ll);
}
catch (const io_error& e)
{
- fail (ll) << "io error reading " << cmd_path (c) << " output: "
+ fail (ll) << "unable to read from " << cmd_path (c) << " stdout: "
<< e;
}
}
@@ -1301,9 +1795,10 @@ namespace build2
command_pipe::const_iterator nc (bc + 1);
bool last (nc == ec);
- // Make sure that stdout is not redirected if meant to be read.
+ // Make sure that stdout is not redirected if meant to be read (last_cmd
+ // is false) or cannot be produced (last_cmd is true).
//
- if (last && output != nullptr && c.out)
+ if (last && c.out && cf != nullptr)
fail (ll) << "stdout cannot be redirected";
// True if the process path is not pre-searched and the program path
@@ -1317,7 +1812,7 @@ namespace build2
const redirect& in ((c.in ? *c.in : env.in).effective ());
- const redirect* out (!last || output != nullptr
+ const redirect* out (!last || (cf != nullptr && !last_cmd)
? nullptr // stdout is piped.
: &(c.out ? *c.out : env.out).effective ());
@@ -1325,13 +1820,7 @@ namespace build2
auto process_args = [&c] () -> cstrings
{
- cstrings args {c.program.recall_string ()};
-
- for (const auto& a: c.arguments)
- args.push_back (a.c_str ());
-
- args.push_back (nullptr);
- return args;
+ return build2::process_args (c.program.recall_string (), c.arguments);
};
// Prior to opening file descriptors for command input/output redirects
@@ -1354,14 +1843,29 @@ namespace build2
// content), to make sure that the command doesn't print any unwanted
// diagnostics about IO operation failure.
//
- // Note though, that doing so would be a bad idea if the deadline is
- // specified, since we can block on read and miss the deadline.
- //
- if (!dl)
+ if (ifd != nullfd)
{
- // Note that dtor will ignore any errors (which is what we want).
+ // Note that we can't use ifdstream dtor in the skip mode here since
+ // it turns the stream into the blocking mode and we won't be able
+ // to read out the potentially buffered stderr for the
+ // pipeline. Using read() is also not ideal since it performs
+ // parsing and allocations needlessly. This, however, is probably ok
+ // for such an uncommon case.
//
- ifdstream (move (ifd), fdstream_mode::skip);
+ //ifdstream (move (ifd), fdstream_mode::skip);
+
+ // Let's try to minimize the allocation size by splitting the
+ // input data at whitespaces.
+ //
+ read (move (ifd),
+ true /* whitespace */,
+ false /* newline */,
+ false /* exact */,
+ [] (string&&) {}, // Just drop the string.
+ prev_cmd,
+ dl,
+ ll,
+ program.c_str ());
}
if (!first || !last)
@@ -1385,7 +1889,7 @@ namespace build2
if (c.out)
fail (ll) << program << " builtin stdout cannot be redirected";
- if (output != nullptr)
+ if (cf != nullptr && !last_cmd)
fail (ll) << program << " builtin stdout cannot be read";
if (c.err)
@@ -1417,17 +1921,29 @@ namespace build2
// Create a unique path for a command standard stream cache file.
//
- auto std_path = [&env, &ci, &li, &ll] (const char* n) -> path
+ auto std_path = [&env, ii, &li, &ci, &ll] (const char* nm) -> path
{
using std::to_string;
- path p (n);
+ string s (nm);
+ size_t n (s.size ());
+
+ if (ii != nullptr)
+ {
+ // Note: reverse order (outermost to innermost).
+ //
+ for (const iteration_index* i (ii); i != nullptr; i = i->prev)
+ s.insert (n, "-i" + to_string (i->index));
+ }
// 0 if belongs to a single-line script, otherwise is the command line
// number (start from one) in the script.
//
- if (li > 0)
- p += "-" + to_string (li);
+ if (li != 0)
+ {
+ s += "-n";
+ s += to_string (li);
+ }
// 0 if belongs to a single-command expression, otherwise is the
// command number (start from one) in the expression.
@@ -1436,10 +1952,13 @@ namespace build2
// single-line script or to N-th single-command line of multi-line
// script. These cases are mutually exclusive and so are unambiguous.
//
- if (ci > 0)
- p += "-" + to_string (ci);
+ if (ci != 0)
+ {
+ s += "-c";
+ s += to_string (ci);
+ }
- return normalize (move (p), temp_dir (env), ll);
+ return normalize (path (move (s)), temp_dir (env), ll);
};
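
For instance, with two nested loops (outer iteration 1, inner iteration 2), script line 3, and command 2, the stdout cache file would be named along these lines (a sketch of the naming scheme implemented above):

    stdout-i1-i2-n3-c2
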
// If this is the first pipeline command, then open stdin descriptor
@@ -1544,19 +2063,15 @@ namespace build2
// Calculate the process/builtin execution deadline. Note that we should
// also consider the left-hand side processes deadlines, not to keep
// them waiting for us and allow them to terminate not later than their
- // deadlines. Thus, let's also track which command has introduced the
- // deadline, so we can report it if the deadline is missed.
+ // deadlines.
//
dl = earlier (dl, env.effective_deadline ());
if (c.timeout)
{
- deadline d (system_clock::now () + *c.timeout, false /* success */);
+ deadline d (system_clock::now () + *c.timeout, c.timeout_success);
if (!dl || d < *dl)
- {
dl = d;
- dl_cmd = &c;
- }
}
// Prior to opening file descriptors for command outputs redirects
@@ -1577,7 +2092,7 @@ namespace build2
if (c.out)
fail (ll) << "set builtin stdout cannot be redirected";
- if (output != nullptr)
+ if (cf != nullptr && !last_cmd)
fail (ll) << "set builtin stdout cannot be read";
if (c.err)
@@ -1589,14 +2104,54 @@ namespace build2
if (verb >= 2)
print_process (process_args ());
- set_builtin (env, c.arguments,
- move (ifd), !first,
- dl, dl_cmd != nullptr ? *dl_cmd : c,
- ll);
+ set_builtin (env, c.arguments, move (ifd), prev_cmd, dl, ll);
+ return true;
+ }
+
+ // If this is the last command in the pipe and the command function is
+ // specified for it, then call it.
+ //
+ if (last && cf != nullptr && last_cmd)
+ {
+ // Must be enforced by the caller.
+ //
+ assert (!c.out && !c.err && !c.exit);
+
+ try
+ {
+ cf (env, c.arguments, move (ifd), prev_cmd, dl, ll);
+ }
+ catch (const io_error& e)
+ {
+ diag_record dr (fail (ll));
+
+ dr << cmd_path (c) << ": unable to read from ";
+
+ if (prev_cmd != nullptr)
+ dr << cmd_path (prev_cmd->cmd) << " output";
+ else
+ dr << "stdin";
+
+ dr << ": " << e;
+ }
return true;
}
+ // Propagate the pointer to the left-most command.
+ //
+ pipe_command pc (env.context,
+ c,
+ dl,
+ ll,
+ prev_cmd,
+ prev_cmd != nullptr ? prev_cmd->next : nullptr);
+
+ if (prev_cmd != nullptr)
+ prev_cmd->next = &pc;
+ else
+ pc.next = &pc; // Points to itself.
+
// Open a file for command output redirect if requested explicitly
// (file overwrite/append redirects) or for the purpose of the output
// validation (none, here_*, file comparison redirects), register the
@@ -1606,9 +2161,9 @@ namespace build2
// or null-device descriptor for merge, pass or null redirects
// respectively (not opening any file).
//
- auto open = [&env, &wdir, &ll, &std_path] (const redirect& r,
- int dfd,
- path& p) -> auto_fd
+ auto open = [&env, &wdir, &ll, &std_path, &c, &pc] (const redirect& r,
+ int dfd,
+ path& p) -> auto_fd
{
assert (dfd == 1 || dfd == 2);
const char* what (dfd == 1 ? "stdout" : "stderr");
@@ -1626,11 +2181,34 @@ namespace build2
{
try
{
+ if (dfd == 2) // stderr?
+ {
+ fdpipe p;
+ if (diag_buffer::pipe (env.context) == -1) // Are we buffering?
+ p = fdopen_pipe ();
+
+ // Deduce the args0 argument similar to cmd_path().
+ //
+ // Note that we must open the diag buffer regardless of the
+ // diag_buffer::pipe() result.
+ //
+ pc.dbuf.open ((c.program.initial == nullptr
+ ? c.program.recall.string ().c_str ()
+ : c.program.recall_string ()),
+ move (p.in),
+ fdstream_mode::non_blocking);
+
+ if (p.out != nullfd)
+ return move (p.out);
+
+ // Fall through.
+ }
+
return fddup (dfd);
}
catch (const io_error& e)
{
- fail (ll) << "unable to duplicate " << what << ": " << e;
+ fail (ll) << "unable to redirect " << what << ": " << e;
}
}
@@ -1759,111 +2337,386 @@ namespace build2
//
assert (ofd.out != nullfd && efd != nullfd);
- // Wait for a process/builtin to complete until the deadline is reached
- // and return the underlying wait function result (optional<something>).
- //
- auto timed_wait = [] (auto& p, const timestamp& deadline)
- {
- timestamp now (system_clock::now ());
- return deadline > now ? p.timed_wait (deadline - now) : p.try_wait ();
- };
+ pc.isp = &isp;
+ pc.osp = &osp;
+ pc.esp = &esp;
- // Terminate the pipeline processes starting from the specified one and
- // up to the leftmost one and then kill those which didn't terminate
- // after 2 seconds.
+ // Read out all the pipeline's buffered stderr streams, watching for the
+ // deadline, if specified. If the deadline is reached, then terminate
+ // the whole pipeline, move the deadline by another 2 seconds, and
+ // continue reading.
//
- // After that wait for the pipeline builtins completion. Since their
- // standard streams should no longer be written to or read from by any
- // process, that shouldn't take long. If, however, they won't be able to
- // complete in 2 seconds, then some of them have probably stuck while
- // communicating with a slow filesystem device or similar, and since we
- // currently have no way to terminate asynchronous builtins, we have no
- // choice but to abort.
+ // Note that we assume that this timeout increment is normally
+ // sufficient to read out the buffered data written by the already
+ // terminated processes. If, however, that's not the case (see
+ // pipe_command for the possible reasons), then we just set the
+ // unread_stderr flag to true for such commands and bail out.
//
- // Issue diagnostics and fail if something goes wrong, but still try to
- // terminate/kill all the pipe processes.
+ // Also note that this is a reduced version of the above read() function.
//
- auto term_pipe = [&timed_wait, &trace] (pipe_command* pc)
+ auto read_pipe = [&pc, &ll, &trace] ()
{
- diag_record dr;
+ fdselect_set fds;
+ for (pipe_command* c (&pc); c != nullptr; c = c->prev)
+ {
+ diag_buffer& b (c->dbuf);
- auto prog = [] (pipe_command* c) {return cmd_path (c->cmd);};
+ if (b.is.is_open ())
+ fds.emplace_back (b.is.fd (), c);
+ }
- // Terminate processes gracefully and set the terminate flag for the
- // pipe commands.
+ // Note that the current command deadline is the earliest (see above).
//
- for (pipe_command* c (pc); c != nullptr; c = c->prev)
+ optional<timestamp> dlt (pc.dl ? pc.dl->value : optional<timestamp> ());
+
+ bool terminated (false);
+
+ for (size_t unread (fds.size ()); unread != 0;)
{
- if (process* p = c->proc)
try
{
- l5 ([&]{trace (c->loc) << "terminating: " << c->cmd;});
+ // If a deadline is specified, then pass the timeout to fdselect().
+ //
+ if (dlt)
+ {
+ timestamp now (system_clock::now ());
+
+ if (*dlt <= now || ifdselect (fds, *dlt - now) == 0)
+ {
+ if (!terminated)
+ {
+ term_pipe (&pc, trace);
+ terminated = true;
+
+ dlt = system_clock::now () + chrono::seconds (2);
+ continue;
+ }
+ else
+ {
+ for (fdselect_state& s: fds)
+ {
+ if (s.fd != nullfd)
+ {
+ pipe_command* c (static_cast<pipe_command*> (s.data));
+
+ c->unread_stderr = true;
+
+ // Let's also close the stderr stream so as not to confuse
+ // diag_buffer::close() (see read() for details).
+ //
+ try
+ {
+ c->dbuf.is.close ();
+ }
+ catch (const io_error&) {}
+ }
+ }
+
+ break;
+ }
+ }
+ }
+ else
+ ifdselect (fds);
- p->term ();
+ for (fdselect_state& s: fds)
+ {
+ if (s.ready &&
+ !static_cast<pipe_command*> (s.data)->dbuf.read ())
+ {
+ s.fd = nullfd;
+ --unread;
+ }
+ }
}
- catch (const process_error& e)
+ catch (const io_error& e)
{
- // If unable to terminate the process for any reason (the process
- // is exiting on Windows, etc) then just ignore this, postponing
- // the potential failure till the kill() call.
- //
- l5 ([&]{trace (c->loc) << "unable to terminate " << prog (c)
- << ": " << e;});
+ fail (ll) << "io error reading pipeline streams: " << e;
}
-
- c->terminated = true;
}
+ };
- // Wait a bit for the processes to terminate and kill the remaining
- // ones.
- //
- timestamp dl (system_clock::now () + chrono::seconds (2));
-
- for (pipe_command* c (pc); c != nullptr; c = c->prev)
+ // Wait for the pipeline processes and builtins to complete, watching
+ // for their deadlines if present. If a deadline is reached for any of
+ // them, then terminate the whole pipeline.
+ //
+ // Note: must be called after read_pipe().
+ //
+ auto wait_pipe = [&pc, &dl, &trace] ()
+ {
+ for (pipe_command* c (&pc); c != nullptr; c = c->prev)
{
- if (process* p = c->proc)
try
{
- l5 ([&]{trace (c->loc) << "waiting: " << c->cmd;});
-
- if (!timed_wait (*p, dl))
+ if (process* p = c->proc)
+ {
+ if (!dl)
+ p->wait ();
+ else if (!timed_wait (*p, dl->value))
+ term_pipe (c, trace);
+ }
+ else
{
- l5 ([&]{trace (c->loc) << "killing: " << c->cmd;});
+ builtin* b (c->bltn);
- p->kill ();
- p->wait ();
+ if (!dl)
+ b->wait ();
+ else if (!timed_wait (*b, dl->value))
+ term_pipe (c, trace);
}
}
catch (const process_error& e)
{
- dr << fail (c->loc) << "unable to wait/kill " << prog (c) << ": "
- << e;
+ fail (c->loc) << "unable to wait " << cmd_path (c->cmd) << ": "
+ << e;
}
}
+ };
- // Wait a bit for the builtins to complete and abort if any remain
- // running.
- //
- dl = system_clock::now () + chrono::seconds (2);
+ // Iterate over the pipeline processes and builtins left to right,
+ // printing their stderr if buffered and issuing the diagnostics if the
+ // exit code is not available (terminated abnormally or due to a
+ // deadline), is unexpected, or stdout and/or stderr was not fully
+ // read. Throw failed at the end if the exit code for any of them is not
+ // available or stdout and/or stderr was not fully read. Return false if
+ // the exit code for any of them is unexpected (the return value is used,
+ // example, in the if-conditions).
+ //
+ // Note: must be called after wait_pipe() and only once.
+ //
+ auto complete_pipe = [&pc, &env, diag] ()
+ {
+ bool r (true);
+ bool fail (false);
+
+ pipe_command* c (pc.next); // Left-most command.
+ assert (c != nullptr); // Since the lambda must be called once.
- for (pipe_command* c (pc); c != nullptr; c = c->prev)
+ for (pc.next = nullptr; c != nullptr; c = c->next)
{
- if (builtin* b = c->bltn)
- try
+ // Collect the exit status, if present.
+ //
+ // Absent if the process/builtin misses the "unsuccessful" deadline.
+ //
+ optional<process_exit> exit;
+
+ const char* w (c->bltn != nullptr ? "builtin" : "process");
+
+ if (c->bltn != nullptr)
{
- l5 ([&]{trace (c->loc) << "waiting: " << c->cmd;});
+ // Note that this also handles ad hoc termination (without the
+ // call to term_pipe()) by the sleep builtin.
+ //
+ if (c->terminated)
+ {
+ if (c->dl && c->dl->success)
+ exit = process_exit (0);
+ }
+ else
+ exit = process_exit (c->bltn->wait ());
+
+ c->bltn = nullptr;
+ }
+ else if (c->proc != nullptr)
+ {
+ const process& pr (*c->proc);
- if (!timed_wait (*b, dl))
+#ifndef _WIN32
+ if (c->terminated &&
+ !pr.exit->normal () &&
+ pr.exit->signal () == SIGTERM)
+#else
+ if (c->terminated &&
+ !pr.exit->normal () &&
+ pr.exit->status == DBG_TERMINATE_PROCESS)
+#endif
{
- error (c->loc) << prog (c) << " builtin hanged, aborting";
- terminate (false /* trace */);
+ if (c->dl && c->dl->success)
+ exit = process_exit (0);
}
+ else
+ exit = pr.exit;
+
+ c->proc = nullptr;
}
- catch (const system_error& e)
+ else
+ assert (false); // The lambda can only be called once.
+
+ const command& cmd (c->cmd);
+ const location& ll (c->loc);
+
+ // Verify the exit status and issue the diagnostics on failure.
+ //
+ diag_record dr;
+
+ path pr (cmd_path (cmd));
+
+ // Print the diagnostics if the command stdout and/or stderr are not
+ // fully read.
+ //
+ auto unread_output_diag = [&dr, c, w, &pr] (bool main_error)
+ {
+ if (main_error)
+ dr << error (c->loc) << w << ' ' << pr << ' ';
+ else
+ dr << error;
+
+ if (c->unread_stdout)
+ {
+ dr << "stdout ";
+
+ if (c->unread_stderr)
+ dr << "and ";
+ }
+
+ if (c->unread_stderr)
+ dr << "stderr ";
+
+ dr << "not closed after exit";
+ };
+
+ // Fail if the process is terminated due to reaching the deadline.
+ //
+ if (!exit)
+ {
+ dr << error (ll) << w << ' ' << pr
+ << " terminated: execution timeout expired";
+
+ if (c->unread_stdout || c->unread_stderr)
+ unread_output_diag (false /* main_error */);
+
+ if (verb == 1)
+ {
+ dr << info << "command line: ";
+ print_process (dr, *c->args);
+ }
+
+ fail = true;
+ }
+ else
{
- dr << fail (c->loc) << "unable to wait for " << prog (c) << ": "
- << e;
+ // If there is no valid exit code available for whatever reason,
+ // then we print the proper diagnostics, dump stderr (if cached
+ // and not too large) and fail the whole script. Otherwise if the
+ // exit code is not correct then we print diagnostics if requested
+ // and fail the pipeline.
+ //
+ bool valid (exit->normal ());
+
+ // On Windows the exit code can be out of the valid codes range,
+ // being defined as uint16_t.
+ //
+#ifdef _WIN32
+ if (valid)
+ valid = exit->code () < 256;
+#endif
+
+ // In the presence of a valid exit code, and given that stdout
+ // and stderr are fully read out, we print the diagnostics and
+ // return false rather than throw.
+ //
+ // Note that there can be a race, so that the process we have
+ // terminated due to reaching the deadline has in fact exited
+ // normally. Thus, the 'unread stderr' situation can also happen
+ // to a successfully terminated process. If that's the case, we
+ // report this problem as the main error, and as the secondary
+ // error otherwise.
+ //
+ if (!valid || c->unread_stdout || c->unread_stderr)
+ fail = true;
+
+ exit_comparison cmp (cmd.exit
+ ? cmd.exit->comparison
+ : exit_comparison::eq);
+
+ uint16_t exc (cmd.exit ? cmd.exit->code : 0);
+
+ bool success (valid &&
+ (cmp == exit_comparison::eq) ==
+ (exc == exit->code ()));
+
+ if (!success)
+ r = false;
+
+ if (!valid || (!success && diag))
+ {
+ dr << error (ll) << w << ' ' << pr << ' ';
+
+ if (!exit->normal ())
+ dr << *exit;
+ else
+ {
+ uint16_t ec (exit->code ()); // Make sure printed as integer.
+
+ if (!valid)
+ {
+ dr << "exit code " << ec << " out of 0-255 range";
+ }
+ else
+ {
+ if (cmd.exit)
+ dr << "exit code " << ec
+ << (cmp == exit_comparison::eq ? " != " : " == ")
+ << exc;
+ else
+ dr << "exited with code " << ec;
+ }
+ }
+
+ if (c->unread_stdout || c->unread_stderr)
+ unread_output_diag (false /* main_error */);
+
+ if (verb == 1)
+ {
+ dr << info << "command line: ";
+ print_process (dr, *c->args);
+ }
+
+ if (non_empty (*c->esp, ll) && avail_on_failure (*c->esp, env))
+ dr << info << "stderr: " << *c->esp;
+
+ if (non_empty (*c->osp, ll) && avail_on_failure (*c->osp, env))
+ dr << info << "stdout: " << *c->osp;
+
+ if (non_empty (*c->isp, ll) && avail_on_failure (*c->isp, env))
+ dr << info << "stdin: " << *c->isp;
+
+ // Print cached stderr.
+ //
+ print_file (dr, *c->esp, ll);
+ }
+ else if (c->unread_stdout || c->unread_stderr)
+ unread_output_diag (true /* main_error */);
}
+
+ // Now print the buffered stderr, if present, and/or flush the
+ // diagnostics, if issued.
+ //
+ if (c->dbuf.is_open ())
+ c->dbuf.close (move (dr));
+ }
+
+ // Fail if required.
+ //
+ if (fail)
+ throw failed ();
+
+ return r;
+ };
+
+ // Close all buffered pipeline stderr streams ignoring io_error
+ // exceptions.
+ //
+ auto close_pipe = [&pc] ()
+ {
+ for (pipe_command* c (&pc); c != nullptr; c = c->prev)
+ {
+ if (c->dbuf.is.is_open ())
+ try
+ {
+ c->dbuf.is.close();
+ }
+ catch (const io_error&) {}
}
};
@@ -1889,9 +2742,8 @@ namespace build2
fail (ll) << "specified working directory " << cwd
<< " does not exist";
- // Absent if the process/builtin misses the "unsuccessful" deadline.
- //
- optional<process_exit> exit;
+ cstrings args (process_args ());
+ pc.args = &args;
const builtin_info* bi (resolve ? builtins.find (program) : nullptr);
@@ -1901,8 +2753,11 @@ namespace build2
{
// Execute the builtin.
//
- if (verb >= 2)
- print_process (process_args ());
+ // Don't print the true and false builtins, since they are normally
+ // used to control the command execution flow.
+ //
+ if (verb >= 2 && program != "true" && program != "false")
+ print_process (args);
// Some of the script builtins (cp, mkdir, etc) extend libbutl
// builtins (via callbacks) registering/moving cleanups for the
@@ -1943,18 +2798,6 @@ namespace build2
// We also extend the sleep builtin, deactivating the thread before
// going to sleep and waking up before the deadline is reached.
//
- // Let's "wrap up" the sleep-related values into the single object to
- // rely on "small function object" optimization.
- //
- struct sleep
- {
- optional<timestamp> deadline;
- bool terminated = false;
-
- sleep (const optional<timestamp>& d): deadline (d) {}
- };
- sleep slp (dl ? dl->value : optional<timestamp> ());
-
builtin_callbacks bcs {
// create
@@ -2116,16 +2959,19 @@ namespace build2
// sleep
//
- [&env, &slp] (const duration& d)
+ [&env, &pc] (const duration& d)
{
duration t (d);
- const optional<timestamp>& dl (slp.deadline);
+ const optional<timestamp>& dl (pc.dl
+ ? pc.dl->value
+ : optional<timestamp> ());
if (dl)
{
timestamp now (system_clock::now ());
- slp.terminated = now + t > *dl;
+ if (now + t > *dl)
+ pc.terminated = true;
if (*dl <= now)
return;
@@ -2138,7 +2984,7 @@ namespace build2
// If/when required we could probably support the precise sleep
// mode (e.g., via an option).
//
- env.context.sched.sleep (t);
+ env.context.sched->sleep (t);
}
};
@@ -2150,19 +2996,19 @@ namespace build2
move (ifd), move (ofd.out), move (efd),
cwd,
bcs));
+ pc.bltn = &b;
- pipe_command pc (b, c, ll, prev_cmd);
-
- // If the deadline is specified, then make sure we don't miss it
- // waiting indefinitely in the builtin destructor on the right-hand
- // side of the pipe failure.
+ // If the right-hand part of the pipe fails, then make sure we don't
+ // wait indefinitely in the process destructor if the deadlines are
+ // specified or just because a process is blocked on stderr.
//
- auto g (make_exception_guard ([&dl, &pc, &term_pipe] ()
+ auto g (make_exception_guard ([&pc, &close_pipe, &trace] ()
{
- if (dl)
+ if (pc.bltn != nullptr)
try
{
- term_pipe (&pc);
+ close_pipe ();
+ term_pipe (&pc, trace);
}
catch (const failed&)
{
@@ -2173,28 +3019,21 @@ namespace build2
success = run_pipe (env,
nc, ec,
move (ofd.in),
- ci + 1, li, ll, diag,
- output,
- dl, dl_cmd,
+ ii, li, ci + 1, ll, diag,
+ cf, last_cmd,
+ dl,
&pc);
- if (!dl)
- b.wait ();
- else if (!timed_wait (b, dl->value))
- term_pipe (&pc);
-
- // Note that this also handles ad hoc termination (without the call
- // to term_pipe()) by the sleep builtin (see above).
+ // Complete the pipeline execution, if not done yet.
//
- if (pc.terminated || slp.terminated)
+ if (pc.bltn != nullptr)
{
- assert (dl);
+ read_pipe ();
+ wait_pipe ();
- if (dl->success)
- exit = process_exit (0);
+ if (!complete_pipe ())
+ success = false;
}
- else
- exit = process_exit (r);
}
catch (const system_error& e)
{
@@ -2206,8 +3045,6 @@ namespace build2
{
// Execute the process.
//
- cstrings args (process_args ());
-
// If the process path is not pre-searched then resolve the relative
// non-simple program path against the script's working directory. The
// simple one will be left for the process path search machinery. Also
@@ -2265,10 +3102,16 @@ namespace build2
if (verb >= 2)
print_process (pe, args);
+ // Note that stderr can only be a pipe if we are buffering the
+ // diagnostics. In this case also pass the reading end so it can be
+ // "probed" on Windows (see butl::process::pipe for details).
+ //
process pr (
*pe.path,
args.data (),
- {ifd.get (), -1}, process::pipe (ofd), {-1, efd.get ()},
+ {ifd.get (), -1},
+ process::pipe (ofd),
+ {pc.dbuf.is.fd (), efd.get ()},
cwd.string ().c_str (),
pe.vars);
@@ -2278,18 +3121,19 @@ namespace build2
ofd.out.reset ();
efd.reset ();
- pipe_command pc (pr, c, ll, prev_cmd);
+ pc.proc = &pr;
- // If the deadline is specified, then make sure we don't miss it
- // waiting indefinitely in the process destructor on the right-hand
- // part of the pipe failure.
+ // If the right-hand part of the pipe fails, then make sure we don't
+ // wait indefinitely in the process destructor (see above for
+ // details).
//
- auto g (make_exception_guard ([&dl, &pc, &term_pipe] ()
+ auto g (make_exception_guard ([&pc, &close_pipe, &trace] ()
{
- if (dl)
+ if (pc.proc != nullptr)
try
{
- term_pipe (&pc);
+ close_pipe ();
+ term_pipe (&pc, trace);
}
catch (const failed&)
{
@@ -2300,33 +3144,21 @@ namespace build2
success = run_pipe (env,
nc, ec,
move (ofd.in),
- ci + 1, li, ll, diag,
- output,
- dl, dl_cmd,
+ ii, li, ci + 1, ll, diag,
+ cf, last_cmd,
+ dl,
&pc);
- if (!dl)
- pr.wait ();
- else if (!timed_wait (pr, dl->value))
- term_pipe (&pc);
-
-#ifndef _WIN32
- if (pc.terminated &&
- !pr.exit->normal () &&
- pr.exit->signal () == SIGTERM)
-#else
- if (pc.terminated &&
- !pr.exit->normal () &&
- pr.exit->status == DBG_TERMINATE_PROCESS)
-#endif
+ // Complete the pipeline execution, if not done yet.
+ //
+ if (pc.proc != nullptr)
{
- assert (dl);
+ read_pipe ();
+ wait_pipe ();
- if (dl->success)
- exit = process_exit (0);
+ if (!complete_pipe ())
+ success = false;
}
- else
- exit = pr.exit;
}
catch (const process_error& e)
{
@@ -2339,98 +3171,23 @@ namespace build2
}
}
- // If the righ-hand side pipeline failed than the whole pipeline fails,
- // and no further checks are required.
- //
- if (!success)
- return false;
-
- // Fail if the process is terminated due to reaching the deadline.
- //
- if (!exit)
- fail (ll) << cmd_path (dl_cmd != nullptr ? *dl_cmd : c)
- << " terminated: execution timeout expired";
-
- path pr (cmd_path (c));
-
- // If there is no valid exit code available by whatever reason then we
- // print the proper diagnostics, dump stderr (if cached and not too
- // large) and fail the whole script. Otherwise if the exit code is not
- // correct then we print diagnostics if requested and fail the pipeline.
- //
- bool valid (exit->normal ());
-
- // On Windows the exit code can be out of the valid codes range being
- // defined as uint16_t.
- //
-#ifdef _WIN32
- if (valid)
- valid = exit->code () < 256;
-#endif
-
- exit_comparison cmp (c.exit ? c.exit->comparison : exit_comparison::eq);
- uint16_t exc (c.exit ? c.exit->code : 0);
-
- success = valid &&
- (cmp == exit_comparison::eq) == (exc == exit->code ());
-
- if (!valid || (!success && diag))
- {
- // In the presense of a valid exit code we print the diagnostics and
- // return false rather than throw.
- //
- diag_record d (valid ? error (ll) : fail (ll));
-
- if (!exit->normal ())
- d << pr << " " << *exit;
- else
- {
- uint16_t ec (exit->code ()); // Make sure is printed as integer.
-
- if (!valid)
- d << pr << " exit code " << ec << " out of 0-255 range";
- else if (!success)
- {
- if (diag)
- {
- if (c.exit)
- d << pr << " exit code " << ec
- << (cmp == exit_comparison::eq ? " != " : " == ") << exc;
- else
- d << pr << " exited with code " << ec;
- }
- }
- else
- assert (false);
- }
-
- if (non_empty (esp, ll) && avail_on_failure (esp, env))
- d << info << "stderr: " << esp;
-
- if (non_empty (osp, ll) && avail_on_failure (osp, env))
- d << info << "stdout: " << osp;
-
- if (non_empty (isp, ll) && avail_on_failure (isp, env))
- d << info << "stdin: " << isp;
-
- // Print cached stderr.
- //
- print_file (d, esp, ll);
- }
-
- // If exit code is correct then check if the standard outputs match the
- // expectations. Note that stdout is only redirected to file for the
- // last command in the pipeline.
+ // If the pipeline or the right-hand side output checks failed, then no
+ // further checks are required. Otherwise, check if the standard outputs
+ // match the expectations. Note that stdout can only be redirected to
+ // file for the last command in the pipeline.
//
// The thinking behind matching stderr first is that if it mismatches,
// then the program probably misbehaves (executes wrong functionality,
// etc) in which case its stdout doesn't really matter.
//
if (success)
- success =
- check_output (pr, esp, isp, err, ll, env, diag, "stderr") &&
- (out == nullptr ||
- check_output (pr, osp, isp, *out, ll, env, diag, "stdout"));
+ {
+ path pr (cmd_path (c));
+
+ success = check_output (pr, esp, isp, err, ll, env, diag, "stderr") &&
+ (out == nullptr ||
+ check_output (pr, osp, isp, *out, ll, env, diag, "stdout"));
+ }
return success;
}
@@ -2438,9 +3195,10 @@ namespace build2
static bool
run_expr (environment& env,
const command_expr& expr,
- size_t li, const location& ll,
+ const iteration_index* ii, size_t li,
+ const location& ll,
bool diag,
- string* output)
+ const function<command_function>& cf, bool last_cmd)
{
// Commands are numbered sequentially throughout the expression
// starting with 1. Number 0 means the command is a single one.
@@ -2484,8 +3242,8 @@ namespace build2
r = run_pipe (env,
p.begin (), p.end (),
auto_fd (),
- ci, li, ll, print,
- output);
+ ii, li, ci, ll, print,
+ cf, last_cmd);
}
ci += p.size ();
@@ -2497,26 +3255,37 @@ namespace build2
void
run (environment& env,
const command_expr& expr,
- size_t li, const location& ll,
- string* output)
+ const iteration_index* ii, size_t li,
+ const location& ll,
+ const function<command_function>& cf,
+ bool last_cmd)
{
// Note that we don't print the expression at any verbosity level
// assuming that the caller does this, potentially providing some
// additional information (command type, etc).
//
- if (!run_expr (env, expr, li, ll, true /* diag */, output))
+ if (!run_expr (env,
+ expr,
+ ii, li, ll,
+ true /* diag */,
+ cf, last_cmd))
throw failed (); // Assume diagnostics is already printed.
}
bool
- run_if (environment& env,
- const command_expr& expr,
- size_t li, const location& ll,
- string* output)
+ run_cond (environment& env,
+ const command_expr& expr,
+ const iteration_index* ii, size_t li,
+ const location& ll,
+ const function<command_function>& cf, bool last_cmd)
{
// Note that we don't print the expression here (see above).
//
- return run_expr (env, expr, li, ll, false /* diag */, output);
+ return run_expr (env,
+ expr,
+ ii, li, ll,
+ false /* diag */,
+ cf, last_cmd);
}
void
@@ -2765,8 +3534,7 @@ namespace build2
try
{
size_t n (0);
- for (const dir_entry& de: dir_iterator (p,
- false /* ignore_dangling */))
+ for (const dir_entry& de: dir_iterator (p, dir_iterator::no_follow))
{
if (n++ < 10)
dr << '\n' << (de.ltype () == entry_type::directory
diff --git a/libbuild2/script/run.hxx b/libbuild2/script/run.hxx
index 8bc246c..c4c2aa2 100644
--- a/libbuild2/script/run.hxx
+++ b/libbuild2/script/run.hxx
@@ -38,22 +38,24 @@ namespace build2
// Location is the start position of this command line in the script. It
// can be used in diagnostics.
//
- // Optionally, save the command output into the referenced variable. In
- // this case assume that the expression contains a single pipline.
+ // Optionally, execute the specified function at the end of the pipe,
+ // either after the last command or instead of it.
//
void
run (environment&,
const command_expr&,
- size_t index,
+ const iteration_index*, size_t index,
const location&,
- string* output = nullptr);
+ const function<command_function>& = nullptr,
+ bool last_cmd = true);
bool
- run_if (environment&,
- const command_expr&,
- size_t index,
- const location&,
- string* output = nullptr);
+ run_cond (environment&,
+ const command_expr&,
+ const iteration_index*, size_t index,
+ const location&,
+ const function<command_function>& = nullptr,
+ bool last_cmd = true);
// Perform the registered special file cleanups in the direct order and
// then the regular cleanups in the reverse order.
@@ -80,6 +82,40 @@ namespace build2
//
string
diag_path (const dir_name_view&);
+
+ // Read the stream content, optionally splitting the input data at
+ // whitespaces or newlines and calling the specified callback function for
+ // each substring (see the set builtin options for the splitting
+ // semantics). Throw failed on io_error.
+ //
+ // If the stream is a pipeline's output, then the pipeline argument must
+ // also be specified. Normally called from a custom command function (see
+ // command_function for details) which is provided with the pipeline
+ // information.
+ //
+ // Turn the stream into the non-blocking mode and, if the pipeline is
+ // specified, read out its buffered stderr streams while waiting for the
+ // input stream data. If a deadline is specified and is reached, then
+ // terminate the whole pipeline, if specified, and bail out. Otherwise
+ // issue diagnostics and fail. The thinking here is that in the former
+ // case the caller first needs to dump the buffered stderr streams, issue
+ // the appropriate diagnostics for the pipeline processes/builtins, and
+ // only throw failed afterwards.
+ //
+ // Note that on Windows we can only turn file descriptors of the pipe type
+ // into the non-blocking mode. Thus, a non-pipe descriptor is read in the
+ // blocking manner (and the deadline is checked less accurately). This is
+ // fine since there are no pipeline stderr streams to read out in this
+ // case.
+ //
+ void
+ read (auto_fd&&,
+ bool whitespace, bool newline, bool exact,
+ const function<void (string&&)>&,
+ pipe_command* pipeline,
+ const optional<deadline>&,
+ const location&,
+ const char* what);
}
}
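The splitting semantics assumed above (whitespace, newline, exact) can be illustrated with a minimal standalone sketch. This is not the read() function itself, just a simplified model of how the input could be tokenized and handed to the callback; the function name and the blocking std::istream handling are assumptions for illustration only.

  #include <functional>
  #include <iostream>
  #include <istream>
  #include <iterator>
  #include <sstream>
  #include <string>

  static void
  split_read (std::istream& is,
              bool whitespace, bool newline, bool exact,
              const std::function<void (std::string&&)>& cb)
  {
    if (whitespace)
    {
      // Split at any whitespace run, ignoring leading/trailing whitespace.
      //
      for (std::string s; is >> s; )
        cb (std::move (s));
    }
    else if (newline)
    {
      // Split into lines (the trailing newline does not produce an extra
      // empty element).
      //
      for (std::string l; std::getline (is, l); )
        cb (std::move (l));
    }
    else
    {
      // Treat the whole input as a single element, stripping the trailing
      // newline unless exact is requested.
      //
      std::istreambuf_iterator<char> b (is), e;
      std::string s (b, e);

      if (!exact && !s.empty () && s.back () == '\n')
        s.pop_back ();

      cb (std::move (s));
    }
  }

  int main ()
  {
    std::istringstream is ("one two\nthree\n");

    split_read (is, true /* whitespace */, false, false,
                [] (std::string&& s) {std::cout << s << '\n';});
  }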
diff --git a/libbuild2/script/script.cxx b/libbuild2/script/script.cxx
index 9e6eeed..b53fc23 100644
--- a/libbuild2/script/script.cxx
+++ b/libbuild2/script/script.cxx
@@ -20,14 +20,17 @@ namespace build2
switch (lt)
{
- case line_type::var: s = "variable"; break;
- case line_type::cmd: s = "command"; break;
- case line_type::cmd_if: s = "'if'"; break;
- case line_type::cmd_ifn: s = "'if!'"; break;
- case line_type::cmd_elif: s = "'elif'"; break;
- case line_type::cmd_elifn: s = "'elif!'"; break;
- case line_type::cmd_else: s = "'else'"; break;
- case line_type::cmd_end: s = "'end'"; break;
+ case line_type::var: s = "variable"; break;
+ case line_type::cmd: s = "command"; break;
+ case line_type::cmd_if: s = "'if'"; break;
+ case line_type::cmd_ifn: s = "'if!'"; break;
+ case line_type::cmd_elif: s = "'elif'"; break;
+ case line_type::cmd_elifn: s = "'elif!'"; break;
+ case line_type::cmd_else: s = "'else'"; break;
+ case line_type::cmd_while: s = "'while'"; break;
+ case line_type::cmd_for_args: s = "'for'"; break;
+ case line_type::cmd_for_stream: s = "'for'"; break;
+ case line_type::cmd_end: s = "'end'"; break;
}
return o << s;
@@ -186,14 +189,14 @@ namespace build2
void
dump (ostream& os, const string& ind, const lines& ls)
{
- // Additionally indent the if-branch lines.
+ // Additionally indent the flow control construct block lines.
//
- string if_ind;
+ string fc_ind;
for (const line& l: ls)
{
- // Before printing indentation, decrease it if the else or end line is
- // reached.
+ // Before printing indentation, decrease it if the else, end, etc. line
+ // is reached.
//
switch (l.type)
{
@@ -202,9 +205,9 @@ namespace build2
case line_type::cmd_else:
case line_type::cmd_end:
{
- size_t n (if_ind.size ());
+ size_t n (fc_ind.size ());
assert (n >= 2);
- if_ind.resize (n - 2);
+ fc_ind.resize (n - 2);
break;
}
default: break;
@@ -212,9 +215,10 @@ namespace build2
// Print indentations.
//
- os << ind << if_ind;
+ os << ind << fc_ind;
- // After printing indentation, increase it for if/else branch.
+ // After printing indentation, increase it for the flow control
+ // construct block lines.
//
switch (l.type)
{
@@ -222,7 +226,10 @@ namespace build2
case line_type::cmd_ifn:
case line_type::cmd_elif:
case line_type::cmd_elifn:
- case line_type::cmd_else: if_ind += " "; break;
+ case line_type::cmd_else:
+ case line_type::cmd_while:
+ case line_type::cmd_for_args:
+ case line_type::cmd_for_stream: fc_ind += " "; break;
default: break;
}
@@ -418,9 +425,14 @@ namespace build2
// Timeout.
//
if (c.timeout)
+ {
o << " -t "
<< chrono::duration_cast<chrono::seconds> (*c.timeout).count ();
+ if (c.timeout_success)
+ o << " -s";
+ }
+
// CWD.
//
if (c.cwd)
@@ -761,7 +773,9 @@ namespace build2
{
using script::cleanup;
- assert (!implicit || c.type == cleanup_type::always);
+ // Implicit never-cleanup doesn't make sense.
+ //
+ assert (!implicit || c.type != cleanup_type::never);
const path& p (c.path);
diff --git a/libbuild2/script/script.hxx b/libbuild2/script/script.hxx
index 81bc13c..c406165 100644
--- a/libbuild2/script/script.hxx
+++ b/libbuild2/script/script.hxx
@@ -27,6 +27,9 @@ namespace build2
cmd_elif,
cmd_elifn,
cmd_else,
+ cmd_while,
+ cmd_for_args, // `for x: ...`
+ cmd_for_stream, // `... | for x` and `for x <...`
cmd_end
};
@@ -40,7 +43,7 @@ namespace build2
union
{
- const variable* var; // Pre-entered for line_type::var.
+ const variable* var; // Pre-entered for line_type::{var,cmd_for_*}.
};
};
@@ -262,7 +265,7 @@ namespace build2
cleanup_type type;
build2::path path;
};
- using cleanups = vector<cleanup>;
+ using cleanups = small_vector<cleanup, 1>;
// command_exit
//
@@ -315,6 +318,10 @@ namespace build2
add (string);
};
+ // @@ For better diagnostics we may want to store an individual location
+ // of each command in the pipeline (maybe we can share the file part
+ //     somehow since a pipeline cannot span multiple files).
+ //
struct command
{
// We use NULL initial as an indication that the path stored in recall
@@ -324,9 +331,13 @@ namespace build2
process_path program;
strings arguments;
- optional<dir_path> cwd; // From env builtin.
- environment_vars variables; // From env builtin.
- optional<duration> timeout; // From env builtin.
+
+ // These come from the env builtin.
+ //
+ optional<dir_path> cwd;
+ environment_vars variables;
+ optional<duration> timeout;
+ bool timeout_success;
optional<redirect> in;
optional<redirect> out;
@@ -354,6 +365,10 @@ namespace build2
// command_pipe
//
+ // Note that we cannot use small_vector here, since moving from objects of
+ // the command_pipe type would invalidate the command redirects of the
+ // reference type in this case.
+ //
using command_pipe = vector<command>;
void
@@ -372,7 +387,7 @@ namespace build2
command_pipe pipe;
};
- using command_expr = vector<expr_term>;
+ using command_expr = small_vector<expr_term, 1>;
void
to_stream (ostream&, const command_expr&, command_to_stream);
@@ -380,6 +395,15 @@ namespace build2
ostream&
operator<< (ostream&, const command_expr&);
+ // Stack-allocated linked list of iteration indexes of the nested loops.
+ //
+ struct iteration_index
+ {
+ size_t index; // 1-based.
+
+ const iteration_index* prev; // NULL for the top-most loop.
+ };
+
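A minimal standalone sketch of the idea behind iteration_index: each nested loop allocates its node on the stack and links it to the enclosing one, so the whole chain of 1-based iteration numbers is available (for example, for diagnostics) without heap allocation. The simplified types and the printing format are illustrative assumptions, not the real implementation.

  #include <cstddef>
  #include <iostream>

  struct iteration_index
  {
    std::size_t index;            // 1-based.
    const iteration_index* prev;  // NULL for the top-most loop.
  };

  // Print the iteration chain, outermost first (e.g., "2.1").
  //
  static void
  print_chain (const iteration_index* ii, std::ostream& os)
  {
    if (ii == nullptr)
      return;

    print_chain (ii->prev, os);

    if (ii->prev != nullptr)
      os << '.';

    os << ii->index;
  }

  int main ()
  {
    for (std::size_t i (1); i <= 2; ++i)
    {
      iteration_index outer {i, nullptr}; // Lives on this frame's stack.

      for (std::size_t j (1); j <= 2; ++j)
      {
        iteration_index inner {j, &outer}; // Links to the enclosing loop.

        print_chain (&inner, std::cout);
        std::cout << '\n';
      }
    }
  }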
struct timeout
{
duration value;
@@ -536,7 +560,7 @@ namespace build2
// Set variable value with optional (non-empty) attributes.
//
virtual void
- set_variable (string&& name,
+ set_variable (string name,
names&&,
const string& attrs,
const location&) = 0;
@@ -569,6 +593,20 @@ namespace build2
~environment () = default;
};
+ // Custom command function that can be executed at the end of the
+ // pipeline. Should throw io_error on the underlying OS error.
+ //
+ // Note: the pipeline can be NULL (think of `for x <<<='foo'`).
+ //
+ struct pipe_command;
+
+ using command_function = void (environment&,
+ const strings& args,
+ auto_fd in,
+ pipe_command* pipeline,
+ const optional<deadline>&,
+ const location&);
+
// Helpers.
//
// Issue diagnostics with the specified prefix and fail if the string
@@ -585,6 +623,10 @@ namespace build2
verify_environment_var_assignment (const string&,
const char* prefix,
const location&);
+
+ // "Unhide" operator<< from the build2 namespace.
+ //
+ using build2::operator<<;
}
}
diff --git a/libbuild2/search.cxx b/libbuild2/search.cxx
index fca19ea..4e855e3 100644
--- a/libbuild2/search.cxx
+++ b/libbuild2/search.cxx
@@ -15,7 +15,9 @@ using namespace butl;
namespace build2
{
const target*
- search_existing_target (context& ctx, const prerequisite_key& pk)
+ search_existing_target (context& ctx,
+ const prerequisite_key& pk,
+ bool out_only)
{
tracer trace ("search_existing_target");
@@ -39,9 +41,10 @@ namespace build2
// Prerequisite's out directory can be one of the following:
//
- // empty This means out is undetermined and we simply search for a
- // target that is in the out tree which happens to be indicated
- // by an empty value, so we can just pass this as is.
+ // empty This means out is undetermined and we search for a target
+ // first in the out tree (which happens to be indicated by an
+ // empty value, so we can just pass this as is) and if not
+ // found, then in the src tree (unless suppressed).
//
// absolute This is the "final" value that doesn't require any processing
// and we simply use it as is.
@@ -58,8 +61,11 @@ namespace build2
else
{
o = pk.scope->out_path ();
- o /= *tk.out;
- o.normalize ();
+ if (!tk.out->current ())
+ {
+ o /= *tk.out;
+ o.normalize ();
+ }
}
// Drop out if it is the same as src (in-src build).
@@ -71,6 +77,27 @@ namespace build2
const target* t (
ctx.targets.find (*tk.type, d, o, *tk.name, tk.ext, trace));
+ // Try in the src tree.
+ //
+ if (t == nullptr &&
+ !out_only &&
+ tk.out->empty () &&
+ tk.dir->relative () &&
+ !pk.scope->out_eq_src ())
+ {
+ o = move (d);
+
+ d = pk.scope->src_path ();
+
+ if (!tk.dir->empty ())
+ {
+ d /= *tk.dir;
+ d.normalize ();
+ }
+
+ t = ctx.targets.find (*tk.type, d, o, *tk.name, tk.ext, trace);
+ }
+
if (t != nullptr)
l5 ([&]{trace << "existing target " << *t
<< " for prerequisite " << pk;});
@@ -86,6 +113,10 @@ namespace build2
const target_key& ctk (cpk.tk);
const scope* s (cpk.scope);
+ // Has to be a file target.
+ //
+ assert (ctk.type->is_a<file> ());
+
path f;
if (ctk.dir->absolute ())
@@ -165,11 +196,7 @@ namespace build2
// will be from the src tree.
//
// In the other two cases we use the prerequisite's out (in case it is
- // relative, we need to complete it, which is @@ OUT TODO). Note that we
- // blindly trust the user's value which can be used for some interesting
- // tricks, for example:
- //
- // ../cxx{foo}@./
+ // relative, we need to complete it).
//
dir_path out;
@@ -179,21 +206,39 @@ namespace build2
out = out_src (d, *s->root_scope ());
}
else
- out = *tk.out;
+ {
+ if (tk.out->absolute ())
+ out = *tk.out; // Already normalized.
+ else
+ {
+ out = pk.scope->out_path ();
+ if (!tk.out->current ())
+ {
+ out /= *tk.out;
+ out.normalize ();
+ }
+ }
+
+ // Drop out if it is the same as src (in-src build).
+ //
+ if (out == d)
+ out.clear ();
+ }
// Find or insert. Note that we are using our updated extension.
//
+ // More often insert than find, so skip find in insert().
+ //
auto r (ctx.targets.insert (*tk.type,
move (d),
move (out),
*tk.name,
ext,
target_decl::prereq_file,
- trace));
+ trace,
+ true /* skip_find */));
- // Has to be a file_target.
- //
- const file& t (dynamic_cast<const file&> (r.first));
+ const file& t (r.first.as<file> ());
l5 ([&]{trace << (r.second ? "new" : "existing") << " target " << t
<< " for prerequisite " << cpk;});
@@ -210,11 +255,34 @@ namespace build2
const target_key& tk (pk.tk);
+ // If out is present, then it means the target is in src and we shouldn't
+ // be creating new targets in src, should we? Feels like this should not
+ // even be called if out is not empty.
+ //
+ assert (tk.out->empty ());
+
// We default to the target in this directory scope.
//
dir_path d;
if (tk.dir->absolute ())
+ {
d = *tk.dir; // Already normalized.
+
+ // Even if out is empty, it may still be (only) in src.
+ //
+ // Note: issue diagnostics consistent with search() after skipping this
+ // function due to non-empty out.
+ //
+ // @@ PERF: we could first check if it's in pk.scope, which feels like
+ // the common case. Though this doesn't seem to affect
+ // performance in any noticeable way.
+ //
+ auto p (ctx.scopes.find (d, false)); // Note: never empty.
+ if (*p.first == nullptr && ++p.first != p.second)
+ {
+ fail << "no existing source file for prerequisite " << pk << endf;
+ }
+ }
else
{
d = pk.scope->out_path ();
@@ -228,7 +296,7 @@ namespace build2
// Find or insert.
//
- // @@ OUT: same story as in search_existing_target() re out.
+ // More often insert than find, so skip find in insert().
//
auto r (ctx.targets.insert (*tk.type,
move (d),
@@ -236,7 +304,8 @@ namespace build2
*tk.name,
tk.ext,
target_decl::prereq_new,
- trace));
+ trace,
+ true /* skip_find */));
const target& t (r.first);
l5 ([&]{trace << (r.second ? "new" : "existing") << " target " << t
@@ -251,11 +320,27 @@ namespace build2
const target_key& tk (pk.tk);
+ // If out is present, then it means the target is in src and we shouldn't
+ // be creating new targets in src, should we? Feels like this should not
+ // even be called if out is not empty.
+ //
+ assert (tk.out->empty ());
+
// We default to the target in this directory scope.
//
dir_path d;
if (tk.dir->absolute ())
+ {
d = *tk.dir; // Already normalized.
+
+ // As above.
+ //
+ auto p (ctx.scopes.find (d, false));
+ if (*p.first == nullptr && ++p.first != p.second)
+ {
+ fail << "no existing source file for prerequisite " << pk << endf;
+ }
+ }
else
{
d = pk.scope->out_path ();
@@ -269,7 +354,7 @@ namespace build2
// Find or insert.
//
- // @@ OUT: same story as in search_existing_target() re out.
+ // More often insert than find, so skip find in insert_locked().
//
auto r (ctx.targets.insert_locked (*tk.type,
move (d),
@@ -277,7 +362,8 @@ namespace build2
*tk.name,
tk.ext,
target_decl::prereq_new,
- trace));
+ trace,
+ true /* skip_find */));
l5 ([&]
{
diag_record dr (trace);
diff --git a/libbuild2/search.hxx b/libbuild2/search.hxx
index aa30648..198c65f 100644
--- a/libbuild2/search.hxx
+++ b/libbuild2/search.hxx
@@ -15,8 +15,13 @@ namespace build2
// Search for an existing target in this prerequisite's scope. Scope can be
// NULL if directories are absolute.
//
+ // If dir is relative and out is not specified, then first search in the out
+ // tree and, if not found, then in the src tree, unless out_only is true.
+ // If dir is absolute, then out is expected to be specified as well, if
+ // necessary.
+ //
LIBBUILD2_SYMEXPORT const target*
- search_existing_target (context&, const prerequisite_key&);
+ search_existing_target (context&, const prerequisite_key&, bool out_only);
// Search for an existing file. If the prerequisite directory is relative,
// then look in the scope's src directory. Otherwise, if the absolute
@@ -32,6 +37,8 @@ namespace build2
// Create a new target in this prerequisite's scope.
//
+ // Fail if the target is in src directory.
+ //
LIBBUILD2_SYMEXPORT const target&
create_new_target (context&, const prerequisite_key&);
diff --git a/libbuild2/target-key.hxx b/libbuild2/target-key.hxx
index c5690a9..9ac87dc 100644
--- a/libbuild2/target-key.hxx
+++ b/libbuild2/target-key.hxx
@@ -94,8 +94,21 @@ namespace build2
LIBBUILD2_SYMEXPORT ostream&
operator<< (ostream&, const target_key&);
- LIBBUILD2_SYMEXPORT ostream&
- to_stream (ostream&, const target_key&, optional<stream_verbosity> = nullopt);
+ // If name_only is true, then only print the target name (and extension, if
+ // necessary), without the directory or type.
+ //
+ // Return true if the result is regular, that is, in the
+ // <dir>/<type>{<name>}@<out>/ form with the individual components
+ // corresponding directly to the target_key members (that is, without moving
+ // parts around as would be the case for directories). This information is
+ // used when trying to print several targets in a combined form (for
+ // example, {hxx cxx}{foo}) in print_diag().
+ //
+ LIBBUILD2_SYMEXPORT bool
+ to_stream (ostream&,
+ const target_key&,
+ optional<stream_verbosity> = nullopt,
+ bool name_only = false);
}
namespace std
diff --git a/libbuild2/target-state.hxx b/libbuild2/target-state.hxx
index 3457b13..a6106f7 100644
--- a/libbuild2/target-state.hxx
+++ b/libbuild2/target-state.hxx
@@ -18,9 +18,14 @@ namespace build2
// Note that postponed is "greater" than unchanged since it may result in
// the changed state.
//
+ // Note also that value 0 is available to indicate absent/invalid state.
+ //
+ // NOTE: don't forget to also update operator<<(ostream,target_state) if
+ // changing anything here.
+ //
enum class target_state: uint8_t
{
- unknown,
+ unknown = 1,
unchanged,
postponed,
busy,
@@ -38,8 +43,14 @@ namespace build2
return l;
}
- LIBBUILD2_SYMEXPORT ostream&
- operator<< (ostream&, target_state); // target.cxx
+ LIBBUILD2_SYMEXPORT string
+ to_string (target_state); // target.cxx
+
+ inline ostream&
+ operator<< (ostream& o, target_state ts)
+ {
+ return o << to_string (ts);
+ }
}
#endif // LIBBUILD2_TARGET_STATE_HXX
diff --git a/libbuild2/target-type.hxx b/libbuild2/target-type.hxx
index 0e24e3a..93c5744 100644
--- a/libbuild2/target-type.hxx
+++ b/libbuild2/target-type.hxx
@@ -89,11 +89,41 @@ namespace build2
const location&,
bool reverse);
- void (*print) (ostream&, const target_key&);
+ // See to_stream(ostream,target_key) for details.
+ //
+ bool (*print) (ostream&, const target_key&, bool name_only);
+
+ // Target type-specific prerequisite to target search.
+ //
+ // If the passed target is NULL, then only search for an existing target
+ // (such a search can be performed during execute, not only during match).
+ //
+ const target* (*search) (context&,
+ const target*,
+ const prerequisite_key&);
+
+ // Target type flags.
+ //
+ // Note that the member_hint flag should only be used on groups with
+ // link-up during load (see lib{}, for example). In particular, if the
+ // group link-up only happens during match, then the hint would be looked
+ // up before the group is known.
+ //
+ // Note: consider exposing as an attribute in define if adding a new flag.
+ //
+ enum class flag: uint64_t
+ {
+ none = 0,
+ group = 0x01, // A (non-adhoc) group.
+ see_through = group | 0x02, // A group with "see through" semantics.
+ member_hint = group | 0x04, // Untyped rule hint applies to members.
+ dyn_members = group | 0x08 // A group with dynamic members.
+ };
- const target* (*search) (const target&, const prerequisite_key&);
+ flag flags;
- bool see_through; // A group with the default "see through" semantics.
+ bool
+ see_through () const;
template <typename T>
bool
@@ -102,14 +132,18 @@ namespace build2
bool
is_a (const target_type& tt) const
{
- return this == &tt || (base != nullptr && is_a_base (tt));
+ for (const target_type* b (this); b != nullptr; b = b->base)
+ if (b == &tt)
+ return true;
+
+ return false;
}
bool
is_a (const char*) const; // Defined in target.cxx
- bool
- is_a_base (const target_type&) const; // Defined in target.cxx
+ target_type& operator= (target_type&&) = delete;
+ target_type& operator= (const target_type&) = delete;
};
inline bool
@@ -124,6 +158,32 @@ namespace build2
inline ostream&
operator<< (ostream& os, const target_type& tt) {return os << tt.name;}
+ inline target_type::flag
+ operator&= (target_type::flag& x, target_type::flag y)
+ {
+ return x = static_cast<target_type::flag> (
+ static_cast<uint64_t> (x) & static_cast<uint64_t> (y));
+ }
+
+ inline target_type::flag
+ operator|= (target_type::flag& x, target_type::flag y)
+ {
+ return x = static_cast<target_type::flag> (
+ static_cast<uint64_t> (x) | static_cast<uint64_t> (y));
+ }
+
+ inline target_type::flag
+ operator& (target_type::flag x, target_type::flag y) {return x &= y;}
+
+ inline target_type::flag
+ operator| (target_type::flag x, target_type::flag y) {return x |= y;}
+
+ inline bool target_type::
+ see_through () const
+ {
+ return (flags & flag::see_through) == flag::see_through;
+ }
+
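Note that see_through, member_hint, and dyn_members are composite values that include the group bit, so testing for any of them with (flags & f) == f also confirms the type is a group. A standalone sketch of this pattern (the enum values and operators mirror the ones above; the has() helper is an illustrative assumption):

  #include <cstdint>

  enum class flag: std::uint64_t
  {
    none        = 0,
    group       = 0x01,
    see_through = group | 0x02,
    member_hint = group | 0x04,
    dyn_members = group | 0x08
  };

  constexpr flag operator& (flag x, flag y)
  {
    return static_cast<flag> (
      static_cast<std::uint64_t> (x) & static_cast<std::uint64_t> (y));
  }

  constexpr flag operator| (flag x, flag y)
  {
    return static_cast<flag> (
      static_cast<std::uint64_t> (x) | static_cast<std::uint64_t> (y));
  }

  // Testing for a composite flag also confirms the group bit.
  //
  constexpr bool has (flag fs, flag f) {return (fs & f) == f;}

  static_assert (has (flag::see_through, flag::group),
                 "see_through implies group");
  static_assert (has (flag::group | flag::dyn_members, flag::dyn_members),
                 "composite test matches both bits");
  static_assert (!has (flag::group, flag::see_through),
                 "plain group is not see-through");

  int main () {}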
// Target type map.
//
class target_type_map
@@ -144,18 +204,18 @@ namespace build2
return type_map_.empty ();
}
- const target_type&
+ pair<reference_wrapper<const target_type>, bool>
insert (const target_type& tt)
{
- type_map_.emplace (tt.name, target_type_ref (tt));
- return tt;
+ auto r (type_map_.emplace (tt.name, target_type_ref (tt)));
+ return {r.second ? tt : r.first->second.get (), r.second};
}
template <typename T>
const target_type&
insert ()
{
- return insert (T::static_type);
+ return insert (T::static_type).first;
}
pair<reference_wrapper<const target_type>, bool>
@@ -201,7 +261,7 @@ namespace build2
target_type_ref (unique_ptr<target_type>&& p)
: p_ (p.release ()), d_ (true) {}
- target_type_ref (target_type_ref&& r)
+ target_type_ref (target_type_ref&& r) noexcept
: p_ (r.p_), d_ (r.d_) {r.p_ = nullptr;}
~target_type_ref () {if (p_ != nullptr && d_) delete p_;}
diff --git a/libbuild2/target.cxx b/libbuild2/target.cxx
index bc5dbba..2a134a4 100644
--- a/libbuild2/target.cxx
+++ b/libbuild2/target.cxx
@@ -22,26 +22,13 @@ namespace build2
bool target_type::
is_a (const char* n) const
{
- if (strcmp (name, n) == 0)
- return true;
-
- for (const target_type* b (base); b != nullptr; b = b->base)
+ for (const target_type* b (this); b != nullptr; b = b->base)
if (strcmp (b->name, n) == 0)
return true;
return false;
}
- bool target_type::
- is_a_base (const target_type& tt) const
- {
- for (const target_type* b (base); b != nullptr; b = b->base)
- if (*b == tt)
- return true;
-
- return false;
- }
-
// target_key
//
void target_key::
@@ -51,7 +38,9 @@ namespace build2
if (!name->empty ())
{
v = *name;
- target::combine_name (v, ext, false /* @@ TODO: what to do? */);
+ // @@ TMP: see also other calls to combine_name() -- need to fix.
+ //
+ target::combine_name (v, ext, false /* @@ TMP: what to do? */);
}
else
assert (!ext || ext->empty ()); // Unspecified or none.
@@ -69,6 +58,7 @@ namespace build2
//
static const char* const target_state_[] =
{
+ "<invalid>", // Absent/invalid (see target_state for details).
"unknown",
"unchanged",
"postponed",
@@ -78,10 +68,10 @@ namespace build2
"group"
};
- ostream&
- operator<< (ostream& os, target_state ts)
+ string
+ to_string (target_state ts)
{
- return os << target_state_[static_cast<uint8_t> (ts)];
+ return target_state_[static_cast<uint8_t> (ts)];
}
// target
@@ -91,7 +81,6 @@ namespace build2
target::
~target ()
{
- clear_data ();
}
const string& target::
@@ -122,33 +111,40 @@ namespace build2
group_view target::
group_members (action) const
{
- assert (false); // Not a group or doesn't expose its members.
+ // Not a group or doesn't expose its members.
+ //
return group_view {nullptr, 0};
}
const scope& target::
- base_scope () const
+ base_scope_impl () const
{
// If this target is from the src tree, use its out directory to find
// the scope.
//
- return ctx.scopes.find_out (out_dir ());
- }
+ const scope& s (ctx.scopes.find_out (out_dir ()));
- const scope& target::
- root_scope () const
- {
- // This is tricky to cache so we do the lookup for now.
+ // Cache unless we are in the load phase.
//
- const scope* r (base_scope ().root_scope ());
- assert (r != nullptr);
- return *r;
+ if (ctx.phase != run_phase::load)
+ {
+ const scope* e (nullptr);
+ if (!base_scope_.compare_exchange_strong (
+ e,
+ &s,
+ memory_order_release,
+ memory_order_consume))
+ assert (e == &s);
+ }
+
+ return s;
}
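The caching above uses a common lock-free publication idiom: compute the value, publish it with a single compare-and-exchange, and if another thread won the race, assert that it arrived at the same result. A standalone sketch of the idiom with simplified stand-in types (not the actual target/scope classes):

  #include <atomic>
  #include <cassert>
  #include <functional>

  struct scope {};

  struct target
  {
    mutable std::atomic<const scope*> base_scope_ {nullptr};

    const scope&
    base_scope (const std::function<const scope& ()>& compute) const
    {
      // Fast path: use the cached value if it is already published.
      //
      if (const scope* s = base_scope_.load (std::memory_order_consume))
        return *s;

      const scope& s (compute ());

      // Publish the computed value. If another thread got there first, it
      // must have computed the same scope (the computation is deterministic).
      //
      const scope* e (nullptr);
      if (!base_scope_.compare_exchange_strong (e,
                                                &s,
                                                std::memory_order_release,
                                                std::memory_order_consume))
        assert (e == &s);

      return s;
    }
  };

  int main ()
  {
    scope sc;
    target t;

    auto compute = [&sc] () -> const scope& {return sc;};

    const scope& s1 (t.base_scope (compute)); // Computes and caches.
    const scope& s2 (t.base_scope (compute)); // Hits the cache.

    assert (&s1 == &sc && &s2 == &sc);
  }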
pair<lookup, size_t> target::
lookup_original (const variable& var,
bool target_only,
- const scope* bs) const
+ const scope* bs,
+ bool locked) const
{
pair<lookup_type, size_t> r (lookup_type (), 0);
@@ -166,6 +162,11 @@ namespace build2
{
++r.second;
+ // While we went back to not treating the first member as a group for
+ // variable lookup, let's keep this logic in case one day we end up with
+ // a separate ad hoc group target.
+ //
+#if 0
// In case of an ad hoc group, we may have to look in two groups.
//
if ((g1 = group) != nullptr)
@@ -183,6 +184,19 @@ namespace build2
}
}
}
+#else
+ // Skip looking up in the ad hoc group, which is semantically the
+ // first/primary member.
+ //
+ if ((g1 = group == nullptr
+ ? nullptr
+ : group->adhoc_group () ? group->group : group))
+ {
+ auto p (g1->vars.lookup (var));
+ if (p.first != nullptr)
+ r.first = lookup_type (*p.first, p.second, g1->vars);
+ }
+#endif
}
// Delegate to scope's lookup_original().
@@ -191,9 +205,14 @@ namespace build2
{
if (!target_only)
{
- target_key tk (key ());
- target_key g1k (g1 != nullptr ? g1->key () : target_key {});
- target_key g2k (g2 != nullptr ? g2->key () : target_key {});
+ auto key = [locked] (const target* t)
+ {
+ return locked ? t->key_locked () : t->key ();
+ };
+
+ target_key tk (key (this));
+ target_key g1k (g1 != nullptr ? key (g1) : target_key {});
+ target_key g2k (g2 != nullptr ? key (g2) : target_key {});
if (bs == nullptr)
bs = &base_scope ();
@@ -214,14 +233,30 @@ namespace build2
}
value& target::
- append (const variable& var)
+ append (const variable& var, const scope* bs)
{
// Note: see also prerequisite::append() if changing anything here.
// Note that here we want the original value without any overrides
// applied.
//
- auto l (lookup_original (var).first);
+ auto l (lookup_original (var, false, bs).first);
+
+ if (l.defined () && l.belongs (*this)) // Existing var in this target.
+ return vars.modify (l); // Ok since this is original.
+
+ value& r (assign (var)); // NULL.
+
+ if (l.defined ())
+ r = *l; // Copy value (and type) from the outer scope.
+
+ return r;
+ }
+
+ value& target::
+ append_locked (const variable& var, const scope* bs)
+ {
+ auto l (lookup_original (var, false, bs, true /* locked */).first);
if (l.defined () && l.belongs (*this)) // Existing var in this target.
return vars.modify (l); // Ok since this is original.
@@ -541,40 +576,127 @@ namespace build2
// include()
//
+ // See var_include documentation for details on what's going on here.
+ //
include_type
include_impl (action a,
const target& t,
const prerequisite& p,
- const target* m)
+ const target* m,
+ lookup* rl)
{
context& ctx (t.ctx);
include_type r (include_type::normal);
+ {
+ lookup l (p.vars[ctx.var_include]);
+
+ if (l.defined ())
+ {
+ if (l->null)
+ {
+ // @@ TMP (added in 0.16.0).
+ //
+ warn << "null " << *ctx.var_include << " variable value specified "
+ << "for prerequisite " << p <<
+ info << "treated as undefined for backwards compatibility" <<
+ info << "this warning will become error in the future";
+ }
+ else
+ {
+ const string& v (cast<string> (*l));
+
+ if (v == "false") r = include_type::excluded;
+ else if (v == "true") r = include_type::normal;
+ else if (v == "adhoc") r = include_type::adhoc;
+ else if (v == "posthoc") r = include_type::posthoc;
+ else
+ fail << "invalid " << *ctx.var_include << " variable value '"
+ << v << "' specified for prerequisite " << p;
+ }
+ }
+ }
- // If var_clean is defined, then it takes precedence over include for
- // the clean operation.
+ // Handle operation-specific override (see var_include documentation
+ // for details).
//
lookup l;
- if (a.operation () == clean_id && (l = p.vars[ctx.var_clean]))
- {
- r = cast<bool> (l) ? include_type::normal : include_type::excluded;
- }
- else if (const string* v = cast_null<string> (p.vars[ctx.var_include]))
+ optional<bool> r1; // Absent means something other than true|false.
+
+ names storage;
+ names_view ns;
+ const variable* ovar (nullptr);
+
+ if (r != include_type::excluded)
{
- if (*v == "false") r = include_type::excluded;
- else if (*v == "adhoc") r = include_type::adhoc;
- else if (*v == "true") r = include_type::normal;
- else
- fail << "invalid " << ctx.var_include->name << " variable value "
- << "'" << *v << "' specified for prerequisite " << p;
+ // Instead of going via potentially expensive target::base_scope(), use
+ // the prerequisite's scope; while it may not be the same as the
+ // target's base scope, they must have the same root scope.
+ //
+ const scope& rs (*p.scope.root_scope ());
+
+ ovar = rs.root_extra->operations[
+ (a.outer ()
+ ? ctx.current_outer_oif
+ : ctx.current_inner_oif)->id].ovar;
+
+ if (ovar != nullptr)
+ {
+ l = p.vars[*ovar];
+
+ if (l.defined ())
+ {
+ if (l->null)
+ fail << "null " << *ovar << " variable value specified for "
+ << "prerequisite " << p;
+
+ // Maybe we should optimize this for the common cases (bool, path,
+ // name)? But then again we don't expect many such overrides. Plus
+ // will complicate the diagnostics below.
+ //
+ ns = reverse (*l, storage, true /* reduce */);
+
+ if (ns.size () == 1)
+ {
+ const name& n (ns[0]);
+
+ if (n.simple ())
+ {
+ const string& v (n.value);
+
+ if (v == "false")
+ r1 = false;
+ else if (v == "true")
+ r1 = true;
+ }
+ }
+
+ if (r1 && !*r1)
+ r = include_type::excluded;
+ }
+ }
}
// Call the meta-operation override, if any (currently used by dist).
//
- if (r != include_type::normal)
+ if (r != include_type::normal || l)
{
if (auto f = ctx.current_mif->include)
- r = f (a, t, prerequisite_member {p, m}, r);
+ r = f (a, t, prerequisite_member {p, m}, r, l);
+ }
+
+ if (l)
+ {
+ if (rl != nullptr)
+ *rl = l;
+ else if (!r1)
+ {
+ // Note: we have to delay this until the meta-operation callback above
+ // had a chance to override it.
+ //
+ fail << "unrecognized " << *ovar << " variable value '" << ns
+ << "' specified for prerequisite " << p;
+ }
}
return r;
@@ -585,7 +707,9 @@ namespace build2
const target* target_set::
find (const target_key& k, tracer& trace) const
{
- slock sl (mutex_);
+ bool load (ctx.phase == run_phase::load);
+
+ slock sl (mutex_, defer_lock); if (!load) sl.lock ();
map_type::const_iterator i (map_.find (k));
if (i == map_.end ())
@@ -604,14 +728,18 @@ namespace build2
// Between us releasing the shared lock and acquiring unique the
// extension could change and possibly a new target that matches the
// key could be inserted. In this case we simply re-run find ().
+ // Naturally, can't happen during load.
//
- sl.unlock ();
- ul = ulock (mutex_);
-
- if (ext) // Someone set the extension.
+ if (!load)
{
- ul.unlock ();
- return find (k, trace);
+ sl.unlock ();
+ ul = ulock (mutex_);
+
+ if (ext) // Someone set the extension.
+ {
+ ul.unlock ();
+ return find (k, trace);
+ }
}
}
@@ -645,10 +773,12 @@ namespace build2
string name,
optional<string> ext,
target_decl decl,
- tracer& trace)
+ tracer& trace,
+ bool skip_find,
+ bool need_lock)
{
target_key tk {&tt, &dir, &out, &name, move (ext)};
- target* t (const_cast<target*> (find (tk, trace)));
+ target* t (skip_find ? nullptr : const_cast<target*> (find (tk, trace)));
if (t == nullptr)
{
@@ -669,7 +799,9 @@ namespace build2
// case we proceed pretty much like find() except already under the
// exclusive lock.
//
- ulock ul (mutex_);
+ ulock ul (mutex_, defer_lock);
+ if (ctx.phase != run_phase::load || need_lock)
+ ul.lock ();
auto p (map_.emplace (target_key {&tt, &t->dir, &t->out, &t->name, e},
unique_ptr<target> (t)));
@@ -678,10 +810,28 @@ namespace build2
if (p.second)
{
+#if 0
+ {
+ size_t n (map_.bucket_count ());
+ if (n > buckets_)
+ {
+ text << "target_set buckets: " << buckets_ << " -> " << n
+ << " (" << map_.size () << ")";
+ buckets_ = n;
+ }
+ }
+#endif
+
t->ext_ = &i->first.ext;
t->decl = decl;
t->state.inner.target_ = t;
t->state.outer.target_ = t;
+ t->state.inner.vars.target_ = t;
+ t->state.outer.vars.target_ = t;
+
+ if (ctx.phase != run_phase::load && !need_lock)
+ ul.unlock ();
+
return pair<target&, ulock> (*t, move (ul));
}
@@ -733,9 +883,14 @@ namespace build2
static const optional<string> unknown_ext ("?");
- ostream&
- to_stream (ostream& os, const target_key& k, optional<stream_verbosity> osv)
+ bool
+ to_stream (ostream& os,
+ const target_key& k,
+ optional<stream_verbosity> osv,
+ bool name_only)
{
+ // Note: similar code in print_diag_impl(vector<target_key>).
+
stream_verbosity sv (osv ? *osv : stream_verb (os));
uint16_t dv (sv.path);
uint16_t ev (sv.extension);
@@ -745,22 +900,29 @@ namespace build2
//
bool n (!k.name->empty ());
- // Note: relative() returns empty for './'.
- //
- const dir_path& rd (dv < 1 ? relative (*k.dir) : *k.dir); // Relative.
- const dir_path& pd (n ? rd : rd.directory ()); // Parent.
+ const target_type& tt (*k.type);
- if (!pd.empty ())
+ dir_path rds; // Storage.
+ if (!name_only)
{
+ // Note: relative() returns empty for './'.
+ //
if (dv < 1)
- os << diag_relative (pd);
- else
- to_stream (os, pd, true /* representation */);
- }
+ rds = relative (*k.dir);
- const target_type& tt (*k.type);
+ const dir_path& rd (dv < 1 ? rds : *k.dir); // Relative.
+ const dir_path& pd (n ? rd : rd.directory ()); // Parent.
- os << tt.name << '{';
+ if (!pd.empty ())
+ {
+ if (dv < 1)
+ os << diag_relative (pd);
+ else
+ to_stream (os, pd, true /* representation */);
+ }
+
+ os << tt.name << '{';
+ }
if (n)
{
@@ -803,37 +965,47 @@ namespace build2
}
}
else
+ {
+ if (name_only && dv < 1) // Already done if !name_only.
+ rds = relative (*k.dir);
+
+ const dir_path& rd (dv < 1 ? rds : *k.dir);
+
to_stream (os,
rd.empty () ? dir_path (".") : rd.leaf (),
true /* representation */);
+ }
- os << '}';
-
- // If this target is from src, print its out.
- //
- if (!k.out->empty ())
+ if (!name_only)
{
- if (dv < 1)
+ os << '}';
+
+ // If this target is from src, print its out.
+ //
+ if (!k.out->empty ())
{
- // Don't print '@./'.
- //
- const string& o (diag_relative (*k.out, false));
+ if (dv < 1)
+ {
+ // Don't print '@./'.
+ //
+ const string& o (diag_relative (*k.out, false));
- if (!o.empty ())
- os << '@' << o;
+ if (!o.empty ())
+ os << '@' << o;
+ }
+ else
+ os << '@' << *k.out;
}
- else
- os << '@' << *k.out;
}
- return os;
+ return n; // Regular if we had the name.
}
ostream&
operator<< (ostream& os, const target_key& k)
{
if (auto p = k.type->print)
- p (os, k);
+ p (os, k, false /* name_only */);
else
to_stream (os, k, stream_verb (os));
@@ -854,14 +1026,19 @@ namespace build2
case run_phase::load: break;
case run_phase::match:
{
- // Similar logic to matched_state_impl().
+ // Similar logic to target::matched().
//
const opstate& s (state[action () /* inner */]);
- // Note: already synchronized.
- size_t o (s.task_count.load (memory_order_relaxed) - ctx.count_base ());
+ // Note: use acquire for group_state().
+ //
+ size_t c (s.task_count.load (memory_order_acquire));
+ size_t b (ctx.count_base ()); // Note: cannot do (c - b)!
- if (o != offset_applied && o != offset_executed)
+ if (!(c == (b + offset_applied) ||
+ c == (b + offset_executed) ||
+ (c >= (b + offset_busy) &&
+ s.match_extra.cur_options_.load (memory_order_relaxed) != 0)))
break;
}
// Fall through.
@@ -984,25 +1161,27 @@ namespace build2
//
const target*
- target_search (const target& t, const prerequisite_key& pk)
+ target_search (context& ctx, const target*, const prerequisite_key& pk)
{
// The default behavior is to look for an existing target in the
// prerequisite's directory scope.
//
- return search_existing_target (t.ctx, pk);
+ return search_existing_target (ctx, pk, true /* out_only */);
}
const target*
- file_search (const target& t, const prerequisite_key& pk)
+ file_search (context& ctx, const target* t, const prerequisite_key& pk)
{
- // First see if there is an existing target.
+ // First see if there is an existing target in the out or src tree.
//
- if (const target* e = search_existing_target (t.ctx, pk))
+ if (const target* e = search_existing_target (ctx,
+ pk,
+ false /* out_only */))
return e;
// Then look for an existing file in the src tree.
//
- return search_existing_file (t.ctx, pk);
+ return t != nullptr ? search_existing_file (ctx, pk) : nullptr;
}
extern const char target_extension_none_[] = "";
@@ -1022,20 +1201,20 @@ namespace build2
return tk.ext->c_str ();
}
- void
- target_print_0_ext_verb (ostream& os, const target_key& k)
+ bool
+ target_print_0_ext_verb (ostream& os, const target_key& k, bool no)
{
stream_verbosity sv (stream_verb (os));
if (sv.extension == 1) sv.extension = 0; // Remap 1 to 0.
- to_stream (os, k, sv);
+ return to_stream (os, k, sv, no);
}
- void
- target_print_1_ext_verb (ostream& os, const target_key& k)
+ bool
+ target_print_1_ext_verb (ostream& os, const target_key& k, bool no)
{
stream_verbosity sv (stream_verb (os));
if (sv.extension == 0) sv.extension = 1; // Remap 0 to 1.
- to_stream (os, k, sv);
+ return to_stream (os, k, sv, no);
}
// type info
@@ -1051,7 +1230,7 @@ namespace build2
nullptr,
nullptr,
&target_search,
- false
+ target_type::flag::none,
};
const target_type mtime_target::static_type
@@ -1064,7 +1243,7 @@ namespace build2
nullptr,
nullptr,
&target_search,
- false
+ target_type::flag::none
};
const target_type path_target::static_type
@@ -1077,7 +1256,7 @@ namespace build2
nullptr,
nullptr,
&target_search,
- false
+ target_type::flag::none
};
const target_type file::static_type
@@ -1090,18 +1269,82 @@ namespace build2
nullptr, /* pattern */
&target_print_1_ext_verb, // Print extension even at verbosity level 0.
&file_search,
- false
+ target_type::flag::none
+ };
+
+ // group
+ //
+ group_view group::
+ group_members (action a) const
+ {
+ if (members_on == 0) // Not yet discovered.
+ return group_view {nullptr, 0};
+
+ // Members discovered during anything other than perform_update are only
+ // good for that operation. For example, we only return the static members
+ // ("representative sample") for perform_configure.
+ //
+ // We also re-discover the members on each update and clean not to
+ // overcomplicate the already twisted adhoc_buildscript_rule::apply()
+ // logic.
+ //
+ if (members_on != ctx.current_on)
+ {
+ if (members_action != perform_update_id ||
+ a == perform_update_id ||
+ a == perform_clean_id)
+ return group_view {nullptr, 0};
+ }
+
+ // Note that we may have no members (e.g., perform_configure and there are
+ // no static members). However, whether std::vector returns a non-NULL
+ // pointer in this case is undefined.
+ //
+ size_t n (members.size ());
+ return group_view {
+ n != 0
+ ? members.data ()
+ : reinterpret_cast<const target* const*> (this),
+ n};
+ }
+
+ const target_type group::static_type
+ {
+ "group",
+ &mtime_target::static_type,
+ &target_factory<group>,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ &target_search,
+ //
+ // Note that the dyn_members semantics is used not only to handle
+ // depdb-dyndep --dyn-target, but also pattern rule-static members.
+ //
+ target_type::flag::group | target_type::flag::dyn_members
};
+ // alias
+ //
static const target*
- alias_search (const target& t, const prerequisite_key& pk)
+ alias_search (context& ctx, const target* t, const prerequisite_key& pk)
{
// For an alias we don't want to silently create a target since it will do
- // nothing and it most likely not what the user intended.
+ // nothing and it is most likely not what the user intended (but omit this
+ // check when searching for an existing target since presumably a new one
+ // won't be created in this case).
+ //
+ // But, allowing implied aliases seems harmless since all the alias does
+ // is pull its prerequisites. And they are handy to use as metadata
+ // carriers.
//
- const target* e (search_existing_target (t.ctx, pk));
+ // Doesn't feel like an alias in the src tree makes much sense.
+ //
+ const target* e (search_existing_target (ctx, pk, true /* out_only */));
- if (e == nullptr || e->decl != target_decl::real)
+ if ((e == nullptr ||
+ !(operator>= (e->decl, target_decl::implied))) && t != nullptr)
fail << "no explicit target for " << pk;
return e;
@@ -1117,7 +1360,7 @@ namespace build2
nullptr,
nullptr,
&alias_search,
- false
+ target_type::flag::none
};
// dir
@@ -1127,7 +1370,7 @@ namespace build2
{
try
{
- for (const dir_entry& e: dir_iterator (d, true /* ignore_dangling */))
+ for (const dir_entry& e: dir_iterator (d, dir_iterator::detect_dangling))
{
switch (e.type ())
{
@@ -1145,6 +1388,16 @@ namespace build2
break;
}
+ case entry_type::unknown:
+ {
+ bool sl (e.ltype () == entry_type::symlink);
+
+ warn << "skipping "
+ << (sl ? "dangling symlink" : "inaccessible entry") << ' '
+ << d / e.path ();
+
+ break;
+ }
default:
break;
}
@@ -1166,17 +1419,26 @@ namespace build2
try
{
- for (const dir_entry& e: dir_iterator (d, true /* ignore_dangling */))
+ for (const dir_entry& e: dir_iterator (d, dir_iterator::detect_dangling))
{
if (e.type () == entry_type::directory)
+ {
r.push_back (
- prerequisite (nullopt,
- dir::static_type,
+ prerequisite (dir::static_type,
dir_path (e.path ().representation ()), // Relative.
dir_path (), // In the out tree.
string (),
nullopt,
bs));
+ }
+ else if (e.type () == entry_type::unknown)
+ {
+ bool sl (e.ltype () == entry_type::symlink);
+
+ warn << "skipping "
+ << (sl ? "dangling symlink" : "inaccessible entry") << ' '
+ << d / e.path ();
+ }
}
}
catch (const system_error& e)
@@ -1188,17 +1450,27 @@ namespace build2
}
static const target*
- dir_search (const target& t, const prerequisite_key& pk)
+ dir_search (context& ctx, const target* t, const prerequisite_key& pk)
{
tracer trace ("dir_search");
- // The first step is like in search_alias(): looks for an existing target.
+ // The first step is like in alias_search(): look for an existing target
+ // (but unlike alias, no implied, think `test/: install=false`).
//
- const target* e (search_existing_target (t.ctx, pk));
+ // Likewise, dir{} in the src tree doesn't make much sense.
+ //
+ const target* e (search_existing_target (ctx, pk, true /* out_only */));
if (e != nullptr && e->decl == target_decl::real)
return e;
+ // The search for an existing target can also be done during execute so
+ // none of the code below applies. Note: return implied instead of NULL
+ // (to be consistent with search_new(), for example).
+ //
+ if (t == nullptr)
+ return e;
+
// If not found (or is implied), then try to load the corresponding
// buildfile (which would normally define this target). Failed that, see
// if we can assume an implied buildfile which would be equivalent to:
@@ -1232,18 +1504,18 @@ namespace build2
//
bool retest (false);
- assert (t.ctx.phase == run_phase::match);
+ assert (ctx.phase == run_phase::match);
{
// Switch the phase to load.
//
- phase_switch ps (t.ctx, run_phase::load);
+ phase_switch ps (ctx, run_phase::load);
// This is subtle: while we were fussing around another thread may have
// loaded the buildfile. So re-test now that we are in an exclusive
// phase.
//
if (e == nullptr)
- e = search_existing_target (t.ctx, pk);
+ e = search_existing_target (ctx, pk, true);
if (e != nullptr && e->decl == target_decl::real)
retest = true;
@@ -1281,14 +1553,14 @@ namespace build2
}
}
- assert (t.ctx.phase == run_phase::match);
+ assert (ctx.phase == run_phase::match);
// If we loaded/implied the buildfile, examine the target again.
//
if (retest)
{
if (e == nullptr)
- e = search_existing_target (t.ctx, pk);
+ e = search_existing_target (ctx, pk, true);
if (e != nullptr && e->decl == target_decl::real)
return e;
@@ -1333,7 +1605,7 @@ namespace build2
&dir_pattern,
nullptr,
&dir_search,
- false
+ target_type::flag::none
};
const target_type fsdir::static_type
@@ -1346,7 +1618,7 @@ namespace build2
&dir_pattern,
nullptr,
&target_search,
- false
+ target_type::flag::none
};
static optional<string>
@@ -1413,8 +1685,8 @@ namespace build2
nullptr,
#endif
nullptr,
- &file_search,
- false
+ &file_search, // Note: can also be a script in src.
+ target_type::flag::none
};
static const char*
@@ -1500,7 +1772,56 @@ namespace build2
&buildfile_target_pattern,
nullptr,
&file_search,
- false
+ target_type::flag::none
+ };
+
+ static const char*
+ buildscript_target_extension (const target_key& tk, const scope*)
+ {
+ // If the name is special 'buildscript', then there is no extension,
+ // otherwise it is .buildscript.
+ //
+ return *tk.name == "buildscript" ? "" : "buildscript";
+ }
+
+ static bool
+ buildscript_target_pattern (const target_type&,
+ const scope&,
+ string& v,
+ optional<string>& e,
+ const location& l,
+ bool r)
+ {
+ if (r)
+ {
+ assert (e);
+ e = nullopt;
+ }
+ else
+ {
+ e = target::split_name (v, l);
+
+ if (!e && v != "buildscript")
+ {
+ e = "buildscript";
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ const target_type buildscript::static_type
+ {
+ "buildscript",
+ &file::static_type,
+ &target_factory<buildscript>,
+ &buildscript_target_extension,
+ nullptr, /* default_extension */
+ &buildscript_target_pattern,
+ nullptr,
+ &file_search,
+ target_type::flag::none
};
const target_type doc::static_type
@@ -1513,7 +1834,7 @@ namespace build2
nullptr, /* pattern */ // Same as file.
&target_print_1_ext_verb, // Same as file.
&file_search,
- false
+ target_type::flag::none
};
const target_type legal::static_type
@@ -1526,7 +1847,7 @@ namespace build2
nullptr, /* pattern */ // Same as file.
&target_print_1_ext_verb, // Same as file.
&file_search,
- false
+ target_type::flag::none
};
const target_type man::static_type
@@ -1539,7 +1860,7 @@ namespace build2
nullptr,
&target_print_1_ext_verb, // Print extension even at verbosity level 0.
&file_search,
- false
+ target_type::flag::none
};
extern const char man1_ext[] = "1"; // VC14 rejects constexpr.
@@ -1554,7 +1875,7 @@ namespace build2
&target_pattern_fix<man1_ext>,
&target_print_0_ext_verb, // Fixed extension, no use printing.
&file_search,
- false
+ target_type::flag::none
};
static const char*
@@ -1603,6 +1924,6 @@ namespace build2
&manifest_target_pattern,
nullptr,
&file_search,
- false
+ target_type::flag::none
};
}
diff --git a/libbuild2/target.hxx b/libbuild2/target.hxx
index 4ce871b..20cd32d 100644
--- a/libbuild2/target.hxx
+++ b/libbuild2/target.hxx
@@ -4,8 +4,9 @@
#ifndef LIBBUILD2_TARGET_HXX
#define LIBBUILD2_TARGET_HXX
+#include <cstddef> // max_align_t
#include <iterator> // tags, etc.
-#include <type_traits> // aligned_storage
+#include <type_traits> // is_*
#include <unordered_map>
#include <libbutl/multi-index.hxx> // map_iterator_adapter
@@ -38,16 +39,19 @@ namespace build2
// Prerequisite inclusion/exclusion (see include() function below).
//
+ // Note that posthoc is handled internally and should normally be treated by
+ // the rules the same as excluded.
+ //
class include_type
{
public:
- enum value {excluded, adhoc, normal};
+ enum value {excluded, posthoc, adhoc, normal};
include_type (value v): v_ (v) {}
include_type (bool v): v_ (v ? normal : excluded) {}
operator value () const {return v_;}
- explicit operator bool () const {return v_ != excluded;}
+ explicit operator bool () const {return v_ == normal || v_ == adhoc;}
private:
value v_;
@@ -70,50 +74,308 @@ namespace build2
};
// List of prerequisites resolved to targets. Unless additional storage is
- // needed, it can be used as just vector<const target*> (which is what we
+ // needed, it can be treated as just vector<const target*> (which is what we
// used to have initially).
//
+ // The include member normally just indicates (in the first bit) whether
+ // this prerequisite is ad hoc. But it can also carry additional information
+ // (for example, from operation-specific override) in other bits (see below
+ // for details).
+ //
struct prerequisite_target
{
using target_type = build2::target;
prerequisite_target (const target_type* t, bool a = false, uintptr_t d = 0)
- : target (t), adhoc (a), data (d) {}
+ : target (t), include (a ? include_adhoc : 0), data (d) {}
+
+ prerequisite_target (const target_type& t, bool a = false, uintptr_t d = 0)
+ : prerequisite_target (&t, a, d) {}
prerequisite_target (const target_type* t, include_type a, uintptr_t d = 0)
: prerequisite_target (t, a == include_type::adhoc, d) {}
+ prerequisite_target (const target_type& t, include_type a, uintptr_t d = 0)
+ : prerequisite_target (&t, a, d) {}
+
+ const target_type* target;
+
operator const target_type*& () {return target;}
operator const target_type* () const {return target;}
const target_type* operator-> () const {return target;}
- const target_type* target;
- bool adhoc; // True if include=adhoc.
- uintptr_t data;
+ // The first 8 bits are reserved with the first two having the following
+ // semantics:
+ //
+ // adhoc
+ //
+ // This prerequisite is ad hoc.
+ //
+ // udm
+ //
+ // This prerequisite is updated during match. Note that only static
+ // prerequisites that are updated during match should have this bit set
+ // (see dyndep_rule::*_existing_file() for details).
+ //
+ // target
+ //
+ // The data member contains the target pointer that has been "blanked
+ // out" for some reason (updated during match, unmatched, etc). See
+ // dyndep_rule::updated_during_match() for details.
+ //
+ static const uintptr_t include_adhoc = 0x01;
+ static const uintptr_t include_udm = 0x02;
+ static const uintptr_t include_target = 0x80;
+
+ uintptr_t include;
+
+ bool adhoc () const {return (include & include_adhoc) != 0;}
+
+ // Auxiliary data.
+ //
+ uintptr_t data;
};
using prerequisite_targets = vector<prerequisite_target>;
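+
+ // An illustrative sketch (hypothetical rule code): when iterating over the
+ // resolved prerequisite targets, a rule would typically skip NULL (blanked
+ // out) and ad hoc entries:
+ //
+ //   for (const prerequisite_target& pt: t.prerequisite_targets[a])
+ //   {
+ //     if (pt.target == nullptr || pt.adhoc ())
+ //       continue;
+ //
+ //     ...
+ //   }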
- // A rule match is an element of hint_rule_map.
+ // A rule match is an element of name_rule_map.
//
using rule_match = pair<const string, reference_wrapper<const rule>>;
+ // A map of target type plus operation ids to rule hints (see name_rule_map
+ // for details on rule names and hints). The default_id serves as a fallback
+ // for update and clean operations.
+ //
+ // Note that for now hints are tried in the order specified and the first
+ // one that matches is used.
+ //
+ struct rule_hints
+ {
+ // Return empty string if not found.
+ //
+ const string&
+ find (const target_type&, operation_id, bool untyped) const;
+
+ bool
+ empty () const {return map.empty ();}
+
+ // Note that insertion of an existing entry overrides the old value.
+ //
+ void
+ insert (const target_type*, operation_id, string);
+
+ struct value_type
+ {
+ const target_type* type;
+ operation_id operation;
+ string hint;
+ };
+
+ vector<value_type> map;
+ };
+
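+ // An illustrative sketch (hypothetical hint value): an untyped entry
+ // matches any target type when the untyped argument to find() is true.
+ //
+ //   t.rule_hints.insert (nullptr /* any type */, update_id, "cxx");
+ //
+ //   const string& h (t.rule_hints.find (t.type (), update_id, true));
+ //   // h == "cxx"
+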
// Additional information about a rule match (see rule.hxx for details).
//
+ // Note that passing this information to a base rule's match() as-is may or
+ // may not be correct. If some changes must be made (for example, the
+ // fallback flag must be cleared), then that should be done by modifying
+ // (and restoring, if necessary) the passed instance rather than making a
+ // copy (which would not survive to apply()).
+ //
struct match_extra
{
- bool fallback; // True if matching a fallback rule.
- string buffer; // Auxiliary buffer that's reused during match/apply.
+ bool locked; // Normally true (see adhoc_rule::match() for background).
+ bool fallback; // True if matching a fallback rule (see match_rule_impl()).
+
+ // When matching a rule, the caller may wish to request a subset of the
+ // full functionality of performing the operation on the target. This is
+ // achieved with match options.
+ //
+ // Since the match caller normally has no control over which rule will be
+ // matched, the options are not specific to a particular rule. Rather,
+ // options are defined for performing a specific operation on a specific
+ // target type and would normally be part of the target type semantics.
+ // To put it another way, when a rule matches a target of a certain type for
+ // a certain operation, there is an expectation of certain semantics, some
+ // parts of which could be made optional.
+ //
+ // As a concrete example, consider installing libs{}, which traditionally
+ // has two parts: runtime (normally just the versioned shared library) and
+ // build-time (non-versioned symlinks, pkg-config files, headers, etc).
+ // The option to install only the runtime files is part of the bin::libs{}
+ // semantics, not of, say, cc::install_rule.
+ //
+ // The match options are specified as a uint64_t mask, which means there
+ // can be a maximum of 64 options per operation/target type. Options are
+ // opt-out rather than opt-in. That is, by default, all the options are
+ // enabled unless the match caller explicitly opted out of some
+ // functionality. Even if the caller opted out, there is no guarantee that
+ // the matching rule will honor this request (for example, because it is a
+ // user-provided ad hoc recipe). To put it another way, support for
+ // options is a quality of implementation matter.
+ //
+ // From the rule implementation's point of view, match options are handled as
+ // follows: On initial match()/apply(), cur_options is initialized to ~0
+ // (all options enabled) and the matching rule is expected to override it
+ // with new_options in apply() (note that match() should not base any
+ // decisions on new_options since they may change between match() and
+ // apply()). This way a rule that does not support any match options does
+ // not need to do anything. Subsequent match calls may add new options
+ // which causes a rematch that manifests in the rule's reapply() call. In
+ // reapply(), cur_options are the currently enabled options and
+ // new_options are the newly requested options. Here the rule is expected
+ // to factor new_options to cur_options as appropriate. Note also that on
+ // rematch, if current options already include new options, then no call
+ // to reapply() is made. This, in particular, means that a rule that does
+ // not adjust cur_options in apply() will never get a reapply() call
+ // (because all the options are enabled from the start). Note that
+ // cur_options should only be modified in apply() or reapply().
+ //
+ // If a rematch is triggered after the rule has already been executed, an
+ // error is issued. This means that match options are not usable for
+ // operation/target types that could plausibly be executed during
+ // match. In particular, using match options for update and clean
+ // operations is a bad idea (update of pretty much any target can happen
+ // during match as a result of a tool update while clean might have to be
+ // performed during match to provide the mirror semantics).
+ //
+ // Note also that with rematches the assumption that in the match phase
+ // after matching the target we can MT-safely examine its state (such as
+ // its prerequisite_targets) no longer holds since such state could be
+ // modified during a rematch. As a result, if the target type specifies
+ // options for a certain operation, then you should not rely on this
+ // assumption for targets of this type during this operation.
+ //
+ // A rule that supports match options must also be prepared to handle the
+ // apply() call with new_options set to 0, for example, by using a
+ // minimally supported set of options instead. While 0 usually won't be
+ // passed by the match caller, this value is passed in the following
+ // circumstances:
+ //
+ // - match to resolve group (resolve_group())
+ // - match to resolve members (resolve_members())
+ // - match of ad hoc group via one of its ad hoc members
+ //
+ // Note that the 0 cur_options value is illegal.
+ //
+ // When it comes to match options specified for group members, the
+ // semantics differs between explicit and ad hoc groups. For explicit
+ // groups, the standard semantics described above applies and the group's
+ // reapply() function will be called both for the group itself as well as
+ // for its members and it is the responsibility of the rule to decide what
+ // to do with the two sets of options (e.g., factor member's options into
+ // group's options, etc). For ad hoc groups, members are not matched to a
+ // rule but to the group_recipe directly (so there cannot be a call to
+ // reapply()). Currently, ad hoc group members cannot have options (more
+ // precisely, their options should always be ~0). An alternative semantics
+ // where the group rule is called to translate member options to group
+ // options may be implemented in the future (see match_impl_impl() for
+ // details).
+ //
+ // Note: match options are currently not exposed in Buildscript ad hoc
+ // recipes/rules (but are in C++).
+ //
+ static constexpr uint64_t all_options = ~uint64_t (0);
+
+ uint64_t cur_options;
+ uint64_t new_options;
+
+ atomic<uint64_t> cur_options_; // Implementation detail (see lock_impl()).
+
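+ // An illustrative sketch (abbreviated signatures; minimal_options is a
+ // hypothetical constant) of a rule that honors match options:
+ //
+ //   recipe
+ //   apply (action a, target& t, match_extra& me) const override
+ //   {
+ //     // Provide only what was requested (0 means the minimal set).
+ //     //
+ //     me.cur_options = me.new_options != 0 ? me.new_options : minimal_options;
+ //     ...
+ //   }
+ //
+ //   void
+ //   reapply (action a, target& t, match_extra& me) const override
+ //   {
+ //     me.cur_options |= me.new_options; // Fold in newly-requested options.
+ //     ...
+ //   }
+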
+ // The list of post hoc prerequisite targets for this target. Only not
+ // NULL in rule::apply_posthoc() and rule::reapply() functions and only if
+ // there are post hoc prerequisites. Primarily useful for adjusting match
+ // options for post hoc prerequisites (but can also be used to blank some
+ // of them out).
+ //
+ vector<context::posthoc_target::prerequisite_target>*
+ posthoc_prerequisite_targets;
+
+ // Auxiliary data storage.
+ //
+ // A rule (whether it matches or not) may use this pad to pass data between
+ // its match and apply functions (but not the recipe). The rule should
+ // static assert that the size of the pad is sufficient for its needs.
+ //
+ // This facility is complementary to the auxiliary data storage in target:
+ // it can store slightly more/extra data without dynamic memory allocation
+ // but can only be used during match/apply.
+ //
+ // Note also that a rule that delegates to another rule may not be able to
+ // use this mechanism fully since the delegated-to rule may also need the
+ // data storage.
+ //
+ static constexpr size_t data_size = (sizeof (string) > sizeof (void*) * 4
+ ? sizeof (string)
+ : sizeof (void*) * 4);
+
+ alignas (std::max_align_t) unsigned char data_[data_size];
+ void (*data_dtor_) (void*) = nullptr;
+
+ template <typename R,
+ typename T = typename std::remove_cv<
+ typename std::remove_reference<R>::type>::type>
+ typename std::enable_if<std::is_trivially_destructible<T>::value,T&>::type
+ data (R&& d)
+ {
+ assert (sizeof (T) <= data_size);
+ clear_data ();
+ return *new (&data_) T (forward<R> (d));
+ }
+
+ template <typename R,
+ typename T = typename std::remove_cv<
+ typename std::remove_reference<R>::type>::type>
+ typename std::enable_if<!std::is_trivially_destructible<T>::value,T&>::type
+ data (R&& d)
+ {
+ assert (sizeof (T) <= data_size);
+ clear_data ();
+ T& r (*new (&data_) T (forward<R> (d)));
+ data_dtor_ = [] (void* p) {static_cast<T*> (p)->~T ();};
+ return r;
+ }
+
+ template <typename T>
+ T&
+ data () {return *reinterpret_cast<T*> (&data_);}
+
+ template <typename T>
+ const T&
+ data () const {return *reinterpret_cast<const T*> (&data_);}
+
+ void
+ clear_data ()
+ {
+ if (data_dtor_ != nullptr)
+ {
+ data_dtor_ (&data_);
+ data_dtor_ = nullptr;
+ }
+ }
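+
+ // An illustrative sketch (hypothetical match_data type) of using this
+ // storage to pass data from match() to apply():
+ //
+ //   struct match_data {const file* src; bool binless;};
+ //   static_assert (sizeof (match_data) <= match_extra::data_size, "");
+ //
+ //   // In match():  me.data (match_data {&s, b});
+ //   // In apply():  match_data& md (me.data<match_data> ());
+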
// Implementation details.
//
+ // NOTE: see match_rule_impl() in algorithm.cxx if changing anything here.
+ //
public:
+ explicit
+ match_extra (bool l = true, bool f = false)
+ : locked (l), fallback (f),
+ cur_options (all_options), new_options (0),
+ posthoc_prerequisite_targets (nullptr) {}
+
void
- init (bool fallback);
+ reinit (bool fallback);
// Force freeing of the dynamically-allocated memory.
//
void
free ();
+
+ ~match_extra ()
+ {
+ clear_data ();
+ }
};
// Target.
@@ -126,17 +388,34 @@ namespace build2
// Note that the order of the enumerators is arranged so that their
// integral values indicate whether one "overrides" the other.
//
+ // We refer to the targets other than real and implied as
+ // dynamically-created or just dynamic.
+ //
// @@ We have cases (like pkg-config extraction) where it should probably be
// prereq_file rather than implied (also audit targets.insert<> calls).
//
+ // @@ Also, synthesized dependency declarations (e.g., in cc::link_rule) are
+ // fuzzy: they feel more `real` than `implied`. Maybe introduce
+ // `synthesized` in-between?
+ //
+ // @@ There are also now dynamically-discovered targets (ad hoc group
+ // members; see depdb-dyndep --dyn-target) which currently end up
+ // with prereq_new.
+ //
enum class target_decl: uint8_t
{
- prereq_new, // Created from prerequisite (create_new_target()).
- prereq_file, // Created from prerequisite/file (search_existing_file ()).
- implied, // Target-spec variable assignment, implicitly-entered, etc.
- real // Real dependency declaration.
+ prereq_new = 1, // Created from prerequisite (create_new_target()).
+ prereq_file, // Created from prerequisite/file (search_existing_file()).
+ implied, // Target-spec variable assignment, implicitly-entered, etc.
+ real // Real dependency declaration.
};
+ inline bool
+ operator>= (target_decl l, target_decl r)
+ {
+ return static_cast<uint8_t> (l) >= static_cast<uint8_t> (r);
+ }
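+
+ // An illustrative sketch: since the enumerators are ordered, checking
+ // whether a target was declared at least implicitly (via its decl member)
+ // can be done with:
+ //
+ //   if (t.decl >= target_decl::implied) ...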
+
class LIBBUILD2_SYMEXPORT target
{
public:
@@ -201,15 +480,15 @@ namespace build2
// obj{}).
//
// In an all-group, when a group is updated, normally all its members are
- // updates (and usually with a single command), though there could be some
+ // updated (and usually with a single command), though there could be some
// members that are omitted, depending on the configuration (e.g., an
// inline file not/being generated). When an all-group is mentioned as a
// prerequisite, the rule is usually interested in the individual members
- // rather than the whole group. For example, a C++ compile rule would like
- // to "see" the ?xx{} members when it gets a cli.cxx{} group.
+ // rather than the group target. For example, a C++ compile rule would
+ // like to "see" the ?xx{} members when it gets a cli.cxx{} group.
//
// Which brings us to the group iteration mode. The target type contains a
- // member called see_through that indicates whether the default iteration
+ // flag called see_through that indicates whether the default iteration
// mode for the group should be "see through"; that is, whether we see the
// members or the group itself. For the iteration support itself, see the
// *_prerequisite_members() machinery below.
@@ -221,9 +500,12 @@ namespace build2
//
// Note that the group-member link-up can happen anywhere between the
// member creation and rule matching so reading the group before the
- // member has been matched can be racy.
+ // member has been matched can be racy. However, once the member is linked
+ // up to the group, this relationship is immutable. As a result, one can
+ // atomically query the current value to see if already linked up (can be
+ // used as an optimization, to avoid deadlocks, etc).
//
- const target* group = nullptr;
+ relaxed_atomic<const target*> group = nullptr;
// What has been described above is an "explicit" group. That is, there is
// a dedicated target type that explicitly serves as a group and there is
@@ -256,7 +538,7 @@ namespace build2
// usually needed is to derive its path.
//
// - Unless declared, members are discovered lazily, they are only known
- // after the group's rule's apply() call.
+ // after the matching rule's apply() call.
//
// - Only declared members can be used as prerequisites but all can be
// used as targets (e.g., to set variables, etc).
@@ -266,6 +548,10 @@ namespace build2
// - Ad hoc group cannot have sub-groups (of any kind) though an ad hoc
// group can be a sub-group of an explicit group.
//
+ // - Member variable lookup skips the ad hoc group (since the group is the
+ // first member, this is normally what we want). But special semantics
+ // could be arranged; see var_backlink, for example.
+ //
// Note that ad hoc groups can be part of explicit groups. In a sense, we
// have a two-level grouping: an explicit group with its members each of
// which can be an ad hoc group. For example, lib{} contains libs{} which
@@ -274,6 +560,20 @@ namespace build2
// Use add_adhoc_member(), find_adhoc_member() from algorithms to manage
// ad hoc members.
//
+ // One conceptual issue we have with our ad hoc group implementation is
+ // that the behavior could be sensitive to the order in which the members
+ // are specified (due to the primary member logic). For example, unless we
+ // specify the header in the header/source group first, it will not be
+ // installed. Perhaps the solution is to synthesize a separate group
+ // target for the ad hoc members (with a special target type that rules
+ // like install could recognize). See also the variable lookup semantics.
+ // We could also probably support see_through via an attribute or some
+ // such. Or perhaps such cases should be handled through explicit groups
+ // and the ad hoc semantics is left to the non-see_through "primary
+ // targets with a bunch of subordinates" cases. In other words, if the
+ // members are "equal/symmetrical", then perhaps an explicit group is the
+ // correct approach.
+ //
const_ptr<target> adhoc_member = nullptr;
// Return true if this target is an ad hoc group (that is, its primary
@@ -299,7 +599,8 @@ namespace build2
public:
// Normally you should not call this function directly and rather use
- // resolve_members() from <libbuild2/algorithm.hxx>.
+ // resolve_members() from <libbuild2/algorithm.hxx>. Note that action
+ // is always inner.
//
virtual group_view
group_members (action) const;
@@ -332,7 +633,16 @@ namespace build2
// Most qualified scope that contains this target.
//
const scope&
- base_scope () const;
+ base_scope () const
+ {
+ if (ctx.phase != run_phase::load)
+ {
+ if (const scope* s = base_scope_.load (memory_order_consume))
+ return *s;
+ }
+
+ return base_scope_impl ();
+ }
// Root scope of a project that contains this target. Note that
// a target can be out of any (known) project root in which case
@@ -340,7 +650,10 @@ namespace build2
// then use base_scope().root_scope() expression instead.
//
const scope&
- root_scope () const;
+ root_scope () const
+ {
+ return *base_scope ().root_scope ();
+ }
// Root scope of a bundle amalgamation that contains this target. The
// same notes as to root_scope() apply.
@@ -366,6 +679,16 @@ namespace build2
return out_dir ().sub (s.out_path ());
}
+ // Implementation details (use above functions instead).
+ //
+ // Base scope cached value. Invalidated every time we switch to the load
+ // phase (which is the only phase where we may insert new scopes).
+ //
+ mutable atomic<const scope*> base_scope_ {nullptr};
+
+ const scope&
+ base_scope_impl () const;
+
// Prerequisites.
//
// We use an atomic-empty semantics that allows one to "swap in" a set of
@@ -379,7 +702,8 @@ namespace build2
prerequisites () const;
// Swap-in a list of prerequisites. Return false if unsuccessful (i.e.,
- // someone beat us to it). Note that it can be called on const target.
+ // someone beat us to it), in which case the passed prerequisites are
+ // not moved. Note that it can be called on const target.
//
bool
prerequisites (prerequisites_type&&) const;
@@ -438,8 +762,9 @@ namespace build2
lookup_type
operator[] (const string& name) const
{
- const variable* var (ctx.var_pool.find (name));
- return var != nullptr ? operator[] (*var) : lookup_type ();
+ const scope& bs (base_scope ());
+ const variable* var (bs.var_pool ().find (name));
+ return var != nullptr ? lookup (*var, &bs).first : lookup_type ();
}
// As above but also return the depth at which the value is found. The
@@ -451,22 +776,26 @@ namespace build2
// earlier. If no value is found, then the depth is set to ~0.
//
pair<lookup_type, size_t>
- lookup (const variable& var) const
+ lookup (const variable& var, const scope* bs = nullptr) const
{
- auto p (lookup_original (var));
+ auto p (lookup_original (var, false, bs));
return var.overrides == nullptr
? p
- : base_scope ().lookup_override (var, move (p), true);
+ : (bs != nullptr
+ ? *bs
+ : base_scope ()).lookup_override (var, move (p), true);
}
// If target_only is true, then only look in target and its target group
// without continuing in scopes. As an optimization, the caller can also
- // pass the base scope of the target, if already known.
+ // pass the base scope of the target, if already known. If locked is true,
+ // assume the targets mutex is locked.
//
pair<lookup_type, size_t>
lookup_original (const variable&,
bool target_only = false,
- const scope* bs = nullptr) const;
+ const scope* bs = nullptr,
+ bool locked = false) const;
// Return a value suitable for assignment. See scope for details.
//
@@ -476,10 +805,53 @@ namespace build2
value&
assign (const variable* var) {return vars.assign (var);} // For cached.
+ // Note: variable must already be entered.
+ //
+ value&
+ assign (const string& name)
+ {
+ return vars.assign (base_scope ().var_pool ().find (name));
+ }
+
// Return a value suitable for appending. See scope for details.
//
value&
- append (const variable&);
+ append (const variable&, const scope* bs = nullptr);
+
+ // Note: variable must already be entered.
+ //
+ value&
+ append (const string& name)
+ {
+ const scope& bs (base_scope ());
+ return append (*bs.var_pool ().find (name), &bs);
+ }
+
+ // As above but assume the targets mutex is locked.
+ //
+ value&
+ append_locked (const variable&, const scope* bs = nullptr);
+
+ // Note: variable must already be entered.
+ //
+ value&
+ append_locked (const string& name)
+ {
+ const scope& bs (base_scope ());
+ return append_locked (*bs.var_pool ().find (name), &bs);
+ }
+
+ // Rule hints.
+ //
+ public:
+ build2::rule_hints rule_hints;
+
+ // Find the rule hint for the specified operation taking into account the
+ // target type/group. Note: racy with regards to the group link-up and
+ // should only be called when safe.
+ //
+ const string&
+ find_hint (operation_id) const;
// Ad hoc recipes.
//
@@ -516,6 +888,12 @@ namespace build2
static const size_t offset_executed = 5; // Recipe has been executed.
static const size_t offset_busy = 6; // Match/execute in progress.
+ // @@ PERF There is a lot of data below that is only needed for "output"
+ // as opposed to "source" targets (data pads, prerequisite_targets,
+ // etc). Maybe we should move this stuff to an optional extra (like we
+ // have for the root scope). Maybe we could even allocate it as part of
+ // the target's memory block or some such?
+
// Inner/outer operation state. See <libbuild2/action.hxx> for details.
//
class LIBBUILD2_SYMEXPORT opstate
@@ -531,24 +909,40 @@ namespace build2
//
mutable atomic_count dependents {0};
- // Match state storage between the match() and apply() calls.
+ // Match state storage between the match() and apply() calls with only
+ // the *_options members extended to reapply().
+ //
+ // Note: in reality, cur_options are used beyond (re)apply() as an
+ // implementation detail.
//
build2::match_extra match_extra;
- // Matched rule (pointer to hint_rule_map element). Note that in case of
+ // Matched rule (pointer to name_rule_map element). Note that in case of
// a direct recipe assignment we may not have a rule (NULL).
//
const rule_match* rule;
// Applied recipe.
//
- build2::recipe recipe;
+ // Note: also used as the auxiliary data storage during match, which is
+ // why mutable (see the target::data() API below for details). The
+ // default recipe_keep value is set by clear_target().
+ //
+ mutable build2::recipe recipe;
+ mutable bool recipe_keep; // Keep after execution.
+ bool recipe_group_action; // Recipe is group_action.
// Target state for this operation. Note that it is undetermined until
// a rule is matched and recipe applied (see set_recipe()).
//
target_state state;
+ // Set to true (only for the inner action) if this target has been
+ // matched but not executed as a result of the resolve_members() call.
+ // See also context::resolve_count.
+ //
+ bool resolve_counted;
+
// Rule-specific variables.
//
// The rule (for this action) has to be matched before these variables
@@ -556,8 +950,8 @@ namespace build2
// no iffy modifications of the group's variables by member's rules).
//
// They are also automatically cleared before another rule is matched,
- // similar to the data pad. In other words, rule-specific variables are
- // only valid for this match-execute phase.
+ // similar to the auxiliary data storage. In other words, rule-specific
+ // variables are only valid for this match-execute phase.
//
variable_map vars;
@@ -581,13 +975,6 @@ namespace build2
return operator[] (*var);
}
- lookup_type
- operator[] (const string& name) const
- {
- const variable* var (target_->ctx.var_pool.find (name));
- return var != nullptr ? operator[] (*var) : lookup_type ();
- }
-
// As above but also return the depth at which the value is found. The
// depth is calculated by adding 1 for each test performed. So a value
// that is from the rule will have depth 1. That from the target - 2,
@@ -616,14 +1003,18 @@ namespace build2
value&
assign (const variable* var) {return vars.assign (var);} // For cached.
+ // Implementation details.
+ //
public:
explicit
- opstate (context& c): vars (c, false /* global */) {}
+ opstate (context& c): vars (variable_map::owner::target, &c) {}
private:
friend class target_set;
- const target* target_ = nullptr; // Back-pointer, set by target_set.
+ // Back-pointer, set by target_set along with vars.target_.
+ //
+ const target* target_ = nullptr;
};
action_state<opstate> state;
@@ -632,10 +1023,13 @@ namespace build2
const opstate& operator[] (action a) const {return state[a];}
// Return true if the target has been matched for the specified action.
- // This function can only be called during execution.
+ // This function can only be called during the match or execute phases.
+ //
+ // If you need to observe something in the matched target (e.g., the
+ // matched rule or recipe), use memory_order_acquire.
//
bool
- matched (action) const;
+ matched (action, memory_order mo = memory_order_relaxed) const;
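+
+ // An illustrative sketch: observe a matched target's rule.
+ //
+ //   if (t.matched (a, memory_order_acquire))
+ //   {
+ //     const rule_match* rm (t[a].rule);
+ //     ...
+ //   }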
// This function can only be called during match if we have observed
// (synchronization-wise) that this target has been matched (i.e., the
@@ -644,7 +1038,7 @@ namespace build2
target_state
matched_state (action, bool fail = true) const;
- // See try_match().
+ // See try_match_sync().
//
pair<bool, target_state>
try_matched_state (action, bool fail = true) const;
@@ -664,12 +1058,18 @@ namespace build2
target_state
executed_state (action, bool fail = true) const;
+ // Return true if the state comes from the group. Target must be at least
+ // matched except for ad hoc group members during the execute phase.
+ //
+ bool
+ group_state (action) const;
+
protected:
// Version that should be used during match after the target has been
// matched for this action.
//
// Indicate whether there is a rule match with the first half of the
- // result (see try_match()).
+ // result (see try_match_sync()).
//
pair<bool, target_state>
matched_state_impl (action) const;
@@ -680,110 +1080,246 @@ namespace build2
target_state
executed_state_impl (action) const;
- // Return true if the state comes from the group. Target must be at least
- // matched.
- //
- bool
- group_state (action) const;
-
public:
// Targets to which prerequisites resolve for this action. Note that
// unlike prerequisite::target, these can be resolved to group members.
// NULL means the target should be skipped (or the rule may simply not add
// such a target to the list).
//
- // Note also that it is possible the target can vary from action to
- // action, just like recipes. We don't need to keep track of the action
- // here since the targets will be updated if the recipe is updated,
- // normally as part of rule::apply().
- //
- // Note that the recipe may modify this list.
+ // A rule should make sure that the target's prerequisite_targets are in
+ // the "canonical" form (that is, all the prerequisites that need to be
+ // executed are present with prerequisite_target::target pointing to the
+ // corresponding target). This is relied upon in a number of places,
+ // including in dump and to be able to pretend-execute the operation on
+ // this target without actually calling the recipe (see perform_execute(),
+ // resolve_members_impl() for background). Note that a rule should not
+ // store targets that are semantically prerequisites in an ad hoc manner
+ // (e.g., in match data) with a few well-known exceptions (see
+ // group_recipe and inner_recipe).
+ //
+ // Note that the recipe may modify this list during execute. Normally this
+ // would be just blanking out of ad hoc prerequisites, in which case check
+ // for ad hoc first and for not NULL second if accessing prerequisites of
+ // targets that you did not execute (see the library metadata protocol in
+ // cc for an example).
//
mutable action_state<build2::prerequisite_targets> prerequisite_targets;
- // Auxilary data storage.
+ // Auxiliary data storage.
//
// A rule that matches (i.e., returns true from its match() function) may
- // use this pad to pass data between its match and apply functions as well
- // as the recipe. After the recipe is executed, the data is destroyed by
- // calling data_dtor (if not NULL). The rule should static assert that the
- // size of the pad is sufficient for its needs.
- //
- // Note also that normally at least 2 extra pointers may be stored without
- // a dynamic allocation in the returned recipe (small object optimization
- // in std::function). So if you need to pass data only between apply() and
- // the recipe, then this might be a more convenient way.
- //
- // Note also that a rule that delegates to another rule may not be able to
- // use this mechanism fully since the delegated-to rule may also need the
- // data pad.
- //
- // Currenly the data is not destroyed until the next match.
+ // use this facility to pass data between its match and apply functions as
+ // well as the recipe. Specifically, between match() and apply() the data
+ // is stored in the recipe member (which is std::move_only_function-like).
+ // If the data needs to be passed on to the recipe, then it must become
+ // the recipe itself. Here is a typical arrangement:
+ //
+ // class compile_rule
+ // {
+ // struct match_data
+ // {
+ // ... // Data.
+ //
+ // const compile_rule& rule;
+ //
+ // target_state
+ // operator() (action a, const target& t)
+ // {
+ // return rule.perform_update (a, t, this);
+ // }
+ // };
+ //
+ // virtual bool
+ // match (action a, const target& t)
+ // {
+ // ... // Determine if matching.
+ //
+ // t.data (a, match_data {..., *this});
+ // return true;
+ // }
+ //
+ // virtual bool
+ // apply (action a, target& t)
+ // {
+ // match_data& md (t.data (a));
+ //
+ // ... // Match prerequisites, etc.
+ //
+ // return move (md); // Data becomes the recipe.
+ // }
+ //
+ // target_state
+ // perform_update (action a, const target& t, match_data& md) const
+ // {
+ // ... // Access data (also available as t.data<match_data> (a)).
+ // }
+ // };
+ //
+ // Note: see also similar facility in match_extra.
+ //
+ // After the recipe is executed, the recipe/data is destroyed, unless
+ // explicitly requested not to (see below). The rule may static assert
+ // that the small size of the storage (which doesn't require dynamic
+ // memory allocation) is sufficient for its needs.
+ //
+ // Note also that a rule that delegates to another rule may need to store
+ // the base rule's data/recipe in its own data/recipe.
+
+ // Provide the small object optimization size for the common compilers
+ // (see recipe.hxx for details) in case a rule wants to make sure its data
+ // won't require a dynamic memory allocation. Note that using the generally
+ // available minimum (2 pointers) is not always possible because the
+ // data size may depend on sizes of other compiler-specific types (e.g.,
+ // std::string).
+ //
+ static constexpr size_t small_data_size =
+#if defined(__GLIBCXX__)
+ sizeof (void*) * 2
+#elif defined(_LIBCPP_VERSION)
+ sizeof (void*) * 3
+#elif defined(_MSC_VER)
+ sizeof (void*) * 6
+#else
+ sizeof (void*) * 2 // Assume at least 2 pointers.
+#endif
+ ;
+
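+ // An illustrative sketch (hypothetical match_data type):
+ //
+ //   struct match_data {timestamp mt; const path_target* src;};
+ //
+ //   static_assert (sizeof (match_data) <= target::small_data_size,
+ //                  "match data too big for small object optimization");
+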
+ template <typename T>
+ struct data_wrapper
+ {
+ T d;
+
+ target_state
+ operator() (action, const target&) const // Never called.
+ {
+ return target_state::unknown;
+ }
+ };
+
+ // Avoid wrapping the data if it is already a recipe.
//
- // Note that the recipe may modify the data. Currently reserved for the
- // inner part of the action.
+ // Note that this technique requires a fix for LWG issue 2132 (which all
+ // our minimum supported compiler versions appear to have).
//
- static constexpr size_t data_size = sizeof (string) * 16;
- mutable std::aligned_storage<data_size>::type data_pad;
+ template <typename T>
+ struct data_invocable: std::is_constructible<
+ std::function<recipe_function>,
+ std::reference_wrapper<typename std::remove_reference<T>::type>> {};
- mutable void (*data_dtor) (void*) = nullptr;
+ template <typename T>
+ typename std::enable_if<!data_invocable<T>::value, void>::type
+ data (action a, T&& d) const
+ {
+ using V = typename std::remove_cv<
+ typename std::remove_reference<T>::type>::type;
- template <typename R,
- typename T = typename std::remove_cv<
- typename std::remove_reference<R>::type>::type>
- typename std::enable_if<std::is_trivially_destructible<T>::value,T&>::type
- data (R&& d) const
+ const opstate& s (state[a]);
+ s.recipe = data_wrapper<V> {forward<T> (d)};
+ s.recipe_keep = false; // Can't keep non-recipe data.
+ }
+
+ template <typename T>
+ typename std::enable_if<!data_invocable<T>::value, T&>::type
+ data (action a) const
{
- assert (sizeof (T) <= data_size);
- clear_data ();
- return *new (&data_pad) T (forward<R> (d));
+ using V = typename std::remove_cv<T>::type;
+ return state[a].recipe.target<data_wrapper<V>> ()->d;
}
- template <typename R,
- typename T = typename std::remove_cv<
- typename std::remove_reference<R>::type>::type>
- typename std::enable_if<!std::is_trivially_destructible<T>::value,T&>::type
- data (R&& d) const
+ // Return NULL if there is no data or the data is of a different type.
+ //
+ template <typename T>
+ typename std::enable_if<!data_invocable<T>::value, T*>::type
+ try_data (action a) const
{
- assert (sizeof (T) <= data_size);
- clear_data ();
- T& r (*new (&data_pad) T (forward<R> (d)));
- data_dtor = [] (void* p) {static_cast<T*> (p)->~T ();};
- return r;
+ using V = typename std::remove_cv<T>::type;
+
+ if (auto& r = state[a].recipe)
+ if (auto* t = r.target<data_wrapper<V>> ())
+ return &t->d;
+
+ return nullptr;
}
+ // Note that in this case we don't strip const (the expectation is that we
+ // move the recipe in/out of data).
+ //
+ // If keep is true, then keep the recipe as data after execution. In
+ // particular, this can be used to communicate between inner/outer rules
+ // (see cc::install_rule for an example).
+ //
template <typename T>
- T&
- data () const {return *reinterpret_cast<T*> (&data_pad);}
+ typename std::enable_if<data_invocable<T>::value, void>::type
+ data (action a, T&& d, bool keep = false) const
+ {
+ const opstate& s (state[a]);
+ s.recipe = forward<T> (d);
+ s.recipe_keep = keep;
+ }
void
- clear_data () const
+ keep_data (action a, bool keep = true) const
{
- if (data_dtor != nullptr)
- {
- data_dtor (&data_pad);
- data_dtor = nullptr;
- }
+ state[a].recipe_keep = keep;
+ }
+
+ template <typename T>
+ typename std::enable_if<data_invocable<T>::value, T&>::type
+ data (action a) const
+ {
+ return *state[a].recipe.target<T> ();
+ }
+
+ template <typename T>
+ typename std::enable_if<data_invocable<T>::value, T*>::type
+ try_data (action a) const
+ {
+ auto& r = state[a].recipe;
+ return r ? r.target<T> () : nullptr;
}
// Target type info and casting.
//
public:
const target*
- is_a (const target_type& tt) const {
- return type ().is_a (tt) ? this : nullptr;}
+ is_a (const target_type& tt) const
+ {
+ return type ().is_a (tt) ? this : nullptr;
+ }
template <typename T>
T*
- is_a () {return dynamic_cast<T*> (this);}
+ is_a ()
+ {
+ // At least with GCC we see slightly better and more consistent
+ // performance with our own type information.
+ //
+#if 0
+ return dynamic_cast<T*> (this);
+#else
+ // We can skip dynamically-derived type here (derived_type).
+ //
+ return dynamic_type->is_a<T> () ? static_cast<T*> (this) : nullptr;
+#endif
+ }
template <typename T>
const T*
- is_a () const {return dynamic_cast<const T*> (this);}
+ is_a () const
+ {
+#if 0
+ return dynamic_cast<const T*> (this);
+#else
+ return dynamic_type->is_a<T> () ? static_cast<const T*> (this) : nullptr;
+#endif
+ }
const target*
- is_a (const char* n) const {
- return type ().is_a (n) ? this : nullptr;}
+ is_a (const char* n) const
+ {
+ return type ().is_a (n) ? this : nullptr;
+ }
// Unchecked cast.
//
@@ -795,18 +1331,23 @@ namespace build2
const T&
as () const {return static_cast<const T&> (*this);}
- // Dynamic derivation to support define.
+ // Target type information.
+ //
+ // A derived target is expected to set dynamic_type to its static_type in
+ // its constructor body.
+ //
+ // We also have dynamic "derivation" support (e.g., via define in
+ // buildfile).
//
- const target_type* derived_type = nullptr;
-
const target_type&
type () const
{
- return derived_type != nullptr ? *derived_type : dynamic_type ();
+ return derived_type != nullptr ? *derived_type : *dynamic_type;
}
static const target_type static_type;
- virtual const target_type& dynamic_type () const = 0;
+ const target_type* dynamic_type;
+ const target_type* derived_type = nullptr;
// RW access.
//
@@ -835,13 +1376,19 @@ namespace build2
// Targets should be created via the targets set below.
//
- public:
+ protected:
+ friend class target_set;
+
target (context& c, dir_path d, dir_path o, string n)
: ctx (c),
dir (move (d)), out (move (o)), name (move (n)),
- vars (c, false /* global */),
- state (c) {}
+ vars (*this, false /* shared */),
+ state (c)
+ {
+ dynamic_type = &static_type;
+ }
+ public:
target (target&&) = delete;
target& operator= (target&&) = delete;
@@ -850,8 +1397,6 @@ namespace build2
virtual
~target ();
-
- friend class target_set;
};
// All targets are from the targets set below.
@@ -888,13 +1433,15 @@ namespace build2
// Helper for dealing with the prerequisite inclusion/exclusion (see
// var_include in context.hxx).
//
+ // If the lookup argument is not NULL, then it will be set to the operation-
+ // specific override, if present. Note that in this case the caller is
+ // expected to validate that the override value is valid (note: use the same
+ // diagnostics as in include() for consistency).
+ //
// Note that the include(prerequisite_member) overload is also provided.
//
include_type
- include (action,
- const target&,
- const prerequisite&,
- const target* = nullptr);
+ include (action, const target&, const prerequisite&, lookup* = nullptr);
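+
+ // An illustrative sketch (validation logic omitted):
+ //
+ //   lookup l;
+ //   include_type i (include (a, t, p, &l));
+ //
+ //   if (l) // Operation-specific override is present.
+ //   {
+ //     ... // Validate the override value as include() would.
+ //   }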
// A "range" that presents the prerequisites of a group and one of
// its members as one continuous sequence, or, in other words, as
@@ -1077,7 +1624,8 @@ namespace build2
return member != nullptr ? member : prerequisite.target.load (mo);
}
- // Return as a new prerequisite instance.
+ // Return as a new prerequisite instance. Note that it includes a copy
+ // of prerequisite-specific variables.
//
prerequisite_type
as_prerequisite () const;
@@ -1107,11 +1655,8 @@ namespace build2
return os << pm.key ();
}
- inline include_type
- include (action a, const target& t, const prerequisite_member& pm)
- {
- return include (a, t, pm.prerequisite, pm.member);
- }
+ include_type
+ include (action, const target&, const prerequisite_member&, lookup* = nullptr);
// A "range" that presents a sequence of prerequisites (e.g., from
// group_prerequisites()) as a sequence of prerequisite_member's. For each
@@ -1144,10 +1689,19 @@ namespace build2
// See-through group members iteration mode. Ad hoc members must always
// be entered explicitly.
//
+ // Note that if the group is empty, then we see the group itself (rather
+ // than nothing). Failing that, an empty group would never be executed (e.g.,
+ // during clean) since there is no member to trigger the group execution.
+ // Other than that, it feels like seeing the group in this case should be
+ // harmless (i.e., rules are generally prepared to see prerequisites they
+ // don't recognize).
+ //
enum class members_mode
{
- always, // Iterate over members, assert if not resolvable.
- maybe, // Iterate over members if resolvable, group otherwise.
+ always, // Iterate over members if not empty, group if empty, assert if
+ // not resolvable.
+ maybe, // Iterate over members if resolvable and not empty, group
+ // otherwise.
never // Iterate over group (can still use enter_group()).
};
@@ -1185,7 +1739,7 @@ namespace build2
{
if (r_->mode_ != members_mode::never &&
i_ != r_->e_ &&
- i_->type.see_through)
+ i_->type.see_through ())
switch_mode ();
}
@@ -1200,9 +1754,10 @@ namespace build2
leave_group ();
// Iterate over this group's members. Return false if the member
- // information is not available. Similar to leave_group(), you should
- // increment the iterator after calling this function (provided it
- // returned true).
+ // information is not available (note: it returns true if the group is
+ // empty). Similar to leave_group(), you should increment the iterator
+ // after calling this function provided group() returns true (see
+ // below).
//
bool
enter_group ();
@@ -1212,7 +1767,7 @@ namespace build2
//
// for (...; ++i)
// {
- // if (i->prerequisite.type.see_through)
+ // if (i->prerequisite.type.see_through ())
// {
// for (i.enter_group (); i.group (); )
// {
@@ -1277,8 +1832,7 @@ namespace build2
group_view g_;
size_t j_; // 1-based index, to support enter_group().
const target* k_; // Current member of ad hoc group or NULL.
- mutable typename std::aligned_storage<sizeof (value_type),
- alignof (value_type)>::type m_;
+ alignas (value_type) mutable unsigned char m_[sizeof (value_type)];
};
iterator
@@ -1395,7 +1949,7 @@ namespace build2
const dir_path& out,
const string& name) const
{
- slock l (mutex_);
+ slock l (mutex_, defer_lock); if (ctx.phase != run_phase::load) l.lock ();
auto i (map_.find (target_key {&type, &dir, &out, &name, nullopt}));
return i != map_.end () ? i->second.get () : nullptr;
}
@@ -1409,7 +1963,17 @@ namespace build2
// If the target was inserted, keep the map exclusive-locked and return
// the lock. In this case, the target is effectively still being created
- // since nobody can see it until the lock is released.
+ // since nobody can see it until the lock is released. Note that there
+ // is normally quite a bit of contention around this map so make sure to
+ // not hold the lock longer than absolutely necessary.
+ //
+ // If skip_find is true, then don't first try to find an existing target
+ // with a shared lock, instead going directly for the unique lock and
+ // insert. It's a good idea to pass true as this argument if you know the
+ // target is unlikely to be there.
+ //
+ // If need_lock is false, then release the lock (the target insertion is
+ // indicated by the presence of the associated mutex).
//
pair<target&, ulock>
insert_locked (const target_type&,
@@ -1418,8 +1982,13 @@ namespace build2
string name,
optional<string> ext,
target_decl,
- tracer&);
+ tracer&,
+ bool skip_find = false,
+ bool need_lock = true);
+ // As above but instead of the lock return an indication of whether the
+ // target was inserted.
+ //
pair<target&, bool>
insert (const target_type& tt,
dir_path dir,
@@ -1427,7 +1996,8 @@ namespace build2
string name,
optional<string> ext,
target_decl decl,
- tracer& t)
+ tracer& t,
+ bool skip_find = false)
{
auto p (insert_locked (tt,
move (dir),
@@ -1435,9 +2005,11 @@ namespace build2
move (name),
move (ext),
decl,
- t));
+ t,
+ skip_find,
+ false));
- return pair<target&, bool> (p.first, p.second.owns_lock ()); // Clang 3.7
+ return pair<target&, bool> (p.first, p.second.mutex () != nullptr);
}
// Note that the following versions always enter implied targets.
@@ -1449,7 +2021,8 @@ namespace build2
dir_path out,
string name,
optional<string> ext,
- tracer& t)
+ tracer& t,
+ bool skip_find = false)
{
return insert (tt,
move (dir),
@@ -1457,7 +2030,8 @@ namespace build2
move (name),
move (ext),
target_decl::implied,
- t).first.template as<T> ();
+ t,
+ skip_find).first.template as<T> ();
}
template <typename T>
@@ -1466,9 +2040,10 @@ namespace build2
const dir_path& out,
const string& name,
const optional<string>& ext,
- tracer& t)
+ tracer& t,
+ bool skip_find = false)
{
- return insert<T> (T::static_type, dir, out, name, ext, t);
+ return insert<T> (T::static_type, dir, out, name, ext, t, skip_find);
}
template <typename T>
@@ -1476,18 +2051,23 @@ namespace build2
insert (const dir_path& dir,
const dir_path& out,
const string& name,
- tracer& t)
+ tracer& t,
+ bool skip_find = false)
{
- return insert<T> (dir, out, name, nullopt, t);
+ return insert<T> (dir, out, name, nullopt, t, skip_find);
}
// Note: not MT-safe so can only be used during serial execution.
//
public:
- using iterator = butl::map_iterator_adapter<map_type::const_iterator>;
+ using iterator = butl::map_iterator_adapter<map_type::iterator>;
+ using const_iterator = butl::map_iterator_adapter<map_type::const_iterator>;
+
+ iterator begin () {return map_.begin ();}
+ iterator end () {return map_.end ();}
- iterator begin () const {return map_.begin ();}
- iterator end () const {return map_.end ();}
+ const_iterator begin () const {return map_.begin ();}
+ const_iterator end () const {return map_.end ();}
size_t
size () const {return map_.size ();}
@@ -1506,6 +2086,10 @@ namespace build2
mutable shared_mutex mutex_;
map_type map_;
+
+#if 0
+ size_t buckets_ = 0;
+#endif
};
// Modification time-based target.
@@ -1513,9 +2097,13 @@ namespace build2
class LIBBUILD2_SYMEXPORT mtime_target: public target
{
public:
- using target::target;
+ mtime_target (context& c, dir_path d, dir_path o, string n)
+ : target (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
- // Modification time is an "atomic cash". That is, it can be set at any
+ // Modification time is an "atomic cache". That is, it can be set at any
// time (including on a const instance) and we assume everything will be
// ok regardless of the order in which racing updates happen because we do
// not modify the external state (which is the source of timestamps) while
@@ -1548,8 +2136,7 @@ namespace build2
// If the mtime is unknown, then load it from the filesystem also caching
// the result.
//
- // Note: can only be called during executing and must not be used if the
- // target state is group.
+ // Note: must not be used if the target state is group.
//
timestamp
load_mtime (const path&) const;
@@ -1600,13 +2187,17 @@ namespace build2
class LIBBUILD2_SYMEXPORT path_target: public mtime_target
{
public:
- using mtime_target::mtime_target;
+ path_target (context& c, dir_path d, dir_path o, string n)
+ : mtime_target (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
using path_type = build2::path;
// Target path. Must be absolute and normalized.
//
- // Target path is an "atomic consistent cash". That is, it can be set at
+ // Target path is an "atomic consistent cache". That is, it can be set at
// any time (including on a const instance) but any subsequent updates
// must set the same path. Or, in other words, once the path is set, it
// never changes.
@@ -1749,11 +2340,62 @@ namespace build2
class LIBBUILD2_SYMEXPORT file: public path_target
{
public:
- using path_target::path_target;
+ file (context& c, dir_path d, dir_path o, string n)
+ : path_target (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
+
+ public:
+ static const target_type static_type;
+ };
+
+ // Mtime-based group target.
+ //
+ // Used to support explicit groups in buildfiles: can be derived from,
+ // populated with static members using the group{foo}<...> syntax, and
+ // matched with an ad hoc recipe/rule, including dynamic member extraction.
+ // Note that it is not see-through but a derived group can be made see-
+ // through via the [see_through] attribute.
+ //
+ // Note also that you shouldn't use it as a base for a custom group defined
+ // in C++, instead deriving from mtime_target directly and using a custom
+ // members layout more appropriate for the group's semantics. To put it
+ // another way, a group-based target should only be matched by an ad hoc
+ // recipe/rule (see match_rule_impl() in algorithm.cxx for details).
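+ //
+ // An illustrative buildfile sketch (hypothetical names; recipe body
+ // omitted):
+ //
+ //   group{gen}<file{foo.hxx} file{foo.cxx}>: cli{foo}
+ //   {{
+ //     ...
+ //   }}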
+ //
+ class LIBBUILD2_SYMEXPORT group: public mtime_target
+ {
+ public:
+ vector<reference_wrapper<const target>> static_members;
+
+ // Note: we expect no NULL entries in members.
+ //
+ vector<const target*> members; // Layout compatible with group_view.
+ action members_action; // Action on which members were resolved.
+ size_t members_on = 0; // Operation number on which members were resolved.
+ size_t members_static; // Number of static ones in members (always first).
+
+ void
+ reset_members (action a)
+ {
+ members.clear ();
+ members_action = a;
+ members_on = ctx.current_on;
+ members_static = 0;
+ }
+
+ virtual group_view
+ group_members (action) const override;
+
+ group (context& c, dir_path d, dir_path o, string n)
+ : mtime_target (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
// Alias target. It represents a list of targets (its prerequisites)
@@ -1762,11 +2404,14 @@ namespace build2
class LIBBUILD2_SYMEXPORT alias: public target
{
public:
- using target::target;
+ alias (context& c, dir_path d, dir_path o, string n)
+ : target (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
// Directory target. Note that this is not a filesystem directory
@@ -1776,11 +2421,14 @@ namespace build2
class LIBBUILD2_SYMEXPORT dir: public alias
{
public:
- using alias::alias;
+ dir (context& c, dir_path d, dir_path o, string n)
+ : alias (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
public:
template <typename K>
@@ -1809,11 +2457,14 @@ namespace build2
class LIBBUILD2_SYMEXPORT fsdir: public target
{
public:
- using target::target;
+ fsdir (context& c, dir_path d, dir_path o, string n)
+ : target (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
// Executable file (not necessarily binary, though we do fallback to the
@@ -1823,7 +2474,11 @@ namespace build2
class LIBBUILD2_SYMEXPORT exe: public file
{
public:
- using file::file;
+ exe (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
using process_path_type = build2::process_path;
@@ -1851,7 +2506,6 @@ namespace build2
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
private:
process_path_type process_path_;
@@ -1860,11 +2514,30 @@ namespace build2
class LIBBUILD2_SYMEXPORT buildfile: public file
{
public:
- using file::file;
+ buildfile (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
+
+ public:
+ static const target_type static_type;
+ };
+
+ // This target type is primarily used for files mentioned in the `recipe`
+ // directive.
+ //
+ class LIBBUILD2_SYMEXPORT buildscript: public file
+ {
+ public:
+ buildscript (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
// Common documentation file target.
@@ -1872,11 +2545,14 @@ namespace build2
class LIBBUILD2_SYMEXPORT doc: public file
{
public:
- using file::file;
+ doc (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
// Legal files (LICENSE, AUTHORS, COPYRIGHT, etc).
@@ -1884,11 +2560,14 @@ namespace build2
class LIBBUILD2_SYMEXPORT legal: public doc
{
public:
- using doc::doc;
+ legal (context& c, dir_path d, dir_path o, string n)
+ : doc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
// The problem with man pages is this: different platforms have
@@ -1923,26 +2602,32 @@ namespace build2
// in the generic install rule. @@ This is still a TODO.
//
// Note that handling subsections with man1..9{} is easy, we
- // simply specify the extension explicitly, e.g., man{foo.1p}.
+ // simply specify the extension explicitly, e.g., man1{foo.1p}.
//
class LIBBUILD2_SYMEXPORT man: public doc
{
public:
- using doc::doc;
+ man (context& c, dir_path d, dir_path o, string n)
+ : doc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_SYMEXPORT man1: public man
{
public:
- using man::man;
+ man1 (context& c, dir_path d, dir_path o, string n)
+ : man (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
// We derive manifest from doc rather than file so that it gets automatically
@@ -1953,11 +2638,14 @@ namespace build2
class LIBBUILD2_SYMEXPORT manifest: public doc
{
public:
- using doc::doc;
+ manifest (context& c, dir_path d, dir_path o, string n)
+ : doc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
// Common implementation of the target factory, extension, and search
@@ -2002,32 +2690,39 @@ namespace build2
string&, optional<string>&, const location&,
bool);
- // Target print functions.
+ // Target print functions (target_type::print).
//
// Target type uses the extension but it is fixed and there is no use
// printing it (e.g., man1{}).
//
- LIBBUILD2_SYMEXPORT void
- target_print_0_ext_verb (ostream&, const target_key&);
+ LIBBUILD2_SYMEXPORT bool
+ target_print_0_ext_verb (ostream&, const target_key&, bool);
// Target type uses the extension and there is normally no default so it
// should be printed (e.g., file{}).
//
- LIBBUILD2_SYMEXPORT void
- target_print_1_ext_verb (ostream&, const target_key&);
+ LIBBUILD2_SYMEXPORT bool
+ target_print_1_ext_verb (ostream&, const target_key&, bool);
+
+ // Target search functions (target_type::search).
+ //
// The default behavior, that is, look for an existing target in the
// prerequisite's directory scope.
//
+ // Note that this implementation assumes a target can only be found in the
+ // out tree (targets that can be in the src tree would normally use
+ // file_search() below).
+ //
LIBBUILD2_SYMEXPORT const target*
- target_search (const target&, const prerequisite_key&);
+ target_search (context&, const target*, const prerequisite_key&);
- // First look for an existing target as above. If not found, then look
- // for an existing file in the target-type-specific list of paths.
+ // First look for an existing target both in out and src. If not found, then
+ // look for an existing file in src.
//
LIBBUILD2_SYMEXPORT const target*
- file_search (const target&, const prerequisite_key&);
+ file_search (context&, const target*, const prerequisite_key&);
}
#include <libbuild2/target.ixx>
diff --git a/libbuild2/target.ixx b/libbuild2/target.ixx
index cfc3847..39b81e7 100644
--- a/libbuild2/target.ixx
+++ b/libbuild2/target.ixx
@@ -3,12 +3,13 @@
#include <cstring> // memcpy()
-#include <libbuild2/filesystem.hxx> // mtime()
-
#include <libbuild2/export.hxx>
namespace build2
{
+ LIBBUILD2_SYMEXPORT timestamp
+ mtime (const char*); // filesystem.cxx
+
// target_key
//
inline const string& target_key::
@@ -52,20 +53,102 @@ namespace build2
return r;
}
+ // rule_hints
+ //
+ inline const string& rule_hints::
+ find (const target_type& tt, operation_id o, bool ut) const
+ {
+ // Look for fallback during the same iteration.
+ //
+ const value_type* f (nullptr);
+
+ for (const value_type& v: map)
+ {
+ if (!(v.type == nullptr ? ut : tt.is_a (*v.type)))
+ continue;
+
+ if (v.operation == o)
+ return v.hint;
+
+ if (f == nullptr &&
+ v.operation == default_id &&
+ (o == update_id || o == clean_id))
+ f = &v;
+ }
+
+ return f != nullptr ? f->hint : empty_string;
+ }
+
+ inline void rule_hints::
+ insert (const target_type* tt, operation_id o, string h)
+ {
+ auto i (find_if (map.begin (), map.end (),
+ [tt, o] (const value_type& v)
+ {
+ return v.operation == o && v.type == tt;
+ }));
+
+ if (i == map.end ())
+ map.push_back (value_type {tt, o, move (h)});
+ else
+ i->hint = move (h);
+ }
+
+ inline const string& target::
+ find_hint (operation_id o) const
+ {
+ using flag = target_type::flag;
+
+ const target_type& tt (type ());
+
+ // First check the target itself.
+ //
+ if (!rule_hints.empty ())
+ {
+ // If this is a group that "gave" its untyped hints to the members, then
+ // ignore untyped entries.
+ //
+ bool ut ((tt.flags & flag::member_hint) != flag::member_hint);
+
+ const string& r (rule_hints.find (tt, o, ut));
+ if (!r.empty ())
+ return r;
+ }
+
+ // Then check the group.
+ //
+ if (const target* g = group)
+ {
+ if (!g->rule_hints.empty ())
+ {
+ // If the group "gave" its untyped hints to the members, then don't
+ // ignore untyped entries.
+ //
+ bool ut ((g->type ().flags & flag::member_hint) == flag::member_hint);
+
+ return g->rule_hints.find (tt, o, ut);
+ }
+ }
+
+ return empty_string;
+ }
+
// match_extra
//
inline void match_extra::
- init (bool f)
+ reinit (bool f)
{
+ clear_data ();
fallback = f;
- buffer.clear ();
+ cur_options = all_options;
+ new_options = 0;
+ posthoc_prerequisite_targets = nullptr;
}
inline void match_extra::
free ()
{
- string s;
- buffer.swap (s);
+ clear_data ();
}
// target
@@ -155,24 +238,41 @@ namespace build2
}
inline bool target::
- matched (action a) const
+ matched (action a, memory_order mo) const
{
- assert (ctx.phase == run_phase::execute);
+ assert (ctx.phase == run_phase::match ||
+ ctx.phase == run_phase::execute);
const opstate& s (state[a]);
+ size_t c (s.task_count.load (mo));
+ size_t b (ctx.count_base ()); // Note: cannot do (c - b)!
- // Note that while the target could be being executed, we should see at
- // least offset_matched since it must have been "achieved" before the
- // phase switch.
- //
- size_t c (s.task_count.load (memory_order_relaxed) - ctx.count_base ());
-
- return c >= offset_matched;
+ if (ctx.phase == run_phase::match)
+ {
+ // While it will normally be applied, it could also be already executed
+ // or being relocked to reapply match options (see lock_impl() for
+ // background).
+ //
+ // Note that we can't just do >= offset_applied since offset_busy can
+ // also mean it is being matched.
+ //
+ // See also matched_state_impl(), mtime() for similar logic.
+ //
+ return (c == (b + offset_applied) ||
+ c == (b + offset_executed) ||
+ (c >= (b + offset_busy) &&
+ s.match_extra.cur_options_.load (memory_order_relaxed) != 0));
+ }
+ else
+ {
+ // Note that while the target could be being executed, we should see at
+ // least offset_matched since it must have been "achieved" before the
+ // phase switch.
+ //
+ return c >= (b + offset_matched);
+ }
}
- LIBBUILD2_SYMEXPORT target_state
- group_action (action, const target&); // <libbuild2/algorithm.hxx>
-
inline bool target::
group_state (action a) const
{
@@ -180,16 +280,32 @@ namespace build2
// raw state is not group provided the recipe is group_recipe and the
// state is unknown (see mtime() for a discussion on why we do it).
//
+ // Note that additionally s.state may not be target_state::group even
+ // after execution due to deferment (see execute_impl() for details).
+ //
+ // @@ Hm, I wonder why not just return s.recipe_group_action now that we
+ // cache it.
+ //
+
+ // This special hack allows us to do things like query an ad hoc member's
+ // state or mtime without matching/executing the member, only the group.
+ // Requiring matching/executing the member would be too burdensome and
+ // this feels harmless (ad hoc membership cannot be changed during the
+ // execute phase).
+ //
+ // Note: this test must come first since the member may not be matched and
+ // thus its state uninitialized.
+ //
+ if (ctx.phase == run_phase::execute && adhoc_group_member ())
+ return true;
+
const opstate& s (state[a]);
if (s.state == target_state::group)
return true;
if (s.state == target_state::unknown && group != nullptr)
- {
- if (recipe_function* const* f = s.recipe.target<recipe_function*> ())
- return *f == &group_action;
- }
+ return s.recipe_group_action;
return false;
}
@@ -203,15 +319,22 @@ namespace build2
// Note: already synchronized.
//
- size_t o (s.task_count.load (memory_order_relaxed) - ctx.count_base ());
+ size_t c (s.task_count.load (memory_order_relaxed));
+ size_t b (ctx.count_base ()); // Note: cannot do (c - b)!
- if (o == offset_tried)
+ if (c == (b + offset_tried))
return make_pair (false, target_state::unknown);
else
{
- // Normally applied but can also be already executed.
+ // The same semantics as in target::matched(). Note that in the executed
+ // case we are guaranteed to be synchronized since we are in the match
+ // phase.
//
- assert (o == offset_applied || o == offset_executed);
+ assert (c == (b + offset_applied) ||
+ c == (b + offset_executed) ||
+ (c >= (b + offset_busy) &&
+ s.match_extra.cur_options_.load (memory_order_relaxed) != 0));
+
return make_pair (true, (group_state (a) ? group->state[a] : s).state);
}
}
@@ -330,15 +453,27 @@ namespace build2
// include()
//
LIBBUILD2_SYMEXPORT include_type
- include_impl (action, const target&, const prerequisite&, const target*);
+ include_impl (action, const target&,
+ const prerequisite&, const target*,
+ lookup*);
inline include_type
- include (action a, const target& t, const prerequisite& p, const target* m)
+ include (action a, const target& t, const prerequisite& p, lookup* l)
{
// Most of the time no prerequisite-specific variables will be specified,
// so let's optimize for that.
//
- return p.vars.empty () ? include_type (true) : include_impl (a, t, p, m);
+ return p.vars.empty ()
+ ? include_type (true)
+ : include_impl (a, t, p, nullptr, l);
+ }
+
+ inline include_type
+ include (action a, const target& t, const prerequisite_member& pm, lookup* l)
+ {
+ return pm.prerequisite.vars.empty ()
+ ? include_type (true)
+ : include_impl (a, t, pm.prerequisite, pm.member, l);
}
// group_prerequisites
@@ -423,7 +558,12 @@ namespace build2
//
assert (!member->adhoc_group_member ());
- return prerequisite_type (*member);
+ // Feels like copying the prerequisite's variables to member is more
+ // correct than not (consider for_install, for example).
+ //
+ prerequisite_type p (*member);
+ p.vars = prerequisite.vars;
+ return p;
}
inline prerequisite_key prerequisite_member::
@@ -456,6 +596,25 @@ namespace build2
}
template <typename T>
+ inline void prerequisite_members_range<T>::iterator::
+ switch_mode ()
+ {
+ g_ = resolve_members (*i_);
+
+ if (g_.members != nullptr)
+ {
+ // See empty see through groups as groups.
+ //
+ for (j_ = 1; j_ <= g_.count && g_.members[j_ - 1] == nullptr; ++j_) ;
+
+ if (j_ > g_.count)
+ g_.count = 0;
+ }
+ else
+ assert (r_->mode_ != members_mode::always); // Group can't be resolved.
+ }
+
+ template <typename T>
inline auto prerequisite_members_range<T>::iterator::
operator++ () -> iterator&
{
@@ -480,7 +639,7 @@ namespace build2
if (r_->mode_ != members_mode::never &&
i_ != r_->e_ &&
- i_->type.see_through)
+ i_->type.see_through ())
switch_mode ();
}
@@ -587,15 +746,20 @@ namespace build2
inline timestamp mtime_target::
load_mtime (const path& p) const
{
- assert (ctx.phase == run_phase::execute &&
- !group_state (action () /* inner */));
+ // We can only enforce "not group state" during the execute phase. During
+ // match (e.g., the target is being matched), we will just have to pay
+ // attention.
+ //
+ assert (ctx.phase == run_phase::match ||
+ (ctx.phase == run_phase::execute &&
+ !group_state (action () /* inner */)));
duration::rep r (mtime_.load (memory_order_consume));
if (r == timestamp_unknown_rep)
{
assert (!p.empty ());
- r = build2::mtime (p).time_since_epoch ().count ();
+ r = build2::mtime (p.string ().c_str ()).time_since_epoch ().count ();
mtime_.store (r, memory_order_release);
}
@@ -605,6 +769,8 @@ namespace build2
inline bool mtime_target::
newer (timestamp mt, target_state s) const
{
+ assert (s != target_state::unknown); // Should be executed.
+
timestamp mp (mtime ());
// What do we do if timestamps are equal? This can happen, for example,
diff --git a/libbuild2/target.txx b/libbuild2/target.txx
index 5b48ad1..976d204 100644
--- a/libbuild2/target.txx
+++ b/libbuild2/target.txx
@@ -1,46 +1,11 @@
// file : libbuild2/target.txx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#include <libbutl/filesystem.hxx> // dir_iterator
-
#include <libbuild2/scope.hxx>
#include <libbuild2/diagnostics.hxx>
namespace build2
{
- // prerequisite_members_range
- //
- template <typename T>
- void prerequisite_members_range<T>::iterator::
- switch_mode ()
- {
- // A group could be empty, so we may have to iterate.
- //
- do
- {
- g_ = resolve_members (*i_);
-
- // Group could not be resolved.
- //
- if (g_.members == nullptr)
- {
- assert (r_->mode_ != members_mode::always);
- return;
- }
-
- // Skip empty see through groups.
- //
- for (j_ = 1; j_ <= g_.count && g_.members[j_ - 1] == nullptr; ++j_) ;
- if (j_ <= g_.count)
- break;
-
- g_.count = 0;
- }
- while (++i_ != r_->e_ && i_->type.see_through);
- }
-
- //
- //
template <const char* ext>
const char*
target_extension_fix (const target_key& tk, const scope*)
diff --git a/libbuild2/test/common.cxx b/libbuild2/test/common.cxx
index 7fdb347..89f3dd6 100644
--- a/libbuild2/test/common.cxx
+++ b/libbuild2/test/common.cxx
@@ -150,8 +150,7 @@ namespace build2
t.name == n->value && // Name matches.
tt.name == n->type && // Target type matches.
d == n->dir && // Directory matches.
- (search_existing (*n, *root_) == &t ||
- search_existing (*n, *root_, d) == &t);
+ search_existing (*n, *root_) == &t;
if (r)
break;
@@ -198,8 +197,7 @@ namespace build2
t.name == n->value &&
tt.name == n->type &&
d == n->dir &&
- (search_existing (*n, *root_) == &t ||
- search_existing (*n, *root_, d) == &t);
+ search_existing (*n, *root_) == &t;
if (!r)
continue; // Not our target.
diff --git a/libbuild2/test/init.cxx b/libbuild2/test/init.cxx
index 539cdec..32548f4 100644
--- a/libbuild2/test/init.cxx
+++ b/libbuild2/test/init.cxx
@@ -23,6 +23,8 @@ namespace build2
{
namespace test
{
+ static const file_rule file_rule_ (true /* check_type */);
+
void
boot (scope& rs, const location&, module_boot_extra& extra)
{
@@ -30,15 +32,14 @@ namespace build2
l5 ([&]{trace << "for " << rs;});
- // Register our operations.
- //
- rs.insert_operation (test_id, op_test);
- rs.insert_operation (update_for_test_id, op_update_for_test);
-
// Enter module variables. Do it during boot in case they get assigned
// in bootstrap.build.
//
- auto& vp (rs.var_pool ());
+ // Most of the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
+ auto& pvp (rs.var_pool ()); // For `test` and `for_test`.
common_data d {
@@ -69,7 +70,7 @@ namespace build2
// The test variable is a name which can be a path (with the
// true/false special values) or a target name.
//
- vp.insert<name> ("test", variable_visibility::target),
+ pvp.insert<name> ("test", variable_visibility::target),
vp.insert<strings> ("test.options"),
vp.insert<strings> ("test.arguments"),
@@ -111,12 +112,12 @@ namespace build2
// This one is used by other modules/rules.
//
- vp.insert<bool> ("for_test", variable_visibility::prereq);
+ pvp.insert<bool> ("for_test", variable_visibility::prereq);
// These are only used in testscript.
//
- vp.insert<strings> ("test.redirects");
- vp.insert<strings> ("test.cleanups");
+ vp.insert<cmdline> ("test.redirects");
+ vp.insert<cmdline> ("test.cleanups");
// Unless already set, default test.target to build.host. Note that it
// can still be overriden by the user, e.g., in root.build.
@@ -125,9 +126,14 @@ namespace build2
value& v (rs.assign (d.test_target));
if (!v || v.empty ())
- v = cast<target_triplet> (rs.ctx.global_scope["build.host"]);
+ v = *rs.ctx.build_host;
}
+ // Register our operations.
+ //
+ rs.insert_operation (test_id, op_test, &d.var_test);
+ rs.insert_operation (update_for_test_id, op_update_for_test, &d.var_test);
+
extra.set_module (new module (move (d)));
}
@@ -296,18 +302,18 @@ namespace build2
{
default_rule& dr (m);
- // Note: register for mtime_target to take priority over the fallback
- // rule below.
- //
- rs.insert_rule<target> (perform_test_id, "test", dr);
- rs.insert_rule<mtime_target> (perform_test_id, "test", dr);
- rs.insert_rule<alias> (perform_test_id, "test", dr);
+ rs.insert_rule<target> (perform_test_id, "test", dr);
+ rs.insert_rule<alias> (perform_test_id, "test", dr);
// Register the fallback file rule for the update-for-test operation,
// similar to update.
//
- rs.global_scope ().insert_rule<mtime_target> (
- perform_test_id, "test.file", file_rule::instance);
+ // Note: use target instead of anything more specific (such as
+ // mtime_target) in order not to take precedence over the "test" rule
+ // above.
+ //
+ rs.global_scope ().insert_rule<target> (
+ perform_test_id, "test.file", file_rule_);
}
return true;
diff --git a/libbuild2/test/operation.cxx b/libbuild2/test/operation.cxx
index 841abb5..2535adb 100644
--- a/libbuild2/test/operation.cxx
+++ b/libbuild2/test/operation.cxx
@@ -17,14 +17,8 @@ namespace build2
namespace test
{
static operation_id
- test_pre (context&,
- const values& params,
- meta_operation_id mo,
- const location& l)
+ pre_test (context&, const values&, meta_operation_id mo, const location&)
{
- if (!params.empty ())
- fail (l) << "unexpected parameters for operation test";
-
// Run update as a pre-operation, unless we are disfiguring.
//
return mo != disfigure_id ? update_id : 0;
@@ -70,7 +64,9 @@ namespace build2
"has nothing to test", // We cannot "be tested".
execution_mode::first,
1 /* concurrency */,
- &test_pre,
+ &pre_test,
+ nullptr,
+ nullptr,
nullptr,
nullptr,
&adhoc_apply
@@ -90,6 +86,8 @@ namespace build2
op_update.concurrency,
op_update.pre_operation,
op_update.post_operation,
+ op_update.operation_pre,
+ op_update.operation_post,
op_update.adhoc_match,
op_update.adhoc_apply
};
diff --git a/libbuild2/test/rule.cxx b/libbuild2/test/rule.cxx
index 06fb12f..28eb35b 100644
--- a/libbuild2/test/rule.cxx
+++ b/libbuild2/test/rule.cxx
@@ -30,7 +30,7 @@ namespace build2
namespace test
{
bool rule::
- match (action, target&, const string&) const
+ match (action, target&) const
{
// We always match, even if this target is not testable (so that we can
// ignore it; see apply()).
@@ -66,11 +66,11 @@ namespace build2
// Resolve group members.
//
- if (!see_through || t.type ().see_through)
+ if (!see_through_only || t.type ().see_through ())
{
// Remember that we are called twice: first during update for test
// (pre-operation) and then during test. During the former, we rely on
- // the normall update rule to resolve the group members. During the
+ // the normal update rule to resolve the group members. During the
// latter, there will be no rule to do this but the group will already
// have been resolved by the pre-operation.
//
@@ -540,11 +540,19 @@ namespace build2
if (verb)
{
- diag_record dr (text);
- dr << "test " << ts;
-
- if (!t.is_a<alias> ())
- dr << ' ' << t;
+ // If the target is an alias, then testscript itself is the
+ // target.
+ //
+ if (t.is_a<alias> ())
+ print_diag ("test", ts);
+ else
+ {
+ // In this case the test is really a combination of the target
+ // and testscript and using "->" feels off. Also, let's list the
+            // testscript after the target even though it's a source.
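+            //
+            // With "+" passed as the combiner the diagnostics come out
+            // along the lines of `test <target> + <testscript>`.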
+ //
+ print_diag ("test", t, ts, "+");
+ }
}
res.push_back (ctx.dry_run
@@ -555,22 +563,22 @@ namespace build2
{
scope_state& r (res.back ());
- if (!ctx.sched.async (ctx.count_busy (),
- t[a].task_count,
- [this] (const diag_frame* ds,
- scope_state& r,
- const target& t,
- const testscript& ts,
- const dir_path& wd)
- {
- diag_frame::stack_guard dsg (ds);
- r = perform_script_impl (t, ts, wd, *this);
- },
- diag_frame::stack (),
- ref (r),
- cref (t),
- cref (ts),
- cref (wd)))
+ if (!ctx.sched->async (ctx.count_busy (),
+ t[a].task_count,
+ [this] (const diag_frame* ds,
+ scope_state& r,
+ const target& t,
+ const testscript& ts,
+ const dir_path& wd)
+ {
+ diag_frame::stack_guard dsg (ds);
+ r = perform_script_impl (t, ts, wd, *this);
+ },
+ diag_frame::stack (),
+ ref (r),
+ cref (t),
+ cref (ts),
+ cref (wd)))
{
// Executed synchronously. If failed and we were not asked to
// keep going, bail out.
@@ -641,25 +649,50 @@ namespace build2
// Stack-allocated linked list of information about the running pipeline
// processes.
//
+ // Note: constructed incrementally.
+ //
struct pipe_process
{
- process& proc;
- const char* prog; // Only for diagnostics.
+ // Initially NULL. Set to the address of the process object when it is
+ // created. Reset back to NULL when the process is executed and its exit
+ // status is collected (see complete_pipe() for details).
+ //
+ process* proc = nullptr;
+
+ char const** args; // Only for diagnostics.
+
+ diag_buffer dbuf;
+ bool force_dbuf;
// True if this process has been terminated.
//
bool terminated = false;
- pipe_process* prev; // NULL for the left-most program.
+ // True if this process has been terminated but we failed to read out
+      // its stderr stream within a reasonable timeframe (2 seconds) after the
+ // termination.
+ //
+ // Note that this may happen if there is a still running child process
+ // of the terminated process which has inherited the parent's stderr
+ // file descriptor.
+ //
+ bool unread_stderr = false;
- pipe_process (process& p, const char* g, pipe_process* r)
- : proc (p), prog (g), prev (r) {}
+ pipe_process* prev; // NULL for the left-most program.
+ pipe_process* next; // Left-most program for the right-most program.
+
+ pipe_process (context& x,
+ char const** as,
+ bool fb,
+ pipe_process* p,
+ pipe_process* f)
+ : args (as), dbuf (x), force_dbuf (fb), prev (p), next (f) {}
};
- static bool
+ static void
run_test (const target& t,
- diag_record& dr,
char const** args,
+ int ofd,
const optional<timestamp>& deadline,
pipe_process* prev = nullptr)
{
@@ -669,14 +702,28 @@ namespace build2
for (next++; *next != nullptr; next++) ;
next++;
+ bool last (*next == nullptr);
+
// Redirect stdout to a pipe unless we are last.
//
- int out (*next != nullptr ? -1 : 1);
- bool pr;
+ int out (last ? ofd : -1);
- // Absent if the process misses the deadline.
+ // Propagate the pointer to the left-most program.
//
- optional<process_exit> pe;
+      // Also force diag buffering for the trailing diff process, so its
+ // stderr is never printed if the test program fails (see
+ // complete_pipe() for details).
+ //
+ pipe_process pp (t.ctx,
+ args,
+ last && ofd == 2,
+ prev,
+ prev != nullptr ? prev->next : nullptr);
+
+ if (prev != nullptr)
+ prev->next = &pp;
+ else
+ pp.next = &pp; // Points to itself.
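+
+      // Once the whole pipeline has been constructed by the recursive
+      // run_test() calls below, the prev links lead from the right-most
+      // program back to the left-most one while the next links lead in the
+      // opposite direction, with the right-most program's next pointing to
+      // the left-most one (which is what complete_pipe() relies on).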
try
{
@@ -707,11 +754,11 @@ namespace build2
{
try
{
- p->proc.term ();
+ p->proc->term ();
}
catch (const process_error& e)
{
- dr << fail << "unable to terminate " << p->prog << ": " << e;
+ dr << fail << "unable to terminate " << p->args[0] << ": " << e;
}
p->terminated = true;
@@ -724,7 +771,7 @@ namespace build2
for (pipe_process* p (pp); p != nullptr; p = p->prev)
{
- process& pr (p->proc);
+ process& pr (*p->proc);
try
{
@@ -736,26 +783,310 @@ namespace build2
}
catch (const process_error& e)
{
- dr << fail << "unable to wait/kill " << p->prog << ": " << e;
+ dr << fail << "unable to wait/kill " << p->args[0] << ": " << e;
+ }
+ }
+ };
+
+        // Read out all the pipeline's buffered stderr streams watching for
+ // the deadline, if specified. If the deadline is reached, then
+ // terminate the whole pipeline, move the deadline by another 2
+ // seconds, and continue reading.
+ //
+ // Note that we assume that this timeout increment is normally
+ // sufficient to read out the buffered data written by the already
+ // terminated processes. If, however, that's not the case (see
+ // pipe_process for the possible reasons), then we just set
+ // unread_stderr flag to true for such processes and bail out.
+ //
+ // Also note that this implementation is inspired by the
+ // script::run_pipe::read_pipe() lambda.
+ //
+ auto read_pipe = [&pp, &deadline, &term_pipe] ()
+ {
+ fdselect_set fds;
+ for (pipe_process* p (&pp); p != nullptr; p = p->prev)
+ {
+ diag_buffer& b (p->dbuf);
+
+ if (b.is.is_open ())
+ fds.emplace_back (b.is.fd (), p);
+ }
+
+ optional<timestamp> dl (deadline);
+ bool terminated (false);
+
+ for (size_t unread (fds.size ()); unread != 0;)
+ {
+ try
+ {
+ // If a deadline is specified, then pass the timeout to
+ // fdselect().
+ //
+ if (dl)
+ {
+ timestamp now (system_clock::now ());
+
+ if (*dl <= now || ifdselect (fds, *dl - now) == 0)
+ {
+ if (!terminated)
+ {
+ term_pipe (&pp);
+ terminated = true;
+
+ dl = system_clock::now () + chrono::seconds (2);
+ continue;
+ }
+ else
+ {
+ for (fdselect_state& s: fds)
+ {
+ if (s.fd != nullfd)
+ {
+ pipe_process* p (static_cast<pipe_process*> (s.data));
+
+ p->unread_stderr = true;
+
+ // Let's also close the stderr stream not to confuse
+ // diag_buffer::close() (see script::read() for
+ // details).
+ //
+ try
+ {
+ p->dbuf.is.close ();
+ }
+ catch (const io_error&) {}
+ }
+ }
+
+ break;
+ }
+ }
+ }
+ else
+ ifdselect (fds);
+
+ for (fdselect_state& s: fds)
+ {
+ if (s.ready)
+ {
+ pipe_process* p (static_cast<pipe_process*> (s.data));
+
+ if (!p->dbuf.read (p->force_dbuf))
+ {
+ s.fd = nullfd;
+ --unread;
+ }
+ }
+ }
+ }
+ catch (const io_error& e)
+ {
+ fail << "io error reading pipeline streams: " << e;
+ }
+ }
+ };
+
+ // Wait for the pipeline processes to complete, watching for the
+ // deadline, if specified. If the deadline is reached, then terminate
+ // the whole pipeline.
+ //
+ // Note: must be called after read_pipe().
+ //
+ auto wait_pipe = [&pp, &deadline, &timed_wait, &term_pipe] ()
+ {
+ for (pipe_process* p (&pp); p != nullptr; p = p->prev)
+ {
+ try
+ {
+ if (!deadline)
+ p->proc->wait ();
+ else if (!timed_wait (*p->proc, *deadline))
+ term_pipe (p);
+ }
+ catch (const process_error& e)
+ {
+ fail << "unable to wait " << p->args[0] << ": " << e;
+ }
+ }
+ };
+
+ // Iterate over the pipeline processes left to right, printing their
+ // stderr if buffered and issuing the diagnostics if the exit code is
+ // not available (terminated abnormally or due to a deadline), is
+        // non-zero, or stderr was not fully read. Afterwards, fail if any
+        // such faulty processes were encountered.
+ //
+ // Note that we only issue diagnostics for the first failure.
+ //
+ // Note: must be called after wait_pipe() and only once.
+ //
+ auto complete_pipe = [&pp, &t] ()
+ {
+ pipe_process* b (pp.next); // Left-most program.
+ assert (b != nullptr); // The lambda can only be called once.
+ pp.next = nullptr;
+
+ bool fail (false);
+ for (pipe_process* p (b); p != nullptr; p = p->next)
+ {
+ assert (p->proc != nullptr); // The lambda can only be called once.
+
+ // Collect the exit status, if present.
+ //
+ // Absent if the process misses the deadline.
+ //
+ optional<process_exit> pe;
+
+ const process& pr (*p->proc);
+
+#ifndef _WIN32
+ if (!(p->terminated &&
+ !pr.exit->normal () &&
+ pr.exit->signal () == SIGTERM))
+#else
+ if (!(p->terminated &&
+ !pr.exit->normal () &&
+ pr.exit->status == DBG_TERMINATE_PROCESS))
+#endif
+ pe = pr.exit;
+
+ p->proc = nullptr;
+
+ // Verify the exit status and issue the diagnostics on failure.
+ //
+ // Note that we only issue diagnostics for the first failure but
+ // continue iterating to reset process pointers to NULL. Also note
+ // that if the test program fails, then the potential diff's
+ // diagnostics is suppressed since it is always buffered.
+ //
+ if (!fail)
+ {
+ diag_record dr;
+
+ // Note that there can be a race, so that the process we have
+ // terminated due to reaching the deadline has in fact exited
+ // normally. Thus, the 'unread stderr' situation can also happen
+              // to a successfully terminated process. If that's the case, we
+              // report this problem as the main error; otherwise, we report
+              // it as a secondary error.
+ //
+ if (!pe ||
+ !pe->normal () ||
+ pe->code () != 0 ||
+ p->unread_stderr)
+ {
+ fail = true;
+
+ dr << error << "test " << t << " failed" // Multi test: test 1.
+ << error << "process " << p->args[0] << ' ';
+
+ if (!pe)
+ {
+ dr << "terminated: execution timeout expired";
+
+ if (p->unread_stderr)
+ dr << error << "stderr not closed after exit";
+ }
+ else if (!pe->normal () || pe->code () != 0)
+ {
+ dr << *pe;
+
+ if (p->unread_stderr)
+ dr << error << "stderr not closed after exit";
+ }
+ else
+ {
+ assert (p->unread_stderr);
+
+ dr << "stderr not closed after exit";
+ }
+
+ if (verb == 1)
+ {
+ dr << info << "test command line: ";
+
+ for (pipe_process* p (b); p != nullptr; p = p->next)
+ {
+ if (p != b)
+ dr << " | ";
+
+ print_process (dr, p->args);
+ }
+ }
+ }
+
+ // Now print the buffered stderr, if present, and/or flush the
+ // diagnostics, if issued.
+ //
+ if (p->dbuf.is_open ())
+ p->dbuf.close (move (dr));
}
}
+
+ if (fail)
+ throw failed ();
};
- process p (prev == nullptr
- ? process (args, 0, out) // First process.
- : process (args, prev->proc, out)); // Next process.
+ process p;
+ {
+ process::pipe ep;
+ {
+ fdpipe p;
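+          // Note: a -1 result from diag_buffer::pipe() means the diagnostics
+          // should be buffered, in which case we open a pipe for the child's
+          // stderr; otherwise the child just inherits stderr (fd 2) below.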
+ if (diag_buffer::pipe (t.ctx, pp.force_dbuf) == -1) // Buffering?
+ {
+ try
+ {
+ p = fdopen_pipe ();
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to redirect stderr: " << e;
+ }
+
+ // Note that we must return non-owning fd to our end of the pipe
+ // (see the process class for details).
+ //
+ ep = process::pipe (p.in.get (), move (p.out));
+ }
+ else
+ ep = process::pipe (-1, 2);
+
+ // Note that we must open the diag buffer regardless of the
+ // diag_buffer::pipe() result.
+ //
+ pp.dbuf.open (args[0], move (p.in), fdstream_mode::non_blocking);
+ }
+
+ p = (prev == nullptr
+ ? process (args, 0, out, move (ep)) // First process.
+ : process (args, *prev->proc, out, move (ep))); // Next process.
+ }
- pipe_process pp (p, args[0], prev);
+ pp.proc = &p;
- // If the deadline is specified, then make sure we don't miss it
- // waiting indefinitely in the process destructor on the right-hand
- // part of the pipe failure.
+ // If the right-hand part of the pipe fails, then make sure we don't
+ // wait indefinitely in the process destructor if the deadline is
+ // specified or just because a process is blocked on stderr.
//
- auto g (make_exception_guard ([&deadline, &pp, &term_pipe] ()
+ auto g (make_exception_guard ([&pp, &term_pipe] ()
{
- if (deadline)
+ if (pp.proc != nullptr)
try
{
+ // Close all buffered pipeline stderr streams ignoring io_error
+ // exceptions.
+ //
+ for (pipe_process* p (&pp); p != nullptr; p = p->prev)
+ {
+ if (p->dbuf.is.is_open ())
+ try
+ {
+ p->dbuf.is.close();
+ }
+ catch (const io_error&) {}
+ }
+
term_pipe (&pp);
}
catch (const failed&)
@@ -764,25 +1095,17 @@ namespace build2
}
}));
- pr = *next == nullptr || run_test (t, dr, next, deadline, &pp);
-
- if (!deadline)
- p.wait ();
- else if (!timed_wait (p, *deadline))
- term_pipe (&pp);
+ if (!last)
+ run_test (t, next, ofd, deadline, &pp);
- assert (p.exit);
-
-#ifndef _WIN32
- if (!(pp.terminated &&
- !p.exit->normal () &&
- p.exit->signal () == SIGTERM))
-#else
- if (!(pp.terminated &&
- !p.exit->normal () &&
- p.exit->status == DBG_TERMINATE_PROCESS))
-#endif
- pe = *p.exit;
+ // Complete the pipeline execution, if not done yet.
+ //
+ if (pp.proc != nullptr)
+ {
+ read_pipe ();
+ wait_pipe ();
+ complete_pipe ();
+ }
}
catch (const process_error& e)
{
@@ -793,24 +1116,6 @@ namespace build2
throw failed ();
}
-
- bool wr (pe && pe->normal () && pe->code () == 0);
-
- if (!wr)
- {
- if (pr) // First failure?
- dr << fail << "test " << t << " failed"; // Multi test: test 1.
-
- dr << error;
- print_process (dr, args);
-
- if (pe)
- dr << " " << *pe;
- else
- dr << " terminated: execution timeout expired";
- }
-
- return pr && wr;
}
target_state rule::
@@ -856,7 +1161,7 @@ namespace build2
fail << "invalid test executable override: '" << *n << "'";
else
{
- // Must be a target name.
+ // Must be a target name. Could be from src (e.g., a script).
//
// @@ OUT: what if this is a @-qualified pair of names?
//
@@ -986,10 +1291,19 @@ namespace build2
// Do we have stdout?
//
+ // If we do, then match it using diff. Also redirect the diff's stdout
+ // to stderr, similar to how we do that for the script (see
+ // script::check_output() for the reasoning). That will also prevent the
+ // diff's output from interleaving with any other output.
+ //
path dp ("diff");
process_path dpp;
+ int ofd (1);
+
if (pass_n != pts_n && pts[pass_n + 1] != nullptr)
{
+ ofd = 2;
+
const file& ot (pts[pass_n + 1]->as<file> ());
const path& op (ot.path ());
assert (!op.empty ()); // Should have been assigned by update.
@@ -1035,25 +1349,29 @@ namespace build2
args.push_back (nullptr); // Second.
if (verb >= 2)
- print_process (args);
+ print_process (args); // Note: prints the whole pipeline.
else if (verb)
- text << "test " << tt;
+ print_diag ("test", tt);
if (!ctx.dry_run)
{
- diag_record dr;
- pipe_process pp (cat, "cat", nullptr);
-
- if (!run_test (tt,
- dr,
- args.data () + (sin ? 3 : 0), // Skip cat.
- test_deadline (tt),
- sin ? &pp : nullptr))
+ pipe_process pp (tt.ctx,
+ args.data (), // Note: only cat's args are considered.
+ false /* force_dbuf */,
+ nullptr /* prev */,
+ nullptr /* next */);
+
+ if (sin)
{
- dr << info << "test command line: ";
- print_process (dr, args);
- dr << endf; // return
+ pp.next = &pp; // Points to itself.
+ pp.proc = &cat;
}
+
+ run_test (tt,
+ args.data () + (sin ? 3 : 0), // Skip cat.
+ ofd,
+ test_deadline (tt),
+ sin ? &pp : nullptr);
}
return target_state::changed;
diff --git a/libbuild2/test/rule.hxx b/libbuild2/test/rule.hxx
index e96b68b..6fcf208 100644
--- a/libbuild2/test/rule.hxx
+++ b/libbuild2/test/rule.hxx
@@ -20,7 +20,7 @@ namespace build2
{
public:
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
virtual recipe
apply (action, target&) const override;
@@ -34,10 +34,10 @@ namespace build2
target_state
perform_script (action, const target&, size_t) const;
- rule (common_data&& d, bool see_through_only)
- : common (move (d)), see_through (see_through_only) {}
+ rule (common_data&& d, bool sto)
+ : common (move (d)), see_through_only (sto) {}
- bool see_through;
+ bool see_through_only;
};
class default_rule: public rule
diff --git a/libbuild2/test/script/lexer+for-loop.test.testscript b/libbuild2/test/script/lexer+for-loop.test.testscript
new file mode 100644
index 0000000..fcd12f7
--- /dev/null
+++ b/libbuild2/test/script/lexer+for-loop.test.testscript
@@ -0,0 +1,231 @@
+# file : libbuild2/test/script/lexer+for-loop.test.testscript
+# license : MIT; see accompanying LICENSE file
+
+test.arguments = for-loop
+
+: semi
+{
+ : immediate
+ :
+ $* <"cmd;" >>EOO
+ 'cmd'
+ ;
+ <newline>
+ EOO
+
+ : separated
+ :
+ $* <"cmd ;" >>EOO
+ 'cmd'
+ ;
+ <newline>
+ EOO
+
+ : only
+ :
+ $* <";" >>EOO
+ ;
+ <newline>
+ EOO
+}
+
+: colon
+:
+{
+ : immediate
+ :
+ $* <"cmd: dsc" >>EOO
+ 'cmd'
+ :
+ 'dsc'
+ <newline>
+ EOO
+
+ : separated
+ :
+ $* <"cmd :dsc" >>EOO
+ 'cmd'
+ :
+ 'dsc'
+ <newline>
+ EOO
+
+ : only
+ :
+ $* <":" >>EOO
+ :
+ <newline>
+ EOO
+}
+
+: redirect
+:
+{
+ : pass
+ :
+ $* <"cmd <| 1>|" >>EOO
+ 'cmd'
+ <|
+ '1'
+ >|
+ <newline>
+ EOO
+
+ : null
+ :
+ $* <"cmd <- 1>-" >>EOO
+ 'cmd'
+ <-
+ '1'
+ >-
+ <newline>
+ EOO
+
+ : trace
+ :
+ $* <"cmd 1>!" >>EOO
+ 'cmd'
+ '1'
+ >!
+ <newline>
+ EOO
+
+ : merge
+ :
+ $* <"cmd 1>&2" >>EOO
+ 'cmd'
+ '1'
+ >&
+ '2'
+ <newline>
+ EOO
+
+ : str
+ :
+ $* <"cmd <a 1>b" >>EOO
+ 'cmd'
+ <
+ 'a'
+ '1'
+ >
+ 'b'
+ <newline>
+ EOO
+
+ : str-nn
+ :
+ $* <"cmd <:a 1>:b" >>EOO
+ 'cmd'
+ <:
+ 'a'
+ '1'
+ >:
+ 'b'
+ <newline>
+ EOO
+
+ : doc
+ :
+ $* <"cmd <<EOI 1>>EOO" >>EOO
+ 'cmd'
+ <<
+ 'EOI'
+ '1'
+ >>
+ 'EOO'
+ <newline>
+ EOO
+
+ : doc-nn
+ :
+ $* <"cmd <<:EOI 1>>:EOO" >>EOO
+ 'cmd'
+ <<:
+ 'EOI'
+ '1'
+ >>:
+ 'EOO'
+ <newline>
+ EOO
+
+ : file-cmp
+ :
+ $* <"cmd <<<in >>>out 2>>>err" >>EOO
+ 'cmd'
+ <<<
+ 'in'
+ >>>
+ 'out'
+ '2'
+ >>>
+ 'err'
+ <newline>
+ EOO
+
+ : file-write
+ :
+ $* <"cmd >=out 2>+err" >>EOO
+ 'cmd'
+ >=
+ 'out'
+ '2'
+ >+
+ 'err'
+ <newline>
+ EOO
+}
+
+: cleanup
+:
+{
+ : always
+ :
+ $* <"cmd &file" >>EOO
+ 'cmd'
+ &
+ 'file'
+ <newline>
+ EOO
+
+ : maybe
+ :
+ $* <"cmd &?file" >>EOO
+ 'cmd'
+ &?
+ 'file'
+ <newline>
+ EOO
+
+ : never
+ :
+ $* <"cmd &!file" >>EOO
+ 'cmd'
+ &!
+ 'file'
+ <newline>
+ EOO
+}
+
+: for
+:
+{
+ : form-1
+ :
+ $* <"for x: a" >>EOO
+ 'for'
+ 'x'
+ :
+ 'a'
+ <newline>
+ EOO
+
+ : form-3
+ :
+ $* <"for <<<a x" >>EOO
+ 'for'
+ <<<
+ 'a'
+ 'x'
+ <newline>
+ EOO
+}
diff --git a/libbuild2/test/script/lexer.cxx b/libbuild2/test/script/lexer.cxx
index f9c8ac6..aec91fc 100644
--- a/libbuild2/test/script/lexer.cxx
+++ b/libbuild2/test/script/lexer.cxx
@@ -34,13 +34,16 @@ namespace build2
bool q (true); // quotes
if (!esc)
- {
- assert (!state_.empty ());
- esc = state_.top ().escapes;
- }
+ esc = current_state ().escapes;
switch (m)
{
+ case lexer_mode::for_loop:
+ {
+ // Leading tokens of the for-loop. Like command_line but also
+ // recognizes lsbrace like value.
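+          //
+          // (This mode is entered by the parser while sensing which form of
+          // the for-loop it is dealing with; see pre_parse_line() in
+          // parser.cxx.)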
+ }
+ // Fall through.
case lexer_mode::command_line:
{
s1 = ":;=!|&<> $(#\t\n";
@@ -107,7 +110,7 @@ namespace build2
}
assert (ps == '\0');
- state_.push (
+ mode_impl (
state {m, data, nullopt, false, false, ps, s, n, q, *esc, s1, s2});
}
@@ -116,12 +119,13 @@ namespace build2
{
token r;
- switch (state_.top ().mode)
+ switch (mode ())
{
case lexer_mode::command_line:
case lexer_mode::first_token:
case lexer_mode::second_token:
case lexer_mode::variable_line:
+ case lexer_mode::for_loop:
r = next_line ();
break;
case lexer_mode::description_line:
@@ -144,7 +148,7 @@ namespace build2
xchar c (get ());
uint64_t ln (c.line), cn (c.column);
- state st (state_.top ()); // Make copy (see first/second_token).
+ state st (current_state ()); // Make copy (see first/second_token).
lexer_mode m (st.mode);
auto make_token = [&sep, ln, cn] (type t)
@@ -157,9 +161,10 @@ namespace build2
//
if (st.lsbrace)
{
- assert (m == lexer_mode::variable_line);
+ assert (m == lexer_mode::variable_line ||
+ m == lexer_mode::for_loop);
- state_.top ().lsbrace = false; // Note: st is a copy.
+ current_state ().lsbrace = false; // Note: st is a copy.
if (c == '[' && (!st.lsbrace_unsep || !sep))
return make_token (type::lsbrace);
@@ -172,7 +177,7 @@ namespace build2
// we push any new mode (e.g., double quote).
//
if (m == lexer_mode::first_token || m == lexer_mode::second_token)
- state_.pop ();
+ expire_mode ();
// NOTE: remember to update mode() if adding new special characters.
@@ -183,7 +188,7 @@ namespace build2
// Expire variable value mode at the end of the line.
//
if (m == lexer_mode::variable_line)
- state_.pop ();
+ expire_mode ();
sep = true; // Treat newline as always separated.
return make_token (type::newline);
@@ -197,10 +202,11 @@ namespace build2
// Line separators.
//
- if (m == lexer_mode::command_line ||
- m == lexer_mode::first_token ||
- m == lexer_mode::second_token ||
- m == lexer_mode::variable_line)
+ if (m == lexer_mode::command_line ||
+ m == lexer_mode::first_token ||
+ m == lexer_mode::second_token ||
+ m == lexer_mode::variable_line ||
+ m == lexer_mode::for_loop)
{
switch (c)
{
@@ -210,7 +216,8 @@ namespace build2
if (m == lexer_mode::command_line ||
m == lexer_mode::first_token ||
- m == lexer_mode::second_token)
+ m == lexer_mode::second_token ||
+ m == lexer_mode::for_loop)
{
switch (c)
{
@@ -222,7 +229,8 @@ namespace build2
//
if (m == lexer_mode::command_line ||
m == lexer_mode::first_token ||
- m == lexer_mode::second_token)
+ m == lexer_mode::second_token ||
+ m == lexer_mode::for_loop)
{
switch (c)
{
@@ -244,7 +252,8 @@ namespace build2
//
if (m == lexer_mode::command_line ||
m == lexer_mode::first_token ||
- m == lexer_mode::second_token)
+ m == lexer_mode::second_token ||
+ m == lexer_mode::for_loop)
{
if (optional<token> t = next_cmd_op (c, sep))
return move (*t);
@@ -310,7 +319,7 @@ namespace build2
if (c == '\n')
{
get ();
- state_.pop (); // Expire the description mode.
+ expire_mode (); // Expire the description mode.
return token (type::newline, true, ln, cn, token_printer);
}
@@ -330,15 +339,17 @@ namespace build2
}
token lexer::
- word (state st, bool sep)
+ word (const state& st, bool sep)
{
- lexer_mode m (st.mode);
+ lexer_mode m (st.mode); // Save.
token r (base_lexer::word (st, sep));
if (m == lexer_mode::variable)
{
- if (r.value.size () == 1 && digit (r.value[0])) // $N
+ if (r.type == type::word &&
+ r.value.size () == 1 &&
+ digit (r.value[0])) // $N
{
xchar c (peek ());
diff --git a/libbuild2/test/script/lexer.hxx b/libbuild2/test/script/lexer.hxx
index 452e794..39b950a 100644
--- a/libbuild2/test/script/lexer.hxx
+++ b/libbuild2/test/script/lexer.hxx
@@ -24,10 +24,11 @@ namespace build2
enum
{
command_line = base_type::value_next,
- first_token, // Expires at the end of the token.
- second_token, // Expires at the end of the token.
- variable_line, // Expires at the end of the line.
- description_line // Expires at the end of the line.
+ first_token, // Expires at the end of the token.
+ second_token, // Expires at the end of the token.
+ variable_line, // Expires at the end of the line.
+ description_line, // Expires at the end of the line.
+ for_loop // Used for sensing the for-loop leading tokens.
};
lexer_mode () = default;
@@ -67,6 +68,8 @@ namespace build2
static redirect_aliases_type redirect_aliases;
private:
+ using build2::script::lexer::mode; // Getter.
+
token
next_line ();
@@ -74,7 +77,7 @@ namespace build2
next_description ();
virtual token
- word (state, bool) override;
+ word (const state&, bool) override;
};
}
}
diff --git a/libbuild2/test/script/lexer.test.cxx b/libbuild2/test/script/lexer.test.cxx
index 76f102d..ef3ce4d 100644
--- a/libbuild2/test/script/lexer.test.cxx
+++ b/libbuild2/test/script/lexer.test.cxx
@@ -36,6 +36,7 @@ namespace build2
else if (s == "variable-line") m = lexer_mode::variable_line;
else if (s == "description-line") m = lexer_mode::description_line;
else if (s == "variable") m = lexer_mode::variable;
+ else if (s == "for-loop") m = lexer_mode::for_loop;
else assert (false);
}
diff --git a/libbuild2/test/script/parser+command-if.test.testscript b/libbuild2/test/script/parser+command-if.test.testscript
index 0b72b4a..9e223dd 100644
--- a/libbuild2/test/script/parser+command-if.test.testscript
+++ b/libbuild2/test/script/parser+command-if.test.testscript
@@ -315,6 +315,7 @@
}
: end
+:
{
: without-if
:
@@ -322,7 +323,7 @@
cmd
end
EOI
- testscript:2:1: error: 'end' without preceding 'if'
+ testscript:2:1: error: 'end' without preceding 'if', 'for', or 'while'
EOE
: without-if-semi
@@ -331,10 +332,11 @@
cmd;
end
EOI
- testscript:2:1: error: 'end' without preceding 'if'
+ testscript:2:1: error: 'end' without preceding 'if', 'for', or 'while'
EOE
: before
+ :
{
: semi
:
diff --git a/libbuild2/test/script/parser+command-re-parse.test.testscript b/libbuild2/test/script/parser+command-re-parse.test.testscript
index 84465b3..5a082eb 100644
--- a/libbuild2/test/script/parser+command-re-parse.test.testscript
+++ b/libbuild2/test/script/parser+command-re-parse.test.testscript
@@ -4,7 +4,7 @@
: double-quote
:
$* <<EOI >>EOO
-x = cmd \">-\" "'<-'"
+x = [cmdline] cmd \">-\" "'<-'"
$x
EOI
cmd '>-' '<-'
diff --git a/libbuild2/test/script/parser+description.test.testscript b/libbuild2/test/script/parser+description.test.testscript
index cee540f..f656b7d 100644
--- a/libbuild2/test/script/parser+description.test.testscript
+++ b/libbuild2/test/script/parser+description.test.testscript
@@ -313,7 +313,7 @@
x = y
end
EOI
- testscript:2:1: error: description before/after setup/teardown variable-if
+ testscript:2:1: error: description before/after setup/teardown variable-only 'if'
EOE
: var-if-after
@@ -323,7 +323,7 @@
x = y
end : foo
EOI
- testscript:1:1: error: description before/after setup/teardown variable-if
+ testscript:1:1: error: description before/after setup/teardown variable-only 'if'
EOE
: test
diff --git a/libbuild2/test/script/parser+expansion.test.testscript b/libbuild2/test/script/parser+expansion.test.testscript
index 77a7d6d..c31b0ad 100644
--- a/libbuild2/test/script/parser+expansion.test.testscript
+++ b/libbuild2/test/script/parser+expansion.test.testscript
@@ -27,7 +27,7 @@ EOE
: invalid-redirect
:
$* <<EOI 2>>EOE != 0
-x = "1>&a"
+x = [cmdline] "1>&a"
cmd $x
EOI
<string>:1:4: error: stdout merge redirect file descriptor must be 2
diff --git a/libbuild2/test/script/parser+for.test.testscript b/libbuild2/test/script/parser+for.test.testscript
new file mode 100644
index 0000000..985f9c9
--- /dev/null
+++ b/libbuild2/test/script/parser+for.test.testscript
@@ -0,0 +1,1029 @@
+# file : libbuild2/test/script/parser+for.test.testscript
+# license : MIT; see accompanying LICENSE file
+
+: form-1
+:
+: for x: ...
+:
+{
+ : for
+ :
+ {
+ : no-var
+ :
+ $* <<EOI 2>>EOE != 0
+ for
+ cmd
+ end
+ EOI
+ testscript:1:1: error: for: missing variable name
+ EOE
+
+ : untyped
+ :
+ $* <<EOI >>EOO
+ for x: a b
+ cmd $x
+ end
+ EOI
+ cmd a
+ cmd b
+ EOO
+
+ : null
+ :
+ $* <<EOI >:''
+ for x: [null]
+ cmd $x
+ end
+ EOI
+
+ : empty
+ :
+ $* <<EOI >:''
+ for x:
+ cmd $x
+ end
+ EOI
+
+ : expansion
+ :
+ $* <<EOI >>EOO
+ vs = a b
+ for x: $vs
+ cmd $x
+ end
+ EOI
+ cmd a
+ cmd b
+ EOO
+
+ : typed-values
+ :
+ $* <<EOI >>~%EOO%
+ for x: [dir_paths] a b
+ cmd $x
+ end
+ EOI
+ %cmd (a/|'a\\')%
+ %cmd (b/|'b\\')%
+ EOO
+
+ : typed-elem
+ :
+ $* <<EOI >>~%EOO%
+ for x [dir_path]: a b
+ cmd $x
+ end
+ EOI
+ %cmd (a/|'a\\')%
+ %cmd (b/|'b\\')%
+ EOO
+
+ : typed-elem-value
+ :
+ $* <<EOI >>~%EOO%
+ for x [dir_path]: [strings] a b
+ cmd $x
+ end
+ EOI
+ %cmd (a/|'a\\')%
+ %cmd (b/|'b\\')%
+ EOO
+
+ : scope-var
+ :
+ $* <<EOI >>EOO
+ x = x
+
+ for x: a b
+ cmd $x
+ end
+
+ -cmd $x
+ EOI
+ cmd a
+ cmd b
+ -cmd x
+ EOO
+ }
+
+ : after-semi
+ :
+ $* -s <<EOI >>EOO
+ cmd1;
+ for x: a b
+ cmd2 $x
+ end
+ EOI
+ {
+ {
+ cmd1
+ cmd2 a
+ cmd2 b
+ }
+ }
+ EOO
+
+ : setup
+ :
+ $* -s <<EOI >>EOO
+ +for x: a b
+ cmd $x
+ end
+ EOI
+ {
+ +cmd a
+ +cmd b
+ }
+ EOO
+
+ : tdown
+ :
+ $* -s <<EOI >>EOO
+ -for x: a b
+ cmd $x
+ end
+ EOI
+ {
+ -cmd a
+ -cmd b
+ }
+ EOO
+
+ : end
+ :
+ {
+ : without-end
+ :
+ $* <<EOI 2>>EOE != 0
+ for x: a b
+ cmd
+ EOI
+ testscript:3:1: error: expected closing 'end'
+ EOE
+ }
+
+ : elif
+ :
+ {
+ : without-if
+ :
+ $* <<EOI 2>>EOE != 0
+ for x: a b
+ elif true
+ cmd
+ end
+ end
+ EOI
+ testscript:2:3: error: 'elif' without preceding 'if'
+ EOE
+ }
+
+ : nested
+ :
+ {
+ $* -l -r <<EOI >>EOO
+ for x: a b
+ cmd1 $x # 1
+ if ($x == "a") # 2
+ cmd2 # 3
+ for y: x y
+ cmd3 # 4
+ end
+ else
+ cmd4 # 5
+ end
+ cmd5 # 6
+ end;
+ cmd6 # 7
+ EOI
+ cmd1 a # 1 i1
+ ? true # 2 i1
+ cmd2 # 3 i1
+ cmd3 # 4 i1 i1
+ cmd3 # 4 i1 i2
+ cmd5 # 6 i1
+ cmd1 b # 1 i2
+ ? false # 2 i2
+ cmd4 # 5 i2
+ cmd5 # 6 i2
+ cmd6 # 7
+ EOO
+ }
+
+ : contained
+ :
+ {
+ : semi
+ :
+ $* <<EOI 2>>EOE != 0
+ for x:
+ cmd;
+ cmd
+ end
+ EOI
+ testscript:2:3: error: ';' inside 'for'
+ EOE
+
+ : colon-leading
+ :
+ $* <<EOI 2>>EOE != 0
+ for x:
+ : foo
+ cmd
+ end
+ EOI
+ testscript:2:3: error: description inside 'for'
+ EOE
+
+ : colon-trailing
+ :
+ $* <<EOI 2>>EOE != 0
+ for x:
+ cmd : foo
+ end
+ EOI
+ testscript:2:3: error: description inside 'for'
+ EOE
+
+ : eos
+ :
+ $* <<EOI 2>>EOE != 0
+ for x:
+ EOI
+ testscript:2:1: error: expected closing 'end'
+ EOE
+
+ : scope
+ :
+ $* <<EOI 2>>EOE != 0
+ for x:
+ cmd
+ {
+ }
+ end
+ EOI
+ testscript:3:3: error: expected closing 'end'
+ EOE
+
+ : setup
+ :
+ $* <<EOI 2>>EOE != 0
+ for x:
+ +cmd
+ end
+ EOI
+ testscript:2:3: error: setup command inside 'for'
+ EOE
+
+ : tdown
+ :
+ $* <<EOI 2>>EOE != 0
+ for x:
+ -cmd
+ end
+ EOI
+ testscript:2:3: error: teardown command inside 'for'
+ EOE
+ }
+
+ : var
+ :
+ $* <<EOI >>EOO
+ for x: a b
+ cmd1 $x
+ end;
+ cmd2 $x
+ EOI
+ cmd1 a
+ cmd1 b
+ cmd2 b
+ EOO
+
+ : leading-and-trailing-description
+ :
+ $* <<EOI 2>>EOE != 0
+ : foo
+ for x: a b
+ cmd
+ end : bar
+ EOI
+ testscript:4:1: error: both leading and trailing descriptions
+ EOE
+}
+
+: form-2
+:
+: ... | for x
+:
+{
+ : for
+ :
+ {
+ : status
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x != 0
+ cmd
+ end
+ EOI
+ testscript:1:20: error: for-loop exit code cannot be checked
+ EOE
+
+ : not-last
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x | echo x
+ cmd
+ end
+ EOI
+ testscript:1:20: error: for-loop must be last command in a pipe
+ EOE
+
+ : not-last-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x|echo x
+ cmd
+ end
+ EOI
+ testscript:1:19: error: for-loop must be last command in a pipe
+ EOE
+
+ : expression-after
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x && echo x
+ cmd
+ end
+ EOI
+ testscript:1:20: error: command expression involving for-loop
+ EOE
+
+ : expression-after-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x&&echo x
+ cmd
+ end
+ EOI
+ testscript:1:19: error: command expression involving for-loop
+ EOE
+
+ : expression-before
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' && echo x | for x
+ cmd
+ end
+ EOI
+ testscript:1:24: error: command expression involving for-loop
+ EOE
+
+ : expression-before-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' && echo x|for x
+ cmd
+ end
+ EOI
+ testscript:1:22: error: command expression involving for-loop
+ EOE
+
+ : cleanup
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x &f
+ cmd
+ end
+ EOI
+ testscript:1:20: error: cleanup in for-loop
+ EOE
+
+ : cleanup-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x&f
+ cmd
+ end
+ EOI
+ testscript:1:19: error: cleanup in for-loop
+ EOE
+
+ : stdout-redirect
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x >a
+ cmd
+ end
+ EOI
+ testscript:1:20: error: output redirect in for-loop
+ EOE
+
+ : stdout-redirect-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x>a
+ cmd
+ end
+ EOI
+ testscript:1:19: error: output redirect in for-loop
+ EOE
+
+ : stdin-redirect
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x <a
+ cmd
+ end
+ EOI
+ testscript:1:20: error: stdin is both piped and redirected
+ EOE
+
+ : no-var
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for
+ cmd
+ end
+ EOI
+ testscript:1:1: error: for: missing variable name
+ EOE
+
+ : untyped
+ :
+ $* <<EOI >>EOO
+ echo 'a b' | for -w x
+ cmd $x
+ end
+ EOI
+ echo 'a b' | for -w x
+ EOO
+
+ : expansion
+ :
+ $* <<EOI >>EOO
+ vs = a b
+ echo $vs | for x
+ cmd $x
+ end
+ EOI
+ echo a b | for x
+ EOO
+
+ : typed-elem
+ :
+ $* <<EOI >>EOO
+ echo 'a b' | for -w x [dir_path]
+ cmd $x
+ end
+ EOI
+ echo 'a b' | for -w x [dir_path]
+ EOO
+ }
+
+ : after-semi
+ :
+ $* -s <<EOI >>EOO
+ cmd1;
+ echo 'a b' | for x
+ cmd2 $x
+ end
+ EOI
+ {
+ {
+ cmd1
+ echo 'a b' | for x
+ }
+ }
+ EOO
+
+ : setup
+ :
+ $* -s <<EOI >>EOO
+ +echo 'a b' | for x
+ cmd $x
+ end
+ EOI
+ {
+ +echo 'a b' | for x
+ }
+ EOO
+
+ : tdown
+ :
+ $* -s <<EOI >>EOO
+ -echo 'a b' | for x
+ cmd $x
+ end
+ EOI
+ {
+ -echo 'a b' | for x
+ }
+ EOO
+
+ : end
+ :
+ {
+ : without-end
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ cmd
+ EOI
+ testscript:3:1: error: expected closing 'end'
+ EOE
+ }
+
+ : elif
+ :
+ {
+ : without-if
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ elif true
+ cmd
+ end
+ end
+ EOI
+ testscript:2:3: error: 'elif' without preceding 'if'
+ EOE
+ }
+
+ : nested
+ :
+ {
+ $* -l -r <<EOI >>EOO
+ echo 'a b' | for x # 1
+ cmd1 $x # 2
+ if ($x == "a") # 3
+ cmd2 # 4
+ echo x y | for y # 5
+ cmd3 # 6
+ end
+ else
+ cmd4 # 7
+ end
+ cmd5 # 8
+ end;
+ cmd6 # 9
+ EOI
+ echo 'a b' | for x # 1
+ cmd6 # 9
+ EOO
+ }
+
+ : contained
+ :
+ {
+ : semi
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ cmd;
+ cmd
+ end
+ EOI
+ testscript:2:3: error: ';' inside 'for'
+ EOE
+
+ : colon-leading
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ : foo
+ cmd
+ end
+ EOI
+ testscript:2:3: error: description inside 'for'
+ EOE
+
+ : colon-trailing
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ cmd : foo
+ end
+ EOI
+ testscript:2:3: error: description inside 'for'
+ EOE
+
+ : eos
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ EOI
+ testscript:2:1: error: expected closing 'end'
+ EOE
+
+ : scope
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ cmd
+ {
+ }
+ end
+ EOI
+ testscript:3:3: error: expected closing 'end'
+ EOE
+
+ : setup
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ +cmd
+ end
+ EOI
+ testscript:2:3: error: setup command inside 'for'
+ EOE
+
+ : tdown
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ -cmd
+ end
+ EOI
+ testscript:2:3: error: teardown command inside 'for'
+ EOE
+ }
+
+ : leading-and-trailing-description
+ :
+ $* <<EOI 2>>EOE != 0
+ : foo
+ echo 'a b' | for x
+ cmd
+ end : bar
+ EOI
+ testscript:4:1: error: both leading and trailing descriptions
+ EOE
+}
+
+: form-3
+:
+: for x <...
+:
+{
+ : for
+ :
+ {
+ : status
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <a != 0
+ cmd
+ end
+ EOI
+ testscript:1:10: error: for-loop exit code cannot be checked
+ EOE
+
+ : not-last
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <a | echo x
+ cmd
+ end
+ EOI
+ testscript:1:10: error: for-loop must be last command in a pipe
+ EOE
+
+ : not-last-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ for <a x|echo x
+ cmd
+ end
+ EOI
+ testscript:1:9: error: for-loop must be last command in a pipe
+ EOE
+
+ : expression-after
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <a && echo x
+ cmd
+ end
+ EOI
+ testscript:1:10: error: command expression involving for-loop
+ EOE
+
+ : expression-after-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ for <a x&&echo x
+ cmd
+ end
+ EOI
+ testscript:1:9: error: command expression involving for-loop
+ EOE
+
+ : expression-before
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' && for x <a
+ cmd
+ end
+ EOI
+ testscript:1:15: error: command expression involving for-loop
+ EOE
+
+ : cleanup
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <a &f
+ cmd
+ end
+ EOI
+ testscript:1:10: error: cleanup in for-loop
+ EOE
+
+ : cleanup-before-var
+ :
+ $* <<EOI 2>>EOE != 0
+ for &f x <a
+ cmd
+ end
+ EOI
+ testscript:1:5: error: cleanup in for-loop
+ EOE
+
+ : cleanup-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ for <a x&f
+ cmd
+ end
+ EOI
+ testscript:1:9: error: cleanup in for-loop
+ EOE
+
+ : stdout-redirect
+ :
+ $* <<EOI 2>>EOE != 0
+ for x >a
+ cmd
+ end
+ EOI
+ testscript:1:7: error: output redirect in for-loop
+ EOE
+
+ : stdout-redirect-before-var
+ :
+ $* <<EOI 2>>EOE != 0
+ for >a x
+ cmd
+ end
+ EOI
+ testscript:1:5: error: output redirect in for-loop
+ EOE
+
+ : stdout-redirect-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ for x>a
+ cmd
+ end
+ EOI
+ testscript:1:6: error: output redirect in for-loop
+ EOE
+
+ : no-var
+ :
+ $* <<EOI 2>>EOE != 0
+ for <a
+ cmd
+ end
+ EOI
+ testscript:1:1: error: for: missing variable name
+ EOE
+
+ : quoted-opt
+ :
+ $* <<EOI >>EOO
+ o = -w
+ for "$o" x <'a b'
+ cmd $x
+ end;
+ for "($o)" x <'a b'
+ cmd $x
+ end
+ EOI
+ for -w x <'a b'
+ for -w x <'a b'
+ EOO
+
+ : untyped
+ :
+ $* <<EOI >>EOO
+ for -w x <'a b'
+ cmd $x
+ end
+ EOI
+ for -w x <'a b'
+ EOO
+
+ : expansion
+ :
+ $* <<EOI >>EOO
+ vs = a b
+ for x <$vs
+ cmd $x
+ end
+ EOI
+ for x b <a
+ EOO
+
+ : typed-elem
+ :
+ $* <<EOI >>EOO
+ for -w x [dir_path] <'a b'
+ cmd $x
+ end
+ EOI
+ for -w x [dir_path] <'a b'
+ EOO
+ }
+
+ : after-semi
+ :
+ $* -s <<EOI >>EOO
+ cmd1;
+ for x <'a b'
+ cmd2 $x
+ end
+ EOI
+ {
+ {
+ cmd1
+ for x <'a b'
+ }
+ }
+ EOO
+
+ : setup
+ :
+ $* -s <<EOI >>EOO
+ +for x <'a b'
+ cmd $x
+ end
+ EOI
+ {
+ +for x <'a b'
+ }
+ EOO
+
+ : tdown
+ :
+ $* -s <<EOI >>EOO
+ -for x <'a b'
+ cmd $x
+ end
+ EOI
+ {
+ -for x <'a b'
+ }
+ EOO
+
+ : end
+ :
+ {
+ : without-end
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ cmd
+ EOI
+ testscript:3:1: error: expected closing 'end'
+ EOE
+ }
+
+ : elif
+ :
+ {
+ : without-if
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ elif true
+ cmd
+ end
+ end
+ EOI
+ testscript:2:3: error: 'elif' without preceding 'if'
+ EOE
+ }
+
+ : nested
+ :
+ {
+ $* -l -r <<EOI >>EOO
+ for -w x <'a b' # 1
+ cmd1 $x # 2
+ if ($x == "a") # 3
+ cmd2 # 4
+ for -w y <'x y' # 5
+ cmd3 # 6
+ end
+ else
+ cmd4 # 7
+ end
+ cmd5 # 8
+ end;
+ cmd6 # 9
+ EOI
+ for -w x <'a b' # 1
+ cmd6 # 9
+ EOO
+ }
+
+ : contained
+ :
+ {
+ : semi
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ cmd;
+ cmd
+ end
+ EOI
+ testscript:2:3: error: ';' inside 'for'
+ EOE
+
+ : colon-leading
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ : foo
+ cmd
+ end
+ EOI
+ testscript:2:3: error: description inside 'for'
+ EOE
+
+ : colon-trailing
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ cmd : foo
+ end
+ EOI
+ testscript:2:3: error: description inside 'for'
+ EOE
+
+ : eos
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ EOI
+ testscript:2:1: error: expected closing 'end'
+ EOE
+
+ : scope
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ cmd
+ {
+ }
+ end
+ EOI
+ testscript:3:3: error: expected closing 'end'
+ EOE
+
+ : setup
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ +cmd
+ end
+ EOI
+ testscript:2:3: error: setup command inside 'for'
+ EOE
+
+ : tdown
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ -cmd
+ end
+ EOI
+ testscript:2:3: error: teardown command inside 'for'
+ EOE
+ }
+
+ : leading-and-trailing-description
+ :
+ $* <<EOI 2>>EOE != 0
+ : foo
+ for x <'a b'
+ cmd
+ end : bar
+ EOI
+ testscript:4:1: error: both leading and trailing descriptions
+ EOE
+}
diff --git a/libbuild2/test/script/parser+while.test.testscript b/libbuild2/test/script/parser+while.test.testscript
new file mode 100644
index 0000000..b1a2b44
--- /dev/null
+++ b/libbuild2/test/script/parser+while.test.testscript
@@ -0,0 +1,265 @@
+# file : libbuild2/test/script/parser+while.test.testscript
+# license : MIT; see accompanying LICENSE file
+
+: while
+:
+{
+ : true
+ :
+ $* <<EOI >>EOO
+ while ($v != "aa")
+ cmd "$v"
+ v = "$(v)a"
+ end
+ EOI
+ ? true
+ cmd ''
+ ? true
+ cmd a
+ ? false
+ EOO
+
+ : false
+ :
+ $* <<EOI >>EOO
+ while ($v == "aa")
+ cmd "$v"
+ v = "$(v)a"
+ end
+ EOI
+ ? false
+ EOO
+
+ : without-command
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ cmd
+ end
+ EOI
+ testscript:1:6: error: missing program
+ EOE
+
+ : after-semi
+ :
+ $* -s <<EOI >>EOO
+ cmd1;
+ while ($v != "aa")
+ cmd2 "$v"
+ v = "$(v)a"
+ end
+ EOI
+ {
+ {
+ cmd1
+ ? true
+ cmd2 ''
+ ? true
+ cmd2 a
+ ? false
+ }
+ }
+ EOO
+
+ : setup
+ :
+ $* -s <<EOI >>EOO
+ +while ($v != "aa")
+ cmd2 "$v"
+ v = "$(v)a"
+ end
+ EOI
+ {
+ ? true
+ +cmd2 ''
+ ? true
+ +cmd2 a
+ ? false
+ }
+ EOO
+
+ : tdown
+ :
+ $* -s <<EOI >>EOO
+ -while ($v != "aa")
+ cmd2 "$v"
+ v = "$(v)a"
+ end
+ EOI
+ {
+ ? true
+ -cmd2 ''
+ ? true
+ -cmd2 a
+ ? false
+ }
+ EOO
+}
+
+: end
+:
+{
+ : without-end
+ :
+ $* <<EOI 2>>EOE != 0
+ while true
+ cmd
+ EOI
+ testscript:3:1: error: expected closing 'end'
+ EOE
+}
+
+: elif
+:
+{
+ : without-if
+ :
+ $* <<EOI 2>>EOE != 0
+ while false
+ elif true
+ cmd
+ end
+ end
+ EOI
+ testscript:2:3: error: 'elif' without preceding 'if'
+ EOE
+}
+
+: nested
+:
+{
+ $* -l -r <<EOI >>EOO
+ while ($v != "aa") # 1
+ cmd1 "$v" # 2
+ if ($v == "a") # 3
+ cmd2 # 4
+ while ($v2 != "$v") # 5
+ cmd3 # 6
+ v2=$v
+ end
+ else
+ cmd4 # 7
+ end
+ cmd5 # 8
+ v = "$(v)a"
+ end;
+ cmd6
+ EOI
+ ? true # 1 i1
+ cmd1 '' # 2 i1
+ ? false # 3 i1
+ cmd4 # 7 i1
+ cmd5 # 8 i1
+ ? true # 1 i2
+ cmd1 a # 2 i2
+ ? true # 3 i2
+ cmd2 # 4 i2
+ ? true # 5 i2 i1
+ cmd3 # 6 i2 i1
+ ? false # 5 i2 i2
+ cmd5 # 8 i2
+ ? false # 1 i3
+ cmd6 # 9
+ EOO
+}
+
+: contained
+:
+{
+ : semi
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ cmd;
+ cmd
+ end
+ EOI
+ testscript:2:3: error: ';' inside 'while'
+ EOE
+
+ : colon-leading
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ : foo
+ cmd
+ end
+ EOI
+ testscript:2:3: error: description inside 'while'
+ EOE
+
+ : colon-trailing
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ cmd : foo
+ end
+ EOI
+ testscript:2:3: error: description inside 'while'
+ EOE
+
+ : eos
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ EOI
+ testscript:2:1: error: expected closing 'end'
+ EOE
+
+ : scope
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ cmd
+ {
+ }
+ end
+ EOI
+ testscript:3:3: error: expected closing 'end'
+ EOE
+
+ : setup
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ +cmd
+ end
+ EOI
+ testscript:2:3: error: setup command inside 'while'
+ EOE
+
+ : tdown
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ -cmd
+ end
+ EOI
+ testscript:2:3: error: teardown command inside 'while'
+ EOE
+}
+
+: var
+:
+$* <<EOI >>EOO
+while ($v1 != "a")
+ v1 = "$(v1)a"
+ v2 = "$v1"
+end
+cmd $v1
+EOI
+? true
+? false
+cmd a
+EOO
+
+: leading-and-trailing-description
+:
+$* <<EOI 2>>EOE != 0
+: foo
+while false
+ cmd
+end : bar
+EOI
+testscript:4:1: error: both leading and trailing descriptions
+EOE
diff --git a/libbuild2/test/script/parser.cxx b/libbuild2/test/script/parser.cxx
index 9e92f3b..b712c21 100644
--- a/libbuild2/test/script/parser.cxx
+++ b/libbuild2/test/script/parser.cxx
@@ -293,22 +293,30 @@ namespace build2
}
// Parse a logical line (as well as scope-if since the only way to
- // recognize it is to parse the if line).
+ // recognize it is to parse the if line), handling the flow control
+ // constructs recursively.
//
// If one is true then only parse one line returning an indication of
- // whether the line ended with a semicolon. If if_line is true then this
- // line can be an if-else construct flow control line (else, end, etc).
+ // whether the line ended with a semicolon. If the flow control
+ // construct type is specified, then this line is assumed to belong to
+    // such a construct.
//
bool parser::
pre_parse_line (token& t, type& tt,
optional<description>& d,
lines* ls,
bool one,
- bool if_line)
+ optional<line_type> fct)
{
// enter: next token is peeked at (type in tt)
// leave: newline
+ assert (!fct ||
+ *fct == line_type::cmd_if ||
+ *fct == line_type::cmd_while ||
+ *fct == line_type::cmd_for_stream ||
+ *fct == line_type::cmd_for_args);
+
// Note: token is only peeked at.
//
const location ll (get_location (peeked ()));
@@ -317,6 +325,52 @@ namespace build2
//
line_type lt;
type st (type::eos); // Later, can only be set to plus or minus.
+ bool semi (false);
+
+ // Parse the command line tail, starting from the newline or the
+ // potential colon/semicolon token.
+ //
+ // Note that colon and semicolon are only valid in test command lines
+ // and after 'end' in flow control constructs. Note that we always
+ // recognize them lexically, even when they are not valid tokens per
+ // the grammar.
+ //
+ auto parse_command_tail = [&t, &tt, &st, &lt, &d, &semi, &ll, this] ()
+ {
+ if (tt != type::newline)
+ {
+ if (lt != line_type::cmd && lt != line_type::cmd_end)
+ fail (t) << "expected newline instead of " << t;
+
+ switch (st)
+ {
+ case type::plus: fail (t) << t << " after setup command" << endf;
+ case type::minus: fail (t) << t << " after teardown command" << endf;
+ }
+ }
+
+ switch (tt)
+ {
+ case type::colon:
+ {
+ if (d)
+ fail (ll) << "both leading and trailing descriptions";
+
+ d = parse_trailing_description (t, tt);
+ break;
+ }
+ case type::semi:
+ {
+ semi = true;
+ replay_pop (); // See above for the reasoning.
+ next (t, tt); // Get newline.
+ break;
+ }
+ }
+
+ if (tt != type::newline)
+ fail (t) << "expected newline instead of " << t;
+ };
switch (tt)
{
@@ -364,8 +418,12 @@ namespace build2
{
const string& n (t.value);
- if (n == "if") lt = line_type::cmd_if;
- else if (n == "if!") lt = line_type::cmd_ifn;
+ // Handle the for-loop consistently with pre_parse_line_start().
+ //
+ if (n == "if") lt = line_type::cmd_if;
+ else if (n == "if!") lt = line_type::cmd_ifn;
+ else if (n == "while") lt = line_type::cmd_while;
+ else if (n == "for") lt = line_type::cmd_for_stream;
}
break;
@@ -379,8 +437,6 @@ namespace build2
// Pre-parse the line keeping track of whether it ends with a semi.
//
- bool semi (false);
-
line ln;
switch (lt)
{
@@ -407,76 +463,147 @@ namespace build2
mode (lexer_mode::variable_line);
parse_variable_line (t, tt);
+ // Note that the semicolon token is only required during
+ // pre-parsing to decide which line list the current line should
+ // go to and provides no additional semantics during the
+ // execution. Moreover, build2::script::parser::exec_lines()
+ // doesn't expect this token to be present. Thus, we just drop
+ // this token from the saved tokens.
+ //
semi = (tt == type::semi);
- if (tt == type::semi)
+ if (semi)
+ {
+ replay_pop ();
next (t, tt);
+ }
if (tt != type::newline)
fail (t) << "expected newline instead of " << t;
break;
}
+ //
+ // See pre_parse_line_start() for details.
+ //
+ case line_type::cmd_for_args: assert (false); break;
+ case line_type::cmd_for_stream:
+ {
+ // First we need to sense the next few tokens and detect which
+          // form of the for-loop this actually is (see
+ // libbuild2/build/script/parser.cxx for details).
+ //
+ token pt (t);
+ assert (pt.type == type::word && pt.value == "for");
+
+ mode (lexer_mode::for_loop);
+ next (t, tt);
+
+ string& n (t.value);
+
+ if (tt == type::word && t.qtype == quote_type::unquoted &&
+ (n[0] == '_' || alpha (n[0]) || // Variable.
+ n == "*" || n == "~" || n == "@")) // Special variable.
+ {
+ // Detect patterns analogous to parse_variable_name() (so we
+ // diagnose `for x[string]: ...`).
+ //
+ if (n.find_first_of ("[*?") != string::npos)
+ fail (t) << "expected variable name instead of " << n;
+
+ if (special_variable (n))
+ fail (t) << "attempt to set '" << n << "' variable directly";
+
+ if (lexer_->peek_char ().first == '[')
+ {
+ token vt (move (t));
+ next_with_attributes (t, tt);
+
+ attributes_push (t, tt,
+ true /* standalone */,
+ false /* next_token */);
+
+ t = move (vt);
+ tt = t.type;
+ }
+
+ if (lexer_->peek_char ().first == ':')
+ lt = line_type::cmd_for_args;
+ }
+
+ if (lt == line_type::cmd_for_stream) // for x <...
+ {
+ ln.var = nullptr;
+
+ expire_mode ();
+
+ parse_command_expr_result r (
+ parse_command_expr (t, tt,
+ lexer::redirect_aliases,
+ move (pt)));
+
+ assert (r.for_loop);
+
+ parse_command_tail ();
+ parse_here_documents (t, tt, r);
+ }
+ else // for x: ...
+ {
+ ln.var = &script_->var_pool.insert (move (n));
+
+ next (t, tt);
+
+ assert (tt == type::colon);
+
+ expire_mode ();
+
+ // Parse the value similar to the var line type (see above),
+ // except for the fact that we don't expect a trailing semicolon.
+ //
+ mode (lexer_mode::variable_line);
+ parse_variable_line (t, tt);
+
+ if (tt != type::newline)
+ fail (t) << "expected newline instead of " << t << " after for";
+ }
+
+ break;
+ }
case line_type::cmd_elif:
case line_type::cmd_elifn:
case line_type::cmd_else:
- case line_type::cmd_end:
{
- if (!if_line)
- {
+ if (!fct || *fct != line_type::cmd_if)
fail (t) << lt << " without preceding 'if'";
- }
+ }
+ // Fall through.
+ case line_type::cmd_end:
+ {
+ if (!fct)
+ fail (t) << lt << " without preceding 'if', 'for', or 'while'";
}
// Fall through.
case line_type::cmd_if:
case line_type::cmd_ifn:
+ case line_type::cmd_while:
next (t, tt); // Skip to start of command.
// Fall through.
case line_type::cmd:
{
- pair<command_expr, here_docs> p;
+ parse_command_expr_result r;
if (lt != line_type::cmd_else && lt != line_type::cmd_end)
- p = parse_command_expr (t, tt, lexer::redirect_aliases);
+ r = parse_command_expr (t, tt, lexer::redirect_aliases);
- // Colon and semicolon are only valid in test command lines and
- // after 'end' in if-else. Note that we still recognize them
- // lexically, they are just not valid tokens per the grammar.
- //
- if (tt != type::newline)
+ if (r.for_loop)
{
- if (lt != line_type::cmd && lt != line_type::cmd_end)
- fail (t) << "expected newline instead of " << t;
-
- switch (st)
- {
- case type::plus: fail (t) << t << " after setup command" << endf;
- case type::minus: fail (t) << t << " after teardown command" << endf;
- }
+ lt = line_type::cmd_for_stream;
+ ln.var = nullptr;
}
- switch (tt)
- {
- case type::colon:
- {
- if (d)
- fail (ll) << "both leading and trailing descriptions";
-
- d = parse_trailing_description (t, tt);
- break;
- }
- case type::semi:
- {
- semi = true;
- next (t, tt); // Get newline.
- break;
- }
- }
-
- if (tt != type::newline)
- fail (t) << "expected newline instead of " << t;
+ parse_command_tail ();
+ parse_here_documents (t, tt, r);
- parse_here_documents (t, tt, p);
break;
}
}
@@ -494,24 +621,39 @@ namespace build2
ln.tokens = replay_data ();
ls->push_back (move (ln));
- if (lt == line_type::cmd_if || lt == line_type::cmd_ifn)
+ switch (lt)
{
- semi = pre_parse_if_else (t, tt, d, *ls);
+ case line_type::cmd_if:
+ case line_type::cmd_ifn:
+ {
+ semi = pre_parse_if_else (t, tt, d, *ls);
- // If this turned out to be scope-if, then ls is empty, semi is
- // false, and none of the below logic applies.
- //
- if (ls->empty ())
- return semi;
+ // If this turned out to be scope-if, then ls is empty, semi is
+ // false, and none of the below logic applies.
+ //
+ if (ls->empty ())
+ return semi;
+
+ break;
+ }
+ case line_type::cmd_while:
+ case line_type::cmd_for_stream:
+ case line_type::cmd_for_args:
+ {
+ semi = pre_parse_loop (t, tt, lt, d, *ls);
+ break;
+ }
+ default: break;
}
// Unless we were told where to put it, decide where it actually goes.
//
if (ls == &ls_data)
{
- // First pre-check variable and variable-if: by themselves (i.e.,
- // without a trailing semicolon) they are treated as either setup or
- // teardown without plus/minus. Also handle illegal line types.
+ // First pre-check variables and variable-only flow control
+ // constructs: by themselves (i.e., without a trailing semicolon)
+ // they are treated as either setup or teardown without
+ // plus/minus. Also handle illegal line types.
//
switch (lt)
{
@@ -524,8 +666,11 @@ namespace build2
}
case line_type::cmd_if:
case line_type::cmd_ifn:
+ case line_type::cmd_while:
+ case line_type::cmd_for_stream:
+ case line_type::cmd_for_args:
{
- // See if this is a variable-only command-if.
+ // See if this is a variable-only flow control construct.
//
if (find_if (ls_data.begin (), ls_data.end (),
[] (const line& l) {
@@ -549,7 +694,7 @@ namespace build2
fail (ll) << "description before setup/teardown variable";
else
fail (ll) << "description before/after setup/teardown "
- << "variable-if";
+ << "variable-only " << lt;
}
// If we don't have any nested scopes or teardown commands,
@@ -793,7 +938,7 @@ namespace build2
td,
&ls,
true /* one */,
- true /* if_line */));
+ line_type::cmd_if));
assert (ls.size () == 1 && ls.back ().type == lt);
assert (tt == type::newline);
@@ -831,6 +976,99 @@ namespace build2
return false; // We never end with a semi.
}
+ // Pre-parse the flow control construct block line. Fail if the line is
+ // unexpectedly followed with a semicolon or test description.
+ //
+ bool parser::
+ pre_parse_block_line (token& t, type& tt,
+ line_type bt,
+ optional<description>& d,
+ lines& ls)
+ {
+ // enter: peeked first token of the line (type in tt)
+ // leave: newline
+
+ const location ll (get_location (peeked ()));
+
+ switch (tt)
+ {
+ case type::colon:
+ fail (ll) << "description inside " << bt << endf;
+ case type::eos:
+ case type::rcbrace:
+ case type::lcbrace:
+ fail (ll) << "expected closing 'end'" << endf;
+ case type::plus:
+ fail (ll) << "setup command inside " << bt << endf;
+ case type::minus:
+ fail (ll) << "teardown command inside " << bt << endf;
+ }
+
+ // Parse one line. Note that this one line can still be multiple lines
+ // in case of a flow control construct. In this case we want to view
+ // it as, for example, cmd_if, not cmd_end. Thus remember the start
+ // position of the next logical line.
+ //
+ size_t i (ls.size ());
+
+ line_type fct; // Flow control construct type the block type relates to.
+
+ switch (bt)
+ {
+ case line_type::cmd_if:
+ case line_type::cmd_ifn:
+ case line_type::cmd_elif:
+ case line_type::cmd_elifn:
+ case line_type::cmd_else:
+ {
+ fct = line_type::cmd_if;
+ break;
+ }
+ case line_type::cmd_while:
+ case line_type::cmd_for_stream:
+ case line_type::cmd_for_args:
+ {
+ fct = bt;
+ break;
+ }
+      default: assert (false);
+ }
+
+ optional<description> td;
+ bool semi (pre_parse_line (t, tt, td, &ls, true /* one */, fct));
+
+ assert (tt == type::newline);
+
+ line_type lt (ls[i].type);
+
+ // First take care of 'end'.
+ //
+ if (lt == line_type::cmd_end)
+ {
+ if (td)
+ {
+ if (d)
+ fail (ll) << "both leading and trailing descriptions";
+
+ d = move (td);
+ }
+
+ return semi;
+ }
+
+      // For any other line, a trailing semi or description is illegal.
+ //
+ // @@ Not the exact location of semi/colon.
+ //
+ if (semi)
+ fail (ll) << "';' inside " << bt;
+
+ if (td)
+ fail (ll) << "description inside " << bt;
+
+ return false;
+ }
+
bool parser::
pre_parse_if_else_command (token& t, type& tt,
optional<description>& d,
@@ -839,70 +1077,23 @@ namespace build2
// enter: peeked first token of next line (type in tt)
// leave: newline
- // Parse lines until we see closing 'end'. Nested if-else blocks are
- // handled recursively.
+ // Parse lines until we see closing 'end'.
//
for (line_type bt (line_type::cmd_if); // Current block.
;
tt = peek (lexer_mode::first_token))
{
const location ll (get_location (peeked ()));
-
- switch (tt)
- {
- case type::colon:
- fail (ll) << "description inside " << bt << endf;
- case type::eos:
- case type::rcbrace:
- case type::lcbrace:
- fail (ll) << "expected closing 'end'" << endf;
- case type::plus:
- fail (ll) << "setup command inside " << bt << endf;
- case type::minus:
- fail (ll) << "teardown command inside " << bt << endf;
- }
-
- // Parse one line. Note that this one line can still be multiple
- // lines in case of if-else. In this case we want to view it as
- // cmd_if, not cmd_end. Thus remember the start position of the
- // next logical line.
- //
size_t i (ls.size ());
- optional<description> td;
- bool semi (pre_parse_line (t, tt,
- td,
- &ls,
- true /* one */,
- true /* if_line */));
- assert (tt == type::newline);
+ bool semi (pre_parse_block_line (t, tt, bt, d, ls));
line_type lt (ls[i].type);
// First take care of 'end'.
//
if (lt == line_type::cmd_end)
- {
- if (td)
- {
- if (d)
- fail (ll) << "both leading and trailing descriptions";
-
- d = move (td);
- }
-
return semi;
- }
-
- // For any other line trailing semi or description is illegal.
- //
- // @@ Not the exact location of semi/colon.
- //
- if (semi)
- fail (ll) << "';' inside " << bt;
-
- if (td)
- fail (ll) << "description inside " << bt;
// Check if-else block sequencing.
//
@@ -924,6 +1115,40 @@ namespace build2
default: break;
}
}
+
+ assert (false); // Can't be here.
+ return false;
+ }
+
+ bool parser::
+ pre_parse_loop (token& t, type& tt,
+ line_type lt,
+ optional<description>& d,
+ lines& ls)
+ {
+ // enter: <newline> (previous line)
+ // leave: <newline>
+
+ assert (lt == line_type::cmd_while ||
+ lt == line_type::cmd_for_stream ||
+ lt == line_type::cmd_for_args);
+
+ tt = peek (lexer_mode::first_token);
+
+ // Parse lines until we see closing 'end'.
+ //
+ for (;; tt = peek (lexer_mode::first_token))
+ {
+ size_t i (ls.size ());
+
+ bool semi (pre_parse_block_line (t, tt, lt, d, ls));
+
+ if (ls[i].type == line_type::cmd_end)
+ return semi;
+ }
+
+ assert (false); // Can't be here.
+ return false;
}
void parser::
@@ -1057,7 +1282,7 @@ namespace build2
diag_record dr (fail (dl));
dr << "invalid testscript include path ";
- to_stream (dr.os, n, true); // Quote.
+ to_stream (dr.os, n, quote_mode::normal);
}
}
@@ -1266,21 +1491,18 @@ namespace build2
// Note: this one is only used during execution.
- pair<command_expr, here_docs> p (
+ parse_command_expr_result pr (
parse_command_expr (t, tt, lexer::redirect_aliases));
- switch (tt)
- {
- case type::colon: parse_trailing_description (t, tt); break;
- case type::semi: next (t, tt); break; // Get newline.
- }
+ if (tt == type::colon)
+ parse_trailing_description (t, tt);
assert (tt == type::newline);
- parse_here_documents (t, tt, p);
+ parse_here_documents (t, tt, pr);
assert (tt == type::newline);
- command_expr r (move (p.first));
+ command_expr r (move (pr.expr));
// If the test program runner is specified, then adjust the
// expressions to run test programs via this runner.
@@ -1402,9 +1624,6 @@ namespace build2
mode (lexer_mode::variable_line);
value rhs (parse_variable_line (t, tt));
- if (tt == type::semi)
- next (t, tt);
-
assert (tt == type::newline);
// Assign.
@@ -1424,8 +1643,9 @@ namespace build2
command_type ct;
auto exec_cmd = [&ct, this] (token& t, build2::script::token_type& tt,
- size_t li,
+ const iteration_index* ii, size_t li,
bool single,
+ const function<command_function>& cf,
const location& ll)
{
// We use the 0 index to signal that this is the only command.
@@ -1437,19 +1657,35 @@ namespace build2
command_expr ce (
parse_command_line (t, static_cast<token_type&> (tt)));
- runner_->run (*scope_, ce, ct, li, ll);
+ runner_->run (*scope_, ce, ct, ii, li, cf, ll);
};
- auto exec_if = [this] (token& t, build2::script::token_type& tt,
- size_t li,
- const location& ll)
+ auto exec_cond = [this] (token& t, build2::script::token_type& tt,
+ const iteration_index* ii, size_t li,
+ const location& ll)
{
command_expr ce (
parse_command_line (t, static_cast<token_type&> (tt)));
- // Assume if-else always involves multiple commands.
+ // Assume a flow control construct always involves multiple
+ // commands.
//
- return runner_->run_if (*scope_, ce, li, ll);
+ return runner_->run_cond (*scope_, ce, ii, li, ll);
+ };
+
+ auto exec_for = [this] (const variable& var,
+ value&& val,
+ const attributes& val_attrs,
+ const location&)
+ {
+ value& lhs (scope_->assign (var));
+
+ attributes_.push_back (val_attrs);
+
+ apply_value_attributes (&var, lhs, move (val), type::assign);
+
+ if (script_->test_command_var (var.name))
+ scope_->reset_special ();
};
size_t li (1);
@@ -1459,16 +1695,17 @@ namespace build2
ct = command_type::test;
exec_lines (t->tests_.begin (), t->tests_.end (),
- exec_set, exec_cmd, exec_if,
- li);
+ exec_set, exec_cmd, exec_cond, exec_for,
+ nullptr /* iteration_index */, li);
}
else if (group* g = dynamic_cast<group*> (scope_))
{
ct = command_type::setup;
- bool exec_scope (exec_lines (g->setup_.begin (), g->setup_.end (),
- exec_set, exec_cmd, exec_if,
- li));
+ bool exec_scope (
+ exec_lines (g->setup_.begin (), g->setup_.end (),
+ exec_set, exec_cmd, exec_cond, exec_for,
+ nullptr /* iteration_index */, li));
if (exec_scope)
{
@@ -1526,7 +1763,8 @@ namespace build2
try
{
- take = runner_->run_if (*scope_, ce, li++, ll);
+ take = runner_->run_cond (
+ *scope_, ce, nullptr /* iteration_index */, li++, ll);
}
catch (const exit_scope& e)
{
@@ -1593,24 +1831,24 @@ namespace build2
// UBSan workaround.
//
const diag_frame* df (diag_frame::stack ());
- if (!ctx.sched.async (task_count,
- [] (const diag_frame* ds,
- scope& s,
- script& scr,
- runner& r)
- {
- diag_frame::stack_guard dsg (ds);
- execute_impl (s, scr, r);
- },
- df,
- ref (*chain),
- ref (*script_),
- ref (*runner_)))
+ if (!ctx->sched->async (task_count,
+ [] (const diag_frame* ds,
+ scope& s,
+ script& scr,
+ runner& r)
+ {
+ diag_frame::stack_guard dsg (ds);
+ execute_impl (s, scr, r);
+ },
+ df,
+ ref (*chain),
+ ref (*script_),
+ ref (*runner_)))
{
// Bail out if the scope has failed and we weren't instructed
// to keep going.
//
- if (chain->state == scope_state::failed && !ctx.keep_going)
+ if (chain->state == scope_state::failed && !ctx->keep_going)
throw failed ();
}
}
@@ -1637,8 +1875,8 @@ namespace build2
ct = command_type::teardown;
exec_lines (g->tdown_.begin (), g->tdown_.end (),
- exec_set, exec_cmd, exec_if,
- li);
+ exec_set, exec_cmd, exec_cond, exec_for,
+ nullptr /* iteration_index */, li);
}
else
assert (false);
@@ -1652,7 +1890,8 @@ namespace build2
// The rest.
//
- // When add a special variable don't forget to update lexer::word().
+          // When adding a special variable don't forget to update lexer::word()
+          // and for-loop parsing in pre_parse_line().
//
bool parser::
special_variable (const string& n) noexcept
@@ -1661,7 +1900,7 @@ namespace build2
}
lookup parser::
- lookup_variable (name&& qual, string&& name, const location& loc)
+ lookup_variable (names&& qual, string&& name, const location& loc)
{
if (pre_parse_)
return lookup ();
diff --git a/libbuild2/test/script/parser.hxx b/libbuild2/test/script/parser.hxx
index c63bce6..6fe46e2 100644
--- a/libbuild2/test/script/parser.hxx
+++ b/libbuild2/test/script/parser.hxx
@@ -30,7 +30,7 @@ namespace build2
// Pre-parse. Issue diagnostics and throw failed in case of an error.
//
public:
- parser (context& c): build2::script::parser (c, true /* relex */) {}
+ parser (context& c): build2::script::parser (c) {}
void
pre_parse (script&);
@@ -62,7 +62,13 @@ namespace build2
optional<description>&,
lines* = nullptr,
bool one = false,
- bool if_line = false);
+ optional<line_type> flow_control_type = nullopt);
+
+ bool
+ pre_parse_block_line (token&, token_type&,
+ line_type block_type,
+ optional<description>&,
+ lines&);
bool
pre_parse_if_else (token&, token_type&,
@@ -79,6 +85,12 @@ namespace build2
optional<description>&,
lines&);
+ bool
+ pre_parse_loop (token&, token_type&,
+ line_type,
+ optional<description>&,
+ lines&);
+
void
pre_parse_directive (token&, token_type&);
@@ -117,7 +129,7 @@ namespace build2
//
protected:
virtual lookup
- lookup_variable (name&&, string&&, const location&) override;
+ lookup_variable (names&&, string&&, const location&) override;
// Insert id into the id map checking for duplicates.
//
diff --git a/libbuild2/test/script/parser.test.cxx b/libbuild2/test/script/parser.test.cxx
index 47d56ce..6838e47 100644
--- a/libbuild2/test/script/parser.test.cxx
+++ b/libbuild2/test/script/parser.test.cxx
@@ -33,8 +33,11 @@ namespace build2
class print_runner: public runner
{
public:
- print_runner (bool scope, bool id, bool line)
- : scope_ (scope), id_ (id), line_ (line) {}
+ print_runner (bool scope, bool id, bool line, bool iterations)
+ : scope_ (scope),
+ id_ (id),
+ line_ (line),
+ iterations_ (iterations) {}
virtual bool
test (scope&) const override
@@ -97,11 +100,32 @@ namespace build2
}
virtual void
- run (scope&,
+ run (scope& env,
const command_expr& e, command_type t,
- size_t i,
- const location&) override
+ const iteration_index* ii, size_t i,
+ const function<command_function>& cf,
+ const location& ll) override
{
+        // If the function is specified, then just execute it with an empty
+ // stdin so it can perform the housekeeping (stop replaying tokens,
+ // increment line index, etc).
+ //
+ if (cf != nullptr)
+ {
+ assert (e.size () == 1 && !e[0].pipe.empty ());
+
+ const command& c (e[0].pipe.back ());
+
+ // Must be enforced by the caller.
+ //
+ assert (!c.out && !c.err && !c.exit);
+
+ cf (env, c.arguments,
+ fdopen_null (), nullptr /* pipe */,
+ nullopt /* deadline */,
+ ll);
+ }
+
const char* s (nullptr);
switch (t)
@@ -113,22 +137,22 @@ namespace build2
cout << ind_ << s << e;
- if (line_)
- cout << " # " << i;
+ if (line_ || iterations_)
+ print_line_info (ii, i);
cout << endl;
}
virtual bool
- run_if (scope&,
- const command_expr& e,
- size_t i,
- const location&) override
+ run_cond (scope&,
+ const command_expr& e,
+ const iteration_index* ii, size_t i,
+ const location&) override
{
cout << ind_ << "? " << e;
- if (line_)
- cout << " # " << i;
+ if (line_ || iterations_)
+ print_line_info (ii, i);
cout << endl;
@@ -146,13 +170,33 @@ namespace build2
}
private:
+ void
+ print_line_info (const iteration_index* ii, size_t i) const
+ {
+ cout << " #";
+
+ if (line_)
+ cout << ' ' << i;
+
+ if (iterations_ && ii != nullptr)
+ {
+ string s;
+ for (const iteration_index* i (ii); i != nullptr; i = i->prev)
+ s.insert (0, " i" + to_string (i->index));
+
+ cout << s;
+ }
+ }
+
+ private:
bool scope_;
bool id_;
bool line_;
+ bool iterations_;
string ind_;
};
- // Usage: argv[0] [-s] [-i] [-l] [<testscript-name>]
+ // Usage: argv[0] [-s] [-i] [-l] [-r] [<testscript-name>]
//
int
main (int argc, char* argv[])
@@ -162,18 +206,19 @@ namespace build2
// Fake build system driver, default verbosity.
//
init_diag (1);
- init (nullptr, argv[0]);
+ init (nullptr, argv[0], true);
// Serial execution.
//
scheduler sched (1);
global_mutexes mutexes (1);
- file_cache fcache;
+ file_cache fcache (true);
context ctx (sched, mutexes, fcache);
bool scope (false);
bool id (false);
bool line (false);
+ bool iterations (false);
path name;
for (int i (1); i != argc; ++i)
@@ -186,6 +231,8 @@ namespace build2
id = true;
else if (a == "-l")
line = true;
+ else if (a == "-r")
+ iterations = true;
else
{
name = path (move (a));
@@ -218,7 +265,7 @@ namespace build2
tt.assign (
ctx.var_pool.rw ().insert<target_triplet> ("test.target")));
- v = cast<target_triplet> (ctx.global_scope["build.host"]);
+ v = *ctx.build_host;
testscript& st (
ctx.targets.insert<testscript> (work,
@@ -236,7 +283,7 @@ namespace build2
script s (tt, st, dir_path (work) /= "test-driver");
p.pre_parse (cin, s);
- print_runner r (scope, id, line);
+ print_runner r (scope, id, line, iterations);
p.execute (s, r);
}
catch (const failed&)
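
The new -r mode above is what produces the iteration suffixes (for example `# 6 i2 i1`) seen in the nested while test earlier in this patch: the line number first, then the indexes from the outermost to the innermost loop. Below is a minimal standalone sketch of that traversal; it assumes only the index/prev fields that print_line_info() actually uses, everything else is illustrative.

  #include <cstddef>
  #include <iostream>
  #include <string>

  // Stand-in for build2::script::iteration_index: the innermost iteration
  // comes first and links to the enclosing ones via prev.
  struct iteration_index
  {
    std::size_t index;
    const iteration_index* prev;
  };

  // Build the " i<outer> ... i<inner>" suffix the way print_line_info()
  // does: walk towards the outermost loop, prepending each index.
  static std::string
  iteration_suffix (const iteration_index* ii)
  {
    std::string s;
    for (const iteration_index* i (ii); i != nullptr; i = i->prev)
      s.insert (0, " i" + std::to_string (i->index));
    return s;
  }

  int main ()
  {
    iteration_index outer {2, nullptr}; // Second iteration of the outer while.
    iteration_index inner {1, &outer};  // First iteration of the inner while.

    std::cout << "cmd3 # 6" << iteration_suffix (&inner) << '\n'; // cmd3 # 6 i2 i1
  }
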
diff --git a/libbuild2/test/script/runner.cxx b/libbuild2/test/script/runner.cxx
index af5f30a..98d6868 100644
--- a/libbuild2/test/script/runner.cxx
+++ b/libbuild2/test/script/runner.cxx
@@ -3,6 +3,8 @@
#include <libbuild2/test/script/runner.hxx>
+#include <libbuild2/filesystem.hxx>
+
#include <libbuild2/script/run.hxx>
#include <libbuild2/test/common.hxx>
@@ -140,7 +142,9 @@ namespace build2
void default_runner::
run (scope& sp,
const command_expr& expr, command_type ct,
- size_t li, const location& ll)
+ const iteration_index* ii, size_t li,
+ const function<command_function>& cf,
+ const location& ll)
{
// Noop for teardown commands if keeping tests output is requested.
//
@@ -162,40 +166,55 @@ namespace build2
text << ": " << c << expr;
}
- // Print test id once per test expression.
+ // Print test id once per test expression and only for the topmost
+ // one.
//
auto df = make_diag_frame (
- [&sp](const diag_record& dr)
+ [&sp, print = (sp.exec_level == 0)](const diag_record& dr)
{
- // Let's not depend on how the path representation can be improved
- // for readability on printing.
- //
- dr << info << "test id: " << sp.id_path.posix_string ();
+ if (print)
+ {
+ // Let's not depend on how the path representation can be
+ // improved for readability on printing.
+ //
+ dr << info << "test id: " << sp.id_path.posix_string ();
+ }
});
- build2::script::run (sp, expr, li, ll);
+ ++sp.exec_level;
+ build2::script::run (sp, expr, ii, li, ll, cf);
+ --sp.exec_level;
}
bool default_runner::
- run_if (scope& sp,
- const command_expr& expr,
- size_t li, const location& ll)
+ run_cond (scope& sp,
+ const command_expr& expr,
+ const iteration_index* ii, size_t li,
+ const location& ll)
{
if (verb >= 3)
text << ": ?" << expr;
- // Print test id once per test expression.
+ // Print test id once per test expression and only for the topmost
+ // one.
//
auto df = make_diag_frame (
- [&sp](const diag_record& dr)
+ [&sp, print = (sp.exec_level == 0)](const diag_record& dr)
{
- // Let's not depend on how the path representation can be improved
- // for readability on printing.
- //
- dr << info << "test id: " << sp.id_path.posix_string ();
+ if (print)
+ {
+ // Let's not depend on how the path representation can be
+ // improved for readability on printing.
+ //
+ dr << info << "test id: " << sp.id_path.posix_string ();
+ }
});
- return build2::script::run_if (sp, expr, li, ll);
+ ++sp.exec_level;
+ bool r (build2::script::run_cond (sp, expr, ii, li, ll));
+ --sp.exec_level;
+
+ return r;
}
}
}
diff --git a/libbuild2/test/script/runner.hxx b/libbuild2/test/script/runner.hxx
index b6a038d..687d991 100644
--- a/libbuild2/test/script/runner.hxx
+++ b/libbuild2/test/script/runner.hxx
@@ -48,14 +48,21 @@ namespace build2
// Location is the start position of this command line in the
// testscript. It can be used in diagnostics.
//
+ // Optionally, execute the specified function instead of the last
+ // pipe command.
+ //
virtual void
run (scope&,
const command_expr&, command_type,
- size_t index,
+ const iteration_index*, size_t index,
+ const function<command_function>&,
const location&) = 0;
virtual bool
- run_if (scope&, const command_expr&, size_t, const location&) = 0;
+ run_cond (scope&,
+ const command_expr&,
+ const iteration_index*, size_t,
+ const location&) = 0;
// Location is the scope end location (for diagnostics, etc).
//
@@ -84,11 +91,15 @@ namespace build2
virtual void
run (scope&,
const command_expr&, command_type,
- size_t,
+ const iteration_index*, size_t,
+ const function<command_function>&,
const location&) override;
virtual bool
- run_if (scope&, const command_expr&, size_t, const location&) override;
+ run_cond (scope&,
+ const command_expr&,
+ const iteration_index*, size_t,
+ const location&) override;
virtual void
leave (scope&, const location&) override;
diff --git a/libbuild2/test/script/script.cxx b/libbuild2/test/script/script.cxx
index 3a8ceac..f7827f6 100644
--- a/libbuild2/test/script/script.cxx
+++ b/libbuild2/test/script/script.cxx
@@ -30,7 +30,7 @@ namespace build2
scope_base::
scope_base (script& s)
: root (s),
- vars (s.test_target.ctx, false /* global */)
+ vars (s.test_target.ctx, false /* shared */) // Note: managed.
{
vars.assign (root.wd_var) = dir_path ();
}
@@ -115,7 +115,7 @@ namespace build2
}
void scope::
- set_variable (string&& nm,
+ set_variable (string nm,
names&& val,
const string& attrs,
const location& ll)
@@ -197,12 +197,12 @@ namespace build2
test_var (var_pool.insert<path> ("test")),
options_var (var_pool.insert<strings> ("test.options")),
arguments_var (var_pool.insert<strings> ("test.arguments")),
- redirects_var (var_pool.insert<strings> ("test.redirects")),
- cleanups_var (var_pool.insert<strings> ("test.cleanups")),
+ redirects_var (var_pool.insert<cmdline> ("test.redirects")),
+ cleanups_var (var_pool.insert<cmdline> ("test.cleanups")),
wd_var (var_pool.insert<dir_path> ("~")),
id_var (var_pool.insert<path> ("@")),
- cmd_var (var_pool.insert<strings> ("*")),
+ cmd_var (var_pool.insert<cmdline> ("*")),
cmdN_var {
&var_pool.insert<path> ("0"),
&var_pool.insert<string> ("1"),
@@ -268,7 +268,7 @@ namespace build2
v = path (n->dir);
else
{
- // Must be a target name.
+ // Must be a target name. Could be from src (e.g., a script).
//
// @@ OUT: what if this is a @-qualified pair of names?
//
@@ -355,7 +355,7 @@ namespace build2
// in parallel). Plus, if there is no such variable, then we cannot
// possibly find any value.
//
- const variable* pvar (context.var_pool.find (n));
+ const variable* pvar (root.target_scope.var_pool ().find (n));
if (pvar == nullptr)
return lookup_type ();
@@ -410,11 +410,12 @@ namespace build2
// First assemble the $* value and save the test variable value into
// the test program set.
//
- strings s;
+ cmdline s;
- auto append = [&s] (const strings& v)
+ auto append = [&s] (const strings& vs)
{
- s.insert (s.end (), v.begin (), v.end ());
+ for (const string& v: vs)
+ s.push_back (name (v)); // Simple name.
};
// If the test variable can't be looked up for any reason (is NULL,
@@ -423,7 +424,7 @@ namespace build2
if (auto l = lookup (root.test_var))
{
const path& p (cast<path> (l));
- s.push_back (p.representation ());
+ s.push_back (name (p.representation ()));
test_programs[0] = &p;
@@ -441,10 +442,16 @@ namespace build2
size_t n (s.size ());
if (auto l = lookup (root.redirects_var))
- append (cast<strings> (l));
+ {
+ const auto& v (cast<cmdline> (l));
+ s.insert (s.end (), v.begin (), v.end ());
+ }
if (auto l = lookup (root.cleanups_var))
- append (cast<strings> (l));
+ {
+ const auto& v (cast<cmdline> (l));
+ s.insert (s.end (), v.begin (), v.end ());
+ }
// Set the $N values if present.
//
@@ -455,9 +462,9 @@ namespace build2
if (i < n)
{
if (i == 0)
- v = path (s[i]);
+ v = path (s[i].value);
else
- v = s[i];
+ v = s[i].value;
}
else
v = nullptr; // Clear any old values.
@@ -465,6 +472,88 @@ namespace build2
// Set $*.
//
+      // We need to effective-quote the $test, $test.options, $test.arguments
+      // part of it since they will be re-lexed. See the Testscript manual
+      // for details on quoting semantics. In particular, we cannot escape
+      // the special characters (|<>&) so we have to rely on quoting. We can
+      // use single-quoting for everything except if the value contains a
+      // single quote, in which case we have to use separately-quoted
+      // regions (similar to shell), for example:
+ //
+ // <''>
+ //
+ // Can be quoted as:
+ //
+ // '<'"''"'>'
+ //
+ for (size_t i (0); i != n; ++i)
+ {
+ string& v (s[i].value);
+
+ // Check if the quoting is required for this value.
+ //
+ if (!parser::need_cmdline_relex (v))
+ continue;
+
+ // If the value doesn't contain the single-quote character, then
+ // single-quote it.
+ //
+ size_t p (v.find ('\''));
+
+ if (p == string::npos)
+ {
+ v = '\'' + v + '\'';
+ continue;
+ }
+
+ // Otherwise quote the regions.
+ //
+ // Note that we double-quote the single-quote character sequences
+ // and single-quote all the other regions.
+ //
+ string r;
+ char q (p == 0 ? '"' : '\''); // Current region quoting mode.
+
+ r += q; // Open the first region.
+
+ for (char c: v)
+ {
+ // If we are in the double-quoting mode, then switch to the
+ // single-quoting mode if a non-single-quote character is
+ // encountered.
+ //
+ if (q == '"')
+ {
+ if (c != '\'')
+ {
+ r += q; // Close the double-quoted region.
+ q = '\''; // Set the single-quoting mode.
+ r += q; // Open the single-quoted region.
+ }
+ }
+ //
+ // If we are in the single-quoting mode, then switch to the
+ // double-quoting mode if the single-quote character is
+ // encountered.
+ //
+ else
+ {
+ if (c == '\'')
+ {
+ r += q; // Close the single-quoted region.
+ q = '"'; // Set the double-quoting mode.
+ r += q; // Open the double-quoted region.
+ }
+ }
+
+ r += c;
+ }
+
+ r += q; // Close the last region.
+
+ v = move (r);
+ }
+
assign (root.cmd_var) = move (s);
}
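
The region-quoting loop added above is easy to exercise in isolation. The following sketch mirrors that scheme (single-quote the whole value unless it contains single quotes, otherwise double-quote the single-quote runs and single-quote everything else); the helper name quote_for_relex and the standalone setup are made up, and unlike the real code it does not first check parser::need_cmdline_relex().

  #include <iostream>
  #include <string>

  // Quote a value so that it survives re-lexing, mirroring the $* quoting
  // above.
  static std::string
  quote_for_relex (const std::string& v)
  {
    std::string::size_type p (v.find ('\''));

    if (p == std::string::npos)
      return '\'' + v + '\'';

    std::string r;
    char q (p == 0 ? '"' : '\''); // Current region quoting mode.

    r += q; // Open the first region.

    for (char c: v)
    {
      if (q == '"')
      {
        if (c != '\'')   // Leaving a run of single quotes.
        {
          r += q;        // Close the double-quoted region.
          q = '\'';
          r += q;        // Open a single-quoted region.
        }
      }
      else
      {
        if (c == '\'')   // Entering a run of single quotes.
        {
          r += q;        // Close the single-quoted region.
          q = '"';
          r += q;        // Open a double-quoted region.
        }
      }

      r += c;
    }

    r += q; // Close the last region.
    return r;
  }

  int main ()
  {
    std::cout << quote_for_relex ("<''>") << '\n'; // '<'"''"'>'
    std::cout << quote_for_relex ("a|b")  << '\n'; // 'a|b'
  }
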
diff --git a/libbuild2/test/script/script.hxx b/libbuild2/test/script/script.hxx
index 22f6725..9409b01 100644
--- a/libbuild2/test/script/script.hxx
+++ b/libbuild2/test/script/script.hxx
@@ -21,16 +21,19 @@ namespace build2
namespace script
{
using build2::script::line;
+ using build2::script::line_type;
using build2::script::lines;
using build2::script::redirect;
using build2::script::redirect_type;
- using build2::script::line_type;
- using build2::script::command_expr;
- using build2::script::expr_term;
using build2::script::command;
+ using build2::script::expr_term;
+ using build2::script::command_expr;
+ using build2::script::iteration_index;
using build2::script::environment_vars;
using build2::script::deadline;
using build2::script::timeout;
+ using build2::script::pipe_command;
+ using build2::script::command_function;
class parser; // Required by VC for 'friend class parser' declaration.
@@ -94,6 +97,22 @@ namespace build2
scope_state state = scope_state::unknown;
+ // The command expression execution nesting level. Can be maintained
+ // by the runner to, for example, only perform some housekeeping on
+ // the topmost level (add the test id to the diagnostics, etc).
+ //
+ // Note that the command expression execution can be nested, so that
+ // the outer expression execution is not completed before all the
+      // inner expressions are executed. For example:
+ //
+ // echo 'a b' | for x
+ // echo 'c d' | for y
+ // test $x $y
+ // end
+ // end
+ //
+ size_t exec_level = 0;
+
// Test program paths.
//
// Currently always contains a single element (see test_program() for
@@ -103,8 +122,8 @@ namespace build2
//
small_vector<const path*, 1> test_programs;
- void
- set_variable (string&& name,
+ virtual void
+ set_variable (string name,
names&&,
const string& attrs,
const location&) override;
diff --git a/libbuild2/test/target.cxx b/libbuild2/test/target.cxx
index ce88baa..852abdf 100644
--- a/libbuild2/test/target.cxx
+++ b/libbuild2/test/target.cxx
@@ -56,7 +56,7 @@ namespace build2
&testscript_target_pattern,
nullptr,
&file_search,
- false
+ target_type::flag::none
};
}
}
diff --git a/libbuild2/test/target.hxx b/libbuild2/test/target.hxx
index 1dd7307..e6c549f 100644
--- a/libbuild2/test/target.hxx
+++ b/libbuild2/test/target.hxx
@@ -18,11 +18,14 @@ namespace build2
class LIBBUILD2_SYMEXPORT testscript: public file
{
public:
- using file::file;
+ testscript (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
}
}
diff --git a/libbuild2/token.cxx b/libbuild2/token.cxx
index ab14388..cc102cc 100644
--- a/libbuild2/token.cxx
+++ b/libbuild2/token.cxx
@@ -29,21 +29,30 @@ namespace build2
os << (r ? "\n" : "<newline>");
break;
}
- case token_type::pair_separator:
+ case token_type::word:
{
if (r)
- os << t.value[0];
+ os << t.value;
else
- os << "<pair separator " << t.value[0] << ">";
+ os << '\'' << t.value << '\'';
break;
}
- case token_type::word:
+ case token_type::escape:
{
if (r)
- os << t.value;
+ os << '\\' << t.value;
else
- os << '\'' << t.value << '\'';
+ os << "<escape sequence \\" << t.value << ">";
+
+ break;
+ }
+ case token_type::pair_separator:
+ {
+ if (r)
+ os << t.value[0];
+ else
+ os << "<pair separator " << t.value[0] << ">";
break;
}
diff --git a/libbuild2/token.hxx b/libbuild2/token.hxx
index fca888c..f9ede65 100644
--- a/libbuild2/token.hxx
+++ b/libbuild2/token.hxx
@@ -30,6 +30,7 @@ namespace build2
eos,
newline,
word,
+ escape, // token::value is <...> in $\<...>
pair_separator, // token::value[0] is the pair separator char.
colon, // :
@@ -159,16 +160,13 @@ namespace build2
token (string v, bool s,
quote_type qt, bool qc, bool qf,
uint64_t l, uint64_t c)
- : token (token_type::word, move (v), s,
- qt, qc, qf,
- l, c,
- &token_printer) {}
+ : token (token_type::word, move (v), s, qt, qc, qf, l, c) {}
token (token_type t,
string v, bool s,
quote_type qt, bool qc, bool qf,
uint64_t l, uint64_t c,
- printer_type* p)
+ printer_type* p = &token_printer)
: type (t), separated (s),
qtype (qt), qcomp (qc), qfirst (qf),
value (move (v)),
diff --git a/libbuild2/types-parsers.cxx b/libbuild2/types-parsers.cxx
new file mode 100644
index 0000000..9c3dc52
--- /dev/null
+++ b/libbuild2/types-parsers.cxx
@@ -0,0 +1,153 @@
+// file : libbuild2/types-parsers.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <libbuild2/types-parsers.hxx>
+
+#include <sstream>
+
+#include <libbuild2/lexer.hxx>
+#include <libbuild2/parser.hxx>
+
+namespace build2
+{
+ namespace build
+ {
+ namespace cli
+ {
+ template <typename T>
+ static void
+ parse_path (T& x, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (!s.more ())
+ throw missing_value (o);
+
+ const char* v (s.next ());
+
+ try
+ {
+ x = T (v);
+
+ if (x.empty ())
+ throw invalid_value (o, v);
+ }
+ catch (const invalid_path&)
+ {
+ throw invalid_value (o, v);
+ }
+ }
+
+ void parser<path>::
+ parse (path& x, bool& xs, scanner& s)
+ {
+ xs = true;
+ parse_path (x, s);
+ }
+
+ void parser<dir_path>::
+ parse (dir_path& x, bool& xs, scanner& s)
+ {
+ xs = true;
+ parse_path (x, s);
+ }
+
+ static names
+ parse_names (const char* o, const char* v)
+ {
+ using build2::parser;
+ using std::istringstream;
+
+ istringstream is (v);
+ is.exceptions (istringstream::failbit | istringstream::badbit);
+
+ // @@ TODO: currently this issues diagnostics to diag_stream.
+ // Perhaps we should redirect it? Also below.
+ //
+ path_name in (o);
+ lexer l (is, in, 1 /* line */, "\'\"\\$("); // Effective.
+ parser p (nullptr);
+ return p.parse_names (l, nullptr, parser::pattern_mode::preserve);
+ }
+
+ void parser<name>::
+ parse (name& x, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (!s.more ())
+ throw missing_value (o);
+
+ const char* v (s.next ());
+
+ try
+ {
+ names r (parse_names (o, v));
+
+ if (r.size () != 1)
+ throw invalid_value (o, v);
+
+ x = move (r.front ());
+ xs = true;
+ }
+ catch (const failed&)
+ {
+ throw invalid_value (o, v);
+ }
+ }
+
+ void parser<pair<name, optional<name>>>::
+ parse (pair<name, optional<name>>& x, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (!s.more ())
+ throw missing_value (o);
+
+ const char* v (s.next ());
+
+ try
+ {
+ names r (parse_names (o, v));
+
+ if (r.size () == 1)
+ {
+ x.first = move (r.front ());
+ x.second = nullopt;
+ }
+ else if (r.size () == 2 && r.front ().pair == '@')
+ {
+ x.first = move (r.front ());
+ x.second = move (r.back ());
+ }
+ else
+ throw invalid_value (o, v);
+
+ xs = true;
+ }
+ catch (const failed&)
+ {
+ throw invalid_value (o, v);
+ }
+ }
+
+ void parser<structured_result_format>::
+ parse (structured_result_format& x, bool& xs, scanner& s)
+ {
+ xs = true;
+ const char* o (s.next ());
+
+ if (!s.more ())
+ throw missing_value (o);
+
+ const string v (s.next ());
+ if (v == "lines")
+ x = structured_result_format::lines;
+ else if (v == "json")
+ x = structured_result_format::json;
+ else
+ throw invalid_value (o, v);
+ }
+ }
+ }
+}
diff --git a/libbuild2/types-parsers.hxx b/libbuild2/types-parsers.hxx
new file mode 100644
index 0000000..42fc60d
--- /dev/null
+++ b/libbuild2/types-parsers.hxx
@@ -0,0 +1,83 @@
+// file : libbuild2/types-parsers.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+// CLI parsers, included into the generated source files.
+//
+
+#ifndef LIBBUILD2_TYPES_PARSERS_HXX
+#define LIBBUILD2_TYPES_PARSERS_HXX
+
+#include <libbuild2/types.hxx>
+
+#include <libbuild2/common-options.hxx> // build2::build::cli namespace
+#include <libbuild2/options-types.hxx>
+
+namespace build2
+{
+ namespace build
+ {
+ namespace cli
+ {
+ class scanner;
+
+ template <typename T>
+ struct parser;
+
+ template <>
+ struct parser<path>
+ {
+ static void
+ parse (path&, bool&, scanner&);
+
+ static void
+ merge (path& b, const path& a) {b = a;}
+ };
+
+ template <>
+ struct parser<dir_path>
+ {
+ static void
+ parse (dir_path&, bool&, scanner&);
+
+ static void
+ merge (dir_path& b, const dir_path& a) {b = a;}
+ };
+
+ template <>
+ struct parser<name>
+ {
+ static void
+ parse (name&, bool&, scanner&);
+
+ static void
+ merge (name& b, const name& a) {b = a;}
+ };
+
+ template <>
+ struct parser<pair<name, optional<name>>>
+ {
+ static void
+ parse (pair<name, optional<name>>&, bool&, scanner&);
+
+ static void
+ merge (pair<name, optional<name>>& b,
+ const pair<name, optional<name>>& a) {b = a;}
+ };
+
+ template <>
+ struct parser<structured_result_format>
+ {
+ static void
+ parse (structured_result_format&, bool&, scanner&);
+
+ static void
+ merge (structured_result_format& b, const structured_result_format& a)
+ {
+ b = a;
+ }
+ };
+ }
+ }
+}
+
+#endif // LIBBUILD2_TYPES_PARSERS_HXX
diff --git a/libbuild2/types.hxx b/libbuild2/types.hxx
index af1a4de..ea84701 100644
--- a/libbuild2/types.hxx
+++ b/libbuild2/types.hxx
@@ -15,6 +15,7 @@
#include <map>
#include <set>
+#include <list>
#include <array>
#include <tuple>
#include <regex>
@@ -29,14 +30,22 @@
#include <functional> // hash, function, reference_wrapper
#include <initializer_list>
-#include <mutex>
#include <atomic>
-#include <thread>
-#include <condition_variable>
-#include <libbutl/ft/shared_mutex.hxx>
-#if defined(__cpp_lib_shared_mutex) || defined(__cpp_lib_shared_timed_mutex)
-# include <shared_mutex>
+#ifndef LIBBUTL_MINGW_STDTHREAD
+# include <mutex>
+# include <thread>
+# include <condition_variable>
+
+# include <libbutl/ft/shared_mutex.hxx>
+# if defined(__cpp_lib_shared_mutex) || defined(__cpp_lib_shared_timed_mutex)
+# include <shared_mutex>
+# endif
+#else
+# include <libbutl/mingw-mutex.hxx>
+# include <libbutl/mingw-thread.hxx>
+# include <libbutl/mingw-condition_variable.hxx>
+# include <libbutl/mingw-shared_mutex.hxx>
#endif
#include <ios> // ios_base::failure
@@ -59,6 +68,7 @@
#include <libbutl/target-triplet.hxx>
#include <libbutl/semantic-version.hxx>
#include <libbutl/standard-version.hxx>
+#include <libbutl/move-only-function.hxx>
#include <libbuild2/export.hxx>
@@ -82,9 +92,12 @@ namespace build2
using std::pair;
using std::tuple;
using std::string;
- using std::function;
using std::reference_wrapper;
+ using std::function;
+ using butl::move_only_function;
+ using butl::move_only_function_ex;
+
using strings = std::vector<string>;
using cstrings = std::vector<const char*>;
@@ -102,6 +115,7 @@ namespace build2
using std::multiset;
using std::array;
using std::vector;
+ using std::list;
using butl::vector_view; // <libbutl/vector-view.hxx>
using butl::small_vector; // <libbutl/small-vector.hxx>
@@ -185,20 +199,27 @@ namespace build2
}
#endif
+#ifndef LIBBUTL_MINGW_STDTHREAD
using std::mutex;
using mlock = std::unique_lock<mutex>;
using std::condition_variable;
-#if defined(__cpp_lib_shared_mutex)
+ using std::defer_lock;
+ using std::adopt_lock;
+
+ using std::thread;
+ namespace this_thread = std::this_thread;
+
+# if defined(__cpp_lib_shared_mutex)
using shared_mutex = std::shared_mutex;
using ulock = std::unique_lock<shared_mutex>;
using slock = std::shared_lock<shared_mutex>;
-#elif defined(__cpp_lib_shared_timed_mutex)
+# elif defined(__cpp_lib_shared_timed_mutex)
using shared_mutex = std::shared_timed_mutex;
using ulock = std::unique_lock<shared_mutex>;
using slock = std::shared_lock<shared_mutex>;
-#else
+# else
// Because we have this fallback, we need to be careful not to create
// multiple shared locks in the same thread.
//
@@ -213,13 +234,23 @@ namespace build2
using ulock = std::unique_lock<shared_mutex>;
using slock = ulock;
-#endif
+# endif
+#else // LIBBUTL_MINGW_STDTHREAD
+ using mingw_stdthread::mutex;
+ using mlock = mingw_stdthread::unique_lock<mutex>;
- using std::defer_lock;
- using std::adopt_lock;
+ using mingw_stdthread::condition_variable;
- using std::thread;
- namespace this_thread = std::this_thread;
+ using mingw_stdthread::defer_lock;
+ using mingw_stdthread::adopt_lock;
+
+ using mingw_stdthread::thread;
+ namespace this_thread = mingw_stdthread::this_thread;
+
+ using shared_mutex = mingw_stdthread::shared_mutex;
+ using ulock = mingw_stdthread::unique_lock<shared_mutex>;
+ using slock = mingw_stdthread::shared_lock<shared_mutex>;
+#endif
// Global, MT-safe information cache. Normally used for caching information
// (versions, target triplets, search paths, etc) extracted from other
@@ -317,6 +348,20 @@ namespace build2
using paths = std::vector<path>;
using dir_paths = std::vector<dir_path>;
+ // Path printing potentially relative with trailing slash for directories.
+ //
+ LIBBUILD2_SYMEXPORT ostream&
+ operator<< (ostream&, const path&); // utility.cxx
+
+ inline ostream&
+ operator<< (ostream& os, const dir_path& d) // For overload resolution.
+ {
+ return build2::operator<< (os, static_cast<const path&> (d));
+ }
+
+ LIBBUILD2_SYMEXPORT ostream&
+ operator<< (ostream&, const path_name_view&); // utility.cxx
+
// <libbutl/timestamp.hxx>
//
using butl::system_clock;
@@ -334,8 +379,10 @@ namespace build2
using butl::sha256;
// <libbutl/process.hxx>
+ //
using butl::process;
using butl::process_env;
+ using butl::process_exit;
using butl::process_path;
using butl::process_error;
@@ -381,8 +428,14 @@ namespace build2
process_path_ex () = default;
};
+ // Print as recall[@effect].
+ //
+ LIBBUILD2_SYMEXPORT ostream&
+ operator<< (ostream&, const process_path&); // utility.cxx
+
// <libbutl/fdstream.hxx>
//
+ using butl::nullfd;
using butl::auto_fd;
using butl::fdpipe;
using butl::ifdstream;
@@ -464,9 +517,9 @@ namespace build2
location_value (const location&);
- location_value (location_value&&);
+ location_value (location_value&&) noexcept;
location_value (const location_value&);
- location_value& operator= (location_value&&);
+ location_value& operator= (location_value&&) noexcept;
location_value& operator= (const location_value&);
};
@@ -478,26 +531,6 @@ namespace build2
operator<< (ostream&, run_phase); // utility.cxx
}
-// In order to be found (via ADL) these have to be either in std:: or in
-// butl::. The latter is a bad idea since libbutl includes the default
-// implementation. They are defined in utility.cxx.
-//
-namespace std
-{
- // Path printing potentially relative with trailing slash for directories.
- //
- LIBBUILD2_SYMEXPORT ostream&
- operator<< (ostream&, const ::butl::path&);
-
- LIBBUILD2_SYMEXPORT ostream&
- operator<< (ostream&, const ::butl::path_name_view&);
-
- // Print as recall[@effect].
- //
- LIBBUILD2_SYMEXPORT ostream&
- operator<< (ostream&, const ::butl::process_path&);
-}
-
// <libbuild2/name.hxx>
//
#include <libbuild2/name.hxx>
diff --git a/libbuild2/types.ixx b/libbuild2/types.ixx
index 750c8c7..ee2a605 100644
--- a/libbuild2/types.ixx
+++ b/libbuild2/types.ixx
@@ -10,7 +10,7 @@ namespace build2
{
if (!l.empty ())
{
- o << l.file;
+ build2::operator<< (o, l.file); // Disambiguate.
if (l.line != 0)
{
@@ -43,7 +43,7 @@ namespace build2
}
inline location_value::
- location_value (location_value&& l)
+ location_value (location_value&& l) noexcept
: location (l.line, l.column),
file (std::move (l.file))
{
@@ -58,7 +58,7 @@ namespace build2
}
inline location_value& location_value::
- operator= (location_value&& l)
+ operator= (location_value&& l) noexcept
{
if (this != &l)
{
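
The "Disambiguate" qualification above is presumably needed because the path printers now live in namespace build2 rather than std: unqualified lookup finds the build2 overload while ADL can still pull in libbutl's default one, making a plain `o << l.file` ambiguous. A contrived sketch of that general C++ situation (the namespaces and names below are made up and do not correspond to the real build2/libbutl declarations):

  #include <iostream>

  namespace lib // Plays the role of a library that ships a default printer.
  {
    struct path {};

    std::ostream&
    operator<< (std::ostream& os, const path&) {return os << "default";}
  }

  namespace app // Plays the role of the project with its own printer.
  {
    std::ostream&
    operator<< (std::ostream& os, const lib::path&) {return os << "custom";}

    void
    print (const lib::path& p)
    {
      // std::cout << p;              // Error: ambiguous -- unqualified lookup
                                      // finds app::operator<< while ADL also
                                      // finds lib::operator<<.
      app::operator<< (std::cout, p); // OK: pick the app overload explicitly.
      std::cout << '\n';
    }
  }

  int main () {app::print (lib::path {});}
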
diff --git a/libbuild2/utility-installed.cxx b/libbuild2/utility-installed.cxx
index 441e31b..e23add1 100644
--- a/libbuild2/utility-installed.cxx
+++ b/libbuild2/utility-installed.cxx
@@ -14,6 +14,14 @@ namespace build2
#ifdef BUILD2_INSTALL_LIB
const dir_path build_install_lib (BUILD2_INSTALL_LIB);
#endif
+
+#ifdef BUILD2_INSTALL_BUILDFILE
+ const dir_path build_install_buildfile (BUILD2_INSTALL_BUILDFILE);
+#endif
+
+#ifdef BUILD2_INSTALL_DATA
+ const dir_path build_install_data (BUILD2_INSTALL_DATA);
+#endif
}
#endif
diff --git a/libbuild2/utility-uninstalled.cxx b/libbuild2/utility-uninstalled.cxx
index a6bad55..f836de6 100644
--- a/libbuild2/utility-uninstalled.cxx
+++ b/libbuild2/utility-uninstalled.cxx
@@ -7,4 +7,16 @@ namespace build2
{
const bool build_installed = false;
const dir_path build_install_lib; // Empty.
+
+#ifdef BUILD2_INSTALL_BUILDFILE
+ const dir_path build_install_buildfile (BUILD2_INSTALL_BUILDFILE);
+#else
+ const dir_path build_install_buildfile; // Empty.
+#endif
+
+#ifdef BUILD2_INSTALL_DATA
+ const dir_path build_install_data (BUILD2_INSTALL_DATA);
+#else
+ const dir_path build_install_data; // Empty (during bootstrap).
+#endif
}
diff --git a/libbuild2/utility.cxx b/libbuild2/utility.cxx
index f7f3d41..1135851 100644
--- a/libbuild2/utility.cxx
+++ b/libbuild2/utility.cxx
@@ -3,8 +3,18 @@
#include <libbuild2/utility.hxx>
+#ifndef _WIN32
+# include <signal.h> // signal()
+#else
+# include <libbutl/win32-utility.hxx>
+#endif
+
#include <time.h> // tzset() (POSIX), _tzset() (Windows)
+#ifdef __GLIBCXX__
+# include <locale>
+#endif
+
#include <cerrno> // ENOENT
#include <cstring> // strlen(), str[n]cmp()
#include <iostream> // cerr
@@ -19,7 +29,6 @@
using namespace std;
using namespace butl;
-//
// <libbuild2/types.hxx>
//
namespace build2
@@ -31,12 +40,9 @@ namespace build2
{
return os << run_phase_[static_cast<uint8_t> (p)];
}
-}
-namespace std
-{
ostream&
- operator<< (ostream& os, const ::butl::path& p)
+ operator<< (ostream& os, const path& p)
{
using namespace build2;
@@ -47,7 +53,7 @@ namespace std
}
ostream&
- operator<< (ostream& os, const ::butl::path_name_view& v)
+ operator<< (ostream& os, const path_name_view& v)
{
assert (!v.empty ());
@@ -55,7 +61,7 @@ namespace std
}
ostream&
- operator<< (ostream& os, const ::butl::process_path& p)
+ operator<< (ostream& os, const process_path& p)
{
using namespace build2;
@@ -76,11 +82,55 @@ namespace std
}
}
+// <libbuild2/utility.hxx>
+//
namespace build2
{
- //
- // <libbuild2/utility.hxx>
- //
+ static const char hex_digits[] = "0123456789abcdef";
+
+ string
+ to_string (uint64_t i, int b, size_t w)
+ {
+ // One day we can switch to C++17 std::to_chars().
+ //
+ string r;
+ switch (b)
+ {
+ case 10:
+ {
+ r = to_string (i);
+ if (w > r.size ())
+ r.insert (0, w - r.size (), '0');
+ break;
+ }
+ case 16:
+ {
+ r.reserve (18);
+ r += "0x";
+
+ for (size_t j (64); j != 0; )
+ {
+ j -= 4;
+ size_t d ((i >> j) & 0x0f);
+
+ // Omit leading zeros but watch out for the i==0 corner case.
+ //
+ if (d != 0 || r.size () != 2 || j == 0)
+ r += hex_digits[d];
+ }
+
+ if (w > r.size () - 2)
+ r.insert (2, w - (r.size () - 2), '0');
+
+ break;
+ }
+ default:
+ throw invalid_argument ("unsupported base");
+ }
+
+ return r;
+ }
+
void (*terminate) (bool);
process_path argv0;
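
For reference, a few results of the to_string() overload defined above (decimal or hex, zero-padded to the requested width, which for base 16 excludes the 0x prefix). The sketch declares the function itself rather than pulling in the corresponding header:

  #include <cassert>
  #include <cstddef>
  #include <cstdint>
  #include <string>

  namespace build2
  {
    // Declaration matching the definition above (normally it would come
    // from the corresponding header).
    std::string
    to_string (std::uint64_t, int base, std::size_t width);
  }

  int main ()
  {
    using build2::to_string;

    assert (to_string (42,  10, 4) == "0042");   // Decimal, zero-padded.
    assert (to_string (255, 10, 0) == "255");    // Decimal, no padding.
    assert (to_string (255, 16, 0) == "0xff");   // Hex, no padding requested.
    assert (to_string (255, 16, 4) == "0x00ff"); // Width counts hex digits only.
    assert (to_string (0,   16, 0) == "0x0");    // i == 0 corner case.
  }
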
@@ -126,13 +176,13 @@ namespace build2
if (p.absolute ())
{
if (p == b)
- return cur ? "." + p.separator_string () : string ();
+ return cur ? '.' + p.separator_string () : string ();
#ifndef _WIN32
if (!home.empty ())
{
if (p == home)
- return "~" + p.separator_string ();
+ return '~' + p.separator_string ();
}
#endif
@@ -210,11 +260,10 @@ namespace build2
process
run_start (uint16_t verbosity,
const process_env& pe,
- const char* args[],
+ const char* const* args,
int in,
int out,
- bool err,
- const dir_path& cwd,
+ int err,
const location& l)
try
{
@@ -228,17 +277,15 @@ namespace build2
args,
in,
out,
- (err ? 2 : 1),
- (!cwd.empty ()
- ? cwd.string ().c_str ()
- : pe.cwd != nullptr ? pe.cwd->string ().c_str () : nullptr),
+ err,
+ pe.cwd != nullptr ? pe.cwd->string ().c_str () : nullptr,
pe.vars);
}
catch (const process_error& e)
{
if (e.child)
{
- // Note: run_finish() expects this exact message.
+ // Note: run_finish_impl() below expects this exact message.
//
cerr << "unable to execute " << args[0] << ": " << e << endl;
@@ -253,7 +300,7 @@ namespace build2
}
bool
- run_wait (const char* args[], process& pr, const location& loc)
+ run_wait (const char* const* args, process& pr, const location& loc)
try
{
return pr.wait ();
@@ -264,55 +311,330 @@ namespace build2
}
bool
- run_finish_impl (const char* args[],
+ run_finish_impl (const char* const* args,
process& pr,
- bool err,
+ bool f,
const string& l,
+ uint16_t v,
+ bool omit_normal,
const location& loc)
- try
{
tracer trace ("run_finish");
- if (pr.wait ())
- return true;
-
- const process_exit& e (*pr.exit);
-
- if (!e.normal ())
- fail (loc) << "process " << args[0] << " " << e;
+ try
+ {
+ if (pr.wait ())
+ return true;
+ }
+ catch (const process_error& e)
+ {
+ fail (loc) << "unable to execute " << args[0] << ": " << e << endf;
+ }
- // Normall but non-zero exit status.
+ // Note: see similar code in diag_buffer::close().
+ //
+ const process_exit& pe (*pr.exit);
+ bool ne (pe.normal ());
+
+ // Even if the user redirected the diagnostics, one error that we want to
+ // let through is the inability to execute the program itself. We cannot
+ // reserve a special exit status to signal this so we will just have to
+ // compare the output. In a sense, we treat this as a special case of
+ // abnormal termination. This particular situation will result in a single
+ // error line printed by run_start() above.
//
- if (err)
+ if (ne && l.compare (0, 18, "unable to execute ") == 0)
+ fail (loc) << l;
+
+ if (omit_normal && ne)
+ {
+ // While we assume diagnostics has already been issued (to stderr), if
+ // that's not the case, it's a real pain to debug. So trace it. (And
+ // if you think that doesn't happen in sensible programs, check GCC
+ // bug #107448).
+ //
+ l4 ([&]{trace << "process " << args[0] << " " << pe;});
+ }
+ else
{
- // While we assuming diagnostics has already been issued (to STDERR), if
- // that's not the case, it's a real pain to debug. So trace it.
+ // It's unclear whether we should print this only if printing the
+ // command line (we could also do things differently for normal/abnormal
+ // exit). Let's print this always and see how it wears. Note that we now
+ // rely on this in, for example, process_finish(), extract_metadata().
+ //
+      // Note: make sure to keep the above trace if we decide not to print.
//
- l4 ([&]{trace << "process " << args[0] << " " << e;});
+ diag_record dr;
+ dr << error (loc) << "process " << args[0] << " " << pe;
- throw failed ();
+ if (verb >= 1 && verb <= v)
+ {
+ dr << info << "command line: ";
+ print_process (dr, args);
+ }
}
- // Even if the user asked to suppress diagnostiscs, one error that we
- // want to let through is the inability to execute the program itself.
- // We cannot reserve a special exit status to signal this so we will
- // just have to compare the output. This particular situation will
- // result in a single error line printed by run_start() above.
- //
- if (l.compare (0, 18, "unable to execute ") == 0)
- fail (loc) << l;
+ if (f || !ne)
+ throw failed ();
return false;
}
- catch (const process_error& e)
+
+ bool
+ run_finish_impl (diag_buffer& dbuf,
+ const char* const* args,
+ process& pr,
+ bool f,
+ uint16_t v,
+ bool on,
+ const location& loc)
{
- fail (loc) << "unable to execute " << args[0] << ": " << e << endf;
+ try
+ {
+ pr.wait ();
+ }
+ catch (const process_error& e)
+ {
+ fail (loc) << "unable to execute " << args[0] << ": " << e << endf;
+ }
+
+ const process_exit& pe (*pr.exit);
+
+ dbuf.close (args, pe, v, on, loc);
+
+ if (pe)
+ return true;
+
+ if (f || !pe.normal ())
+ throw failed ();
+
+ return false;
}
void
- run_io_error (const char* args[], const io_error& e)
+ run (context& ctx,
+ const process_env& pe,
+ const char* const* args,
+ uint16_t v)
{
- fail << "io error reading " << args[0] << " output: " << e << endf;
+ if (ctx.phase == run_phase::load)
+ {
+ process pr (run_start (pe, args));
+ run_finish (args, pr, v);
+ }
+ else
+ {
+ process pr (run_start (pe,
+ args,
+ 0 /* stdin */,
+ 1 /* stdout */,
+ diag_buffer::pipe (ctx) /* stderr */));
+ diag_buffer dbuf (ctx, args[0], pr);
+ dbuf.read ();
+ run_finish (dbuf, args, pr, v);
+ }
+ }
+
+ bool
+ run (context& ctx,
+ uint16_t verbosity,
+ const process_env& pe,
+ const char* const* args,
+ uint16_t finish_verbosity,
+ const function<bool (string&, bool)>& f,
+ bool tr,
+ bool err,
+ bool ignore_exit,
+ sha256* checksum)
+ {
+ assert (!err || !ignore_exit);
+
+ if (!err || ctx.phase == run_phase::load)
+ {
+ process pr (run_start (verbosity,
+ pe,
+ args,
+ 0 /* stdin */,
+ -1 /* stdout */,
+ err ? 2 : 1 /* stderr */));
+
+ string l; // Last line of output.
+ try
+ {
+ ifdstream is (move (pr.in_ofd), fdstream_mode::skip);
+
+ bool empty (true);
+
+ // Make sure we keep the last line.
+ //
+ for (bool last (is.peek () == ifdstream::traits_type::eof ());
+ !last && getline (is, l); )
+ {
+ last = (is.peek () == ifdstream::traits_type::eof ());
+
+ if (tr)
+ trim (l);
+
+ if (checksum != nullptr)
+ checksum->append (l);
+
+ if (empty)
+ {
+ empty = f (l, last);
+
+ if (!empty && checksum == nullptr)
+ break;
+ }
+ }
+
+ is.close ();
+ }
+ catch (const io_error& e)
+ {
+ if (run_wait (args, pr))
+ fail << "io error reading " << args[0] << " output: " << e << endf;
+
+ // If the child process has failed then assume the io error was
+ // caused by that and let run_finish() deal with it.
+ }
+
+ // Omit normal exit code diagnostics if err is false.
+ //
+ if (!(run_finish_impl (args, pr, err, l, finish_verbosity, !err) ||
+ ignore_exit))
+ return false;
+ }
+ else
+ {
+ // We have to use the non-blocking setup since we have to read from stdout
+ // and stderr simultaneously.
+ //
+ process pr (run_start (verbosity,
+ pe,
+ args,
+ 0 /* stdin */,
+ -1 /* stdout */,
+ diag_buffer::pipe (ctx) /* stderr */));
+
+ // Note that while we read both streams until eof in the normal
+ // circumstances, we cannot use fdstream_mode::skip for the exception
+ // case on both of them: we may end up being blocked trying to read one
+ // stream while the process may be blocked writing to the other. So in
+ // case of an exception we only skip the diagnostics and close stdout
+ // hard. The latter should happen first so the order of the dbuf/is
+ // variables is important.
+ //
+ diag_buffer dbuf (ctx, args[0], pr, (fdstream_mode::non_blocking |
+ fdstream_mode::skip));
+ try
+ {
+ ifdstream is (move (pr.in_ofd),
+ fdstream_mode::non_blocking,
+ ifdstream::badbit);
+
+ bool empty (true);
+
+ // Read until we reach EOF on all streams.
+ //
+ // Note that if dbuf is not opened, then we automatically get an
+ // inactive nullfd entry.
+ //
+ fdselect_set fds {is.fd (), dbuf.is.fd ()};
+ fdselect_state& ist (fds[0]);
+ fdselect_state& dst (fds[1]);
+
+        // To detect the last line we are going to keep the previous line and
+        // only call the function once we've read the next.
+ //
+ optional<string> pl;
+
+ for (string l; ist.fd != nullfd || dst.fd != nullfd; )
+ {
+ if (ist.fd != nullfd && getline_non_blocking (is, l))
+ {
+ if (eof (is))
+ {
+ if (pl && empty)
+ f (*pl, true /* last */);
+
+ ist.fd = nullfd;
+ }
+ else
+ {
+ if (checksum != nullptr || empty)
+ {
+ if (tr)
+ trim (l);
+
+ if (checksum != nullptr)
+ checksum->append (l);
+
+ if (empty)
+ {
+ if (pl)
+ {
+ if ((empty = f (*pl, false /* last */)))
+ swap (l, *pl);
+
+ // Note that we cannot bail out like in the other version
+ // since we don't have the skip mode on is. Plus, we might
+ // still have the diagnostics.
+ }
+ else
+ pl = move (l);
+ }
+ }
+
+ l.clear ();
+ }
+
+ continue;
+ }
+
+ ifdselect (fds);
+
+ if (dst.ready)
+ {
+ if (!dbuf.read ())
+ dst.fd = nullfd;
+ }
+ }
+
+ is.close ();
+ }
+ catch (const io_error& e)
+ {
+ if (run_wait (args, pr))
+ {
+ // Note that we will drop the diagnostics in this case since reading
+ // it could have been the cause of this error.
+ //
+ fail << "io error reading " << args[0] << " output: " << e << endf;
+ }
+
+ // If the child process has failed then assume the io error was caused
+ // by that and let run_finish() deal with it.
+ }
+
+ run_finish_impl (dbuf, args, pr, true /* fail */, finish_verbosity);
+ }
+
+ return true;
+ }
+
+ cstrings
+ process_args (const char* program, const strings& args)
+ {
+ cstrings r;
+ r.reserve (args.size () + 2);
+
+ r.push_back (program);
+
+ for (const string& a: args)
+ r.push_back (a.c_str ());
+
+ r.push_back (nullptr);
+ return r;
}
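
  For illustration, a minimal usage sketch (the program name and arguments here are
  hypothetical): process_args() produces the NULL-terminated argv-style array that the
  run_start()/run_finish() pair expects, with the strings remaining owned by the caller.

      strings as {"-f", "input.txt"};
      cstrings args (process_args ("myprog", as)); // {"myprog", "-f", "input.txt", nullptr}

      process pr (run_start (pp, args));           // pp: previously searched process_path.
      run_finish (args, pr, 1 /* verbosity */);
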
fdpipe
@@ -556,8 +878,73 @@ namespace build2
}
void
+ init_process ()
+ {
+    // This is a little hack to make our baseutils for Windows work when
+    // called with an absolute path. In a nutshell, MSYS2's exec*p() doesn't
+ // search in the parent's executable directory, only in PATH. And since we
+ // are running without a shell (that would read /etc/profile which sets
+ // PATH to some sensible values), we are only getting Win32 PATH values.
+ // And MSYS2 /bin is not one of them. So what we are going to do is add
+ // /bin at the end of PATH (which will be passed as is by the MSYS2
+ // machinery). This will make MSYS2 search in /bin (where our baseutils
+ // live). And for everyone else this should be harmless since it is not a
+ // valid Win32 path.
+ //
+#ifdef _WIN32
+ {
+ string mp;
+ if (optional<string> p = getenv ("PATH"))
+ {
+ mp = move (*p);
+ mp += ';';
+ }
+ mp += "/bin";
+
+ setenv ("PATH", mp);
+ }
+#endif
+
+ // On POSIX ignore SIGPIPE which is signaled to a pipe-writing process if
+ // the pipe reading end is closed. Note that by default this signal
+ // terminates a process. Also note that there is no way to disable this
+ // behavior on a file descriptor basis or for the write() function call.
+ //
+#ifndef _WIN32
+ if (signal (SIGPIPE, SIG_IGN) == SIG_ERR)
+ fail << "unable to ignore broken pipe (SIGPIPE) signal: "
+ << system_error (errno, generic_category ()); // Sanitize.
+#endif
+
+ // Initialize time conversion data that is used by localtime_r().
+ //
+#ifndef _WIN32
+ tzset ();
+#else
+ _tzset ();
+#endif
+
+ // A data race happens in the libstdc++ (as of GCC 7.2) implementation of
+ // the ctype<char>::narrow() function (bug #77704). The issue is easily
+ // triggered by the testscript runner that indirectly (via regex) uses
+ // ctype<char> facet of the global locale (and can potentially be
+ // triggered by other locale-aware code). We work around this by
+ // pre-initializing the global locale facet internal cache.
+ //
+#ifdef __GLIBCXX__
+ {
+ const ctype<char>& ct (use_facet<ctype<char>> (locale ()));
+
+ for (size_t i (0); i != 256; ++i)
+ ct.narrow (static_cast<char> (i), '\0');
+ }
+#endif
+ }
+
+ void
init (void (*t) (bool),
const char* a0,
+ bool ss,
optional<bool> mc,
optional<path> cs,
optional<path> cg)
@@ -592,6 +979,18 @@ namespace build2
}
script::regex::init ();
+
+ if (!ss)
+ {
+#ifdef _WIN32
+      // On Windows disable displaying the error reporting dialog box for the
+      // current and child processes unless we are in the stop mode. Failing
+      // that, we may have multiple dialog boxes popping up.
+ //
+ SetErrorMode (SetErrorMode (0) | // Returns the current mode.
+ SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX);
+#endif
+ }
}
optional<uint64_t>
diff --git a/libbuild2/utility.hxx b/libbuild2/utility.hxx
index b62d2ab..b534f41 100644
--- a/libbuild2/utility.hxx
+++ b/libbuild2/utility.hxx
@@ -4,14 +4,15 @@
#ifndef LIBBUILD2_UTILITY_HXX
#define LIBBUILD2_UTILITY_HXX
-#include <tuple> // make_tuple()
-#include <memory> // make_shared()
-#include <string> // to_string()
-#include <utility> // move(), forward(), declval(), make_pair(), swap()
-#include <cassert> // assert()
-#include <iterator> // make_move_iterator()
-#include <algorithm> // *
-#include <functional> // ref(), cref()
+#include <tuple> // make_tuple()
+#include <memory> // make_shared()
+#include <string> // to_string()
+#include <utility> // move(), forward(), declval(), make_pair(), swap()
+#include <cassert> // assert()
+#include <iterator> // make_move_iterator(), back_inserter()
+#include <algorithm> // *
+#include <functional> // ref(), cref()
+#include <type_traits>
#include <libbutl/ft/lang.hxx>
@@ -50,10 +51,17 @@ namespace build2
using std::make_tuple;
using std::make_shared;
using std::make_move_iterator;
- using std::to_string;
+ using std::back_inserter;
using std::stoul;
using std::stoull;
+ using std::to_string;
+
+ // Currently only supports base 10 and 16. Note: adds `0x` if base 16.
+ //
+ LIBBUILD2_SYMEXPORT string
+ to_string (uint64_t, int base, size_t width = 0);
+
// <libbutl/utility.hxx>
//
using butl::reverse_iterate;
@@ -69,6 +77,7 @@ namespace build2
using butl::alpha;
using butl::alnum;
using butl::digit;
+ using butl::wspace;
using butl::trim;
using butl::next_word;
@@ -90,12 +99,27 @@ namespace build2
// <libbutl/fdstream.hxx>
//
+ using butl::fdopen_null;
using butl::open_file_or_stdin;
using butl::open_file_or_stdout;
// <libbutl/path-pattern.hxx>
//
using butl::path_pattern;
+ using butl::path_match;
+
+  // Perform process-wide initializations/adjustments/workarounds. Should be
+  // called once early in main(). In particular, among other things, this
+  // function does the following:
+ //
+ // - Sets PATH to include baseutils /bin on Windows.
+ //
+ // - Ignores SIGPIPE.
+ //
+ // - Calls tzset().
+ //
+ LIBBUILD2_SYMEXPORT void
+ init_process ();
// Diagnostics state (verbosity level, etc; see <libbuild2/diagnostics.hxx>).
//
@@ -113,6 +137,7 @@ namespace build2
init_diag (uint16_t verbosity,
bool silent = false,
optional<bool> progress = nullopt,
+ optional<bool> diag_color = nullopt,
bool no_lines = false,
bool no_columns = false,
bool stderr_term = false);
@@ -122,13 +147,21 @@ namespace build2
LIBBUILD2_SYMEXPORT extern bool silent;
// --[no-]progress
+ // --[no-]diag-color
//
LIBBUILD2_SYMEXPORT extern optional<bool> diag_progress_option;
+ LIBBUILD2_SYMEXPORT extern optional<bool> diag_color_option;
LIBBUILD2_SYMEXPORT extern bool diag_no_line; // --no-line
LIBBUILD2_SYMEXPORT extern bool diag_no_column; // --no-column
- LIBBUILD2_SYMEXPORT extern bool stderr_term; // True if stderr is a terminal.
+ // True if stderr is a terminal.
+ //
+ LIBBUILD2_SYMEXPORT extern bool stderr_term;
+
+ // True if the color can be used on the stderr terminal.
+ //
+ LIBBUILD2_SYMEXPORT extern bool stderr_term_color;
// Global state (verbosity, home/work directories, etc).
@@ -138,6 +171,7 @@ namespace build2
LIBBUILD2_SYMEXPORT void
init (void (*terminate) (bool),
const char* argv0,
+ bool serial_stop,
optional<bool> mtime_check = nullopt,
optional<path> config_sub = nullopt,
optional<path> config_guess = nullopt);
@@ -156,11 +190,15 @@ namespace build2
LIBBUILD2_SYMEXPORT extern const standard_version build_version;
LIBBUILD2_SYMEXPORT extern const string build_version_interface;
- // Whether running installed build and, if so, the library installation
- // directory (empty otherwise).
+ // Whether running installed build as well as the library installation
+ // directory (only if installed, empty otherwise), the exported buildfile
+ // installation directory (only if configured, empty otherwise), and data
+ // installation directory (only if installed, src_root otherwise).
//
LIBBUILD2_SYMEXPORT extern const bool build_installed;
LIBBUILD2_SYMEXPORT extern const dir_path build_install_lib; // $install.lib
+ LIBBUILD2_SYMEXPORT extern const dir_path build_install_buildfile; // $install.buildfile
+ LIBBUILD2_SYMEXPORT extern const dir_path build_install_data; // $install.data
// --[no-]mtime-check
//
@@ -211,7 +249,7 @@ namespace build2
// Basic process utilities.
//
- // The run*() functions with process_path assume that you are printing
+ // The run*() functions with process_path/_env assume that you are printing
// the process command line yourself.
// Search for a process executable. Issue diagnostics and throw failed in
@@ -245,126 +283,55 @@ namespace build2
[[noreturn]] LIBBUILD2_SYMEXPORT void
run_search_fail (const path&, const location& = location ());
- // Wait for process termination returning true if the process exited
- // normally with a zero code and false otherwise. The latter case is
- // normally followed up with a call to run_finish().
- //
- LIBBUILD2_SYMEXPORT bool
- run_wait (const char* args[], process&, const location& = location ());
-
- bool
- run_wait (cstrings& args, process&, const location& = location ());
-
- // Wait for process termination. Issue diagnostics and throw failed in case
- // of abnormal termination. If the process has terminated normally but with
- // a non-zero exit status, then assume the diagnostics has already been
- // issued and just throw failed. The last argument is used in cooperation
- // with run_start() in case STDERR is redirected to STDOUT.
- //
- void
- run_finish (const char* args[],
- process&,
- const string& = string (),
- const location& = location ());
-
- void
- run_finish (cstrings& args, process& pr, const location& l = location ());
-
- // As above but if the process has exited normally with a non-zero code,
- // then return false rather than throwing.
- //
- bool
- run_finish_code (const char* args[],
- process&,
- const string& = string (),
- const location& = location ());
-
- // Start a process with the specified arguments. If in is -1, then redirect
- // STDIN to a pipe (can also be -2 to redirect to /dev/null or equivalent).
- // If out is -1, redirect STDOUT to a pipe. If error is false, then
- // redirecting STDERR to STDOUT (this can be used to suppress diagnostics
- // from the child process). Issue diagnostics and throw failed in case of an
- // error.
+ // Start a process with the specified arguments. Issue diagnostics and throw
+ // failed in case of an error. If in is -1, then redirect stdin to a pipe
+ // (can also be -2 to redirect it to /dev/null or equivalent). If out is -1,
+ // then redirect stdout to a pipe. If stderr is redirected to stdout (can
+ // be used to analyze diagnostics from the child process), then, in case of
+ // an error, the last line read from stdout must be passed to run_finish()
+ // below.
//
LIBBUILD2_SYMEXPORT process
run_start (uint16_t verbosity,
const process_env&, // Implicit-constructible from process_path.
- const char* args[],
+ const char* const* args,
int in = 0,
int out = 1,
- bool error = true,
- const dir_path& cwd = dir_path (),
- const location& = location ());
+ int err = 2,
+ const location& = {});
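
  A rough sketch of the stderr-to-stdout convention described above (pp and the
  NULL-terminated args are assumed to represent the verbosity level 2 command line;
  the read loop is simplified):

      process pr (run_start (2  /* verbosity */,
                             pp, args,
                             0  /* stdin  */,
                             -1 /* stdout */,
                             1  /* stderr: redirect to stdout */));

      ifdstream is (move (pr.in_ofd), fdstream_mode::skip);

      string l; // Last line of output, passed to run_finish() below.
      for (string s; getline (is, s); )
        l = move (s);

      is.close ();
      run_finish (args, pr, l, 1 /* verbosity */);
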
inline process
run_start (uint16_t verbosity,
const process_env& pe,
- cstrings& args,
+ const cstrings& args,
int in = 0,
int out = 1,
- bool error = true,
- const dir_path& cwd = dir_path (),
- const location& l = location ())
+ int err = 2,
+ const location& l = {})
{
- return run_start (verbosity, pe, args.data (), in, out, error, cwd, l);
+ return run_start (verbosity, pe, args.data (), in, out, err, l);
}
inline process
run_start (const process_env& pe,
- const char* args[],
+ const char* const* args,
int in = 0,
int out = 1,
- bool error = true,
- const dir_path& cwd = dir_path (),
- const location& l = location ())
+ int err = 2,
+ const location& l = {})
{
- return run_start (verb_never, pe, args, in, out, error, cwd, l);
+ return run_start (verb_never, pe, args, in, out, err, l);
}
inline process
run_start (const process_env& pe,
- cstrings& args,
+ const cstrings& args,
int in = 0,
int out = 1,
- bool error = true,
- const dir_path& cwd = dir_path (),
- const location& l = location ())
- {
- return run_start (pe, args.data (), in, out, error, cwd, l);
- }
-
- inline void
- run (const process_env& pe, // Implicit-constructible from process_path.
- const char* args[])
+ int err = 2,
+ const location& l = {})
{
- process pr (run_start (pe, args));
- run_finish (args, pr);
- }
-
- inline void
- run (const process_env& pe, // Implicit-constructible from process_path.
- cstrings& args)
- {
- run (pe, args.data ());
- }
-
- inline void
- run (const process_path& p,
- const char* args[],
- const dir_path& cwd,
- const char* const* env = nullptr)
- {
- process pr (run_start (process_env (p, env), args, 0, 1, true, cwd));
- run_finish (args, pr);
- }
-
- inline void
- run (const process_path& p,
- cstrings& args,
- const dir_path& cwd,
- const char* const* env = nullptr)
- {
- run (p, args.data (), cwd, env);
+ return run_start (pe, args.data (), in, out, err, l);
}
// As above, but search for the process (including updating args[0]) and
@@ -375,16 +342,16 @@ namespace build2
const char* args[],
int in = 0,
int out = 1,
- bool error = true,
- const dir_path& cwd = dir_path (),
+ int err = 2,
const char* const* env = nullptr,
- const location& l = location ())
+ const dir_path& cwd = {},
+ const location& l = {})
{
process_path pp (run_search (args[0], l));
return run_start (verbosity,
- process_env (pp, env), args,
- in, out, error,
- cwd, l);
+ process_env (pp, cwd, env), args,
+ in, out, err,
+ l);
}
inline process
@@ -392,55 +359,215 @@ namespace build2
cstrings& args,
int in = 0,
int out = 1,
- bool error = true,
- const dir_path& cwd = dir_path (),
+ int err = 2,
const char* const* env = nullptr,
- const location& l = location ())
+ const dir_path& cwd = {},
+ const location& l = {})
{
- return run_start (verbosity, args.data (), in, out, error, cwd, env, l);
+ return run_start (verbosity, args.data (), in, out, err, env, cwd, l);
}
+ // Wait for process termination returning true if the process exited
+ // normally with a zero code and false otherwise. The latter case is
+ // normally followed up with a call to run_finish().
+ //
+ LIBBUILD2_SYMEXPORT bool
+ run_wait (const char* const* args, process&, const location& = location ());
+
+ bool
+ run_wait (const cstrings& args, process&, const location& = location ());
+
+  // Wait for process termination, issue diagnostics, and throw failed.
+ //
+ // If the child process exited abnormally or normally with non-0 code, then
+ // print the error diagnostics to this effect. Additionally, if the
+ // verbosity level is between 1 and the specified value, then print the
+  // command line as info after the error. If omit_normal is true, then don't
+  // print either of these for a normal (non-0) exit (usually used for custom
+  // diagnostics or when process failure can be tolerated).
+ //
+ // Normally the specified verbosity will be 1 and the command line args
+ // represent the verbosity level 2 (logical) command line. Or, to put it
+ // another way, it should be 1 less than what gets passed to run_start().
+ // Note that args should only represent a single command in a pipe (see
+ // print_process() for details).
+ //
+ // See also diag_buffer::close().
+ //
+ // The line argument is used in cooperation with run_start() to diagnose a
+ // failure to exec in case stderr is redirected to stdout (see the
+ // implementation for details).
+ //
+ void
+ run_finish (const char* const* args,
+ process&,
+ uint16_t verbosity,
+ bool omit_normal = false,
+ const location& = location ());
+
+ void
+ run_finish (const cstrings& args,
+ process&,
+ uint16_t verbosity,
+ bool omit_normal = false,
+ const location& = location ());
+
+ void
+ run_finish (const char* const* args,
+ process&,
+ const string& line,
+ uint16_t verbosity,
+ bool omit_normal = false,
+ const location& = location ());
+
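  Concretely, under the convention described above, a process whose command line is
  printed at verbosity 2 by run_start() would typically be finished like this (a
  sketch, assuming pp and args are already set up):

      process pr (run_start (2 /* verbosity */, pp, args));
      run_finish (args, pr, 1 /* verbosity */); // On error, re-prints args as info only at verb == 1.
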
+ // As above but if the process has exited normally with a non-zero code,
+ // then return false rather than throwing.
+ //
+ // Note that the normal non-0 exit diagnostics is omitted by default
+ // assuming appropriate custom diagnostics will be issued, if required.
+ //
+ bool
+ run_finish_code (const char* const* args,
+ process&,
+ uint16_t verbosity,
+ bool omit_normal = true,
+ const location& = location ());
+
+ bool
+ run_finish_code (const cstrings& args,
+ process&,
+ uint16_t verbosity,
+ bool omit_normal = true,
+ const location& = location ());
+
+ bool
+ run_finish_code (const char* const* args,
+ process&,
+ const string&,
+ uint16_t verbosity,
+ bool omit_normal = true,
+ const location& = location ());
+
+ // As above but with diagnostics buffering.
+ //
+ // Specifically, this version first waits for the process termination, then
+ // calls diag_buffer::close(verbosity, omit_normal), and finally throws
+ // failed if the process didn't exit with 0 code.
+ //
+ class diag_buffer;
+
+ void
+ run_finish (diag_buffer&,
+ const char* const* args,
+ process&,
+ uint16_t verbosity,
+ bool omit_normal = false,
+ const location& = location ());
+
+ void
+ run_finish (diag_buffer&,
+ const cstrings& args,
+ process&,
+ uint16_t verbosity,
+ bool omit_normal = false,
+ const location& = location ());
+
+ // As above but if the process has exited normally with a non-zero code,
+ // then return false rather than throwing.
+ //
+ // Note that the normal non-0 exit diagnostics is omitted by default
+ // assuming appropriate custom diagnostics will be issued, if required.
+ //
+ bool
+ run_finish_code (diag_buffer&,
+ const char* const* args,
+ process&,
+ uint16_t verbosity,
+ bool omit_normal = true,
+ const location& = location ());
+
+ bool
+ run_finish_code (diag_buffer&,
+ const cstrings& args,
+ process&,
+ uint16_t verbosity,
+ bool omit_normal = true,
+ const location& = location ());
+
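  A condensed sketch of the buffered variant, mirroring the pattern used by the run()
  implementation in utility.cxx above (ctx, pp, and args are assumed):

      process pr (run_start (pp, args,
                             0 /* stdin */,
                             1 /* stdout */,
                             diag_buffer::pipe (ctx) /* stderr */));

      diag_buffer dbuf (ctx, args[0], pr);
      dbuf.read ();
      run_finish (dbuf, args, pr, 1 /* verbosity */);
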
+ // Run the process with the specified arguments by calling the above start
+ // and finish functions. Buffer diagnostics unless in the load phase.
+ //
+ LIBBUILD2_SYMEXPORT void
+ run (context&,
+ const process_env& pe, // Implicit-constructible from process_path.
+ const char* const* args,
+ uint16_t finish_verbosity);
+
inline void
- run (uint16_t verbosity,
- const char* args[],
- const dir_path& cwd = dir_path (),
- const char* const* env = nullptr)
+ run (context& ctx,
+ const process_env& pe,
+ const cstrings& args,
+ uint16_t finish_verbosity)
{
- process pr (run_start (verbosity, args, 0, 1, true, cwd, env));
- run_finish (args, pr);
+ run (ctx, pe, args.data (), finish_verbosity);
}
+ // As above but pass cwd/env vars as arguments rather than as part of
+ // process_env.
+ //
inline void
- run (uint16_t verbosity,
- cstrings& args,
- const dir_path& cwd = dir_path (),
- const char* const* env = nullptr)
+ run (context& ctx,
+ const process_path& p,
+ const char* const* args,
+ uint16_t finish_verbosity,
+ const char* const* env,
+ const dir_path& cwd = {})
{
- run (verbosity, args.data (), cwd, env);
+ run (ctx, process_env (p, cwd, env), args, finish_verbosity);
+ }
+
+ inline void
+ run (context& ctx,
+ const process_path& p,
+ const cstrings& args,
+ uint16_t finish_verbosity,
+ const char* const* env,
+ const dir_path& cwd = {})
+ {
+ run (ctx, p, args.data (), finish_verbosity, env, cwd);
}
// Start the process as above and then call the specified function on each
// trimmed line of the output until it returns a non-empty object T (tested
// with T::empty()) which is then returned to the caller.
//
+  // If verbosity is specified, print the process command line at that level
+  // (with the verbosity-1 value passed to run_finish()); see the usage sketch
+  // further below.
+  //
+  // If error is false, then redirect stderr to stdout and don't fail if the
+  // process exits normally but with a non-0 code (can be used to suppress
+  // and/or analyze diagnostics from the child process). Otherwise, buffer
+  // diagnostics unless in the load phase.
+ //
// The predicate can move the value out of the passed string but, if error
// is false, only in case of a "content match" (so that any diagnostics
// lines are left intact). The function signature should be:
//
// T (string& line, bool last)
//
- // If ignore_exit is true, then the program's exit status is ignored (if it
- // is false and the program exits with the non-zero status, then an empty T
- // instance is returned).
+ // If, in addition to error being false, ignore_exit is true, then the
+ // program's normal exit status is ignored (if it is false and the program
+ // exits with the non-zero status, then an empty T instance is returned).
//
// If checksum is not NULL, then feed it the content of each trimmed line
// (including those that come after the callback returns non-empty object).
//
template <typename T, typename F>
T
- run (uint16_t verbosity,
+ run (context&,
+ uint16_t verbosity,
const process_env&, // Implicit-constructible from process_path.
- const char* args[],
+ const char* const* args,
F&&,
bool error = true,
bool ignore_exit = false,
@@ -448,20 +575,55 @@ namespace build2
template <typename T, typename F>
inline T
- run (const process_env& pe, // Implicit-constructible from process_path.
- const char* args[],
+ run (context& ctx,
+ uint16_t verbosity,
+ const process_env& pe,
+ const cstrings& args,
+ F&& f,
+ bool error = true,
+ bool ignore_exit = false,
+ sha256* checksum = nullptr)
+ {
+ return run<T> (ctx,
+ verbosity,
+ pe, args.data (),
+ forward<F> (f),
+ error, ignore_exit, checksum);
+ }
+
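  For example, a minimal sketch of the T/F interface documented above (the program
  name is hypothetical): run `foo --version` at verbosity 3 and return the first line
  of its output.

      const char* args[] = {"foo", "--version", nullptr};
      process_path pp (run_search (args[0]));

      string v (
        run<string> (ctx,
                     3 /* verbosity */,
                     pp, args,
                     [] (string& l, bool /* last */)
                     {
                       return move (l); // The first line is all we need.
                     }));
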
+ template <typename T, typename F>
+ inline T
+ run (context&,
+ const process_env&,
+ const char* const* args,
+ uint16_t finish_verbosity,
+ F&&,
+ bool error = true,
+ bool ignore_exit = false,
+ sha256* checksum = nullptr);
+
+ template <typename T, typename F>
+ inline T
+ run (context& ctx,
+ const process_env& pe,
+ const cstrings& args,
+ uint16_t finish_verbosity,
F&& f,
bool error = true,
bool ignore_exit = false,
sha256* checksum = nullptr)
{
- return run<T> (
- verb_never, pe, args, forward<F> (f), error, ignore_exit, checksum);
+ return run<T> (ctx,
+ pe, args.data (),
+ finish_verbosity,
+ forward<F> (f),
+ error, ignore_exit, checksum);
}
template <typename T, typename F>
inline T
- run (uint16_t verbosity,
+ run (context& ctx,
+ uint16_t verbosity,
const char* args[],
F&& f,
bool error = true,
@@ -469,15 +631,38 @@ namespace build2
sha256* checksum = nullptr)
{
process_path pp (run_search (args[0]));
- return run<T> (
- verbosity, pp, args, forward<F> (f), error, ignore_exit, checksum);
+ return run<T> (ctx,
+ verbosity,
+ pp, args,
+ forward<F> (f),
+ error, ignore_exit, checksum);
+ }
+
+ template <typename T, typename F>
+ inline T
+ run (context& ctx,
+ uint16_t verbosity,
+ cstrings& args,
+ F&& f,
+ bool error = true,
+ bool ignore_exit = false,
+ sha256* checksum = nullptr)
+ {
+ return run<T> (ctx,
+ verbosity,
+ args.data (),
+ forward<F> (f),
+ error, ignore_exit, checksum);
}
+ // As above but run a program without any arguments or with one argument.
+ //
// run <prog>
//
template <typename T, typename F>
inline T
- run (uint16_t verbosity,
+ run (context& ctx,
+ uint16_t verbosity,
const path& prog,
F&& f,
bool error = true,
@@ -485,13 +670,20 @@ namespace build2
sha256* checksum = nullptr)
{
const char* args[] = {prog.string ().c_str (), nullptr};
- return run<T> (
- verbosity, args, forward<F> (f), error, ignore_exit, checksum);
+ return run<T> (ctx,
+ verbosity,
+ args,
+ forward<F> (f),
+ error, ignore_exit, checksum);
}
template <typename T, typename F>
- inline T
- run (uint16_t verbosity,
+ inline typename std::enable_if<
+ (!std::is_same<typename std::decay<F>::type, const char**>::value &&
+ !std::is_same<typename std::remove_reference<F>::type, cstrings>::value),
+ T>::type
+ run (context& ctx,
+ uint16_t verbosity,
const process_env& pe, // Implicit-constructible from process_path.
F&& f,
bool error = true,
@@ -499,15 +691,19 @@ namespace build2
sha256* checksum = nullptr)
{
const char* args[] = {pe.path->recall_string (), nullptr};
- return run<T> (
- verbosity, pe, args, forward<F> (f), error, ignore_exit, checksum);
+ return run<T> (ctx,
+ verbosity,
+ pe, args,
+ forward<F> (f),
+ error, ignore_exit, checksum);
}
// run <prog> <arg>
//
template <typename T, typename F>
inline T
- run (uint16_t verbosity,
+ run (context& ctx,
+ uint16_t verbosity,
const path& prog,
const char* arg,
F&& f,
@@ -516,13 +712,17 @@ namespace build2
sha256* checksum = nullptr)
{
const char* args[] = {prog.string ().c_str (), arg, nullptr};
- return run<T> (
- verbosity, args, forward<F> (f), error, ignore_exit, checksum);
+ return run<T> (ctx,
+ verbosity,
+ args,
+ forward<F> (f),
+ error, ignore_exit, checksum);
}
template <typename T, typename F>
inline T
- run (uint16_t verbosity,
+ run (context& ctx,
+ uint16_t verbosity,
const process_env& pe, // Implicit-constructible from process_path.
const char* arg,
F&& f,
@@ -531,8 +731,47 @@ namespace build2
sha256* checksum = nullptr)
{
const char* args[] = {pe.path->recall_string (), arg, nullptr};
- return run<T> (
- verbosity, pe, args, forward<F> (f), error, ignore_exit, checksum);
+ return run<T> (ctx,
+ verbosity,
+ pe, args,
+ forward<F> (f),
+ error, ignore_exit, checksum);
+ }
+
+ // As above but a lower-level interface that erases T and F and can also be
+ // used to suppress trimming.
+ //
+ // The passed function should return true if it should be called again
+ // (i.e., the object is still empty in the T & F interface) and false
+ // otherwise.
+ //
+  // The first version returns true if the result is usable and false
+  // otherwise, depending on the process exit code and the error/ignore_exit
+  // values. (In the latter case, the T & F interface makes the resulting
+  // object empty.)
+ //
+ LIBBUILD2_SYMEXPORT bool
+ run (context&,
+ uint16_t verbosity,
+ const process_env&,
+ const char* const* args,
+ uint16_t finish_verbosity,
+ const function<bool (string& line, bool last)>&,
+ bool trim = true,
+ bool error = true,
+ bool ignore_exit = false,
+ sha256* checksum = nullptr);
+
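  A sketch of this erased interface under the same assumptions as the previous
  example, collecting every line of output without trimming:

      strings lines;

      run (ctx,
           3 /* verbosity */,
           pp, args,
           2 /* finish_verbosity */,
           [&lines] (string& l, bool /* last */)
           {
             lines.push_back (move (l));
             return true; // Keep reading.
           },
           false /* trim */);
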
+ // Concatenate the program path and arguments into a shallow NULL-terminated
+ // vector of C-strings.
+ //
+ LIBBUILD2_SYMEXPORT cstrings
+ process_args (const char* program, const strings& args);
+
+ inline cstrings
+ process_args (const string& program, const strings& args)
+ {
+ return process_args (program.c_str (), args);
}
// File descriptor streams.
diff --git a/libbuild2/utility.ixx b/libbuild2/utility.ixx
index aedfc94..58ea8db 100644
--- a/libbuild2/utility.ixx
+++ b/libbuild2/utility.ixx
@@ -6,42 +6,195 @@
namespace build2
{
inline bool
- run_wait (cstrings& args, process& pr, const location& loc)
+ run_wait (const cstrings& args, process& pr, const location& loc)
{
return run_wait (args.data (), pr, loc);
}
- // Note: currently this function is also used in a run() implementations.
+ // Note: these functions are also used in the run() implementations.
//
LIBBUILD2_SYMEXPORT bool
- run_finish_impl (const char*[],
+ run_finish_impl (const char* const*,
process&,
- bool error,
+ bool fail,
const string&,
- const location& = location ());
+ uint16_t,
+ bool = false,
+ const location& = {});
+
+ LIBBUILD2_SYMEXPORT bool
+ run_finish_impl (diag_buffer&,
+ const char* const*,
+ process&,
+ bool fail,
+ uint16_t,
+ bool = false,
+ const location& = {});
inline void
- run_finish (const char* args[],
+ run_finish (const char* const* args,
process& pr,
- const string& l,
+ uint16_t v,
+ bool on,
const location& loc)
{
- run_finish_impl (args, pr, true /* error */, l, loc);
+ run_finish_impl (args, pr, true /* fail */, string (), v, on, loc);
}
inline void
- run_finish (cstrings& args, process& pr, const location& loc)
+ run_finish (const cstrings& args,
+ process& pr,
+ uint16_t v,
+ bool on,
+ const location& loc)
+ {
+ run_finish (args.data (), pr, v, on, loc);
+ }
+
+ inline void
+ run_finish (const char* const* args,
+ process& pr,
+ const string& l,
+ uint16_t v,
+ bool on,
+ const location& loc)
+ {
+ run_finish_impl (args, pr, true, l, v, on, loc);
+ }
+
+ inline bool
+ run_finish_code (const char* const* args,
+ process& pr,
+ uint16_t v,
+ bool on,
+ const location& loc)
+ {
+ return run_finish_impl (args, pr, false, string (), v, on, loc);
+ }
+
+ inline bool
+ run_finish_code (const cstrings& args,
+ process& pr,
+ uint16_t v,
+ bool on,
+ const location& loc)
{
- run_finish (args.data (), pr, string (), loc);
+ return run_finish_code (args.data (), pr, v, on, loc);
}
inline bool
- run_finish_code (const char* args[],
+ run_finish_code (const char* const* args,
process& pr,
const string& l,
+ uint16_t v,
+ bool on,
+ const location& loc)
+ {
+ return run_finish_impl (args, pr, false, l, v, on, loc);
+ }
+
+ inline void
+ run_finish (diag_buffer& dbuf,
+ const char* const* args,
+ process& pr,
+ uint16_t v,
+ bool on,
+ const location& loc)
+ {
+ run_finish_impl (dbuf, args, pr, true /* fail */, v, on, loc);
+ }
+
+ inline void
+ run_finish (diag_buffer& dbuf,
+ const cstrings& args,
+ process& pr,
+ uint16_t v,
+ bool on,
+ const location& loc)
+ {
+ run_finish_impl (dbuf, args.data (), pr, true, v, on, loc);
+ }
+
+ inline bool
+ run_finish_code (diag_buffer& dbuf,
+ const char* const* args,
+ process& pr,
+ uint16_t v,
+ bool on,
+ const location& loc)
+ {
+ return run_finish_impl (dbuf, args, pr, false, v, on, loc);
+ }
+
+ inline bool
+ run_finish_code (diag_buffer& dbuf,
+ const cstrings& args,
+ process& pr,
+ uint16_t v,
+ bool on,
const location& loc)
{
- return run_finish_impl (args, pr, false /* error */, l, loc);
+ return run_finish_impl (dbuf, args.data (), pr, false, v, on, loc);
+ }
+
+ template <typename T, typename F>
+ inline T
+ run (context& ctx,
+ uint16_t verbosity,
+ const process_env& pe,
+ const char* const* args,
+ F&& f,
+ bool err,
+ bool ignore_exit,
+ sha256* checksum)
+ {
+ T r;
+ if (!run (ctx,
+ verbosity,
+ pe, args,
+ verbosity - 1,
+              [&r, &f] (string& l, bool last) // Small function optimization.
+ {
+ r = f (l, last);
+ return r.empty ();
+ },
+ true /* trim */,
+ err,
+ ignore_exit,
+ checksum))
+ r = T ();
+
+ return r;
+ }
+
+ template <typename T, typename F>
+ inline T
+ run (context& ctx,
+ const process_env& pe,
+ const char* const* args,
+ uint16_t finish_verbosity,
+ F&& f,
+ bool err,
+ bool ignore_exit,
+ sha256* checksum)
+ {
+ T r;
+ if (!run (ctx,
+ verb_never,
+ pe, args,
+ finish_verbosity,
+ [&r, &f] (string& l, bool last)
+ {
+ r = f (l, last);
+ return r.empty ();
+ },
+ true /* trim */,
+ err,
+ ignore_exit,
+ checksum))
+ r = T ();
+
+ return r;
}
inline void
diff --git a/libbuild2/utility.txx b/libbuild2/utility.txx
index bb25288..d2fc29c 100644
--- a/libbuild2/utility.txx
+++ b/libbuild2/utility.txx
@@ -54,68 +54,4 @@ namespace build2
return p;
}
-
- [[noreturn]] LIBBUILD2_SYMEXPORT void
- run_io_error (const char*[], const io_error&);
-
- template <typename T, typename F>
- T
- run (uint16_t verbosity,
- const process_env& pe,
- const char* args[],
- F&& f,
- bool err,
- bool ignore_exit,
- sha256* checksum)
- {
- process pr (run_start (verbosity,
- pe,
- args,
- 0 /* stdin */,
- -1 /* stdout */,
- err));
- T r;
- string l; // Last line of output.
-
- try
- {
- ifdstream is (move (pr.in_ofd), butl::fdstream_mode::skip);
-
- // Make sure we keep the last line.
- //
- for (bool last (is.peek () == ifdstream::traits_type::eof ());
- !last && getline (is, l); )
- {
- last = (is.peek () == ifdstream::traits_type::eof ());
-
- trim (l);
-
- if (checksum != nullptr)
- checksum->append (l);
-
- if (r.empty ())
- {
- r = f (l, last);
-
- if (!r.empty () && checksum == nullptr)
- break;
- }
- }
-
- is.close ();
- }
- catch (const io_error& e)
- {
- if (run_wait (args, pr))
- run_io_error (args, e);
-
- // If the child process has failed then assume the io error was
- // caused by that and let run_finish() deal with it.
- }
-
- if (!(run_finish_impl (args, pr, err, l) || ignore_exit))
- r = T ();
-
- return r;
- }
}
diff --git a/libbuild2/variable.cxx b/libbuild2/variable.cxx
index 8ed9605..078c13a 100644
--- a/libbuild2/variable.cxx
+++ b/libbuild2/variable.cxx
@@ -3,10 +3,15 @@
#include <libbuild2/variable.hxx>
-#include <cstring> // memcmp()
+#include <cstring> // memcmp(), memcpy()
#include <libbutl/path-pattern.hxx>
+#ifndef BUILD2_BOOTSTRAP
+# include <libbutl/json/parser.hxx>
+# include <libbutl/json/serializer.hxx>
+#endif
+
#include <libbuild2/target.hxx>
#include <libbuild2/diagnostics.hxx>
@@ -47,7 +52,7 @@ namespace build2
}
value::
- value (value&& v)
+ value (value&& v) noexcept
: type (v.type), null (v.null), extra (v.extra)
{
if (!null)
@@ -57,7 +62,7 @@ namespace build2
else if (type->copy_ctor != nullptr)
type->copy_ctor (*this, v, true);
else
- data_ = v.data_; // Copy as POD.
+ memcpy (data_, v.data_, size_); // Copy as POD.
}
}
@@ -72,7 +77,7 @@ namespace build2
else if (type->copy_ctor != nullptr)
type->copy_ctor (*this, v, false);
else
- data_ = v.data_; // Copy as POD.
+ memcpy (data_, v.data_, size_); // Copy as POD.
}
}
@@ -99,12 +104,14 @@ namespace build2
if (null)
new (&data_) names (move (v).as<names> ());
else
+ // Note: can throw (see small_vector for details).
+ //
as<names> () = move (v).as<names> ();
}
else if (auto f = null ? type->copy_ctor : type->copy_assign)
f (*this, v, true);
else
- data_ = v.data_; // Assign as POD.
+ memcpy (data_, v.data_, size_); // Assign as POD.
null = v.null;
}
@@ -143,7 +150,7 @@ namespace build2
else if (auto f = null ? type->copy_ctor : type->copy_assign)
f (*this, v, false);
else
- data_ = v.data_; // Assign as POD.
+ memcpy (data_, v.data_, size_); // Assign as POD.
null = v.null;
}
@@ -367,8 +374,8 @@ namespace build2
// Typification is kind of like caching so we reuse that mutex shard.
//
shared_mutex& m (
- ctx.mutexes.variable_cache[
- hash<value*> () (&v) % ctx.mutexes.variable_cache_size]);
+ ctx.mutexes->variable_cache[
+ hash<value*> () (&v) % ctx.mutexes->variable_cache_size]);
// Note: v.type is rechecked by typify() under lock.
//
@@ -377,7 +384,7 @@ namespace build2
}
void
- untypify (value& v)
+ untypify (value& v, bool reduce)
{
if (v.type == nullptr)
return;
@@ -389,7 +396,7 @@ namespace build2
}
names ns;
- names_view nv (v.type->reverse (v, ns));
+ names_view nv (v.type->reverse (v, ns, reduce));
if (nv.empty () || nv.data () == ns.data ())
{
@@ -451,14 +458,14 @@ namespace build2
m = "invalid " + t + " value ";
if (n.simple ())
- m += "'" + n.value + "'";
+ m += '\'' + n.value + '\'';
else if (n.directory ())
- m += "'" + n.dir.representation () + "'";
+ m += '\'' + n.dir.representation () + '\'';
else
- m += "name '" + to_string (n) + "'";
+ m += "name '" + to_string (n) + '\'';
}
- throw invalid_argument (m);
+ throw invalid_argument (move (m));
}
// names
@@ -470,7 +477,7 @@ namespace build2
bool value_traits<bool>::
convert (const name& n, const name* r)
{
- if (r == nullptr && !n.pattern && n.simple () )
+ if (r == nullptr && !n.pattern && n.simple ())
{
const string& s (n.value);
@@ -493,6 +500,7 @@ namespace build2
type_name,
sizeof (bool),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
nullptr, // No dtor (POD).
nullptr, // No copy_ctor (POD).
@@ -503,7 +511,9 @@ namespace build2
&simple_reverse<bool>,
nullptr, // No cast (cast data_ directly).
nullptr, // No compare (compare as POD).
- nullptr // Never empty.
+ nullptr, // Never empty.
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// int64_t value
@@ -515,13 +525,22 @@ namespace build2
{
try
{
- // May throw invalid_argument or out_of_range.
- //
- size_t i;
- int64_t r (stoll (n.value, &i));
+ const string& v (n.value);
- if (i == n.value.size ())
- return r;
+ if (!wspace (v[0]))
+ {
+ // Note that unlike uint64, we don't support hex notation for int64.
+
+ // May throw invalid_argument or out_of_range.
+ //
+ size_t i;
+ int64_t r (stoll (v, &i));
+
+ if (i == v.size ())
+ return r;
+
+ // Fall through.
+ }
// Fall through.
}
@@ -541,6 +560,7 @@ namespace build2
type_name,
sizeof (int64_t),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
nullptr, // No dtor (POD).
nullptr, // No copy_ctor (POD).
@@ -551,7 +571,9 @@ namespace build2
&simple_reverse<int64_t>,
nullptr, // No cast (cast data_ directly).
nullptr, // No compare (compare as POD).
- nullptr // Never empty.
+ nullptr, // Never empty.
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// uint64_t value
@@ -563,13 +585,24 @@ namespace build2
{
try
{
- // May throw invalid_argument or out_of_range.
- //
- size_t i;
- uint64_t r (stoull (n.value, &i));
+ const string& v (n.value);
+
+ if (!wspace (v[0]))
+ {
+ // Note: see also similar code in to_json_value().
+ //
+ int b (v[0] == '0' && (v[1] == 'x' || v[1] == 'X') ? 16 : 10);
- if (i == n.value.size ())
- return r;
+ // May throw invalid_argument or out_of_range.
+ //
+ size_t i;
+ uint64_t r (stoull (v, &i, b));
+
+ if (i == v.size ())
+ return r;
+
+ // Fall through.
+ }
// Fall through.
}
@@ -589,6 +622,7 @@ namespace build2
type_name,
sizeof (uint64_t),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
nullptr, // No dtor (POD).
nullptr, // No copy_ctor (POD).
@@ -599,7 +633,9 @@ namespace build2
&simple_reverse<uint64_t>,
nullptr, // No cast (cast data_ directly).
nullptr, // No compare (compare as POD).
- nullptr // Never empty.
+ nullptr, // Never empty.
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// string value
@@ -612,28 +648,31 @@ namespace build2
// the common cases (unqualified, unpaired simple name or directory).
//
- // We can only convert project-qualified simple and directory names.
+ // We can only convert project-qualified untyped names.
//
- if (n.pattern ||
- !(n.simple (true) || n.directory (true)))
+ if (n.pattern || n.typed ())
throw_invalid_argument (n, nullptr, "string");
if (r != nullptr)
{
- if (r->pattern ||
- !(r->simple (true) || r->directory (true)))
+ if (r->pattern || r->typed ())
throw_invalid_argument (*r, nullptr, "string");
}
string s;
- if (n.directory (true))
+ if (n.simple (true))
+ s.swap (n.value);
+ else
+ {
// Note that here we cannot assume what's in dir is really a
// path (think s/foo/bar/) so we have to reverse it exactly.
//
s = move (n.dir).representation (); // Move out of path.
- else
- s.swap (n.value);
+
+ if (!n.value.empty ())
+ s += n.value; // Separator is already there.
+ }
// Convert project qualification to its string representation.
//
@@ -657,10 +696,15 @@ namespace build2
s += '%';
}
- if (r->directory (true))
- s += move (r->dir).representation ();
- else
+ if (r->simple (true))
s += r->value;
+ else
+ {
+ s += move (r->dir).representation ();
+
+ if (!r->value.empty ())
+ s += r->value;
+ }
}
return s;
@@ -675,6 +719,7 @@ namespace build2
type_name,
sizeof (string),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
&default_dtor<string>,
&default_copy_ctor<string>,
@@ -685,7 +730,9 @@ namespace build2
&simple_reverse<string>,
nullptr, // No cast (cast data_ directly).
&simple_compare<string>,
- &default_empty<string>
+ &default_empty<string>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// path value
@@ -742,6 +789,7 @@ namespace build2
type_name,
sizeof (path),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
&default_dtor<path>,
&default_copy_ctor<path>,
@@ -752,7 +800,9 @@ namespace build2
&simple_reverse<path>,
nullptr, // No cast (cast data_ directly).
&simple_compare<path>,
- &default_empty<path>
+ &default_empty<path>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// dir_path value
@@ -809,6 +859,7 @@ namespace build2
sizeof (dir_path),
&value_traits<path>::value_type, // Base (assuming direct cast works for
// both).
+ false, // Not container.
nullptr, // No element.
&default_dtor<dir_path>,
&default_copy_ctor<dir_path>,
@@ -819,7 +870,9 @@ namespace build2
&simple_reverse<dir_path>,
nullptr, // No cast (cast data_ directly).
&simple_compare<dir_path>,
- &default_empty<dir_path>
+ &default_empty<dir_path>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// abs_dir_path value
@@ -843,7 +896,14 @@ namespace build2
return abs_dir_path (move (d));
}
- catch (const invalid_path&) {} // Fall through.
+ catch (invalid_path& e)
+ {
+ // We moved from name so reconstruct the path. Let's always make it
+ // simple since we may not be able to construct dir_path. Should be
+ // good enough for diagnostics.
+ //
+ n.value = move (e.path);
+ }
}
throw_invalid_argument (n, r, "abs_dir_path");
@@ -857,6 +917,7 @@ namespace build2
sizeof (abs_dir_path),
&value_traits<dir_path>::value_type, // Base (assuming direct cast works
// for both).
+ false, // Not container.
nullptr, // No element.
&default_dtor<abs_dir_path>,
&default_copy_ctor<abs_dir_path>,
@@ -867,7 +928,9 @@ namespace build2
&simple_reverse<abs_dir_path>,
nullptr, // No cast (cast data_ directly).
&simple_compare<abs_dir_path>,
- &default_empty<abs_dir_path>
+ &default_empty<abs_dir_path>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// name value
@@ -882,10 +945,10 @@ namespace build2
}
static names_view
- name_reverse (const value& v, names&)
+ name_reverse (const value& v, names&, bool reduce)
{
const name& n (v.as<name> ());
- return n.empty () ? names_view (nullptr, 0) : names_view (&n, 1);
+ return reduce && n.empty () ? names_view (nullptr, 0) : names_view (&n, 1);
}
const char* const value_traits<name>::type_name = "name";
@@ -895,6 +958,7 @@ namespace build2
type_name,
sizeof (name),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
&default_dtor<name>,
&default_copy_ctor<name>,
@@ -905,7 +969,9 @@ namespace build2
&name_reverse,
nullptr, // No cast (cast data_ directly).
&simple_compare<name>,
- &default_empty<name>
+ &default_empty<name>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// name_pair
@@ -949,13 +1015,13 @@ namespace build2
}
static names_view
- name_pair_reverse (const value& v, names& ns)
+ name_pair_reverse (const value& v, names& ns, bool reduce)
{
const name_pair& p (v.as<name_pair> ());
const name& f (p.first);
const name& s (p.second);
- if (f.empty () && s.empty ())
+ if (reduce && f.empty () && s.empty ())
return names_view (nullptr, 0);
if (f.empty ())
@@ -977,6 +1043,7 @@ namespace build2
type_name,
sizeof (name_pair),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
&default_dtor<name_pair>,
&default_copy_ctor<name_pair>,
@@ -987,7 +1054,9 @@ namespace build2
&name_pair_reverse,
nullptr, // No cast (cast data_ directly).
&simple_compare<name_pair>,
- &default_empty<name_pair>
+ &default_empty<name_pair>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// process_path value
@@ -1107,10 +1176,14 @@ namespace build2
}
static names_view
- process_path_reverse (const value& v, names& s)
+ process_path_reverse (const value& v, names& s, bool)
{
const auto& x (v.as<process_path> ());
+ // Note that strictly speaking process_path doesn't have empty
+ // representation (see convert() above). Thus we always return reduced
+ // representation.
+ //
if (!x.empty ())
{
s.reserve (x.effect.empty () ? 1 : 2);
@@ -1127,6 +1200,7 @@ namespace build2
type_name,
sizeof (process_path),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
&default_dtor<process_path>,
&process_path_copy_ctor<process_path>,
@@ -1137,7 +1211,9 @@ namespace build2
&process_path_reverse,
nullptr, // No cast (cast data_ directly).
&simple_compare<process_path>,
- &default_empty<process_path>
+ &default_empty<process_path>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// process_path_ex value
@@ -1275,10 +1351,13 @@ namespace build2
}
static names_view
- process_path_ex_reverse (const value& v, names& s)
+ process_path_ex_reverse (const value& v, names& s, bool)
{
const auto& x (v.as<process_path_ex> ());
+ // Note that process_path_ex only has reduced empty representation (see
+ // convert() above).
+ //
if (!x.empty ())
{
s.reserve ((x.effect.empty () ? 1 : 2) +
@@ -1322,6 +1401,7 @@ namespace build2
sizeof (process_path_ex),
&value_traits< // Base (assuming direct cast works
process_path>::value_type, // for both).
+ false, // Not container.
nullptr, // No element.
&default_dtor<process_path_ex>,
&process_path_ex_copy_ctor,
@@ -1332,7 +1412,9 @@ namespace build2
&process_path_ex_reverse,
nullptr, // No cast (cast data_ directly).
&simple_compare<process_path>, // For now compare as process_path.
- &default_empty<process_path_ex>
+ &default_empty<process_path_ex>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// target_triplet value
@@ -1363,6 +1445,7 @@ namespace build2
type_name,
sizeof (target_triplet),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
&default_dtor<target_triplet>,
&default_copy_ctor<target_triplet>,
@@ -1373,7 +1456,9 @@ namespace build2
&simple_reverse<target_triplet>,
nullptr, // No cast (cast data_ directly).
&simple_compare<target_triplet>,
- &default_empty<target_triplet>
+ &default_empty<target_triplet>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// project_name value
@@ -1407,6 +1492,7 @@ namespace build2
type_name,
sizeof (project_name),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
&default_dtor<project_name>,
&default_copy_ctor<project_name>,
@@ -1417,7 +1503,1105 @@ namespace build2
&simple_reverse<project_name>,
nullptr, // No cast (cast data_ directly).
&simple_compare<project_name>,
- &default_empty<project_name>
+ &default_empty<project_name>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
+ };
+
+ // json
+ //
+ static string
+ to_string_value (name& n, const char* what)
+ {
+ if (n.typed () || n.qualified () || n.pattern)
+ throw_invalid_argument (n, nullptr, what);
+
+ string s;
+
+ if (n.simple ())
+ s.swap (n.value);
+ else
+ {
+ // Note that here we cannot assume what's in dir is really a path (think
+ // s/foo/bar/) so we have to reverse it exactly.
+ //
+ s = move (n.dir).representation (); // Move out of path.
+
+ if (!n.value.empty ())
+ s += n.value; // Separator is already there.
+ }
+
+ return s;
+ }
+
+ static json_value
+ to_json_value (name& n, const char* what)
+ {
+ if (n.typed () || n.qualified () || n.pattern)
+ throw_invalid_argument (n, nullptr, what);
+
+ string s;
+
+ if (n.simple ())
+ s.swap (n.value);
+ else
+ {
+ // Note that here we cannot assume what's in dir is really a path (think
+ // s/foo/bar/) so we have to reverse it exactly.
+ //
+ s = move (n.dir).representation (); // Move out of path.
+
+ if (!n.value.empty ())
+ s += n.value; // Separator is already there.
+
+ // A path is always interpreted as a JSON string.
+ //
+ return json_value (move (s));
+ }
+
+ bool f;
+ if (s.empty ())
+ return json_value (string ());
+ if (s == "null")
+ return json_value ();
+ else if ((f = (s == "true")) || s == "false")
+ return json_value (f);
+ else if (s.find_first_not_of (
+ "0123456789", (f = (s[0] == '-')) ? 1 : 0) == string::npos)
+ {
+ name n (move (s));
+ return f
+ ? json_value (value_traits<int64_t>::convert (n, nullptr))
+ : json_value (value_traits<uint64_t>::convert (n, nullptr));
+ }
+ //
+ // Handle the hex notation similar to <uint64_t>::convert() (and JSON5).
+ //
+ else if (s[0] == '0' &&
+ (s[1] == 'x' || s[1] == 'X') &&
+ s.size () > 2 &&
+ s.find_first_not_of ("0123456789aAbBcCdDeEfF", 2) == string::npos)
+ {
+ return json_value (
+ value_traits<uint64_t>::convert (name (move (s)), nullptr),
+ true /* hex */);
+ }
+ else
+ {
+ // If this is not a JSON representation of string, array, or object,
+ // then treat it as a string.
+ //
+ // Note that the special `"`, `{`, and `[` characters could be preceded
+ // with whitespaces. Note: see similar test in json_object below.
+ //
+ size_t p (s.find_first_not_of (" \t\n\r"));
+
+ if (p == string::npos || (s[p] != '"' && s[p] != '{' && s[p] != '['))
+ return json_value (move (s));
+
+ // Parse as valid JSON input text.
+ //
+#ifndef BUILD2_BOOTSTRAP
+ try
+ {
+ json_parser p (s, nullptr /* name */);
+ return json_value (p);
+ }
+ catch (const invalid_json_input& e)
+ {
+        // Turned out printing line/column/offset can be misleading since we
+        // could be parsing a single name from a potential list of names. It
+        // feels like without also printing the value this is not of much use.
+ //
+#if 0
+ string m ("invalid json input at line ");
+ m += to_string (e.line);
+ m += ", column ";
+ m += to_string (e.column);
+ m += ", byte offset ";
+ m += to_string (e.position);
+ m += ": ";
+ m += e.what ();
+#else
+ string m ("invalid json input: ");
+ m += e.what ();
+#endif
+ throw invalid_argument (move (m));
+ }
+#else
+ throw invalid_argument ("json parsing requested during bootstrap");
+#endif
+ }
+ }
+
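  To make the above rules concrete, a few illustrative conversions via the
  value_traits<json_value>::convert() entry point defined just below (which forwards
  to to_json_value()); the resulting JSON types follow from the branches above:

      value_traits<json_value>::convert (name ("null"),   nullptr); // null
      value_traits<json_value>::convert (name ("true"),   nullptr); // boolean
      value_traits<json_value>::convert (name ("-123"),   nullptr); // signed number
      value_traits<json_value>::convert (name ("0x1A"),   nullptr); // hexadecimal number (26)
      value_traits<json_value>::convert (name ("[1, 2]"), nullptr); // array (parsed as JSON text)
      value_traits<json_value>::convert (name ("hello"),  nullptr); // string
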
+ json_value value_traits<json_value>::
+ convert (name&& l, name* r)
+ {
+ // Here we expect either a simple value or a serialized representation.
+ //
+ if (r != nullptr)
+ throw invalid_argument ("pair in json element value");
+
+ return to_json_value (l, "json element");
+ }
+
+ json_value value_traits<json_value>::
+ convert (names&& ns)
+ {
+ size_t n (ns.size ());
+
+ if (n == 0)
+ {
+ // Note: this is the ([json] ) case, not ([json] ""). See also the
+ // relevant note in json_reverse() below.
+ //
+ return json_value (); // null
+ }
+ else if (n == 1)
+ {
+ return to_json_value (ns.front (), "json");
+ }
+ else
+ {
+ if (ns.front ().pair) // object
+ {
+ json_value r (json_type::object);
+ r.object.reserve (n / 2);
+
+ for (auto i (ns.begin ()); i != ns.end (); ++i)
+ {
+ if (!i->pair)
+ throw invalid_argument (
+ "expected pair in json member value '" + to_string (*i) + '\'');
+
+ // Note that we could support JSON-quoted member names but it's
+          // unclear why someone would want that (and if they do, they can
+ // always specify JSON text instead).
+ //
+          // @@ The empty pair value ([json] one@ ), which is currently an empty
+          //    string, is inconsistent with the empty value ([json] ) above,
+          //    which is null. Maybe we could distinguish the one@ and one@"" cases
+          //    via type hints?
+ //
+ string n (to_string_value (*i, "json member name"));
+ json_value v (to_json_value (*++i, "json member"));
+
+ // Check for duplicates. One can use append/prepend to merge.
+ //
+ if (find_if (r.object.begin (), r.object.end (),
+ [&n] (const json_member& m)
+ {
+ return m.name == n;
+ }) != r.object.end ())
+ {
+ throw invalid_argument (
+ "duplicate json object member '" + n + '\'');
+ }
+
+ r.object.push_back (json_member {move (n), move (v)});
+ }
+
+ return r;
+ }
+ else // array
+ {
+ json_value r (json_type::array);
+ r.array.reserve (n);
+
+ for (name& n: ns)
+ {
+ if (n.pair)
+ throw invalid_argument (
+ "unexpected pair in json array element value '" +
+ to_string (n) + '\'');
+
+ r.array.push_back (to_json_value (n, "json array element"));
+ }
+
+ return r;
+ }
+ }
+ }
+
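  For instance, a multi-element names list without pairs is assembled into a JSON
  array per the branch above (a sketch; a list whose first element is a pair would
  instead become an object with one member per pair):

      names ns;
      ns.push_back (name ("1"));
      ns.push_back (name ("2"));
      ns.push_back (name ("abc"));

      json_value v (value_traits<json_value>::convert (move (ns))); // [1, 2, "abc"]
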
+ static void
+ json_assign (value& v, names&& ns, const variable* var)
+ {
+ using traits = value_traits<json_value>;
+
+ try
+ {
+ traits::assign (v, traits::convert (move (ns)));
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: ns is not guaranteed to be valid.
+ //
+ diag_record dr (fail);
+ dr << "invalid json value";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+ dr << ": " << e;
+ }
+ }
+
+ static void
+ json_append (value& v, names&& ns, const variable* var)
+ {
+ using traits = value_traits<json_value>;
+
+ try
+ {
+ traits::append (v, traits::convert (move (ns)));
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: ns is not guaranteed to be valid.
+ //
+ diag_record dr (fail);
+ dr << "invalid json value";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+ dr << ": " << e;
+ }
+ }
+
+ static void
+ json_prepend (value& v, names&& ns, const variable* var)
+ {
+ using traits = value_traits<json_value>;
+
+ try
+ {
+ traits::prepend (v, traits::convert (move (ns)));
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: ns is not guaranteed to be valid.
+ //
+ diag_record dr (fail);
+ dr << "invalid json value";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+ dr << ": " << e;
+ }
+ }
+
+ name value_traits<json_value>::
+ reverse (const json_value& v)
+ {
+ switch (v.type)
+ {
+ case json_type::null:
+ {
+ // Note that here we cannot return empty (e.g., to be consistent with
+ // other places) because we treat empty name (as opposed to empty
+ // names) as string, not null (see to_json_value() above).
+ //
+ // Thankfully this version of reverse() is only used when json_value
+ // representation is needed as part of a container. Which means in
+ // "consumption" contexts (e.g., result of subscript) null will still
+ // decay to empty.
+ //
+#if 1
+ return name ("null");
+#else
+ return name ();
+#endif
+ }
+ case json_type::boolean:
+ {
+ return name (v.boolean ? "true" : "false");
+ }
+ case json_type::signed_number:
+ {
+ return value_traits<int64_t>::reverse (v.signed_number);
+ }
+ case json_type::unsigned_number:
+ {
+ return value_traits<uint64_t>::reverse (v.unsigned_number);
+ }
+ case json_type::hexadecimal_number:
+ {
+ return name (to_string (v.unsigned_number, 16));
+ }
+ case json_type::string:
+ //
+ // @@ Hm, it would be nice if this somehow got mapped to unquoted
+ // string but still be round-trippable to JSON value. Perhaps via
+      //    the type hint idea? This is pretty bad. See also the subscript
+      //    code, where we hacked around this somewhat.
+ //
+ // Note that it may be tempting to fix this by only quoting strings
+ // that would otherwise be mis-interpreted (null, true, all digits,
+ // etc). But that would be worse: things would seem to work but
+ // fall apart in the perhaps unlikely event of encountering one of
+ // the problematic values. It is better to produce a consistent
+ // result.
+ //
+ case json_type::array:
+ case json_type::object:
+ {
+ // Serialize as JSON output text.
+ //
+ string o;
+
+#ifndef BUILD2_BOOTSTRAP
+ try
+ {
+ // Disable pretty-printing so that the output is all on the same
+ // line. While it's not going to be easy to read for larger JSON
+ // outputs, it will fit better into the existing model where none of
+ // the value representations use formatting newlines. If a pretty-
+ // printed representation is required, then the $json.serialize()
+ // function can be used to obtain it.
+ //
+ json_buffer_serializer s (o, 0 /* indentation */);
+ v.serialize (s);
+ }
+ catch (const invalid_json_output& e)
+ {
+ // Note that while it feels like value_traits::reverse() should
+ // throw invalid_argument, we don't currently handle it anywhere so
+ // for now let's just fail.
+ //
+ // Note: the same diagnostics as in $json.serialize().
+ //
+ diag_record dr;
+ dr << fail << "invalid json value: " << e;
+
+ if (e.event)
+ dr << info << "while serializing " << to_string (*e.event);
+
+ if (e.offset != string::npos)
+ dr << info << "offending byte offset " << e.offset;
+ }
+#else
+ fail << "json serialization requested during bootstrap";
+#endif
+ return name (move (o));
+ }
+ }
+
+ assert (false);
+ return name ();
+ }
+
+ static names_view
+ json_reverse (const value& x, names& ns, bool reduce)
+ {
+ const json_value& v (x.as<json_value> ());
+
+ // @@ Hm, it would be nice if JSON null somehow got mapped to [null]/empty
+ // but still be round-trippable to JSON null. Perhaps via type hint?
+ //
+    //    But wouldn't `print ([json] null)` printing nothing be surprising?
+    //    Also, it's not clear that mapping JSON null to our [null] is a good
+ // idea since our [null] means "no value" while JSON null means "null
+ // value".
+ //
+ // Maybe the current semantics is the best: we map our [null] and empty
+ // names to JSON null (naturally) but we always reverse JSON null to
+ // the JSON "null" literal. Or maybe we could reverse it to null but
+ // type-hint it that it's a spelling of [null]/empty. Quite fuzzy,
+ // admittedly. In our model null values decay to empty so JSON null
+ // decaying to "null" literal is strange. Let's try and see how it
+ // goes. See also json_subscript_impl() below.
+ //
+ if (v.type != json_type::null || !reduce)
+ ns.push_back (value_traits<json_value>::reverse (v));
+
+ return ns;
+ }
+
+ static int
+ json_compare (const value& l, const value& r)
+ {
+ return l.as<json_value> ().compare (r.as<json_value> ());
+ }
+
+ // Return the value as well as an indication of whether the index/name is
+ // in range.
+ //
+ static pair<value, bool>
+ json_subscript_impl (const value& val, value* val_data,
+ uint64_t i, const string& n, bool index)
+ {
+ const json_value& jv (val.as<json_value> ());
+
+ json_value jr;
+
+ if (index)
+ {
+ if (i >= (jv.type == json_type::array ? jv.array.size () :
+ jv.type == json_type::object ? jv.object.size () :
+ jv.type == json_type::null ? 0 : 1))
+ return make_pair (value (), false);
+
+ switch (jv.type)
+ {
+ case json_type::boolean:
+ case json_type::signed_number:
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ case json_type::string:
+ {
+ // Steal the value if possible.
+ //
+ jr = (&val == val_data
+ ? json_value (move (const_cast<json_value&> (jv)))
+ : json_value (jv));
+ break;
+ }
+ case json_type::array:
+ {
+ // Steal the value if possible.
+ //
+ const json_value& r (jv.array[i]);
+ jr = (&val == val_data
+ ? json_value (move (const_cast<json_value&> (r)))
+ : json_value (r));
+ break;
+ }
+ case json_type::object:
+ {
+ // Represent as an object with one member.
+ //
+ new (&jr.object) json_value::object_type ();
+ jr.type = json_type::object;
+
+ // Steal the member if possible.
+ //
+ const json_member& m (jv.object[i]);
+ jr.object.push_back (&val == val_data
+ ? json_member (move (const_cast<json_member&> (m)))
+ : json_member (m));
+ break;
+ }
+ case json_type::null:
+ assert (false);
+ }
+ }
+ else
+ {
+ auto i (find_if (jv.object.begin (),
+ jv.object.end (),
+ [&n] (const json_member& m)
+ {
+ return m.name == n;
+ }));
+
+ if (i == jv.object.end ())
+ return make_pair (value (), false);
+
+ // Steal the member value if possible.
+ //
+ jr = (&val == val_data
+ ? json_value (move (const_cast<json_value&> (i->value)))
+ : json_value (i->value));
+ }
+
+ // @@ As a temporary work around for the lack of type hints (see
+ // json_reverse() for background), reverse simple JSON values to the
+ // corresponding fundamental type values. The thinking here is that
+ // subscript (and iteration) is primarily meant for consumption (as
+ // opposed to reverse() where it is used to build up values and thus
+ // needs things to be fully reversible). Once we add type hints, then
+ // this should become unnecessary and we should be able to just always
+ // return json_value. See also $json.member_value() where we do the
+ // same thing.
+ //
+ // @@ TODO: split this function into two (index/name) once we get rid of this.
+ //
+ value r;
+ switch (jr.type)
+ {
+ // Seeing that we are reversing for consumption, it feels natural to
+ // reverse JSON null to our [null] rather than empty. This, in
+ // particular, helps chained subscript.
+ //
+#if 0
+ case json_type::null: r = value (names {}); break;
+#else
+ case json_type::null: r = value (); break;
+#endif
+ case json_type::boolean: r = value (jr.boolean); break;
+ case json_type::signed_number: r = value (jr.signed_number); break;
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number: r = value (jr.unsigned_number); break;
+ case json_type::string: r = value (move (jr.string)); break;
+ case json_type::array:
+ case json_type::object: r = value (move (jr)); break;
+ }
+
+ return make_pair (move (r), true);
+ }
+
+ static value
+ json_subscript (const value& val, value* val_data,
+ value&& sub,
+ const location& sloc,
+ const location& bloc)
+ {
+ const json_value* jv (val.null ? nullptr : &val.as<json_value> ());
+
+ // For consistency with other places treat a JSON null value as a maybe-
+ // missing array/object. In particular, we don't want to fail trying to
+ // look up by name on a null value which could have been an object.
+ //
+ if (jv != nullptr && jv->type == json_type::null)
+ jv = nullptr;
+
+ // Process subscript even if the value is null to make sure it is valid.
+ //
+ bool index;
+ uint64_t i (0);
+ string n;
+
+ // Always interpret uint64-typed subscript as index even for objects.
+ // This can be used, for example, to iterate with an index over object
+ // members.
+ //
+ if (!sub.null && sub.type == &value_traits<uint64_t>::value_type)
+ {
+ i = sub.as<uint64_t> ();
+ index = true;
+ }
+ else
+ {
+ // How we interpret the subscript depends on the JSON value type. For
+ // objects we treat it as a string (member name) and for everything else
+ // as an index.
+ //
+ // What if the value is null and we don't have a JSON type? In this case
+ // we treat it as a string since a valid number is also a valid string.
+ //
+ try
+ {
+ if (jv == nullptr || jv->type == json_type::object)
+ {
+ n = convert<string> (move (sub));
+ index = false;
+ }
+ else
+ {
+ i = convert<uint64_t> (move (sub));
+ index = true;
+ }
+ }
+ catch (const invalid_argument& e)
+ {
+ // We will likely be trying to interpret a member name as an integer
+ // due to the incorrect value type so issue appropriate diagnostics.
+ //
+ diag_record dr;
+ dr << fail (sloc) << "invalid json value subscript: " << e;
+
+ if (jv != nullptr && jv->type != json_type::object)
+ dr << info << "json value type is " << jv->type;
+
+ dr << info (bloc) << "use the '\\[' escape sequence if this is a "
+ << "wildcard pattern" << endf;
+ }
+ }
+
+ value r (jv != nullptr
+ ? json_subscript_impl (val, val_data, i, n, index).first
+ : value ());
+
+ // Typify null values so that we get called for chained subscripts.
+ //
+ if (r.null)
+ r.type = &value_traits<json_value>::value_type;
+
+ return r;
+ }
+
+ static void
+ json_iterate (const value& val,
+ const function<void (value&&, bool first)>& f)
+ {
+ // Implement in terms of subscript for consistency (in particular,
+ // iterating over simple values like number, string).
+ //
+ for (uint64_t i (0);; ++i)
+ {
+ pair<value, bool> e (json_subscript_impl (val, nullptr, i, {}, true));
+
+ if (!e.second)
+ break;
+
+ f (move (e.first), i == 0);
+ }
+ }
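As a usage sketch, a hypothetical file-local helper that relies on the iteration semantics above (0 elements for JSON null, 1 for simple values, the element/member count for arrays and objects); illustration only, not part of the diff:

    static size_t
    count_json_elements (const value& v) // Hypothetical, for illustration only.
    {
      size_t n (0);
      json_iterate (v, [&n] (value&&, bool) {++n;});
      return n;
    }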
+
+ const json_value value_traits<json_value>::empty_instance;
+ const char* const value_traits<json_value>::type_name = "json";
+
+ // Note that whether the json value is a container or not depends on its
+ // payload type. However, for our purposes it feels correct to assume it is
+ // a container (with itself as the element type) rather than not (see
+ // value_traits::{container, element_type} usage for details).
+ //
+ const value_type value_traits<json_value>::value_type
+ {
+ type_name,
+ sizeof (json_value),
+ nullptr, // No base.
+ true, // Container.
+ &value_traits<json_value>::value_type, // Element (itself).
+ &default_dtor<json_value>,
+ &default_copy_ctor<json_value>,
+ &default_copy_assign<json_value>,
+ &json_assign,
+ json_append,
+ json_prepend,
+ &json_reverse,
+ nullptr, // No cast (cast data_ directly).
+ &json_compare,
+ &default_empty<json_value>,
+ &json_subscript,
+ &json_iterate
+ };
+
+ // json_array
+ //
+ json_array value_traits<json_array>::
+ convert (names&& ns)
+ {
+ json_array r;
+
+ size_t n (ns.size ());
+ if (n == 0)
+ ; // Empty.
+ else if (n == 1)
+ {
+ // Tricky: this can still be JSON input text that is an array. And if
+ // it's not, then make it an element of an array.
+ //
+ // @@ Hm, this is confusing: [json_array] a = null ! Maybe not? But then
+ // this won't work: [json_array] a = ([json_array] null). Maybe
+ // distinguish in assign?
+ //
+ json_value v (to_json_value (ns.front (), "json"));
+
+ if (v.type == json_type::array)
+ r.array = move (v.array);
+ else
+ r.array.push_back (move (v));
+ }
+ else
+ {
+ r.array.reserve (n);
+
+ for (name& n: ns)
+ {
+ if (n.pair)
+ throw invalid_argument (
+ "unexpected pair in json array element value '" +
+ to_string (n) + '\'');
+
+ r.array.push_back (to_json_value (n, "json array element"));
+ }
+ }
+
+ return r;
+ }
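A sketch of the two single-name cases handled above, using only the name/names operations already used elsewhere in this file (illustration only, not part of the diff):

    names ns1;
    ns1.push_back (name ("[1, 2]"));
    json_array a1 (value_traits<json_array>::convert (move (ns1))); // Two elements.

    names ns2;
    ns2.push_back (name ("42"));
    json_array a2 (value_traits<json_array>::convert (move (ns2))); // One element, 42.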
+
+ static void
+ json_array_assign (value& v, names&& ns, const variable* var)
+ {
+ using traits = value_traits<json_array>;
+
+ try
+ {
+ traits::assign (v, traits::convert (move (ns)));
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: ns is not guaranteed to be valid.
+ //
+ diag_record dr (fail);
+ dr << "invalid json array";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+ dr << ": " << e;
+ }
+ }
+
+ static void
+ json_array_append (value& v, names&& ns, const variable* var)
+ {
+ using val_traits = value_traits<json_value>;
+ using arr_traits = value_traits<json_array>;
+
+ try
+ {
+ arr_traits::append (v, val_traits::convert (move (ns)));
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: ns is not guaranteed to be valid.
+ //
+ diag_record dr (fail);
+ dr << "invalid json array";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+ dr << ": " << e;
+ }
+ }
+
+ static void
+ json_array_prepend (value& v, names&& ns, const variable* var)
+ {
+ using val_traits = value_traits<json_value>;
+ using arr_traits = value_traits<json_array>;
+
+ try
+ {
+ arr_traits::prepend (v, val_traits::convert (move (ns)));
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: ns is not guaranteed to be valid.
+ //
+ diag_record dr (fail);
+ dr << "invalid json array";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+ dr << ": " << e;
+ }
+ }
+
+ const json_array value_traits<json_array>::empty_instance;
+ const char* const value_traits<json_array>::type_name = "json_array";
+
+ const value_type value_traits<json_array>::value_type
+ {
+ type_name,
+ sizeof (json_array),
+ &value_traits<json_value>::value_type, // Base (assuming direct cast works
+ // for both).
+ true, // Container.
+ &value_traits<json_value>::value_type, // Element (json_value).
+ &default_dtor<json_array>,
+ &default_copy_ctor<json_array>,
+ &default_copy_assign<json_array>,
+ &json_array_assign,
+ &json_array_append,
+ &json_array_prepend,
+ &json_reverse,
+ nullptr, // No cast (cast data_ directly).
+ &json_compare,
+ &default_empty<json_array>,
+ &json_subscript,
+ &json_iterate
+ };
+
+ // json_object
+ //
+ json_object value_traits<json_object>::
+ convert (names&& ns)
+ {
+ json_object r;
+
+ size_t n (ns.size ());
+ if (n == 0)
+ ; // Empty.
+ else if (n == 1)
+ {
+ // Tricky: this can still be JSON input text that is an object. So do
+ // a similar check as in to_json_value() above.
+ //
+ name& n (ns.front ());
+
+ if (!n.simple () || n.pattern)
+ throw_invalid_argument (n, nullptr, "json object");
+
+ string& s (n.value);
+ size_t p (s.find_first_not_of (" \t\n\r"));
+
+ if (p == string::npos || s[p] != '{')
+ {
+ // Unlike for array above, we cannot turn any value into a member.
+ //
+ throw invalid_argument ("expected json object instead of '" + s + '\'');
+ }
+
+ json_value v (to_json_value (ns.front (), "json object"));
+ assert (v.type == json_type::object);
+ r.object = move (v.object);
+ }
+ else
+ {
+ r.object.reserve (n / 2);
+
+ for (auto i (ns.begin ()); i != ns.end (); ++i)
+ {
+ if (!i->pair)
+ throw invalid_argument (
+ "expected pair in json member value '" + to_string (*i) + '\'');
+
+ string n (to_string_value (*i, "json member name"));
+ json_value v (to_json_value (*++i, "json member"));
+
+ if (find_if (r.object.begin (), r.object.end (),
+ [&n] (const json_member& m)
+ {
+ return m.name == n;
+ }) != r.object.end ())
+ {
+ throw invalid_argument (
+ "duplicate json object member '" + n + '\'');
+ }
+
+ r.object.push_back (json_member {move (n), move (v)});
+ }
+ }
+
+ return r;
+ }
+
+ static void
+ json_object_assign (value& v, names&& ns, const variable* var)
+ {
+ using traits = value_traits<json_object>;
+
+ try
+ {
+ traits::assign (v, traits::convert (move (ns)));
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: ns is not guaranteed to be valid.
+ //
+ diag_record dr (fail);
+ dr << "invalid json object";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+ dr << ": " << e;
+ }
+ }
+
+ static void
+ json_object_append (value& v, names&& ns, const variable* var)
+ {
+ using val_traits = value_traits<json_value>;
+ using obj_traits = value_traits<json_object>;
+
+ try
+ {
+ obj_traits::append (v, val_traits::convert (move (ns)));
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: ns is not guaranteed to be valid.
+ //
+ diag_record dr (fail);
+ dr << "invalid json object";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+ dr << ": " << e;
+ }
+ }
+
+ static void
+ json_object_prepend (value& v, names&& ns, const variable* var)
+ {
+ using val_traits = value_traits<json_value>;
+ using obj_traits = value_traits<json_object>;
+
+ try
+ {
+ obj_traits::prepend (v, val_traits::convert (move (ns)));
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: ns is not guaranteed to be valid.
+ //
+ diag_record dr (fail);
+ dr << "invalid json object";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+ dr << ": " << e;
+ }
+ }
+
+ const json_object value_traits<json_object>::empty_instance;
+ const char* const value_traits<json_object>::type_name = "json_object";
+
+ const value_type value_traits<json_object>::value_type
+ {
+ type_name,
+ sizeof (json_object),
+ &value_traits<json_value>::value_type, // Base (assuming direct cast works
+ // for both).
+ true, // Container.
+ &value_traits<json_value>::value_type, // Element (json_value).
+ &default_dtor<json_object>,
+ &default_copy_ctor<json_object>,
+ &default_copy_assign<json_object>,
+ &json_object_assign,
+ &json_object_append,
+ &json_object_prepend,
+ &json_reverse,
+ nullptr, // No cast (cast data_ directly).
+ &json_compare,
+ &default_empty<json_object>,
+ &json_subscript,
+ &json_iterate
+ };
+
+ // cmdline
+ //
+ cmdline value_traits<cmdline>::
+ convert (names&& ns)
+ {
+ return cmdline (make_move_iterator (ns.begin ()),
+ make_move_iterator (ns.end ()));
+ }
+
+ void value_traits<cmdline>::
+ assign (value& v, cmdline&& x)
+ {
+ if (v)
+ v.as<cmdline> () = move (x);
+ else
+ new (&v.data_) cmdline (move (x));
+ }
+
+ void value_traits<cmdline>::
+ append (value& v, cmdline&& x)
+ {
+ if (v)
+ {
+ cmdline& p (v.as<cmdline> ());
+
+ if (p.empty ())
+ p.swap (x);
+ else
+ p.insert (p.end (),
+ make_move_iterator (x.begin ()),
+ make_move_iterator (x.end ()));
+ }
+ else
+ new (&v.data_) cmdline (move (x));
+ }
+
+ void value_traits<cmdline>::
+ prepend (value& v, cmdline&& x)
+ {
+ if (v)
+ {
+ cmdline& p (v.as<cmdline> ());
+
+ if (!p.empty ())
+ x.insert (x.end (),
+ make_move_iterator (p.begin ()),
+ make_move_iterator (p.end ()));
+
+ p.swap (x);
+ }
+ else
+ new (&v.data_) cmdline (move (x));
+ }
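The prepend logic above is the usual swap idiom; in plain standard C++ terms (reference sketch with <vector> and <iterator> included) it amounts to:

    std::vector<int> p {3, 4}; // Existing value.
    std::vector<int> x {1, 2}; // Value being prepended.
    x.insert (x.end (),
              std::make_move_iterator (p.begin ()),
              std::make_move_iterator (p.end ()));
    p.swap (x);                // p is now {1, 2, 3, 4}.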
+
+ static void
+ cmdline_assign (value& v, names&& ns, const variable*)
+ {
+ if (!v)
+ {
+ new (&v.data_) cmdline ();
+ v.null = false;
+ }
+
+ v.as<cmdline> ().assign (make_move_iterator (ns.begin ()),
+ make_move_iterator (ns.end ()));
+ }
+
+ static void
+ cmdline_append (value& v, names&& ns, const variable*)
+ {
+ if (!v)
+ {
+ new (&v.data_) cmdline ();
+ v.null = false;
+ }
+
+ auto& x (v.as<cmdline> ());
+ x.insert (x.end (),
+ make_move_iterator (ns.begin ()),
+ make_move_iterator (ns.end ()));
+ }
+
+ static void
+ cmdline_prepend (value& v, names&& ns, const variable*)
+ {
+ if (!v)
+ {
+ new (&v.data_) cmdline ();
+ v.null = false;
+ }
+
+ auto& x (v.as<cmdline> ());
+ x.insert (x.begin (),
+ make_move_iterator (ns.begin ()),
+ make_move_iterator (ns.end ()));
+ }
+
+ static names_view
+ cmdline_reverse (const value& v, names&, bool)
+ {
+ const auto& x (v.as<cmdline> ());
+ return names_view (x.data (), x.size ());
+ }
+
+ static int
+ cmdline_compare (const value& l, const value& r)
+ {
+ return vector_compare<name> (l, r);
+ }
+
+ const cmdline value_traits<cmdline>::empty_instance;
+
+ const char* const value_traits<cmdline>::type_name = "cmdline";
+
+ const value_type value_traits<cmdline>::value_type
+ {
+ type_name,
+ sizeof (cmdline),
+ nullptr, // No base.
+ true, // Container.
+ &value_traits<string>::value_type, // Element type.
+ &default_dtor<cmdline>,
+ &default_copy_ctor<cmdline>,
+ &default_copy_assign<cmdline>,
+ &cmdline_assign,
+ &cmdline_append,
+ &cmdline_prepend,
+ &cmdline_reverse,
+ nullptr, // No cast (cast data_ directly).
+ &cmdline_compare,
+ &default_empty<cmdline>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// variable_pool
@@ -1428,6 +2612,17 @@ namespace build2
const variable_visibility* v,
const bool* o) const
{
+ assert (var.owner == this);
+
+ if (outer_ != nullptr)
+ {
+ // Project-private variable. Assert visibility/overridability, the same
+ // as in insert().
+ //
+ assert ((o == nullptr || !*o) &&
+ (v == nullptr || *v >= variable_visibility::project));
+ }
+
// Check overridability (all overrides, if any, should already have
// been entered; see context ctor for details).
//
@@ -1509,7 +2704,7 @@ namespace build2
}
static inline void
- merge_pattern (const variable_pool::pattern& p,
+ merge_pattern (const variable_patterns::pattern& p,
const build2::value_type*& t,
const variable_visibility*& v,
const bool*& o)
@@ -1560,20 +2755,68 @@ namespace build2
const bool* o,
bool pat)
{
- assert (!global_ || global_->phase == run_phase::load);
+ if (outer_ != nullptr)
+ {
+ // Project-private pool.
+ //
+ if (n.find ('.') != string::npos) // Qualified.
+ return outer_->insert (move (n), t, v, o, pat);
+
+ // Unqualified.
+ //
+ // The pool chaining semantics for insertion: first check the outer pool
+ // then, if not found, insert into our own pool.
+ //
+ if (const variable* var = outer_->find (n))
+ {
+ // Verify type/visibility/overridability.
+ //
+ // Should we assert or fail? Currently the buildfile parser goes
+ // through update() to set these so let's do assert for now. We also
+ // require equality (these are a handful of special variables).
+ //
+ assert ((t == nullptr || t == var->type) &&
+ (v == nullptr || *v == var->visibility) &&
+ (o == nullptr || *o || var->overrides == nullptr));
+
+ return pair<variable&, bool> (const_cast<variable&> (*var), false);
+ }
+
+ // Project-private variable. Assert visibility/overridability and fall
+ // through. Again, we expect the buildfile parser to verify and diagnose
+ // these.
+ //
+ // Note: similar code in update().
+ //
+ assert ((o == nullptr || !*o) &&
+ (v == nullptr || *v >= variable_visibility::project));
+ }
+ else if (shared_)
+ {
+ // Public pool.
+ //
+ // Make sure all the unqualified variables are pre-entered during
+ // initialization.
+ //
+ assert (shared_->load_generation == 0 || n.find ('.') != string::npos);
+ }
+
+ assert (!shared_ || shared_->phase == run_phase::load);
// Apply pattern.
//
+ using pattern = variable_patterns::pattern;
+
const pattern* pa (nullptr);
auto pt (t); auto pv (v); auto po (o);
- if (pat)
+ if (pat && patterns_ != nullptr)
{
if (n.find ('.') != string::npos)
{
// Reverse means from the "largest" (most specific).
//
- for (const pattern& p: reverse_iterate (patterns_))
+ for (const pattern& p: reverse_iterate (patterns_->patterns_))
{
if (match_pattern (n, p.prefix, p.suffix, p.multi))
{
@@ -1590,6 +2833,7 @@ namespace build2
variable {
move (n),
nullptr,
+ nullptr,
pt,
nullptr,
pv != nullptr ? *pv : variable_visibility::project}));
@@ -1597,7 +2841,10 @@ namespace build2
variable& var (r.first->second);
if (r.second)
+ {
+ var.owner = this;
var.aliases = &var;
+ }
else // Note: overridden variable will always exist.
{
// This is tricky: if the pattern does not require a match, then we
@@ -1625,7 +2872,15 @@ namespace build2
const variable& variable_pool::
insert_alias (const variable& var, string n)
{
- assert (var.aliases != nullptr && var.overrides == nullptr);
+ if (outer_ != nullptr)
+ {
+ assert (n.find ('.') != string::npos); // Qualified.
+ return outer_->insert_alias (var, move (n));
+ }
+
+ assert (var.owner == this &&
+ var.aliases != nullptr &&
+ var.overrides == nullptr);
variable& a (insert (move (n),
var.type,
@@ -1646,15 +2901,15 @@ namespace build2
return a;
}
- void variable_pool::
- insert_pattern (const string& p,
- optional<const value_type*> t,
- optional<bool> o,
- optional<variable_visibility> v,
- bool retro,
- bool match)
+ void variable_patterns::
+ insert (const string& p,
+ optional<const value_type*> t,
+ optional<bool> o,
+ optional<variable_visibility> v,
+ bool retro,
+ bool match)
{
- assert (!global_ || global_->phase == run_phase::load);
+ assert (!shared_ || shared_->phase == run_phase::load);
size_t pn (p.size ());
@@ -1688,9 +2943,9 @@ namespace build2
// Apply retrospectively to existing variables.
//
- if (retro)
+ if (retro && pool_ != nullptr)
{
- for (auto& p: map_)
+ for (auto& p: pool_->map_)
{
variable& var (p.second);
@@ -1707,10 +2962,10 @@ namespace build2
}
if (j == e)
- update (var,
- t ? *t : nullptr,
- v ? &*v : nullptr,
- o ? &*o : nullptr); // Not changing the key.
+ pool_->update (var,
+ t ? *t : nullptr,
+ v ? &*v : nullptr,
+ o ? &*o : nullptr); // Not changing the key.
}
}
}
@@ -1718,7 +2973,66 @@ namespace build2
// variable_map
//
- const variable_map empty_variable_map (nullptr /* context */);
+ const variable_map empty_variable_map (variable_map::owner::empty);
+
+ // Need scope/target definition thus not inline.
+ //
+ variable_map::
+ variable_map (const scope& s, bool shared)
+ : shared_ (shared), owner_ (owner::scope), scope_ (&s), ctx (&s.ctx)
+ {
+ }
+
+ variable_map::
+ variable_map (const target& t, bool shared)
+ : shared_ (shared), owner_ (owner::target), target_ (&t), ctx (&t.ctx)
+ {
+ }
+
+ variable_map::
+ variable_map (const prerequisite& p, bool shared)
+ : shared_ (shared),
+ owner_ (owner::prereq), prereq_ (&p),
+ ctx (&p.scope.ctx)
+ {
+ }
+
+ variable_map::
+ variable_map (variable_map&& v, const prerequisite& p, bool shared)
+ : shared_ (shared),
+ owner_ (owner::scope), prereq_ (&p),
+ ctx (&p.scope.ctx),
+ m_ (move (v.m_))
+ {
+ }
+
+ variable_map::
+ variable_map (const variable_map& v, const prerequisite& p, bool shared)
+ : shared_ (shared),
+ owner_ (owner::scope), prereq_ (&p),
+ ctx (&p.scope.ctx),
+ m_ (v.m_)
+ {
+ }
+
+ lookup variable_map::
+ lookup (const string& name) const
+ {
+ lookup_type r;
+
+ const scope* bs (owner_ == owner::scope ? scope_ :
+ owner_ == owner::target ? &target_->base_scope () :
+ owner_ == owner::prereq ? &prereq_->scope :
+ nullptr);
+
+ if (const variable* var = bs->var_pool ().find (name))
+ {
+ auto p (lookup (*var));
+ r = lookup_type (p.first, &p.second, this);
+ }
+
+ return r;
+ }
auto variable_map::
lookup (const variable& var, bool typed, bool aliased) const ->
@@ -1761,24 +3075,43 @@ namespace build2
auto* r (const_cast<value_data*> (p.first));
if (r != nullptr)
+ {
+ r->extra = 0;
r->version++;
+ }
return pair<value_data*, const variable&> (r, p.second);
}
+ value& variable_map::
+ assign (const string& name)
+ {
+ assert (owner_ != owner::context);
+
+ const scope* bs (owner_ == owner::scope ? scope_ :
+ owner_ == owner::target ? &target_->base_scope () :
+ owner_ == owner::prereq ? &prereq_->scope :
+ nullptr);
+
+ return insert (bs->var_pool ()[name]).first;
+ }
+
pair<value&, bool> variable_map::
- insert (const variable& var, bool typed)
+ insert (const variable& var, bool typed, bool reset_extra)
{
- assert (!global_ || ctx->phase == run_phase::load);
+ assert (!shared_ || ctx->phase == run_phase::load);
auto p (m_.emplace (var, value_data (typed ? var.type : nullptr)));
value_data& r (p.first->second);
if (!p.second)
{
+ if (reset_extra)
+ r.extra = 0;
+
// Check if this is the first access after being assigned a type.
//
- // Note: we still need atomic in case this is not a global state.
+ // Note: we still need atomic in case this is not a shared state.
//
if (typed && var.type != nullptr)
typify (r, var);
@@ -1789,21 +3122,47 @@ namespace build2
return pair<value&, bool> (r, p.second);
}
+ auto variable_map::
+ find (const string& name) const -> const_iterator
+ {
+ assert (owner_ != owner::context);
+
+ const scope* bs (owner_ == owner::scope ? scope_ :
+ owner_ == owner::target ? &target_->base_scope () :
+ owner_ == owner::prereq ? &prereq_->scope :
+ nullptr);
+
+
+ const variable* var (bs->var_pool ().find (name));
+ return var != nullptr ? find (*var) : end ();
+ }
+
bool variable_map::
erase (const variable& var)
{
- assert (!global_ || ctx->phase == run_phase::load);
+ assert (!shared_ || ctx->phase == run_phase::load);
return m_.erase (var) != 0;
}
+ variable_map::const_iterator variable_map::
+ erase (const_iterator i)
+ {
+ assert (!shared_ || ctx->phase == run_phase::load);
+
+ return const_iterator (m_.erase (i), *this);
+ }
+
// variable_pattern_map
//
variable_map& variable_pattern_map::
insert (pattern_type type, string&& text)
{
+ // Note that this variable map is special and we use context as its owner
+ // (see variable_map for details).
+ //
auto r (map_.emplace (pattern {type, false, move (text), {}},
- variable_map (ctx, global_)));
+ variable_map (ctx, shared_)));
// Compile the regex.
//
@@ -1963,10 +3322,16 @@ namespace build2
template struct LIBBUILD2_DEFEXPORT
value_traits<vector<pair<string, optional<bool>>>>;
+ template struct LIBBUILD2_DEFEXPORT value_traits<set<string>>;
+ template struct LIBBUILD2_DEFEXPORT value_traits<set<json_value>>;
+
template struct LIBBUILD2_DEFEXPORT
value_traits<map<string, string>>;
template struct LIBBUILD2_DEFEXPORT
+ value_traits<map<json_value, json_value>>;
+
+ template struct LIBBUILD2_DEFEXPORT
value_traits<map<string, optional<string>>>;
template struct LIBBUILD2_DEFEXPORT
diff --git a/libbuild2/variable.hxx b/libbuild2/variable.hxx
index 54d573b..aed3350 100644
--- a/libbuild2/variable.hxx
+++ b/libbuild2/variable.hxx
@@ -4,7 +4,8 @@
#ifndef LIBBUILD2_VARIABLE_HXX
#define LIBBUILD2_VARIABLE_HXX
-#include <type_traits> // aligned_storage
+#include <cstddef> // max_align_t
+#include <type_traits> // is_*
#include <unordered_map>
#include <libbutl/prefix-map.hxx>
@@ -14,8 +15,11 @@
#include <libbuild2/forward.hxx>
#include <libbuild2/utility.hxx>
+#include <libbuild2/json.hxx>
+
#include <libbuild2/context.hxx>
#include <libbuild2/target-type.hxx>
+#include <libbuild2/diagnostics.hxx>
#include <libbuild2/export.hxx>
@@ -47,7 +51,11 @@ namespace build2
template <typename T> const value_type* is_a () const;
- // Element type, if this is a vector.
+ // True if the type is a container.
+ //
+ bool container;
+
+ // Element type, if this is a container and the element type is named.
//
const value_type* element_type;
@@ -74,9 +82,11 @@ namespace build2
void (*const prepend) (value&, names&&, const variable*);
// Reverse the value back to a vector of names. Storage can be used by the
- // implementation if necessary. Cannot be NULL.
+ // implementation if necessary. If reduce is true, then for an empty
+ // simple value return an empty list rather than a list of one empty name.
+ // Note that the value cannot be NULL.
//
- names_view (*const reverse) (const value&, names& storage);
+ names_view (*const reverse) (const value&, names& storage, bool reduce);
// Cast value::data_ storage to value type so that the result can be
// static_cast to const T*. If it is NULL, then cast data_ directly. Note
@@ -90,7 +100,33 @@ namespace build2
// If NULL, then the value is never empty.
//
+ // Note that this is "semantically empty", not necessarily
+ // "representationally empty". For example, an empty JSON array is
+ // semantically empty but its representation (`[]`) is not.
+ //
bool (*const empty) (const value&);
+
+ // Custom subscript function. If NULL, then the generic implementation is
+ // used.
+ //
+ // Note that val can be NULL. If val_data points to val, then it can be
+ // moved from. The sloc and bloc arguments are the subscript and brace
+ // locations, respectively.
+ //
+ // Note: should normally be consistent with iterate.
+ //
+ value (*/*const*/ subscript) (const value& val,
+ value* val_data,
+ value&& subscript,
+ const location& sloc,
+ const location& bloc);
+
+ // Custom iteration function. It should invoke the specified function for
+ // each element in order. If NULL, then the generic implementation is
+ // used. The passed value is never NULL.
+ //
+ void (*const iterate) (const value&,
+ const function<void (value&&, bool first)>&);
};
// The order of the enumerators is arranged so that their integral values
@@ -106,6 +142,10 @@ namespace build2
scope, // This scope (no outer scopes).
target, // Target and target type/pattern-specific.
prereq // Prerequisite-specific.
+
+ // Note: remember to update the visibility attribute parsing if adding any
+ // new values here. As well as the $builtin.visibility() function
+ // documentation.
};
// VC14 reports ambiguity but seems to work if we don't provide any.
@@ -145,13 +185,27 @@ namespace build2
return o << to_string (v);
}
- // variable
+ // A variable.
+ //
+ // A variable can be public, project-private, or script-private, which
+ // corresponds to the variable pool it belongs to (see variable_pool). Two
+ // variables from the same pool are considered the same if they have the
+ // same name. The variable access (public/private) rules are:
//
- // The two variables are considered the same if they have the same name.
+ // - Qualified variables are by default public while unqualified ones are
+ //   private.
+ //
+ // - Private variables must have project or lesser visibility and must not
+ //   be overridable.
+ //
+ // - An unqualified public variable can only be pre-entered during the
+ // context construction (to make sure it is not entered as private).
+ //
+ // - There are no scope-private variables in our model due to side-loading,
+ // target type/pattern-specific append, etc.
//
// Variables can be aliases of each other in which case they form a circular
// linked list (the aliases pointer for variable without any aliases points
- // to the variable itself).
+ // to the variable itself). This mechanism should only be used for variables
+ // of the same access (normally public).
//
// If the variable is overridden on the command line, then override is the
// linked list of the special override variables. Their names are derived
@@ -198,6 +252,7 @@ namespace build2
struct variable
{
string name;
+ const variable_pool* owner;
const variable* aliases; // Circular linked list.
const value_type* type; // If NULL, then not (yet) typed.
unique_ptr<const variable> overrides;
@@ -276,7 +331,13 @@ namespace build2
// Extra data that is associated with the value that can be used to store
// flags, etc. It is initialized to 0 and copied (but not assigned) from
// one value to another but is otherwise untouched (not even when the
- // value is reset to NULL).
+ // value is reset to NULL) unless it is part of variable_map::value_data,
+ // in which case it is reset to 0 on each modification (version
+ // increment; however, see reset_extra flag in variable_map::insert()).
+ //
+ // (The reset on each modification semantics is used to implement the
+ // default value distinction as currently done in the config module and
+ // will probably later also be used for ?= and $origin().)
//
// Note: if deciding to use for something make sure it is not overlapping
// with an existing usage.
@@ -290,6 +351,10 @@ namespace build2
// Check in a type-independent way if the value is empty. The value must
// not be NULL.
//
+ // Note that this is "semantically empty", not necessarily
+ // "representationally empty". For example, an empty JSON array is
+ // semantically empty but its representation (`[]`) is not.
+ //
bool
empty () const;
@@ -327,9 +392,13 @@ namespace build2
value&
operator= (nullptr_t) {if (!null) reset (); return *this;}
- value (value&&);
+ // Note that we have the noexcept specification even though copy_ctor()
+ // could potentially throw (for example, for std::map).
+ //
+ value (value&&) noexcept;
+
explicit value (const value&);
- value& operator= (value&&);
+ value& operator= (value&&); // Note: can throw for untyped RHS.
value& operator= (const value&);
value& operator= (reference_wrapper<value>);
value& operator= (reference_wrapper<const value>);
@@ -338,8 +407,8 @@ namespace build2
//
public:
// Assign/append/prepend a typed value. For assign, LHS should be either
- // of the same type or untyped. For append, LHS should be either of the
- // same type or untyped and NULL.
+ // of the same type or untyped. For append/prepend, LHS should be either
+ // of the same type or untyped and NULL.
//
template <typename T> value& operator= (T);
template <typename T> value& operator+= (T);
@@ -388,8 +457,8 @@ namespace build2
// specialization below). Types that don't fit will have to be handled
// with an extra dynamic allocation.
//
- static constexpr size_t size_ = sizeof (name_pair);
- std::aligned_storage<size_>::type data_;
+ static constexpr size_t size_ = sizeof (name_pair);
+ alignas (std::max_align_t) unsigned char data_[size_];
// Make sure we have sufficient storage for untyped values.
//
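For reference, the in-place storage pattern this change preserves: a standard C++ sketch, not build2 code; std::aligned_storage is simply replaced by a suitably aligned byte array used with placement new, which is how value::data_ is populated in the .cxx part above:

    #include <cstddef> // max_align_t
    #include <memory>  // destroy_at
    #include <new>     // placement new
    #include <string>

    static void
    storage_sketch () // Hypothetical, for illustration only.
    {
      alignas (std::max_align_t) unsigned char buf[sizeof (std::string)];
      std::string* s (new (buf) std::string ("hello")); // Construct in-place.
      std::destroy_at (s);                              // Destroy in-place.
    }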
@@ -429,38 +498,37 @@ namespace build2
template <typename T> T& cast (value&);
template <typename T> T&& cast (value&&);
template <typename T> const T& cast (const value&);
- template <typename T> const T& cast (const lookup&);
+ template <typename T> const T& cast (lookup);
// As above but returns NULL if the value is NULL (or not defined, in
// case of lookup).
//
template <typename T> T* cast_null (value&);
template <typename T> const T* cast_null (const value&);
- template <typename T> const T* cast_null (const lookup&);
+ template <typename T> const T* cast_null (lookup);
// As above but returns empty value if the value is NULL (or not defined, in
// case of lookup).
//
template <typename T> const T& cast_empty (const value&);
- template <typename T> const T& cast_empty (const lookup&);
+ template <typename T> const T& cast_empty (lookup);
// As above but returns the specified default if the value is NULL (or not
// defined, in case of lookup). Note that the return is by value, not by
// reference.
//
template <typename T> T cast_default (const value&, const T&);
- template <typename T> T cast_default (const lookup&, const T&);
+ template <typename T> T cast_default (lookup, const T&);
// As above but returns false/true if the value is NULL (or not defined,
// in case of lookup). Note that the template argument is only for
// documentation and should be bool (or semantically compatible).
//
template <typename T> T cast_false (const value&);
- template <typename T> T cast_false (const lookup&);
+ template <typename T> T cast_false (lookup);
template <typename T> T cast_true (const value&);
- template <typename T> T cast_true (const lookup&);
-
+ template <typename T> T cast_true (lookup);
// Assign value type to the value. The variable is optional and is only used
// for diagnostics.
@@ -473,20 +541,22 @@ namespace build2
typify_atomic (context&, value&, const value_type&, const variable*);
// Remove value type from the value reversing it to names. This is similar
- // to reverse() below except that it modifies the value itself.
+ // to reverse() below except that it modifies the value itself. Note that
+ // the reduce semantics applies to empty but not null.
//
- LIBBUILD2_SYMEXPORT void untypify (value&);
+ LIBBUILD2_SYMEXPORT void untypify (value&, bool reduce);
// Reverse the value back to names. The value should not be NULL and storage
- // should be empty.
+ // should be empty. If reduce is true, then for an empty simple value return
+ // an empty list rather than a list of one empty name.
//
vector_view<const name>
- reverse (const value&, names& storage);
+ reverse (const value&, names& storage, bool reduce);
vector_view<name>
- reverse (value&, names& storage);
+ reverse (value&, names& storage, bool reduce);
- // Variable lookup result, AKA, binding of a name to a value.
+ // Variable lookup result, AKA, binding of a variable to a value.
//
// A variable can be undefined, NULL, or contain a (potentially empty)
// value.
@@ -629,7 +699,7 @@ namespace build2
// case (container) if invalid_argument is thrown, the names are not
// guaranteed to be unchanged.
//
- //template <typename T> T convert (names&&); (declaration causes ambiguity)
+ template <typename T> T convert (names&&);
// Convert value to T. If value is already of type T, then simply cast it.
// Otherwise call convert(names) above. If value is NULL, then throw
@@ -907,7 +977,7 @@ namespace build2
// pair of two empties).
//
// @@ Maybe we should redo this with optional<> to signify which half can
- // be missing?
+ // be missing? See also dump_value(json).
//
template <>
struct LIBBUILD2_SYMEXPORT value_traits<name_pair>
@@ -1115,12 +1185,35 @@ namespace build2
static const pair_vector_value_type<K, V> value_type;
};
+ // set<T>
+ //
+ template <typename T>
+ struct set_value_type;
+
+ template <typename T>
+ struct value_traits<set<T>>
+ {
+ static_assert (sizeof (set<T>) <= value::size_, "insufficient space");
+
+ static set<T> convert (names&&);
+ static void assign (value&, set<T>&&);
+ static void append (value&, set<T>&&);
+ static void prepend (value&, set<T>&&);
+ static bool empty (const set<T>& x) {return x.empty ();}
+
+ static const set<T> empty_instance;
+ static const set_value_type<T> value_type;
+ };
+
// map<K, V>
//
// Either K or V can be optional<T> making the key or value optional.
//
- // Note that append/+= is non-overriding (like insert()) while prepend/=+
- // is (like insert_or_assign()).
+ // Note that append/+= is overriding (like insert_or_assign()) while
+ // prepend/=+ is not (like insert()). In a sense, whatever appears last
+ // (from left to right) is kept, which is consistent with what we expect to
+ // happen when specifying the same key repeatedly in a representation (e.g.,
+ // a@0 a@1).
//
template <typename K, typename V>
struct map_value_type;
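The std::map analogy mentioned in the comment above, spelled out as a standard C++ sketch (hypothetical function, for reference only):

    #include <map>
    #include <string>

    static void
    map_semantics_sketch ()
    {
      std::map<std::string, int> m {{"a", 1}};
      m.insert ({"a", 2});         // Not overriding: m["a"] stays 1 (like prepend/=+).
      m.insert_or_assign ("a", 3); // Overriding: m["a"] becomes 3 (like append/+=).
    }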
@@ -1141,12 +1234,116 @@ namespace build2
static const map_value_type<K, V> value_type;
};
+ // json
+ //
+ // Note that we do not expose json_member as a value type instead
+ // representing it as an object with one member. While we could expose
+ // member (and reverse it as a pair since there is no valid JSON
+ // representation for a standalone member), this doesn't seem to buy us much
+ // but will cause complications (for example, in supporting append/prepend).
+ // On the other hand, representing a member as an object only requires a bit
+ // of what looks like harmless looseness in a few contexts (such as the
+ // $json.member_*() functions).
+ //
+ // Note that similar to map, JSON object append/+= is overriding while
+ // prepend/=+ is not. In a sense, whatever appears last (from left to right)
+ // is kept, which is consistent with what we expect to happen when
+ // specifying the same name repeatedly (provided it's not considered
+ // invalid) in a representation (e.g., {"a":1,"a":2}).
+ //
+ template <>
+ struct LIBBUILD2_SYMEXPORT value_traits<json_value>
+ {
+ static_assert (sizeof (json_value) <= value::size_, "insufficient space");
+
+ static json_value convert (names&&);
+ static void assign (value&, json_value&&);
+ static void append (value&, json_value&&);
+ static void prepend (value&, json_value&&);
+ static bool empty (const json_value&); // null or empty array/object
+
+ // These are provided to make it possible to use json_value as a container
+ // element.
+ //
+ static json_value convert (name&&, name*);
+ static name reverse (const json_value&);
+ static int compare (const json_value& x, const json_value& y) {
+ return x.compare (y);}
+
+ static const json_value empty_instance; // null
+ static const char* const type_name;
+ static const build2::value_type value_type;
+ };
+
+ template <>
+ struct LIBBUILD2_SYMEXPORT value_traits<json_array>
+ {
+ static_assert (sizeof (json_array) <= value::size_, "insufficient space");
+
+ static json_array convert (names&&);
+ static void assign (value&, json_array&&);
+ static void append (value&, json_value&&); // Note: value, not array.
+ static void prepend (value&, json_value&&);
+ static bool empty (const json_array& v) {return v.array.empty ();}
+
+ static const json_array empty_instance; // empty array
+ static const char* const type_name;
+ static const build2::value_type value_type;
+ };
+
+ template <>
+ struct LIBBUILD2_SYMEXPORT value_traits<json_object>
+ {
+ static_assert (sizeof (json_object) <= value::size_, "insufficient space");
+
+ static json_object convert (names&&);
+ static void assign (value&, json_object&&);
+ static void append (value&, json_value&&); // Note: value, not object.
+ static void prepend (value&, json_value&&);
+ static bool empty (const json_object& v) {return v.object.empty ();}
+
+ static const json_object empty_instance; // empty object
+ static const char* const type_name;
+ static const build2::value_type value_type;
+ };
+
+ // Canned command line to be re-lexed (used in {Build,Test}scripts).
+ //
+ // Note that because the executable can be specified as a target or as
+ // process_path_ex, this is a list of names rather than a list of strings.
+ // Note also that unlike vector<name> this type allows name pairs.
+ //
+ struct cmdline: vector<name>
+ {
+ using vector<name>::vector;
+
+ cmdline () {} // For Clang.
+ };
+
+ template <>
+ struct LIBBUILD2_SYMEXPORT value_traits<cmdline>
+ {
+ static_assert (sizeof (cmdline) <= value::size_, "insufficient space");
+
+ static cmdline convert (names&&);
+ static void assign (value&, cmdline&&);
+ static void append (value&, cmdline&&);
+ static void prepend (value&, cmdline&&);
+ static bool empty (const cmdline& x) {return x.empty ();}
+
+ static const cmdline empty_instance;
+ static const char* const type_name;
+ static const build2::value_type value_type;
+ };
+
// Explicitly pre-instantiate and export value_traits templates for
// vector/map value types used in the build2 project. Note that this is not
// merely an optimization since not doing so we may end up with multiple
// value type objects for the same traits type (and we use their addresses
// as identity; see cast(const value&) for an example).
//
+ // NOTE: REMEMBER TO UPDATE dump_value(json) IF CHANGING ANYTHING HERE!
+ //
extern template struct LIBBUILD2_DECEXPORT value_traits<strings>;
extern template struct LIBBUILD2_DECEXPORT value_traits<vector<name>>;
extern template struct LIBBUILD2_DECEXPORT value_traits<paths>;
@@ -1166,10 +1363,16 @@ namespace build2
extern template struct LIBBUILD2_DECEXPORT
value_traits<vector<pair<string, optional<bool>>>>;
+ extern template struct LIBBUILD2_DECEXPORT value_traits<set<string>>;
+ extern template struct LIBBUILD2_DECEXPORT value_traits<set<json_value>>;
+
extern template struct LIBBUILD2_DECEXPORT
value_traits<map<string, string>>;
extern template struct LIBBUILD2_DECEXPORT
+ value_traits<map<json_value, json_value>>;
+
+ extern template struct LIBBUILD2_DECEXPORT
value_traits<map<string, optional<string>>>;
extern template struct LIBBUILD2_DECEXPORT
@@ -1196,9 +1399,12 @@ namespace build2
// Variable pool.
//
- // The global (as in, context-wide) version is protected by the phase mutex.
+ // The shared versions (as in, context or project-wide) are protected by the
+ // phase mutex and thus can only be modified during the load phase.
//
- class variable_pool
+ class variable_patterns;
+
+ class LIBBUILD2_SYMEXPORT variable_pool
{
public:
// Find existing (assert exists).
@@ -1218,7 +1424,7 @@ namespace build2
//
// Note also that a pattern and later insertions may restrict (but not
// relax) visibility and overridability.
-
+ //
const variable&
insert (string name)
{
@@ -1276,6 +1482,12 @@ namespace build2
}
const variable&
+ insert (string name, const value_type* type)
+ {
+ return insert (move (name), type, nullptr, nullptr).first;
+ }
+
+ const variable&
insert (string name,
const value_type* type,
bool overridable,
@@ -1296,70 +1508,74 @@ namespace build2
// Overridable aliased variables are most likely a bad idea: without a
// significant effort, the overrides will only be applied along the alias
// names (i.e., there would be no cross-alias overriding). So for now we
- // don't allow this (use the common variable mechanism instead).
+ // don't allow this (manually handle multiple names by merging their
+ // values instead).
//
- LIBBUILD2_SYMEXPORT const variable&
+ // Note: currently only public variables can be aliased.
+ //
+ const variable&
insert_alias (const variable& var, string name);
- // Insert a variable pattern. Any variable that matches this pattern will
- // have the specified type, visibility, and overridability. If match is
- // true, then individual insertions of the matching variable must match
- // the specified type/visibility/overridability. Otherwise, individual
- // insertions can provide alternative values and the pattern values are a
- // fallback (if you specify false you better be very clear about what you
- // are trying to achieve).
+ // Iteration.
//
- // The pattern must be in the form [<prefix>.](*|**)[.<suffix>] where '*'
- // matches single component stems (i.e., 'foo' but not 'foo.bar') and '**'
- // matches single and multi-component stems. Note that only multi-
- // component variables are considered for pattern matching (so just '*'
- // won't match anything).
+ public:
+ using key = butl::map_key<string>;
+ using map = std::unordered_map<key, variable>;
+
+ using const_iterator = butl::map_iterator_adapter<map::const_iterator>;
+
+ const_iterator begin () const {return const_iterator (map_.begin ());}
+ const_iterator end () const {return const_iterator (map_.end ());}
+
+ // Construction.
//
- // The patterns are matched in the more-specific-first order where the
- // pattern is considered more specific if it has a greater sum of its
- // prefix and suffix lengths. If the prefix and suffix are equal, then the
- // '*' pattern is considered more specific than '**'. If neither is more
- // specific, then they are matched in the reverse order of insertion.
+ // There are three specific variable pool instances:
//
- // If retro is true then a newly inserted pattern is also applied
- // retrospectively to all the existing variables that match but only
- // if no more specific pattern already exists (which is then assumed
- // to have been applied). So if you use this functionality, watch out
- // for the insertion order (you probably want more specific first).
+ //   shared  outer
+ //   ---------------
+ //   true    null      -- public variable pool in context
+ //   true    not null  -- project-private pool in scope::root_extra
+ //                        with outer pointing to context::var_pool
+ //   false   not null  -- temporary scope-private pool in temp_scope
+ //                        with outer pointing to context::var_pool
+ //   false   null      -- script-private pool in script::environment
//
- public:
- LIBBUILD2_SYMEXPORT void
- insert_pattern (const string& pattern,
- optional<const value_type*> type,
- optional<bool> overridable,
- optional<variable_visibility>,
- bool retro = false,
- bool match = true);
+ // Notice that the script-private pool doesn't rely on outer and does
+ // its own pool chaining. So currently we assume that if outer is not
+ // NULL, then this is a project-private pool.
+ //
+ private:
+ friend class context;
+ friend class temp_scope;
- template <typename T>
- void
- insert_pattern (const string& p,
- optional<bool> overridable,
- optional<variable_visibility> v,
- bool retro = false,
- bool match = true)
- {
- insert_pattern (
- p, &value_traits<T>::value_type, overridable, v, retro, match);
- }
+ // Shared pool (public or project-private). The shared argument is
+ // flag/context.
+ //
+ variable_pool (context* shared,
+ variable_pool* outer,
+ const variable_patterns* patterns)
+ : shared_ (shared), outer_ (outer), patterns_ (patterns) {}
public:
- void
- clear () {map_.clear ();}
+ // Script-private pool.
+ //
+ explicit
+ variable_pool (const variable_patterns* patterns = nullptr)
+ : shared_ (nullptr), outer_ (nullptr), patterns_ (patterns) {}
+
+ variable_pool (variable_pool&&) = delete;
+ variable_pool& operator= (variable_pool&&) = delete;
- variable_pool (): variable_pool (nullptr) {}
+ variable_pool (const variable_pool&) = delete;
+ variable_pool& operator= (const variable_pool&) = delete;
- // RW access (only for the global pool).
+ public:
+ // RW access (only for shared pools plus the temp_scope special case).
//
variable_pool&
rw () const
{
- assert (global_->phase == run_phase::load);
+ assert (shared_ == nullptr || shared_->phase == run_phase::load);
return const_cast<variable_pool&> (*this);
}
@@ -1375,14 +1591,16 @@ namespace build2
// Note that in insert() NULL overridable is interpreted as false unless
// overridden by a pattern while in update() NULL overridable is ignored.
//
- LIBBUILD2_SYMEXPORT pair<variable&, bool>
+ pair<variable&, bool>
insert (string name,
const value_type*,
const variable_visibility*,
const bool* overridable,
bool pattern = true);
- LIBBUILD2_SYMEXPORT void
+ // Note: the variable must belong to this pool.
+ //
+ void
update (variable&,
const value_type*,
const variable_visibility*,
@@ -1391,9 +1609,6 @@ namespace build2
// Variable map.
//
private:
- using key = butl::map_key<string>;
- using map = std::unordered_map<key, variable>;
-
pair<map::iterator, bool>
insert (variable&& var)
{
@@ -1402,19 +1617,127 @@ namespace build2
// gets hairy very quickly (there is no std::hash for C-strings). So
// let's rely on small object-optimized std::string for now.
//
- string n (var.name);
+ string n (var.name); // @@ PERF (maybe keep a reuse buffer at least?)
auto r (map_.insert (map::value_type (&n, move (var))));
if (r.second)
+ {
+#if 0
+ if (shared_ && outer_ == nullptr) // Global pool in context.
+ {
+ size_t n (map_.bucket_count ());
+ if (n > buckets_)
+ {
+ text << "variable_pool buckets: " << buckets_ << " -> " << n
+ << " (" << map_.size () << ")";
+ buckets_ = n;
+ }
+ }
+#endif
r.first->first.p = &r.first->second.name;
+ }
return r;
}
+ private:
+ friend class variable_patterns;
+
+ context* shared_;
+ variable_pool* outer_;
+ const variable_patterns* patterns_;
map map_;
- // Patterns.
+#if 0
+ size_t buckets_ = 0;
+#endif
+ };
+
+ // Variable patterns.
+ //
+ // This mechanism is used to assign variable types/visibility/overridability
+ // based on the variable name pattern. This mechanism can only be used for
+ // qualified variables and is thus only provided for the public variable
+ // pool.
+ //
+ // Similar to variable_pool, the shared versions are protected by the phase
+ // mutex and thus can only be modified during the load phase.
+ //
+ class LIBBUILD2_SYMEXPORT variable_patterns
+ {
+ public:
+ // Insert a variable pattern. Any variable that matches this pattern will
+ // have the specified type, visibility, and overridability. If match is
+ // true, then individual insertions of the matching variable must match
+ // the specified type/visibility/overridability. Otherwise, individual
+ // insertions can provide alternative values and the pattern values are a
+ // fallback (if you specify false you better be very clear about what you
+ // are trying to achieve).
+ //
+ // The pattern must be in the form [<prefix>.](*|**)[.<suffix>] where '*'
+ // matches single component stems (i.e., 'foo' but not 'foo.bar') and '**'
+ // matches single and multi-component stems. Note that only multi-
+ // component variables are considered for pattern matching (so just '*'
+ // won't match anything).
+ //
+ // The patterns are matched in the more-specific-first order where the
+ // pattern is considered more specific if it has a greater sum of its
+ // prefix and suffix lengths. If the prefix and suffix are equal, then the
+ // '*' pattern is considered more specific than '**'. If neither is more
+ // specific, then they are matched in the reverse order of insertion.
+ //
+ // If retro is true then a newly inserted pattern is also applied
+ // retrospectively to all the existing variables that match but only
+ // if no more specific pattern already exists (which is then assumed
+ // to have been applied). So if you use this functionality, watch out
+ // for the insertion order (you probably want more specific first).
+ //
+ void
+ insert (const string& pattern,
+ optional<const value_type*> type,
+ optional<bool> overridable,
+ optional<variable_visibility>,
+ bool retro = false,
+ bool match = true);
+
+ template <typename T>
+ void
+ insert (const string& p,
+ optional<bool> overridable,
+ optional<variable_visibility> v,
+ bool retro = false,
+ bool match = true)
+ {
+ insert (p, &value_traits<T>::value_type, overridable, v, retro, match);
+ }
+
+ public:
+ // The shared argument is flag/context. The pool argument is for
+ // retrospective pattern application.
+ //
+ explicit
+ variable_patterns (context* shared, variable_pool* pool)
+ : shared_ (shared), pool_ (pool) {}
+
+ variable_patterns (variable_patterns&&) = delete;
+ variable_patterns& operator= (variable_patterns&&) = delete;
+
+ variable_patterns (const variable_patterns&) = delete;
+ variable_patterns& operator= (const variable_patterns&) = delete;
+
+ public:
+ // RW access (only for shared pools).
//
+ variable_patterns&
+ rw () const
+ {
+ assert (shared_->phase == run_phase::load);
+ return const_cast<variable_patterns&> (*this);
+ }
+
+ variable_patterns&
+ rw (scope&) const {return const_cast<variable_patterns&> (*this);}
+
public:
struct pattern
{
@@ -1442,17 +1765,11 @@ namespace build2
};
private:
- multiset<pattern> patterns_;
-
- // Global pool flag/context.
- //
- private:
- friend class context;
-
- explicit
- variable_pool (context* global): global_ (global) {}
+ friend class variable_pool;
- context* global_;
+ context* shared_;
+ variable_pool* pool_;
+ multiset<pattern> patterns_;
};
}
@@ -1493,7 +1810,10 @@ namespace build2
using value::value;
using value::operator=;
- size_t version = 0; // Incremented on each modification (variable_cache).
+ // Incremented on each modification, at which point we also reset
+ // value::extra to 0.
+ //
+ size_t version = 0;
};
// Note that we guarantee ascending iteration order (e.g., for predictable
@@ -1535,8 +1855,13 @@ namespace build2
lookup_type
operator[] (const variable& var) const
{
- auto p (lookup (var));
- return lookup_type (p.first, &p.second, this);
+ lookup_type r;
+ if (!empty ())
+ {
+ auto p (lookup (var));
+ r = lookup_type (p.first, &p.second, this);
+ }
+ return r;
}
lookup_type
@@ -1549,12 +1874,17 @@ namespace build2
lookup_type
operator[] (const string& name) const
{
- const variable* var (ctx != nullptr
- ? ctx->var_pool.find (name)
- : nullptr);
- return var != nullptr ? operator[] (*var) : lookup_type ();
+ assert (owner_ != owner::context);
+
+ lookup_type r;
+ if (!empty ())
+ r = lookup (name);
+ return r;
}
+ lookup_type
+ lookup (const string& name) const;
+
// If typed is false, leave the value untyped even if the variable is. If
// aliased is false, then don't consider aliases (used by the variable
// override machinery where the aliases chain is repurposed for something
@@ -1574,6 +1904,18 @@ namespace build2
const_iterator (r.second, *this));
}
+ pair<const_iterator, const_iterator>
+ lookup_namespace (string ns) const
+ {
+ // It's ok to use the temporary here since we compare names and don't
+ // insert anything.
+ //
+ return lookup_namespace (variable {
+ move (ns),
+ nullptr, nullptr, nullptr, nullptr,
+ variable_visibility::project});
+ }
+
// Convert a lookup pointing to a value belonging to this variable map
// to its non-const version. Note that this is only safe on the original
// values (see lookup_original()).
@@ -1583,6 +1925,7 @@ namespace build2
{
assert (l.vars == this);
value& r (const_cast<value&> (*l.value));
+ r.extra = 0;
static_cast<value_data&> (r).version++;
return r;
}
@@ -1599,24 +1942,37 @@ namespace build2
return assign (*var);
}
- // Note that the variable is expected to have already been registered.
+ // Note that the variable is expected to have already been inserted.
//
value&
- assign (const string& name) {return insert (ctx->var_pool[name]).first;}
+ assign (const string& name);
// As above but also return an indication of whether the new value (which
// will be NULL) was actually inserted. Similar to find(), if typed is
- // false, leave the value untyped even if the variable is.
+ // false, leave the value untyped even if the variable is. If reset_extra
+ // is false, then don't reset the existing value's value::extra.
//
pair<value&, bool>
- insert (const variable&, bool typed = true);
+ insert (const variable&, bool typed = true, bool reset_extra = true);
- // Note: does not deal with aliases.
+ // Note: the following functions do not deal with aliases.
//
+ const_iterator
+ find (const variable& var) const
+ {
+ return const_iterator (m_.find (var), *this);
+ }
+
+ const_iterator
+ find (const string& name) const;
+
bool
erase (const variable&);
const_iterator
+ erase (const_iterator);
+
+ const_iterator
begin () const {return const_iterator (m_.begin (), *this);}
const_iterator
@@ -1629,21 +1985,58 @@ namespace build2
size () const {return m_.size ();}
public:
- // Global should be true if this map is part of the global build state
- // (e.g., scopes, etc).
+ // Shared should be true if this map is part of the shared build state
+ // (e.g., scopes) and thus should only be modified during the load phase.
//
explicit
- variable_map (context& c, bool global = false)
- : ctx (&c), global_ (global) {}
+ variable_map (const scope& owner, bool shared = false);
+
+ explicit
+ variable_map (const target& owner, bool shared = false);
+
+ explicit
+ variable_map (const prerequisite& owner, bool shared = false);
+
+ variable_map (variable_map&&, const prerequisite&, bool shared = false);
+ variable_map (const variable_map&, const prerequisite&, bool shared = false);
+
+ variable_map&
+ operator= (variable_map&& v) noexcept {m_ = move (v.m_); return *this;}
+
+ variable_map&
+ operator= (const variable_map& v) {m_ = v.m_; return *this;}
+
+ // The context owner is for special "managed" variable maps. Note that
+ // such maps cannot lookup/insert variable names specified as strings.
+ //
+ variable_map (context& c, bool shared)
+ : shared_ (shared), owner_ (owner::context), ctx (&c) {}
+
+ // Note: std::map's move constructor can throw.
+ //
+ variable_map (variable_map&& v)
+ : shared_ (v.shared_), owner_ (v.owner_), ctx (v.ctx), m_ (move (v.m_))
+ {
+ assert (owner_ == owner::context);
+ }
+
+ variable_map (const variable_map& v)
+ : shared_ (v.shared_), owner_ (v.owner_), ctx (v.ctx), m_ (v.m_)
+ {
+ assert (v.owner_ == owner::context);
+ }
void
clear () {m_.clear ();}
- // Implementation details (only used for empty_variable_map).
+ // Implementation details.
//
public:
+ enum class owner {empty, context, scope, target, prereq};
+
explicit
- variable_map (context* c): ctx (c) {}
+ variable_map (owner o, context* c = nullptr, bool shared = false)
+ : shared_ (shared), owner_ (o), ctx (c) {}
private:
friend class variable_type_map;
@@ -1652,9 +2045,18 @@ namespace build2
typify (const value_data&, const variable&) const;
private:
+ friend class target_set;
+
+ bool shared_;
+ owner owner_;
+ union
+ {
+ const scope* scope_;
+ const target* target_;
+ const prerequisite* prereq_;
+ };
context* ctx;
map_type m_;
- bool global_;
};
LIBBUILD2_SYMEXPORT extern const variable_map empty_variable_map;
@@ -1787,8 +2189,8 @@ namespace build2
using const_iterator = map_type::const_iterator;
using const_reverse_iterator = map_type::const_reverse_iterator;
- variable_pattern_map (context& c, bool global)
- : ctx (c), global_ (global) {}
+ variable_pattern_map (context& c, bool shared)
+ : ctx (c), shared_ (shared) {}
// Note that here we assume the "outer" pattern format (delimiters, flags,
// etc) is valid.
@@ -1804,7 +2206,7 @@ namespace build2
operator[] (string text)
{
return map_.emplace (pattern {pattern_type::path, false, move (text), {}},
- variable_map (ctx, global_)).first->second;
+ variable_map (ctx, shared_)).first->second;
}
const_iterator begin () const {return map_.begin ();}
@@ -1816,7 +2218,7 @@ namespace build2
private:
context& ctx;
map_type map_;
- bool global_;
+ bool shared_;
};
class LIBBUILD2_SYMEXPORT variable_type_map
@@ -1826,13 +2228,13 @@ namespace build2
variable_pattern_map>;
using const_iterator = map_type::const_iterator;
- variable_type_map (context& c, bool global): ctx (c), global_ (global) {}
+ variable_type_map (context& c, bool shared): ctx (c), shared_ (shared) {}
variable_pattern_map&
operator[] (const target_type& t)
{
return map_.emplace (
- t, variable_pattern_map (ctx, global_)).first->second;
+ t, variable_pattern_map (ctx, shared_)).first->second;
}
const_iterator begin () const {return map_.begin ();}
@@ -1862,7 +2264,7 @@ namespace build2
private:
context& ctx;
map_type map_;
- bool global_;
+ bool shared_;
};
}
diff --git a/libbuild2/variable.ixx b/libbuild2/variable.ixx
index a84c012..ca84a33 100644
--- a/libbuild2/variable.ixx
+++ b/libbuild2/variable.ixx
@@ -224,7 +224,7 @@ namespace build2
template <typename T>
inline const T&
- cast (const lookup& l)
+ cast (lookup l)
{
return cast<T> (*l);
}
@@ -245,7 +245,7 @@ namespace build2
template <typename T>
inline const T*
- cast_null (const lookup& l)
+ cast_null (lookup l)
{
return l ? &cast<T> (*l) : nullptr;
}
@@ -259,7 +259,7 @@ namespace build2
template <typename T>
inline const T&
- cast_empty (const lookup& l)
+ cast_empty (lookup l)
{
return l ? cast<T> (l) : value_traits<T>::empty_instance;
}
@@ -273,7 +273,7 @@ namespace build2
template <typename T>
inline T
- cast_default (const lookup& l, const T& d)
+ cast_default (lookup l, const T& d)
{
return l ? cast<T> (l) : d;
}
@@ -287,7 +287,7 @@ namespace build2
template <typename T>
inline T
- cast_false (const lookup& l)
+ cast_false (lookup l)
{
return l && cast<T> (l);
}
@@ -301,7 +301,7 @@ namespace build2
template <typename T>
inline T
- cast_true (const lookup& l)
+ cast_true (lookup l)
{
return !l || cast<T> (l);
}
@@ -326,18 +326,21 @@ namespace build2
}
inline vector_view<const name>
- reverse (const value& v, names& storage)
+ reverse (const value& v, names& storage, bool reduce)
{
assert (v &&
storage.empty () &&
(v.type == nullptr || v.type->reverse != nullptr));
- return v.type == nullptr ? v.as<names> () : v.type->reverse (v, storage);
+
+ return v.type == nullptr
+ ? v.as<names> ()
+ : v.type->reverse (v, storage, reduce);
}
inline vector_view<name>
- reverse (value& v, names& storage)
+ reverse (value& v, names& storage, bool reduce)
{
- names_view cv (reverse (static_cast<const value&> (v), storage));
+ names_view cv (reverse (static_cast<const value&> (v), storage, reduce));
return vector_view<name> (const_cast<name*> (cv.data ()), cv.size ());
}
@@ -359,13 +362,53 @@ namespace build2
// This one will be SFINAE'd out unless T is a container.
//
+ // If T is both (e.g., json_value), then make this version preferable.
+ //
template <typename T>
inline auto
- convert (names&& ns) -> decltype (value_traits<T>::convert (move (ns)))
+ convert_impl (names&& ns, int)
+ -> decltype (value_traits<T>::convert (move (ns)))
{
return value_traits<T>::convert (move (ns));
}
+ // This one will be SFINAE'd out unless T is a simple value.
+ //
+ // If T is both (e.g., json_value), then make this version less preferable.
+ //
+ template <typename T>
+ auto // NOTE: not inline!
+ convert_impl (names&& ns, ...) ->
+ decltype (value_traits<T>::convert (move (ns[0]), nullptr))
+ {
+ size_t n (ns.size ());
+
+ if (n == 0)
+ {
+ if (value_traits<T>::empty_value)
+ return T ();
+ }
+ else if (n == 1)
+ {
+ return convert<T> (move (ns[0]));
+ }
+ else if (n == 2 && ns[0].pair != '\0')
+ {
+ return convert<T> (move (ns[0]), move (ns[1]));
+ }
+
+ throw invalid_argument (
+ string ("invalid ") + value_traits<T>::type_name +
+ (n == 0 ? " value: empty" : " value: multiple names"));
+ }
+
+ template <typename T>
+ inline T
+ convert (names&& ns)
+ {
+ return convert_impl<T> (move (ns), 0);
+ }
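+
+  // For example (assuming the standard value_traits specializations):
+  // convert<strings> resolves to the container overload above while
+  // convert<bool> falls back to the simple-value overload; for a type
+  // like json_value, which provides both forms of convert(), the int/...
+  // trick makes the container overload preferred.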
+
// bool value
//
inline void value_traits<bool>::
@@ -850,6 +893,44 @@ namespace build2
new (&v.data_) vector<pair<K, V>> (move (x));
}
+ // set<T> value
+ //
+ template <typename T>
+ inline void value_traits<set<T>>::
+ assign (value& v, set<T>&& x)
+ {
+ if (v)
+ v.as<set<T>> () = move (x);
+ else
+ new (&v.data_) set<T> (move (x));
+ }
+
+ template <typename T>
+ inline void value_traits<set<T>>::
+ append (value& v, set<T>&& x)
+ {
+ if (v)
+ {
+ set<T>& p (v.as<set<T>> ());
+
+ if (p.empty ())
+ p.swap (x);
+ else
+ // Keys (being const) can only be copied.
+ //
+ p.insert (x.begin (), x.end ());
+ }
+ else
+ new (&v.data_) set<T> (move (x));
+ }
+
+ template <typename T>
+ inline void value_traits<set<T>>::
+ prepend (value& v, set<T>&& x)
+ {
+ append (v, move (x));
+ }
+
// map<K, V> value
//
template <typename K, typename V>
@@ -903,21 +984,141 @@ namespace build2
new (&v.data_) map<K, V> (move (x));
}
- // variable_pool
+ // json
//
- inline const variable& variable_pool::
- operator[] (const string& n) const
+ inline bool value_traits<json_value>::
+ empty (const json_value& v)
{
- const variable* r (find (n));
- assert (r != nullptr);
- return *r;
+ // Note: should be consistent with $json.size().
+ //
+ switch (v.type)
+ {
+ case json_type::null: return true;
+ case json_type::boolean:
+ case json_type::signed_number:
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ case json_type::string: break;
+ case json_type::array: return v.array.empty ();
+ case json_type::object: return v.object.empty ();
+ }
+
+ return false;
}
+ inline void value_traits<json_value>::
+ assign (value& v, json_value&& x)
+ {
+ if (v)
+ v.as<json_value> () = move (x);
+ else
+ new (&v.data_) json_value (move (x));
+ }
+
+ inline void value_traits<json_value>::
+ append (value& v, json_value&& x)
+ {
+ if (v)
+ v.as<json_value> ().append (move (x));
+ else
+ new (&v.data_) json_value (move (x));
+ }
+
+ inline void value_traits<json_value>::
+ prepend (value& v, json_value&& x)
+ {
+ if (v)
+ v.as<json_value> ().prepend (move (x));
+ else
+ new (&v.data_) json_value (move (x));
+ }
+
+ // json_array
+ //
+ inline void value_traits<json_array>::
+ assign (value& v, json_array&& x)
+ {
+ if (v)
+ v.as<json_array> () = move (x);
+ else
+ new (&v.data_) json_array (move (x));
+ }
+
+ inline void value_traits<json_array>::
+ append (value& v, json_value&& x)
+ {
+ if (!v)
+ new (&v.data_) json_array ();
+
+ v.as<json_array> ().append (move (x));
+ }
+
+ inline void value_traits<json_array>::
+ prepend (value& v, json_value&& x)
+ {
+ if (!v)
+ new (&v.data_) json_array ();
+
+ v.as<json_array> ().prepend (move (x));
+ }
+
+ // json_object
+ //
+ inline void value_traits<json_object>::
+ assign (value& v, json_object&& x)
+ {
+ if (v)
+ v.as<json_object> () = move (x);
+ else
+ new (&v.data_) json_object (move (x));
+ }
+
+ inline void value_traits<json_object>::
+ append (value& v, json_value&& x)
+ {
+ if (!v)
+ new (&v.data_) json_object ();
+
+ v.as<json_object> ().append (move (x));
+ }
+
+ inline void value_traits<json_object>::
+ prepend (value& v, json_value&& x)
+ {
+ if (!v)
+ new (&v.data_) json_object ();
+
+ v.as<json_object> ().prepend (move (x));
+ }
+
+ // variable_pool
+ //
inline const variable* variable_pool::
find (const string& n) const
{
+    // The pool chaining semantics for lookup: first check our own pool and
+    // then, if not found, the outer pool.
+ //
auto i (map_.find (&n));
- return i != map_.end () ? &i->second : nullptr;
+ if (i != map_.end ())
+ return &i->second;
+
+ if (outer_ != nullptr)
+ {
+ i = outer_->map_.find (&n);
+ if (i != outer_->map_.end ())
+ return &i->second;
+ }
+
+ return nullptr;
+ }
+
+ inline const variable& variable_pool::
+ operator[] (const string& n) const
+ {
+ const variable* r (find (n));
+ assert (r != nullptr);
+ return *r;
}
// variable_map
diff --git a/libbuild2/variable.txx b/libbuild2/variable.txx
index b1c4112..0b831e9 100644
--- a/libbuild2/variable.txx
+++ b/libbuild2/variable.txx
@@ -27,34 +27,6 @@ namespace build2
return false;
}
- // This one will be SFINAE'd out unless T is a simple value.
- //
- template <typename T>
- auto
- convert (names&& ns) ->
- decltype (value_traits<T>::convert (move (ns[0]), nullptr))
- {
- size_t n (ns.size ());
-
- if (n == 0)
- {
- if (value_traits<T>::empty_value)
- return T ();
- }
- else if (n == 1)
- {
- return convert<T> (move (ns[0]));
- }
- else if (n == 2 && ns[0].pair != '\0')
- {
- return convert<T> (move (ns[0]), move (ns[1]));
- }
-
- throw invalid_argument (
- string ("invalid ") + value_traits<T>::type_name +
- (n == 0 ? " value: empty" : " value: multiple names"));
- }
-
[[noreturn]] LIBBUILD2_SYMEXPORT void
convert_throw (const value_type* from, const value_type& to);
@@ -229,13 +201,13 @@ namespace build2
template <typename T>
names_view
- simple_reverse (const value& v, names& s)
+ simple_reverse (const value& v, names& s, bool reduce)
{
const T& x (v.as<T> ());
- // Represent an empty simple value as empty name sequence rather than
- // a single empty name. This way, for example, during serialization we
- // end up with a much saner looking:
+    // Unless requested otherwise, represent an empty simple value as an
+    // empty name sequence rather than a single empty name. This way, for
+    // example, during serialization we end up with a much saner looking:
//
// config.import.foo =
//
@@ -245,6 +217,8 @@ namespace build2
//
if (!value_traits<T>::empty (x))
s.emplace_back (value_traits<T>::reverse (x));
+ else if (!reduce)
+ s.push_back (name ());
return s;
}
@@ -477,6 +451,7 @@ namespace build2
convert (names&& ns)
{
vector<T> v;
+ v.reserve (ns.size ()); // Normally there won't be any pairs.
// Similar to vector_append() below except we throw instead of issuing
// diagnostics.
@@ -492,7 +467,7 @@ namespace build2
if (n.pair != '@')
throw invalid_argument (
- string ("invalid pair character: '") + n.pair + "'");
+ string ("invalid pair character: '") + n.pair + '\'');
}
v.push_back (value_traits<T>::convert (move (n), r));
@@ -509,6 +484,8 @@ namespace build2
? v.as<vector<T>> ()
: *new (&v.data_) vector<T> ());
+ p.reserve (p.size () + ns.size ()); // Normally there won't be any pairs.
+
// Convert each element to T while merging pairs.
//
for (auto i (ns.begin ()); i != ns.end (); ++i)
@@ -589,8 +566,8 @@ namespace build2
}
template <typename T>
- static names_view
- vector_reverse (const value& v, names& s)
+ names_view
+ vector_reverse (const value& v, names& s, bool)
{
auto& vv (v.as<vector<T>> ());
s.reserve (vv.size ());
@@ -602,7 +579,7 @@ namespace build2
}
template <typename T>
- static int
+ int
vector_compare (const value& l, const value& r)
{
auto& lv (l.as<vector<T>> ());
@@ -624,6 +601,68 @@ namespace build2
return 0;
}
+ // Provide subscript for vector<T> for efficiency.
+ //
+ template <typename T>
+ value
+ vector_subscript (const value& val, value* val_data,
+ value&& sub,
+ const location& sloc,
+ const location& bloc)
+ {
+ // Process subscript even if the value is null to make sure it is valid.
+ //
+ size_t i;
+ try
+ {
+ i = static_cast<size_t> (convert<uint64_t> (move (sub)));
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (sloc) << "invalid " << value_traits<vector<T>>::value_type.name
+ << " value subscript: " << e <<
+ info (bloc) << "use the '\\[' escape sequence if this is a "
+ << "wildcard pattern";
+ }
+
+ value r;
+ if (!val.null)
+ {
+ const auto& v (val.as<vector<T>> ());
+ if (i < v.size ())
+ {
+ const T& e (v[i]);
+
+ // Steal the value if possible.
+ //
+ r = &val == val_data ? T (move (const_cast<T&> (e))) : T (e);
+ }
+ }
+
+ // Typify null values so that type-specific subscript (e.g., for
+ // json_value) gets called for chained subscripts.
+ //
+ if (r.null)
+ r.type = &value_traits<T>::value_type;
+
+ return r;
+ }
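+
+  // For example, in a buildfile (a sketch, assuming the standard strings
+  // type and the value subscript syntax):
+  //
+  //   v = [strings] a b c
+  //   x = ($v[1]) # b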
+
+ // Provide iterate for vector<T> for efficiency.
+ //
+ template <typename T>
+ void
+ vector_iterate (const value& val,
+ const function<void (value&&, bool first)>& f)
+ {
+ const auto& v (val.as<vector<T>> ()); // Never NULL.
+
+ for (auto b (v.begin ()), i (b), e (v.end ()); i != e; ++i)
+ {
+ f (value (*i), i == b);
+ }
+ }
+
  // Make sure these are static-initialized together. Failing that, VC will
  // make sure it's done in the wrong order.
//
@@ -635,6 +674,8 @@ namespace build2
vector_value_type (value_type&& v)
: value_type (move (v))
{
+ // Note: vector<T> always has a convenience alias.
+ //
type_name = value_traits<T>::type_name;
type_name += 's';
name = type_name.c_str ();
@@ -651,7 +692,8 @@ namespace build2
nullptr, // Patched above.
sizeof (vector<T>),
nullptr, // No base.
- &value_traits<T>::value_type,
+ true, // Container.
+ &value_traits<T>::value_type, // Element type.
&default_dtor<vector<T>>,
&default_copy_ctor<vector<T>>,
&default_copy_assign<vector<T>>,
@@ -661,7 +703,9 @@ namespace build2
&vector_reverse<T>,
nullptr, // No cast (cast data_ directly).
&vector_compare<T>,
- &default_empty<vector<T>>
+ &default_empty<vector<T>>,
+ &vector_subscript<T>,
+ &vector_iterate<T>
};
// vector<pair<K, V>> value
@@ -701,8 +745,8 @@ namespace build2
}
template <typename K, typename V>
- static names_view
- pair_vector_reverse (const value& v, names& s)
+ names_view
+ pair_vector_reverse (const value& v, names& s, bool)
{
auto& vv (v.as<vector<pair<K, V>>> ());
s.reserve (2 * vv.size ());
@@ -714,7 +758,7 @@ namespace build2
}
template <typename K, typename V>
- static int
+ int
pair_vector_compare (const value& l, const value& r)
{
auto& lv (l.as<vector<pair<K, V>>> ());
@@ -749,10 +793,13 @@ namespace build2
pair_vector_value_type (value_type&& v)
: value_type (move (v))
{
- type_name = value_traits<K>::type_name;
- type_name += '_';
+ // vector<pair<K,V>>
+ //
+ type_name = "vector<pair<";
+ type_name += value_traits<K>::type_name;
+ type_name += ',';
type_name += value_traits<V>::type_name;
- type_name += "_pair_vector";
+ type_name += ">>";
name = type_name.c_str ();
}
};
@@ -768,10 +815,13 @@ namespace build2
pair_vector_value_type (value_type&& v)
: value_type (move (v))
{
- type_name = value_traits<K>::type_name;
- type_name += "_optional_";
+ // vector<pair<K,optional<V>>>
+ //
+ type_name = "vector<pair<";
+ type_name += value_traits<K>::type_name;
+ type_name += ",optional<";
type_name += value_traits<V>::type_name;
- type_name += "_pair_vector";
+ type_name += ">>>";
name = type_name.c_str ();
}
};
@@ -784,11 +834,13 @@ namespace build2
pair_vector_value_type (value_type&& v)
: value_type (move (v))
{
- type_name = "optional_";
+ // vector<pair<optional<K>,V>>
+ //
+ type_name = "vector<pair<optional<";
type_name += value_traits<K>::type_name;
- type_name += '_';
+ type_name += ">,";
type_name += value_traits<V>::type_name;
- type_name += "_pair_vector";
+ type_name += ">>";
name = type_name.c_str ();
}
};
@@ -803,7 +855,8 @@ namespace build2
nullptr, // Patched above.
sizeof (vector<pair<K, V>>),
nullptr, // No base.
- nullptr, // No element.
+ true, // Container.
+ nullptr, // No element (not named).
&default_dtor<vector<pair<K, V>>>,
&default_copy_ctor<vector<pair<K, V>>>,
&default_copy_assign<vector<pair<K, V>>>,
@@ -813,7 +866,244 @@ namespace build2
&pair_vector_reverse<K, V>,
nullptr, // No cast (cast data_ directly).
&pair_vector_compare<K, V>,
- &default_empty<vector<pair<K, V>>>
+ &default_empty<vector<pair<K, V>>>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
+ };
+
+ // set<T> value
+ //
+ template <typename T>
+ set<T> value_traits<set<T>>::
+ convert (names&& ns)
+ {
+ set<T> s;
+
+ // Similar to set_append() below except we throw instead of issuing
+ // diagnostics.
+ //
+ for (auto i (ns.begin ()); i != ns.end (); ++i)
+ {
+ name& n (*i);
+ name* r (nullptr);
+
+ if (n.pair)
+ {
+ r = &*++i;
+
+ if (n.pair != '@')
+ throw invalid_argument (
+ string ("invalid pair character: '") + n.pair + '\'');
+ }
+
+ s.insert (value_traits<T>::convert (move (n), r));
+ }
+
+ return s;
+ }
+
+ template <typename T>
+ void
+ set_append (value& v, names&& ns, const variable* var)
+ {
+ set<T>& s (v ? v.as<set<T>> () : *new (&v.data_) set<T> ());
+
+ // Convert each element to T while merging pairs.
+ //
+ for (auto i (ns.begin ()); i != ns.end (); ++i)
+ {
+ name& n (*i);
+ name* r (nullptr);
+
+ if (n.pair)
+ {
+ r = &*++i;
+
+ if (n.pair != '@')
+ {
+ diag_record dr (fail);
+
+ dr << "unexpected pair style for "
+ << value_traits<T>::value_type.name << " value "
+ << "'" << n << "'" << n.pair << "'" << *r << "'";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+ }
+ }
+
+ try
+ {
+ s.insert (value_traits<T>::convert (move (n), r));
+ }
+ catch (const invalid_argument& e)
+ {
+ diag_record dr (fail);
+
+ dr << e;
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+        dr << info << "while converting";
+ if (n.pair)
+ dr << " element pair '" << n << "'@'" << *r << "'";
+ else
+ dr << " element '" << n << "'";
+ }
+ }
+ }
+
+ template <typename T>
+ void
+ set_assign (value& v, names&& ns, const variable* var)
+ {
+ if (v)
+ v.as<set<T>> ().clear ();
+
+ set_append<T> (v, move (ns), var);
+ }
+
+ template <typename T>
+ names_view
+ set_reverse (const value& v, names& s, bool)
+ {
+ auto& sv (v.as<set<T>> ());
+ s.reserve (sv.size ());
+
+ for (const T& x: sv)
+ s.push_back (value_traits<T>::reverse (x));
+
+ return s;
+ }
+
+ template <typename T>
+ int
+ set_compare (const value& l, const value& r)
+ {
+ auto& ls (l.as<set<T>> ());
+ auto& rs (r.as<set<T>> ());
+
+ auto li (ls.begin ()), le (ls.end ());
+ auto ri (rs.begin ()), re (rs.end ());
+
+ for (; li != le && ri != re; ++li, ++ri)
+ if (int r = value_traits<T>::compare (*li, *ri))
+ return r;
+
+ if (li == le && ri != re) // l shorter than r.
+ return -1;
+
+ if (ri == re && li != le) // r shorter than l.
+ return 1;
+
+ return 0;
+ }
+
+ // Map subscript to set::contains().
+ //
+ template <typename T>
+ value
+ set_subscript (const value& val, value*,
+ value&& sub,
+ const location& sloc,
+ const location& bloc)
+ {
+ // Process subscript even if the value is null to make sure it is valid.
+ //
+ T k;
+ try
+ {
+ k = convert<T> (move (sub));
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (sloc) << "invalid " << value_traits<set<T>>::value_type.name
+ << " value subscript: " << e <<
+ info (bloc) << "use the '\\[' escape sequence if this is a "
+ << "wildcard pattern";
+ }
+
+ bool r (false);
+ if (!val.null)
+ {
+ const auto& s (val.as<set<T>> ());
+ r = s.find (k) != s.end ();
+ }
+
+ return value (r);
+ }
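+
+  // For example, in a buildfile (a sketch, assuming the string_set alias
+  // defined below):
+  //
+  //   s = [string_set] a b
+  //   x = ($s[a]) # true
+  //   y = ($s[c]) # false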
+
+ // Provide iterate for set<T> for efficiency.
+ //
+ template <typename T>
+ void
+ set_iterate (const value& val,
+ const function<void (value&&, bool first)>& f)
+ {
+ const auto& v (val.as<set<T>> ()); // Never NULL.
+
+ for (auto b (v.begin ()), i (b), e (v.end ()); i != e; ++i)
+ {
+ f (value (*i), i == b);
+ }
+ }
+
+  // Make sure these are static-initialized together. Failing that, VC will
+  // make sure it's done in the wrong order.
+ //
+ template <typename T>
+ struct set_value_type: value_type
+ {
+ string type_name;
+
+ set_value_type (value_type&& v)
+ : value_type (move (v))
+ {
+ // set<T>
+ //
+ type_name = "set<";
+ type_name += value_traits<T>::type_name;
+ type_name += '>';
+ name = type_name.c_str ();
+ }
+ };
+
+ // Convenience aliases for certain set<T> cases.
+ //
+ template <>
+ struct set_value_type<string>: value_type
+ {
+ set_value_type (value_type&& v)
+ : value_type (move (v))
+ {
+ name = "string_set";
+ }
+ };
+
+ template <typename T>
+ const set<T> value_traits<set<T>>::empty_instance;
+
+ template <typename T>
+ const set_value_type<T>
+ value_traits<set<T>>::value_type = build2::value_type // VC14 wants =.
+ {
+ nullptr, // Patched above.
+ sizeof (set<T>),
+ nullptr, // No base.
+ true, // Container.
+ &value_traits<T>::value_type, // Element type.
+ &default_dtor<set<T>>,
+ &default_copy_ctor<set<T>>,
+ &default_copy_assign<set<T>>,
+ &set_assign<T>,
+ &set_append<T>,
+ &set_append<T>, // Prepend the same as append.
+ &set_reverse<T>,
+ nullptr, // No cast (cast data_ directly).
+ &set_compare<T>,
+ &default_empty<set<T>>,
+ &set_subscript<T>,
+ &set_iterate<T>
};
// map<K, V> value
@@ -839,7 +1129,9 @@ namespace build2
"element",
var));
- p.emplace (move (v.first), move (v.second));
+ // Poor man's emplace_or_assign().
+ //
+ p.emplace (move (v.first), V ()).first->second = move (v.second);
}
}
@@ -864,9 +1156,7 @@ namespace build2
"element",
var));
- // Poor man's emplace_or_assign().
- //
- p.emplace (move (v.first), V ()).first->second = move (v.second);
+ p.emplace (move (v.first), move (v.second));
}
}
@@ -881,8 +1171,8 @@ namespace build2
}
template <typename K, typename V>
- static names_view
- map_reverse (const value& v, names& s)
+ names_view
+ map_reverse (const value& v, names& s, bool)
{
auto& vm (v.as<map<K, V>> ());
s.reserve (2 * vm.size ());
@@ -894,7 +1184,7 @@ namespace build2
}
template <typename K, typename V>
- static int
+ int
map_compare (const value& l, const value& r)
{
auto& lm (l.as<map<K, V>> ());
@@ -918,6 +1208,59 @@ namespace build2
return 0;
}
+ // Note that unlike json_value, we don't provide index support for maps.
+  // There are two reasons for this: Firstly, for something like
+  // map<uint64_t,...> a numeric subscript would be ambiguous (key or
+  // index).
+ // Secondly, even something like map<string,...> may contain integers as
+ // keys (in JSON, there is a strong convention for object member names not
+ // to be integers). Instead, we provide the $keys() function which allows
+ // one to implement an index-based access with a bit of overhead, if needed.
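+  //
+  // For example, in a buildfile (a sketch, assuming the string_map alias
+  // defined below):
+  //
+  //   m = [string_map] a@1 b@2
+  //   x = ($m[b])   # 2
+  //   k = $keys($m) # a b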
+ //
+ template <typename K, typename V>
+ value
+ map_subscript (const value& val, value* val_data,
+ value&& sub,
+ const location& sloc,
+ const location& bloc)
+ {
+ // Process subscript even if the value is null to make sure it is valid.
+ //
+ K k;
+ try
+ {
+ k = convert<K> (move (sub));
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (sloc) << "invalid " << value_traits<map<K, V>>::value_type.name
+ << " value subscript: " << e <<
+ info (bloc) << "use the '\\[' escape sequence if this is a "
+ << "wildcard pattern";
+ }
+
+ value r;
+ if (!val.null)
+ {
+ const auto& m (val.as<map<K, V>> ());
+ auto i (m.find (k));
+ if (i != m.end ())
+ {
+ // Steal the value if possible.
+ //
+ r = (&val == val_data
+ ? V (move (const_cast<V&> (i->second)))
+ : V (i->second));
+ }
+ }
+
+ // Typify null values so that type-specific subscript (e.g., for
+ // json_value) gets called for chained subscripts.
+ //
+ if (r.null)
+ r.type = &value_traits<V>::value_type;
+
+ return r;
+ }
+
  // Make sure these are static-initialized together. Failing that, VC will
  // make sure it's done in the wrong order.
//
@@ -929,11 +1272,15 @@ namespace build2
map_value_type (value_type&& v)
: value_type (move (v))
{
- type_name = value_traits<K>::type_name;
- type_name += '_';
+ // map<K,V>
+ //
+ type_name = "map<";
+ type_name += value_traits<K>::type_name;
+ type_name += ',';
type_name += value_traits<V>::type_name;
- type_name += "_map";
+ type_name += '>';
name = type_name.c_str ();
+ subscript = &map_subscript<K, V>;
}
};
@@ -948,11 +1295,15 @@ namespace build2
map_value_type (value_type&& v)
: value_type (move (v))
{
- type_name = value_traits<K>::type_name;
- type_name += "_optional_";
+ // map<K,optional<V>>
+ //
+ type_name = "map<";
+ type_name += value_traits<K>::type_name;
+ type_name += ",optional<";
type_name += value_traits<V>::type_name;
- type_name += "_map";
+ type_name += ">>";
name = type_name.c_str ();
+ // @@ TODO: subscript
}
};
@@ -964,18 +1315,42 @@ namespace build2
map_value_type (value_type&& v)
: value_type (move (v))
{
- type_name = "optional_";
+ // map<optional<K>,V>
+ //
+ type_name = "map<optional<";
type_name += value_traits<K>::type_name;
- type_name += '_';
+ type_name += ">,";
type_name += value_traits<V>::type_name;
- type_name += "_map";
+ type_name += '>';
name = type_name.c_str ();
+ // @@ TODO: subscript
+ }
+ };
+
+ // Convenience aliases for certain map<T,T> cases.
+ //
+ template <>
+ struct map_value_type<string, string>: value_type
+ {
+ map_value_type (value_type&& v)
+ : value_type (move (v))
+ {
+ name = "string_map";
+ subscript = &map_subscript<string, string>;
}
};
template <typename K, typename V>
const map<K, V> value_traits<map<K, V>>::empty_instance;
+  // Note that custom iteration would be better (more efficient, returns
+  // typed values), but we don't yet have pair<> as a value type so we let
+  // the generic implementation return an untyped pair.
+  //
+  // BTW, one negative consequence of returning an untyped pair is that
+  // $first()/$second() don't return typed values either, which is quite
+  // unfortunate for something like json_map.
+ //
template <typename K, typename V>
const map_value_type<K, V>
value_traits<map<K, V>>::value_type = build2::value_type // VC14 wants =
@@ -983,7 +1358,8 @@ namespace build2
nullptr, // Patched above.
sizeof (map<K, V>),
nullptr, // No base.
- nullptr, // No element.
+ true, // Container.
+ nullptr, // No element (pair<> not a value type yet).
&default_dtor<map<K, V>>,
&default_copy_ctor<map<K, V>>,
&default_copy_assign<map<K, V>>,
@@ -993,7 +1369,9 @@ namespace build2
&map_reverse<K, V>,
nullptr, // No cast (cast data_ directly).
&map_compare<K, V>,
- &default_empty<map<K, V>>
+ &default_empty<map<K, V>>,
+ nullptr, // Subscript (patched in by map_value_type above).
+ nullptr // Iterate.
};
// variable_cache
@@ -1014,8 +1392,8 @@ namespace build2
: 0);
shared_mutex& m (
- ctx.mutexes.variable_cache[
- hash<variable_cache*> () (this) % ctx.mutexes.variable_cache_size]);
+ ctx.mutexes->variable_cache[
+ hash<variable_cache*> () (this) % ctx.mutexes->variable_cache_size]);
slock sl (m);
ulock ul (m, defer_lock);
@@ -1070,6 +1448,7 @@ namespace build2
e.stem_version = sver;
+ e.value.extra = 0; // For consistency (we don't really use it).
e.value.version++; // Value changed.
}
else
diff --git a/libbuild2/version/init.cxx b/libbuild2/version/init.cxx
index 05d5fe0..b3657bc 100644
--- a/libbuild2/version/init.cxx
+++ b/libbuild2/version/init.cxx
@@ -3,6 +3,8 @@
#include <libbuild2/version/init.hxx>
+#include <cstring> // strchr()
+
#include <libbutl/manifest-parser.hxx>
#include <libbuild2/scope.hxx>
@@ -143,61 +145,98 @@ namespace build2
}
else if (nv.name == "depends")
{
- // According to the package manifest spec, the format of the
- // 'depends' value is as follows:
- //
- // depends: [?][*] <alternatives> [; <comment>]
- //
- // <alternatives> := <dependency> [ '|' <dependency>]*
- // <dependency> := <name> [<constraint>]
- // <constraint> := <comparison> | <range>
- // <comparison> := ('==' | '>' | '<' | '>=' | '<=') <version>
- // <range> := ('(' | '[') <version> <version> (')' | ']')
- //
- // Note that we don't do exhaustive validation here leaving it
- // to the package manager.
- //
string v (move (nv.value));
- size_t p;
+ // Parse the dependency and add it to the map (see
+ // bpkg::dependency_alternatives class for dependency syntax).
+ //
+ // Note that currently we only consider simple dependencies:
+            // a single package without alternatives, clauses, or newlines.
+ // In the future, if/when we add full support, we will likely
+ // keep this as a fast path.
+ //
+ // Also note that we don't do exhaustive validation here leaving
+ // it to the package manager.
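+            //
+            // For example, a simple dependency value like:
+            //
+            //   * build2 >= 0.16.0 ; Comment.
+            //
+            // would, per the logic below, yield the build2 package name,
+            // the '>= 0.16.0' constraint, and the build-time flag, with
+            // the comment dropped.
+            //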
// Get rid of the comment.
//
+ // Note that we can potentially mis-detect the comment
+ // separator, since ';' can be a part of some of the dependency
+ // alternative clauses. If that's the case, we will skip the
+ // dependency later.
+ //
+ size_t p;
if ((p = v.find (';')) != string::npos)
v.resize (p);
- // Get rid of conditional/runtime markers. Note that enither of
- // them is valid in the rest of the value.
+ // Skip the dependency if it is not a simple one.
+ //
+ // Note that we will check for the presence of the reflect
+ // clause later since `=` can also be in the constraint.
+ //
+ if (v.find_first_of ("{?|\n") != string::npos)
+ continue;
+
+ // Find the beginning of the dependency package name, skipping
+ // the build-time marker, if present.
//
- if ((p = v.find_last_of ("?*")) != string::npos)
- v.erase (0, p + 1);
+ bool buildtime (v[0] == '*');
+ size_t b (buildtime ? v.find_first_not_of (" \t", 1) : 0);
- // Parse as |-separated "words".
+ if (b == string::npos)
+ fail (l) << "invalid dependency " << v << ": no package name";
+
+ // Find the end of the dependency package name.
+ //
+ p = v.find_first_of (" \t=<>[(~^", b);
+
+ // Dependency name (without leading/trailing white-spaces).
//
- for (size_t b (0), e (0); next_word (v, b, e, '|'); )
+ string n (v, b, p == string::npos ? p : p - b);
+
+ string vc; // Empty if no constraint is specified
+
+ // Position to the first non-whitespace character after the
+ // dependency name, which, if present, can be a part of the
+ // version constraint or the reflect clause.
+ //
+ if (p != string::npos)
+ p = v.find_first_not_of (" \t", p);
+
+ if (p != string::npos)
+ {
+ // Check if this is definitely not a version constraint and
+ // drop this dependency if that's the case.
+ //
+ if (strchr ("=<>[(~^", v[p]) == nullptr)
+ continue;
+
+              // Ok, we have a constraint; check that there is no reflect
+              // clause after it (the only other valid `=` in a constraint
+              // is the immediately following character, as part of `==`,
+              // `<=`, or `>=`).
+ //
+ if (v.size () > p + 2 && v.find ('=', p + 2) != string::npos)
+ continue;
+
+ vc.assign (v, p, string::npos);
+ trim (vc);
+ }
+
+ // Finally, add the dependency to the map.
+ //
+ try
+ {
+ package_name pn (move (n));
+ string v (pn.variable ());
+
+ ds.emplace (move (v),
+ dependency {move (pn), move (vc), buildtime});
+ }
+ catch (const invalid_argument& e)
{
- string d (v, b, e - b);
- trim (d);
-
- p = d.find_first_of (" \t=<>[(~^");
- string n (d, 0, p);
- string c (p != string::npos ? string (d, p) : string ());
-
- trim (n);
- trim (c);
-
- try
- {
- package_name pn (move (n));
- string v (pn.variable ());
-
- ds.emplace (move (v), dependency {move (pn), move (c)});
- }
- catch (const invalid_argument& e)
- {
- fail (l) << "invalid package name for dependency "
- << d << ": " << e;
- }
+ fail (l) << "invalid dependency package name '" << n << "': "
+ << e;
}
}
}
@@ -246,7 +285,9 @@ namespace build2
{
auto i (ds.find ("build2"));
- if (i != ds.end () && !i->second.constraint.empty ())
+ if (i != ds.end () &&
+ i->second.buildtime &&
+ !i->second.constraint.empty ())
try
{
check_build_version (
@@ -349,7 +390,7 @@ namespace build2
if (cast_false<bool> (rs["install.booted"]))
{
rs.insert_rule<manifest> (
- perform_install_id, "version.manifest", manifest_install_rule_);
+ perform_install_id, "version.install", manifest_install_rule_);
}
return true;
diff --git a/libbuild2/version/module.hxx b/libbuild2/version/module.hxx
index e80870e..8549e03 100644
--- a/libbuild2/version/module.hxx
+++ b/libbuild2/version/module.hxx
@@ -22,6 +22,7 @@ namespace build2
{
package_name name;
string constraint;
+ bool buildtime;
};
using dependencies = map<string, dependency>;
diff --git a/libbuild2/version/rule.cxx b/libbuild2/version/rule.cxx
index 4da4e3f..65c1117 100644
--- a/libbuild2/version/rule.cxx
+++ b/libbuild2/version/rule.cxx
@@ -46,12 +46,31 @@ namespace build2
// in_rule
//
+
+ // Wrap the in::rule's perform_update recipe into a data-carrying recipe.
+ //
+  // To optimize this a bit further (i.e., to avoid the dynamic memory
+  // allocation), we are going to call in::rule::perform_update() directly
+  // (after all, it's virtual and thus part of the in_rule's interface).
+ //
+ struct match_data
+ {
+ const module& mod;
+ const in_rule& rule;
+
+ target_state
+ operator() (action a, const target& t)
+ {
+ return rule.perform_update (a, t);
+ }
+ };
+
bool in_rule::
- match (action a, target& xt, const string&) const
+ match (action a, target& xt) const
{
tracer trace ("version::in_rule::match");
- file& t (static_cast<file&> (xt));
+ file& t (xt.as<file> ());
const scope& rs (t.root_scope ());
bool fm (false); // Found manifest.
@@ -74,14 +93,26 @@ namespace build2
if (!fi)
l5 ([&]{trace << "no in file prerequisite for target " << t;});
- bool r (fm && fi);
-
- // If we match, lookup and cache the module for the update operation.
+ // If we match, derive the file name early as recommended by the in
+ // rule.
//
- if (r && a == perform_update_id)
- t.data (rs.find_module<module> (module::name));
+ if (fm && fi)
+ t.derive_path ();
+
+ return fm && fi;
+ }
- return r;
+ recipe in_rule::
+ apply (action a, target& t) const
+ {
+ recipe r (rule::apply (a, t));
+
+ // Lookup and cache the module for the update operation.
+ //
+ return a == perform_update_id
+ ? match_data {*t.root_scope ().find_module<module> (module::name),
+ *this}
+ : move (r);
}
string in_rule::
@@ -90,6 +121,7 @@ namespace build2
const target& t,
const string& n,
optional<uint64_t> flags,
+ const substitution_map* smap,
const optional<string>& null) const
{
assert (!flags);
@@ -97,7 +129,7 @@ namespace build2
// Note that this code will be executed during up-to-date check for each
// substitution so let's try not to do anything overly sub-optimal here.
//
- const module& m (*t.data<const module*> ());
+ const module& m (t.data<match_data> (a).mod);
// Split it into the package name and the variable/condition name.
//
@@ -113,8 +145,7 @@ namespace build2
a,
t,
p == string::npos ? n : string (n, p + 1),
- nullopt,
- null);
+ nullopt, smap, null);
}
string pn (n, 0, p);
@@ -216,13 +247,13 @@ namespace build2
if (mav->snapshot ())
{
- r += (p ? "(" : "");
+ if (p) r += '(';
r += cmp (vm, " < ", mav->version) + " || (";
r += cmp (vm, " == ", mav->version) + " && ";
- r += cmp (sm, (mao ? " < " : " <= "), mav->snapshot_sn) + ")";
+ r += cmp (sm, (mao ? " < " : " <= "), mav->snapshot_sn) + ')';
- r += (p ? ")" : "");
+ if (p) r += ')';
}
else
r = cmp (vm, (mao ? " < " : " <= "), mav->version);
@@ -236,13 +267,13 @@ namespace build2
if (miv->snapshot ())
{
- r += (p ? "(" : "");
+ if (p) r += '(';
r += cmp (vm, " > ", miv->version) + " || (";
r += cmp (vm, " == ", miv->version) + " && ";
- r += cmp (sm, (mio ? " > " : " >= "), miv->snapshot_sn) + ")";
+ r += cmp (sm, (mio ? " > " : " >= "), miv->snapshot_sn) + ')';
- r += (p ? ")" : "");
+ if (p) r += ')';
}
else
r = cmp (vm, (mio ? " > " : " >= "), miv->version);
@@ -302,7 +333,7 @@ namespace build2
// manifest_install_rule
//
bool manifest_install_rule::
- match (action a, target& t, const string&) const
+ match (action a, target& t) const
{
// We only match project's manifest.
//
@@ -315,7 +346,7 @@ namespace build2
if (s.root_scope () != &s || s.src_path () != t.dir)
return false;
- return file_rule::match (a, t, "");
+ return file_rule::match (a, t);
}
auto_rmfile manifest_install_rule::
diff --git a/libbuild2/version/rule.hxx b/libbuild2/version/rule.hxx
index c174f40..0bdc090 100644
--- a/libbuild2/version/rule.hxx
+++ b/libbuild2/version/rule.hxx
@@ -20,10 +20,13 @@ namespace build2
class in_rule: public in::rule
{
public:
- in_rule (): rule ("version.in 2", "version.in") {}
+ in_rule (): rule ("version.in 2", "version") {}
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
+
+ virtual recipe
+ apply (action, target&) const override;
virtual string
lookup (const location&,
@@ -31,6 +34,7 @@ namespace build2
const target&,
const string&,
optional<uint64_t>,
+ const substitution_map*,
const optional<string>&) const override;
};
@@ -42,7 +46,7 @@ namespace build2
manifest_install_rule () {}
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
virtual auto_rmfile
install_pre (const file&, const install_dir&) const override;
diff --git a/libbuild2/version/snapshot-git.cxx b/libbuild2/version/snapshot-git.cxx
index 2ae3f5b..ab0224a 100644
--- a/libbuild2/version/snapshot-git.cxx
+++ b/libbuild2/version/snapshot-git.cxx
@@ -21,7 +21,7 @@ namespace build2
static global_cache<snapshot, dir_path> cache;
snapshot
- extract_snapshot_git (dir_path rep_root)
+ extract_snapshot_git (context& ctx, dir_path rep_root)
{
if (const snapshot* r = cache.find (rep_root))
return *r;
@@ -82,7 +82,11 @@ namespace build2
args[args_i + 1] = "--porcelain";
args[args_i + 2] = nullptr;
+ // @@ PERF: redo with custom stream reading code (then could also
+ // get rid of context).
+ //
r.committed = run<string> (
+ ctx,
3 /* verbosity */,
pp,
args,
@@ -108,7 +112,8 @@ namespace build2
// (reluctantly) assume that the only reason git cat-file fails is if
// there is no HEAD (that we equal with the "new repository" condition
// which is, strictly speaking, might not be the case either). So we
- // suppress any diagnostics, and handle non-zero exit code.
+ // suppress any diagnostics, and handle non-zero exit code (and so no
+ // diagnostics buffering is needed, plus we are in the load phase).
//
string data;
@@ -117,12 +122,12 @@ namespace build2
args[args_i + 2] = "HEAD";
args[args_i + 3] = nullptr;
- process pr (run_start (3 /* verbosity */,
+ process pr (run_start (3 /* verbosity */,
pp,
args,
- 0 /* stdin */,
- -1 /* stdout */,
- false /* error */));
+ 0 /* stdin */,
+ -1 /* stdout */,
+ 1 /* stderr (to stdout) */));
string l;
try
@@ -201,7 +206,7 @@ namespace build2
// that.
}
- if (run_finish_code (args, pr, l))
+ if (run_finish_code (args, pr, l, 2 /* verbosity */))
{
if (r.sn == 0)
fail << "unable to extract git commit id/date for " << rep_root;
diff --git a/libbuild2/version/snapshot.cxx b/libbuild2/version/snapshot.cxx
index d20e633..000bcba 100644
--- a/libbuild2/version/snapshot.cxx
+++ b/libbuild2/version/snapshot.cxx
@@ -12,7 +12,7 @@ namespace build2
namespace version
{
snapshot
- extract_snapshot_git (dir_path);
+ extract_snapshot_git (context&, dir_path);
static const path git (".git");
@@ -46,7 +46,7 @@ namespace build2
if (butl::entry_exists (d / git,
true /* follow_symlinks */,
true /* ignore_errors */))
- return extract_snapshot_git (move (d));
+ return extract_snapshot_git (rs.ctx, move (d));
}
return snapshot ();