aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorBoris Kolpackov <boris@codesynthesis.com>2016-02-29 10:57:40 +0200
committerBoris Kolpackov <boris@codesynthesis.com>2016-02-29 10:57:40 +0200
commit3cf3b73ffc6881d5428a735736a347f6e143b366 (patch)
tree3559fa9d2d44cc11e07987752027f7c2a9e3e23e
parent2a4f52c46f2081aaeb2664e8026d3d067142e3d5 (diff)
Implement auxiliary dependency database (.d files), use in cxx.compile
This is part of the "High Fidelity Build" work.
-rw-r--r--build2/buildfile5
-rw-r--r--build2/cli/rule.cxx6
-rw-r--r--build2/config/utility24
-rw-r--r--build2/config/utility.cxx12
-rw-r--r--build2/config/utility.ixx17
-rw-r--r--build2/config/utility.txx16
-rw-r--r--build2/cxx/compile.cxx785
-rw-r--r--build2/cxx/utility17
-rw-r--r--build2/cxx/utility.cxx14
-rw-r--r--build2/cxx/utility.txx29
-rw-r--r--build2/depdb143
-rw-r--r--build2/depdb.cxx196
-rw-r--r--build2/install/rule.cxx6
-rw-r--r--build2/test/rule.cxx4
-rw-r--r--build2/types5
-rw-r--r--build2/utility42
-rw-r--r--build2/utility.cxx49
-rw-r--r--build2/utility.ixx27
-rw-r--r--tests/depdb/buildfile8
-rw-r--r--tests/depdb/driver.cxx166
20 files changed, 1188 insertions, 383 deletions
diff --git a/build2/buildfile b/build2/buildfile
index 9321026..785a359 100644
--- a/build2/buildfile
+++ b/build2/buildfile
@@ -9,6 +9,7 @@ exe{b}: \
{ cxx}{ b } \
{hxx ixx cxx}{ b-options } \
{hxx txx cxx}{ context } \
+ {hxx cxx}{ depdb } \
{hxx cxx}{ diagnostics } \
{hxx cxx}{ dump } \
{hxx ixx cxx}{ file } \
@@ -28,7 +29,7 @@ exe{b}: \
{hxx }{ target-type } \
{hxx cxx}{ token } \
{hxx }{ types } \
- {hxx cxx}{ utility } \
+ {hxx ixx cxx}{ utility } \
{hxx ixx txx cxx}{ variable } \
{hxx }{ version } \
bin/{hxx cxx}{ module } \
@@ -39,7 +40,7 @@ exe{b}: \
cli/{hxx cxx}{ target } \
config/{hxx cxx}{ module } \
config/{hxx cxx}{ operation } \
- config/{hxx ixx txx cxx}{ utility } \
+ config/{hxx txx cxx}{ utility } \
cxx/{hxx cxx}{ compile } \
cxx/{hxx cxx}{ install } \
cxx/{hxx cxx}{ link } \
diff --git a/build2/cli/rule.cxx b/build2/cli/rule.cxx
index edd8bc8..b584ccc 100644
--- a/build2/cli/rule.cxx
+++ b/build2/cli/rule.cxx
@@ -14,8 +14,6 @@
#include <build2/cli/target>
-#include <build2/config/utility>
-
using namespace std;
using namespace butl;
@@ -97,7 +95,7 @@ namespace build2
t.c = &search<cxx::cxx> (t.dir, t.name, nullptr, nullptr);
t.c->group = &t;
- if (!config::find_option ("--suppress-inline", t, "cli.options"))
+ if (!find_option ("--suppress-inline", t, "cli.options"))
{
t.i = &search<cxx::ixx> (t.dir, t.name, nullptr, nullptr);
t.i->group = &t;
@@ -276,7 +274,7 @@ namespace build2
if (t.i != nullptr)
append_extension (args, *t.i, "--ixx-suffix", "ixx");
- config::append_options (args, t, "cli.options");
+ append_options (args, t, "cli.options");
if (!relo.empty ())
{
diff --git a/build2/config/utility b/build2/config/utility
index 2f53521..9218106 100644
--- a/build2/config/utility
+++ b/build2/config/utility
@@ -94,33 +94,9 @@ namespace build2
//
bool
specified (scope& root, const string& ns);
-
- // @@ Why are these here?
- //
-
- // Add all the values from a variable to the C-string list. T is
- // either target or scope. The variable is expected to be of type
- // strings.
- //
- template <typename T>
- void
- append_options (cstrings& args, T& s, const char* var);
-
- // As above but from the strings value directly.
- //
- void
- append_options (cstrings& args, const const_strings_value&);
-
- // Check if a specified option is present in the variable value.
- // T is either target or scope.
- //
- template <typename T>
- bool
- find_option (const char* option, T& s, const char* var);
}
}
#include <build2/config/utility.txx>
-#include <build2/config/utility.ixx>
#endif // BUILD2_CONFIG_UTILITY
diff --git a/build2/config/utility.cxx b/build2/config/utility.cxx
index d5e0afe..c617690 100644
--- a/build2/config/utility.cxx
+++ b/build2/config/utility.cxx
@@ -76,17 +76,5 @@ namespace build2
return false;
}
-
- void
- append_options (cstrings& args, const const_strings_value& sv)
- {
- if (!sv.empty ())
- {
- args.reserve (args.size () + sv.size ());
-
- for (const string& s: sv)
- args.push_back (s.c_str ());
- }
- }
}
}
diff --git a/build2/config/utility.ixx b/build2/config/utility.ixx
deleted file mode 100644
index 7f666d4..0000000
--- a/build2/config/utility.ixx
+++ /dev/null
@@ -1,17 +0,0 @@
-// file : build2/config/utility.ixx -*- C++ -*-
-// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
-// license : MIT; see accompanying LICENSE file
-
-namespace build2
-{
- namespace config
- {
- template <typename T>
- inline void
- append_options (cstrings& args, T& s, const char* var)
- {
- if (auto l = s[var])
- append_options (args, as<strings> (*l));
- }
- }
-}
diff --git a/build2/config/utility.txx b/build2/config/utility.txx
index fd32599..d3c57a5 100644
--- a/build2/config/utility.txx
+++ b/build2/config/utility.txx
@@ -25,21 +25,5 @@ namespace build2
return result (root.assign (var) = def_value, true);
}
-
- template <typename T>
- bool
- find_option (const char* option, T& s, const char* var)
- {
- if (auto l = s[var])
- {
- for (const string& s: as<strings> (*l))
- {
- if (s == option)
- return true;
- }
- }
-
- return false;
- }
}
}
diff --git a/build2/cxx/compile.cxx b/build2/cxx/compile.cxx
index bc1681b..3223fb2 100644
--- a/build2/cxx/compile.cxx
+++ b/build2/cxx/compile.cxx
@@ -12,11 +12,12 @@
#include <butl/fdstream>
#include <butl/path-map>
+#include <build2/depdb>
#include <build2/scope>
+#include <build2/context>
#include <build2/variable>
#include <build2/algorithm>
#include <build2/diagnostics>
-#include <build2/context>
#include <build2/bin/target>
#include <build2/cxx/target>
@@ -59,11 +60,13 @@ namespace build2
}
static void
- inject_prerequisites (action, target&, cxx&, scope&);
+ inject_prerequisites (action, target&, cxx&, scope&, depdb&);
recipe compile::
apply (action a, target& xt, const match_result& mr) const
{
+ tracer trace ("cxx::compile");
+
path_target& t (static_cast<path_target&> (xt));
// Derive file name from target name.
@@ -120,23 +123,111 @@ namespace build2
t.prerequisite_targets.push_back (&pt);
}
- // Inject additional prerequisites. We only do it when
- // performing update since chances are we will have to
- // update some of our prerequisites in the process (auto-
- // generated source code).
+ // Inject additional prerequisites. We only do it when performing update
+ // since chances are we will have to update some of our prerequisites in
+ // the process (auto-generated source code).
//
if (a == perform_update_id)
{
- // The cached prerequisite target should be the same as what
- // is in t.prerequisite_targets since we used standard
- // search() and match() above.
+ scope& rs (t.root_scope ());
+ const string& sys (as<string> (*rs["cxx.host.system"]));
+
+ // The cached prerequisite target should be the same as what is in
+ // t.prerequisite_targets since we used standard search() and match()
+ // above.
//
// @@ Ugly.
//
cxx& st (
dynamic_cast<cxx&> (
mr.target != nullptr ? *mr.target : *mr.prerequisite->target));
- inject_prerequisites (a, t, st, mr.prerequisite->scope);
+
+ depdb dd (t.path () + ".d");
+
+ // First should come the rule name/version.
+ //
+ string* dl (dd.read ());
+ if (dl == nullptr || *dl != "cxx.compile 1")
+ {
+ dd.write ("cxx.compile 1");
+
+ if (dl != nullptr)
+ level4 ([&]{trace << "rule mismatch forcing update of " << t;});
+ }
+
+ // Then the compiler checksum.
+ //
+ {
+ sha256 csum (as<string> (*rs["config.cxx"]));
+
+ dl = dd.read ();
+ if (dl == nullptr || *dl != csum.string ())
+ {
+ dd.write (csum.string ());
+
+ if (dl != nullptr)
+ level4 ([&]{trace << "compiler mismatch forcing update of " << t;});
+ }
+ }
+
+ // Then the options checksum.
+ //
+ {
+ // The idea is to keep them exactly as they are passed to the
+ // compiler since the order may be significant.
+ //
+ sha256 csum;
+
+ // Hash cxx.export.poptions from prerequisite libraries.
+ //
+ for (prerequisite& p: group_prerequisites (t))
+ {
+ target& pt (*p.target); // Already searched and matched.
+
+ if (pt.is_a<lib> () || pt.is_a<liba> () || pt.is_a<libso> ())
+ hash_lib_options (csum, pt, "cxx.export.poptions");
+ }
+
+ hash_options (csum, t, "cxx.poptions");
+ hash_options (csum, t, "cxx.coptions");
+ hash_std (csum, t);
+
+ if (t.is_a<objso> ())
+ {
+ if (sys != "darwin")
+ csum.append ("-fPIC");
+ }
+
+ dl = dd.read ();
+ if (dl == nullptr || *dl != csum.string ())
+ {
+ dd.write (csum.string ());
+
+ if (dl != nullptr)
+ level4 ([&]{trace << "options mismatch forcing update of " << t;});
+ }
+ }
+
+ // Then the source file.
+ //
+ dl = dd.read ();
+ if (dl == nullptr || *dl != st.path ().string ())
+ {
+ dd.write (st.path ());
+
+ if (dl != nullptr)
+ level4 ([&]{trace << "source file mismatch forcing update of " << t;});
+ }
+
+ // If any of the above checks resulted in a mismatch (different
+ // compiler, options, or source file), then force the target update.
+ //
+ if (dd.writing ())
+ t.mtime (timestamp_nonexistent);
+
+ inject_prerequisites (a, t, st, mr.prerequisite->scope, dd);
+
+ dd.close ();
}
switch (a)
@@ -360,357 +451,473 @@ namespace build2
}
static void
- inject_prerequisites (action a, target& t, cxx& s, scope& ds)
+ inject_prerequisites (action a, target& t, cxx& s, scope& ds, depdb& dd)
{
tracer trace ("cxx::compile::inject_prerequisites");
- scope& rs (t.root_scope ());
- const string& cxx (as<string> (*rs["config.cxx"]));
- const string& sys (as<string> (*rs["cxx.host.system"]));
+ level6 ([&]{trace << "target: " << t;});
- cstrings args {cxx.c_str ()};
+ // If things go wrong (and they often do in this area), give the user a
+ // bit extra context.
+ //
+ auto g (
+ make_exception_guard (
+ [&s]()
+ {
+ info << "while extracting header dependencies from " << s;
+ }));
- // Add cxx.export.poptions from prerequisite libraries. Note
- // that here we don't need to see group members (see apply()).
+ scope& rs (t.root_scope ());
+
+ // Initialize lazily, only if required.
//
- for (prerequisite& p: group_prerequisites (t))
+ cstrings args;
+ string cxx_std; // Storage.
+
+ auto init_args = [&t, &s, &rs, &args, &cxx_std] ()
{
- target& pt (*p.target); // Already searched and matched.
+ const string& cxx (as<string> (*rs["config.cxx"]));
+ const string& sys (as<string> (*rs["cxx.host.system"]));
- if (pt.is_a<lib> () || pt.is_a<liba> () || pt.is_a<libso> ())
- append_lib_options (args, pt, "cxx.export.poptions");
- }
+ args.push_back (cxx.c_str ());
- append_options (args, t, "cxx.poptions");
+ // Add cxx.export.poptions from prerequisite libraries. Note
+ // that here we don't need to see group members (see apply()).
+ //
+ for (prerequisite& p: group_prerequisites (t))
+ {
+ target& pt (*p.target); // Already searched and matched.
- // @@ Some C++ options (e.g., -std, -m) affect the preprocessor.
- // Or maybe they are not C++ options? Common options?
- //
- append_options (args, t, "cxx.coptions");
+ if (pt.is_a<lib> () || pt.is_a<liba> () || pt.is_a<libso> ())
+ append_lib_options (args, pt, "cxx.export.poptions");
+ }
- string std; // Storage.
- append_std (args, t, std);
+ append_options (args, t, "cxx.poptions");
- if (t.is_a<objso> ())
- {
- if (sys != "darwin") // fPIC by default.
- args.push_back ("-fPIC");
- }
+ // Some C++ options (e.g., -std, -m) affect the preprocessor.
+ //
+ append_options (args, t, "cxx.coptions");
- args.push_back ("-M"); // Note: -MM -MG skips missing <>-included.
- args.push_back ("-MG"); // Treat missing headers as generated.
- args.push_back ("-MQ"); // Quoted target name.
- args.push_back ("*"); // Old versions can't handle empty target name.
+ append_std (args, t, cxx_std);
- // We are using absolute source file path in order to get absolute
- // paths in the result. Any relative paths in the result are non-
- // existent, potentially auto-generated headers.
- //
- // @@ We will also have to use absolute -I paths to guarantee
- // that. Or just detect relative paths and error out?
- //
- args.push_back (s.path ().string ().c_str ());
- args.push_back (nullptr);
+ if (t.is_a<objso> ())
+ {
+ if (sys != "darwin") // fPIC by default.
+ args.push_back ("-fPIC");
+ }
- level6 ([&]{trace << "target: " << t;});
+ args.push_back ("-M"); // Note: -MM -MG skips missing <>-included.
+ args.push_back ("-MG"); // Treat missing headers as generated.
+ args.push_back ("-MQ"); // Quoted target name.
+ args.push_back ("*"); // Old versions can't do empty target name.
+
+ // We are using absolute source file path in order to get absolute
+ // paths in the result. Any relative paths in the result are non-
+ // existent, potentially auto-generated headers.
+ //
+ // @@ We will also have to use absolute -I paths to guarantee
+ // that. Or just detect relative paths and error out?
+ //
+ args.push_back (s.path ().string ().c_str ());
+ args.push_back (nullptr);
+ };
// Build the prefix map lazily only if we have non-existent files.
// Also reuse it over restarts since it doesn't change.
//
prefix_map pm;
- // If any prerequisites that we have extracted changed, then we
- // have to redo the whole thing. The reason for this is auto-
- // generated headers: the updated header may now include a yet-
- // non-existent header. Unless we discover this and generate it
- // (which, BTW, will trigger another restart since that header,
- // in turn, can also include auto-generated headers), we will
- // end up with an error during compilation proper.
+ // If any prerequisites that we have extracted changed, then we have to
+ // redo the whole thing. The reason for this is auto-generated headers:
+ // the updated header may now include a yet-non-existent header. Unless
+ // we discover this and generate it (which, BTW, will trigger another
+ // restart since that header, in turn, can also include auto-generated
+ // headers), we will end up with an error during compilation proper.
//
- // One complication with this restart logic is that we will see
- // a "prefix" of prerequisites that we have already processed
- // (i.e., they are already in our prerequisite_targets list) and
- // we don't want to keep redoing this over and over again. One
- // thing to note, however, is that the prefix that we have seen
- // on the previous run must appear exactly the same in the
- // subsequent run. The reason for this is that none of the files
- // that it can possibly be based on have changed and thus it
- // should be exactly the same. To put it another way, the
- // presence or absence of a file in the dependency output can
- // only depend on the previous files (assuming the compiler
- // outputs them as it encounters them and it is hard to think
- // of a reason why would someone do otherwise). And we have
- // already made sure that all those files are up to date. And
- // here is the way we are going to exploit this: we are going
- // to keep track of how many prerequisites we have processed so
- // far and on restart skip right to the next one.
+ // One complication with this restart logic is that we will see a
+ // "prefix" of prerequisites that we have already processed (i.e., they
+ // are already in our prerequisite_targets list) and we don't want to
+ // keep redoing this over and over again. One thing to note, however, is
+ // that the prefix that we have seen on the previous run must appear
+ // exactly the same in the subsequent run. The reason for this is that
+ // none of the files that it can possibly be based on have changed and
+ // thus it should be exactly the same. To put it another way, the
+ // presence or absence of a file in the dependency output can only
+ // depend on the previous files (assuming the compiler outputs them as
+ // it encounters them and it is hard to think of a reason why would
+ // someone do otherwise). And we have already made sure that all those
+ // files are up to date. And here is the way we are going to exploit
+ // this: we are going to keep track of how many prerequisites we have
+ // processed so far and on restart skip right to the next one.
//
- // Also, before we do all that, make sure the source file itself
- // if up to date.
+ // And one more thing: most of the time this list of headers would stay
+ // unchanged and extracting them by running the compiler every time is a
+ // bit wasteful. So we are going to cache them in the depdb. If the db
+ // hasn't been invalidated yet (e.g., because the compiler options have
+ // changed), then we start by reading from it. If anything is out of
+ // date then we use the same restart and skip logic to switch to the
+ // compiler run.
//
- execute_direct (a, s);
- size_t skip_count (0);
- for (bool restart (true); restart; )
+ // Update the target "smartly". Return true if it has changed or if the
+ // passed timestamp is not timestamp_unknown and is older than the
+ // target.
+ //
+ // There would normally be a lot of headers for every source file (think
+ // all the system headers) and just calling execute_direct() on all of
+ // them can get expensive. At the same time, most of these headers are
+ // existing files that we will never be updating (again, system headers,
+ // for example) and the rule that will match them is the fallback
+ // file_rule. That rule has an optimization: it returns noop_recipe
+ // (which causes the target state to be automatically set to unchanged)
+ // if the file is known to be up to date.
+ //
+ auto update = [&trace, a] (path_target& pt, timestamp ts) -> bool
{
- restart = false;
+ if (pt.state () != target_state::unchanged)
+ {
+ // We only want to restart if our call to execute() actually
+ // caused an update. In particular, the target could already
+ // have been in target_state::changed because of a dependency
+ // extraction run for some other source file.
+ //
+ target_state os (pt.state ());
+ target_state ns (execute_direct (a, pt));
+
+ if (ns != os && ns != target_state::unchanged)
+ {
+ level6 ([&]{trace << "updated " << pt;});
+ return true;
+ }
+ }
+
+ if (ts != timestamp_unknown)
+ {
+ timestamp mt (pt.mtime ());
+
+ // See execute_prerequisites() for rationale behind the equal part.
+ //
+ return ts < mt || (ts == mt && pt.state () != target_state::changed);
+ }
- if (verb >= 3)
- print_process (args);
+ return false;
+ };
- try
+ // Update and add header file to the list of prerequisite targets.
+ // Depending on the cache flag, the file is assumed to either have come
+ // from the depdb cache or from the compiler run. Return whether the
+ // extraction process should be restarted.
+ //
+ auto add = [&trace, &update, &pm, a, &t, &ds, &dd] (path f, bool cache)
+ -> bool
+ {
+ if (!f.absolute ())
{
- process pr (args.data (), 0, -1); // Open pipe to stdout.
- ifdstream is (pr.in_ofd);
+ f.normalize ();
+
+ // This is probably as often an error as an auto-generated file, so
+ // trace at level 4.
+ //
+ level4 ([&]{trace << "non-existent header '" << f << "'";});
+
+ // If we already did this and build_prefix_map() returned empty,
+ // then we would have failed below.
+ //
+ if (pm.empty ())
+ pm = build_prefix_map (t);
- size_t skip (skip_count);
- for (bool first (true), second (true); !(restart || is.eof ()); )
+ // First try the whole file. Then just the directory.
+ //
+ // @@ Has to be a separate map since the prefix can be
+ // the same as the file name.
+ //
+ // auto i (pm.find (f));
+
+ // Find the most qualified prefix of which we are a sub-path.
+ //
+ auto i (pm.end ());
+
+ if (!pm.empty ())
{
- string l;
- getline (is, l);
+ const dir_path& d (f.directory ());
+ i = pm.upper_bound (d);
- if (is.fail () && !is.eof ())
- fail << "error reading C++ compiler -M output";
+ // Get the greatest less than, if any. We might still not be a
+ // sub. Note also that we still have to check the last element if
+ // upper_bound() returned end().
+ //
+ if (i == pm.begin () || !d.sub ((--i)->first))
+ i = pm.end ();
+ }
- size_t pos (0);
+ if (i == pm.end ())
+ fail << "unable to map presumably auto-generated header '"
+ << f << "' to a project";
- if (first)
- {
- // Empty output should mean the wait() call below will return
- // false.
- //
- if (l.empty ())
- break;
+ f = i->second / f;
+ }
+ else
+ {
+ // We used to just normalize the path but that could result in an
+ // invalid path (e.g., on CentOS 7 with Clang 3.4) because of the
+ // symlinks. So now we realize (i.e., realpath(3)) it instead.
+ //
+ f.realize ();
+ }
- assert (l[0] == '*' && l[1] == ':' && l[2] == ' ');
+ level6 ([&]{trace << "injecting " << f;});
- first = false;
+ // Verify/add it to the dependency database.
+ //
+ if (!cache)
+ {
+ string* dl (dd.read ());
+ if (dl == nullptr || *dl != f.string ())
+ dd.write (f);
+ }
- // While normally we would have the source file on the
- // first line, if too long, it will be moved to the next
- // line and all we will have on this line is "*: \".
- //
- if (l.size () == 4 && l[3] == '\\')
- continue;
- else
- pos = 3; // Skip "*: ".
+ // Split the name into its directory part, the name part, and
+ // extension. Here we can assume the name part is a valid filesystem
+ // name.
+ //
+ // Note that if the file has no extension, we record an empty
+ // extension rather than NULL (which would signify that the default
+ // extension should be added).
+ //
+ dir_path d (f.directory ());
+ string n (f.leaf ().base ().string ());
+ const char* es (f.extension ());
+ const string* e (&extension_pool.find (es != nullptr ? es : ""));
- // Fall through to the 'second' block.
- }
+ // Determine the target type.
+ //
+ const target_type* tt (nullptr);
+
+ // See if this directory is part of any project out_root hierarchy.
+ // Note that this will miss all the headers that come from src_root
+ // (so they will be treated as generic C headers below). Generally,
+ // we don't have the ability to determine that some file belongs to
+ // src_root of some project. But that's not a problem for our
+ // purposes: it is only important for us to accurately determine
+ // target types for headers that could be auto-generated.
+ //
+ scope& b (scopes.find (d));
+ if (b.root_scope () != nullptr)
+ tt = map_extension (b, n, *e);
- if (second)
- {
- second = false;
- next (l, pos); // Skip the source file.
- }
+ // If it is outside any project, or the project doesn't have
+ // such an extension, assume it is a plain old C header.
+ //
+ if (tt == nullptr)
+ tt = &h::static_type;
- // If things go wrong (and they often do in this area), give
- // the user a bit extra context.
- //
- auto g (
- make_exception_guard (
- [&s]()
- {
- info << "while extracting dependencies from " << s;
- }));
+ // Find or insert target.
+ //
+ path_target& pt (
+ static_cast<path_target&> (search (*tt, d, n, e, &ds)));
- while (pos != l.size ())
- {
- string fs (next (l, pos));
+ // Assign path.
+ //
+ if (pt.path ().empty ())
+ pt.path (move (f));
+ else
+ assert (pt.path () == f);
- // Skip until where we left off.
- //
- if (skip != 0)
- {
- skip--;
- continue;
- }
+ // Match to a rule.
+ //
+ build2::match (a, pt);
- path f (move (fs));
+ // Update.
+ //
+ // If this header came from the depdb, make sure it is no older than
+ // the db itself (if it has changed since the db was written, then
+ // chances are the cached data is stale).
+ //
+ bool restart (update (pt, cache ? dd.mtime () : timestamp_unknown));
- if (!f.absolute ())
- {
- f.normalize ();
+ // Add to our prerequisite target list.
+ //
+ t.prerequisite_targets.push_back (&pt);
- // This is probably as often an error as an auto-generated
- // file, so trace at level 4.
- //
- level4 ([&]{trace << "non-existent header '" << f << "'";});
+ return restart;
+ };
- // If we already did it and build_prefix_map() returned empty,
- // then we would have failed below.
- //
- if (pm.empty ())
- pm = build_prefix_map (t);
+ // If nothing so far has invalidated the dependency database, then
+ // try the cached data before running the compiler.
+ //
+ bool cache (dd.reading ());
- // First try the whole file. Then just the directory.
- //
- // @@ Has to be a separate map since the prefix can be
- // the same as the file name.
- //
- // auto i (pm.find (f));
+ // But, before we do all that, make sure the source file itself is up to
+ // date.
+ //
+ if (update (s, dd.mtime ()))
+ {
+ // If the file got updated or is newer than the database, then we
+ // cannot rely on the cache any further. However, the cached data
+ // could actually still be valid so the compiler run will validate it.
+ //
+ // We do need to update the database timestamp, however. Failed that,
+ // we will keep re-validating the cached data over and over again.
+ //
+ if (cache)
+ {
+ cache = false;
+ dd.touch ();
+ }
+ }
- // Find the most qualified prefix of which we are a
- // sub-path.
- //
- auto i (pm.end ());
+ size_t skip_count (0);
+ for (bool restart (true); restart; cache = false)
+ {
+ restart = false;
- if (!pm.empty ())
- {
- const dir_path& d (f.directory ());
- i = pm.upper_bound (d);
-
- // Get the greatest less than, if any. We might
- // still not be a sub. Note also that we still
- // have to check the last element is upper_bound()
- // returned end().
- //
- if (i == pm.begin () || !d.sub ((--i)->first))
- i = pm.end ();
- }
+ if (cache)
+ {
+ // If any, this is always the first run.
+ //
+ assert (skip_count == 0);
- if (i == pm.end ())
- fail << "unable to map presumably auto-generated header '"
- << f << "' to a project";
+ while (dd.more ())
+ {
+ string* l (dd.read ());
- f = i->second / f;
- }
- else
- {
- // We used to just normalize the path but that could result in
- // an invalid path (e.g., on CentOS 7 with Clang 3.4) because
- // of the symlinks. So now we realize (i.e., realpath(3)) it
- // instead.
- //
- f.realize ();
- }
+ // If the line is invalid, run the compiler.
+ //
+ if (l == nullptr)
+ {
+ restart = true;
+ break;
+ }
- level6 ([&]{trace << "injecting " << f;});
+ restart = add (path (move (*l)), true);
+ skip_count++;
- // Split the name into its directory part, the name part, and
- // extension. Here we can assume the name part is a valid
- // filesystem name.
- //
- // Note that if the file has no extension, we record an empty
- // extension rather than NULL (which would signify that the
- // default extension should be added).
- //
- dir_path d (f.directory ());
- string n (f.leaf ().base ().string ());
- const char* es (f.extension ());
- const string* e (&extension_pool.find (es != nullptr ? es : ""));
+ // The same idea as in the source file update above.
+ //
+ if (restart)
+ {
+ level6 ([&]{trace << "restarting";});
+ dd.touch ();
+ break;
+ }
+ }
+ }
+ else
+ {
+ try
+ {
+ if (args.empty ())
+ init_args ();
- // Determine the target type.
- //
- const target_type* tt (nullptr);
-
- // See if this directory is part of any project out_root
- // hierarchy. Note that this will miss all the headers
- // that come from src_root (so they will be treated as
- // generic C headers below). Generally, we don't have
- // the ability to determine that some file belongs to
- // src_root of some project. But that's not a problem
- // for our purposes: it is only important for us to
- // accurately determine target types for headers that
- // could be auto-generated.
- //
- scope& b (scopes.find (d));
- if (b.root_scope () != nullptr)
- tt = map_extension (b, n, *e);
+ if (verb >= 3)
+ print_process (args);
- // If it is outside any project, or the project doesn't have
- // such an extension, assume it is a plain old C header.
- //
- if (tt == nullptr)
- tt = &h::static_type;
+ process pr (args.data (), 0, -1); // Open pipe to stdout.
+ ifdstream is (pr.in_ofd);
- // Find or insert target.
- //
- path_target& pt (
- static_cast<path_target&> (search (*tt, d, n, e, &ds)));
+ size_t skip (skip_count);
+ for (bool first (true), second (true); !(restart || is.eof ()); )
+ {
+ string l;
+ getline (is, l);
- // Assign path.
- //
- if (pt.path ().empty ())
- pt.path (move (f));
+ if (is.fail () && !is.eof ())
+ fail << "error reading C++ compiler -M output";
- // Match to a rule.
- //
- build2::match (a, pt);
+ size_t pos (0);
- // Update it.
- //
- // There would normally be a lot of headers for every source
- // file (think all the system headers) and this can get
- // expensive. At the same time, most of these headers are
- // existing files that we will never be updated (again,
- // system headers, for example) and the rule that will match
- // them is fallback file_rule. That rule has an optimization
- // in that it returns noop_recipe (which causes the target
- // state to be automatically set to unchanged) if the file
- // is known to be up to date.
- //
- if (pt.state () != target_state::unchanged)
+ if (first)
+ {
+ // Empty output should mean the wait() call below will return
+ // false.
+ //
+ if (l.empty ())
+ break;
+
+ assert (l[0] == '*' && l[1] == ':' && l[2] == ' ');
+
+ first = false;
+
+ // While normally we would have the source file on the first
+ // line, if too long, it will be moved to the next line and
+ // all we will have on this line is "*: \".
+ //
+ if (l.size () == 4 && l[3] == '\\')
+ continue;
+ else
+ pos = 3; // Skip "*: ".
+
+ // Fall through to the 'second' block.
+ }
+
+ if (second)
+ {
+ second = false;
+ next (l, pos); // Skip the source file.
+ }
+
+ while (pos != l.size ())
{
- // We only want to restart if our call to execute() actually
- // caused an update. In particular, the target could already
- // have been in target_state::changed because of a dependency
- // extraction run for some other source file.
+ string f (next (l, pos));
+
+ // Skip until where we left off.
//
- target_state os (pt.state ());
- target_state ns (execute_direct (a, pt));
+ if (skip != 0)
+ {
+ skip--;
+ continue;
+ }
+
+ restart = add (path (move (f)), false);
+ skip_count++;
- if (ns != os && ns != target_state::unchanged)
+ if (restart)
{
- level6 ([&]{trace << "updated " << pt << ", restarting";});
- restart = true;
+ level6 ([&]{trace << "restarting";});
+ break;
}
}
-
- // Add to our prerequisite target list.
- //
- t.prerequisite_targets.push_back (&pt);
- skip_count++;
}
- }
- // We may not have read all the output (e.g., due to a restart).
- // Before we used to just close the file descriptor to signal to the
- // other end that we are not interested in the rest. This works fine
- // with GCC but Clang (3.7.0) finds this impolite and complains,
- // loudly (broken pipe). So now we are going to skip until the end.
- //
- if (!is.eof ())
- is.ignore (numeric_limits<streamsize>::max ());
- is.close ();
+ // We may not have read all the output (e.g., due to a restart).
+ // Before we used to just close the file descriptor to signal to
+ // the other end that we are not interested in the rest. This
+ // works fine with GCC but Clang (3.7.0) finds this impolite and
+ // complains, loudly (broken pipe). So now we are going to skip
+ // until the end.
+ //
+ if (!is.eof ())
+ is.ignore (numeric_limits<streamsize>::max ());
+ is.close ();
- // We assume the child process issued some diagnostics.
- //
- if (!pr.wait ())
- {
- // In case of a restarts, we closed our end of the pipe early
- // which might have caused the other end to fail. So far we
- // experienced this on Fedora 23 with GCC 5.3.1 and there were
- // no diagnostics issued, just the non-zero exit status. If we
- // do get diagnostics, then we will have to read and discard the
- // output until eof.
+ // We assume the child process issued some diagnostics.
//
- if (!restart)
- throw failed ();
+ if (!pr.wait ())
+ {
+            // In case of a restart, we closed our end of the pipe early
+ // which might have caused the other end to fail. So far we
+ // experienced this on Fedora 23 with GCC 5.3.1 and there were
+ // no diagnostics issued, just the non-zero exit status. If we
+ // do get diagnostics, then we will have to read and discard the
+ // output until eof.
+ //
+ if (!restart)
+ throw failed ();
+ }
}
- }
- catch (const process_error& e)
- {
- error << "unable to execute " << args[0] << ": " << e.what ();
+ catch (const process_error& e)
+ {
+ error << "unable to execute " << args[0] << ": " << e.what ();
- // In a multi-threaded program that fork()'ed but did not exec(),
- // it is unwise to try to do any kind of cleanup (like unwinding
- // the stack and running destructors).
- //
- if (e.child ())
- exit (1);
+ // In a multi-threaded program that fork()'ed but did not exec(),
+ // it is unwise to try to do any kind of cleanup (like unwinding
+ // the stack and running destructors).
+ //
+ if (e.child ())
+ exit (1);
- throw failed ();
+ throw failed ();
+ }
}
}
}
diff --git a/build2/cxx/utility b/build2/cxx/utility
index 7072c7b..307e1b4 100644
--- a/build2/cxx/utility
+++ b/build2/cxx/utility
@@ -10,25 +10,28 @@
#include <build2/target>
-#include <build2/config/utility>
-
namespace build2
{
namespace cxx
{
- using config::append_options;
-
// T is either target or scope.
//
template <typename T>
void
- append_std (cstrings& args, T&, string& storage);
+ append_std (cstrings&, T&, string& storage);
- // Append library options from one of the cxx.export.* variables
+ template <typename T>
+ void
+ hash_std (sha256&, T&);
+
+ // Append or hash library options from one of the cxx.export.* variables
// recursively, prerequisite libraries first.
//
void
- append_lib_options (cstrings& args, target&, const char* variable);
+ append_lib_options (cstrings&, target&, const char* variable);
+
+ void
+ hash_lib_options (sha256&, target&, const char* variable);
}
}
diff --git a/build2/cxx/utility.cxx b/build2/cxx/utility.cxx
index 0a26aea..15980d4 100644
--- a/build2/cxx/utility.cxx
+++ b/build2/cxx/utility.cxx
@@ -25,5 +25,19 @@ namespace build2
append_options (args, l, var);
}
+
+ void
+ hash_lib_options (sha256& csum, target& l, const char* var)
+ {
+ using namespace bin;
+
+ for (target* t: l.prerequisite_targets)
+ {
+ if (t->is_a<lib> () || t->is_a<liba> () || t->is_a<libso> ())
+ hash_lib_options (csum, *t, var);
+ }
+
+ hash_options (csum, l, var);
+ }
}
}
diff --git a/build2/cxx/utility.txx b/build2/cxx/utility.txx
index 8324107..ed35fdb 100644
--- a/build2/cxx/utility.txx
+++ b/build2/cxx/utility.txx
@@ -9,15 +9,15 @@ namespace build2
namespace cxx
{
template <typename T>
- void
- append_std (cstrings& args, T& t, string& s)
+ bool
+ translate_std (T& t, string& s)
{
if (auto l = t["cxx.std"])
{
const string& v (as<string> (*l));
- // Translate 11 to 0x and 14 to 1y for compatibility with
- // older versions of the compiler.
+ // Translate 11 to 0x and 14 to 1y for compatibility with older
+ // versions of the compiler.
//
s = "-std=c++";
@@ -28,8 +28,27 @@ namespace build2
else
s += v;
- args.push_back (s.c_str ());
+ return true;
}
+
+ return false;
+ }
+
+ template <typename T>
+ inline void
+ append_std (cstrings& args, T& t, string& s)
+ {
+ if (translate_std (t, s))
+ args.push_back (s.c_str ());
+ }
+
+ template <typename T>
+ inline void
+ hash_std (sha256& csum, T& t)
+ {
+ string s;
+ if (translate_std (t, s))
+ csum.append (s);
}
}
}
diff --git a/build2/depdb b/build2/depdb
new file mode 100644
index 0000000..effdc92
--- /dev/null
+++ b/build2/depdb
@@ -0,0 +1,143 @@
+// file : build2/depdb -*- C++ -*-
+// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BUILD2_DEPDB
+#define BUILD2_DEPDB
+
+#include <fstream>
+#include <cstring> // strlen()
+
+#include <build2/types>
+#include <build2/utility>
+
+namespace build2
+{
+ // Auxiliary dependency database (those .d files).
+ //
+ // This is a strange beast: a line-oriented, streaming database that can, at
+ // some point, be switched from reading to (over)writing. The idea is to
+ // store auxiliary/ad-hoc dependency information in the "invalidation"
+ // order. That is, if an earlier line is out of date, then all the
+ // subsequent ones are out of date as well.
+ //
+ // As an example, consider a dependency database for foo.o which is built
+ // from foo.cxx by the cxx.compile rule. The first line could be the rule
+ // name itself (perhaps with the version). If a different rule is now
+ // building foo.o, then any dep info that was saved by cxx.compile is
+ // probably useless. Next we can have the command line options that were
+ // used to build foo.o. Then could come the source file name followed by the
+ // extracted header dependencies. If the compile options or the source file
+ // name have changed, then the header dependencies are likely to have
+ // changed as well.
+ //
+ // As an example, here is what our foo.o.d could look like (the first line
+ // is the database format version and the last '\0' character is the end
+ // marker):
+ //
+ // 1
+ // cxx.compile 1
+ // g++-4.8 -I/tmp/foo -O3
+ // /tmp/foo/foo.cxx
+ // /tmp/foo/foo.hxx
+ // /usr/include/string.h
+ // /usr/include/stdlib.h
+ // /tmp/foo/bar.hxx
+ // ^@
+ //
+ // Uses iostream failure and system_error exceptions to signal errors.
+ //
+ class depdb
+ {
+ public:
+ // Open the database for reading. Note that if the file does not exist,
+ // has wrong format version, or is corrupt, then the database will be
+ // immediately switched to writing.
+ //
+ depdb (const path&);
+
+ // Return the modification time of the database. This value only makes
+ // sense while reading (in the write mode it will be timestamp_unknown).
+ //
+ timestamp
+ mtime () const {return mtime_;}
+
+ // Update the database modification time in close() even if otherwise
+ // no modifications are necessary (i.e., the database is in the read
+ // mode and is at eof).
+ //
+ void
+ touch () {touch_ = true;}
+
+ // Close the database. Note that if this function is not called, then
+    // the database may be left in the old/corrupt state.
+ //
+ void
+ close ();
+
+ // Read the next line. If the result is not NULL, then it is a pointer to
+ // the next line in the database (which you are free to move from). If you
+ // then call write(), this line will be overwritten.
+ //
+ // If the result is NULL, then it means no next line is available. This
+ // can be due to several reasons:
+ //
+ // - eof reached (you can detect this by calling more() before read())
+ // - database is already in the write mode
+    //   - the next line (and the rest of the database) is corrupt
+ //
+ string*
+ read () {return state_ == state::write ? nullptr : read_ ();}
+
+ // Return true if the database is in the read mode and there is at least
+ // one more line available. Note that there is no guarantee that the line
+ // is not corrupt. In other words, read() can still return NULL, it just
+ // won't be because of eof.
+ //
+ bool
+ more () {return state_ == state::read;}
+
+ bool
+ reading () {return state_ != state::write;}
+
+ bool
+ writing () {return state_ == state::write;}
+
+ // Write the next line. Note that this switches the database into the
+ // write mode and no further reading will be possible.
+ //
+ void
+ write (const string& l) {write (l.c_str (), l.size ());}
+
+ void
+ write (const path& p) {write (p.string ());}
+
+ void
+ write (const char* s) {write (s, std::strlen (s));}
+
+ void
+ write (const char*, size_t);
+
+ void
+ write (char);
+
+ private:
+ void
+ change (bool flush = true);
+
+ string*
+ read_ ();
+
+ private:
+ timestamp mtime_;
+ std::fstream fs_;
+
+ std::fstream::pos_type pos_; // Start of the last returned line.
+ string line_;
+
+ enum class state {read, read_eof, write} state_;
+ bool touch_;
+ };
+}
+
+#endif // BUILD2_DEPDB
diff --git a/build2/depdb.cxx b/build2/depdb.cxx
new file mode 100644
index 0000000..5623405
--- /dev/null
+++ b/build2/depdb.cxx
@@ -0,0 +1,196 @@
+// file : build2/depdb.cxx -*- C++ -*-
+// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
+// license : MIT; see accompanying LICENSE file
+
+#include <build2/depdb>
+
+#include <butl/filesystem> // file_mtime()
+
+using namespace std;
+using namespace butl;
+
+namespace build2
+{
+ depdb::
+ depdb (const path& f)
+ : mtime_ (file_mtime (f)), touch_ (false)
+ {
+ fs_.exceptions (fstream::failbit | fstream::badbit);
+
+ if (mtime_ != timestamp_nonexistent)
+ {
+ // Open an existing file.
+ //
+ fs_.open (f.string (), fstream::in | fstream::out | fstream::binary);
+ state_ = state::read;
+ fs_.exceptions (fstream::badbit);
+
+ // Read the database format version.
+ //
+ string* l (read ());
+ if (l == nullptr || *l != "1")
+ write ('1');
+ }
+ else
+ {
+ fs_.open (f.string (), fstream::out | fstream::binary);
+
+ state_ = state::write;
+ mtime_ = timestamp_unknown;
+
+ write ('1');
+ }
+ }
+
+ void depdb::
+ change (bool flush)
+ {
+ assert (state_ != state::write);
+
+ fs_.clear ();
+ fs_.exceptions (fstream::failbit | fstream::badbit);
+
+ // Consider this scenario: we are overwriting an old line (so it ends with
+ // a newline and the "end marker") but the operation failed half way
+ // through. Now we have the prefix from the new line, the suffix from the
+ // old, and everything looks valid. So what we need is to somehow
+ // invalidate the old content so that it can never combine with (partial)
+ // new content to form a valid line. One way would be to truncate the file
+ // but that is not straightforward (see note in close()). Alternatively,
+ // we can replace everything with the "end markers".
+ //
+ fs_.seekg (0, fstream::end);
+ fstream::pos_type end (fs_.tellg ());
+
+ if (end != pos_)
+ {
+ fs_.seekp (pos_);
+
+ for (auto i (end - pos_); i != 0; --i)
+ fs_.put ('\0');
+
+ if (flush)
+ fs_.flush ();
+ }
+
+ fs_.seekp (pos_); // Must be done when changing from read to write.
+
+ state_ = state::write;
+ mtime_ = timestamp_unknown;
+ }
+
+ string* depdb::
+ read_ ()
+ {
+ // Save the start position of this line so that we can overwrite it.
+ //
+ pos_ = fs_.tellg ();
+
+ // Note that we intentionally check for eof after updating the write
+ // position.
+ //
+ if (state_ == state::read_eof)
+ return nullptr;
+
+ getline (fs_, line_); // Calls data_.erase().
+
+ // The line should always end with a newline. If it doesn't, then this
+ // line (and the rest of the database) is assumed corrupted. Also peek at
+ // the character after the newline. We should either have the next line or
+ // '\0', which is our "end marker", that is, it indicates the database
+ // was properly closed.
+ //
+ fstream::int_type c;
+ if (fs_.fail () || // Nothing got extracted.
+ fs_.eof () || // Eof reached before delimiter.
+ (c = fs_.peek ()) == fstream::traits_type::eof ())
+ {
+ // Preemptively switch to writing. While we could have delayed this
+ // until the user called write(), if the user calls read() again (for
+ // whatever misguided reason) we will mess up the overwrite position.
+ //
+ change ();
+ return nullptr;
+ }
+
+ // Handle the "end marker". Note that the caller can still switch to the
+ // write mode on this line. And, after calling read() again, write to the
+ // next line (i.e., start from the "end marker").
+ //
+ if (c == '\0')
+ state_ = state::read_eof;
+
+ return &line_;
+ }
+
+ void depdb::
+ write (const char* s, size_t n)
+ {
+ // Switch to writing if we are still reading.
+ //
+ if (state_ != state::write)
+ change ();
+
+ fs_.write (s, static_cast<streamsize> (n));
+ fs_.put ('\n');
+ }
+
+ void depdb::
+ write (char c)
+ {
+ // Switch to writing if we are still reading.
+ //
+ if (state_ != state::write)
+ change ();
+
+ fs_.put (c);
+ fs_.put ('\n');
+ }
+
+ void depdb::
+ close ()
+ {
+ // If we are at eof, then it means all lines are good, there is the "end
+ // marker" at the end, and we don't need to do anything, except, maybe
+ // touch the file. Otherwise, we need to add the "end marker" and truncate
+ // the rest.
+ //
+ if (state_ == state::read_eof)
+ {
+ // While there are utime(2)/utimensat(2) (and probably something similar
+ // for Windows), for now we just overwrite the "end marker". Hopefully
+ // no implementation will be smart enough to recognize this is a no-op
+ // and skip updating mtime (which would probably be incorrect).
+ //
+ // It would be interesting to one day write an implementation that uses
+      // POSIX file I/O, futimens(), and ftruncate() and see how much better it
+ // performs.
+ //
+ if (touch_)
+ {
+ fs_.clear ();
+ fs_.exceptions (fstream::failbit | fstream::badbit);
+ fs_.seekp (0, fstream::cur); // Required to switch from read to write.
+ fs_.put ('\0');
+ }
+ }
+ else
+ {
+ if (state_ != state::write)
+ {
+ pos_ = fs_.tellg (); // The last line is accepted.
+ change (false); // Don't flush.
+ }
+
+ fs_.put ('\0'); // The "end marker".
+
+ // Truncating an fstream is actually a non-portable pain in the butt.
+ // What if we leave the junk after the "end marker"? These files are
+ // pretty small and chances are they will occupy the filesystem's block
+ // size (usually 4KB) whether they are truncated or not. So it might
+ // actually be faster not to truncate.
+ }
+
+ fs_.close ();
+ }
+}
diff --git a/build2/install/rule.cxx b/build2/install/rule.cxx
index e8184b9..c570a41 100644
--- a/build2/install/rule.cxx
+++ b/build2/install/rule.cxx
@@ -12,8 +12,6 @@
#include <build2/algorithm>
#include <build2/diagnostics>
-#include <build2/config/utility>
-
using namespace std;
using namespace butl;
@@ -243,7 +241,7 @@ namespace build2
args.push_back ("-d");
if (base.options.d != nullptr) //@@ VAR
- config::append_options (args, base.options);
+ append_options (args, base.options);
args.push_back ("-m");
args.push_back (base.dir_mode.c_str ());
@@ -289,7 +287,7 @@ namespace build2
args.push_back (base.cmd.c_str ());
if (base.options.d != nullptr) //@@ VAR
- config::append_options (args, base.options);
+ append_options (args, base.options);
args.push_back ("-m");
args.push_back (base.mode.c_str ());
diff --git a/build2/test/rule.cxx b/build2/test/rule.cxx
index c7de054..403499f 100644
--- a/build2/test/rule.cxx
+++ b/build2/test/rule.cxx
@@ -11,8 +11,6 @@
#include <build2/algorithm>
#include <build2/diagnostics>
-#include <build2/config/utility> // add_options()
-
using namespace std;
using namespace butl;
@@ -300,7 +298,7 @@ namespace build2
}
if (l)
- config::append_options (args, as<strings> (*l));
+ append_options (args, as<strings> (*l));
}
// The format of args shall be:
diff --git a/build2/types b/build2/types
index 0c34efb..7c803a1 100644
--- a/build2/types
+++ b/build2/types
@@ -20,6 +20,7 @@
#include <system_error>
#include <butl/path>
+#include <butl/sha256>
#include <butl/optional>
#include <butl/timestamp>
@@ -94,6 +95,10 @@ namespace build2
using butl::timestamp_nonexistent;
using butl::operator<<;
+ // <butl/sha256>
+ //
+ using butl::sha256;
+
// <build2/name>
//
}
diff --git a/build2/utility b/build2/utility
index a61fb08..187834e 100644
--- a/build2/utility
+++ b/build2/utility
@@ -39,6 +39,46 @@ namespace build2
extern const path empty_path;
extern const dir_path empty_dir_path;
+ // Append all the values from a variable to the C-string list. T is either
+ // target or scope. The variable is expected to be of type strings.
+ //
+ template <typename T>
+ void
+ append_options (cstrings&, T&, const char* variable);
+
+ template <typename T>
+ void
+ hash_options (sha256&, T&, const char* variable);
+
+ // As above but from the strings value directly.
+ //
+ class value;
+ template <typename> struct lookup;
+ template <typename, typename> struct vector_value;
+ using const_strings_value = vector_value<string, const names>;
+
+ void
+ append_options (cstrings&, const lookup<const value>&);
+
+ void
+ hash_options (sha256&, const lookup<const value>&);
+
+ void
+ append_options (cstrings&, const const_strings_value&);
+
+ void
+ hash_options (sha256&, const const_strings_value&);
+
+ // Check if a specified option is present in the variable value.
+ // T is either target or scope.
+ //
+ template <typename T>
+ bool
+ find_option (const char* option, T&, const char* variable);
+
+ bool
+ find_option (const char* option, const lookup<const value>&);
+
// Parse version string in the X.Y.Z[-{a|b}N] to a version integer in the
// AABBCCDD form, where:
//
@@ -122,4 +162,6 @@ namespace build2
};
}
+#include <build2/utility.ixx>
+
#endif // BUILD2_UTILITY
diff --git a/build2/utility.cxx b/build2/utility.cxx
index b517deb..c3dd99f 100644
--- a/build2/utility.cxx
+++ b/build2/utility.cxx
@@ -7,6 +7,7 @@
#include <cstdlib> // strtol()
#include <build2/context>
+#include <build2/variable>
#include <build2/diagnostics>
using namespace std;
@@ -49,6 +50,54 @@ namespace build2
const path empty_path;
const dir_path empty_dir_path;
+ void
+ append_options (cstrings& args, const lookup<const value>& l)
+ {
+ if (l)
+ append_options (args, as<strings> (*l));
+ }
+
+ void
+ hash_options (sha256& csum, const lookup<const value>& l)
+ {
+ if (l)
+ hash_options (csum, as<strings> (*l));
+ }
+
+ void
+ append_options (cstrings& args, const const_strings_value& sv)
+ {
+ if (!sv.empty ())
+ {
+ args.reserve (args.size () + sv.size ());
+
+ for (const string& s: sv)
+ args.push_back (s.c_str ());
+ }
+ }
+
+ void
+ hash_options (sha256& csum, const const_strings_value& sv)
+ {
+ for (const string& s: sv)
+ csum.append (s);
+ }
+
+ bool
+ find_option (const char* option, const lookup<const value>& l)
+ {
+ if (l)
+ {
+ for (const string& s: as<strings> (*l))
+ {
+ if (s == option)
+ return true;
+ }
+ }
+
+ return false;
+ }
+
unsigned int
to_version (const string& s)
{
diff --git a/build2/utility.ixx b/build2/utility.ixx
new file mode 100644
index 0000000..04ba9f5
--- /dev/null
+++ b/build2/utility.ixx
@@ -0,0 +1,27 @@
+// file : build2/utility.ixx -*- C++ -*-
+// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
+// license : MIT; see accompanying LICENSE file
+
+namespace build2
+{
+ template <typename T>
+ inline void
+ append_options (cstrings& args, T& s, const char* var)
+ {
+ append_options (args, s[var]);
+ }
+
+ template <typename T>
+ inline void
+ hash_options (sha256& csum, T& s, const char* var)
+ {
+ hash_options (csum, s[var]);
+ }
+
+ template <typename T>
+ inline bool
+ find_option (const char* option, T& s, const char* var)
+ {
+ return find_option (option, s[var]);
+ }
+}
diff --git a/tests/depdb/buildfile b/tests/depdb/buildfile
new file mode 100644
index 0000000..53e9cd3
--- /dev/null
+++ b/tests/depdb/buildfile
@@ -0,0 +1,8 @@
+# file : tests/depdb/buildfile
+# copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
+# license : MIT; see accompanying LICENSE file
+
+import libs = libbutl%lib{butl}
+
+exe{driver}: cxx{driver ../../build2/{depdb}} $libs
+exe{driver}: test.arguments = $out_base/test.d
diff --git a/tests/depdb/driver.cxx b/tests/depdb/driver.cxx
new file mode 100644
index 0000000..f156991
--- /dev/null
+++ b/tests/depdb/driver.cxx
@@ -0,0 +1,166 @@
+// file : tests/depdb/driver.cxx -*- C++ -*-
+// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
+// license : MIT; see accompanying LICENSE file
+
+#include <iostream>
+
+#include <butl/filesystem>
+
+#include <build2/types>
+#include <build2/utility>
+
+#include <build2/depdb>
+
+using namespace std;
+using namespace build2;
+
+int
+main (int argc, char* argv[])
+{
+ if (argc != 2)
+ {
+ cerr << "usage: " << argv[0] << " <db-file>" << endl;
+ return 1;
+ }
+
+ path p (argv[1]);
+ auto rm = [&p] () {try_rmfile (p);};
+
+ // Create empty database.
+ //
+ {
+ rm ();
+
+ {
+ depdb d (p);
+ assert (d.writing ());
+ d.close ();
+ }
+
+ {
+ depdb d (p);
+ assert (d.reading ());
+ assert (!d.more ());
+ assert (d.read () == nullptr);
+ d.close ();
+ }
+ }
+
+ // No close/end marker.
+ //
+ {
+ rm ();
+
+ {
+ depdb d (p);
+ assert (d.writing ());
+ // No close.
+ }
+
+ {
+ depdb d (p);
+ assert (d.writing ());
+ d.close ();
+ }
+
+ {
+ depdb d (p);
+ assert (d.reading ());
+ }
+ }
+
+ // Overwrite/append/truncate.
+ //
+ {
+ rm ();
+
+ {
+ depdb d (p);
+ d.write ("foo");
+ d.close ();
+ }
+
+ {
+ depdb d (p);
+ assert (*d.read () == "foo");
+ assert (!d.more ());
+ d.close ();
+ }
+
+ {
+ depdb d (p);
+ assert (*d.read () == "foo");
+ assert (!d.more ());
+ d.write ("FOO");
+ d.close ();
+ }
+
+ {
+ depdb d (p);
+ assert (*d.read () == "FOO");
+ assert (!d.more ());
+ assert (d.read () == nullptr);
+ assert (d.read () == nullptr);
+ d.write ("BAR");
+ d.close ();
+ }
+
+ {
+ depdb d (p);
+ assert (*d.read () == "FOO");
+ assert (d.more ());
+ d.write ("foo");
+ d.close (); // Truncate.
+ }
+
+ {
+ depdb d (p);
+ assert (*d.read () == "foo");
+ assert (!d.more ());
+ }
+
+ // Stray end marker.
+ //
+ {
+ depdb d (p);
+ assert (*d.read () == "foo");
+ d.write ("fox");
+ // No close.
+ }
+
+ {
+ depdb d (p);
+ assert (d.more ());
+ assert (*d.read () == "fox");
+ assert (!d.more ());
+ }
+ }
+
+ // Read/truncate.
+ //
+ {
+ rm ();
+
+ {
+ depdb d (p);
+ d.write ("foo");
+ d.write ("bar");
+ d.close ();
+ }
+
+ {
+ depdb d (p);
+ assert (*d.read () == "foo");
+ assert (d.more ());
+ d.close (); // Truncate bar.
+ }
+
+ {
+ depdb d (p);
+ assert (*d.read () == "foo");
+ assert (!d.more ());
+ }
+ }
+
+ rm ();
+}