aboutsummaryrefslogtreecommitdiff
path: root/build2/cc
diff options
context:
space:
mode:
authorBoris Kolpackov <boris@codesynthesis.com>2016-08-09 11:31:53 +0200
committerBoris Kolpackov <boris@codesynthesis.com>2016-08-12 17:04:22 +0200
commit9fa5f73d00905568e8979d0c93ec4a8f645c81d5 (patch)
treef2bf937fa256c0ef2c9bbe05d3655d1985719405 /build2/cc
parenta1b2319ff2ddc8a6f139ee364cabe236ca62e23e (diff)
Implement support for C compilation
We now have two new modules: cc (c-common) and c.
Diffstat (limited to 'build2/cc')
-rw-r--r--build2/cc/common172
-rw-r--r--build2/cc/compile82
-rw-r--r--build2/cc/compile.cxx1480
-rw-r--r--build2/cc/guess125
-rw-r--r--build2/cc/guess.cxx1052
-rw-r--r--build2/cc/init55
-rw-r--r--build2/cc/init.cxx321
-rw-r--r--build2/cc/install39
-rw-r--r--build2/cc/install.cxx70
-rw-r--r--build2/cc/link78
-rw-r--r--build2/cc/link.cxx1850
-rw-r--r--build2/cc/module59
-rw-r--r--build2/cc/module.cxx291
-rw-r--r--build2/cc/msvc.cxx342
-rw-r--r--build2/cc/target48
-rw-r--r--build2/cc/target.cxx39
-rw-r--r--build2/cc/types32
-rw-r--r--build2/cc/utility64
-rw-r--r--build2/cc/utility.cxx115
-rw-r--r--build2/cc/utility.ixx33
-rw-r--r--build2/cc/windows-manifest.cxx136
-rw-r--r--build2/cc/windows-rpath.cxx273
22 files changed, 6756 insertions, 0 deletions
diff --git a/build2/cc/common b/build2/cc/common
new file mode 100644
index 0000000..95f205a
--- /dev/null
+++ b/build2/cc/common
@@ -0,0 +1,172 @@
+// file : build2/cc/common -*- C++ -*-
+// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BUILD2_CC_COMMON
+#define BUILD2_CC_COMMON
+
+#include <build2/types>
+#include <build2/utility>
+
+#include <build2/variable>
+
+#include <build2/cc/types>
+
+namespace build2
+{
+ namespace cc
+ {
+ // Data entries that define a concrete c-family module (e.g., c or cxx).
+ // These classes are used as a virtual bases by the rules as well as the
+ // modules. This way the member variables can be referenced as is, without
+ // any extra decorations (in other words, it is a bunch of data members
+ // that can be shared between several classes/instances).
+ //
+ struct config_data
+ {
+ lang x_lang;
+
+ const char* x; // Module name ("c", "cxx").
+ const char* x_name; // Compiler name ("c", "c++").
+ const char* x_default; // Compiler default ("gcc", "g++").
+
+ const variable& config_x;
+ const variable& config_x_poptions;
+ const variable& config_x_coptions;
+ const variable& config_x_loptions;
+ const variable& config_x_libs;
+
+ const variable& x_poptions;
+ const variable& x_coptions;
+ const variable& x_loptions;
+ const variable& x_libs;
+
+ const variable& c_poptions; // cc.*
+ const variable& c_coptions;
+ const variable& c_loptions;
+ const variable& c_libs;
+
+ const variable& x_export_poptions;
+ const variable& x_export_coptions;
+ const variable& x_export_loptions;
+ const variable& x_export_libs;
+
+ const variable& c_export_poptions; // cc.export.*
+ const variable& c_export_coptions;
+ const variable& c_export_loptions;
+ const variable& c_export_libs;
+
+ const variable& x_std;
+
+ const variable& x_id;
+ const variable& x_id_type;
+ const variable& x_id_variant;
+
+ const variable& x_version;
+ const variable& x_version_major;
+ const variable& x_version_minor;
+ const variable& x_version_patch;
+ const variable& x_version_build;
+
+ const variable& x_signature;
+ const variable& x_checksum;
+
+ const variable& x_target;
+ const variable& x_target_cpu;
+ const variable& x_target_vendor;
+ const variable& x_target_system;
+ const variable& x_target_version;
+ const variable& x_target_class;
+ };
+
+ struct data: config_data
+ {
+ const char* x_compile; // Rule names.
+ const char* x_link;
+ const char* x_install;
+
+ // Cached values for some commonly-used variables.
+ //
+ const string& cid; // x.id
+ const string& ctg; // x.target
+ const string& tsys; // x.target.system
+ const string& tclass; // x.target.class
+
+ const target_type& x_src; // Source target type (c{}, cxx{}).
+
+ // Array of target types that are considered headers. Keep them in the
+ // most likely to appear order and terminate with NULL.
+ //
+ const target_type* const* x_hdr;
+
+ template <typename T>
+ bool
+ x_header (const T& t) const
+ {
+ for (const target_type* const* ht (x_hdr); *ht != nullptr; ++ht)
+ if (t.is_a (**ht))
+ return true;
+
+ return false;
+ }
+
+ // Array of target types that can be #include'd. Used to reverse-lookup
+ // extensions to target types. Keep them in the most likely to appear
+ // order and terminate with NULL.
+ //
+ const target_type* const* x_inc;
+
+ // Aggregate-like constructor with from-base support.
+ //
+ data (const config_data& cd,
+ const char* compile,
+ const char* link,
+ const char* install,
+ const string& id,
+ const string& tg,
+ const string& sys,
+ const string& class_,
+ const target_type& src,
+ const target_type* const* hdr,
+ const target_type* const* inc)
+ : config_data (cd),
+ x_compile (compile), x_link (link), x_install (install),
+ cid (id), ctg (tg), tsys (sys), tclass (class_),
+ x_src (src), x_hdr (hdr), x_inc (inc) {}
+ };
+
+ class common: protected data
+ {
+ public:
+ common (data&& d): data (move (d)) {}
+
+ // Language standard (x.std) mapping. T is either target or scope.
+ //
+ template <typename T>
+ void
+ append_std (cstrings& args, scope& root, T& t, string& storage) const
+ {
+ if (auto l = t[x_std])
+ if (translate_std (storage, root, *l))
+ args.push_back (storage.c_str ());
+ }
+
+ template <typename T>
+ void
+ hash_std (sha256& csum, scope& root, T& t) const
+ {
+ string s;
+ if (auto l = t[x_std])
+ if (translate_std (s, root, *l))
+ csum.append (s);
+ }
+
+ // Return true if there is an option (stored in the first argument).
+ //
+ virtual bool
+ translate_std (string&, scope&, const value&) const = 0;
+ };
+ }
+}
+
+#endif // BUILD2_CC_COMMON
diff --git a/build2/cc/compile b/build2/cc/compile
new file mode 100644
index 0000000..6e20836
--- /dev/null
+++ b/build2/cc/compile
@@ -0,0 +1,82 @@
+// file : build2/cc/compile -*- C++ -*-
+// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BUILD2_CC_COMPILE
+#define BUILD2_CC_COMPILE
+
+#include <butl/path-map>
+
+#include <build2/types>
+#include <build2/utility>
+
+#include <build2/rule>
+
+#include <build2/cc/types>
+#include <build2/cc/common>
+
+namespace build2
+{
+ class depdb;
+
+ namespace cc
+ {
+ class link;
+
+ class compile: public rule, virtual common
+ {
+ public:
+ compile (data&&, const link&);
+
+ virtual match_result
+ match (action, target&, const string& hint) const;
+
+ virtual recipe
+ apply (action, target&, const match_result&) const;
+
+ target_state
+ perform_update (action, target&) const;
+
+ target_state
+ perform_clean (action, target&) const;
+
+ private:
+ // Mapping of include prefixes (e.g., foo in <foo/bar>) for auto-
+ // generated headers to directories where they will be generated.
+ //
+ // We are using a prefix map of directories (dir_path_map) instead
+    // of just a map in order to also cover sub-paths (e.g., <foo/more/bar>
+ // if we continue with the example). Specifically, we need to make
+ // sure we don't treat foobar as a sub-directory of foo.
+ //
+ // @@ The keys should be normalized.
+ //
+ using prefix_map = butl::dir_path_map<dir_path>;
+
+ void
+ append_prefixes (prefix_map&, target&, const variable&) const;
+
+ void
+ append_lib_prefixes (prefix_map&, target&, lorder) const;
+
+ prefix_map
+ build_prefix_map (target&, lorder) const;
+
+ // Reverse-lookup target type from extension.
+ //
+ const target_type*
+ map_extension (scope&, const string&, const string&) const;
+
+ // Header dependency injection.
+ //
+ void
+ inject (action, target&, lorder, file&, scope&, depdb&) const;
+
+ private:
+ const link& link_;
+ const string rule_id;
+ };
+ }
+}
+
+#endif // BUILD2_CC_COMPILE
diff --git a/build2/cc/compile.cxx b/build2/cc/compile.cxx
new file mode 100644
index 0000000..b5bcc50
--- /dev/null
+++ b/build2/cc/compile.cxx
@@ -0,0 +1,1480 @@
+// file : build2/cc/compile.cxx -*- C++ -*-
+// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
+// license : MIT; see accompanying LICENSE file
+
+#include <build2/cc/compile>
+
+#include <cstdlib> // exit()
+#include <iostream> // cerr
+
+#include <build2/depdb>
+#include <build2/scope>
+#include <build2/context>
+#include <build2/variable>
+#include <build2/algorithm>
+#include <build2/diagnostics>
+
+#include <build2/bin/target>
+
+#include <build2/cc/link> // search_library()
+#include <build2/cc/target> // h
+#include <build2/cc/utility>
+
+using namespace std;
+using namespace butl;
+
+namespace build2
+{
+ namespace cc
+ {
+ using namespace bin;
+
+ compile::
+ compile (data&& d, const link& l)
+ : common (move (d)),
+ link_ (l),
+ rule_id (string (x) += ".compile 1")
+ {
+ }
+
+ match_result compile::
+ match (action a, target& t, const string&) const
+ {
+ tracer trace (x, "compile::match");
+
+ // @@ TODO:
+ //
+ // - check prerequisites: single source file
+ // - if path already assigned, verify extension?
+ //
+
+ // See if we have a source file. Iterate in reverse so that a source
+ // file specified for an obj*{} member overrides the one specified for
+ // the group. Also "see through" groups.
+ //
+ for (prerequisite_member p: reverse_group_prerequisite_members (a, t))
+ {
+ if (p.is_a (x_src))
+ return p;
+ }
+
+ l4 ([&]{trace << "no " << x_lang << " source file for target " << t;});
+ return nullptr;
+ }
+
+ recipe compile::
+ apply (action a, target& xt, const match_result& mr) const
+ {
+ tracer trace (x, "compile::apply");
+
+ file& t (static_cast<file&> (xt));
+
+ scope& bs (t.base_scope ());
+ scope& rs (*bs.root_scope ());
+ otype ct (compile_type (t));
+
+ // Derive file name from target name.
+ //
+ if (t.path ().empty ())
+ {
+ const char* e (nullptr);
+
+ if (tsys == "win32-msvc")
+ {
+ switch (ct)
+ {
+ case otype::e: e = "exe.obj"; break;
+ case otype::a: e = "lib.obj"; break;
+ case otype::s: e = "dll.obj"; break;
+ }
+ }
+ else if (tsys == "mingw32")
+ {
+ switch (ct)
+ {
+ case otype::e: e = "exe.o"; break;
+ case otype::a: e = "a.o"; break;
+ case otype::s: e = "dll.o"; break;
+ }
+ }
+ else if (tsys == "darwin")
+ {
+ switch (ct)
+ {
+ case otype::e: e = "o"; break;
+ case otype::a: e = "a.o"; break;
+ case otype::s: e = "dylib.o"; break;
+ }
+ }
+ else
+ {
+ switch (ct)
+ {
+ case otype::e: e = "o"; break;
+ case otype::a: e = "a.o"; break;
+ case otype::s: e = "so.o"; break;
+ }
+ }
+
+ t.derive_path (e);
+ }
+
+ // Inject dependency on the output directory.
+ //
+ fsdir* dir (inject_fsdir (a, t));
+
+ // Search and match all the existing prerequisites. The injection code
+ // takes care of the ones it is adding.
+ //
+ // When cleaning, ignore prerequisites that are not in the same or a
+ // subdirectory of our project root.
+ //
+ optional<dir_paths> lib_paths; // Extract lazily.
+
+ for (prerequisite_member p: group_prerequisite_members (a, t))
+ {
+ // A dependency on a library is there so that we can get its
+ // *.export.poptions. In particular, making sure it is executed before
+ // us will only restrict parallelism. But we do need to pre-match it
+ // in order to get its prerequisite_targets populated. This is the
+ // "library meta-information protocol". See also append_lib_options()
+ // above.
+ //
+ if (p.is_a<lib> () || p.is_a<liba> () || p.is_a<libs> ())
+ {
+ if (a.operation () == update_id)
+ {
+ // Handle imported libraries. We know that for such libraries
+ // we don't need to do match() in order to get options (if
+ // any, they would be set by search_library()).
+ //
+ if (p.proj () == nullptr ||
+ link_.search_library (lib_paths, p.prerequisite) == nullptr)
+ {
+ match_only (a, p.search ());
+ }
+ }
+
+ continue;
+ }
+
+ target& pt (p.search ());
+
+ if (a.operation () == clean_id && !pt.dir.sub (rs.out_path ()))
+ continue;
+
+ build2::match (a, pt);
+ t.prerequisite_targets.push_back (&pt);
+ }
+
+ // Inject additional prerequisites. We only do it when performing update
+ // since chances are we will have to update some of our prerequisites in
+ // the process (auto-generated source code).
+ //
+ if (a == perform_update_id)
+ {
+ // The cached prerequisite target should be the same as what is in
+ // t.prerequisite_targets since we used standard search() and match()
+ // above.
+ //
+ file& src (mr.as_target<file> ());
+
+ // Make sure the output directory exists.
+ //
+ // Is this the right thing to do? It does smell a bit, but then we do
+ // worse things in inject_prerequisites() below. There is also no way
+ // to postpone this until update since we need to extract and inject
+ // header dependencies now (we don't want to be calling search() and
+ // match() in update), which means we need to cache them now as well.
+ // So the only alternative, it seems, is to cache the updates to the
+ // database until later which will sure complicate (and slow down)
+ // things.
+ //
+ if (dir != nullptr)
+ execute_direct (a, *dir);
+
+ depdb dd (t.path () + ".d");
+
+ // First should come the rule name/version.
+ //
+ if (dd.expect (rule_id) != nullptr)
+ l4 ([&]{trace << "rule mismatch forcing update of " << t;});
+
+ // Then the compiler checksum. Note that here we assume it
+ // incorporates the (default) target so that if the compiler changes
+ // but only in what it targets, then the checksum will still change.
+ //
+ if (dd.expect (cast<string> (rs[x_checksum])) != nullptr)
+ l4 ([&]{trace << "compiler mismatch forcing update of " << t;});
+
+ // Then the options checksum.
+ //
+ // The idea is to keep them exactly as they are passed to the compiler
+ // since the order may be significant.
+ //
+ sha256 cs;
+
+ // Hash *.export.poptions from prerequisite libraries.
+ //
+ lorder lo (link_order (bs, ct));
+ for (prerequisite& p: group_prerequisites (t))
+ {
+ target* pt (p.target); // Already searched and matched.
+
+ if (lib* l = pt->is_a<lib> ())
+ pt = &link_member (*l, lo);
+
+ if (pt->is_a<liba> () || pt->is_a<libs> ())
+ hash_lib_options (cs, *pt, lo,
+ c_export_poptions,
+ x_export_poptions);
+ }
+
+ hash_options (cs, t, c_poptions);
+ hash_options (cs, t, x_poptions);
+ hash_options (cs, t, c_coptions);
+ hash_options (cs, t, x_coptions);
+ hash_std (cs, rs, t);
+
+ if (ct == otype::s)
+ {
+          // On Darwin and Win32, -fPIC is the default.
+ //
+ if (tclass == "linux" || tclass == "freebsd")
+ cs.append ("-fPIC");
+ }
+
+ if (dd.expect (cs.string ()) != nullptr)
+ l4 ([&]{trace << "options mismatch forcing update of " << t;});
+
+ // Finally the source file.
+ //
+ if (dd.expect (src.path ()) != nullptr)
+ l4 ([&]{trace << "source file mismatch forcing update of " << t;});
+
+ // If any of the above checks resulted in a mismatch (different
+ // compiler, options, or source file), or if the database is newer
+ // than the target (interrupted update) then force the target update.
+ //
+ if (dd.writing () || dd.mtime () > t.mtime ())
+ t.mtime (timestamp_nonexistent);
+
+ inject (a, t, lo, src, mr.prerequisite->scope, dd);
+
+ dd.close ();
+ }
+
+ switch (a)
+ {
+ case perform_update_id:
+ return [this] (action a, target& t) {return perform_update (a, t);};
+ case perform_clean_id:
+ return [this] (action a, target& t) {return perform_clean (a, t);};
+ default:
+ return noop_recipe; // Configure update.
+ }
+ }
+
+ // Reverse-lookup target type from extension.
+ //
+ const target_type* compile::
+ map_extension (scope& s, const string& n, const string& e) const
+ {
+ // We will just have to try all of the possible ones, in the "most
+ // likely to match" order.
+ //
+ const variable& var (var_pool["extension"]);
+
+ auto test = [&s, &n, &e, &var] (const target_type& tt) -> bool
+ {
+ if (auto l = s.find (var, tt, n))
+ if (cast<string> (l) == e)
+ return true;
+
+ return false;
+ };
+
+ for (const target_type* const* p (x_inc); *p != nullptr; ++p)
+ if (test (**p)) return *p;
+
+ return nullptr;
+ }
+
+ void compile::
+ append_prefixes (prefix_map& m, target& t, const variable& var) const
+ {
+ tracer trace (x, "append_prefixes");
+
+      // If this target does not belong to any project (e.g., an
+ // "imported as installed" library), then it can't possibly
+ // generate any headers for us.
+ //
+ scope* rs (t.base_scope ().root_scope ());
+ if (rs == nullptr)
+ return;
+
+ const dir_path& out_base (t.dir);
+ const dir_path& out_root (rs->out_path ());
+
+ if (auto l = t[var])
+ {
+ const auto& v (cast<strings> (l));
+
+ for (auto i (v.begin ()), e (v.end ()); i != e; ++i)
+ {
+ // -I can either be in the "-Ifoo" or "-I foo" form. For VC it can
+ // also be /I.
+ //
+ const string& o (*i);
+
+ if (o.size () < 2 || (o[0] != '-' && o[0] != '/') || o[1] != 'I')
+ continue;
+
+ dir_path d;
+ if (o.size () == 2)
+ {
+ if (++i == e)
+ break; // Let the compiler complain.
+
+ d = dir_path (*i);
+ }
+ else
+ d = dir_path (*i, 2, string::npos);
+
+ l6 ([&]{trace << "-I '" << d << "'";});
+
+ // If we are relative or not inside our project root, then
+ // ignore.
+ //
+ if (d.relative () || !d.sub (out_root))
+ continue;
+
+ // If the target directory is a sub-directory of the include
+ // directory, then the prefix is the difference between the
+ // two. Otherwise, leave it empty.
+ //
+ // The idea here is to make this "canonical" setup work auto-
+ // magically:
+ //
+ // 1. We include all files with a prefix, e.g., <foo/bar>.
+ // 2. The library target is in the foo/ sub-directory, e.g.,
+ // /tmp/foo/.
+ // 3. The poptions variable contains -I/tmp.
+ //
+ dir_path p (out_base.sub (d) ? out_base.leaf (d) : dir_path ());
+
+ auto j (m.find (p));
+
+ if (j != m.end ())
+ {
+ if (j->second != d)
+ {
+ // We used to reject duplicates but it seems this can
+ // be reasonably expected to work according to the order
+ // of the -I options.
+ //
+ if (verb >= 4)
+ trace << "overriding dependency prefix '" << p << "'\n"
+ << " old mapping to " << j->second << "\n"
+ << " new mapping to " << d;
+
+ j->second = d;
+ }
+ }
+ else
+ {
+ l6 ([&]{trace << "'" << p << "' = '" << d << "'";});
+ m.emplace (move (p), move (d));
+ }
+ }
+ }
+ }
+
+ // Append library prefixes based on the *.export.poptions variables
+ // recursively, prerequisite libraries first.
+ //
+ void compile::
+ append_lib_prefixes (prefix_map& m, target& l, lorder lo) const
+ {
+ for (target* t: l.prerequisite_targets)
+ {
+ if (t == nullptr)
+ continue;
+
+ if (lib* l = t->is_a<lib> ())
+ t = &link_member (*l, lo); // Pick one of the members.
+
+ if (t->is_a<liba> () || t->is_a<libs> ())
+ append_lib_prefixes (m, *t, lo);
+ }
+
+ append_prefixes (m, l, c_export_poptions);
+ append_prefixes (m, l, x_export_poptions);
+ }
+
+ auto compile::
+ build_prefix_map (target& t, lorder lo) const -> prefix_map
+ {
+ prefix_map m;
+
+ // First process the include directories from prerequisite libraries.
+ // Note that here we don't need to see group members (see apply()).
+ //
+ for (prerequisite& p: group_prerequisites (t))
+ {
+ target* pt (p.target); // Already searched and matched.
+
+ if (lib* l = pt->is_a<lib> ())
+ pt = &link_member (*l, lo); // Pick one of the members.
+
+ if (pt->is_a<liba> () || pt->is_a<libs> ())
+ append_lib_prefixes (m, *pt, lo);
+ }
+
+ // Then process our own.
+ //
+ append_prefixes (m, t, c_poptions);
+ append_prefixes (m, t, x_poptions);
+
+ return m;
+ }
+
+ // Return the next make prerequisite starting from the specified
+ // position and update position to point to the start of the
+ // following prerequisite or l.size() if there are none left.
+ //
+ static string
+ next_make (const string& l, size_t& p)
+ {
+ size_t n (l.size ());
+
+ // Skip leading spaces.
+ //
+ for (; p != n && l[p] == ' '; p++) ;
+
+ // Lines containing multiple prerequisites are 80 characters max.
+ //
+ string r;
+ r.reserve (n);
+
+ // Scan the next prerequisite while watching out for escape sequences.
+ //
+ for (; p != n && l[p] != ' '; p++)
+ {
+ char c (l[p]);
+
+ if (p + 1 != n)
+ {
+ if (c == '$')
+ {
+ // Got to be another (escaped) '$'.
+ //
+ if (l[p + 1] == '$')
+ ++p;
+ }
+ else if (c == '\\')
+ {
+ // This may or may not be an escape sequence depending on whether
+ // what follows is "escapable".
+ //
+ switch (c = l[++p])
+ {
+ case '\\': break;
+ case ' ': break;
+ default: c = '\\'; --p; // Restore.
+ }
+ }
+ }
+
+ r += c;
+ }
+
+ // Skip trailing spaces.
+ //
+ for (; p != n && l[p] == ' '; p++) ;
+
+ // Skip final '\'.
+ //
+ if (p == n - 1 && l[p] == '\\')
+ p++;
+
+ return r;
+ }
+
+ // Extract the include path from the VC /showIncludes output line. Return
+ // empty string if the line is not an include note or include error. Set
+ // the good_error flag if it is an include error (which means the process
+ // will terminate with the error status that needs to be ignored).
+ //
+ static string
+ next_show (const string& l, bool& good_error)
+ {
+ // The include error should be the last line that we handle.
+ //
+ assert (!good_error);
+
+ // VC /showIncludes output. The first line is the file being
+ // compiled. Then we have the list of headers, one per line, in this
+ // form (text can presumably be translated):
+ //
+ // Note: including file: C:\Program Files (x86)\[...]\iostream
+ //
+ // Finally, if we hit a non-existent header, then we end with an error
+ // line in this form:
+ //
+ // x.cpp(3): fatal error C1083: Cannot open include file: 'd/h.hpp':
+ // No such file or directory
+ //
+
+ // Distinguishing between the include note and the include error is
+      // easy: we can just check for C1083. Distinguishing between the note and
+ // other errors/warnings is harder: an error could very well end with
+ // what looks like a path so we cannot look for the note but rather have
+ // to look for an error. Here we assume that a line containing ' CNNNN:'
+ // is an error. Should be robust enough in the face of language
+ // translation, etc.
+ //
+ size_t p (l.find (':'));
+ size_t n (l.size ());
+
+ for (; p != string::npos; p = ++p != n ? l.find (':', p) : string::npos)
+ {
+ auto isnum = [](char c) {return c >= '0' && c <= '9';};
+
+ if (p > 5 &&
+ l[p - 6] == ' ' &&
+ l[p - 5] == 'C' &&
+ isnum (l[p - 4]) &&
+ isnum (l[p - 3]) &&
+ isnum (l[p - 2]) &&
+ isnum (l[p - 1]))
+ {
+ p -= 4; // Start of the error code.
+ break;
+ }
+ }
+
+ if (p == string::npos)
+ {
+ // Include note. We assume the path is always at the end but
+ // need to handle both absolute Windows and POSIX ones.
+ //
+ size_t p (l.rfind (':'));
+
+ if (p != string::npos)
+ {
+ // See if this one is part of the Windows drive letter.
+ //
+ if (p > 1 && p + 1 < n && // 2 chars before, 1 after.
+ l[p - 2] == ' ' &&
+ alpha (l[p - 1]) &&
+ path::traits::is_separator (l[p + 1]))
+ p = l.rfind (':', p - 2);
+ }
+
+ if (p != string::npos)
+ {
+ // VC uses indentation to indicate the include nesting so there
+ // could be any number of spaces after ':'. Skip them.
+ //
+ p = l.find_first_not_of (' ', p + 1);
+ }
+
+ if (p == string::npos)
+ fail << "unable to parse /showIncludes include note line";
+
+ return string (l, p);
+ }
+ else if (l.compare (p, 4, "1083") == 0)
+ {
+ // Include error. The path is conveniently quoted with ''.
+ //
+ size_t p2 (l.rfind ('\''));
+
+ if (p2 != string::npos && p2 != 0)
+ {
+ size_t p1 (l.rfind ('\'', p2 - 1));
+
+ if (p1 != string::npos)
+ {
+ good_error = true;
+ return string (l, p1 + 1 , p2 - p1 - 1);
+ }
+ }
+
+ error << "unable to parse /showIncludes include error line";
+ throw failed ();
+ }
+ else
+ {
+ // Some other error.
+ //
+ return string ();
+ }
+ }
+
+ void compile::
+ inject (action a,
+ target& t,
+ lorder lo,
+ file& src,
+ scope& ds,
+ depdb& dd) const
+ {
+ tracer trace (x, "compile::inject");
+
+ l6 ([&]{trace << "target: " << t;});
+
+ // If things go wrong (and they often do in this area), give the user a
+ // bit extra context.
+ //
+ auto g (
+ make_exception_guard (
+ [&src]()
+ {
+ info << "while extracting header dependencies from " << src;
+ }));
+
+ scope& rs (t.root_scope ());
+
+ // Initialize lazily, only if required.
+ //
+ cstrings args;
+ string std; // Storage.
+
+ auto init_args = [&t, lo, &src, &rs, &args, &std, this] ()
+ {
+ args.push_back (cast<path> (rs[config_x]).string ().c_str ());
+
+ // Add *.export.poptions from prerequisite libraries. Note that here
+ // we don't need to see group members (see apply()).
+ //
+ for (prerequisite& p: group_prerequisites (t))
+ {
+ target* pt (p.target); // Already searched and matched.
+
+ if (lib* l = pt->is_a<lib> ())
+ pt = &link_member (*l, lo);
+
+ if (pt->is_a<liba> () || pt->is_a<libs> ())
+ append_lib_options (args, *pt, lo,
+ c_export_poptions,
+ x_export_poptions);
+ }
+
+ append_options (args, t, c_poptions);
+ append_options (args, t, x_poptions);
+
+ // Some compile options (e.g., -std, -m) affect the preprocessor.
+ //
+ append_options (args, t, c_coptions);
+ append_options (args, t, x_coptions);
+
+ append_std (args, rs, t, std);
+
+ if (t.is_a<objs> ())
+ {
+          // On Darwin and Win32, -fPIC is the default.
+ //
+ if (tclass == "linux" || tclass == "freebsd")
+ args.push_back ("-fPIC");
+ }
+
+ if (cid == "msvc")
+ {
+ args.push_back ("/nologo");
+
+ // See perform_update() for details on overriding the default
+ // exceptions and runtime.
+ //
+ if (x_lang == lang::cxx && !find_option_prefix ("/EH", args))
+ args.push_back ("/EHsc");
+
+ if (!find_option_prefixes ({"/MD", "/MT"}, args))
+ args.push_back ("/MD");
+
+ args.push_back ("/EP"); // Preprocess to stdout.
+          args.push_back ("/showIncludes"); // Goes to stderr because of /EP.
+ args.push_back (x_lang == lang::c ? "/TC" : "/TP"); // Compile as.
+ }
+ else
+ {
+ args.push_back ("-M"); // Note: -MM -MG skips missing <>-included.
+ args.push_back ("-MG"); // Treat missing headers as generated.
+
+ // Previously we used '*' as a target name but it gets expanded to
+ // the current directory file names by GCC (4.9) that comes with
+ // MSYS2 (2.4). Yes, this is the (bizarre) behavior of GCC being
+ // executed in the shell with -MQ '*' option and not just -MQ *.
+ //
+ args.push_back ("-MQ"); // Quoted target name.
+ args.push_back ("^"); // Old versions can't do empty target name.
+ }
+
+ // We are using absolute source file path in order to get absolute
+ // paths in the result. Any relative paths in the result are non-
+ // existent, potentially auto-generated headers.
+ //
+ // @@ We will also have to use absolute -I paths to guarantee
+ // that. Or just detect relative paths and error out?
+ //
+ args.push_back (src.path ().string ().c_str ());
+ args.push_back (nullptr);
+ };
+
+ // Build the prefix map lazily only if we have non-existent files.
+ // Also reuse it over restarts since it doesn't change.
+ //
+ prefix_map pm;
+
+ // If any prerequisites that we have extracted changed, then we have to
+ // redo the whole thing. The reason for this is auto-generated headers:
+ // the updated header may now include a yet-non-existent header. Unless
+ // we discover this and generate it (which, BTW, will trigger another
+ // restart since that header, in turn, can also include auto-generated
+ // headers), we will end up with an error during compilation proper.
+ //
+ // One complication with this restart logic is that we will see a
+ // "prefix" of prerequisites that we have already processed (i.e., they
+ // are already in our prerequisite_targets list) and we don't want to
+ // keep redoing this over and over again. One thing to note, however, is
+ // that the prefix that we have seen on the previous run must appear
+ // exactly the same in the subsequent run. The reason for this is that
+ // none of the files that it can possibly be based on have changed and
+ // thus it should be exactly the same. To put it another way, the
+ // presence or absence of a file in the dependency output can only
+ // depend on the previous files (assuming the compiler outputs them as
+ // it encounters them and it is hard to think of a reason why would
+ // someone do otherwise). And we have already made sure that all those
+ // files are up to date. And here is the way we are going to exploit
+ // this: we are going to keep track of how many prerequisites we have
+ // processed so far and on restart skip right to the next one.
+ //
+ // And one more thing: most of the time this list of headers would stay
+ // unchanged and extracting them by running the compiler every time is a
+ // bit wasteful. So we are going to cache them in the depdb. If the db
+ // hasn't been invalidated yet (e.g., because the compiler options have
+ // changed), then we start by reading from it. If anything is out of
+ // date then we use the same restart and skip logic to switch to the
+ // compiler run.
+ //
+
+ // Update the target "smartly". Return true if it has changed or if the
+ // passed timestamp is not timestamp_unknown and is older than the
+ // target.
+ //
+ // There would normally be a lot of headers for every source file (think
+ // all the system headers) and just calling execute_direct() on all of
+ // them can get expensive. At the same time, most of these headers are
+ // existing files that we will never be updating (again, system headers,
+ // for example) and the rule that will match them is the fallback
+ // file_rule. That rule has an optimization: it returns noop_recipe
+ // (which causes the target state to be automatically set to unchanged)
+ // if the file is known to be up to date.
+ //
+ auto update = [&trace, a] (path_target& pt, timestamp ts) -> bool
+ {
+ if (pt.state () != target_state::unchanged)
+ {
+ // We only want to restart if our call to execute() actually
+ // caused an update. In particular, the target could already
+ // have been in target_state::changed because of a dependency
+ // extraction run for some other source file.
+ //
+ target_state os (pt.state ());
+ target_state ns (execute_direct (a, pt));
+
+ if (ns != os && ns != target_state::unchanged)
+ {
+ l6 ([&]{trace << "updated " << pt
+ << "; old state " << os
+ << "; new state " << ns;});
+ return true;
+ }
+ }
+
+ if (ts != timestamp_unknown)
+ {
+ timestamp mt (pt.mtime ());
+
+ // See execute_prerequisites() for rationale behind the equal part.
+ //
+ return ts < mt || (ts == mt && pt.state () != target_state::changed);
+ }
+
+ return false;
+ };
+
+ // Update and add a header file to the list of prerequisite targets.
+ // Depending on the cache flag, the file is assumed to either have come
+ // from the depdb cache or from the compiler run. Return whether the
+ // extraction process should be restarted.
+ //
+ auto add = [&trace, &update, &pm, a, &t, lo, &ds, &dd, this]
+ (path f, bool cache) -> bool
+ {
+ if (!f.absolute ())
+ {
+ f.normalize ();
+
+ // This is probably as often an error as an auto-generated file, so
+ // trace at level 4.
+ //
+ l4 ([&]{trace << "non-existent header '" << f << "'";});
+
+ // If we already did this and build_prefix_map() returned empty,
+ // then we would have failed below.
+ //
+ if (pm.empty ())
+ pm = build_prefix_map (t, lo);
+
+ // First try the whole file. Then just the directory.
+ //
+ // @@ Has to be a separate map since the prefix can be
+ // the same as the file name.
+ //
+ // auto i (pm.find (f));
+
+ // Find the most qualified prefix of which we are a sub-path.
+ //
+ auto i (pm.end ());
+
+ if (!pm.empty ())
+ {
+ const dir_path& d (f.directory ());
+ i = pm.upper_bound (d);
+
+ // Get the greatest less than, if any. We might still not be a
+ // sub. Note also that we still have to check the last element if
+ // upper_bound() returned end().
+ //
+ if (i == pm.begin () || !d.sub ((--i)->first))
+ i = pm.end ();
+ }
+
+ if (i == pm.end ())
+ fail << "unable to map presumably auto-generated header '"
+ << f << "' to a project";
+
+ f = i->second / f;
+ }
+ else
+ {
+ // We used to just normalize the path but that could result in an
+ // invalid path (e.g., on CentOS 7 with Clang 3.4) because of the
+          // symlinks. So now we realize (i.e., realpath(3)) it instead, unless
+ // it comes from the depdb, in which case we've already done that.
+ //
+ if (!cache)
+ f.realize ();
+ }
+
+ l6 ([&]{trace << "injecting " << f;});
+
+ // Split the name into its directory part, the name part, and
+ // extension. Here we can assume the name part is a valid filesystem
+ // name.
+ //
+ // Note that if the file has no extension, we record an empty
+ // extension rather than NULL (which would signify that the default
+ // extension should be added).
+ //
+ dir_path d (f.directory ());
+ string n (f.leaf ().base ().string ());
+ const char* es (f.extension ());
+ const string* e (&extension_pool.find (es != nullptr ? es : ""));
+
+ // Determine the target type.
+ //
+ const target_type* tt (nullptr);
+
+ // See if this directory is part of any project out_root hierarchy.
+ // Note that this will miss all the headers that come from src_root
+ // (so they will be treated as generic C headers below). Generally,
+ // we don't have the ability to determine that some file belongs to
+ // src_root of some project. But that's not a problem for our
+ // purposes: it is only important for us to accurately determine
+ // target types for headers that could be auto-generated.
+ //
+ // While at it also try to determine if this target is from the src
+ // or out tree of said project.
+ //
+ dir_path out;
+
+ scope& bs (scopes.find (d));
+ if (scope* rs = bs.root_scope ())
+ {
+ tt = map_extension (bs, n, *e);
+
+ if (bs.out_path () != bs.src_path () && d.sub (bs.src_path ()))
+ out = out_src (d, *rs);
+ }
+
+ // If it is outside any project, or the project doesn't have such an
+ // extension, assume it is a plain old C header.
+ //
+ if (tt == nullptr)
+ tt = &h::static_type;
+
+ // Find or insert target.
+ //
+ // @@ OPT: move d, out, n
+ //
+ path_target& pt (
+ static_cast<path_target&> (search (*tt, d, out, n, e, &ds)));
+
+ // Assign path.
+ //
+ if (pt.path ().empty ())
+ pt.path (move (f));
+ else
+ assert (pt.path () == f);
+
+ // Match to a rule.
+ //
+ build2::match (a, pt);
+
+ // Update.
+ //
+ // If this header came from the depdb, make sure it is no older than
+ // the db itself (if it has changed since the db was written, then
+ // chances are the cached data is stale).
+ //
+ bool restart (update (pt, cache ? dd.mtime () : timestamp_unknown));
+
+ // Verify/add it to the dependency database. We do it after update in
+ // order not to add bogus files (non-existent and without a way to
+ // update).
+ //
+ if (!cache)
+ dd.expect (pt.path ());
+
+ // Add to our prerequisite target list.
+ //
+ t.prerequisite_targets.push_back (&pt);
+
+ return restart;
+ };
+
+ // If nothing so far has invalidated the dependency database, then
+ // try the cached data before running the compiler.
+ //
+ bool cache (dd.reading ());
+
+ // But, before we do all that, make sure the source file itself is up to
+ // date.
+ //
+ if (update (src, dd.mtime ()))
+ {
+ // If the file got updated or is newer than the database, then we
+ // cannot rely on the cache any further. However, the cached data
+ // could actually still be valid so the compiler run will validate it.
+ //
+ // We do need to update the database timestamp, however. Failed that,
+ // we will keep re-validating the cached data over and over again.
+ //
+ if (cache)
+ {
+ cache = false;
+ dd.touch ();
+ }
+ }
+
+ // Extraction loop: each iteration either replays the cached depdb
+ // lines (first iteration only, if the cache is still valid) or runs
+ // the compiler and parses its dependency output. A restart is
+ // requested whenever a header had to be updated (which could change
+ // subsequent includes). skip_count tracks how many headers were
+ // already processed so that a rerun can skip over them.
+ //
+ size_t skip_count (0);
+ for (bool restart (true); restart; cache = false)
+ {
+ restart = false;
+
+ if (cache)
+ {
+ // If any, this is always the first run.
+ //
+ assert (skip_count == 0);
+
+ while (dd.more ())
+ {
+ string* l (dd.read ());
+
+ // If the line is invalid, run the compiler.
+ //
+ if (l == nullptr)
+ {
+ restart = true;
+ break;
+ }
+
+ restart = add (path (move (*l)), true);
+ skip_count++;
+
+ // The same idea as in the source file update above.
+ //
+ if (restart)
+ {
+ l6 ([&]{trace << "restarting";});
+ dd.touch ();
+ break;
+ }
+ }
+ }
+ else
+ {
+ try
+ {
+ if (args.empty ())
+ init_args ();
+
+ if (verb >= 3)
+ print_process (args);
+
+ // For VC with /EP we need a pipe to stderr and stdout should go
+ // to /dev/null.
+ //
+ process pr (args.data (),
+ 0,
+ cid == "msvc" ? -2 : -1,
+ cid == "msvc" ? -1 : 2);
+
+ try
+ {
+ // We may not read all the output (e.g., due to a restart).
+ // Before we used to just close the file descriptor to signal to
+ // the other end that we are not interested in the rest. This
+ // works fine with GCC but Clang (3.7.0) finds this impolite and
+ // complains, loudly (broken pipe). So now we are going to skip
+ // until the end.
+ //
+ ifdstream is (cid == "msvc" ? pr.in_efd : pr.in_ofd,
+ fdstream_mode::text | fdstream_mode::skip,
+ ifdstream::badbit);
+
+ // In some cases we may need to ignore the error return
+ // status. The good_error flag keeps track of that. Similarly
+ // we sometimes expect the error return status based on the
+ // output we see. The bad_error flag is for that.
+ //
+ bool good_error (false), bad_error (false);
+
+ size_t skip (skip_count);
+ for (bool first (true), second (false);
+ !(restart || is.eof ()); )
+ {
+ string l;
+ getline (is, l);
+
+ if (is.fail ())
+ {
+ if (is.eof ()) // Trailing newline.
+ break;
+
+ throw ifdstream::failure ("");
+ }
+
+ l6 ([&]{trace << "header dependency line '" << l << "'";});
+
+ // Parse different dependency output formats: VC /showIncludes
+ // style for msvc, make-style dependency declaration for the
+ // rest.
+ //
+ if (cid == "msvc")
+ {
+ if (first)
+ {
+ // The first line should be the file we are compiling. If
+ // it is not, then something went wrong even before we
+ // could compile anything (e.g., file does not exist). In
+ // this case the first line (and everything after it) is
+ // presumably diagnostics.
+ //
+ if (l != src.path ().leaf ().string ())
+ {
+ text << l;
+ bad_error = true;
+ break;
+ }
+
+ first = false;
+ continue;
+ }
+
+ string f (next_show (l, good_error));
+
+ if (f.empty ()) // Some other diagnostics.
+ {
+ text << l;
+ bad_error = true;
+ break;
+ }
+
+ // Skip until where we left off.
+ //
+ if (skip != 0)
+ {
+ // We can't be skipping over a non-existent header.
+ //
+ assert (!good_error);
+ skip--;
+ }
+ else
+ {
+ restart = add (path (move (f)), false);
+ skip_count++;
+
+ // If the header does not exist, we better restart.
+ //
+ assert (!good_error || restart);
+
+ if (restart)
+ l6 ([&]{trace << "restarting";});
+ }
+ }
+ else
+ {
+ // Make dependency declaration.
+ //
+ size_t pos (0);
+
+ if (first)
+ {
+ // Empty output should mean the wait() call below will
+ // return false.
+ //
+ if (l.empty ())
+ {
+ bad_error = true;
+ break;
+ }
+
+ assert (l[0] == '^' && l[1] == ':' && l[2] == ' ');
+
+ first = false;
+ second = true;
+
+ // While normally we would have the source file on the
+ // first line, if too long, it will be moved to the next
+ // line and all we will have on this line is "^: \".
+ //
+ if (l.size () == 4 && l[3] == '\\')
+ continue;
+ else
+ pos = 3; // Skip "^: ".
+
+ // Fall through to the 'second' block.
+ }
+
+ if (second)
+ {
+ second = false;
+ next_make (l, pos); // Skip the source file.
+ }
+
+ while (pos != l.size ())
+ {
+ string f (next_make (l, pos));
+
+ // Skip until where we left off.
+ //
+ if (skip != 0)
+ {
+ skip--;
+ continue;
+ }
+
+ restart = add (path (move (f)), false);
+ skip_count++;
+
+ if (restart)
+ {
+ l6 ([&]{trace << "restarting";});
+ break;
+ }
+ }
+ }
+ }
+
+ // In case of VC, we are parsing stderr and if things go south,
+ // we need to copy the diagnostics for the user to see.
+ //
+ // Note that the eof check is important: if the stream is at
+ // eof, this and all subsequent writes to cerr will fail (and
+ // you won't see a thing).
+ //
+ if (is.peek () != ifdstream::traits_type::eof () &&
+ cid == "msvc" &&
+ bad_error)
+ cerr << is.rdbuf ();
+
+ is.close ();
+
+ // We assume the child process issued some diagnostics.
+ //
+ // NOTE(review): "exist status" in the diagnostic below looks
+ // like a typo for "exit status".
+ //
+ if (!pr.wait ())
+ {
+ if (!good_error) // Ignore expected errors (restart).
+ throw failed ();
+ }
+ else if (bad_error)
+ fail << "expected error exist status from " << x_lang
+ << " compiler";
+ }
+ catch (const ifdstream::failure&)
+ {
+ pr.wait ();
+ fail << "unable to read " << x_lang << " compiler header "
+ << "dependency output";
+ }
+ }
+ catch (const process_error& e)
+ {
+ error << "unable to execute " << args[0] << ": " << e.what ();
+
+ // In a multi-threaded program that fork()'ed but did not exec(),
+ // it is unwise to try to do any kind of cleanup (like unwinding
+ // the stack and running destructors).
+ //
+ if (e.child ())
+ exit (1);
+
+ throw failed ();
+ }
+ }
+ }
+ }
+
+ // Filter cl.exe noise (msvc.cxx).
+ //
+ void
+ msvc_filter_cl (ifdstream&, const path& src);
+
+ // Translate the source file into the object file by running the
+ // C/C++ compiler. Return changed if the object was (re)built and
+ // unchanged otherwise.
+ //
+ target_state compile::
+ perform_update (action a, target& xt) const
+ {
+ file& t (static_cast<file&> (xt));
+ file* s (execute_prerequisites<file> (x_src, a, t, t.mtime ()));
+
+ // No prerequisite changed relative to the target: nothing to do.
+ //
+ if (s == nullptr)
+ return target_state::unchanged;
+
+ scope& bs (t.base_scope ());
+ scope& rs (*bs.root_scope ());
+ otype ct (compile_type (t));
+
+ cstrings args {cast<path> (rs[config_x]).string ().c_str ()};
+
+ // Translate paths to relative (to working directory) ones. This
+ // results in easier to read diagnostics.
+ //
+ path relo (relative (t.path ()));
+ path rels (relative (s->path ()));
+
+ // Add *.export.poptions from prerequisite libraries. Note that here we
+ // don't need to see group members (see apply()).
+ //
+ lorder lo (link_order (bs, ct));
+ for (prerequisite& p: group_prerequisites (t))
+ {
+ target* pt (p.target); // Already searched and matched.
+
+ if (lib* l = pt->is_a<lib> ())
+ pt = &link_member (*l, lo);
+
+ if (pt->is_a<liba> () || pt->is_a<libs> ())
+ append_lib_options (args, *pt, lo,
+ c_export_poptions,
+ x_export_poptions);
+ }
+
+ // Generic c-family options first, then the language-specific ones.
+ //
+ append_options (args, t, c_poptions);
+ append_options (args, t, x_poptions);
+ append_options (args, t, c_coptions);
+ append_options (args, t, x_coptions);
+
+ string std, out, out1; // Storage.
+
+ append_std (args, rs, t, std);
+
+ if (cid == "msvc")
+ {
+ // The /F*: option variants with separate names only became available
+ // in VS2013/12.0. Why do we bother? Because the command line suddenly
+ // becomes readable.
+ //
+ uint64_t ver (cast<uint64_t> (rs[x_version_major]));
+
+ args.push_back ("/nologo");
+
+ // While we want to keep the low-level build as "pure" as possible,
+ // the two misguided defaults, exceptions and runtime, just have to be
+ // fixed. Otherwise the default build is pretty much unusable. But we
+ // also make sure that the user can easily disable our defaults: if we
+ // see any relevant options explicitly specified, we take our hands
+ // off.
+ //
+ // For C looks like no /EH* (exceptions supported but no C++ objects
+ // destroyed) is a reasonable default.
+ //
+ if (x_lang == lang::cxx && !find_option_prefix ("/EH", args))
+ args.push_back ("/EHsc");
+
+ // The runtime is a bit more interesting. At first it may seem like a
+ // good idea to be a bit clever and use the static runtime if we are
+ // building obja{}. And for obje{} we could decide which runtime to
+ // use based on the library link order: if it is static-only, then we
+ // could assume the static runtime. But it is indeed too clever: when
+ // building liba{} we have no idea who is going to use it. It could be
+ // an exe{} that links both static and shared libraries (and is
+ // therefore built with the shared runtime). And to safely use the
+ // static runtime, everything must be built with /MT and there should
+ // be no DLLs in the picture. So we are going to play it safe and
+ // always default to the shared runtime.
+ //
+ // In a similar vein, it would seem reasonable to use the debug runtime
+ // if we are compiling with debug. But, again, there will be fireworks
+ // if we have some projects built with debug and some without and then
+ // we try to link them together (which is not an unreasonable thing to
+ // do). So by default we will always use the release runtime.
+ //
+ if (!find_option_prefixes ({"/MD", "/MT"}, args))
+ args.push_back ("/MD");
+
+ // The presence of /Zi or /ZI causes the compiler to write debug info
+ // to the .pdb file. By default it is a shared file called vcNN.pdb
+ // (where NN is the VC version) created (wait for it) in the current
+ // working directory (and not the directory of the .obj file). Also,
+ // because it is shared, there is a special Windows service that
+ // serializes access. We, of course, want none of that so we will
+ // create a .pdb per object file.
+ //
+ // Note that this also changes the name of the .idb file (used for
+ // minimal rebuild and incremental compilation): cl.exe take the /Fd
+ // value and replaces the .pdb extension with .idb.
+ //
+ // Note also that what we are doing here appears to be incompatible
+ // with PCH (/Y* options) and /Gm (minimal rebuild).
+ //
+ if (find_options ({"/Zi", "/ZI"}, args))
+ {
+ if (ver >= 18)
+ args.push_back ("/Fd:");
+ else
+ out1 = "/Fd";
+
+ out1 += relo.string ();
+ out1 += ".pdb";
+
+ args.push_back (out1.c_str ());
+ }
+
+ if (ver >= 18)
+ {
+ args.push_back ("/Fo:");
+ args.push_back (relo.string ().c_str ());
+ }
+ else
+ {
+ out = "/Fo" + relo.string ();
+ args.push_back (out.c_str ());
+ }
+
+ args.push_back ("/c"); // Compile only.
+ args.push_back (x_lang == lang::c ? "/TC" : "/TP"); // Compile as.
+ args.push_back (rels.string ().c_str ());
+ }
+ else
+ {
+ // GCC/Clang-style command line.
+ //
+ if (ct == otype::s)
+ {
+ // On Darwin, Win32 -fPIC is the default.
+ //
+ if (tclass == "linux" || tclass == "freebsd")
+ args.push_back ("-fPIC");
+ }
+
+ args.push_back ("-o");
+ args.push_back (relo.string ().c_str ());
+
+ args.push_back ("-c");
+ args.push_back (rels.string ().c_str ());
+ }
+
+ args.push_back (nullptr);
+
+ if (verb >= 2)
+ print_process (args);
+ else if (verb)
+ text << x_name << ' ' << *s;
+
+ try
+ {
+ // VC cl.exe sends diagnostics to stdout. It also prints the file name
+ // being compiled as the first line. So for cl.exe we redirect stdout
+ // to a pipe, filter that noise out, and send the rest to stderr.
+ //
+ // For other compilers redirect stdout to stderr, in case any of them
+ // tries to pull off something similar. For sane compilers this should
+ // be harmless.
+ //
+ bool filter (cid == "msvc");
+
+ process pr (args.data (), 0, (filter ? -1 : 2));
+
+ if (filter)
+ {
+ try
+ {
+ ifdstream is (pr.in_ofd, fdstream_mode::text, ifdstream::badbit);
+
+ msvc_filter_cl (is, rels);
+
+ // If anything remains in the stream, send it all to stderr. Note
+ // that the eof check is important: if the stream is at eof, this
+ // and all subsequent writes to cerr will fail (and you won't see
+ // a thing).
+ //
+ if (is.peek () != ifdstream::traits_type::eof ())
+ cerr << is.rdbuf ();
+
+ is.close ();
+ }
+ catch (const ifdstream::failure&) {} // Assume exits with error.
+ }
+
+ if (!pr.wait ())
+ throw failed ();
+
+ // Should we go to the filesystem and get the new mtime? We
+ // know the file has been modified, so instead just use the
+ // current clock time. It has the advantage of having the
+ // subseconds precision.
+ //
+ t.mtime (system_clock::now ());
+ return target_state::changed;
+ }
+ catch (const process_error& e)
+ {
+ error << "unable to execute " << args[0] << ": " << e.what ();
+
+ // In a multi-threaded program that fork()'ed but did not exec(),
+ // it is unwise to try to do any kind of cleanup (like unwinding
+ // the stack and running destructors).
+ //
+ if (e.child ())
+ exit (1);
+
+ throw failed ();
+ }
+ }
+
+ // Clean the object file plus the auxiliary files the compiler may have
+ // produced: the .d dependency database and, for MSVC, the per-object
+ // .idb/.pdb files.
+ //
+ target_state compile::
+ perform_clean (action a, target& xt) const
+ {
+ file& t (static_cast<file&> (xt));
+
+ // NOTE(review): assigning a braced list to an already-declared
+ // initializer_list leaves it referring to a temporary backing array
+ // whose lifetime ends with the assignment full-expression, so the
+ // use in clean_extra() below is technically dangling. Confirm and
+ // consider calling clean_extra() directly in each branch instead.
+ //
+ initializer_list<const char*> e;
+
+ if (cid == "msvc")
+ e = {".d", ".idb", ".pdb"};
+ else
+ e = {".d"};
+
+ return clean_extra (a, t, e);
+ }
+ }
+}
diff --git a/build2/cc/guess b/build2/cc/guess
new file mode 100644
index 0000000..977e081
--- /dev/null
+++ b/build2/cc/guess
@@ -0,0 +1,125 @@
+// file : build2/cc/guess -*- C++ -*-
+// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BUILD2_CC_GUESS
+#define BUILD2_CC_GUESS
+
+#include <build2/types>
+#include <build2/utility>
+
+#include <build2/cc/types>
+
+namespace build2
+{
+ namespace cc
+ {
+ // Compiler id consisting of a type and optional variant. If the variant
+ // is not empty, then the id is spelled out as 'type-variant', similar to
+ // target triplets (this also means that the type cannot contain '-').
+ //
+ // Currently recognized compilers and their ids:
+ //
+ // gcc GCC gcc/g++
+ // clang Vanilla Clang clang/clang++
+ // clang-apple Apple Clang clang/clang++ and the gcc/g++ "alias"
+ // icc Intel icc/icpc
+ // msvc Microsoft cl.exe
+ //
+ struct compiler_id
+ {
+ std::string type;
+ std::string variant;
+
+ // An id with an empty type is the "no id" value (see guess_result).
+ //
+ bool
+ empty () const {return type.empty ();}
+
+ // Render as 'type' or 'type-variant'.
+ //
+ std::string
+ string () const {return variant.empty () ? type : type + "-" + variant;}
+ };
+
+ // Print the id in its 'type[-variant]' form.
+ //
+ inline ostream&
+ operator<< (ostream& os, const compiler_id& id)
+ {
+ return os << id.string ();
+ }
+
+ // Compiler version. Here we map the various compiler version formats to
+ // something that resembles the MAJOR.MINOR.PATCH-BUILD form of the
+ // Semantic Versioning. While the MAJOR.MINOR part is relatively
+ // straightforward, PATCH may be empty and BUILD can contain pretty much
+ // anything (including spaces).
+ //
+ // gcc A.B.C[ ...] {A, B, C, ...}
+ // clang A.B.C[( |-)...] {A, B, C, ...}
+ // clang-apple A.B[.C] ... {A, B, C, ...}
+ // icc A.B[.C.D] ... {A, B, C, D ...}
+ // msvc A.B.C[.D] {A, B, C, D}
+ //
+ // Note that the clang-apple version is a custom Apple version and does
+ // not correspond to the vanilla clang version.
+ //
+ struct compiler_version
+ {
+ // The original (unparsed) version string as extracted from the
+ // compiler output.
+ //
+ std::string string;
+
+ // Currently all the compilers that we support have numeric MAJOR,
+ // MINOR, and PATCH components and it makes sense to represent them as
+ // integers for easy comparison. If we meet a compiler for which this
+ // doesn't hold, then we will probably just set these to 0 and let the
+ // user deal with the string representation.
+ //
+ uint64_t major;
+ uint64_t minor;
+ uint64_t patch;
+ std::string build;
+ };
+
+ // Compiler information.
+ //
+ // The signature is normally the -v/--version line that was used to guess
+ // the compiler id and its version.
+ //
+ // The checksum is used to detect compiler changes. It is calculated in a
+ // compiler-specific manner (usually the output of -v/--version) and is
+ // not bulletproof (e.g., it most likely won't detect that the underlying
+ // assembler or linker has changed). However, it should detect most
+ // common cases, such as an upgrade to a new version or a configuration
+ // change.
+ //
+ // Note that we assume the checksum incorporates the (default) target so
+ // that if the compiler changes but only in what it targets, then the
+ // checksum will still change. This is currently the case for all the
+ // compilers that we support.
+ //
+ // The target is the compiler's target architecture triplet. Note that
+ // unlike all the preceding fields, this one takes into account the
+ // compile options (e.g., -m32).
+ //
+ // The pattern is the toolchain program pattern that could sometimes be
+ // derived for some toolchains. For example, i686-w64-mingw32-*.
+ //
+ struct compiler_info
+ {
+ compiler_id id;
+ compiler_version version;
+ string signature;
+ string checksum;
+ string target;
+ string pattern;
+ };
+
+ // Determine the compiler id, version, target, etc., for the specified
+ // language and compiler path by running the compiler (fails on error).
+ //
+ // In a sense this is analogous to the language standard which we handle
+ // via a virtual function in common. However, duplicating this hairy ball
+ // of fur in multiple places doesn't seem wise, especially considering
+ // that most of it will be the same, at least for C and C++.
+ //
+ compiler_info
+ guess (lang,
+ const path& xc,
+ const strings* c_coptions,
+ const strings* x_coptions);
+ }
+}
+
+#endif // BUILD2_CC_GUESS
diff --git a/build2/cc/guess.cxx b/build2/cc/guess.cxx
new file mode 100644
index 0000000..d80dddd
--- /dev/null
+++ b/build2/cc/guess.cxx
@@ -0,0 +1,1052 @@
+// file : build2/cc/guess.cxx -*- C++ -*-
+// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
+// license : MIT; see accompanying LICENSE file
+
+#include <build2/cc/guess>
+
+#include <cstring> // strlen()
+
+#include <build2/diagnostics>
+
+using namespace std;
+
+namespace build2
+{
+ namespace cc
+ {
+ // Pre-guess the compiler type based on the compiler executable name.
+ // Return empty string if can't make a guess (for example, because the
+ // compiler name is a generic 'c++'). Note that it only guesses the type,
+ // not the variant.
+ //
+ static string
+ pre_guess (lang xl, const path& xc)
+ {
+ tracer trace ("cc::pre_guess");
+
+ // The executable name without directory or extension.
+ //
+ const string s (xc.leaf ().base ().string ());
+ size_t n (s.size ());
+
+ // Name separator characters (e.g., '-' in 'g++-4.8').
+ //
+ auto sep = [] (char c) -> bool
+ {
+ return c == '-' || c == '_' || c == '.';
+ };
+
+ // Return true if x occurs in s as a whole "word", that is, separated
+ // at both ends either by a separator or the string boundary (so that,
+ // e.g., 'gcc' matches in 'gcc-4.8' but not in 'gccfoo').
+ //
+ auto stem = [&sep, &s, n] (const char* x) -> bool
+ {
+ size_t m (strlen (x));
+ size_t p (s.find (x, 0, m));
+
+ return p != string::npos &&
+ (p == 0 || sep (s[p - 1])) && // Separated at the beginning.
+ ((p += m) == n || sep (s[p])); // Separated at the end.
+ };
+
+ // Warn if the user specified a C compiler instead of C++ or vice versa.
+ //
+ lang o; // Other language.
+ const char* as (nullptr); // Actual stem.
+ const char* es (nullptr); // Expected stem.
+
+ switch (xl)
+ {
+ case lang::c:
+ {
+ // Keep msvc last since 'cl' is very generic.
+ //
+ if (stem ("gcc")) return "gcc";
+ if (stem ("clang")) return "clang";
+ if (stem ("icc")) return "icc";
+ if (stem ("cl")) return "msvc";
+
+ // Looks like a C++ driver; remember what we saw and what the C
+ // counterpart would be for the warning below.
+ //
+ if (stem (as = "g++")) es = "gcc";
+ else if (stem (as = "clang++")) es = "clang";
+ else if (stem (as = "icpc")) es = "icc";
+ else if (stem (as = "c++")) es = "cc";
+
+ o = lang::cxx;
+ break;
+ }
+ case lang::cxx:
+ {
+ // Keep msvc last since 'cl' is very generic.
+ //
+ if (stem ("g++")) return "gcc";
+ if (stem ("clang++")) return "clang";
+ if (stem ("icpc")) return "icc";
+ if (stem ("cl")) return "msvc";
+
+ if (stem (as = "gcc")) es = "g++";
+ else if (stem (as = "clang")) es = "clang++";
+ else if (stem (as = "icc")) es = "icpc";
+ else if (stem (as = "cc")) es = "c++";
+
+ o = lang::c;
+ break;
+ }
+ }
+
+ if (es != nullptr)
+ warn << xc << " looks like a " << o << " compiler" <<
+ info << "should it be '" << es << "' instead of '" << as << "'?";
+
+ l4 ([&]{trace << "unable to guess compiler type of " << xc;});
+ return "";
+ }
+
+ // Guess the compiler type and variant by running it. If the pre argument
+ // is not empty, then only "confirm" the pre-guess. Return empty result if
+ // unable to guess.
+ //
+ struct guess_result
+ {
+ compiler_id id;
+ string signature; // The output line that identified the compiler.
+ string checksum; // May be left empty (filled by the caller).
+
+ // An empty id means no guess was made.
+ //
+ bool
+ empty () const {return id.empty ();}
+ };
+
+ // Try, in order: -v (gcc/clang/clang-apple), --version (icc), and no
+ // options at all (msvc), returning the first recognized signature line.
+ //
+ static guess_result
+ guess (lang, const path& xc, const string& pre)
+ {
+ tracer trace ("cc::guess");
+
+ guess_result r;
+
+ // Start with -v. This will cover gcc and clang.
+ //
+ // While icc also writes what may seem like something we can use to
+ // detect it:
+ //
+ // icpc version 16.0.2 (gcc version 4.9.0 compatibility)
+ //
+ // That first word is actually the executable name. So if we rename
+ // icpc to foocpc, we will get:
+ //
+ // foocpc version 16.0.2 (gcc version 4.9.0 compatibility)
+ //
+ // In fact, if someone renames icpc to g++, there will be no way for
+ // us to detect this. Oh, well, their problem.
+ //
+ if (r.id.empty () && (pre.empty () || pre == "gcc" || pre == "clang"))
+ {
+ auto f = [] (string& l) -> guess_result
+ {
+ // The gcc/g++ -v output will have a line (currently last) in the
+ // form:
+ //
+ // "gcc version X.Y.Z ..."
+ //
+ // The "version" word can probably be translated. For example:
+ //
+ // gcc version 3.4.4
+ // gcc version 4.2.1
+ // gcc version 4.8.2 (GCC)
+ // gcc version 4.8.5 (Ubuntu 4.8.5-2ubuntu1~14.04.1)
+ // gcc version 4.9.2 (Ubuntu 4.9.2-0ubuntu1~14.04)
+ // gcc version 5.1.0 (Ubuntu 5.1.0-0ubuntu11~14.04.1)
+ // gcc version 6.0.0 20160131 (experimental) (GCC)
+ //
+ if (l.compare (0, 4, "gcc ") == 0)
+ return guess_result {{"gcc", ""}, move (l), ""};
+
+ // The Apple clang/clang++ -v output will have a line (currently
+ // first) in the form:
+ //
+ // "Apple (LLVM|clang) version X.Y.Z ..."
+ //
+ // Apple clang version 3.1 (tags/Apple/clang-318.0.58) (based on LLVM 3.1svn)
+ // Apple clang version 4.0 (tags/Apple/clang-421.0.60) (based on LLVM 3.1svn)
+ // Apple clang version 4.1 (tags/Apple/clang-421.11.66) (based on LLVM 3.1svn)
+ // Apple LLVM version 4.2 (clang-425.0.28) (based on LLVM 3.2svn)
+ // Apple LLVM version 5.0 (clang-500.2.79) (based on LLVM 3.3svn)
+ // Apple LLVM version 5.1 (clang-503.0.40) (based on LLVM 3.4svn)
+ // Apple LLVM version 6.0 (clang-600.0.57) (based on LLVM 3.5svn)
+ // Apple LLVM version 6.1.0 (clang-602.0.53) (based on LLVM 3.6.0svn)
+ // Apple LLVM version 7.0.0 (clang-700.0.53)
+ // Apple LLVM version 7.0.0 (clang-700.1.76)
+ // Apple LLVM version 7.0.2 (clang-700.1.81)
+ // Apple LLVM version 7.3.0 (clang-703.0.16.1)
+ //
+ // Note that the gcc/g++ "aliases" for clang/clang++ also include
+ // this line but it is (currently) preceded by "Configured with:
+ // ...".
+ //
+ // Check for Apple clang before the vanilla one since the above line
+ // also includes "clang".
+ //
+ if (l.compare (0, 6, "Apple ") == 0 &&
+ (l.compare (6, 5, "LLVM ") == 0 ||
+ l.compare (6, 6, "clang ") == 0))
+ return guess_result {{"clang", "apple"}, move (l), ""};
+
+ // The vanilla clang/clang++ -v output will have a line (currently
+ // first) in the form:
+ //
+ // "[... ]clang version X.Y.Z[-...] ..."
+ //
+ // The "version" word can probably be translated. For example:
+ //
+ // FreeBSD clang version 3.4.1 (tags/RELEASE_34/dot1-final 208032) 20140512
+ // Ubuntu clang version 3.5.0-4ubuntu2~trusty2 (tags/RELEASE_350/final) (based on LLVM 3.5.0)
+ // Ubuntu clang version 3.6.0-2ubuntu1~trusty1 (tags/RELEASE_360/final) (based on LLVM 3.6.0)
+ // clang version 3.7.0 (tags/RELEASE_370/final)
+ //
+ if (l.find ("clang ") != string::npos)
+ return guess_result {{"clang", ""}, move (l), ""};
+
+ return guess_result ();
+ };
+
+ // The -v output contains other information (such as the compiler
+ // build configuration for gcc or the selected gcc installation for
+ // clang) which makes sense to include into the compiler checksum. So
+ // ask run() to calculate it for every line of the -v output.
+ //
+ // One notable consequence of this is that if the locale changes
+ // (e.g., via LC_ALL), then the compiler signature will most likely
+ // change as well because of the translated text.
+ //
+ sha256 cs;
+
+ // Suppress all the compiler errors because we may be trying an
+ // unsupported option.
+ //
+ r = run<guess_result> (xc, "-v", f, false, false, &cs);
+
+ if (!r.empty ())
+ r.checksum = cs.string ();
+ }
+
+ // Next try --version to detect icc.
+ //
+ if (r.empty () && (pre.empty () || pre == "icc"))
+ {
+ auto f = [] (string& l) -> guess_result
+ {
+ // The first line has the " (ICC) " in it, for example:
+ //
+ // icpc (ICC) 9.0 20060120
+ // icpc (ICC) 11.1 20100414
+ // icpc (ICC) 12.1.0 20110811
+ // icpc (ICC) 14.0.0 20130728
+ // icpc (ICC) 15.0.2 20150121
+ // icpc (ICC) 16.0.2 20160204
+ // icc (ICC) 16.0.2 20160204
+ //
+ if (l.find (" (ICC) ") != string::npos)
+ return guess_result {{"icc", ""}, move (l), ""};
+
+ return guess_result ();
+ };
+
+ r = run<guess_result> (xc, "--version", f, false);
+ }
+
+ // Finally try to run it without any options to detect msvc.
+ //
+ if (r.empty () && (pre.empty () || pre == "msvc"))
+ {
+ auto f = [] (string& l) -> guess_result
+ {
+ // Check for "Microsoft (R)" and "C/C++" in the first line as a
+ // signature since all other words/positions can be translated. For
+ // example:
+ //
+ // Microsoft (R) 32-bit C/C++ Optimizing Compiler Version 13.10.6030 for 80x86
+ // Microsoft (R) 32-bit C/C++ Optimizing Compiler Version 14.00.50727.762 for 80x86
+ // Microsoft (R) 32-bit C/C++ Optimizing Compiler Version 15.00.30729.01 for 80x86
+ // Compilador de optimizacion de C/C++ de Microsoft (R) version 16.00.30319.01 para x64
+ // Microsoft (R) C/C++ Optimizing Compiler Version 17.00.50727.1 for x86
+ // Microsoft (R) C/C++ Optimizing Compiler Version 18.00.21005.1 for x86
+ // Microsoft (R) C/C++ Optimizing Compiler Version 19.00.23026 for x86
+ //
+ // In the recent versions the architecture is either "x86", "x64",
+ // or "ARM".
+ //
+ if (l.find ("Microsoft (R)") != string::npos &&
+ l.find ("C/C++") != string::npos)
+ return guess_result {{"msvc", ""}, move (l), ""};
+
+ return guess_result ();
+ };
+
+ r = run<guess_result> (xc, f, false);
+ }
+
+ // A non-empty pre-guess must agree with what we actually determined;
+ // otherwise discard the result.
+ //
+ if (!r.empty ())
+ {
+ if (!pre.empty () && r.id.type != pre)
+ {
+ l4 ([&]{trace << "compiler type guess mismatch"
+ << ", pre-guessed " << pre
+ << ", determined " << r.id.type;});
+
+ r = guess_result ();
+ }
+ else
+ l5 ([&]{trace << xc << " is " << r.id << ": '"
+ << r.signature << "'";});
+ }
+ else
+ l4 ([&]{trace << "unable to determine compiler type of " << xc;});
+
+ return r;
+ }
+
+ // Complete the compiler information for gcc: parse the version out of
+ // the signature line and query the target triplet from the compiler.
+ //
+ static compiler_info
+ guess_gcc (lang,
+ const path& xc,
+ const strings* c_coptions,
+ const strings* x_coptions,
+ guess_result&& gr)
+ {
+ tracer trace ("cc::guess_gcc");
+
+ // Extract the version. The signature line has the following format
+ // though language words can be translated and even rearranged (see
+ // examples above).
+ //
+ // "gcc version A.B.C[ ...]"
+ //
+ string& s (gr.signature);
+
+ // Scan the string as words and look for one that looks like a version.
+ //
+ size_t b (0), e (0);
+ while (next_word (s, b, e))
+ {
+ // The third argument to find_first_not_of() is the length of the
+ // first argument, not the length of the interval to check. So to
+ // limit it to [b, e) we are also going to compare the result to the
+ // end of the word position (first space). In fact, we can just check
+ // if it is >= e.
+ //
+ if (s.find_first_not_of ("1234567890.", b, 11) >= e)
+ break;
+ }
+
+ if (b == e)
+ fail << "unable to extract gcc version from '" << s << "'";
+
+ compiler_version v;
+ v.string.assign (s, b, string::npos);
+
+ // Split the version into components.
+ //
+ size_t vb (b), ve (b);
+ auto next = [&s, b, e, &vb, &ve] (const char* m) -> uint64_t
+ {
+ try
+ {
+ if (next_word (s, e, vb, ve, '.'))
+ return stoull (string (s, vb, ve - vb));
+ }
+ catch (const invalid_argument&) {}
+ catch (const out_of_range&) {}
+
+ error << "unable to extract gcc " << m << " version from '"
+ << string (s, b, e - b) << "'";
+ throw failed ();
+ };
+
+ v.major = next ("major");
+ v.minor = next ("minor");
+ v.patch = next ("patch");
+
+ // Whatever follows the A.B.C component (if anything) is the build.
+ //
+ if (e != s.size ())
+ v.build.assign (s, e + 1, string::npos);
+
+ // Figure out the target architecture. This is actually a lot trickier
+ // than one would have hoped.
+ //
+ // There is the -dumpmachine option but gcc doesn't adjust it per the
+ // compile options (e.g., -m32). However, starting with 4.6 it has the
+ // -print-multiarch option which gives (almost) the right answer. The
+ // "almost" part has to do with it not honoring the -arch option (which
+ // is really what this compiler is building for). To get to that, we
+ // would have to resort to a hack like this:
+ //
+ // gcc -v -E - 2>&1 | grep cc1
+ // .../cc1 ... -mtune=generic -march=x86-64
+ //
+ // Also, -print-multiarch will print an empty line if the compiler
+ // actually wasn't built with multi-arch support.
+ //
+ // So for now this is what we are going to do for the time being: First
+ // try -print-multiarch. If that works out (recent gcc configure with
+ // multi-arch support), then use the result. Otherwise, fallback to
+ // -dumpmachine (older gcc or not multi-arch).
+ //
+ cstrings args {xc.string ().c_str (), "-print-multiarch"};
+ if (c_coptions != nullptr) append_options (args, *c_coptions);
+ if (x_coptions != nullptr) append_options (args, *x_coptions);
+ args.push_back (nullptr);
+
+ // The output of both -print-multiarch and -dumpmachine is a single line
+ // containing just the target triplet.
+ //
+ auto f = [] (string& l) {return move (l);};
+
+ string t (run<string> (args.data (), f, false));
+
+ if (t.empty ())
+ {
+ // NOTE(review): "doesn's" in the trace message below looks like a
+ // typo for "doesn't".
+ //
+ l5 ([&]{trace << xc << " doesn's support -print-multiarch, "
+ << "falling back to -dumpmachine";});
+
+ args[1] = "-dumpmachine";
+ t = run<string> (args.data (), f);
+ }
+
+ if (t.empty ())
+ fail << "unable to extract target architecture from " << xc
+ << " -print-multiarch or -dumpmachine output";
+
+ return compiler_info {
+ move (gr.id),
+ move (v),
+ move (gr.signature),
+ move (gr.checksum), // Calculated on whole -v output.
+ move (t),
+ string ()}; // No pattern is derived for gcc here.
+ }
+
+ static compiler_info
+ guess_clang (lang,
+ const path& xc,
+ const strings* c_coptions,
+ const strings* x_coptions,
+ guess_result&& gr)
+ {
+ // Extract the version. Here we will try to handle both vanilla and
+ // Apple clang since the signature lines are fairly similar. They have
+ // the following format though language words can probably be translated
+ // and even rearranged (see examples above).
+ //
+ // "[... ]clang version A.B.C[( |-)...]"
+ // "Apple (clang|LLVM) version A.B[.C] ..."
+ //
+ string& s (gr.signature);
+
+ // Some overrides for testing.
+ //
+ //s = "clang version 3.7.0 (tags/RELEASE_370/final)";
+ //
+ //gr.id.variant = "apple";
+ //s = "Apple LLVM version 7.3.0 (clang-703.0.16.1)";
+ //s = "Apple clang version 3.1 (tags/Apple/clang-318.0.58) (based on LLVM 3.1svn)";
+
+ // Scan the string as words and look for one that looks like a version.
+ // Use '-' as a second delimiter to handle versions like
+ // "3.6.0-2ubuntu1~trusty1".
+ //
+ size_t b (0), e (0);
+ while (next_word (s, b, e, ' ', '-'))
+ {
+ // The third argument to find_first_not_of() is the length of the
+ // first argument, not the length of the interval to check. So to
+ // limit it to [b, e) we are also going to compare the result to the
+ // end of the word position (first space). In fact, we can just check
+ // if it is >= e.
+ //
+ if (s.find_first_not_of ("1234567890.", b, 11) >= e)
+ break;
+ }
+
+ if (b == e)
+ fail << "unable to extract clang version from '" << s << "'";
+
+ compiler_version v;
+ v.string.assign (s, b, string::npos);
+
+ // Split the version into components.
+ //
+ size_t vb (b), ve (b);
+ auto next = [&s, b, e, &vb, &ve] (const char* m, bool opt) -> uint64_t
+ {
+ try
+ {
+ if (next_word (s, e, vb, ve, '.'))
+ return stoull (string (s, vb, ve - vb));
+
+ if (opt)
+ return 0;
+ }
+ catch (const invalid_argument&) {}
+ catch (const out_of_range&) {}
+
+ error << "unable to extract clang " << m << " version from '"
+ << string (s, b, e - b) << "'";
+ throw failed ();
+ };
+
+ v.major = next ("major", false);
+ v.minor = next ("minor", false);
+ v.patch = next ("patch", gr.id.variant == "apple");
+
+ if (e != s.size ())
+ v.build.assign (s, e + 1, string::npos);
+
+ // Figure out the target architecture.
+ //
+ // Unlike gcc, clang doesn't have -print-multiarch. Its -dumpmachine,
+ // however, respects the compile options (e.g., -m32).
+ //
+ cstrings args {xc.string ().c_str (), "-dumpmachine"};
+ if (c_coptions != nullptr) append_options (args, *c_coptions);
+ if (x_coptions != nullptr) append_options (args, *x_coptions);
+ args.push_back (nullptr);
+
+ // The output of -dumpmachine is a single line containing just the
+ // target triplet.
+ //
+ string t (run<string> (args.data (), [] (string& l) {return move (l);}));
+
+ if (t.empty ())
+ fail << "unable to extract target architecture from " << xc
+ << " -dumpmachine output";
+
+ return compiler_info {
+ move (gr.id),
+ move (v),
+ move (gr.signature),
+ move (gr.checksum), // Calculated on whole -v output.
+ move (t),
+ string ()};
+ }
+
+ static compiler_info
+ guess_icc (lang xl,
+ const path& xc,
+ const strings* c_coptions,
+ const strings* x_coptions,
+ guess_result&& gr)
+ {
+ // Extract the version. If the version has the fourth component, then
+ // the signature line (extracted with --version) won't include it. So we
+ // will have to get a more elaborate line with -V. We will also have to
+ // do it to get the compiler target that respects the -m option: icc
+ // doesn't support -print-multiarch like gcc and its -dumpmachine
+ // doesn't respect -m like clang. In fact, its -dumpmachine is
+ // completely broken as it appears to print the compiler's host and not
+ // the target (e.g., .../bin/ia32/icpc prints x86_64-linux-gnu).
+ //
+ // Some examples of the signature lines from -V output:
+ //
+ // Intel(R) C++ Compiler for 32-bit applications, Version 9.1 Build 20070215Z Package ID: l_cc_c_9.1.047
+ // Intel(R) C++ Compiler for applications running on Intel(R) 64, Version 10.1 Build 20071116
+ // Intel(R) C++ Compiler for applications running on IA-32, Version 10.1 Build 20071116 Package ID: l_cc_p_10.1.010
+ // Intel C++ Intel 64 Compiler Professional for applications running on Intel 64, Version 11.0 Build 20081105 Package ID: l_cproc_p_11.0.074
+ // Intel(R) C++ Intel(R) 64 Compiler Professional for applications running on Intel(R) 64, Version 11.1 Build 20091130 Package ID: l_cproc_p_11.1.064
+ // Intel C++ Intel 64 Compiler XE for applications running on Intel 64, Version 12.0.4.191 Build 20110427
+ // Intel(R) C++ Intel(R) 64 Compiler for applications running on Intel(R) 64, Version 16.0.2.181 Build 20160204
+ // Intel(R) C++ Intel(R) 64 Compiler for applications running on IA-32, Version 16.0.2.181 Build 20160204
+ // Intel(R) C++ Intel(R) 64 Compiler for applications running on Intel(R) MIC Architecture, Version 16.0.2.181 Build 20160204
+ // Intel(R) C Intel(R) 64 Compiler for applications running on Intel(R) MIC Architecture, Version 16.0.2.181 Build 20160204
+ //
+ // We should probably also assume the language words can be translated
+ // and even rearranged.
+ //
+ string& s (gr.signature);
+ s.clear ();
+
+ auto f = [] (string& l)
+ {
+ return l.compare (0, 5, "Intel") == 0 && (l[5] == '(' || l[5] == ' ')
+ ? move (l)
+ : string ();
+ };
+
+ // The -V output is sent to STDERR.
+ //
+ s = run<string> (xc, "-V", f, false);
+
+ if (s.empty ())
+ fail << "unable to extract signature from " << xc << " -V output";
+
+ if (s.find (xl == lang::c ? " C " : " C++ ") == string::npos)
+ fail << xc << " does not appear to be the Intel " << xl
+ << " compiler" <<
+ info << "extracted signature: '" << s << "'";
+
+ // Scan the string as words and look for the version. It consists of only
+ // digits and periods and contains at least one period.
+ //
+
+ // Some overrides for testing.
+ //
+ //s = "Intel(R) C++ Compiler for 32-bit applications, Version 9.1 Build 20070215Z Package ID: l_cc_c_9.1.047";
+ //s = "Intel(R) C++ Compiler for applications running on Intel(R) 64, Version 10.1 Build 20071116";
+ //s = "Intel(R) C++ Compiler for applications running on IA-32, Version 10.1 Build 20071116 Package ID: l_cc_p_10.1.010";
+ //s = "Intel C++ Intel 64 Compiler Professional for applications running on Intel 64, Version 11.0 Build 20081105 Package ID: l_cproc_p_11.0.074";
+ //s = "Intel(R) C++ Intel(R) 64 Compiler Professional for applications running on Intel(R) 64, Version 11.1 Build 20091130 Package ID: l_cproc_p_11.1.064";
+ //s = "Intel C++ Intel 64 Compiler XE for applications running on Intel 64, Version 12.0.4.191 Build 20110427";
+
+ size_t b (0), e (0), n;
+ while (next_word (s, b, e, ' ', ',') != 0)
+ {
+ // The third argument to find_first_not_of() is the length of the
+ // first argument, not the length of the interval to check. So to
+ // limit it to [b, e) we are also going to compare the result to the
+ // end of the word position (first space). In fact, we can just check
+ // if it is >= e. Similar logic for find_first_of() except that we add
+ // space to the list of characters to make sure we don't go too far.
+ //
+ if (s.find_first_not_of ("1234567890.", b, 11) >= e &&
+ s.find_first_of (". ", b, 2) < e)
+ break;
+ }
+
+ if (b == e)
+ fail << "unable to extract icc version from '" << s << "'";
+
+ compiler_version v;
+ v.string.assign (s, b, string::npos);
+
+ // Split the version into components.
+ //
+ size_t vb (b), ve (b);
+ auto next = [&s, b, e, &vb, &ve] (const char* m, bool opt) -> uint64_t
+ {
+ try
+ {
+ if (next_word (s, e, vb, ve, '.'))
+ return stoull (string (s, vb, ve - vb));
+
+ if (opt)
+ return 0;
+ }
+ catch (const invalid_argument&) {}
+ catch (const out_of_range&) {}
+
+ error << "unable to extract icc " << m << " version from '"
+ << string (s, b, e - b) << "'";
+ throw failed ();
+ };
+
+ v.major = next ("major", false);
+ v.minor = next ("minor", false);
+ v.patch = next ("patch", true);
+
+ if (vb != ve && next_word (s, e, vb, ve, '.'))
+ v.build.assign (s, vb, ve - vb);
+
+ if (e != s.size ())
+ {
+ if (!v.build.empty ())
+ v.build += ' ';
+
+ v.build.append (s, e + 1, string::npos);
+ }
+
+ // Figure out the target CPU by re-running the compiler with -V and
+ // compile options (which may include, e.g., -m32). The output will
+ // contain two CPU keywords: the first is the host and the second is the
+ // target (hopefully this won't get rearranged by the translation).
+ //
+ // The CPU keywords (based on the above samples) appear to be:
+ //
+ // "32-bit"
+ // "IA-32"
+ // "Intel" "64"
+ // "Intel(R)" "64"
+ // "Intel(R)" "MIC" (-dumpmachine says: x86_64-k1om-linux)
+ //
+ cstrings args {xc.string ().c_str (), "-V"};
+ if (c_coptions != nullptr) append_options (args, *c_coptions);
+ if (x_coptions != nullptr) append_options (args, *x_coptions);
+ args.push_back (nullptr);
+
+ // The -V output is sent to STDERR.
+ //
+ string t (run<string> (args.data (), f, false));
+
+ if (t.empty ())
+ fail << "unable to extract target architecture from " << xc
+ << " -V output";
+
+ string arch;
+ for (b = e = 0; (n = next_word (t, b, e, ' ', ',')) != 0; )
+ {
+ if (t.compare (b, n, "Intel(R)", 8) == 0 ||
+ t.compare (b, n, "Intel", 5) == 0)
+ {
+ if ((n = next_word (t, b, e, ' ', ',')) != 0)
+ {
+ if (t.compare (b, n, "64", 2) == 0)
+ {
+ arch = "x86_64";
+ }
+ else if (t.compare (b, n, "MIC", 3) == 0)
+ {
+ arch = "x86_64"; // Plus "-k1om-linux" from -dumpmachine below.
+ }
+ }
+ else
+ break;
+ }
+ else if (t.compare (b, n, "IA-32", 5) == 0 ||
+ t.compare (b, n, "32-bit", 6) == 0)
+ {
+ arch = "i386";
+ }
+ }
+
+ if (arch.empty ())
+ fail << "unable to extract icc target architecture from '" << t << "'";
+
+ // So we have the CPU but we still need the rest of the triplet. While
+ // icc currently doesn't support cross-compilation (at least on Linux)
+ // and we could have just used the build triplet (i.e., the architecture
+ // on which we are running), who knows what will happen in the future.
+ // So instead we are going to use -dumpmachine and substitute the CPU.
+ //
+ t = run<string> (xc, "-dumpmachine", [] (string& l) {return move (l);});
+
+ if (t.empty ())
+ fail << "unable to extract target architecture from " << xc
+ << " -dumpmachine output";
+
+ // The first component in the triplet is always CPU.
+ //
+ size_t p (t.find ('-'));
+
+ if (p == string::npos)
+ fail << "unable to parse icc target architecture '" << t << "'";
+
+ arch.append (t, p, string::npos);
+
+ // Use the signature line to generate the checksum.
+ //
+ sha256 cs (s);
+
+ return compiler_info {
+ move (gr.id),
+ move (v),
+ move (gr.signature),
+ cs.string (),
+ move (arch),
+ string ()};
+ }
+
+ static compiler_info
+ guess_msvc (lang,
+ const path& xc,
+ const strings*,
+ const strings*,
+ guess_result&& gr)
+ {
+ // Extract the version. The signature line has the following format
+ // though language words can be translated and even rearranged (see
+ // examples above).
+ //
+ // "Microsoft (R) C/C++ Optimizing Compiler Version A.B.C[.D] for CPU"
+ //
+ // The CPU keywords (based on the above samples) appear to be:
+ //
+ // "80x86"
+ // "x86"
+ // "x64"
+ // "ARM"
+ //
+ string& s (gr.signature);
+
+ // Some overrides for testing.
+ //
+ //s = "Microsoft (R) 32-bit C/C++ Optimizing Compiler Version 15.00.30729.01 for 80x86";
+ //s = "Compilador de optimizacion de C/C++ de Microsoft (R) version 16.00.30319.01 para x64";
+
+ // Scan the string as words and look for the version. While doing this
+ // also keep an eye on the CPU keywords.
+ //
+ string arch;
+ size_t b (0), e (0);
+
+ auto check_cpu = [&arch, &s, &b, &e] () -> bool
+ {
+ size_t n (e - b);
+
+ if (s.compare (b, n, "x64", 3) == 0 ||
+ s.compare (b, n, "x86", 3) == 0 ||
+ s.compare (b, n, "ARM", 3) == 0 ||
+ s.compare (b, n, "80x86", 5) == 0)
+ {
+ arch.assign (s, b, n);
+ return true;
+ }
+
+ return false;
+ };
+
+ while (next_word (s, b, e, ' ', ','))
+ {
+ // First check for the CPU keywords in case in some language they come
+ // before the version.
+ //
+ if (check_cpu ())
+ continue;
+
+ // The third argument to find_first_not_of() is the length of the
+ // first argument, not the length of the interval to check. So to
+ // limit it to [b, e) we are also going to compare the result to the
+ // end of the word position (first space). In fact, we can just check
+ // if it is >= e.
+ //
+ if (s.find_first_not_of ("1234567890.", b, 11) >= e)
+ break;
+ }
+
+ if (b == e)
+ fail << "unable to extract msvc version from '" << s << "'";
+
+ compiler_version v;
+ v.string.assign (s, b, e - b);
+
+ // Split the version into components.
+ //
+ size_t vb (b), ve (b);
+ auto next = [&s, b, e, &vb, &ve] (const char* m) -> uint64_t
+ {
+ try
+ {
+ if (next_word (s, e, vb, ve, '.'))
+ return stoull (string (s, vb, ve - vb));
+ }
+ catch (const invalid_argument&) {}
+ catch (const out_of_range&) {}
+
+ error << "unable to extract msvc " << m << " version from '"
+ << string (s, b, e - b) << "'";
+ throw failed ();
+ };
+
+ v.major = next ("major");
+ v.minor = next ("minor");
+ v.patch = next ("patch");
+
+ if (next_word (s, e, vb, ve, '.'))
+ v.build.assign (s, vb, ve - vb);
+
+ // Continue scanning for the CPU.
+ //
+ if (e != s.size ())
+ {
+ while (next_word (s, b, e, ' ', ','))
+ {
+ if (check_cpu ())
+ break;
+ }
+ }
+
+ if (arch.empty ())
+ fail << "unable to extract msvc target architecture from "
+ << "'" << s << "'";
+
+ // Now we need to map x86, x64, and ARM to the target triplets. The
+ // problem is, there aren't any established ones so we got to invent
+ // them ourselves. Based on the discussion in <butl/triplet>, we need
+ // something in the CPU-VENDOR-OS-ABI form.
+ //
+ // The CPU part is fairly straightforward with x86 mapped to 'i386' (or
+ // maybe 'i686'), x64 to 'x86_64', and ARM to 'arm' (it could also
+ // include the version, e.g., 'armv8').
+ //
+ // The (toolchain) VENDOR is also straightforward: 'microsoft'. Why not
+ // omit it? Two reasons: firstly, there are other compilers with the
+ // otherwise same target, for example Intel C/C++, and it could be
+ // useful to distinguish between them. Secondly, by having all four
+ // components we remove any parsing ambiguity.
+ //
+ // OS-ABI is where things are not as clear cut. The OS part shouldn't
+ // probably be just 'windows' since we have Win32 and WinCE. And WinRT.
+ // And Universal Windows Platform (UWP). So perhaps the following values
+ // for OS: 'win32', 'wince', 'winrt', 'winup'.
+ //
+ // For 'win32' the ABI part could signal the Microsoft C/C++ runtime by
+ // calling it 'msvc'. And seeing that the runtimes are incompatible from
+ // version to version, we should probably add the 'X.Y' version at the
+ // end (so we essentially mimic the DLL name, e.g., msvcr120.dll). Some
+ // suggested we also encode the runtime type (those /M* options) though
+ // I am not sure: the only "redistributable" runtime is multi-threaded
+ // release DLL.
+ //
+ // The ABI part for the other OS values needs thinking. For 'winrt' and
+ // 'winup' it probably makes sense to encode the WINAPI_FAMILY macro
+ // value (perhaps also with the version). Some of its values:
+ //
+ // WINAPI_FAMILY_APP Windows 10
+ // WINAPI_FAMILY_PC_APP Windows 8.1
+ // WINAPI_FAMILY_PHONE_APP Windows Phone 8.1
+ //
+ // For 'wince' we may also want to add the OS version, e.g., 'wince4.2'.
+ //
+ // Putting it all together, Visual Studio 2015 will then have the
+ // following target triplets:
+ //
+ // x86 i386-microsoft-win32-msvc14.0
+ // x64 x86_64-microsoft-win32-msvc14.0
+ // ARM arm-microsoft-winup-???
+ //
+ if (arch == "ARM")
+ fail << "cl.exe ARM/WinRT/UWP target is not yet supported";
+ else
+ {
+ if (arch == "x64")
+ arch = "x86_64-microsoft-win32-msvc";
+ else if (arch == "x86" || arch == "80x86")
+ arch = "i386-microsoft-win32-msvc";
+ else
+ assert (false);
+
+ // Mapping of compiler versions to runtime versions:
+ //
+ // 19.00 140/14.0 VS2015
+ // 18.00 120/12.0 VS2013
+ // 17.00 110/11.0 VS2012
+ // 16.00 100/10.0 VS2010
+ // 15.00 90/9.0 VS2008
+ // 14.00 80/8.0 VS2005
+ // 13.10 71/7.1 VS2003
+ //
+ /**/ if (v.major == 19 && v.minor == 0) arch += "14.0";
+ else if (v.major == 18 && v.minor == 0) arch += "12.0";
+ else if (v.major == 17 && v.minor == 0) arch += "11.0";
+ else if (v.major == 16 && v.minor == 0) arch += "10.0";
+ else if (v.major == 15 && v.minor == 0) arch += "9.0";
+ else if (v.major == 14 && v.minor == 0) arch += "8.0";
+ else if (v.major == 13 && v.minor == 10) arch += "7.1";
+ else fail << "unable to map msvc compiler version '" << v.string
+ << "' to runtime version";
+ }
+
+ // Derive the toolchain pattern.
+ //
+ // If the compiler name is/starts with 'cl' (e.g., cl.exe, cl-14),
+ // then replace it with '*' and use it as a pattern for lib, link,
+ // etc.
+ //
+ string pat;
+
+ if (xc.size () > 2)
+ {
+ const string& l (xc.leaf ().string ());
+ size_t n (l.size ());
+
+ if (n >= 2 &&
+ (l[0] == 'c' || l[0] == 'C') &&
+ (l[1] == 'l' || l[1] == 'L') &&
+ (n == 2 || l[2] == '.' || l[2] == '-'))
+ {
+ path p (xc.directory ());
+ p /= "*";
+ p += l.c_str () + 2;
+ pat = move (p).string ();
+ }
+ }
+
+ // Use the signature line to generate the checksum.
+ //
+ sha256 cs (s);
+
+ return compiler_info {
+ move (gr.id),
+ move (v),
+ move (gr.signature),
+ cs.string (),
+ move (arch),
+ move (pat)};
+ }
+
+ compiler_info
+ guess (lang xl,
+ const path& xc,
+ const strings* c_coptions,
+ const strings* x_coptions)
+ {
+ string pre (pre_guess (xl, xc));
+ guess_result gr;
+
+ // If we could pre-guess the type based on the executable name, then
+ // try the test just for that compiler.
+ //
+ if (!pre.empty ())
+ {
+ gr = guess (xl, xc, pre);
+
+ if (gr.empty ())
+ warn << xc << " name looks like " << pre << " but it is not";
+ }
+
+ if (gr.empty ())
+ gr = guess (xl, xc, "");
+
+ if (gr.empty ())
+ fail << "unable to guess " << xl << " compiler type of " << xc;
+
+ compiler_info r;
+ const compiler_id& id (gr.id);
+
+ if (id.type == "gcc")
+ {
+ assert (id.variant.empty ());
+ r = guess_gcc (xl, xc, c_coptions, x_coptions, move (gr));
+ }
+ else if (id.type == "clang")
+ {
+ assert (id.variant.empty () || id.variant == "apple");
+ r = guess_clang (xl, xc, c_coptions, x_coptions, move (gr));
+ }
+ else if (id.type == "icc")
+ {
+ assert (id.variant.empty ());
+ r = guess_icc (xl, xc, c_coptions, x_coptions, move (gr));
+ }
+ else if (id.type == "msvc")
+ {
+ assert (id.variant.empty ());
+ r = guess_msvc (xl, xc, c_coptions, x_coptions, move (gr));
+ }
+ else
+ assert (false);
+
+ // Derive binutils pattern unless this has already been done by the
+ // compiler-specific code.
+ //
+ if (r.pattern.empty ())
+ {
+ // When cross-compiling the whole toolchain is normally prefixed with
+ // the target triplet, e.g., x86_64-w64-mingw32-{gcc,g++,ar,ld}.
+ //
+ // BTW, for GCC we also get gcc-{ar,ranlib} which add support for the
+ // LTO plugin though it seems more recent GNU binutils (2.25) are able
+ // to load the plugin when needed automatically. So it doesn't seem we
+ // should bother trying to support this on our end (one way we could
+ // do it is by passing config.bin.{ar,ranlib} as hints).
+ //
+ const string& t (r.target);
+ size_t n (t.size ());
+
+ if (xc.size () > n + 1)
+ {
+ const string& l (xc.leaf ().string ());
+
+ if (l.size () > n + 1 && l.compare (0, n, t) == 0 && l[n] == '-')
+ {
+ path p (xc.directory ());
+ p /= t;
+ p += "-*";
+ r.pattern = move (p).string ();
+ }
+ }
+ }
+
+ return r;
+ }
+ }
+}
diff --git a/build2/cc/init b/build2/cc/init
new file mode 100644
index 0000000..d8ebd0e
--- /dev/null
+++ b/build2/cc/init
@@ -0,0 +1,55 @@
+// file : build2/cc/init -*- C++ -*-
+// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BUILD2_CC_INIT
+#define BUILD2_CC_INIT
+
+#include <build2/types>
+#include <build2/utility>
+
+#include <build2/module>
+
+namespace build2
+{
+ namespace cc
+ {
+ bool
+ vars_init (scope&,
+ scope&,
+ const location&,
+ unique_ptr<module_base>&,
+ bool,
+ bool,
+ const variable_map&);
+
+ bool
+ config_init (scope&,
+ scope&,
+ const location&,
+ unique_ptr<module_base>&,
+ bool,
+ bool,
+ const variable_map&);
+
+ bool
+ core_init (scope&,
+ scope&,
+ const location&,
+ unique_ptr<module_base>&,
+ bool,
+ bool,
+ const variable_map&);
+
+ bool
+ init (scope&,
+ scope&,
+ const location&,
+ unique_ptr<module_base>&,
+ bool,
+ bool,
+ const variable_map&);
+ }
+}
+
+#endif // BUILD2_CC_INIT
diff --git a/build2/cc/init.cxx b/build2/cc/init.cxx
new file mode 100644
index 0000000..2623c79
--- /dev/null
+++ b/build2/cc/init.cxx
@@ -0,0 +1,321 @@
+// file : build2/cc/init.cxx -*- C++ -*-
+// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
+// license : MIT; see accompanying LICENSE file
+
+#include <build2/cc/init>
+
+#include <butl/triplet>
+
+#include <build2/scope>
+#include <build2/context>
+#include <build2/diagnostics>
+
+#include <build2/config/utility>
+
+#include <build2/cc/target>
+
+using namespace std;
+using namespace butl;
+
+namespace build2
+{
+ namespace cc
+ {
+ bool
+ vars_init (scope& r,
+ scope&,
+ const location&,
+ unique_ptr<module_base>&,
+ bool first,
+ bool,
+ const variable_map&)
+ {
+ tracer trace ("cc::vars_init");
+ l5 ([&]{trace << "for " << r.out_path ();});
+
+ assert (first);
+
+ // Enter variables. Note: some overridable, some not.
+ //
+ auto& v (var_pool);
+
+ v.insert<strings> ("config.cc.poptions", true);
+ v.insert<strings> ("config.cc.coptions", true);
+ v.insert<strings> ("config.cc.loptions", true);
+ v.insert<strings> ("config.cc.libs", true);
+
+ v.insert<strings> ("cc.poptions");
+ v.insert<strings> ("cc.coptions");
+ v.insert<strings> ("cc.loptions");
+ v.insert<strings> ("cc.libs");
+
+ v.insert<strings> ("cc.export.poptions");
+ v.insert<strings> ("cc.export.coptions");
+ v.insert<strings> ("cc.export.loptions");
+ v.insert<strings> ("cc.export.libs");
+
+ // Hint variables (not overridable).
+ //
+ v.insert<string> ("config.cc.id");
+ v.insert<string> ("config.cc.target");
+ v.insert<string> ("config.cc.pattern");
+
+ return true;
+ }
+
+ bool
+ config_init (scope& r,
+ scope& b,
+ const location& loc,
+ unique_ptr<module_base>&,
+ bool first,
+ bool,
+ const variable_map& hints)
+ {
+ tracer trace ("cc::config_init");
+ l5 ([&]{trace << "for " << b.out_path ();});
+
+ // Load cc.vars.
+ //
+ if (first)
+ {
+ if (!cast_false<bool> (b["cc.vars.loaded"]))
+ load_module ("cc.vars", r, b, loc);
+ }
+
+ // Configure.
+ //
+ if (first)
+ {
+ // config.cc.id
+ //
+ {
+ // This value must be hinted.
+ //
+ r.assign<string> ("cc.id") = cast<string> (hints["config.cc.id"]);
+ }
+
+ // config.cc.target
+ //
+ {
+ // This value must be hinted and already canonicalized.
+ //
+ const string& s (cast<string> (hints["config.cc.target"]));
+
+ try
+ {
+ //@@ We do it in the hinting module and here. Any way not to
+ // duplicate the effort? Maybe move the splitting here and
+ // simply duplicate the values there?
+ //
+ triplet t (s);
+
+ // Enter as cc.target.{cpu,vendor,system,version,class}.
+ //
+ r.assign<string> ("cc.target") = s;
+ r.assign<string> ("cc.target.cpu") = move (t.cpu);
+ r.assign<string> ("cc.target.vendor") = move (t.vendor);
+ r.assign<string> ("cc.target.system") = move (t.system);
+ r.assign<string> ("cc.target.version") = move (t.version);
+ r.assign<string> ("cc.target.class") = move (t.class_);
+ }
+ catch (const invalid_argument& e)
+ {
+ assert (false); // Should have been caught by the hinting module.
+ }
+ }
+
+ // config.cc.pattern
+ //
+ {
+ // This value could be hinted.
+ //
+ if (auto l = hints["config.cc.pattern"])
+ r.assign<string> ("cc.pattern") = cast<string> (l);
+ }
+
+ // Note that we are not having a config report since it will just
+ // duplicate what has already been printed by the hinting module.
+ }
+
+ // config.cc.{p,c,l}options
+ // config.cc.libs
+ //
+ // @@ Same nonsense as in module.
+ //
+ //
+ b.assign ("cc.poptions") += cast_null<strings> (
+ config::optional (r, "config.cc.poptions"));
+
+ b.assign ("cc.coptions") += cast_null<strings> (
+ config::optional (r, "config.cc.coptions"));
+
+ b.assign ("cc.loptions") += cast_null<strings> (
+ config::optional (r, "config.cc.loptions"));
+
+ b.assign ("cc.libs") += cast_null<strings> (
+ config::optional (r, "config.cc.libs"));
+
+ // Load the bin.config module.
+ //
+ if (!cast_false<bool> (b["bin.config.loaded"]))
+ {
+ // Prepare configuration hints. They are only used on the first load
+ // of bin.config so we only populate them on our first load.
+ //
+ variable_map h;
+ if (first)
+ {
+ h.assign ("config.bin.target") = cast<string> (r["cc.target"]);
+ if (auto l = r["cc.pattern"])
+ h.assign ("config.bin.pattern") = cast<string> (l);
+ }
+
+ load_module ("bin.config", r, b, loc, false, h);
+ }
+
+ // Verify bin's target matches ours (we do it even if we loaded it
+ // ourselves since the target can come from the configuration and not
+ // our hint).
+ //
+ if (first)
+ {
+ const string& ct (cast<string> (r["cc.target"]));
+ const string& bt (cast<string> (r["bin.target"]));
+
+ if (bt != ct)
+ fail (loc) << "cc and bin module target mismatch" <<
+ info << "cc.target is " << ct <<
+ info << "bin.target is " << bt;
+ }
+
+ const string& cid (cast<string> (r["cc.id"]));
+ const string& tsys (cast<string> (r["cc.target.system"]));
+
+ // Load bin.*.config for bin.* modules we may need (see core_init()
+ // below).
+ //
+ if (auto l = r["config.bin.lib"])
+ {
+ if (cast<string> (l) != "shared")
+ {
+ if (!cast_false<bool> (b["bin.ar.config.loaded"]))
+ load_module ("bin.ar.config", r, b, loc);
+ }
+ }
+
+ if (cid == "msvc")
+ {
+ if (!cast_false<bool> (b["bin.ld.config.loaded"]))
+ load_module ("bin.ld.config", r, b, loc);
+ }
+
+ if (tsys == "mingw32")
+ {
+ if (!cast_false<bool> (b["bin.rc.config.loaded"]))
+ load_module ("bin.rc.config", r, b, loc);
+ }
+
+ return true;
+ }
+
+ bool
+ core_init (scope& r,
+ scope& b,
+ const location& loc,
+ unique_ptr<module_base>&,
+ bool,
+ bool,
+ const variable_map& hints)
+ {
+ tracer trace ("cc::core_init");
+ l5 ([&]{trace << "for " << b.out_path ();});
+
+ // Load cc.config.
+ //
+ if (!cast_false<bool> (b["cc.config.loaded"]))
+ load_module ("cc.config", r, b, loc, false, hints);
+
+ // Load the bin module.
+ //
+ if (!cast_false<bool> (b["bin.loaded"]))
+ load_module ("bin", r, b, loc);
+
+ const string& cid (cast<string> (r["cc.id"]));
+ const string& tsys (cast<string> (r["cc.target.system"]));
+
+ // Load the bin.ar module unless we were asked to only build shared
+ // libraries.
+ //
+ if (auto l = r["config.bin.lib"])
+ {
+ if (cast<string> (l) != "shared")
+ {
+ if (!cast_false<bool> (b["bin.ar.loaded"]))
+ load_module ("bin.ar", r, b, loc);
+ }
+ }
+
+ // In the VC world you link things directly with link.exe so load the
+ // bin.ld module.
+ //
+ if (cid == "msvc")
+ {
+ if (!cast_false<bool> (b["bin.ld.loaded"]))
+ load_module ("bin.ld", r, b, loc);
+ }
+
+ // If our target is MinGW, then we will need the resource compiler
+ // (windres) in order to embed manifests into executables.
+ //
+ if (tsys == "mingw32")
+ {
+ if (!cast_false<bool> (b["bin.rc.loaded"]))
+ load_module ("bin.rc", r, b, loc);
+ }
+
+ return true;
+ }
+
+ bool
+ init (scope& r,
+ scope& b,
+ const location& loc,
+ unique_ptr<module_base>&,
+ bool,
+ bool,
+ const variable_map&)
+ {
+ tracer trace ("cc::init");
+ l5 ([&]{trace << "for " << b.out_path ();});
+
+ // This module is an "alias" for c.config and cxx.config. Its intended
+ // use is to make sure that the C/C++ configuration is captured in an
+ // amalgamation rather than subprojects.
+ //
+ // We want to order the loading to match what user specified on the
+ // command line (config.c or config.cxx). This way the first loaded
+ // module (with user-specified config.*) will hint the compiler to the
+ // second.
+ //
+ bool lc (!cast_false<bool> (b["c.config.loaded"]));
+ bool lp (!cast_false<bool> (b["cxx.config.loaded"]));
+
+ // If none of them are already loaded, load c first only if config.c
+ // is specified.
+ //
+ if (lc && lp && r["config.c"])
+ {
+ load_module ("c.config", r, b, loc);
+ load_module ("cxx.config", r, b, loc);
+ }
+ else
+ {
+ if (lp) load_module ("cxx.config", r, b, loc);
+ if (lc) load_module ("c.config", r, b, loc);
+ }
+
+ return true;
+ }
+ }
+}
diff --git a/build2/cc/install b/build2/cc/install
new file mode 100644
index 0000000..e2be905
--- /dev/null
+++ b/build2/cc/install
@@ -0,0 +1,39 @@
+// file : build2/cc/install -*- C++ -*-
+// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BUILD2_CC_INSTALL
+#define BUILD2_CC_INSTALL
+
+#include <build2/types>
+#include <build2/utility>
+
+#include <build2/install/rule>
+
+#include <build2/cc/types>
+#include <build2/cc/common>
+
+namespace build2
+{
+ namespace cc
+ {
+ class link;
+
+ class install: public build2::install::file_rule, virtual common
+ {
+ public:
+ install (data&&, const link&);
+
+ virtual target*
+ filter (action, target&, prerequisite_member) const;
+
+ virtual match_result
+ match (action, target&, const string&) const;
+
+ private:
+ const link& link_;
+ };
+ }
+}
+
+#endif // BUILD2_CC_INSTALL
diff --git a/build2/cc/install.cxx b/build2/cc/install.cxx
new file mode 100644
index 0000000..b674886
--- /dev/null
+++ b/build2/cc/install.cxx
@@ -0,0 +1,70 @@
+// file : build2/cc/install.cxx -*- C++ -*-
+// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
+// license : MIT; see accompanying LICENSE file
+
+#include <build2/cc/install>
+
+#include <build2/bin/target>
+
+#include <build2/cc/link> // match()
+#include <build2/cc/utility>
+
+using namespace std;
+
+namespace build2
+{
+ namespace cc
+ {
+ using namespace bin;
+
+ install::
+ install (data&& d, const link& l): common (move (d)), link_ (l) {}
+
+ target* install::
+ filter (action a, target& t, prerequisite_member p) const
+ {
+ if (t.is_a<exe> ())
+ {
+ // Don't install executable's prerequisite headers.
+ //
+ if (x_header (p))
+ return nullptr;
+ }
+
+ // If this is a shared library prerequisite, install it as long as it
+ // is in the same amalgamation as we are.
+ //
+ // @@ Shouldn't we also install a static library prerequisite of a
+ // static library?
+ //
+ if ((t.is_a<exe> () || t.is_a<libs> ()) &&
+ (p.is_a<lib> () || p.is_a<libs> ()))
+ {
+ target* pt (&p.search ());
+
+ // If this is the lib{} group, pick a member which we would link.
+ //
+ if (lib* l = pt->is_a<lib> ())
+ pt = &link_member (*l, link_order (t.base_scope (), link_type (t)));
+
+ if (pt->is_a<libs> ()) // Can be liba{}.
+ return pt->in (t.weak_scope ()) ? pt : nullptr;
+ }
+
+ return file_rule::filter (a, t, p);
+ }
+
+ match_result install::
+ match (action a, target& t, const string& hint) const
+ {
+ // @@ How do we split the hint between the two?
+ //
+
+ // We only want to handle installation if we are also the
+ // ones building this target. So first run link's match().
+ //
+ match_result r (link_.match (a, t, hint));
+ return r ? install::file_rule::match (a, t, "") : r;
+ }
+ }
+}
diff --git a/build2/cc/link b/build2/cc/link
new file mode 100644
index 0000000..8be386f
--- /dev/null
+++ b/build2/cc/link
@@ -0,0 +1,78 @@
+// file : build2/cc/link -*- C++ -*-
+// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BUILD2_CC_LINK
+#define BUILD2_CC_LINK
+
+#include <build2/types>
+#include <build2/utility>
+
+#include <build2/rule>
+
+#include <build2/bin/target>
+
+#include <build2/cc/types>
+#include <build2/cc/common>
+
+namespace build2
+{
+ namespace cc
+ {
+ class link: public rule, virtual common
+ {
+ public:
+ link (data&&);
+
+ virtual match_result
+ match (action, target&, const string& hint) const;
+
+ virtual recipe
+ apply (action, target&, const match_result&) const;
+
+ target_state
+ perform_update (action, target&) const;
+
+ target_state
+ perform_clean (action, target&) const;
+
+ private:
+ friend class compile;
+
+ // Extract system library search paths from GCC or compatible (Clang,
+ // Intel) using the -print-search-dirs option.
+ //
+ void
+ gcc_library_search_paths (scope&, dir_paths&) const;
+
+ // Extract system library search paths from VC (msvc.cxx).
+ //
+ void
+ msvc_library_search_paths (scope&, dir_paths&) const;
+
+ dir_paths
+ extract_library_paths (scope&) const;
+
+ // Alternative search logic for VC (msvc.cxx).
+ //
+ bin::liba*
+ msvc_search_static (const path&, const dir_path&, prerequisite&) const;
+
+ bin::libs*
+ msvc_search_shared (const path&, const dir_path&, prerequisite&) const;
+
+ target*
+ search_library (optional<dir_paths>&, prerequisite&) const;
+
+ // Windows-specific (windows-manifest.cxx).
+ //
+ path
+ windows_manifest (file&, bool rpath_assembly) const;
+
+ private:
+ const string rule_id;
+ };
+ }
+}
+
+#endif // BUILD2_CC_LINK
diff --git a/build2/cc/link.cxx b/build2/cc/link.cxx
new file mode 100644
index 0000000..4bebc6f
--- /dev/null
+++ b/build2/cc/link.cxx
@@ -0,0 +1,1850 @@
+// file : build2/cc/link.cxx -*- C++ -*-
+// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
+// license : MIT; see accompanying LICENSE file
+
+#include <build2/cc/link>
+
+#include <cstdlib> // exit()
+#include <iostream> // cerr
+
+#include <butl/path-map>
+
+#include <build2/depdb>
+#include <build2/scope>
+#include <build2/context>
+#include <build2/variable>
+#include <build2/algorithm>
+#include <build2/filesystem>
+#include <build2/diagnostics>
+
+#include <build2/bin/target>
+
+#include <build2/cc/target> // c
+#include <build2/cc/utility>
+
+using namespace std;
+using namespace butl;
+
+namespace build2
+{
+ namespace cc
+ {
+ using namespace bin;
+
+ link::
+ link (data&& d)
+ : common (move (d)),
+ rule_id (string (x) += ".link 1")
+ {
+ }
+
+ // Extract system library search paths from GCC or compatible (Clang,
+ // Intel) using the -print-search-dirs option.
+ //
+ void link::
+ gcc_library_search_paths (scope& bs, dir_paths& r) const
+ {
+ scope& rs (*bs.root_scope ());
+
+ cstrings args;
+ string std; // Storage.
+
+ args.push_back (cast<path> (rs[config_x]).string ().c_str ());
+ append_options (args, bs, c_coptions);
+ append_options (args, bs, x_coptions);
+ append_std (args, rs, bs, std);
+ append_options (args, bs, c_loptions);
+ append_options (args, bs, x_loptions);
+ args.push_back ("-print-search-dirs");
+ args.push_back (nullptr);
+
+ if (verb >= 3)
+ print_process (args);
+
+ string l;
+ try
+ {
+ process pr (args.data (), 0, -1); // Open pipe to stdout.
+
+ try
+ {
+ ifdstream is (pr.in_ofd, fdstream_mode::skip, ifdstream::badbit);
+
+ string s;
+ while (getline (is, s))
+ {
+ if (s.compare (0, 12, "libraries: =") == 0)
+ {
+ l.assign (s, 12, string::npos);
+ break;
+ }
+ }
+
+ is.close (); // Don't block.
+
+ if (!pr.wait ())
+ throw failed ();
+ }
+ catch (const ifdstream::failure&)
+ {
+ pr.wait ();
+ fail << "error reading " << x_lang << " compiler -print-search-dirs "
+ << "output";
+ }
+ }
+ catch (const process_error& e)
+ {
+ error << "unable to execute " << args[0] << ": " << e.what ();
+
+ if (e.child ())
+ exit (1);
+
+ throw failed ();
+ }
+
+ if (l.empty ())
+ fail << "unable to extract " << x_lang << " compiler system library "
+ << "search paths";
+
+ // Now the fun part: figuring out which delimiter is used. Normally it
+ // is ':' but on Windows it is ';' (or can be; who knows for sure). Also
+ // note that these paths are absolute (or should be). So here is what we
+ // are going to do: first look for ';'. If found, then that's the
+ // delimiter. If not found, then there are two cases: it is either a
+ // single Windows path or the delimiter is ':'. To distinguish these two
+ // cases we check if the path starts with a Windows drive.
+ //
+ char d (';');
+ string::size_type e (l.find (d));
+
+ if (e == string::npos &&
+ (l.size () < 2 || l[0] == '/' || l[1] != ':'))
+ {
+ d = ':';
+ e = l.find (d);
+ }
+
+ // Now chop it up. We already have the position of the first delimiter
+ // (if any).
+ //
+ for (string::size_type b (0);; e = l.find (d, (b = e + 1)))
+ {
+ r.emplace_back (l, b, (e != string::npos ? e - b : e));
+ r.back ().normalize ();
+
+ if (e == string::npos)
+ break;
+ }
+ }
+
+ dir_paths link::
+ extract_library_paths (scope& bs) const
+ {
+ dir_paths r;
+
+ // Extract user-supplied search paths (i.e., -L, /LIBPATH).
+ //
+ auto extract = [&r, this] (const value& val)
+ {
+ const auto& v (cast<strings> (val));
+
+ for (auto i (v.begin ()), e (v.end ()); i != e; ++i)
+ {
+ const string& o (*i);
+
+ dir_path d;
+
+ if (cid == "msvc")
+ {
+ // /LIBPATH:<dir> (case-insensitive).
+ //
+ if ((o[0] == '/' || o[0] == '-') &&
+ (i->compare (1, 8, "LIBPATH:") == 0 ||
+ i->compare (1, 8, "libpath:") == 0))
+ d = dir_path (*i, 9, string::npos);
+ else
+ continue;
+ }
+ else
+ {
+ // -L can either be in the "-L<dir>" or "-L <dir>" form.
+ //
+ if (*i == "-L")
+ {
+ if (++i == e)
+ break; // Let the compiler complain.
+
+ d = dir_path (*i);
+ }
+ else if (i->compare (0, 2, "-L") == 0)
+ d = dir_path (*i, 2, string::npos);
+ else
+ continue;
+ }
+
+ // Ignore relative paths. Or maybe we should warn?
+ //
+ if (!d.relative ())
+ r.push_back (move (d));
+ }
+ };
+
+ if (auto l = bs[c_loptions]) extract (*l);
+ if (auto l = bs[x_loptions]) extract (*l);
+
+ if (cid == "msvc")
+ msvc_library_search_paths (bs, r);
+ else
+ gcc_library_search_paths (bs, r);
+
+ return r;
+ }
+
+ target* link::
+ search_library (optional<dir_paths>& spc, prerequisite& p) const
+ {
+ tracer trace (x, "link::search_library");
+
+ // @@ This is hairy enough to warrant a separate implementation for
+ // Windows.
+ //
+
+ // First check the cache.
+ //
+ if (p.target != nullptr)
+ return p.target;
+
+ bool l (p.is_a<lib> ());
+ const string* ext (l ? nullptr : p.ext); // Only for liba/libs.
+
+ // Then figure out what we need to search for.
+ //
+
+ // liba
+ //
+ path an;
+ const string* ae (nullptr);
+
+ if (l || p.is_a<liba> ())
+ {
+ // We are trying to find a library in the search paths extracted from
+ // the compiler. It would only be natural if we used the library
+ // prefix/extension that correspond to this compiler and/or its
+ // target.
+ //
+ // Unlike MinGW, VC's .lib/.dll.lib naming is by no means standard and
+ // we might need to search for other names. In fact, there is no
+ // reliable way to guess from the file name what kind of library it
+ // is, static or import and we will have to do deep inspection of such
+ // alternative names. However, if we did find .dll.lib, then we can
+ // assume that .lib is the static library without any deep inspection
+ // overhead.
+ //
+ const char* e ("");
+
+ if (cid == "msvc")
+ {
+ an = path (p.name);
+ e = "lib";
+ }
+ else
+ {
+ an = path ("lib" + p.name);
+ e = "a";
+ }
+
+ ae = ext == nullptr
+ ? &extension_pool.find (e)
+ : ext;
+
+ if (!ae->empty ())
+ {
+ an += '.';
+ an += *ae;
+ }
+ }
+
+ // libs
+ //
+ path sn;
+ const string* se (nullptr);
+
+ if (l || p.is_a<libs> ())
+ {
+ const char* e ("");
+
+ if (cid == "msvc")
+ {
+ sn = path (p.name);
+ e = "dll.lib";
+ }
+ else
+ {
+ sn = path ("lib" + p.name);
+
+ if (tsys == "darwin") e = "dylib";
+ else if (tsys == "mingw32") e = "dll.a"; // See search code below.
+ else e = "so";
+ }
+
+ se = ext == nullptr
+ ? &extension_pool.find (e)
+ : ext;
+
+ if (!se->empty ())
+ {
+ sn += '.';
+ sn += *se;
+ }
+ }
+
+ // Now search.
+ //
+ if (!spc)
+ spc = extract_library_paths (p.scope);
+
+ liba* a (nullptr);
+ libs* s (nullptr);
+
+ path f; // Reuse the buffer.
+ const dir_path* pd;
+ for (const dir_path& d: *spc)
+ {
+ timestamp mt;
+
+ // libs
+ //
+ // Look for the shared library first. The order is important for VC:
+        // only if we found .dll.lib can we safely assume that just .lib is a
+ // static library.
+ //
+ if (!sn.empty ())
+ {
+ f = d;
+ f /= sn;
+ mt = file_mtime (f);
+
+ if (mt != timestamp_nonexistent)
+ {
+ // On Windows what we found is the import library which we need
+ // to make the first ad hoc member of libs{}.
+ //
+ if (tclass == "windows")
+ {
+ s = &targets.insert<libs> (
+ d, dir_path (), p.name, nullptr, trace);
+
+ if (s->member == nullptr)
+ {
+ libi& i (
+ targets.insert<libi> (
+ d, dir_path (), p.name, se, trace));
+
+ if (i.path ().empty ())
+ i.path (move (f));
+
+ i.mtime (mt);
+
+ // Presumably there is a DLL somewhere, we just don't know
+              // where (and it's possible we might have to look for one if we
+ // decide we need to do rpath emulation for installed
+ // libraries as well). We will represent this as empty path
+ // but valid timestamp (aka "trust me, it's there").
+ //
+ s->mtime (mt);
+ s->member = &i;
+ }
+ }
+ else
+ {
+ s = &targets.insert<libs> (d, dir_path (), p.name, se, trace);
+
+ if (s->path ().empty ())
+ s->path (move (f));
+
+ s->mtime (mt);
+ }
+ }
+ else if (ext == nullptr && tsys == "mingw32")
+ {
+ // Above we searched for the import library (.dll.a) but if it's
+ // not found, then we also search for the .dll (unless the
+ // extension was specified explicitly) since we can link to it
+ // directly. Note also that the resulting libs{} would end up
+ // being the .dll.
+ //
+ se = &extension_pool.find ("dll");
+ f = f.base (); // Remove .a from .dll.a.
+ mt = file_mtime (f);
+
+ if (mt != timestamp_nonexistent)
+ {
+ s = &targets.insert<libs> (d, dir_path (), p.name, se, trace);
+
+ if (s->path ().empty ())
+ s->path (move (f));
+
+ s->mtime (mt);
+ }
+ }
+ }
+
+ // liba
+ //
+ // If we didn't find .dll.lib then we cannot assume .lib is static.
+ //
+ if (!an.empty () && (s != nullptr || cid != "msvc"))
+ {
+ f = d;
+ f /= an;
+
+ if ((mt = file_mtime (f)) != timestamp_nonexistent)
+ {
+ // Enter the target. Note that because the search paths are
+ // normalized, the result is automatically normalized as well.
+ //
+ // Note that this target is outside any project which we treat
+ // as out trees.
+ //
+ a = &targets.insert<liba> (d, dir_path (), p.name, ae, trace);
+
+ if (a->path ().empty ())
+ a->path (move (f));
+
+ a->mtime (mt);
+ }
+ }
+
+ // Alternative search for VC.
+ //
+ if (cid == "msvc")
+ {
+ scope& rs (*p.scope.root_scope ());
+ const path& ld (cast<path> (rs["config.bin.ld"]));
+
+ if (s == nullptr && !sn.empty ())
+ s = msvc_search_shared (ld, d, p);
+
+ if (a == nullptr && !an.empty ())
+ a = msvc_search_static (ld, d, p);
+ }
+
+ if (a != nullptr || s != nullptr)
+ {
+ pd = &d;
+ break;
+ }
+ }
+
+ if (a == nullptr && s == nullptr)
+ return nullptr;
+
+ // Add the "using static/shared library" macro (used, for example, to
+ // handle DLL export). The absence of either of these macros would mean
+ // some other build system that cannot distinguish between the two.
+ //
+ auto add_macro = [this] (target& t, const char* suffix)
+ {
+ // If there is already a value (either in cc.export or x.export),
+ // don't add anything: we don't want to be accumulating defines nor
+ // messing with custom values. And if we are adding, then use the
+ // generic cc.export.
+ //
+ if (!t.vars[x_export_poptions])
+ {
+ auto p (t.vars.insert (c_export_poptions));
+
+ if (p.second)
+ {
+ // The "standard" macro name will be LIB<NAME>_{STATIC,SHARED},
+ // where <name> is the target name. Here we want to strike a
+ // balance between being unique and not too noisy.
+ //
+ string d ("-DLIB");
+
+ auto upcase_sanitize = [] (char c)
+ {
+ return (c == '-' || c == '+' || c == '.') ? '_' : ucase (c);
+ };
+
+ transform (t.name.begin (),
+ t.name.end (),
+ back_inserter (d),
+ upcase_sanitize);
+
+ d += '_';
+ d += suffix;
+
+ strings o;
+ o.push_back (move (d));
+ p.first.get () = move (o);
+ }
+ }
+ };
+
+ if (a != nullptr)
+ add_macro (*a, "STATIC");
+
+ if (s != nullptr)
+ add_macro (*s, "SHARED");
+
+ if (l)
+ {
+ // Enter the target group.
+ //
+ lib& l (targets.insert<lib> (*pd, dir_path (), p.name, p.ext, trace));
+
+ // It should automatically link-up to the members we have found.
+ //
+ assert (l.a == a);
+ assert (l.s == s);
+
+ // Set the bin.lib variable to indicate what's available.
+ //
+ const char* bl (a != nullptr
+ ? (s != nullptr ? "both" : "static")
+ : "shared");
+ l.assign ("bin.lib") = bl;
+
+ p.target = &l;
+ }
+ else
+ p.target = p.is_a<liba> () ? static_cast<target*> (a) : s;
+
+ return p.target;
+ }
+
+ match_result link::
+ match (action a, target& t, const string& hint) const
+ {
+ tracer trace (x, "link::match");
+
+ // @@ TODO:
+ //
+ // - if path already assigned, verify extension?
+ //
+ // @@ Q:
+ //
+ // - if there is no .o, are we going to check if the one derived
+ // from target exist or can be built? A: No.
+ // What if there is a library. Probably ok if static, not if shared,
+ // (i.e., a utility library).
+ //
+
+ otype lt (link_type (t));
+
+ // Scan prerequisites and see if we can work with what we've got. Note
+ // that X could be C. We handle this by always checking for X first.
+ //
+ bool seen_x (false), seen_c (false), seen_obj (false), seen_lib (false);
+
+ for (prerequisite_member p: group_prerequisite_members (a, t))
+ {
+ if (p.is_a (x_src))
+ {
+ seen_x = seen_x || true;
+ }
+ else if (p.is_a<c> ())
+ {
+ seen_c = seen_c || true;
+ }
+ else if (p.is_a<obj> ())
+ {
+ seen_obj = seen_obj || true;
+ }
+ else if (p.is_a<obje> ())
+ {
+ if (lt != otype::e)
+ fail << "obje{} as prerequisite of " << t;
+
+ seen_obj = seen_obj || true;
+ }
+ else if (p.is_a<obja> ())
+ {
+ if (lt != otype::a)
+ fail << "obja{} as prerequisite of " << t;
+
+ seen_obj = seen_obj || true;
+ }
+ else if (p.is_a<objs> ())
+ {
+ if (lt != otype::s)
+ fail << "objs{} as prerequisite of " << t;
+
+ seen_obj = seen_obj || true;
+ }
+ else if (p.is_a<lib> () ||
+ p.is_a<liba> () ||
+ p.is_a<libs> ())
+ {
+ seen_lib = seen_lib || true;
+ }
+ }
+
+ // We will only chain a C source if there is also an X source or we were
+ // explicitly told to.
+ //
+ if (seen_c && !seen_x && hint < x)
+ {
+ l4 ([&]{trace << "C prerequisite without " << x_lang << " or hint";});
+ return nullptr;
+ }
+
+ // If we have any prerequisite libraries (which also means that
+ // we match), search/import and pre-match them to implement the
+ // "library meta-information protocol". Don't do this if we are
+ // called from the install rule just to check if we would match.
+ //
+ if (seen_lib && lt != otype::e &&
+ a.operation () != install_id && a.outer_operation () != install_id)
+ {
+ if (t.group != nullptr)
+ t.group->prerequisite_targets.clear (); // lib{}'s
+
+ optional<dir_paths> lib_paths; // Extract lazily.
+
+ for (prerequisite_member p: group_prerequisite_members (a, t))
+ {
+ if (p.is_a<lib> () || p.is_a<liba> () || p.is_a<libs> ())
+ {
+ target* pt (nullptr);
+
+ // Handle imported libraries.
+ //
+ if (p.proj () != nullptr)
+ pt = search_library (lib_paths, p.prerequisite);
+
+ if (pt == nullptr)
+ {
+ pt = &p.search ();
+ match_only (a, *pt);
+ }
+
+ // If the prerequisite came from the lib{} group, then also
+ // add it to lib's prerequisite_targets.
+ //
+ if (!p.prerequisite.belongs (t))
+ t.group->prerequisite_targets.push_back (pt);
+
+ t.prerequisite_targets.push_back (pt);
+ }
+ }
+ }
+
+ return seen_x || seen_c || seen_obj || seen_lib ? &t : nullptr;
+ }
+
+ recipe link::
+ apply (action a, target& xt, const match_result&) const
+ {
+ tracer trace (x, "link::apply");
+
+ file& t (static_cast<file&> (xt));
+
+ scope& bs (t.base_scope ());
+ scope& rs (*bs.root_scope ());
+
+ otype lt (link_type (t));
+ lorder lo (link_order (bs, lt));
+
+ // Derive file name from target name.
+ //
+ if (t.path ().empty ())
+ {
+ const char* p (nullptr);
+ const char* e (nullptr);
+
+ switch (lt)
+ {
+ case otype::e:
+ {
+ if (tclass == "windows")
+ e = "exe";
+ else
+ e = "";
+
+ break;
+ }
+ case otype::a:
+ {
+ // To be anally precise, let's use the ar id to decide how to name
+ // the library in case, for example, someone wants to archive
+ // VC-compiled object files with MinGW ar or vice versa.
+ //
+ if (cast<string> (rs["bin.ar.id"]) == "msvc")
+ {
+ e = "lib";
+ }
+ else
+ {
+ p = "lib";
+ e = "a";
+ }
+
+ if (auto l = t["bin.libprefix"])
+ p = cast<string> (l).c_str ();
+
+ break;
+ }
+ case otype::s:
+ {
+ if (tclass == "macosx")
+ {
+ p = "lib";
+ e = "dylib";
+ }
+ else if (tclass == "windows")
+ {
+ // On Windows libs{} is an ad hoc group. The libs{} itself is
+ // the DLL and we add libi{} import library as its member (see
+ // below).
+ //
+ if (tsys == "mingw32")
+ p = "lib";
+
+ e = "dll";
+ }
+ else
+ {
+ p = "lib";
+ e = "so";
+ }
+
+ if (auto l = t["bin.libprefix"])
+ p = cast<string> (l).c_str ();
+
+ break;
+ }
+ }
+
+ t.derive_path (e, p);
+ }
+
+ // Add ad hoc group members.
+ //
+ auto add_adhoc = [a, &bs] (target& t, const char* type) -> file&
+ {
+ const target_type& tt (*bs.find_target_type (type));
+
+ if (t.member != nullptr) // Might already be there.
+ assert (t.member->type () == tt);
+ else
+ t.member = &search (tt, t.dir, t.out, t.name, nullptr, nullptr);
+
+ file& r (static_cast<file&> (*t.member));
+ r.recipe (a, group_recipe);
+ return r;
+ };
+
+ if (tclass == "windows")
+ {
+ // Import library.
+ //
+ if (lt == otype::s)
+ {
+ file& imp (add_adhoc (t, "libi"));
+
+ // Usually on Windows the import library is called the same as the
+ // DLL but with the .lib extension. Which means it clashes with the
+ // static library. Instead of decorating the static library name
+ // with ugly suffixes (as is customary), let's use the MinGW
+ // approach (one must admit it's quite elegant) and call it
+ // .dll.lib.
+ //
+ if (imp.path ().empty ())
+ imp.derive_path (t.path (), tsys == "mingw32" ? "a" : "lib");
+ }
+
+ // PDB
+ //
+ if (lt != otype::a &&
+ cid == "msvc" &&
+ (find_option ("/DEBUG", t, c_loptions, true) ||
+ find_option ("/DEBUG", t, x_loptions, true)))
+ {
+ // Add after the import library if any.
+ //
+ file& pdb (add_adhoc (t.member == nullptr ? t : *t.member, "pdb"));
+
+ // We call it foo.{exe,dll}.pdb rather than just foo.pdb because we
+ // can have both foo.exe and foo.dll in the same directory.
+ //
+ if (pdb.path ().empty ())
+ pdb.derive_path (t.path (), "pdb");
+ }
+ }
+
+ t.prerequisite_targets.clear (); // See lib pre-match in match() above.
+
+ // Inject dependency on the output directory.
+ //
+ inject_fsdir (a, t);
+
+ optional<dir_paths> lib_paths; // Extract lazily.
+
+ // Process prerequisites: do rule chaining for C and X source files as
+ // well as search and match.
+ //
+ // When cleaning, ignore prerequisites that are not in the same or a
+ // subdirectory of our project root.
+ //
+ const target_type& ott (lt == otype::e ? obje::static_type :
+ lt == otype::a ? obja::static_type :
+ objs::static_type);
+
+ for (prerequisite_member p: group_prerequisite_members (a, t))
+ {
+ target* pt (nullptr);
+
+ if (!p.is_a (x_src) && !p.is_a<c> ())
+ {
+ // Handle imported libraries.
+ //
+ if (p.proj () != nullptr)
+ pt = search_library (lib_paths, p.prerequisite);
+
+ // The rest is the same basic logic as in search_and_match().
+ //
+ if (pt == nullptr)
+ pt = &p.search ();
+
+ if (a.operation () == clean_id && !pt->dir.sub (rs.out_path ()))
+ continue; // Skip.
+
+ // If this is the obj{} or lib{} target group, then pick the
+ // appropriate member and make sure it is searched and matched.
+ //
+ if (obj* o = pt->is_a<obj> ())
+ {
+ switch (lt)
+ {
+ case otype::e: pt = o->e; break;
+ case otype::a: pt = o->a; break;
+ case otype::s: pt = o->s; break;
+ }
+
+ if (pt == nullptr)
+ pt = &search (ott, p.key ());
+ }
+ else if (lib* l = pt->is_a<lib> ())
+ {
+ pt = &link_member (*l, lo);
+ }
+
+ build2::match (a, *pt);
+ t.prerequisite_targets.push_back (pt);
+ continue;
+ }
+
+ // The rest is rule chaining.
+ //
+
+ // Which scope shall we use to resolve the root? Unlikely, but
+ // possible, the prerequisite is from a different project
+ // altogether. So we are going to use the target's project.
+ //
+
+ // @@ Why are we creating the obj{} group if the source came from a
+ // group?
+ //
+ bool group (!p.prerequisite.belongs (t)); // Group's prerequisite.
+
+ const prerequisite_key& cp (p.key ()); // C-source (X or C) key.
+ const target_type& tt (group ? obj::static_type : ott);
+
+ // Come up with the obj*{} target. The source prerequisite directory
+ // can be relative (to the scope) or absolute. If it is relative, then
+ // use it as is. If absolute, then translate it to the corresponding
+ // directory under out_root. While the source directory is most likely
+ // under src_root, it is also possible it is under out_root (e.g.,
+ // generated source).
+ //
+ dir_path d;
+ {
+ const dir_path& cpd (*cp.tk.dir);
+
+ if (cpd.relative () || cpd.sub (rs.out_path ()))
+ d = cpd;
+ else
+ {
+ if (!cpd.sub (rs.src_path ()))
+ fail << "out of project prerequisite " << cp <<
+ info << "specify corresponding " << tt.name << "{} "
+ << "target explicitly";
+
+ d = rs.out_path () / cpd.leaf (rs.src_path ());
+ }
+ }
+
+ // obj*{} is always in the out tree.
+ //
+ target& ot (
+ search (tt, d, dir_path (), *cp.tk.name, nullptr, cp.scope));
+
+ // If we are cleaning, check that this target is in the same or
+ // a subdirectory of our project root.
+ //
+ if (a.operation () == clean_id && !ot.dir.sub (rs.out_path ()))
+ {
+ // If we shouldn't clean obj{}, then it is fair to assume we
+ // shouldn't clean the source either (generated source will be in
+ // the same directory as obj{} and if not, well, go find yourself
+ // another build system ;-)).
+ //
+ continue; // Skip.
+ }
+
+ // If we have created the obj{} target group, pick one of its members;
+ // the rest would be primarily concerned with it.
+ //
+ if (group)
+ {
+ obj& o (static_cast<obj&> (ot));
+
+ switch (lt)
+ {
+ case otype::e: pt = o.e; break;
+ case otype::a: pt = o.a; break;
+ case otype::s: pt = o.s; break;
+ }
+
+ if (pt == nullptr)
+ pt = &search (ott, o.dir, o.out, o.name, o.ext, nullptr);
+ }
+ else
+ pt = &ot;
+
+ // If this obj*{} target already exists, then it needs to be
+ // "compatible" with what we are doing here.
+ //
+ // This gets a bit tricky. We need to make sure the source files
+ // are the same which we can only do by comparing the targets to
+ // which they resolve. But we cannot search the ot's prerequisites
+ // -- only the rule that matches can. Note, however, that if all
+ // this works out, then our next step is to match the obj*{}
+ // target. If things don't work out, then we fail, in which case
+ // searching and matching speculatively doesn't really hurt.
+ //
+ bool found (false);
+ for (prerequisite_member p1:
+ reverse_group_prerequisite_members (a, *pt))
+ {
+ // Most of the time we will have just a single source so fast-path
+ // that case.
+ //
+ if (p1.is_a (x_src))
+ {
+ if (!found)
+ {
+ build2::match (a, *pt); // Now p1 should be resolved.
+
+ // Searching our own prerequisite is ok.
+ //
+ if (&p.search () != &p1.search ())
+ fail << "synthesized target for prerequisite " << cp << " "
+ << "would be incompatible with existing target " << *pt <<
+ info << "existing prerequisite " << p1 << " does not match "
+ << cp <<
+ info << "specify corresponding " << tt.name << "{} target "
+ << "explicitly";
+
+ found = true;
+ }
+
+ continue; // Check the rest of the prerequisites.
+ }
+
+ // Ignore some known target types (fsdir, headers, libraries).
+ //
+ if (p1.is_a<fsdir> () ||
+ p1.is_a<lib> () ||
+ p1.is_a<liba> () ||
+ p1.is_a<libs> () ||
+ (p.is_a (x_src) && x_header (p1)) ||
+ (p.is_a<c> () && p1.is_a<h> ()))
+ continue;
+
+ fail << "synthesized target for prerequisite " << cp
+ << " would be incompatible with existing target " << *pt <<
+ info << "unexpected existing prerequisite type " << p1 <<
+ info << "specify corresponding obj{} target explicitly";
+ }
+
+ if (!found)
+ {
+ // Note: add the source to the group, not the member.
+ //
+ ot.prerequisites.emplace_back (p.as_prerequisite (trace));
+
+ // Add our lib*{} prerequisites to the object file (see the export.*
+ // machinery for details).
+ //
+ // Note that we don't resolve lib{} to liba{}/libs{} here instead
+ // leaving it to whoever (e.g., the compile rule) will be needing
+ // *.export.*. One reason for doing it there is that the object
+ // target might be specified explicitly by the user in which case
+ // they will have to specify the set of lib{} prerequisites and it's
+ // much cleaner to do as lib{} rather than liba{}/libs{}.
+ //
+ // Initially, we were only adding imported libraries, but there is a
+ // problem with this approach: the non-imported library might depend
+ // on the imported one(s) which we will never "see" unless we start
+ // with this library.
+ //
+ for (prerequisite& p: group_prerequisites (t))
+ {
+ if (p.is_a<lib> () || p.is_a<liba> () || p.is_a<libs> ())
+ ot.prerequisites.emplace_back (p);
+ }
+
+ build2::match (a, *pt);
+ }
+
+ t.prerequisite_targets.push_back (pt);
+ }
+
+ switch (a)
+ {
+ case perform_update_id:
+ return [this] (action a, target& t) {return perform_update (a, t);};
+ case perform_clean_id:
+ return [this] (action a, target& t) {return perform_clean (a, t);};
+ default:
+ return noop_recipe; // Configure update.
+ }
+ }
+
+ // Recursively append/hash prerequisite libraries of a static library.
+ //
+ static void
+ append_libraries (strings& args, liba& a)
+ {
+ for (target* pt: a.prerequisite_targets)
+ {
+ if (liba* pa = pt->is_a<liba> ())
+ {
+ args.push_back (relative (pa->path ()).string ()); // string()&&
+ append_libraries (args, *pa);
+ }
+ else if (libs* ps = pt->is_a<libs> ())
+ args.push_back (relative (ps->path ()).string ()); // string()&&
+ }
+ }
+
+ static void
+ hash_libraries (sha256& cs, liba& a)
+ {
+ for (target* pt: a.prerequisite_targets)
+ {
+ if (liba* pa = pt->is_a<liba> ())
+ {
+ cs.append (pa->path ().string ());
+ hash_libraries (cs, *pa);
+ }
+ else if (libs* ps = pt->is_a<libs> ())
+ cs.append (ps->path ().string ());
+ }
+ }
+
+ static void
+ append_rpath_link (strings& args, libs& t)
+ {
+ for (target* pt: t.prerequisite_targets)
+ {
+ if (libs* ls = pt->is_a<libs> ())
+ {
+ args.push_back ("-Wl,-rpath-link," +
+ ls->path ().directory ().string ());
+ append_rpath_link (args, *ls);
+ }
+ }
+ }
+
+ // See windows-rpath.cxx.
+ //
+ timestamp
+ windows_rpath_timestamp (file&);
+
+ void
+ windows_rpath_assembly (file&, const string& cpu, timestamp, bool scratch);
+
+ // Filter link.exe noise (msvc.cxx).
+ //
+ void
+ msvc_filter_link (ifdstream&, const file&, otype);
+
+ // Translate target CPU to /MACHINE option.
+ //
+ const char*
+ msvc_machine (const string& cpu); // msvc.cxx
+
+ target_state link::
+ perform_update (action a, target& xt) const
+ {
+ tracer trace (x, "link::perform_update");
+
+ file& t (static_cast<file&> (xt));
+
+ scope& rs (t.root_scope ());
+ otype lt (link_type (t));
+
+ // Update prerequisites.
+ //
+ bool update (execute_prerequisites (a, t, t.mtime ()));
+
+ // If targeting Windows, take care of the manifest.
+ //
+ path manifest; // Manifest itself (msvc) or compiled object file.
+ timestamp rpath_timestamp (timestamp_nonexistent); // DLLs timestamp.
+
+ if (lt == otype::e && tclass == "windows")
+ {
+ // First determine if we need to add our rpath emulating assembly. The
+ // assembly itself is generated later, after updating the target. Omit
+ // it if we are updating for install.
+ //
+ if (a.outer_operation () != install_id)
+ rpath_timestamp = windows_rpath_timestamp (t);
+
+ path mf (
+ windows_manifest (
+ t,
+ rpath_timestamp != timestamp_nonexistent));
+
+ timestamp mt (file_mtime (mf));
+
+ if (tsys == "mingw32")
+ {
+ // Compile the manifest into the object file with windres. While we
+ // are going to synthesize an .rc file to pipe to windres' stdin, we
+ // will still use .manifest to check if everything is up-to-date.
+ //
+ manifest = mf + ".o";
+
+ if (mt > file_mtime (manifest))
+ {
+ path of (relative (manifest));
+
+          // @@ Would be good to add this to depdb (e.g., rc changes).
+ //
+ const char* args[] = {
+ cast<path> (rs["config.bin.rc"]).string ().c_str (),
+ "--input-format=rc",
+ "--output-format=coff",
+ "-o", of.string ().c_str (),
+ nullptr};
+
+ if (verb >= 3)
+ print_process (args);
+
+ try
+ {
+ process pr (args, -1);
+
+ try
+ {
+ ofdstream os (pr.out_fd);
+
+ // 1 is resource ID, 24 is RT_MANIFEST. We also need to escape
+ // Windows path backslashes.
+ //
+ os << "1 24 \"";
+
+ const string& s (mf.string ());
+ for (size_t i (0), j;; i = j + 1)
+ {
+ j = s.find ('\\', i);
+ os.write (s.c_str () + i,
+ (j == string::npos ? s.size () : j) - i);
+
+ if (j == string::npos)
+ break;
+
+ os.write ("\\\\", 2);
+ }
+
+ os << "\"" << endl;
+
+ os.close ();
+
+ if (!pr.wait ())
+ throw failed (); // Assume diagnostics issued.
+ }
+ catch (const ofdstream::failure& e)
+ {
+ if (pr.wait ()) // Ignore if child failed.
+ fail << "unable to pipe resource file to " << args[0]
+ << ": " << e.what ();
+ }
+ }
+ catch (const process_error& e)
+ {
+ error << "unable to execute " << args[0] << ": " << e.what ();
+
+ if (e.child ())
+ exit (1);
+
+ throw failed ();
+ }
+
+ update = true; // Manifest changed, force update.
+ }
+ }
+ else
+ {
+ manifest = move (mf); // Save for link.exe's /MANIFESTINPUT.
+
+ if (mt > t.mtime ())
+ update = true; // Manifest changed, force update.
+ }
+ }
+
+ // Check/update the dependency database.
+ //
+ depdb dd (t.path () + ".d");
+
+ // First should come the rule name/version.
+ //
+ if (dd.expect (rule_id) != nullptr)
+ l4 ([&]{trace << "rule mismatch forcing update of " << t;});
+
+ lookup ranlib;
+
+ // Then the linker checksum (ar/ranlib or the compiler).
+ //
+ if (lt == otype::a)
+ {
+ ranlib = rs["config.bin.ranlib"];
+
+ if (ranlib && ranlib->empty ()) // @@ BC LT [null].
+ ranlib = lookup ();
+
+ const char* rl (
+ ranlib
+ ? cast<string> (rs["bin.ranlib.checksum"]).c_str ()
+ : "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855");
+
+ if (dd.expect (cast<string> (rs["bin.ar.checksum"])) != nullptr)
+ l4 ([&]{trace << "ar mismatch forcing update of " << t;});
+
+ if (dd.expect (rl) != nullptr)
+ l4 ([&]{trace << "ranlib mismatch forcing update of " << t;});
+ }
+ else
+ {
+ // For VC we use link.exe directly.
+ //
+ const string& cs (
+ cast<string> (
+ rs[cid == "msvc" ? var_pool["bin.ld.checksum"] : x_checksum]));
+
+ if (dd.expect (cs) != nullptr)
+ l4 ([&]{trace << "linker mismatch forcing update of " << t;});
+ }
+
+ // Next check the target. While it might be incorporated into the linker
+ // checksum, it also might not (e.g., VC link.exe).
+ //
+ if (dd.expect (ctg) != nullptr)
+ l4 ([&]{trace << "target mismatch forcing update of " << t;});
+
+ // Start building the command line. While we don't yet know whether we
+ // will really need it, we need to hash it to find out. So the options
+ // are to either replicate the exact process twice, first for hashing
+ // then for building or to go ahead and start building and hash the
+ // result. The first approach is probably more efficient while the
+    // second is simpler. Let's go with the simpler for now (actually it's
+ // kind of a hybrid).
+ //
+ cstrings args {nullptr}; // Reserve one for config.bin.ar/config.x.
+
+ // Storage.
+ //
+ string std;
+ string soname1, soname2;
+ strings sargs;
+
+ if (lt == otype::a)
+ {
+ if (cid == "msvc") ;
+ else
+ {
+ // If the user asked for ranlib, don't try to do its function with
+ // -s. Some ar implementations (e.g., the LLVM one) don't support
+ // leading '-'.
+ //
+ args.push_back (ranlib ? "rc" : "rcs");
+ }
+ }
+ else
+ {
+ if (cid == "msvc")
+ {
+ // We are using link.exe directly so don't pass the compiler
+ // options.
+ }
+ else
+ {
+ append_options (args, t, c_coptions);
+ append_options (args, t, x_coptions);
+ append_std (args, rs, t, std);
+ }
+
+ append_options (args, t, c_loptions);
+ append_options (args, t, x_loptions);
+
+ // Handle soname/rpath.
+ //
+ if (tclass == "windows")
+ {
+ // Limited emulation for Windows with no support for user-defined
+ // rpaths.
+ //
+ auto l (t["bin.rpath"]);
+
+ if (l && !l->empty ())
+ fail << ctg << " does not support rpath";
+ }
+ else
+ {
+ // Set soname.
+ //
+ if (lt == otype::s)
+ {
+ const string& leaf (t.path ().leaf ().string ());
+
+ if (tclass == "macosx")
+ {
+ // With Mac OS 10.5 (Leopard) Apple finally caved in and gave us
+ // a way to emulate vanilla -rpath.
+ //
+ // It may seem natural to do something different on update for
+ // install. However, if we don't make it @rpath, then the user
+ // won't be able to use config.bin.rpath for installed libraries.
+ //
+ soname1 = "-install_name";
+ soname2 = "@rpath/" + leaf;
+ }
+ else
+ soname1 = "-Wl,-soname," + leaf;
+
+ if (!soname1.empty ())
+ args.push_back (soname1.c_str ());
+
+ if (!soname2.empty ())
+ args.push_back (soname2.c_str ());
+ }
+
+ // Add rpaths. We used to first add the ones specified by the user
+ // so that they take precedence. But that caused problems if we have
+ // old versions of the libraries sitting in the rpath location
+ // (e.g., installed libraries). And if you think about this, it's
+ // probably correct to prefer libraries that we explicitly imported
+ // to the ones found via rpath.
+ //
+ // Note also that if this is update for install, then we don't add
+ // rpath of the imported libraries (i.e., we assume they are also
+ // installed).
+ //
+ for (target* pt: t.prerequisite_targets)
+ {
+ if (libs* ls = pt->is_a<libs> ())
+ {
+ if (a.outer_operation () != install_id)
+ {
+ sargs.push_back ("-Wl,-rpath," +
+ ls->path ().directory ().string ());
+ }
+ // Use -rpath-link on targets that support it (Linux, FreeBSD).
+ // Since with this option the paths are not stored in the
+ // library, we have to do this recursively (in fact, we don't
+ // really need it for top-level libraries).
+ //
+ else if (tclass == "linux" || tclass == "freebsd")
+ append_rpath_link (sargs, *ls);
+ }
+ }
+
+ if (auto l = t["bin.rpath"])
+ for (const dir_path& p: cast<dir_paths> (l))
+ sargs.push_back ("-Wl,-rpath," + p.string ());
+ }
+ }
+
+ // All the options should now be in. Hash them and compare with the db.
+ //
+ {
+ sha256 cs;
+
+ for (size_t i (1); i != args.size (); ++i)
+ cs.append (args[i]);
+
+ for (size_t i (0); i != sargs.size (); ++i)
+ cs.append (sargs[i]);
+
+ if (dd.expect (cs.string ()) != nullptr)
+ l4 ([&]{trace << "options mismatch forcing update of " << t;});
+ }
+
+ // Finally, hash and compare the list of input files.
+ //
+ // Should we capture actual files or their checksum? The only good
+ // reason for capturing actual files is diagnostics: we will be able
+ // to pinpoint exactly what is causing the update. On the other hand,
+ // the checksum is faster and simpler. And we like simple.
+ //
+ {
+ sha256 cs;
+
+ for (target* pt: t.prerequisite_targets)
+ {
+ file* f;
+ liba* a (nullptr);
+ libs* s (nullptr);
+
+ if ((f = pt->is_a<obje> ()) ||
+ (f = pt->is_a<obja> ()) ||
+ (f = pt->is_a<objs> ()) ||
+ (lt != otype::a &&
+ ((f = a = pt->is_a<liba> ()) ||
+ (f = s = pt->is_a<libs> ()))))
+ {
+ // On Windows a shared library is a DLL with the import library as
+ // a first ad hoc group member. MinGW though can link directly to
+ // DLLs (see search_library() for details).
+ //
+ if (s != nullptr && tclass == "windows")
+ {
+ if (s->member != nullptr)
+ f = static_cast<file*> (s->member);
+ }
+
+ cs.append (f->path ().string ());
+
+ // If this is a static library, link all the libraries it depends
+ // on, recursively.
+ //
+ if (a != nullptr)
+ hash_libraries (cs, *a);
+ }
+ }
+
+ // Treat it as input for both MinGW and VC.
+ //
+ if (!manifest.empty ())
+ cs.append (manifest.string ());
+
+ // Treat them as inputs, not options.
+ //
+ if (lt != otype::a)
+ {
+ hash_options (cs, t, c_libs);
+ hash_options (cs, t, x_libs);
+ }
+
+ if (dd.expect (cs.string ()) != nullptr)
+ l4 ([&]{trace << "file set mismatch forcing update of " << t;});
+ }
+
+ // If any of the above checks resulted in a mismatch (different linker,
+ // options or input file set), or if the database is newer than the
+ // target (interrupted update) then force the target update. Also note
+ // this situation in the "from scratch" flag.
+ //
+ bool scratch (false);
+ if (dd.writing () || dd.mtime () > t.mtime ())
+ scratch = update = true;
+
+ dd.close ();
+
+ // If nothing changed, then we are done.
+ //
+ if (!update)
+ return target_state::unchanged;
+
+ // Ok, so we are updating. Finish building the command line.
+ //
+ string out, out1, out2; // Storage.
+
+ // Translate paths to relative (to working directory) ones. This results
+ // in easier to read diagnostics.
+ //
+ path relt (relative (t.path ()));
+
+ switch (lt)
+ {
+ case otype::a:
+ {
+ args[0] = cast<path> (rs["config.bin.ar"]).string ().c_str ();
+
+ if (cid == "msvc")
+ {
+ // lib.exe has /LIBPATH but it's not clear/documented what it's
+ // used for. Perhaps for link-time code generation (/LTCG)? If
+ // that's the case, then we may need to pass *.loptions.
+ //
+ args.push_back ("/NOLOGO");
+
+ // Add /MACHINE.
+ //
+ args.push_back (msvc_machine (cast<string> (rs[x_target_cpu])));
+
+ out = "/OUT:" + relt.string ();
+ args.push_back (out.c_str ());
+ }
+ else
+ args.push_back (relt.string ().c_str ());
+
+ break;
+ }
+ // The options are usually similar enough to handle them together.
+ //
+ case otype::e:
+ case otype::s:
+ {
+ if (cid == "msvc")
+ {
+ // Using link.exe directly.
+ //
+ args[0] = cast<path> (rs["config.bin.ld"]).string ().c_str ();
+ args.push_back ("/NOLOGO");
+
+ if (lt == otype::s)
+ args.push_back ("/DLL");
+
+ // Add /MACHINE.
+ //
+ args.push_back (msvc_machine (cast<string> (rs[x_target_cpu])));
+
+ // Unless explicitly enabled with /INCREMENTAL, disable
+ // incremental linking (it is implicitly enabled if /DEBUG is
+ // specified). The reason is the .ilk file: its name cannot be
+ // changed and if we have, say, foo.exe and foo.dll, then they
+ // will end up stomping on each other's .ilk's.
+ //
+ // So the idea is to disable it by default but let the user
+ // request it explicitly if they are sure their project doesn't
+ // suffer from the above issue. We can also have something like
+ // 'incremental' config initializer keyword for this.
+ //
+ // It might also be a good idea to ask Microsoft to add an option.
+ //
+ if (!find_option ("/INCREMENTAL", args, true))
+ args.push_back ("/INCREMENTAL:NO");
+
+ // If you look at the list of libraries Visual Studio links by
+ // default, it includes everything and a couple of kitchen sinks
+ // (winspool32.lib, ole32.lib, odbc32.lib, etc) while we want to
+ // keep our low-level build as pure as possible. However, there
+ // seem to be fairly essential libraries that are not linked by
+ // link.exe by default (use /VERBOSE:LIB to see the list). For
+ // example, MinGW by default links advapi32, shell32, user32, and
+ // kernel32. And so we follow suit and make sure those are linked.
+ // advapi32 and kernel32 are already on the default list and we
+ // only need to add the other two.
+ //
+ // The way we are going to do it is via the /DEFAULTLIB option
+ // rather than specifying the libraries as normal inputs (as VS
+ // does). This way the user can override our actions with the
+ // /NODEFAULTLIB option.
+ //
+ args.push_back ("/DEFAULTLIB:shell32.lib");
+ args.push_back ("/DEFAULTLIB:user32.lib");
+
+ // Take care of the manifest (will be empty for the DLL).
+ //
+ if (!manifest.empty ())
+ {
+ std = "/MANIFESTINPUT:"; // Repurpose storage for std.
+ std += relative (manifest).string ();
+ args.push_back ("/MANIFEST:EMBED");
+ args.push_back (std.c_str ());
+ }
+
+ if (lt == otype::s)
+ {
+ // On Windows libs{} is the DLL and its first ad hoc group
+ // member is the import library.
+ //
+ // This will also create the .exp export file. Its name will be
+ // derived from the import library by changing the extension.
+ // Lucky for us -- there is no option to name it.
+ //
+ auto imp (static_cast<file*> (t.member));
+ out2 = "/IMPLIB:" + relative (imp->path ()).string ();
+ args.push_back (out2.c_str ());
+ }
+
+ // If we have /DEBUG then name the .pdb file. It is either the
+ // first (exe) or the second (dll) ad hoc group member.
+ //
+ if (find_option ("/DEBUG", args, true))
+ {
+ auto pdb (static_cast<file*> (
+ lt == otype::e ? t.member : t.member->member));
+ out1 = "/PDB:" + relative (pdb->path ()).string ();
+ args.push_back (out1.c_str ());
+ }
+
+ // @@ An executable can have an import library and VS seems to
+ // always name it. I wonder what would trigger its generation?
+ // Could it be the presence of export symbols? Yes, link.exe
+ // will generate the import library iff there are exported
+ // symbols. Which means there could be a DLL without an import
+ // library (which we currently don't handle very well).
+ //
+ out = "/OUT:" + relt.string ();
+ args.push_back (out.c_str ());
+ }
+ else
+ {
+ args[0] = cast<path> (rs[config_x]).string ().c_str ();
+
+ // Add the option that triggers building a shared library and take
+ // care of any extras (e.g., import library).
+ //
+ if (lt == otype::s)
+ {
+ if (tclass == "macosx")
+ args.push_back ("-dynamiclib");
+ else
+ args.push_back ("-shared");
+
+ if (tsys == "mingw32")
+ {
+ // On Windows libs{} is the DLL and its first ad hoc group
+ // member is the import library.
+ //
+ auto imp (static_cast<file*> (t.member));
+ out = "-Wl,--out-implib=" + relative (imp->path ()).string ();
+ args.push_back (out.c_str ());
+ }
+ }
+
+ args.push_back ("-o");
+ args.push_back (relt.string ().c_str ());
+ }
+
+ break;
+ }
+ }
+
+ for (target* pt: t.prerequisite_targets)
+ {
+ file* f;
+ liba* a (nullptr);
+ libs* s (nullptr);
+
+ if ((f = pt->is_a<obje> ()) ||
+ (f = pt->is_a<obja> ()) ||
+ (f = pt->is_a<objs> ()) ||
+ (lt != otype::a &&
+ ((f = a = pt->is_a<liba> ()) ||
+ (f = s = pt->is_a<libs> ()))))
+ {
+ // On Windows a shared library is a DLL with the import library as a
+ // first ad hoc group member. MinGW though can link directly to DLLs
+ // (see search_library() for details).
+ //
+ if (s != nullptr && tclass == "windows")
+ {
+ if (s->member != nullptr)
+ f = static_cast<file*> (s->member);
+ }
+
+ sargs.push_back (relative (f->path ()).string ()); // string()&&
+
+ // If this is a static library, link all the libraries it depends
+ // on, recursively.
+ //
+ if (a != nullptr)
+ append_libraries (sargs, *a);
+ }
+ }
+
+ // For MinGW manifest is an object file.
+ //
+ if (!manifest.empty () && tsys == "mingw32")
+ sargs.push_back (relative (manifest).string ());
+
+ // Copy sargs to args. Why not do it as we go along pushing into sargs?
+ // Because of potential reallocations.
+ //
+ for (size_t i (0); i != sargs.size (); ++i)
+ args.push_back (sargs[i].c_str ());
+
+ if (lt != otype::a)
+ {
+ append_options (args, t, c_libs);
+ append_options (args, t, x_libs);
+ }
+
+ args.push_back (nullptr);
+
+ if (verb >= 2)
+ print_process (args);
+ else if (verb)
+ text << "ld " << t;
+
+ try
+ {
+ // VC tools (both lib.exe and link.exe) send diagnostics to stdout.
+ // Also, link.exe likes to print various gratuitous messages. So for
+ // link.exe we redirect stdout to a pipe, filter that noise out, and
+ // send the rest to stderr.
+ //
+ // For lib.exe (and any other insane compiler that may try to pull off
+ // something like this) we are going to redirect stdout to stderr. For
+ // sane compilers this should be harmless.
+ //
+ bool filter (cid == "msvc" && lt != otype::a);
+
+ process pr (args.data (), 0, (filter ? -1 : 2));
+
+ if (filter)
+ {
+ try
+ {
+ ifdstream is (pr.in_ofd, fdstream_mode::text, ifdstream::badbit);
+
+ msvc_filter_link (is, t, lt);
+
+ // If anything remains in the stream, send it all to stderr. Note
+ // that the eof check is important: if the stream is at eof, this
+ // and all subsequent writes to cerr will fail (and you won't see
+ // a thing).
+ //
+ if (is.peek () != ifdstream::traits_type::eof ())
+ cerr << is.rdbuf ();
+
+ is.close ();
+ }
+ catch (const ifdstream::failure&) {} // Assume exits with error.
+ }
+
+ if (!pr.wait ())
+ throw failed ();
+ }
+ catch (const process_error& e)
+ {
+ error << "unable to execute " << args[0] << ": " << e.what ();
+
+ // In a multi-threaded program that fork()'ed but did not exec(),
+ // it is unwise to try to do any kind of cleanup (like unwinding
+ // the stack and running destructors).
+ //
+ if (e.child ())
+ exit (1);
+
+ throw failed ();
+ }
+
+ // Remove the target file if any of the subsequent actions fail. If we
+ // don't do that, we will end up with a broken build that is up-to-date.
+ //
+ auto_rmfile rm (t.path ());
+
+ if (ranlib)
+ {
+ const char* args[] = {
+ cast<path> (ranlib).string ().c_str (),
+ relt.string ().c_str (),
+ nullptr};
+
+ if (verb >= 2)
+ print_process (args);
+
+ try
+ {
+ process pr (args);
+
+ if (!pr.wait ())
+ throw failed ();
+ }
+ catch (const process_error& e)
+ {
+ error << "unable to execute " << args[0] << ": " << e.what ();
+
+ if (e.child ())
+ exit (1);
+
+ throw failed ();
+ }
+ }
+
+ // For Windows generate rpath-emulating assembly (unless updating for
+ // install).
+ //
+ if (lt == otype::e && tclass == "windows")
+ {
+ if (a.outer_operation () != install_id)
+ windows_rpath_assembly (t,
+ cast<string> (rs[x_target_cpu]),
+ rpath_timestamp,
+ scratch);
+ }
+
+ rm.cancel ();
+
+ // Should we go to the filesystem and get the new mtime? We know the
+ // file has been modified, so instead just use the current clock time.
+ // It has the advantage of having the subseconds precision.
+ //
+ t.mtime (system_clock::now ());
+ return target_state::changed;
+ }
+
+ target_state link::
+ perform_clean (action a, target& xt) const
+ {
+ file& t (static_cast<file&> (xt));
+
+ initializer_list<const char*> e;
+
+ switch (link_type (t))
+ {
+ case otype::a:
+ {
+ e = {".d"};
+ break;
+ }
+ case otype::e:
+ {
+ if (tclass == "windows")
+ {
+ if (tsys == "mingw32")
+ {
+ e = {".d", "/.dlls", ".manifest.o", ".manifest"};
+ }
+ else
+ {
+ // Assuming it's VC or alike. Clean up .ilk in case the user
+ // enabled incremental linking (note that .ilk replaces .exe).
+ //
+ e = {".d", "/.dlls", ".manifest", "-.ilk"};
+ }
+ }
+ else
+ e = {".d"};
+
+ break;
+ }
+ case otype::s:
+ {
+ if (tclass == "windows" && tsys != "mingw32")
+ {
+ // Assuming it's VC or alike. Clean up .exp and .ilk.
+ //
+ e = {".d", ".exp", "-.ilk"};
+ }
+ else
+ e = {".d"};
+
+ break;
+ }
+ }
+
+ return clean_extra (a, t, e);
+ }
+ }
+}
diff --git a/build2/cc/module b/build2/cc/module
new file mode 100644
index 0000000..bed7673
--- /dev/null
+++ b/build2/cc/module
@@ -0,0 +1,59 @@
+// file : build2/cc/module -*- C++ -*-
+// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BUILD2_CC_MODULE
+#define BUILD2_CC_MODULE
+
+#include <build2/types>
+#include <build2/utility>
+
+#include <build2/module>
+#include <build2/variable>
+
+#include <build2/cc/common>
+
+#include <build2/cc/compile>
+#include <build2/cc/link>
+#include <build2/cc/install>
+
+namespace build2
+{
+ namespace cc
+ {
+ class config_module: public module_base, public virtual config_data
+ {
+ public:
+ explicit
+ config_module (config_data&& d) : config_data (move (d)) {}
+
+ void
+ init (scope&,
+ scope&,
+ const location&,
+ bool first,
+ const variable_map&);
+ };
+
+ class module: public module_base, protected virtual common,
+ link, compile, install
+ {
+ public:
+ explicit
+ module (data&& d)
+ : common (move (d)),
+ link (move (d)),
+ compile (move (d), *this),
+ install (move (d), *this) {}
+
+ void
+ init (scope&,
+ scope&,
+ const location&,
+ bool first,
+ const variable_map&);
+ };
+ }
+}
+
+#endif // BUILD2_CC_MODULE
diff --git a/build2/cc/module.cxx b/build2/cc/module.cxx
new file mode 100644
index 0000000..3a7dad2
--- /dev/null
+++ b/build2/cc/module.cxx
@@ -0,0 +1,291 @@
+// file : build2/cc/module.cxx -*- C++ -*-
+// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
+// license : MIT; see accompanying LICENSE file
+
+#include <build2/cc/module>
+
+#include <iomanip> // left, setw()
+
+#include <butl/triplet>
+
+#include <build2/scope>
+#include <build2/context>
+#include <build2/diagnostics>
+
+#include <build2/bin/target>
+
+#include <build2/config/utility>
+#include <build2/install/utility>
+
+#include <build2/cc/guess>
+
+using namespace std;
+using namespace butl;
+
+namespace build2
+{
+ namespace cc
+ {
+ void config_module::
+ init (scope& r,
+ scope& b,
+ const location& loc,
+ bool first,
+ const variable_map&)
+ {
+ tracer trace (x, "config_init");
+
+ // Configure.
+ //
+ string pattern; // Toolchain pattern.
+
+ if (first)
+ {
+ const variable& config_c_coptions (var_pool["config.cc.coptions"]);
+
+ // config.x
+ //
+ auto p (config::required (r, config_x, path (x_default)));
+
+ // Figure out which compiler we are dealing with, its target, etc.
+ //
+ const path& xc (cast<path> (p.first));
+ compiler_info ci (
+ guess (x_lang,
+ xc,
+ cast_null<strings> (r[config_c_coptions]),
+ cast_null<strings> (r[config_x_coptions])));
+
+ // If this is a new value (e.g., we are configuring), then print the
+ // report at verbosity level 2 and up (-v).
+ //
+ if (verb >= (p.second ? 2 : 3))
+ {
+ text << x << ' ' << project (r) << '@' << r.out_path () << '\n'
+ << " " << left << setw (11) << x << xc << '\n'
+ << " id " << ci.id << '\n'
+ << " version " << ci.version.string << '\n'
+ << " major " << ci.version.major << '\n'
+ << " minor " << ci.version.minor << '\n'
+ << " patch " << ci.version.patch << '\n'
+ << " build " << ci.version.build << '\n'
+ << " signature " << ci.signature << '\n'
+ << " checksum " << ci.checksum << '\n'
+ << " target " << ci.target;
+ }
+
+ r.assign (x_id) = ci.id.string ();
+ r.assign (x_id_type) = move (ci.id.type);
+ r.assign (x_id_variant) = move (ci.id.variant);
+
+ r.assign (x_version) = move (ci.version.string);
+ r.assign (x_version_major) = ci.version.major;
+ r.assign (x_version_minor) = ci.version.minor;
+ r.assign (x_version_patch) = ci.version.patch;
+ r.assign (x_version_build) = move (ci.version.build);
+
+ r.assign (x_signature) = move (ci.signature);
+ r.assign (x_checksum) = move (ci.checksum);
+
+ pattern = move (ci.pattern);
+
+ // Split/canonicalize the target. First see if the user asked us to
+ // use config.sub.
+ //
+ if (ops.config_sub_specified ())
+ {
+ ci.target = run<string> (ops.config_sub (),
+ ci.target.c_str (),
+ [] (string& l) {return move (l);});
+ l5 ([&]{trace << "config.sub target: '" << ci.target << "'";});
+ }
+
+ try
+ {
+ string canon;
+ triplet t (ci.target, canon);
+
+ l5 ([&]{trace << "canonical target: '" << canon << "'; "
+ << "class: " << t.class_;});
+
+ // Enter as x.target.{cpu,vendor,system,version,class}.
+ //
+ r.assign (x_target) = move (canon);
+ r.assign (x_target_cpu) = move (t.cpu);
+ r.assign (x_target_vendor) = move (t.vendor);
+ r.assign (x_target_system) = move (t.system);
+ r.assign (x_target_version) = move (t.version);
+ r.assign (x_target_class) = move (t.class_);
+ }
+ catch (const invalid_argument& e)
+ {
+ // This is where we suggest that the user specifies --config-sub to
+ // help us out.
+ //
+ fail << "unable to parse " << x_lang << "compiler target '"
+ << ci.target << "': " << e.what () <<
+ info << "consider using the --config-sub option";
+ }
+ }
+
+ // config.x.{p,c,l}options
+ // config.x.libs
+ //
+ // These are optional. We also merge them into the corresponding
+ // x.* variables.
+ //
+ // The merging part gets a bit tricky if this module has already
+ // been loaded in one of the outer scopes. By doing the straight
+ // append we would just be repeating the same options over and
+ // over. So what we are going to do is only append to a value if
+ // it came from this scope. Then the usage for merging becomes:
+ //
+ // x.coptions = <overridable options> # Note: '='.
+ // using x
+ // x.coptions += <overriding options> # Note: '+='.
+ //
+ b.assign (x_poptions) += cast_null<strings> (
+ config::optional (r, config_x_poptions));
+
+ b.assign (x_coptions) += cast_null<strings> (
+ config::optional (r, config_x_coptions));
+
+ b.assign (x_loptions) += cast_null<strings> (
+ config::optional (r, config_x_loptions));
+
+ b.assign (x_libs) += cast_null<strings> (
+ config::optional (r, config_x_libs));
+
+ // Load cc.config.
+ //
+ if (!cast_false<bool> (b["cc.config.loaded"]))
+ {
+ // Prepare configuration hints. They are only used on the first load
+ // of cc.config so we only populate them on our first load.
+ //
+ variable_map h;
+ if (first)
+ {
+ h.assign ("config.cc.id") = cast<string> (r[x_id]);
+ h.assign ("config.cc.target") = cast<string> (r[x_target]);
+ if (!pattern.empty ())
+ h.assign ("config.cc.pattern") = move (pattern);
+ }
+
+ load_module ("cc.config", r, b, loc, false, h);
+ }
+ else if (first)
+ {
+ // If cc.config is already loaded, verify its configuration matched
+ // ours since it could have been loaded by another c-family module.
+ //
+ auto check = [&r, &loc, this](const char* cv,
+ const variable& xv,
+ const char* w)
+ {
+ const string& c (cast<string> (r[cv]));
+ const string& x (cast<string> (r[xv]));
+
+ if (c != x)
+ fail (loc) << "cc and " << x << " module " << w << " mismatch" <<
+ info << cv << " is " << c <<
+ info << xv.name << " is " << x;
+ };
+
+ // Note that we don't require that patterns match. Presumably, if the
+ // toolchain id and target are the same, then where exactly the tools
+ // (e.g., ar) come from doesn't really matter.
+ //
+ check ("cc.id", x_id, "toolchain id");
+ check ("cc.target", x_target, "target");
+ }
+ }
+
+ void module::
+ init (scope& r,
+ scope& b,
+ const location& loc,
+ bool,
+ const variable_map&)
+ {
+ tracer trace (x, "init");
+
+ // Load cc.core. Besides other things, this will load bin (core) plus
+ // extra bin.* modules we may need.
+ //
+ if (!cast_false<bool> (b["cc.core.loaded"]))
+ load_module ("cc.core", r, b, loc);
+
+ // Register target types and configure their "installability".
+ //
+ {
+ using namespace install;
+
+ auto& t (b.target_types);
+
+ t.insert (x_src);
+
+ // Install headers into install.include.
+ //
+ for (const target_type* const* ht (x_hdr); *ht != nullptr; ++ht)
+ {
+ t.insert (**ht);
+ install_path (**ht, b, dir_path ("include"));
+ }
+ }
+
+ // Register rules.
+ //
+ {
+ using namespace bin;
+
+ auto& r (b.rules);
+
+ // We register for configure so that we detect unresolved imports
+ // during configuration rather than later, e.g., during update.
+ //
+ // @@ Should we check if install module was loaded (see bin)?
+ //
+ compile& cr (*this);
+ link& lr (*this);
+ install& ir (*this);
+
+ r.insert<obje> (perform_update_id, x_compile, cr);
+ r.insert<obje> (perform_clean_id, x_compile, cr);
+ r.insert<obje> (configure_update_id, x_compile, cr);
+
+ r.insert<exe> (perform_update_id, x_link, lr);
+ r.insert<exe> (perform_clean_id, x_link, lr);
+ r.insert<exe> (configure_update_id, x_link, lr);
+
+ r.insert<exe> (perform_install_id, x_install, ir);
+
+ // Only register static object/library rules if the bin.ar module is
+ // loaded (by us or by the user).
+ //
+ if (cast_false<bool> (b["bin.ar.loaded"]))
+ {
+ r.insert<obja> (perform_update_id, x_compile, cr);
+ r.insert<obja> (perform_clean_id, x_compile, cr);
+ r.insert<obja> (configure_update_id, x_compile, cr);
+
+ r.insert<liba> (perform_update_id, x_link, lr);
+ r.insert<liba> (perform_clean_id, x_link, lr);
+ r.insert<liba> (configure_update_id, x_link, lr);
+
+ r.insert<liba> (perform_install_id, x_install, ir);
+ }
+
+ r.insert<objs> (perform_update_id, x_compile, cr);
+ r.insert<objs> (perform_clean_id, x_compile, cr);
+ r.insert<objs> (configure_update_id, x_compile, cr);
+
+ r.insert<libs> (perform_update_id, x_link, lr);
+ r.insert<libs> (perform_clean_id, x_link, lr);
+ r.insert<libs> (configure_update_id, x_link, lr);
+
+ r.insert<libs> (perform_install_id, x_install, ir);
+ }
+ }
+ }
+}
diff --git a/build2/cc/msvc.cxx b/build2/cc/msvc.cxx
new file mode 100644
index 0000000..84020d0
--- /dev/null
+++ b/build2/cc/msvc.cxx
@@ -0,0 +1,342 @@
+// file : build2/cc/msvc.cxx -*- C++ -*-
+// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
+// license : MIT; see accompanying LICENSE file
+
+#include <iostream> // cerr
+
+#include <build2/scope>
+#include <build2/target>
+#include <build2/context>
+#include <build2/variable>
+#include <build2/filesystem>
+#include <build2/diagnostics>
+
+#include <build2/bin/target>
+
+#include <build2/cc/types>
+
+#include <build2/cc/link>
+
+using namespace std;
+using namespace butl;
+
+namespace build2
+{
+ namespace cc
+ {
+ using namespace bin;
+
+ // Translate the target triplet CPU to lib.exe/link.exe /MACHINE option.
+ //
+ const char*
+ msvc_machine (const string& cpu)
+ {
+ const char* m (cpu == "i386" || cpu == "i686" ? "/MACHINE:x86" :
+ cpu == "x86_64" ? "/MACHINE:x64" :
+ cpu == "arm" ? "/MACHINE:ARM" :
+ cpu == "arm64" ? "/MACHINE:ARM64" :
+ nullptr);
+
+ if (m == nullptr)
+ fail << "unable to translate CPU " << cpu << " to /MACHINE";
+
+ return m;
+ }
+
+ // Filter cl.exe and link.exe noise.
+ //
+ void
+ msvc_filter_cl (ifdstream& is, const path& src)
+ {
+ // While it appears VC always prints the source name (even if the
+ // file does not exist), let's do a sanity check.
+ //
+ string l;
+ if (getline (is, l) && l != src.leaf ().string ())
+ cerr << l << endl;
+ }
+
+ void
+ msvc_filter_link (ifdstream& is, const file& t, otype lt)
+ {
+ // Filter lines until we encounter something we don't recognize. We also
+ // have to assume the messages can be translated.
+ //
+ for (string l; getline (is, l); )
+ {
+ // " Creating library foo\foo.dll.lib and object foo\foo.dll.exp"
+ //
+ if (lt == otype::s && l.compare (0, 3, " ") == 0)
+ {
+ path imp (static_cast<file*> (t.member)->path ().leaf ());
+
+ if (l.find (imp.string ()) != string::npos &&
+ l.find (imp.base ().string () + ".exp") != string::npos)
+ continue;
+ }
+
+ // /INCREMENTAL causes linker to sometimes issue messages but now I
+ // can't quite reproduce it.
+ //
+
+ cerr << l << endl;
+ break;
+ }
+ }
+
+ // Extract system library search paths from MSVC.
+ //
+ void link::
+ msvc_library_search_paths (scope&, dir_paths&) const
+ {
+ // The linker doesn't seem to have any built-in paths and all of them
+ // come from the LIB environment variable.
+
+ // @@ VC: how are we going to do this? E.g., cl-14 does this internally.
+ // cl.exe /Be prints LIB.
+ //
+ // Should we actually bother? LIB is normally used for system
+ // libraries and it's highly unlikely we will see an explicit import
+ // for a library from one of those directories.
+ //
+ }
+
+ // Inspect the file and determine if it is static or import library.
+ // Return otype::e if it is neither (which we quietly ignore).
+ //
+ static otype
+ library_type (const path& ld, const path& l)
+ {
+ // There are several reasonably reliable methods to tell whether it is a
+ // static or import library. One is lib.exe /LIST -- if there aren't any
+ // .obj members, then it is most likely an import library (it can also
+ // be an empty static library in which case there won't be any members).
+ // For an import library /LIST will print a bunch of .dll members.
+ //
+ // Another approach is dumpbin.exe (link.exe /DUMP) with /ARCHIVEMEMBERS
+ // (similar to /LIST) and /LINKERMEMBER (looking for __imp__ symbols or
+ // _IMPORT_DESCRIPTOR_).
+ //
+ // Note also, that apparently it is possible to have a hybrid library.
+ //
+ // While the lib.exe approach is probably the simplest, the problem is
+ // it will require us loading the bin.ar module even if we are not
+ // building any static libraries. On the other hand, if we are searching
+ // for libraries then we have bin.ld. So we will use the link.exe /DUMP
+ // /ARCHIVEMEMBERS.
+ //
+ const char* args[] = {ld.string ().c_str (),
+ "/DUMP", // Must come first.
+ "/NOLOGO",
+ "/ARCHIVEMEMBERS",
+ l.string ().c_str (),
+ nullptr};
+
+ // Link.exe seems to always dump everything to stdout but just in case
+ // redirect stderr to stdout.
+ //
+ process pr (start_run (args, false));
+
+ bool obj (false), dll (false);
+ string s;
+
+ try
+ {
+ ifdstream is (pr.in_ofd, fdstream_mode::skip, ifdstream::badbit);
+
+ while (getline (is, s))
+ {
+ // Detect the one error we should let through.
+ //
+ if (s.compare (0, 18, "unable to execute ") == 0)
+ break;
+
+ // The lines we are interested in seem to have this form (though
+ // presumably the "Archive member name at" part can be translated):
+ //
+ // Archive member name at 746: [...]hello.dll[/][ ]*
+ // Archive member name at 8C70: [...]hello.lib.obj[/][ ]*
+ //
+ size_t n (s.size ());
+
+ for (; n != 0 && s[n - 1] == ' '; --n) ; // Skip trailing spaces.
+
+ if (n >= 7) // At least ": X.obj" or ": X.dll".
+ {
+ --n;
+
+ if (s[n] == '/') // Skip trailing slash if one is there.
+ --n;
+
+ n -= 3; // Beginning of extension.
+
+ if (s[n] == '.')
+ {
+ // Make sure there is ": ".
+ //
+ size_t p (s.rfind (':', n - 1));
+
+ if (p != string::npos && s[p + 1] == ' ')
+ {
+ const char* e (s.c_str () + n + 1);
+
+ if (casecmp (e, "obj", 3) == 0)
+ obj = true;
+
+ if (casecmp (e, "dll", 3) == 0)
+ dll = true;
+ }
+ }
+ }
+ }
+ }
+ catch (const ifdstream::failure&)
+ {
+ // Presumably the child process failed. Let finish_run() deal with
+ // that.
+ }
+
+ if (!finish_run (args, false, pr, s))
+ return otype::e;
+
+ if (obj && dll)
+ {
+ warn << l << " looks like hybrid static/import library, ignoring";
+ return otype::e;
+ }
+
+ if (!obj && !dll)
+ {
+ warn << l << " looks like empty static or import library, ignoring";
+ return otype::e;
+ }
+
+ return obj ? otype::a : otype::s;
+ }
+
+ template <typename T>
+ static T*
+ msvc_search_library (const char* mod,
+ const path& ld,
+ const dir_path& d,
+ prerequisite& p,
+ otype lt,
+ const char* pfx,
+ const char* sfx)
+ {
+ // Pretty similar logic to link::search_library().
+ //
+ tracer trace (mod, "msvc_search_library");
+
+ // Assemble the file path.
+ //
+ path f (d);
+
+ if (*pfx != '\0')
+ {
+ f /= pfx;
+ f += p.name;
+ }
+ else
+ f /= p.name;
+
+ if (*sfx != '\0')
+ f += sfx;
+
+ const string& e (
+ p.ext == nullptr || p.is_a<lib> () // Only for liba/libs.
+ ? extension_pool.find ("lib")
+ : *p.ext);
+
+ if (!e.empty ())
+ {
+ f += '.';
+ f += e;
+ }
+
+ // Check if the file exists and is of the expected type.
+ //
+ timestamp mt (file_mtime (f));
+
+ if (mt != timestamp_nonexistent && library_type (ld, f) == lt)
+ {
+ // Enter the target.
+ //
+ T& t (targets.insert<T> (d, dir_path (), p.name, &e, trace));
+
+ if (t.path ().empty ())
+ t.path (move (f));
+
+ t.mtime (mt);
+ return &t;
+ }
+
+ return nullptr;
+ }
+
+ liba* link::
+ msvc_search_static (const path& ld,
+ const dir_path& d,
+ prerequisite& p) const
+ {
+ liba* r (nullptr);
+
+ auto search = [&r, &ld, &d, &p, this] (const char* pf, const char* sf)
+ -> bool
+ {
+ r = msvc_search_library<liba> (x, ld, d, p, otype::a, pf, sf);
+ return r != nullptr;
+ };
+
+ // Try:
+ // foo.lib
+ // libfoo.lib
+ // foolib.lib
+ // foo_static.lib
+ //
+ return
+ search ("", "") ||
+ search ("lib", "") ||
+ search ("", "lib") ||
+ search ("", "_static") ? r : nullptr;
+ }
+
+ libs* link::
+ msvc_search_shared (const path& ld,
+ const dir_path& d,
+ prerequisite& p) const
+ {
+ tracer trace (x, "link::msvc_search_shared");
+
+ libs* r (nullptr);
+
+ auto search = [&r, &ld, &d, &p, &trace, this] (
+ const char* pf, const char* sf) -> bool
+ {
+ if (libi* i =
+ msvc_search_library<libi> (x, ld, d, p, otype::s, pf, sf))
+ {
+ r = &targets.insert<libs> (d, dir_path (), p.name, nullptr, trace);
+
+ if (r->member == nullptr)
+ {
+ r->mtime (i->mtime ());
+ r->member = i;
+ }
+ }
+
+ return r != nullptr;
+ };
+
+ // Try:
+ // foo.lib
+ // libfoo.lib
+ // foodll.lib
+ //
+ return
+ search ("", "") ||
+ search ("lib", "") ||
+ search ("", "dll") ? r : nullptr;
+ }
+ }
+}
diff --git a/build2/cc/target b/build2/cc/target
new file mode 100644
index 0000000..2d8125b
--- /dev/null
+++ b/build2/cc/target
@@ -0,0 +1,48 @@
+// file : build2/cc/target -*- C++ -*-
+// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BUILD2_CC_TARGET
+#define BUILD2_CC_TARGET
+
+#include <build2/types>
+#include <build2/utility>
+
+#include <build2/target>
+
+namespace build2
+{
+  namespace cc
+  {
+    // There is hardly a c-family compilation without a C header inclusion.
+    // As a result, this target type is registered for any c-family module.
+    //
+    class h: public file
+    {
+    public:
+      using file::file;
+
+    public:
+      // Target type metadata; defined in target.cxx (default extension "h",
+      // overridable via the extension variable).
+      //
+      static const target_type static_type;
+      virtual const target_type& dynamic_type () const {return static_type;}
+    };
+
+    // This one we define in cc but the target type is only registered by the
+    // c module. This way we can implement rule chaining without jumping
+    // through too many hoops (like resolving target type dynamically) but
+    // also without relaxing things too much (i.e., the user still won't be
+    // able to refer to c{} without loading the c module).
+    //
+    class c: public file
+    {
+    public:
+      using file::file;
+
+    public:
+      // Target type metadata; defined in target.cxx (default extension "c").
+      //
+      static const target_type static_type;
+      virtual const target_type& dynamic_type () const {return static_type;}
+    };
+  }
+}
+
+#endif // BUILD2_CC_TARGET
diff --git a/build2/cc/target.cxx b/build2/cc/target.cxx
new file mode 100644
index 0000000..7c2bb24
--- /dev/null
+++ b/build2/cc/target.cxx
@@ -0,0 +1,39 @@
+// file : build2/cc/target.cxx -*- C++ -*-
+// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
+// license : MIT; see accompanying LICENSE file
+
+#include <build2/cc/target>
+
+using namespace std;
+
+namespace build2
+{
+  namespace cc
+  {
+    // Name of the variable consulted to override a target's default file
+    // extension.
+    //
+    extern const char ext_var[] = "extension"; // VC 19 rejects constexpr.
+
+    // h{}: C header. Derived from file{}; default extension "h" (taken from
+    // the extension variable if set); found via search_file().
+    //
+    extern const char h_ext_def[] = "h";
+    const target_type h::static_type
+    {
+      "h",
+      &file::static_type,
+      &target_factory<h>,
+      &target_extension_var<ext_var, h_ext_def>,
+      nullptr,
+      &search_file,
+      false
+    };
+
+    // c{}: C source file. Same setup as h{} above but with the "c" default
+    // extension.
+    //
+    extern const char c_ext_def[] = "c";
+    const target_type c::static_type
+    {
+      "c",
+      &file::static_type,
+      &target_factory<c>,
+      &target_extension_var<ext_var, c_ext_def>,
+      nullptr,
+      &search_file,
+      false
+    };
+  }
+}
diff --git a/build2/cc/types b/build2/cc/types
new file mode 100644
index 0000000..9cacc60
--- /dev/null
+++ b/build2/cc/types
@@ -0,0 +1,32 @@
+// file : build2/cc/types -*- C++ -*-
+// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BUILD2_CC_TYPES
+#define BUILD2_CC_TYPES
+
+#include <build2/types>
+#include <build2/utility>
+
+namespace build2
+{
+  namespace cc
+  {
+    // Compiler language.
+    //
+    enum class lang {c, cxx};
+
+    ostream&
+    operator<< (ostream&, lang); // utility.ixx
+
+    // Compile/link output type (executable, static, or shared).
+    //
+    // e -- executable, a -- static library (archive), s -- shared library.
+    //
+    enum class otype {e, a, s};
+
+    // Library link order.
+    //
+    // a/s -- link the static/shared variant only; a_s/s_a -- prefer the
+    // first variant but fall back to the second if it is unavailable (see
+    // link_member() in utility.cxx).
+    //
+    enum class lorder {a, s, a_s, s_a};
+  }
+}
+
+#endif // BUILD2_CC_TYPES
diff --git a/build2/cc/utility b/build2/cc/utility
new file mode 100644
index 0000000..ae19d56
--- /dev/null
+++ b/build2/cc/utility
@@ -0,0 +1,64 @@
+// file : build2/cc/utility -*- C++ -*-
+// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BUILD2_CC_UTILITY
+#define BUILD2_CC_UTILITY
+
+#include <build2/types>
+#include <build2/utility>
+
+#include <build2/target>
+#include <build2/bin/target>
+
+#include <build2/cc/types>
+
+namespace build2
+{
+  struct variable;
+
+  namespace cc
+  {
+    // Compile/link output type.
+    //
+    // Determined from the target's type (obje/obja/objs for compile,
+    // exe/liba/libs for link); see utility.ixx for the mapping.
+    //
+    otype
+    compile_type (target&);
+
+    otype
+    link_type (target&);
+
+    // Library link order.
+    //
+    // The reason we pass scope and not the target is because this function is
+    // called not only for exe/lib but also for obj as part of the library
+    // meta-information protocol implementation. Normally the bin.*.lib values
+    // will be project-wide. With this scheme they can be customized on the
+    // per-directory basis but not per-target which means all exe/lib in the
+    // same directory have to have the same link order.
+    //
+    lorder
+    link_order (scope& base, otype);
+
+    // Given the link order return the library member (liba or libs) to link.
+    //
+    target&
+    link_member (bin::lib&, lorder);
+
+    // Append or hash library options from a pair of *.export.* variables
+    // (first one is cc.export.*) recursively, prerequisite libraries first.
+    //
+    void
+    append_lib_options (cstrings&, target&, lorder,
+                        const variable&,
+                        const variable&);
+
+    void
+    hash_lib_options (sha256&, target&, lorder,
+                      const variable&,
+                      const variable&);
+  }
+}
+
+#include <build2/cc/utility.ixx>
+
+#endif // BUILD2_CC_UTILITY
diff --git a/build2/cc/utility.cxx b/build2/cc/utility.cxx
new file mode 100644
index 0000000..773ba8f
--- /dev/null
+++ b/build2/cc/utility.cxx
@@ -0,0 +1,115 @@
+// file : build2/cc/utility.cxx -*- C++ -*-
+// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
+// license : MIT; see accompanying LICENSE file
+
+#include <build2/cc/utility>
+
+#include <build2/variable>
+#include <build2/algorithm> // search()
+
+#include <build2/bin/target>
+
+using namespace std;
+
+namespace build2
+{
+ namespace cc
+ {
+ using namespace bin;
+
+    // Determine the library link order for the given output type by reading
+    // the corresponding bin.*.lib variable from the scope. The variable is
+    // a list of "shared"/"static" strings, most preferred first.
+    //
+    lorder
+    link_order (scope& bs, otype ot)
+    {
+      // Initialize to suppress 'may be used uninitialized' warning produced
+      // by MinGW GCC 5.4.0.
+      //
+      const char* var (nullptr);
+
+      switch (ot)
+      {
+      case otype::e: var = "bin.exe.lib"; break;
+      case otype::a: var = "bin.liba.lib"; break;
+      case otype::s: var = "bin.libs.lib"; break;
+      }
+
+      // NOTE(review): assumes the variable is defined and has at least one
+      // element (v[0] is accessed unchecked) -- presumably guaranteed by the
+      // bin module's defaults; confirm.
+      //
+      const auto& v (cast<strings> (bs[var]));
+      return v[0] == "shared"
+        ? v.size () > 1 && v[1] == "static" ? lorder::s_a : lorder::s
+        : v.size () > 1 && v[1] == "shared" ? lorder::a_s : lorder::a;
+    }
+
+    // Given a lib{} group and the link order, pick the member (liba{} or
+    // libs{}) to link. Fails if the required variant is not built by this
+    // library and the link order allows no fallback.
+    //
+    target&
+    link_member (bin::lib& l, lorder lo)
+    {
+      bool ls (true); // Link the shared member unless decided otherwise.
+      const string& at (cast<string> (l["bin.lib"])); // Available members.
+
+      switch (lo)
+      {
+      case lorder::a:
+      case lorder::a_s:
+        ls = false; // Fall through.
+      case lorder::s:
+      case lorder::s_a:
+        {
+          // If the preferred variant is unavailable, flip to the other one
+          // (a_s/s_a) or fail (a/s).
+          //
+          if (ls ? at == "static" : at == "shared")
+          {
+            if (lo == lorder::a_s || lo == lorder::s_a)
+              ls = !ls;
+            else
+              fail << (ls ? "shared" : "static") << " variant of " << l
+                   << " is not available";
+          }
+        }
+      }
+
+      // The member may not have been resolved yet, in which case enter it
+      // via search().
+      //
+      target* r (ls ? static_cast<target*> (l.s) : l.a);
+
+      if (r == nullptr)
+        r = &search (ls ? libs::static_type : liba::static_type,
+                     prerequisite_key {nullptr, l.key (), nullptr});
+
+      return *r;
+    }
+
+    // Append options from the cv (cc.export.*) and xv (x.export.*)
+    // variables of target l and, recursively, of all its prerequisite
+    // libraries. Recursion happens first so prerequisite libraries'
+    // options come before l's own.
+    //
+    void
+    append_lib_options (cstrings& args, target& l, lorder lo,
+                        const variable& cv,
+                        const variable& xv)
+    {
+      using namespace bin;
+
+      for (target* t: l.prerequisite_targets)
+      {
+        // Note: inner l intentionally shadows the parameter.
+        //
+        if (lib* l = t->is_a<lib> ())
+          t = &link_member (*l, lo); // Pick one of the members.
+
+        if (t->is_a<liba> () || t->is_a<libs> ())
+          append_lib_options (args, *t, lo, cv, xv);
+      }
+
+      append_options (args, l, cv);
+      append_options (args, l, xv);
+    }
+
+    // Hash the same options, in the same order, as append_lib_options()
+    // above would append (used for change detection).
+    //
+    void
+    hash_lib_options (sha256& csum, target& l, lorder lo,
+                      const variable& cv,
+                      const variable& xv)
+    {
+      using namespace bin;
+
+      for (target* t: l.prerequisite_targets)
+      {
+        // Note: inner l intentionally shadows the parameter.
+        //
+        if (lib* l = t->is_a<lib> ())
+          t = &link_member (*l, lo); // Pick one of the members.
+
+        if (t->is_a<liba> () || t->is_a<libs> ())
+          hash_lib_options (csum, *t, lo, cv, xv);
+      }
+
+      hash_options (csum, l, cv);
+      hash_options (csum, l, xv);
+    }
+ }
+}
diff --git a/build2/cc/utility.ixx b/build2/cc/utility.ixx
new file mode 100644
index 0000000..e7eb565
--- /dev/null
+++ b/build2/cc/utility.ixx
@@ -0,0 +1,33 @@
+// file : build2/cc/utility.ixx -*- C++ -*-
+// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
+// license : MIT; see accompanying LICENSE file
+
+namespace build2
+{
+  namespace cc
+  {
+    // Print the human-readable language name ("C" or "C++").
+    //
+    inline ostream&
+    operator<< (ostream& os, lang l)
+    {
+      return os << (l == lang::c ? "C" : "C++");
+    }
+
+    // Map an object file target to the output type. Note that anything
+    // that is neither obje{} nor obja{} is treated as objs{}.
+    //
+    inline otype
+    compile_type (target& t)
+    {
+      return
+        t.is_a<bin::obje> () ? otype::e :
+        t.is_a<bin::obja> () ? otype::a :
+        otype::s;
+    }
+
+    // Map an exe/liba/libs target to the output type (same fallback to
+    // otype::s as compile_type ()).
+    //
+    inline otype
+    link_type (target& t)
+    {
+      return
+        t.is_a<bin::exe> () ? otype::e :
+        t.is_a<bin::liba> () ? otype::a :
+        otype::s;
+    }
+  }
+}
diff --git a/build2/cc/windows-manifest.cxx b/build2/cc/windows-manifest.cxx
new file mode 100644
index 0000000..0666ef5
--- /dev/null
+++ b/build2/cc/windows-manifest.cxx
@@ -0,0 +1,136 @@
+// file : build2/cc/windows-manifest.cxx -*- C++ -*-
+// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
+// license : MIT; see accompanying LICENSE file
+
+#include <build2/scope>
+#include <build2/target>
+#include <build2/context>
+#include <build2/variable>
+#include <build2/filesystem>
+#include <build2/diagnostics>
+
+#include <build2/cc/link>
+
+using namespace std;
+using namespace butl;
+
+namespace build2
+{
+ namespace cc
+ {
+ // Translate the compiler target CPU value to the processorArchitecture
+ // attribute value.
+ //
+    // Translate the compiler target CPU value to the manifest
+    // processorArchitecture attribute value. Issues diagnostics and fails
+    // for CPUs we don't recognize.
+    //
+    const char*
+    windows_manifest_arch (const string& tcpu)
+    {
+      const char* pa (tcpu == "i386" || tcpu == "i686"  ? "x86"   :
+                      tcpu == "x86_64"                  ? "amd64" :
+                      nullptr);
+
+      if (pa == nullptr)
+        fail << "unable to translate CPU " << tcpu << " to manifest "
+             << "processor architecture";
+
+      return pa;
+    }
+
+ // Generate a Windows manifest and if necessary create/update the manifest
+ // file corresponding to the exe{} target. Return the manifest file path.
+ //
+    // Generate a Windows manifest and if necessary create/update the
+    // manifest file corresponding to the exe{} target t. If rpath_assembly
+    // is true, also add a dependency on the side-by-side rpath-emulating
+    // assembly (see windows-rpath.cxx). Return the manifest file path.
+    //
+    path link::
+    windows_manifest (file& t, bool rpath_assembly) const
+    {
+      tracer trace (x, "windows_manifest");
+
+      scope& rs (t.root_scope ());
+
+      const char* pa (windows_manifest_arch (cast<string> (rs[x_target_cpu])));
+
+      // First assemble the manifest content in memory so that we can
+      // compare it to what's already on disk.
+      //
+      string m;
+
+      m += "<?xml version='1.0' encoding='UTF-8' standalone='yes'?>\n";
+      m += "<assembly xmlns='urn:schemas-microsoft-com:asm.v1'\n";
+      m += "          manifestVersion='1.0'>\n";
+
+      // Program name, version, etc.
+      //
+      string name (t.path ().leaf ().string ());
+
+      m += "  <assemblyIdentity name='"; m += name; m += "'\n";
+      m += "                    type='win32'\n";
+      m += "                    processorArchitecture='"; m += pa; m += "'\n";
+      m += "                    version='0.0.0.0'/>\n";
+
+      // Our rpath-emulating assembly.
+      //
+      if (rpath_assembly)
+      {
+        m += "  <dependency>\n";
+        m += "    <dependentAssembly>\n";
+        m += "      <assemblyIdentity name='"; m += name; m += ".dlls'\n";
+        m += "                        type='win32'\n";
+        m += "                        processorArchitecture='"; m += pa; m += "'\n";
+        m += "                        language='*'\n";
+        m += "                        version='0.0.0.0'/>\n";
+        m += "    </dependentAssembly>\n";
+        m += "  </dependency>\n";
+      }
+
+      // UAC information. Without it Windows will try to guess, which, as you
+      // can imagine, doesn't end well.
+      //
+      m += "  <trustInfo xmlns='urn:schemas-microsoft-com:asm.v3'>\n";
+      m += "    <security>\n";
+      m += "      <requestedPrivileges>\n";
+      m += "        <requestedExecutionLevel level='asInvoker' uiAccess='false'/>\n";
+      m += "      </requestedPrivileges>\n";
+      m += "    </security>\n";
+      m += "  </trustInfo>\n";
+
+      m += "</assembly>\n";
+
+      // If the manifest file exists, compare to its content. If nothing
+      // changed (common case), then we can avoid any further updates.
+      //
+      // The potentially faster alternative would be to hash it and store an
+      // entry in depdb. This, however, gets a bit complicated since we will
+      // need to avoid a race between the depdb and .manifest updates.
+      //
+      path mf (t.path () + ".manifest");
+
+      if (file_exists (mf))
+      {
+        try
+        {
+          ifdstream ifs (mf);
+          string s;
+          getline (ifs, s, '\0'); // Slurp the whole file.
+
+          if (s == m)
+            return mf;
+        }
+        catch (const ifdstream::failure&)
+        {
+          // Whatever the reason we failed, let's rewrite the file.
+        }
+      }
+
+      if (verb >= 3)
+        text << "cat >" << mf;
+
+      try
+      {
+        ofdstream ofs (mf);
+        ofs << m;
+        ofs.close ();
+      }
+      catch (const ofdstream::failure& e)
+      {
+        // Note: diagnose with the file path (mf), not the manifest
+        // content (m).
+        //
+        fail << "unable to write to " << mf << ": " << e.what ();
+      }
+
+      return mf;
+    }
+}
diff --git a/build2/cc/windows-rpath.cxx b/build2/cc/windows-rpath.cxx
new file mode 100644
index 0000000..ea20a5c
--- /dev/null
+++ b/build2/cc/windows-rpath.cxx
@@ -0,0 +1,273 @@
+// file : build2/cc/windows-rpath.cxx -*- C++ -*-
+// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
+// license : MIT; see accompanying LICENSE file
+
+#include <errno.h> // E*
+
+#include <set>
+
+#include <build2/scope>
+#include <build2/context>
+#include <build2/variable>
+#include <build2/filesystem>
+#include <build2/diagnostics>
+
+#include <build2/bin/target>
+
+using namespace std;
+using namespace butl;
+
+namespace build2
+{
+ namespace cc
+ {
+ // Provide limited emulation of the rpath functionality on Windows using a
+ // side-by-side assembly. In a nutshell, the idea is to create an assembly
+ // with links to all the prerequisite DLLs.
+ //
+ // Note that currently our assemblies contain all the DLLs that the
+ // executable depends on, recursively. The alternative approach could be
+ // to also create assemblies for DLLs. This appears to be possible (but we
+ // will have to use the resource ID 2 for such a manifest). And it will
+ // probably be necessary for DLLs that are loaded dynamically with
+ // LoadLibrary(). The tricky part is how such nested assemblies will be
+ // found. Since we are effectively (from the loader's point of view)
+ // copying the DLLs, we will also have to copy their assemblies (because
+ // the loader looks for them in the same directory as the DLL). It's not
+ // clear how well such nested assemblies are supported (e.g., in Wine).
+ //
+ using namespace bin;
+
+ // Return the greatest (newest) timestamp of all the DLLs that we will be
+ // adding to the assembly or timestamp_nonexistent if there aren't any.
+ //
+    timestamp
+    windows_rpath_timestamp (file& t)
+    {
+      timestamp r (timestamp_nonexistent);
+
+      for (target* pt: t.prerequisite_targets)
+      {
+        if (libs* ls = pt->is_a<libs> ())
+        {
+          // Skip installed DLLs.
+          //
+          if (ls->path ().empty ())
+            continue;
+
+          // What if the DLL is in the same directory as the executable, will
+          // it still be found even if there is an assembly? On the other
+          // hand, handling it as any other won't hurt us much.
+          //
+          timestamp t; // NOTE(review): shadows the function parameter t.
+
+          if ((t = ls->mtime ()) > r)
+            r = t;
+
+          // Recurse into this DLL's own prerequisite DLLs.
+          //
+          if ((t = windows_rpath_timestamp (*ls)) > r)
+            r = t;
+        }
+      }
+
+      return r;
+    }
+
+ // Like *_timestamp() but actually collect the DLLs.
+ //
+    // Like *_timestamp() but actually collect the DLLs (recursively, with
+    // the set taking care of duplicates).
+    //
+    static void
+    rpath_dlls (set<libs*>& s, file& t)
+    {
+      for (target* pt: t.prerequisite_targets)
+      {
+        if (libs* ls = pt->is_a<libs> ())
+        {
+          // Skip installed DLLs.
+          //
+          if (ls->path ().empty ())
+            continue;
+
+          s.insert (ls);
+          rpath_dlls (s, *ls);
+        }
+      }
+    }
+
+ const char*
+ windows_manifest_arch (const string& tcpu); // windows-manifest.cxx
+
+    // The ts argument should be the DLLs timestamp returned by
+ // *_timestamp().
+ //
+ // The scratch argument should be true if the DLL set has changed and we
+ // need to regenerate everything from scratch. Otherwise, we try to avoid
+ // unnecessary work by comparing the DLLs timestamp against the assembly
+ // manifest file.
+ //
+    // Create/update the side-by-side assembly (<exe>.dlls/ directory plus
+    // its manifest) with links to all the DLLs that t depends on,
+    // recursively. If there are no DLLs, clean the assembly up instead.
+    //
+    void
+    windows_rpath_assembly (file& t,
+                            const string& tcpu,
+                            timestamp ts,
+                            bool scratch)
+    {
+      // Assembly paths and name.
+      //
+      dir_path ad (path_cast<dir_path> (t.path () + ".dlls"));
+      string an (ad.leaf ().string ());
+      path am (ad / path (an + ".manifest"));
+
+      // First check if we actually need to do anything. Since most of the
+      // time we won't, we don't want to combine it with the *_dlls() call
+      // below which allocates memory, etc.
+      //
+      if (!scratch)
+      {
+        // The corner case here is when _timestamp() returns nonexistent
+        // signalling that there aren't any DLLs but the assembly manifest
+        // file exists. This, however, can only happen if we somehow managed
+        // to transition from the "have DLLs" state to "no DLLs" without going
+        // through the "from scratch" update. And this shouldn't happen
+        // (famous last words before a core dump).
+        //
+        if (ts <= file_mtime (am))
+          return;
+      }
+
+      // Next collect the set of DLLs that will be in our assembly. We need to
+      // do this recursively which means we may end up with duplicates. Also,
+      // it is possible that there aren't/no longer are any DLLs which means
+      // we just need to clean things up.
+      //
+      bool empty (ts == timestamp_nonexistent);
+
+      set<libs*> dlls;
+      if (!empty)
+        rpath_dlls (dlls, t);
+
+      // Clean the assembly directory and make sure it exists. Maybe it would
+      // have been faster to overwrite the existing manifest rather than
+      // removing the old one and creating a new one. But this is definitely
+      // simpler.
+      //
+      {
+        rmdir_status s (build2::rmdir_r (ad, empty, 3));
+
+        if (empty)
+          return;
+
+        if (s == rmdir_status::not_exist)
+          mkdir (ad, 3);
+      }
+
+      const char* pa (windows_manifest_arch (tcpu));
+
+      if (verb >= 3)
+        text << "cat >" << am;
+
+      try
+      {
+        ofdstream ofs (am);
+
+        ofs << "<?xml version='1.0' encoding='UTF-8' standalone='yes'?>\n"
+            << "<assembly xmlns='urn:schemas-microsoft-com:asm.v1'\n"
+            << "          manifestVersion='1.0'>\n"
+            << "  <assemblyIdentity name='" << an << "'\n"
+            << "                    type='win32'\n"
+            << "                    processorArchitecture='" << pa << "'\n"
+            << "                    version='0.0.0.0'/>\n";
+
+        scope& as (*t.root_scope ().weak_scope ()); // Amalgamation scope.
+
+        // Create a filesystem link l to the DLL (or .pdb) file f inside the
+        // assembly directory.
+        //
+        auto link = [&as, &ad] (const path& f, const path& l)
+        {
+          auto print = [&f, &l] (const char* cmd)
+          {
+            if (verb >= 3)
+              text << cmd << ' ' << f << ' ' << l;
+          };
+
+          // First we try to create a symlink. If that fails (e.g., "Windows
+          // happens"), then we resort to hard links. If that doesn't work
+          // out either (e.g., not on the same filesystem), then we fall back
+          // to copies. So things are going to get a bit nested.
+          //
+          try
+          {
+            // For the symlink use a relative target path if both paths are
+            // part of the same amalgamation. This way if the amalgamation is
+            // moved as a whole, the links will remain valid.
+            //
+            if (f.sub (as.out_path ()))
+              mksymlink (f.relative (ad), l);
+            else
+              mksymlink (f, l);
+
+            print ("ln -s");
+          }
+          catch (const system_error& e)
+          {
+            int c (e.code ().value ());
+
+            // Only fall back for "not supported/allowed" errors; anything
+            // else is a hard failure.
+            //
+            if (c != EPERM && c != ENOSYS)
+            {
+              print ("ln -s");
+              fail << "unable to create symlink " << l << ": " << e.what ();
+            }
+
+            try
+            {
+              mkhardlink (f, l);
+              print ("ln");
+            }
+            catch (const system_error& e)
+            {
+              int c (e.code ().value ());
+
+              if (c != EPERM && c != ENOSYS)
+              {
+                print ("ln");
+                fail << "unable to create hardlink " << l << ": " << e.what ();
+              }
+
+              // Last resort: plain copy.
+              //
+              try
+              {
+                cpfile (f, l);
+                print ("cp");
+              }
+              catch (const system_error& e)
+              {
+                print ("cp");
+                fail << "unable to create copy " << l << ": " << e.what ();
+              }
+            }
+          }
+
+        };
+
+        for (libs* dll: dlls)
+        {
+          const path& dp (dll->path ()); // DLL path.
+          const path dn (dp.leaf ());    // DLL name.
+          link (dp, ad / dn);
+
+          // Link .pdb if there is one (second member of the ad hoc group).
+          //
+          if (dll->member != nullptr && dll->member->member != nullptr)
+          {
+            file& pdb (static_cast<file&> (*dll->member->member));
+            link (pdb.path (), ad / pdb.path ().leaf ());
+          }
+
+          ofs << "  <file name='" << dn.string () << "'/>\n";
+        }
+
+        ofs << "</assembly>\n";
+
+        ofs.close ();
+      }
+      catch (const ofdstream::failure& e)
+      {
+        fail << "unable to write to " << am << ": " << e.what ();
+      }
+    }
+ }
+}