aboutsummaryrefslogtreecommitdiff
path: root/build2/cxx
diff options
context:
space:
mode:
authorBoris Kolpackov <boris@codesynthesis.com>2016-08-09 11:31:53 +0200
committerBoris Kolpackov <boris@codesynthesis.com>2016-08-12 17:04:22 +0200
commit9fa5f73d00905568e8979d0c93ec4a8f645c81d5 (patch)
treef2bf937fa256c0ef2c9bbe05d3655d1985719405 /build2/cxx
parenta1b2319ff2ddc8a6f139ee364cabe236ca62e23e (diff)
Implement support for C compilation
We now have two new modules: cc (c-common) and c.
Diffstat (limited to 'build2/cxx')
-rw-r--r--build2/cxx/common60
-rw-r--r--build2/cxx/common.cxx73
-rw-r--r--build2/cxx/compile37
-rw-r--r--build2/cxx/compile.cxx1497
-rw-r--r--build2/cxx/guess112
-rw-r--r--build2/cxx/guess.cxx948
-rw-r--r--build2/cxx/install31
-rw-r--r--build2/cxx/install.cxx70
-rw-r--r--build2/cxx/link48
-rw-r--r--build2/cxx/link.cxx1852
-rw-r--r--build2/cxx/module9
-rw-r--r--build2/cxx/module.cxx505
-rw-r--r--build2/cxx/msvc.cxx331
-rw-r--r--build2/cxx/target26
-rw-r--r--build2/cxx/target.cxx24
-rw-r--r--build2/cxx/utility42
-rw-r--r--build2/cxx/utility.cxx109
-rw-r--r--build2/cxx/utility.ixx33
-rw-r--r--build2/cxx/windows-manifest.cxx136
-rw-r--r--build2/cxx/windows-rpath.cxx274
20 files changed, 197 insertions, 6020 deletions
diff --git a/build2/cxx/common b/build2/cxx/common
deleted file mode 100644
index 77f1149..0000000
--- a/build2/cxx/common
+++ /dev/null
@@ -1,60 +0,0 @@
-// file : build2/cxx/common -*- C++ -*-
-// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
-// license : MIT; see accompanying LICENSE file
-
-#ifndef BUILD2_CXX_COMMON
-#define BUILD2_CXX_COMMON
-
-#include <build2/types>
-#include <build2/utility>
-
-#include <build2/bin/target>
-
-namespace build2
-{
- namespace cxx
- {
- // Compile/link output type (executable, static, or shared).
- //
- enum class otype {e, a, s};
-
- inline otype
- compile_type (target& t)
- {
- return
- t.is_a<bin::obje> () ? otype::e :
- t.is_a<bin::obja> () ? otype::a :
- otype::s;
- }
-
- inline otype
- link_type (target& t)
- {
- return
- t.is_a<bin::exe> () ? otype::e :
- t.is_a<bin::liba> () ? otype::a :
- otype::s;
- }
-
- // Library link order.
- //
- enum class lorder {a, s, a_s, s_a};
-
- // The reason we pass scope and not the target is because this function is
- // called not only for exe/lib but also for obj as part of the library
- // meta-information protocol implementation. Normally the bin.*.lib values
- // will be project-wide. With this scheme they can be customized on the
- // per-directory basis but not per-target which means all exe/lib in the
- // same directory have to have the same link order.
- //
- lorder
- link_order (scope& base, otype);
-
- // Given the link order return the library member (liba or libs) to link.
- //
- target&
- link_member (bin::lib&, lorder);
- }
-}
-
-#endif // BUILD2_CXX_COMMON
diff --git a/build2/cxx/common.cxx b/build2/cxx/common.cxx
deleted file mode 100644
index ec724a5..0000000
--- a/build2/cxx/common.cxx
+++ /dev/null
@@ -1,73 +0,0 @@
-// file : build2/cxx/common.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
-// license : MIT; see accompanying LICENSE file
-
-#include <build2/cxx/common>
-
-#include <build2/variable>
-#include <build2/algorithm>
-
-using namespace std;
-
-namespace build2
-{
- namespace cxx
- {
- using namespace bin;
-
- lorder
- link_order (scope& bs, otype ot)
- {
- // Initialize to suppress 'may be used uninitialized' warning produced by
- // MinGW GCC 5.4.0.
- //
- const char* var (nullptr);
-
- switch (ot)
- {
- case otype::e: var = "bin.exe.lib"; break;
- case otype::a: var = "bin.liba.lib"; break;
- case otype::s: var = "bin.libs.lib"; break;
- }
-
- const auto& v (cast<strings> (bs[var]));
- return v[0] == "shared"
- ? v.size () > 1 && v[1] == "static" ? lorder::s_a : lorder::s
- : v.size () > 1 && v[1] == "shared" ? lorder::a_s : lorder::a;
- }
-
- target&
- link_member (bin::lib& l, lorder lo)
- {
- bool ls (true);
- const string& at (cast<string> (l["bin.lib"])); // Available members.
-
- switch (lo)
- {
- case lorder::a:
- case lorder::a_s:
- ls = false; // Fall through.
- case lorder::s:
- case lorder::s_a:
- {
- if (ls ? at == "static" : at == "shared")
- {
- if (lo == lorder::a_s || lo == lorder::s_a)
- ls = !ls;
- else
- fail << (ls ? "shared" : "static") << " variant of " << l
- << " is not available";
- }
- }
- }
-
- target* r (ls ? static_cast<target*> (l.s) : l.a);
-
- if (r == nullptr)
- r = &search (ls ? libs::static_type : liba::static_type,
- prerequisite_key {nullptr, l.key (), nullptr});
-
- return *r;
- }
- }
-}
diff --git a/build2/cxx/compile b/build2/cxx/compile
deleted file mode 100644
index 16c62e6..0000000
--- a/build2/cxx/compile
+++ /dev/null
@@ -1,37 +0,0 @@
-// file : build2/cxx/compile -*- C++ -*-
-// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
-// license : MIT; see accompanying LICENSE file
-
-#ifndef BUILD2_CXX_COMPILE
-#define BUILD2_CXX_COMPILE
-
-#include <build2/types>
-#include <build2/utility>
-
-#include <build2/rule>
-
-namespace build2
-{
- namespace cxx
- {
- class compile: public rule
- {
- public:
- virtual match_result
- match (action, target&, const string& hint) const;
-
- virtual recipe
- apply (action, target&, const match_result&) const;
-
- static target_state
- perform_update (action, target&);
-
- static target_state
- perform_clean (action, target&);
-
- static compile instance;
- };
- }
-}
-
-#endif // BUILD2_CXX_COMPILE
diff --git a/build2/cxx/compile.cxx b/build2/cxx/compile.cxx
deleted file mode 100644
index 56c518b..0000000
--- a/build2/cxx/compile.cxx
+++ /dev/null
@@ -1,1497 +0,0 @@
-// file : build2/cxx/compile.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
-// license : MIT; see accompanying LICENSE file
-
-#include <build2/cxx/compile>
-
-#include <map>
-#include <limits> // numeric_limits
-#include <cstdlib> // exit()
-#include <iostream> // cerr
-
-#include <butl/path-map>
-
-#include <build2/depdb>
-#include <build2/scope>
-#include <build2/context>
-#include <build2/variable>
-#include <build2/algorithm>
-#include <build2/diagnostics>
-
-#include <build2/bin/target>
-#include <build2/cxx/target>
-
-#include <build2/cxx/link>
-#include <build2/cxx/common>
-#include <build2/cxx/utility>
-
-
-using namespace std;
-using namespace butl;
-
-namespace build2
-{
- namespace cxx
- {
- using namespace bin;
-
- match_result compile::
- match (action a, target& t, const string&) const
- {
- tracer trace ("cxx::compile::match");
-
- // @@ TODO:
- //
- // - check prerequisites: single source file
- // - if path already assigned, verify extension?
- //
-
- // See if we have a C++ source file. Iterate in reverse so that
- // a source file specified for an obj*{} member overrides the one
- // specified for the group. Also "see through" groups.
- //
- for (prerequisite_member p: reverse_group_prerequisite_members (a, t))
- {
- if (p.is_a<cxx> ())
- return p;
- }
-
- l4 ([&]{trace << "no c++ source file for target " << t;});
- return nullptr;
- }
-
- static void
- inject_prerequisites (action, target&, lorder, cxx&, scope&, depdb&);
-
- recipe compile::
- apply (action a, target& xt, const match_result& mr) const
- {
- tracer trace ("cxx::compile");
-
- file& t (static_cast<file&> (xt));
-
- scope& bs (t.base_scope ());
- scope& rs (*bs.root_scope ());
-
- const string& cid (cast<string> (rs["cxx.id"]));
- const string& tsys (cast<string> (rs["cxx.target.system"]));
- const string& tclass (cast<string> (rs["cxx.target.class"]));
-
- otype ct (compile_type (t));
-
- // Derive file name from target name.
- //
- if (t.path ().empty ())
- {
- const char* e (nullptr);
-
- if (tsys == "win32-msvc")
- {
- switch (ct)
- {
- case otype::e: e = "exe.obj"; break;
- case otype::a: e = "lib.obj"; break;
- case otype::s: e = "dll.obj"; break;
- }
- }
- else if (tsys == "mingw32")
- {
- switch (ct)
- {
- case otype::e: e = "exe.o"; break;
- case otype::a: e = "a.o"; break;
- case otype::s: e = "dll.o"; break;
- }
- }
- else if (tsys == "darwin")
- {
- switch (ct)
- {
- case otype::e: e = "o"; break;
- case otype::a: e = "a.o"; break;
- case otype::s: e = "dylib.o"; break;
- }
- }
- else
- {
- switch (ct)
- {
- case otype::e: e = "o"; break;
- case otype::a: e = "a.o"; break;
- case otype::s: e = "so.o"; break;
- }
- }
-
- t.derive_path (e);
- }
-
- // Inject dependency on the output directory.
- //
- fsdir* dir (inject_fsdir (a, t));
-
- // Search and match all the existing prerequisites. The injection
- // code (below) takes care of the ones it is adding.
- //
- // When cleaning, ignore prerequisites that are not in the same
- // or a subdirectory of our project root.
- //
- optional<dir_paths> lib_paths; // Extract lazily.
-
- for (prerequisite_member p: group_prerequisite_members (a, t))
- {
- // A dependency on a library is there so that we can get its
- // cxx.export.poptions. In particular, making sure it is
- // executed before us will only restrict parallelism. But we
- // do need to pre-match it in order to get its
- // prerequisite_targets populated. This is the "library
- // meta-information protocol". See also append_lib_options()
- // above.
- //
- if (p.is_a<lib> () || p.is_a<liba> () || p.is_a<libs> ())
- {
- if (a.operation () == update_id)
- {
- // Handle imported libraries. We know that for such libraries
- // we don't need to do match() in order to get options (if
- // any, they would be set by search_library()).
- //
- if (p.proj () == nullptr ||
- link::search_library (lib_paths, p.prerequisite) == nullptr)
- {
- match_only (a, p.search ());
- }
- }
-
- continue;
- }
-
- target& pt (p.search ());
-
- if (a.operation () == clean_id && !pt.dir.sub (rs.out_path ()))
- continue;
-
- build2::match (a, pt);
- t.prerequisite_targets.push_back (&pt);
- }
-
- // Inject additional prerequisites. We only do it when performing update
- // since chances are we will have to update some of our prerequisites in
- // the process (auto-generated source code).
- //
- if (a == perform_update_id)
- {
- // The cached prerequisite target should be the same as what is in
- // t.prerequisite_targets since we used standard search() and match()
- // above.
- //
- // @@ Ugly.
- //
- cxx& st (
- dynamic_cast<cxx&> (
- mr.target != nullptr ? *mr.target : *mr.prerequisite->target));
-
- // Make sure the output directory exists.
- //
- // Is this the right thing to do? It does smell a bit, but then we do
- // worse things in inject_prerequisites() below. There is also no way
- // to postpone this until update since we need to extract and inject
- // header dependencies now (we don't want to be calling search() and
- // match() in update), which means we need to cache them now as well.
- // So the only alternative, it seems, is to cache the updates to the
- // database until later which will sure complicate (and slow down)
- // things.
- //
- if (dir != nullptr)
- execute_direct (a, *dir);
-
- depdb dd (t.path () + ".d");
-
- // First should come the rule name/version.
- //
- if (dd.expect ("cxx.compile 1") != nullptr)
- l4 ([&]{trace << "rule mismatch forcing update of " << t;});
-
- // Then the compiler checksum. Note that here we assume it
- // incorporates the (default) target so that if the compiler changes
- // but only in what it targets, then the checksum will still change.
- //
- if (dd.expect (cast<string> (rs["cxx.checksum"])) != nullptr)
- l4 ([&]{trace << "compiler mismatch forcing update of " << t;});
-
- // Then the options checksum.
- //
- // The idea is to keep them exactly as they are passed to the compiler
- // since the order may be significant.
- //
- sha256 cs;
-
- // Hash cxx.export.poptions from prerequisite libraries.
- //
- lorder lo (link_order (bs, ct));
- for (prerequisite& p: group_prerequisites (t))
- {
- target* pt (p.target); // Already searched and matched.
-
- if (lib* l = pt->is_a<lib> ())
- pt = &link_member (*l, lo);
-
- if (pt->is_a<liba> () || pt->is_a<libs> ())
- hash_lib_options (cs, *pt, "cxx.export.poptions", lo);
- }
-
- hash_options (cs, t, "cxx.poptions");
- hash_options (cs, t, "cxx.coptions");
- hash_std (cs, rs, cid, t);
-
- if (ct == otype::s)
- {
- // On Darwin, Win32 -fPIC is the default.
- //
- if (tclass == "linux" || tclass == "freebsd")
- cs.append ("-fPIC");
- }
-
- if (dd.expect (cs.string ()) != nullptr)
- l4 ([&]{trace << "options mismatch forcing update of " << t;});
-
- // Finally the source file.
- //
- if (dd.expect (st.path ()) != nullptr)
- l4 ([&]{trace << "source file mismatch forcing update of " << t;});
-
- // If any of the above checks resulted in a mismatch (different
- // compiler, options, or source file), or if the database is newer
- // than the target (interrupted update) then force the target update.
- //
- if (dd.writing () || dd.mtime () > t.mtime ())
- t.mtime (timestamp_nonexistent);
-
- inject_prerequisites (a, t, lo, st, mr.prerequisite->scope, dd);
-
- dd.close ();
- }
-
- switch (a)
- {
- case perform_update_id: return &perform_update;
- case perform_clean_id: return &perform_clean;
- default: return noop_recipe; // Configure update.
- }
- }
-
- // Reverse-lookup target type from extension.
- //
- static const target_type*
- map_extension (scope& s, const string& n, const string& e)
- {
- // We will just have to try all of the possible ones, in the
- // "most likely to match" order.
- //
- const variable& var (var_pool.find ("extension"));
-
- auto test = [&s, &n, &e, &var] (const target_type& tt)
- -> const target_type*
- {
- if (auto l = s.find (var, tt, n))
- if (cast<string> (l) == e)
- return &tt;
-
- return nullptr;
- };
-
- if (auto r = test (hxx::static_type)) return r;
- if (auto r = test (h::static_type)) return r;
- if (auto r = test (ixx::static_type)) return r;
- if (auto r = test (txx::static_type)) return r;
- if (auto r = test (cxx::static_type)) return r;
- if (auto r = test (c::static_type)) return r;
-
- return nullptr;
- }
-
- // Mapping of include prefixes (e.g., foo in <foo/bar>) for auto-
- // generated headers to directories where they will be generated.
- //
- // We are using a prefix map of directories (dir_path_map) instead
- // of just a map in order also cover sub-paths (e.g., <foo/more/bar>
- // if we continue with the example). Specifically, we need to make
- // sure we don't treat foobar as a sub-directory of foo.
- //
- // @@ The keys should be canonicalized.
- //
- using prefix_map = dir_path_map<dir_path>;
-
- static void
- append_prefixes (prefix_map& m, target& t, const char* var)
- {
- tracer trace ("cxx::append_prefixes");
-
- // If this target does not belong to any project (e.g, an
- // "imported as installed" library), then it can't possibly
- // generate any headers for us.
- //
- scope* rs (t.base_scope ().root_scope ());
- if (rs == nullptr)
- return;
-
- const dir_path& out_base (t.dir);
- const dir_path& out_root (rs->out_path ());
-
- if (auto l = t[var])
- {
- const auto& v (cast<strings> (l));
-
- for (auto i (v.begin ()), e (v.end ()); i != e; ++i)
- {
- // -I can either be in the "-Ifoo" or "-I foo" form. For VC it can
- // also be /I.
- //
- const string& o (*i);
-
- if (o.size () < 2 || (o[0] != '-' && o[0] != '/') || o[1] != 'I')
- continue;
-
- dir_path d;
- if (o.size () == 2)
- {
- if (++i == e)
- break; // Let the compiler complain.
-
- d = dir_path (*i);
- }
- else
- d = dir_path (*i, 2, string::npos);
-
- l6 ([&]{trace << "-I '" << d << "'";});
-
- // If we are relative or not inside our project root, then
- // ignore.
- //
- if (d.relative () || !d.sub (out_root))
- continue;
-
- // If the target directory is a sub-directory of the include
- // directory, then the prefix is the difference between the
- // two. Otherwise, leave it empty.
- //
- // The idea here is to make this "canonical" setup work auto-
- // magically:
- //
- // 1. We include all files with a prefix, e.g., <foo/bar>.
- // 2. The library target is in the foo/ sub-directory, e.g.,
- // /tmp/foo/.
- // 3. The poptions variable contains -I/tmp.
- //
- dir_path p (out_base.sub (d) ? out_base.leaf (d) : dir_path ());
-
- auto j (m.find (p));
-
- if (j != m.end ())
- {
- if (j->second != d)
- {
- // We used to reject duplicates but it seems this can
- // be reasonably expected to work according to the order
- // of the -I options.
- //
- if (verb >= 4)
- trace << "overriding dependency prefix '" << p << "'\n"
- << " old mapping to " << j->second << "\n"
- << " new mapping to " << d;
-
- j->second = d;
- }
- }
- else
- {
- l6 ([&]{trace << "'" << p << "' = '" << d << "'";});
- m.emplace (move (p), move (d));
- }
- }
- }
- }
-
- // Append library prefixes based on the cxx.export.poptions variables
- // recursively, prerequisite libraries first.
- //
- static void
- append_lib_prefixes (prefix_map& m, target& l, lorder lo)
- {
- for (target* t: l.prerequisite_targets)
- {
- if (t == nullptr)
- continue;
-
- if (lib* l = t->is_a<lib> ())
- t = &link_member (*l, lo); // Pick one of the members.
-
- if (t->is_a<liba> () || t->is_a<libs> ())
- append_lib_prefixes (m, *t, lo);
- }
-
- append_prefixes (m, l, "cxx.export.poptions");
- }
-
- static prefix_map
- build_prefix_map (target& t, lorder lo)
- {
- prefix_map m;
-
- // First process the include directories from prerequisite
- // libraries. Note that here we don't need to see group
- // members (see apply()).
- //
- for (prerequisite& p: group_prerequisites (t))
- {
- target* pt (p.target); // Already searched and matched.
-
- if (lib* l = pt->is_a<lib> ())
- pt = &link_member (*l, lo); // Pick one of the members.
-
- if (pt->is_a<liba> () || pt->is_a<libs> ())
- append_lib_prefixes (m, *pt, lo);
- }
-
- // Then process our own.
- //
- append_prefixes (m, t, "cxx.poptions");
-
- return m;
- }
-
- // Return the next make prerequisite starting from the specified
- // position and update position to point to the start of the
- // following prerequisite or l.size() if there are none left.
- //
- static string
- next_make (const string& l, size_t& p)
- {
- size_t n (l.size ());
-
- // Skip leading spaces.
- //
- for (; p != n && l[p] == ' '; p++) ;
-
- // Lines containing multiple prerequisites are 80 characters max.
- //
- string r;
- r.reserve (n);
-
- // Scan the next prerequisite while watching out for escape sequences.
- //
- for (; p != n && l[p] != ' '; p++)
- {
- char c (l[p]);
-
- if (p + 1 != n)
- {
- if (c == '$')
- {
- // Got to be another (escaped) '$'.
- //
- if (l[p + 1] == '$')
- ++p;
- }
- else if (c == '\\')
- {
- // This may or may not be an escape sequence depending on whether
- // what follows is "escapable".
- //
- switch (c = l[++p])
- {
- case '\\': break;
- case ' ': break;
- default: c = '\\'; --p; // Restore.
- }
- }
- }
-
- r += c;
- }
-
- // Skip trailing spaces.
- //
- for (; p != n && l[p] == ' '; p++) ;
-
- // Skip final '\'.
- //
- if (p == n - 1 && l[p] == '\\')
- p++;
-
- return r;
- }
-
- // Extract the include path from the VC++ /showIncludes output line.
- // Return empty string if the line is not an include note or include
- // error. Set the good_error flag if it is an include error (which means
- // the process will terminate with the error status that needs to be
- // ignored).
- //
- static string
- next_show (const string& l, bool& good_error)
- {
- // The include error should be the last line that we handle.
- //
- assert (!good_error);
-
- // VC++ /showIncludes output. The first line is the file being
- // compiled. Then we have the list of headers, one per line, in this
- // form (text can presumably be translated):
- //
- // Note: including file: C:\Program Files (x86)\[...]\iostream
- //
- // Finally, if we hit a non-existent header, then we end with an error
- // line in this form:
- //
- // x.cpp(3): fatal error C1083: Cannot open include file: 'd/h.hpp':
- // No such file or directory
- //
-
- // Distinguishing between the include note and the include error is
- // easy: we can just check for C1083. Distinguising between the note and
- // other errors/warnings is harder: an error could very well end with
- // what looks like a path so we cannot look for the note but rather have
- // to look for an error. Here we assume that a line containing ' CNNNN:'
- // is an error. Should be robust enough in the face of language
- // translation, etc.
- //
- size_t p (l.find (':'));
- size_t n (l.size ());
-
- for (; p != string::npos; p = ++p != n ? l.find (':', p) : string::npos)
- {
- auto isnum = [](char c) {return c >= '0' && c <= '9';};
-
- if (p > 5 &&
- l[p - 6] == ' ' &&
- l[p - 5] == 'C' &&
- isnum (l[p - 4]) &&
- isnum (l[p - 3]) &&
- isnum (l[p - 2]) &&
- isnum (l[p - 1]))
- {
- p -= 4; // Start of the error code.
- break;
- }
- }
-
- if (p == string::npos)
- {
- // Include note. We assume the path is always at the end but
- // need to handle both absolute Windows and POSIX ones.
- //
- size_t p (l.rfind (':'));
-
- if (p != string::npos)
- {
- // See if this one is part of the Windows drive letter.
- //
- if (p > 1 && p + 1 < n && // 2 chars before, 1 after.
- l[p - 2] == ' ' &&
- alpha (l[p - 1]) &&
- path::traits::is_separator (l[p + 1]))
- p = l.rfind (':', p - 2);
- }
-
- if (p != string::npos)
- {
- // VC uses indentation to indicate the include nesting so there
- // could be any number of spaces after ':'. Skip them.
- //
- p = l.find_first_not_of (' ', p + 1);
- }
-
- if (p == string::npos)
- fail << "unable to parse /showIncludes include note line";
-
- return string (l, p);
- }
- else if (l.compare (p, 4, "1083") == 0)
- {
- // Include error. The path is conveniently quoted with ''.
- //
- size_t p2 (l.rfind ('\''));
-
- if (p2 != string::npos && p2 != 0)
- {
- size_t p1 (l.rfind ('\'', p2 - 1));
-
- if (p1 != string::npos)
- {
- good_error = true;
- return string (l, p1 + 1 , p2 - p1 - 1);
- }
- }
-
- error << "unable to parse /showIncludes include error line";
- throw failed ();
- }
- else
- {
- // Some other error.
- //
- return string ();
- }
- }
-
- static void
- inject_prerequisites (action a, target& t, lorder lo,
- cxx& s, scope& ds, depdb& dd)
- {
- tracer trace ("cxx::compile::inject_prerequisites");
-
- l6 ([&]{trace << "target: " << t;});
-
- // If things go wrong (and they often do in this area), give the user a
- // bit extra context.
- //
- auto g (
- make_exception_guard (
- [&s]()
- {
- info << "while extracting header dependencies from " << s;
- }));
-
- scope& rs (t.root_scope ());
- const string& cid (cast<string> (rs["cxx.id"]));
-
- // Initialize lazily, only if required.
- //
- cstrings args;
- string cxx_std; // Storage.
-
- auto init_args = [&t, lo, &s, &rs, &cid, &args, &cxx_std] ()
- {
- const path& cxx (cast<path> (rs["config.cxx"]));
- const string& tclass (cast<string> (rs["cxx.target.class"]));
-
- args.push_back (cxx.string ().c_str ());
-
- // Add cxx.export.poptions from prerequisite libraries. Note
- // that here we don't need to see group members (see apply()).
- //
- for (prerequisite& p: group_prerequisites (t))
- {
- target* pt (p.target); // Already searched and matched.
-
- if (lib* l = pt->is_a<lib> ())
- pt = &link_member (*l, lo);
-
- if (pt->is_a<liba> () || pt->is_a<libs> ())
- append_lib_options (args, *pt, "cxx.export.poptions", lo);
- }
-
- append_options (args, t, "cxx.poptions");
-
- // Some C++ options (e.g., -std, -m) affect the preprocessor.
- //
- append_options (args, t, "cxx.coptions");
- append_std (args, rs, cid, t, cxx_std);
-
- if (t.is_a<objs> ())
- {
- // On Darwin, Win32 -fPIC is the default.
- //
- if (tclass == "linux" || tclass == "freebsd")
- args.push_back ("-fPIC");
- }
-
- if (cid == "msvc")
- {
- args.push_back ("/nologo");
-
- // See perform_update() for details on overriding the default
- // exceptions and runtime.
- //
- if (!find_option_prefix ("/EH", args))
- args.push_back ("/EHsc");
-
- if (!find_option_prefixes ({"/MD", "/MT"}, args))
- args.push_back ("/MD");
-
- args.push_back ("/EP"); // Preprocess to stdout.
- args.push_back ("/TP"); // Preprocess as C++.
- args.push_back ("/showIncludes"); // Goes to sterr becasue of /EP.
- }
- else
- {
- args.push_back ("-M"); // Note: -MM -MG skips missing <>-included.
- args.push_back ("-MG"); // Treat missing headers as generated.
-
- // Previously we used '*' as a target name but it gets expanded to
- // the current directory file names by GCC (4.9) that comes with
- // MSYS2 (2.4). Yes, this is the (bizarre) behavior of GCC being
- // executed in the shell with -MQ '*' option and not just -MQ *.
- //
- args.push_back ("-MQ"); // Quoted target name.
- args.push_back ("^"); // Old versions can't do empty target name.
- }
-
- // We are using absolute source file path in order to get absolute
- // paths in the result. Any relative paths in the result are non-
- // existent, potentially auto-generated headers.
- //
- // @@ We will also have to use absolute -I paths to guarantee
- // that. Or just detect relative paths and error out?
- //
- args.push_back (s.path ().string ().c_str ());
- args.push_back (nullptr);
- };
-
- // Build the prefix map lazily only if we have non-existent files.
- // Also reuse it over restarts since it doesn't change.
- //
- prefix_map pm;
-
- // If any prerequisites that we have extracted changed, then we have to
- // redo the whole thing. The reason for this is auto-generated headers:
- // the updated header may now include a yet-non-existent header. Unless
- // we discover this and generate it (which, BTW, will trigger another
- // restart since that header, in turn, can also include auto-generated
- // headers), we will end up with an error during compilation proper.
- //
- // One complication with this restart logic is that we will see a
- // "prefix" of prerequisites that we have already processed (i.e., they
- // are already in our prerequisite_targets list) and we don't want to
- // keep redoing this over and over again. One thing to note, however, is
- // that the prefix that we have seen on the previous run must appear
- // exactly the same in the subsequent run. The reason for this is that
- // none of the files that it can possibly be based on have changed and
- // thus it should be exactly the same. To put it another way, the
- // presence or absence of a file in the dependency output can only
- // depend on the previous files (assuming the compiler outputs them as
- // it encounters them and it is hard to think of a reason why would
- // someone do otherwise). And we have already made sure that all those
- // files are up to date. And here is the way we are going to exploit
- // this: we are going to keep track of how many prerequisites we have
- // processed so far and on restart skip right to the next one.
- //
- // And one more thing: most of the time this list of headers would stay
- // unchanged and extracting them by running the compiler every time is a
- // bit wasteful. So we are going to cache them in the depdb. If the db
- // hasn't been invalidated yet (e.g., because the compiler options have
- // changed), then we start by reading from it. If anything is out of
- // date then we use the same restart and skip logic to switch to the
- // compiler run.
- //
-
- // Update the target "smartly". Return true if it has changed or if the
- // passed timestamp is not timestamp_unknown and is older than the
- // target.
- //
- // There would normally be a lot of headers for every source file (think
- // all the system headers) and just calling execute_direct() on all of
- // them can get expensive. At the same time, most of these headers are
- // existing files that we will never be updating (again, system headers,
- // for example) and the rule that will match them is the fallback
- // file_rule. That rule has an optimization: it returns noop_recipe
- // (which causes the target state to be automatically set to unchanged)
- // if the file is known to be up to date.
- //
- auto update = [&trace, a] (path_target& pt, timestamp ts) -> bool
- {
- if (pt.state () != target_state::unchanged)
- {
- // We only want to restart if our call to execute() actually
- // caused an update. In particular, the target could already
- // have been in target_state::changed because of a dependency
- // extraction run for some other source file.
- //
- target_state os (pt.state ());
- target_state ns (execute_direct (a, pt));
-
- if (ns != os && ns != target_state::unchanged)
- {
- l6 ([&]{trace << "updated " << pt
- << "; old state " << os
- << "; new state " << ns;});
- return true;
- }
- }
-
- if (ts != timestamp_unknown)
- {
- timestamp mt (pt.mtime ());
-
- // See execute_prerequisites() for rationale behind the equal part.
- //
- return ts < mt || (ts == mt && pt.state () != target_state::changed);
- }
-
- return false;
- };
-
- // Update and add a header file to the list of prerequisite targets.
- // Depending on the cache flag, the file is assumed to either have come
- // from the depdb cache or from the compiler run. Return whether the
- // extraction process should be restarted.
- //
- auto add = [&trace, &update, &pm, a, &t, lo, &ds, &dd]
- (path f, bool cache) -> bool
- {
- if (!f.absolute ())
- {
- f.normalize ();
-
- // This is probably as often an error as an auto-generated file, so
- // trace at level 4.
- //
- l4 ([&]{trace << "non-existent header '" << f << "'";});
-
- // If we already did this and build_prefix_map() returned empty,
- // then we would have failed below.
- //
- if (pm.empty ())
- pm = build_prefix_map (t, lo);
-
- // First try the whole file. Then just the directory.
- //
- // @@ Has to be a separate map since the prefix can be
- // the same as the file name.
- //
- // auto i (pm.find (f));
-
- // Find the most qualified prefix of which we are a sub-path.
- //
- auto i (pm.end ());
-
- if (!pm.empty ())
- {
- const dir_path& d (f.directory ());
- i = pm.upper_bound (d);
-
- // Get the greatest less than, if any. We might still not be a
- // sub. Note also that we still have to check the last element if
- // upper_bound() returned end().
- //
- if (i == pm.begin () || !d.sub ((--i)->first))
- i = pm.end ();
- }
-
- if (i == pm.end ())
- fail << "unable to map presumably auto-generated header '"
- << f << "' to a project";
-
- f = i->second / f;
- }
- else
- {
- // We used to just normalize the path but that could result in an
- // invalid path (e.g., on CentOS 7 with Clang 3.4) because of the
- // symlinks. So now we realize (i.e., realpath(3)) it instead. If
- // it comes from the depdb, in which case we've already done that.
- //
- if (!cache)
- f.realize ();
- }
-
- l6 ([&]{trace << "injecting " << f;});
-
- // Split the name into its directory part, the name part, and
- // extension. Here we can assume the name part is a valid filesystem
- // name.
- //
- // Note that if the file has no extension, we record an empty
- // extension rather than NULL (which would signify that the default
- // extension should be added).
- //
- dir_path d (f.directory ());
- string n (f.leaf ().base ().string ());
- const char* es (f.extension ());
- const string* e (&extension_pool.find (es != nullptr ? es : ""));
-
- // Determine the target type.
- //
- const target_type* tt (nullptr);
-
- // See if this directory is part of any project out_root hierarchy.
- // Note that this will miss all the headers that come from src_root
- // (so they will be treated as generic C headers below). Generally,
- // we don't have the ability to determine that some file belongs to
- // src_root of some project. But that's not a problem for our
- // purposes: it is only important for us to accurately determine
- // target types for headers that could be auto-generated.
- //
- // While at it also try to determine if this target is from the src
- // or out tree of said project.
- //
- dir_path out;
-
- scope& bs (scopes.find (d));
- if (scope* rs = bs.root_scope ())
- {
- tt = map_extension (bs, n, *e);
-
- if (bs.out_path () != bs.src_path () && d.sub (bs.src_path ()))
- out = out_src (d, *rs);
- }
-
- // If it is outside any project, or the project doesn't have
- // such an extension, assume it is a plain old C header.
- //
- if (tt == nullptr)
- tt = &h::static_type;
-
- // Find or insert target.
- //
- // @@ OPT: move d, out, n
- //
- path_target& pt (
- static_cast<path_target&> (search (*tt, d, out, n, e, &ds)));
-
- // Assign path.
- //
- if (pt.path ().empty ())
- pt.path (move (f));
- else
- assert (pt.path () == f);
-
- // Match to a rule.
- //
- build2::match (a, pt);
-
- // Update.
- //
- // If this header came from the depdb, make sure it is no older than
- // the db itself (if it has changed since the db was written, then
- // chances are the cached data is stale).
- //
- bool restart (update (pt, cache ? dd.mtime () : timestamp_unknown));
-
- // Verify/add it to the dependency database. We do it after update in
- // order not to add bogus files (non-existent and without a way to
- // update).
- //
- if (!cache)
- dd.expect (pt.path ());
-
- // Add to our prerequisite target list.
- //
- t.prerequisite_targets.push_back (&pt);
-
- return restart;
- };
-
- // If nothing so far has invalidated the dependency database, then
- // try the cached data before running the compiler.
- //
- bool cache (dd.reading ());
-
- // But, before we do all that, make sure the source file itself if up to
- // date.
- //
- if (update (s, dd.mtime ()))
- {
- // If the file got updated or is newer than the database, then we
- // cannot rely on the cache any further. However, the cached data
- // could actually still be valid so the compiler run will validate it.
- //
- // We do need to update the database timestamp, however. Failed that,
- // we will keep re-validating the cached data over and over again.
- //
- if (cache)
- {
- cache = false;
- dd.touch ();
- }
- }
-
- size_t skip_count (0);
- for (bool restart (true); restart; cache = false)
- {
- restart = false;
-
- if (cache)
- {
- // If any, this is always the first run.
- //
- assert (skip_count == 0);
-
- while (dd.more ())
- {
- string* l (dd.read ());
-
- // If the line is invalid, run the compiler.
- //
- if (l == nullptr)
- {
- restart = true;
- break;
- }
-
- restart = add (path (move (*l)), true);
- skip_count++;
-
- // The same idea as in the source file update above.
- //
- if (restart)
- {
- l6 ([&]{trace << "restarting";});
- dd.touch ();
- break;
- }
- }
- }
- else
- {
- try
- {
- if (args.empty ())
- init_args ();
-
- if (verb >= 3)
- print_process (args);
-
- // For VC with /EP we need a pipe to stderr and stdout should go
- // to /dev/null.
- //
- process pr (args.data (),
- 0,
- cid == "msvc" ? -2 : -1,
- cid == "msvc" ? -1 : 2);
-
- try
- {
- // We may not read all the output (e.g., due to a restart).
- // Before we used to just close the file descriptor to signal to
- // the other end that we are not interested in the rest. This
- // works fine with GCC but Clang (3.7.0) finds this impolite and
- // complains, loudly (broken pipe). So now we are going to skip
- // until the end.
- //
- ifdstream is (cid == "msvc" ? pr.in_efd : pr.in_ofd,
- fdstream_mode::text | fdstream_mode::skip,
- ifdstream::badbit);
-
- // In some cases we may need to ignore the error return
- // status. The good_error flag keeps track of that. Similarly
- // we sometimes expect the error return status based on the
- // output we see. The bad_error flag is for that.
- //
- bool good_error (false), bad_error (false);
-
- size_t skip (skip_count);
- for (bool first (true), second (false);
- !(restart || is.eof ()); )
- {
- string l;
- getline (is, l);
-
- if (is.fail ())
- {
- if (is.eof ()) // Trailing newline.
- break;
-
- throw ifdstream::failure ("");
- }
-
- l6 ([&]{trace << "header dependency line '" << l << "'";});
-
- // Parse different dependency output formats.
- //
- if (cid == "msvc")
- {
- if (first)
- {
- // The first line should be the file we are compiling. If
- // it is not, then something went wrong even before we
- // could compile anything (e.g., file does not exist). In
- // this case the first line (and everything after it) is
- // presumably diagnostics.
- //
- if (l != s.path ().leaf ().string ())
- {
- text << l;
- bad_error = true;
- break;
- }
-
- first = false;
- continue;
- }
-
- string f (next_show (l, good_error));
-
- if (f.empty ()) // Some other diagnostics.
- {
- text << l;
- bad_error = true;
- break;
- }
-
- // Skip until where we left off.
- //
- if (skip != 0)
- {
- // We can't be skipping over a non-existent header.
- //
- assert (!good_error);
- skip--;
- }
- else
- {
- restart = add (path (move (f)), false);
- skip_count++;
-
- // If the header does not exist, we better restart.
- //
- assert (!good_error || restart);
-
- if (restart)
- l6 ([&]{trace << "restarting";});
- }
- }
- else
- {
- // Make dependency declaration.
- //
- size_t pos (0);
-
- if (first)
- {
- // Empty output should mean the wait() call below will
- // return false.
- //
- if (l.empty ())
- {
- bad_error = true;
- break;
- }
-
- assert (l[0] == '^' && l[1] == ':' && l[2] == ' ');
-
- first = false;
- second = true;
-
- // While normally we would have the source file on the
- // first line, if too long, it will be moved to the next
- // line and all we will have on this line is "^: \".
- //
- if (l.size () == 4 && l[3] == '\\')
- continue;
- else
- pos = 3; // Skip "^: ".
-
- // Fall through to the 'second' block.
- }
-
- if (second)
- {
- second = false;
- next_make (l, pos); // Skip the source file.
- }
-
- while (pos != l.size ())
- {
- string f (next_make (l, pos));
-
- // Skip until where we left off.
- //
- if (skip != 0)
- {
- skip--;
- continue;
- }
-
- restart = add (path (move (f)), false);
- skip_count++;
-
- if (restart)
- {
- l6 ([&]{trace << "restarting";});
- break;
- }
- }
- }
- }
-
- // In case of VC++, we are parsing stderr and if things go
- // south, we need to copy the diagnostics for the user to see.
- //
- // Note that the eof check is important: if the stream is at
- // eof, this and all subsequent writes to cerr will fail (and
- // you won't see a thing).
- //
- if (is.peek () != ifdstream::traits_type::eof () &&
- cid == "msvc" &&
- bad_error)
- cerr << is.rdbuf ();
-
- is.close ();
-
- // We assume the child process issued some diagnostics.
- //
- if (!pr.wait ())
- {
- if (!good_error) // Ignore expected errors (restart).
- throw failed ();
- }
- else if (bad_error)
- fail << "expected error exist status from C++ compiler";
- }
- catch (const ifdstream::failure&)
- {
- pr.wait ();
- fail << "unable to read C++ compiler header dependency output";
- }
- }
- catch (const process_error& e)
- {
- error << "unable to execute " << args[0] << ": " << e.what ();
-
- // In a multi-threaded program that fork()'ed but did not exec(),
- // it is unwise to try to do any kind of cleanup (like unwinding
- // the stack and running destructors).
- //
- if (e.child ())
- exit (1);
-
- throw failed ();
- }
- }
- }
- }
-
- // Filter cl.exe noise (msvc.cxx).
- //
- void
- msvc_filter_cl (ifdstream&, const path& src);
-
- target_state compile::
- perform_update (action a, target& xt)
- {
- file& t (static_cast<file&> (xt));
- cxx* s (execute_prerequisites<cxx> (a, t, t.mtime ()));
-
- if (s == nullptr)
- return target_state::unchanged;
-
- scope& bs (t.base_scope ());
- scope& rs (*bs.root_scope ());
-
- const path& cxx (cast<path> (rs["config.cxx"]));
- const string& cid (cast<string> (rs["cxx.id"]));
- const string& tclass (cast<string> (rs["cxx.target.class"]));
-
- otype ct (compile_type (t));
-
- cstrings args {cxx.string ().c_str ()};
-
- // Translate paths to relative (to working directory) ones. This
- // results in easier to read diagnostics.
- //
- path relo (relative (t.path ()));
- path rels (relative (s->path ()));
-
- // Add cxx.export.poptions from prerequisite libraries. Note that
- // here we don't need to see group members (see apply()).
- //
- lorder lo (link_order (bs, ct));
- for (prerequisite& p: group_prerequisites (t))
- {
- target* pt (p.target); // Already searched and matched.
-
- if (lib* l = pt->is_a<lib> ())
- pt = &link_member (*l, lo);
-
- if (pt->is_a<liba> () || pt->is_a<libs> ())
- append_lib_options (args, *pt, "cxx.export.poptions", lo);
- }
-
- append_options (args, t, "cxx.poptions");
- append_options (args, t, "cxx.coptions");
-
- string std, out, out1; // Storage.
-
- append_std (args, rs, cid, t, std);
-
- if (cid == "msvc")
- {
- // The /F*: option variants with separate names only became available
- // in VS2013/12.0. Why do we bother? Because the command line suddenly
- // becomes readable.
- //
- uint64_t cver (cast<uint64_t> (rs["cxx.version.major"]));
-
- args.push_back ("/nologo");
-
- // While we want to keep the low-level build as "pure" as possible,
- // the two misguided defaults, exceptions and runtime, just have to be
- // fixed. Otherwise the default build is pretty much unusable. But we
- // also make sure that the user can easily disable our defaults: if we
- // see any relevant options explicitly specified, we take our hands
- // off.
- //
- if (!find_option_prefix ("/EH", args))
- args.push_back ("/EHsc");
-
- // The runtime is a bit more interesting. At first it may seem like a
- // good idea to be a bit clever and use the static runtime if we are
- // building obja{}. And for obje{} we could decide which runtime to
- // use based on the library link order: if it is static-only, then we
- // could assume the static runtime. But it is indeed too clever: when
- // building liba{} we have no idea who is going to use it. It could be
- // an exe{} that links both static and shared libraries (and is
- // therefore built with the shared runtime). And to safely use the
- // static runtime, everything must be built with /MT and there should
- // be no DLLs in the picture. So we are going to play it safe and
- // always default to the shared runtime.
- //
- // In a similar vein, it would seem reasonable to use the debug runtime
- // if we are compiling with debug. But, again, there will be fireworks
- // if we have some projects built with debug and some without and then
- // we try to link them together (which is not an unreasonable thing to
- // do). So by default we will always use the release runtime.
- //
- if (!find_option_prefixes ({"/MD", "/MT"}, args))
- args.push_back ("/MD");
-
- // The presence of /Zi or /ZI causes the compiler to write debug info
- // to the .pdb file. By default it is a shared file called vcNN.pdb
- // (where NN is the VC version) created (wait for it) in the current
- // working directory (and not the directory of the .obj file). Also,
- // because it is shared, there is a special Windows service that
- // serializes access. We, of course, want none of that so we will
- // create a .pdb per object file.
- //
- // Note that this also changes the name of the .idb file (used for
- // minimal rebuild and incremental compilation): cl.exe take the /Fd
- // value and replaces the .pdb extension with .idb.
- //
- // Note also that what we are doing here appears to be incompatible
- // with PCH (/Y* options) and /Gm (minimal rebuild).
- //
- if (find_options ({"/Zi", "/ZI"}, args))
- {
- if (cver >= 18)
- args.push_back ("/Fd:");
- else
- out1 = "/Fd";
-
- out1 += relo.string ();
- out1 += ".pdb";
-
- args.push_back (out1.c_str ());
- }
-
- if (cver >= 18)
- {
- args.push_back ("/Fo:");
- args.push_back (relo.string ().c_str ());
- }
- else
- {
- out = "/Fo" + relo.string ();
- args.push_back (out.c_str ());
- }
-
- args.push_back ("/c"); // Compile only.
- args.push_back ("/TP"); // Compile as C++.
- args.push_back (rels.string ().c_str ());
- }
- else
- {
- if (ct == otype::s)
- {
- // On Darwin, Win32 -fPIC is the default.
- //
- if (tclass == "linux" || tclass == "freebsd")
- args.push_back ("-fPIC");
- }
-
- args.push_back ("-o");
- args.push_back (relo.string ().c_str ());
-
- args.push_back ("-c");
- args.push_back (rels.string ().c_str ());
- }
-
- args.push_back (nullptr);
-
- if (verb >= 2)
- print_process (args);
- else if (verb)
- text << "c++ " << *s;
-
- try
- {
- // VC cl.exe sends diagnostics to stdout. It also prints the file name
- // being compiled as the first line. So for cl.exe we redirect stdout
- // to a pipe, filter that noise out, and send the rest to stderr.
- //
- // For other compilers redirect stdout to stderr, in case any of them
- // tries to pull off something similar. For sane compilers this should
- // be harmless.
- //
- bool filter (cid == "msvc");
-
- process pr (args.data (), 0, (filter ? -1 : 2));
-
- if (filter)
- {
- try
- {
- ifdstream is (pr.in_ofd, fdstream_mode::text, ifdstream::badbit);
-
- msvc_filter_cl (is, rels);
-
- // If anything remains in the stream, send it all to stderr. Note
- // that the eof check is important: if the stream is at eof, this
- // and all subsequent writes to cerr will fail (and you won't see
- // a thing).
- //
- if (is.peek () != ifdstream::traits_type::eof ())
- cerr << is.rdbuf ();
-
- is.close ();
- }
- catch (const ifdstream::failure&) {} // Assume exits with error.
- }
-
- if (!pr.wait ())
- throw failed ();
-
- // Should we go to the filesystem and get the new mtime? We
- // know the file has been modified, so instead just use the
- // current clock time. It has the advantage of having the
- // subseconds precision.
- //
- t.mtime (system_clock::now ());
- return target_state::changed;
- }
- catch (const process_error& e)
- {
- error << "unable to execute " << args[0] << ": " << e.what ();
-
- // In a multi-threaded program that fork()'ed but did not exec(),
- // it is unwise to try to do any kind of cleanup (like unwinding
- // the stack and running destructors).
- //
- if (e.child ())
- exit (1);
-
- throw failed ();
- }
- }
-
- target_state compile::
- perform_clean (action a, target& xt)
- {
- file& t (static_cast<file&> (xt));
-
- scope& rs (t.root_scope ());
- const string& cid (cast<string> (rs["cxx.id"]));
-
- initializer_list<const char*> e;
-
- if (cid == "msvc")
- e = {".d", ".idb", ".pdb"};
- else
- e = {".d"};
-
- return clean_extra (a, t, e);
- }
-
- compile compile::instance;
- }
-}
diff --git a/build2/cxx/guess b/build2/cxx/guess
deleted file mode 100644
index 63858ad..0000000
--- a/build2/cxx/guess
+++ /dev/null
@@ -1,112 +0,0 @@
-// file : build2/cxx/guess -*- C++ -*-
-// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
-// license : MIT; see accompanying LICENSE file
-
-#ifndef BUILD2_CXX_GUESS
-#define BUILD2_CXX_GUESS
-
-#include <build2/types>
-#include <build2/utility>
-
-namespace build2
-{
- namespace cxx
- {
- // C++ compiler id consisting of a type and optional variant. If the
- // variant is not empty, then the id is spelled out as 'type-variant',
- // similar to target triplets (this also means that the type cannot
- // contain '-').
- //
- // Currently recognized compilers and their ids:
- //
- // gcc GCC g++
- // clang Vanilla Clang clang++
- // clang-apple Apple Clang clang++ and the g++ "alias"
- // icc Intel icpc
- // msvc Microsoft cl.exe
- //
- struct compiler_id
- {
- std::string type;
- std::string variant;
-
- bool
- empty () const {return type.empty ();}
-
- std::string
- string () const {return variant.empty () ? type : type + "-" + variant;}
- };
-
- inline ostream&
- operator<< (ostream& os, const compiler_id& id)
- {
- return os << id.string ();
- }
-
- // C++ compiler version. Here we map the various compiler version formats
- // to something that resembles the MAJOR.MINOR.PATCH-BUILD form of the
- // Semantic Versioning. While the MAJOR.MINOR part is relatively
- // straightforward, PATCH may be empty and BUILD can contain pretty much
- // anything (including spaces).
- //
- // gcc A.B.C[ ...] {A, B, C, ...}
- // clang A.B.C[( |-)...] {A, B, C, ...}
- // clang-apple A.B[.C] ... {A, B, C, ...}
- // icc A.B[.C.D] ... {A, B, C, D ...}
- // msvc A.B.C[.D] {A, B, C, D}
- //
- // Note that the clang-apple version is a custom Apple version and does
- // not correspond to the vanilla clang version.
- //
- struct compiler_version
- {
- std::string string;
-
- // Currently all the compilers that we support have numeric MAJOR,
- // MINOR, and PATCH components and it makes sense to represent them as
- // integers for easy comparison. If we meet a compiler for which this
- // doesn't hold, then we will probably just set these to 0 and let the
- // user deal with the string representation.
- //
- uint64_t major;
- uint64_t minor;
- uint64_t patch;
- std::string build;
- };
-
- // C++ compiler information.
- //
- // The signature is normally the -v/--version line that was used to guess
- // the compiler id and its version.
- //
- // The checksum is used to detect compiler changes. It is calculated in a
- // compiler-specific manner (usually the output of -v/--version) and is
- // not bulletproof (e.g., it most likely won't detect that the underlying
- // assembler or linker has changed). However, it should detect most
- // common cases, such as an upgrade to a new version or a configuration
- // change.
- //
- // Note that we assume the checksum incorporates the (default) target so
- // that if the compiler changes but only in what it targets, then the
- // checksum will still change. This is currently the case for all the
- // compilers that we support.
- //
- // The target is the compiler's traget architecture triplet. Note that
- // unlike all the preceding fields, this one takes into account the
- // compile options (e.g., -m32).
- //
- struct compiler_info
- {
- compiler_id id;
- compiler_version version;
- string signature;
- string checksum;
- string target;
- };
-
- compiler_info
- guess (const path& cxx, const strings* coptions);
- }
-}
-
-#endif // BUILD2_CXX_GUESS
diff --git a/build2/cxx/guess.cxx b/build2/cxx/guess.cxx
deleted file mode 100644
index 11a832c..0000000
--- a/build2/cxx/guess.cxx
+++ /dev/null
@@ -1,948 +0,0 @@
-// file : build2/cxx/guess.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
-// license : MIT; see accompanying LICENSE file
-
-#include <build2/cxx/guess>
-
-#include <cstring> // strlen()
-
-#include <build2/diagnostics>
-
-using namespace std;
-
-namespace build2
-{
- namespace cxx
- {
- // Pre-guess the compiler type based on the compiler executable name.
- // Return empty string if can't make a guess (for example, because the
- // compiler name is a generic 'c++'). Note that it only guesses the type,
- // not the variant.
- //
- static string
- pre_guess (const path& cxx)
- {
- tracer trace ("cxx::pre_guess");
-
- const string s (cxx.leaf ().base ().string ());
- size_t n (s.size ());
-
- // Name separator characters (e.g., '-' in 'g++-4.8').
- //
- auto sep = [] (char c) -> bool
- {
- return c == '-' || c == '_' || c == '.';
- };
-
- auto stem = [&sep, &s, n] (const char* x) -> bool
- {
- size_t m (strlen (x));
- size_t p (s.find (x, 0, m));
-
- return p != string::npos &&
- (p == 0 || sep (s[p - 1])) && // Separated at the beginning.
- ((p += m) == n || sep (s[p])); // Separated at the end.
- };
-
- if (stem ("g++"))
- return "gcc";
-
- if (stem ("clang++"))
- return "clang";
-
- if (stem ("icpc"))
- return "icc";
-
- // Keep this one last since 'cl' is very generic.
- //
- if (stem ("cl"))
- return "msvc";
-
- // Warn if the user specified a C compiler instead of C++.
- //
- if (stem ("gcc"))
- {
- warn << cxx << " looks like a C compiler" <<
- info << "should it be 'g++' instead of 'gcc'?";
- }
- else if (stem ("clang"))
- {
- warn << cxx << " looks like a C compiler" <<
- info << "should it be 'clang++' instead of 'clang'?";
- }
- else if (stem ("icc"))
- {
- warn << cxx << " looks like a C compiler" <<
- info << "should it be 'icpc' instead of 'icc'?";
- }
-
- l4 ([&]{trace << "unable to guess compiler type of " << cxx;});
- return "";
- }
-
- // Guess the compiler type and variant by running it. If the pre argument
- // is not empty, then only "confirm" the pre-guess. Return empty result if
- // unable to guess.
- //
- struct guess_result
- {
- compiler_id id;
- string signature;
- string checksum;
-
- bool
- empty () const {return id.empty ();}
- };
-
- static guess_result
- guess (const path& cxx, const string& pre)
- {
- tracer trace ("cxx::guess");
-
- guess_result r;
-
- // Start with -v. This will cover gcc and clang.
- //
- // While icc also writes what may seem like something we can use to
- // detect it:
- //
- // icpc version 16.0.2 (gcc version 4.9.0 compatibility)
- //
- // That first word is actually the executable name. So if we rename
- // icpc to foocpc, we will get:
- //
- // foocpc version 16.0.2 (gcc version 4.9.0 compatibility)
- //
- // In fact, if someone renames icpc to g++, there will be no way for
- // us to detect this. Oh, well, their problem.
- //
- if (r.id.empty () && (pre.empty () || pre == "gcc" || pre == "clang"))
- {
- auto f = [] (string& l) -> guess_result
- {
- // The g++ -v output will have a line (currently last) in the form:
- //
- // "gcc version X.Y.Z ..."
- //
- // The "version" word can probably be translated. For example:
- //
- // gcc version 3.4.4
- // gcc version 4.2.1
- // gcc version 4.8.2 (GCC)
- // gcc version 4.8.5 (Ubuntu 4.8.5-2ubuntu1~14.04.1)
- // gcc version 4.9.2 (Ubuntu 4.9.2-0ubuntu1~14.04)
- // gcc version 5.1.0 (Ubuntu 5.1.0-0ubuntu11~14.04.1)
- // gcc version 6.0.0 20160131 (experimental) (GCC)
- //
- if (l.compare (0, 4, "gcc ") == 0)
- return guess_result {{"gcc", ""}, move (l), ""};
-
- // The Apple clang++ -v output will have a line (currently first)
- // in the form:
- //
- // "Apple (LLVM|clang) version X.Y.Z ..."
- //
- // Apple clang version 3.1 (tags/Apple/clang-318.0.58) (based on LLVM 3.1svn)
- // Apple clang version 4.0 (tags/Apple/clang-421.0.60) (based on LLVM 3.1svn)
- // Apple clang version 4.1 (tags/Apple/clang-421.11.66) (based on LLVM 3.1svn)
- // Apple LLVM version 4.2 (clang-425.0.28) (based on LLVM 3.2svn)
- // Apple LLVM version 5.0 (clang-500.2.79) (based on LLVM 3.3svn)
- // Apple LLVM version 5.1 (clang-503.0.40) (based on LLVM 3.4svn)
- // Apple LLVM version 6.0 (clang-600.0.57) (based on LLVM 3.5svn)
- // Apple LLVM version 6.1.0 (clang-602.0.53) (based on LLVM 3.6.0svn)
- // Apple LLVM version 7.0.0 (clang-700.0.53)
- // Apple LLVM version 7.0.0 (clang-700.1.76)
- // Apple LLVM version 7.0.2 (clang-700.1.81)
- // Apple LLVM version 7.3.0 (clang-703.0.16.1)
- //
- // Note that the g++ "alias" for clang++ also includes this line
- // but it is (currently) preceded by "Configured with: ...".
- //
- // Check for Apple clang before the vanilla one since the above
- // line also includes "clang".
- //
- if (l.compare (0, 6, "Apple ") == 0 &&
- (l.compare (6, 5, "LLVM ") == 0 ||
- l.compare (6, 6, "clang ") == 0))
- return guess_result {{"clang", "apple"}, move (l), ""};
-
- // The vanilla clang++ -v output will have a line (currently first)
- // in the form:
- //
- // "[... ]clang version X.Y.Z[-...] ..."
- //
- // The "version" word can probably be translated. For example:
- //
- // FreeBSD clang version 3.4.1 (tags/RELEASE_34/dot1-final 208032) 20140512
- // Ubuntu clang version 3.5.0-4ubuntu2~trusty2 (tags/RELEASE_350/final) (based on LLVM 3.5.0)
- // Ubuntu clang version 3.6.0-2ubuntu1~trusty1 (tags/RELEASE_360/final) (based on LLVM 3.6.0)
- // clang version 3.7.0 (tags/RELEASE_370/final)
- //
- if (l.find ("clang ") != string::npos)
- return guess_result {{"clang", ""}, move (l), ""};
-
- return guess_result ();
- };
-
- // The -v output contains other information (such as the compiler
- // build configuration for gcc or the selected gcc installation for
- // clang) which makes sense to include into the compiler checksum. So
- // ask run() to calculate it for every line of the -v ouput.
- //
- sha256 cs;
-
- // Suppress all the compiler errors because we may be trying an
- // unsupported option.
- //
- r = run<guess_result> (cxx, "-v", f, false, false, &cs);
-
- if (!r.empty ())
- r.checksum = cs.string ();
- }
-
- // Next try --version to detect icc.
- //
- if (r.empty () && (pre.empty () || pre == "icc"))
- {
- auto f = [] (string& l) -> guess_result
- {
- // The first line has the " (ICC) " in it, for example:
- //
- // icpc (ICC) 9.0 20060120
- // icpc (ICC) 11.1 20100414
- // icpc (ICC) 12.1.0 20110811
- // icpc (ICC) 14.0.0 20130728
- // icpc (ICC) 15.0.2 20150121
- // icpc (ICC) 16.0.2 20160204
- //
- if (l.find (" (ICC) ") != string::npos)
- return guess_result {{"icc", ""}, move (l), ""};
-
- return guess_result ();
- };
-
- r = run<guess_result> (cxx, "--version", f, false);
- }
-
- // Finally try to run it without any options to detect msvc.
- //
- //
- if (r.empty () && (pre.empty () || pre == "msvc"))
- {
- auto f = [] (string& l) -> guess_result
- {
- // Check for "Microsoft (R)" and "C/C++" in the first line as a
- // signature since all other words/positions can be translated. For
- // example:
- //
- // Microsoft (R) 32-bit C/C++ Optimizing Compiler Version 13.10.6030 for 80x86
- // Microsoft (R) 32-bit C/C++ Optimizing Compiler Version 14.00.50727.762 for 80x86
- // Microsoft (R) 32-bit C/C++ Optimizing Compiler Version 15.00.30729.01 for 80x86
- // Compilador de optimizacion de C/C++ de Microsoft (R) version 16.00.30319.01 para x64
- // Microsoft (R) C/C++ Optimizing Compiler Version 17.00.50727.1 for x86
- // Microsoft (R) C/C++ Optimizing Compiler Version 18.00.21005.1 for x86
- // Microsoft (R) C/C++ Optimizing Compiler Version 19.00.23026 for x86
- //
- // In the recent versions the architecture is either "x86", "x64",
- // or "ARM".
- //
- if (l.find ("Microsoft (R)") != string::npos &&
- l.find ("C/C++") != string::npos)
- return guess_result {{"msvc", ""}, move (l), ""};
-
- return guess_result ();
- };
-
- r = run<guess_result> (cxx, f, false);
- }
-
- if (!r.empty ())
- {
- if (!pre.empty () && r.id.type != pre)
- {
- l4 ([&]{trace << "compiler type guess mismatch"
- << ", pre-guessed " << pre
- << ", determined " << r.id.type;});
-
- r = guess_result ();
- }
- else
- l5 ([&]{trace << cxx << " is " << r.id << ": '"
- << r.signature << "'";});
- }
- else
- l4 ([&]{trace << "unable to determine compiler type of " << cxx;});
-
- return r;
- }
-
- static compiler_info
- guess_gcc (const path& cxx, const strings* coptions, guess_result&& gr)
- {
- tracer trace ("cxx::guess_gcc");
-
- // Extract the version. The signature line has the following format
- // though language words can be translated and even rearranged (see
- // examples above).
- //
- // "gcc version A.B.C[ ...]"
- //
- string& s (gr.signature);
-
- // Scan the string as words and look for one that looks like a version.
- //
- size_t b (0), e (0);
- while (next_word (s, b, e))
- {
- // The third argument to find_first_not_of() is the length of the
- // first argument, not the length of the interval to check. So to
- // limit it to [b, e) we are also going to compare the result to the
- // end of the word position (first space). In fact, we can just check
- // if it is >= e.
- //
- if (s.find_first_not_of ("1234567890.", b, 11) >= e)
- break;
- }
-
- if (b == e)
- fail << "unable to extract gcc version from '" << s << "'";
-
- compiler_version v;
- v.string.assign (s, b, string::npos);
-
- // Split the version into components.
- //
- size_t vb (b), ve (b);
- auto next = [&s, b, e, &vb, &ve] (const char* m) -> uint64_t
- {
- try
- {
- if (next_word (s, e, vb, ve, '.'))
- return stoull (string (s, vb, ve - vb));
- }
- catch (const invalid_argument&) {}
- catch (const out_of_range&) {}
-
- error << "unable to extract gcc " << m << " version from '"
- << string (s, b, e - b) << "'";
- throw failed ();
- };
-
- v.major = next ("major");
- v.minor = next ("minor");
- v.patch = next ("patch");
-
- if (e != s.size ())
- v.build.assign (s, e + 1, string::npos);
-
- // Figure out the target architecture. This is actually a lot trickier
- // than one would have hoped.
- //
- // There is the -dumpmachine option but gcc doesn't adjust it per the
- // compile options (e.g., -m32). However, starting with 4.6 it has the
- // -print-multiarch option which gives (almost) the right answer. The
- // "almost" part has to do with it not honoring the -arch option (which
- // is really what this compiler is building for). To get to that, we
- // would have to resort to a hack like this:
- //
- // gcc -v -E - 2>&1 | grep cc1
- // .../cc1 ... -mtune=generic -march=x86-64
- //
- // Also, -print-multiarch will print am empty line if the compiler
- // actually wasn't built with multi-arch support.
- //
- // So for now this is what we are going to do for the time being: First
- // try -print-multiarch. If that works out (recent gcc configure with
- // multi-arch support), then use the result. Otherwise, fallback to
- // -dumpmachine (older gcc or not multi-arch).
- //
- cstrings args {cxx.string ().c_str (), "-print-multiarch"};
- if (coptions != nullptr)
- append_options (args, *coptions);
- args.push_back (nullptr);
-
- // The output of both -print-multiarch and -dumpmachine is a single line
- // containing just the target triplet.
- //
- auto f = [] (string& l) {return move (l);};
-
- string t (run<string> (args.data (), f, false));
-
- if (t.empty ())
- {
- l5 ([&]{trace << cxx << " doesn's support -print-multiarch, "
- << "falling back to -dumpmachine";});
-
- args[1] = "-dumpmachine";
- t = run<string> (args.data (), f);
- }
-
- if (t.empty ())
- fail << "unable to extract target architecture from " << cxx
- << " -print-multiarch or -dumpmachine output";
-
- return compiler_info {
- move (gr.id),
- move (v),
- move (gr.signature),
- move (gr.checksum), // Calculated on whole -v output.
- move (t)};
- }
-
- static compiler_info
- guess_clang (const path& cxx, const strings* coptions, guess_result&& gr)
- {
- // Extract the version. Here we will try to handle both vanilla and
- // Apple clang since the signature lines are fairly similar. They have
- // the following format though language words can probably be translated
- // and even rearranged (see examples above).
- //
- // "[... ]clang version A.B.C[( |-)...]"
- // "Apple (clang|LLVM) version A.B[.C] ..."
- //
- string& s (gr.signature);
-
- // Some overrides for testing.
- //
- //s = "clang version 3.7.0 (tags/RELEASE_370/final)";
- //
- //gr.id.variant = "apple";
- //s = "Apple LLVM version 7.3.0 (clang-703.0.16.1)";
- //s = "Apple clang version 3.1 (tags/Apple/clang-318.0.58) (based on LLVM 3.1svn)";
-
- // Scan the string as words and look for one that looks like a version.
- // Use '-' as a second delimiter to handle versions like
- // "3.6.0-2ubuntu1~trusty1".
- //
- size_t b (0), e (0);
- while (next_word (s, b, e, ' ', '-'))
- {
- // The third argument to find_first_not_of() is the length of the
- // first argument, not the length of the interval to check. So to
- // limit it to [b, e) we are also going to compare the result to the
- // end of the word position (first space). In fact, we can just check
- // if it is >= e.
- //
- if (s.find_first_not_of ("1234567890.", b, 11) >= e)
- break;
- }
-
- if (b == e)
- fail << "unable to extract clang version from '" << s << "'";
-
- compiler_version v;
- v.string.assign (s, b, string::npos);
-
- // Split the version into components.
- //
- size_t vb (b), ve (b);
- auto next = [&s, b, e, &vb, &ve] (const char* m, bool opt) -> uint64_t
- {
- try
- {
- if (next_word (s, e, vb, ve, '.'))
- return stoull (string (s, vb, ve - vb));
-
- if (opt)
- return 0;
- }
- catch (const invalid_argument&) {}
- catch (const out_of_range&) {}
-
- error << "unable to extract clang " << m << " version from '"
- << string (s, b, e - b) << "'";
- throw failed ();
- };
-
- v.major = next ("major", false);
- v.minor = next ("minor", false);
- v.patch = next ("patch", gr.id.variant == "apple");
-
- if (e != s.size ())
- v.build.assign (s, e + 1, string::npos);
-
- // Figure out the target architecture.
- //
- // Unlike gcc, clang doesn't have -print-multiarch. Its -dumpmachine,
- // however, respects the compile options (e.g., -m32).
- //
- cstrings args {cxx.string ().c_str (), "-dumpmachine"};
- if (coptions != nullptr)
- append_options (args, *coptions);
- args.push_back (nullptr);
-
- // The output of -dumpmachine is a single line containing just the
- // target triplet.
- //
- string t (run<string> (args.data (), [] (string& l) {return move (l);}));
-
- if (t.empty ())
- fail << "unable to extract target architecture from " << cxx
- << " -dumpmachine output";
-
- return compiler_info {
- move (gr.id),
- move (v),
- move (gr.signature),
- move (gr.checksum), // Calculated on whole -v output.
- move (t)};
- }
-
- static compiler_info
- guess_icc (const path& cxx, const strings* coptions, guess_result&& gr)
- {
- // Extract the version. If the version has the fourth component, then
- // the signature line (extracted with --version) won't include it. So we
- // will have to get a more elaborate line with -V. We will also have to
- // do it to get the compiler target that respects the -m option: icc
- // doesn't support -print-multiarch like gcc and its -dumpmachine
- // doesn't respect -m like clang. In fact, its -dumpmachine is
- // completely broken as it appears to print the compiler's host and not
- // the target (e.g., .../bin/ia32/icpc prints x86_64-linux-gnu).
- //
- // Some examples of the signature lines from -V output:
- //
- // Intel(R) C++ Compiler for 32-bit applications, Version 9.1 Build 20070215Z Package ID: l_cc_c_9.1.047
- // Intel(R) C++ Compiler for applications running on Intel(R) 64, Version 10.1 Build 20071116
- // Intel(R) C++ Compiler for applications running on IA-32, Version 10.1 Build 20071116 Package ID: l_cc_p_10.1.010
- // Intel C++ Intel 64 Compiler Professional for applications running on Intel 64, Version 11.0 Build 20081105 Package ID: l_cproc_p_11.0.074
- // Intel(R) C++ Intel(R) 64 Compiler Professional for applications running on Intel(R) 64, Version 11.1 Build 20091130 Package ID: l_cproc_p_11.1.064
- // Intel C++ Intel 64 Compiler XE for applications running on Intel 64, Version 12.0.4.191 Build 20110427
- // Intel(R) C++ Intel(R) 64 Compiler for applications running on Intel(R) 64, Version 16.0.2.181 Build 20160204
- // Intel(R) C++ Intel(R) 64 Compiler for applications running on IA-32, Version 16.0.2.181 Build 20160204
- // Intel(R) C++ Intel(R) 64 Compiler for applications running on Intel(R) MIC Architecture, Version 16.0.2.181 Build 20160204
- //
- // We should probably also assume the language words can be translated
- // and even rearranged.
- //
- string& s (gr.signature);
- s.clear ();
-
- auto f = [] (string& l)
- {
- return l.compare (0, 5, "Intel") == 0 && (l[5] == '(' || l[5] == ' ')
- ? move (l)
- : string ();
- };
-
- // The -V output is sent to STDERR.
- //
- s = run<string> (cxx, "-V", f, false);
-
- if (s.empty ())
- fail << "unable to extract signature from " << cxx << " -V output";
-
- if (s.find ("C++") == string::npos)
- fail << cxx << " does not appear to be the Intel C++ compiler" <<
- info << "extracted signature: '" << s << "'";
-
- // Scan the string as words and look for the version. It consist of only
- // digits and periods and contains at least one period.
- //
-
- // Some overrides for testing.
- //
- //s = "Intel(R) C++ Compiler for 32-bit applications, Version 9.1 Build 20070215Z Package ID: l_cc_c_9.1.047";
- //s = "Intel(R) C++ Compiler for applications running on Intel(R) 64, Version 10.1 Build 20071116";
- //s = "Intel(R) C++ Compiler for applications running on IA-32, Version 10.1 Build 20071116 Package ID: l_cc_p_10.1.010";
- //s = "Intel C++ Intel 64 Compiler Professional for applications running on Intel 64, Version 11.0 Build 20081105 Package ID: l_cproc_p_11.0.074";
- //s = "Intel(R) C++ Intel(R) 64 Compiler Professional for applications running on Intel(R) 64, Version 11.1 Build 20091130 Package ID: l_cproc_p_11.1.064";
- //s = "Intel C++ Intel 64 Compiler XE for applications running on Intel 64, Version 12.0.4.191 Build 20110427";
-
- size_t b (0), e (0), n;
- while (next_word (s, b, e, ' ', ',') != 0)
- {
- // The third argument to find_first_not_of() is the length of the
- // first argument, not the length of the interval to check. So to
- // limit it to [b, e) we are also going to compare the result to the
- // end of the word position (first space). In fact, we can just check
- // if it is >= e. Similar logic for find_first_of() except that we add
- // space to the list of character to make sure we don't go too far.
- //
- if (s.find_first_not_of ("1234567890.", b, 11) >= e &&
- s.find_first_of (". ", b, 2) < e)
- break;
- }
-
- if (b == e)
- fail << "unable to extract icc version from '" << s << "'";
-
- compiler_version v;
- v.string.assign (s, b, string::npos);
-
- // Split the version into components.
- //
- size_t vb (b), ve (b);
- auto next = [&s, b, e, &vb, &ve] (const char* m, bool opt) -> uint64_t
- {
- try
- {
- if (next_word (s, e, vb, ve, '.'))
- return stoull (string (s, vb, ve - vb));
-
- if (opt)
- return 0;
- }
- catch (const invalid_argument&) {}
- catch (const out_of_range&) {}
-
- error << "unable to extract icc " << m << " version from '"
- << string (s, b, e - b) << "'";
- throw failed ();
- };
-
- v.major = next ("major", false);
- v.minor = next ("minor", false);
- v.patch = next ("patch", true);
-
- if (vb != ve && next_word (s, e, vb, ve, '.'))
- v.build.assign (s, vb, ve - vb);
-
- if (e != s.size ())
- {
- if (!v.build.empty ())
- v.build += ' ';
-
- v.build.append (s, e + 1, string::npos);
- }
-
- // Figure out the target CPU by re-running the compiler with -V and
- // compile options (which may include, e.g., -m32). The output will
- // contain two CPU keywords: the first is the host and the second is the
- // target (hopefully this won't get rearranged by the translation).
- //
- // The CPU keywords (based on the above samples) appear to be:
- //
- // "32-bit"
- // "IA-32"
- // "Intel" "64"
- // "Intel(R)" "64"
- // "Intel(R)" "MIC" (-dumpmachine says: x86_64-k1om-linux)
- //
- cstrings args {cxx.string ().c_str (), "-V"};
- if (coptions != nullptr)
- append_options (args, *coptions);
- args.push_back (nullptr);
-
- // The -V output is sent to STDERR.
- //
- string t (run<string> (args.data (), f, false));
-
- if (t.empty ())
- fail << "unable to extract target architecture from " << cxx
- << " -V output";
-
- string arch;
- for (b = e = 0; (n = next_word (t, b, e, ' ', ',')) != 0; )
- {
- if (t.compare (b, n, "Intel(R)", 8) == 0 ||
- t.compare (b, n, "Intel", 5) == 0)
- {
- if ((n = next_word (t, b, e, ' ', ',')) != 0)
- {
- if (t.compare (b, n, "64", 2) == 0)
- {
- arch = "x86_64";
- }
- else if (t.compare (b, n, "MIC", 3) == 0)
- {
- arch = "x86_64"; // Plus "-k1om-linux" from -dumpmachine below.
- }
- }
- else
- break;
- }
- else if (t.compare (b, n, "IA-32", 5) == 0 ||
- t.compare (b, n, "32-bit", 6) == 0)
- {
- arch = "i386";
- }
- }
-
- if (arch.empty ())
- fail << "unable to extract icc target architecture from '" << t << "'";
-
- // So we have the CPU but we still need the rest of the triplet. While
- // icc currently doesn't support cross-compilation (at least on Linux)
- // and we could have just used the build triplet (i.e., the architecture
- // on which we are running), who knows what will happen in the future.
- // So instead we are going to use -dumpmachine and substitute the CPU.
- //
- t = run<string> (cxx, "-dumpmachine", [] (string& l) {return move (l);});
-
- if (t.empty ())
- fail << "unable to extract target architecture from " << cxx
- << " -dumpmachine output";
-
- // The first component in the triplet is always CPU.
- //
- size_t p (t.find ('-'));
-
- if (p == string::npos)
- fail << "unable to parse icc target architecture '" << t << "'";
-
- arch.append (t, p, string::npos);
-
- // Use the signature line to generate the checksum.
- //
- sha256 cs (s);
-
- return compiler_info {
- move (gr.id),
- move (v),
- move (gr.signature),
- cs.string (),
- move (arch)};
- }
-
- static compiler_info
- guess_msvc (const path&, guess_result&& gr)
- {
- // Extract the version. The signature line has the following format
- // though language words can be translated and even rearranged (see
- // examples above).
- //
- // "Microsoft (R) C/C++ Optimizing Compiler Version A.B.C[.D] for CPU"
- //
- // The CPU keywords (based on the above samples) appear to be:
- //
- // "80x86"
- // "x86"
- // "x64"
- // "ARM"
- //
- string& s (gr.signature);
-
- // Some overrides for testing.
- //
- //s = "Microsoft (R) 32-bit C/C++ Optimizing Compiler Version 15.00.30729.01 for 80x86";
- //s = "Compilador de optimizacion de C/C++ de Microsoft (R) version 16.00.30319.01 para x64";
-
- // Scan the string as words and look for the version. While doing this
- // also keep an eye on the CPU keywords.
- //
- string arch;
- size_t b (0), e (0);
-
- auto check_cpu = [&arch, &s, &b, &e] () -> bool
- {
- size_t n (e - b);
-
- if (s.compare (b, n, "x64", 3) == 0 ||
- s.compare (b, n, "x86", 3) == 0 ||
- s.compare (b, n, "ARM", 3) == 0 ||
- s.compare (b, n, "80x86", 5) == 0)
- {
- arch.assign (s, b, n);
- return true;
- }
-
- return false;
- };
-
- while (next_word (s, b, e, ' ', ','))
- {
- // First check for the CPU keywords in case in some language they come
- // before the version.
- //
- if (check_cpu ())
- continue;
-
- // The third argument to find_first_not_of() is the length of the
- // first argument, not the length of the interval to check. So to
- // limit it to [b, e) we are also going to compare the result to the
- // end of the word position (first space). In fact, we can just check
- // if it is >= e.
- //
- if (s.find_first_not_of ("1234567890.", b, 11) >= e)
- break;
- }
-
- if (b == e)
- fail << "unable to extract msvc version from '" << s << "'";
-
- compiler_version v;
- v.string.assign (s, b, e - b);
-
- // Split the version into components.
- //
- size_t vb (b), ve (b);
- auto next = [&s, b, e, &vb, &ve] (const char* m) -> uint64_t
- {
- try
- {
- if (next_word (s, e, vb, ve, '.'))
- return stoull (string (s, vb, ve - vb));
- }
- catch (const invalid_argument&) {}
- catch (const out_of_range&) {}
-
- error << "unable to extract msvc " << m << " version from '"
- << string (s, b, e - b) << "'";
- throw failed ();
- };
-
- v.major = next ("major");
- v.minor = next ("minor");
- v.patch = next ("patch");
-
- if (next_word (s, e, vb, ve, '.'))
- v.build.assign (s, vb, ve - vb);
-
- // Continue scanning for the CPU.
- //
- if (e != s.size ())
- {
- while (next_word (s, b, e, ' ', ','))
- {
- if (check_cpu ())
- break;
- }
- }
-
- if (arch.empty ())
- fail << "unable to extract msvc target architecture from "
- << "'" << s << "'";
-
- // Now we need to map x86, x64, and ARM to the target triplets. The
- // problem is, there aren't any established ones so we got to invent
- // them ourselves. Based on the discussion in <butl/triplet>, we need
- // something in the CPU-VENDOR-OS-ABI form.
- //
- // The CPU part is fairly straightforward with x86 mapped to 'i386' (or
- // maybe 'i686'), x64 to 'x86_64', and ARM to 'arm' (it could also
- // include the version, e.g., 'amrv8').
- //
- // The (toolchain) VENDOR is also straightforward: 'microsoft'. Why not
- // omit it? Two reasons: firstly, there are other compilers with the
- // otherwise same target, for example Intel C++, and it could be useful
- // to distinguish between them. Secondly, by having all four components
- // we remove any parsing ambiguity.
- //
- // OS-ABI is where things are not as clear cut. The OS part shouldn't
- // probably be just 'windows' since we have Win32 and WinCE. And WinRT.
- // And Universal Windows Platform (UWP). So perhaps the following values
- // for OS: 'win32', 'wince', 'winrt', 'winup'.
- //
- // For 'win32' the ABI part could signal the Microsoft C/C++ runtime by
- // calling it 'msvc'. And seeing that the runtimes are incompatible from
- // version to version, we should probably add the 'X.Y' version at the
- // end (so we essentially mimic the DLL name, e.g, msvcr120.dll). Some
- // suggested we also encode the runtime type (those /M* options) though
- // I am not sure: the only "redistributable" runtime is multi-threaded
- // release DLL.
- //
- // The ABI part for the other OS values needs thinking. For 'winrt' and
- // 'winup' it probably makes sense to encode the WINAPI_FAMILY macro
- // value (perhaps also with the version). Some of its values:
- //
- // WINAPI_FAMILY_APP Windows 10
- // WINAPI_FAMILY_PC_APP Windows 8.1
- // WINAPI_FAMILY_PHONE_APP Windows Phone 8.1
- //
- // For 'wince' we may also want to add the OS version, e.g., 'wince4.2'.
- //
- // Putting it all together, Visual Studio 2015 will then have the
- // following target triplets:
- //
- // x86 i386-microsoft-win32-msvc14.0
- // x64 x86_64-microsoft-win32-msvc14.0
- // ARM arm-microsoft-winup-???
- //
- if (arch == "ARM")
- fail << "cl.exe ARM/WinRT/UWP target is not yet supported";
- else
- {
- if (arch == "x64")
- arch = "x86_64-microsoft-win32-msvc";
- else if (arch == "x86" || arch == "80x86")
- arch = "i386-microsoft-win32-msvc";
- else
- assert (false);
-
- // Mapping of compiler versions to runtime versions:
- //
- // 19.00 140/14.0 VS2015
- // 18.00 120/12.0 VS2013
- // 17.00 110/11.0 VS2012
- // 16.00 100/10.0 VS2010
- // 15.00 90/9.0 VS2008
- // 14.00 80/8.0 VS2005
- // 13.10 71/7.1 VS2003
- //
- /**/ if (v.major == 19 && v.minor == 0) arch += "14.0";
- else if (v.major == 18 && v.minor == 0) arch += "12.0";
- else if (v.major == 17 && v.minor == 0) arch += "11.0";
- else if (v.major == 16 && v.minor == 0) arch += "10.0";
- else if (v.major == 15 && v.minor == 0) arch += "9.0";
- else if (v.major == 14 && v.minor == 0) arch += "8.0";
- else if (v.major == 13 && v.minor == 10) arch += "7.1";
- else fail << "unable to map msvc compiler version '" << v.string
- << "' to runtime version";
- }
-
- // Use the signature line to generate the checksum.
- //
- sha256 cs (s);
-
- return compiler_info {
- move (gr.id),
- move (v),
- move (gr.signature),
- cs.string (),
- move (arch)};
- }
-
- compiler_info
- guess (const path& cxx, const strings* coptions)
- {
- string pre (pre_guess (cxx));
- guess_result gr;
-
- // If we could pre-guess the type based on the excutable name, then
- // try the test just for that compiler.
- //
- if (!pre.empty ())
- {
- gr = guess (cxx, pre);
-
- if (gr.empty ())
- warn << cxx << " name looks like " << pre << " but it is not";
- }
-
- if (gr.empty ())
- gr = guess (cxx, "");
-
- if (gr.empty ())
- fail << "unable to guess C++ compiler type of " << cxx;
-
- const compiler_id& id (gr.id);
-
- if (id.type == "gcc")
- {
- assert (id.variant.empty ());
- return guess_gcc (cxx, coptions, move (gr));
- }
- else if (id.type == "clang")
- {
- assert (id.variant.empty () || id.variant == "apple");
- return guess_clang (cxx, coptions, move (gr));
- }
- else if (id.type == "icc")
- {
- assert (id.variant.empty ());
- return guess_icc (cxx, coptions, move (gr));
- }
- else if (id.type == "msvc")
- {
- assert (id.variant.empty ());
- return guess_msvc (cxx, move (gr));
- }
- else
- {
- assert (false);
- return compiler_info ();
- }
- }
- }
-}
diff --git a/build2/cxx/install b/build2/cxx/install
deleted file mode 100644
index 119ef94..0000000
--- a/build2/cxx/install
+++ /dev/null
@@ -1,31 +0,0 @@
-// file : build2/cxx/install -*- C++ -*-
-// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
-// license : MIT; see accompanying LICENSE file
-
-#ifndef BUILD2_CXX_INSTALL
-#define BUILD2_CXX_INSTALL
-
-#include <build2/types>
-#include <build2/utility>
-
-#include <build2/install/rule>
-
-namespace build2
-{
- namespace cxx
- {
- class install: public build2::install::file_rule
- {
- public:
- virtual target*
- filter (action, target&, prerequisite_member) const;
-
- virtual match_result
- match (action, target&, const string&) const;
-
- static install instance;
- };
- }
-}
-
-#endif // BUILD2_CXX_INSTALL
diff --git a/build2/cxx/install.cxx b/build2/cxx/install.cxx
deleted file mode 100644
index e07d115..0000000
--- a/build2/cxx/install.cxx
+++ /dev/null
@@ -1,70 +0,0 @@
-// file : build2/cxx/install.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
-// license : MIT; see accompanying LICENSE file
-
-#include <build2/cxx/install>
-
-#include <build2/bin/target>
-
-#include <build2/cxx/link>
-#include <build2/cxx/common>
-#include <build2/cxx/target>
-
-using namespace std;
-
-namespace build2
-{
- namespace cxx
- {
- using namespace bin;
-
- target* install::
- filter (action a, target& t, prerequisite_member p) const
- {
- if (t.is_a<exe> ())
- {
- // Don't install executable's prerequisite headers.
- //
- if (p.is_a<hxx> () || p.is_a<ixx> () || p.is_a<txx> () || p.is_a<h> ())
- return nullptr;
- }
-
- // If this is a shared library prerequisite, install it as long as it
- // is in the same amalgamation as we are.
- //
- // @@ Shouldn't we also install a static library prerequisite of a
- // static library?
- //
- if ((t.is_a<exe> () || t.is_a<libs> ()) &&
- (p.is_a<lib> () || p.is_a<libs> ()))
- {
- target* pt (&p.search ());
-
- // If this is the lib{} group, pick a member which we would link.
- //
- if (lib* l = pt->is_a<lib> ())
- pt = &link_member (*l, link_order (t.base_scope (), link_type (t)));
-
- if (pt->is_a<libs> ()) // Can be liba{}.
- return pt->in (t.weak_scope ()) ? pt : nullptr;
- }
-
- return file_rule::filter (a, t, p);
- }
-
- match_result install::
- match (action a, target& t, const string& hint) const
- {
- // @@ How do we split the hint between the two?
- //
-
- // We only want to handle installation if we are also the
- // ones building this target. So first run link's match().
- //
- match_result r (link::instance.match (a, t, hint));
- return r ? install::file_rule::match (a, t, "") : r;
- }
-
- install install::instance;
- }
-}
diff --git a/build2/cxx/link b/build2/cxx/link
deleted file mode 100644
index 4f00ea0..0000000
--- a/build2/cxx/link
+++ /dev/null
@@ -1,48 +0,0 @@
-// file : build2/cxx/link -*- C++ -*-
-// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
-// license : MIT; see accompanying LICENSE file
-
-#ifndef BUILD2_CXX_LINK
-#define BUILD2_CXX_LINK
-
-#include <build2/types>
-#include <build2/utility>
-
-#include <build2/rule>
-
-#include <build2/bin/target>
-
-namespace build2
-{
- namespace cxx
- {
- class link: public rule
- {
- public:
- virtual match_result
- match (action, target&, const string& hint) const;
-
- virtual recipe
- apply (action, target&, const match_result&) const;
-
- static target_state
- perform_update (action, target&);
-
- static target_state
- perform_clean (action, target&);
-
- static link instance;
-
- private:
- friend class compile;
-
- static target*
- search_library (optional<dir_paths>&, prerequisite&);
-
- static dir_paths
- extract_library_paths (scope&);
- };
- }
-}
-
-#endif // BUILD2_CXX_LINK
diff --git a/build2/cxx/link.cxx b/build2/cxx/link.cxx
deleted file mode 100644
index d19d6b1..0000000
--- a/build2/cxx/link.cxx
+++ /dev/null
@@ -1,1852 +0,0 @@
-// file : build2/cxx/link.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
-// license : MIT; see accompanying LICENSE file
-
-#include <build2/cxx/link>
-
-#include <cstdlib> // exit()
-#include <iostream> // cerr
-
-#include <butl/path-map>
-
-#include <build2/depdb>
-#include <build2/scope>
-#include <build2/context>
-#include <build2/variable>
-#include <build2/algorithm>
-#include <build2/filesystem>
-#include <build2/diagnostics>
-
-#include <build2/bin/target>
-#include <build2/cxx/target>
-
-#include <build2/cxx/common>
-#include <build2/cxx/utility>
-
-using namespace std;
-using namespace butl;
-
-namespace build2
-{
- namespace cxx
- {
- using namespace bin;
-
- // Extract system library search paths from GCC or compatible (Clang,
- // Intel C++) using the -print-search-dirs option.
- //
- static void
- gcc_library_search_paths (scope& bs, const string& cid, dir_paths& r)
- {
- scope& rs (*bs.root_scope ());
-
- cstrings args;
- string std_storage;
-
- args.push_back (cast<path> (rs["config.cxx"]).string ().c_str ());
- append_options (args, bs, "cxx.coptions");
- append_std (args, rs, cid, bs, std_storage);
- append_options (args, bs, "cxx.loptions");
- args.push_back ("-print-search-dirs");
- args.push_back (nullptr);
-
- if (verb >= 3)
- print_process (args);
-
- string l;
- try
- {
- process pr (args.data (), 0, -1); // Open pipe to stdout.
-
- try
- {
- ifdstream is (pr.in_ofd, fdstream_mode::skip, ifdstream::badbit);
-
- string s;
- while (getline (is, s))
- {
- if (s.compare (0, 12, "libraries: =") == 0)
- {
- l.assign (s, 12, string::npos);
- break;
- }
- }
-
- is.close (); // Don't block.
-
- if (!pr.wait ())
- throw failed ();
- }
- catch (const ifdstream::failure&)
- {
- pr.wait ();
- fail << "error reading C++ compiler -print-search-dirs output";
- }
- }
- catch (const process_error& e)
- {
- error << "unable to execute " << args[0] << ": " << e.what ();
-
- if (e.child ())
- exit (1);
-
- throw failed ();
- }
-
- if (l.empty ())
- fail << "unable to extract C++ compiler system library paths";
-
- // Now the fun part: figuring out which delimiter is used. Normally it
- // is ':' but on Windows it is ';' (or can be; who knows for sure). Also
- // note that these paths are absolute (or should be). So here is what we
- // are going to do: first look for ';'. If found, then that's the
- // delimiter. If not found, then there are two cases: it is either a
- // single Windows path or the delimiter is ':'. To distinguish these two
- // cases we check if the path starts with a Windows drive.
- //
- char d (';');
- string::size_type e (l.find (d));
-
- if (e == string::npos &&
- (l.size () < 2 || l[0] == '/' || l[1] != ':'))
- {
- d = ':';
- e = l.find (d);
- }
-
- // Now chop it up. We already have the position of the first delimiter
- // (if any).
- //
- for (string::size_type b (0);; e = l.find (d, (b = e + 1)))
- {
- r.emplace_back (l, b, (e != string::npos ? e - b : e));
- r.back ().normalize ();
-
- if (e == string::npos)
- break;
- }
- }
-
- // Extract system library search paths from MSVC.
- //
- void
- msvc_library_search_paths (scope&, const string&, dir_paths&); // msvc.cxx
-
- dir_paths link::
- extract_library_paths (scope& bs)
- {
- dir_paths r;
- scope& rs (*bs.root_scope ());
- const string& cid (cast<string> (rs["cxx.id"]));
-
- // Extract user-supplied search paths (i.e., -L, /LIBPATH).
- //
- if (auto l = bs["cxx.loptions"])
- {
- const auto& v (cast<strings> (l));
-
- for (auto i (v.begin ()), e (v.end ()); i != e; ++i)
- {
- const string& o (*i);
-
- dir_path d;
-
- if (cid == "msvc")
- {
- // /LIBPATH:<dir> (case-insensitive).
- //
- if ((o[0] == '/' || o[0] == '-') &&
- (i->compare (1, 8, "LIBPATH:") == 0 ||
- i->compare (1, 8, "libpath:") == 0))
- d = dir_path (*i, 9, string::npos);
- else
- continue;
- }
- else
- {
- // -L can either be in the "-L<dir>" or "-L <dir>" form.
- //
- if (*i == "-L")
- {
- if (++i == e)
- break; // Let the compiler complain.
-
- d = dir_path (*i);
- }
- else if (i->compare (0, 2, "-L") == 0)
- d = dir_path (*i, 2, string::npos);
- else
- continue;
- }
-
- // Ignore relative paths. Or maybe we should warn?
- //
- if (!d.relative ())
- r.push_back (move (d));
- }
- }
-
- if (cid == "msvc")
- msvc_library_search_paths (bs, cid, r);
- else
- gcc_library_search_paths (bs, cid, r);
-
- return r;
- }
-
- // Alternative search for VC (msvc.cxx).
- //
- liba*
- msvc_search_static (const path& ld, const dir_path&, prerequisite&);
-
- libs*
- msvc_search_shared (const path& ld, const dir_path&, prerequisite&);
-
- target* link::
- search_library (optional<dir_paths>& spc, prerequisite& p)
- {
- tracer trace ("cxx::link::search_library");
-
- // @@ This is hairy enough to warrant a separate implementation for
- // Windows.
- //
-
- // First check the cache.
- //
- if (p.target != nullptr)
- return p.target;
-
- scope& rs (*p.scope.root_scope ());
- const string& cid (cast<string> (rs["cxx.id"]));
- const string& tsys (cast<string> (rs["cxx.target.system"]));
- const string& tclass (cast<string> (rs["cxx.target.class"]));
-
- bool l (p.is_a<lib> ());
- const string* ext (l ? nullptr : p.ext); // Only for liba/libs.
-
- // Then figure out what we need to search for.
- //
-
- // liba
- //
- path an;
- const string* ae (nullptr);
-
- if (l || p.is_a<liba> ())
- {
- // We are trying to find a library in the search paths extracted from
- // the compiler. It would only be natural if we used the library
- // prefix/extension that correspond to this compiler and/or its
- // target.
- //
- // Unlike MinGW, VC's .lib/.dll.lib naming is by no means standard and
- // we might need to search for other names. In fact, there is no
- // reliable way to guess from the file name what kind of library it
- // is, static or import and we will have to do deep inspection of such
- // alternative names. However, if we did find .dll.lib, then we can
- // assume that .lib is the static library without any deep inspection
- // overhead.
- //
- const char* e ("");
-
- if (cid == "msvc")
- {
- an = path (p.name);
- e = "lib";
- }
- else
- {
- an = path ("lib" + p.name);
- e = "a";
- }
-
- ae = ext == nullptr
- ? &extension_pool.find (e)
- : ext;
-
- if (!ae->empty ())
- {
- an += '.';
- an += *ae;
- }
- }
-
- // libs
- //
- path sn;
- const string* se (nullptr);
-
- if (l || p.is_a<libs> ())
- {
- const char* e ("");
-
- if (cid == "msvc")
- {
- sn = path (p.name);
- e = "dll.lib";
- }
- else
- {
- sn = path ("lib" + p.name);
-
- if (tsys == "darwin") e = "dylib";
- else if (tsys == "mingw32") e = "dll.a"; // See search code below.
- else e = "so";
- }
-
- se = ext == nullptr
- ? &extension_pool.find (e)
- : ext;
-
- if (!se->empty ())
- {
- sn += '.';
- sn += *se;
- }
- }
-
- // Now search.
- //
- if (!spc)
- spc = extract_library_paths (p.scope);
-
- liba* a (nullptr);
- libs* s (nullptr);
-
- path f; // Reuse the buffer.
- const dir_path* pd;
- for (const dir_path& d: *spc)
- {
- timestamp mt;
-
- // libs
- //
- // Look for the shared library first. The order is important for VC:
- // only if we found .dll.lib can we safely assumy that just .lib is a
- // static library.
- //
- if (!sn.empty ())
- {
- f = d;
- f /= sn;
- mt = file_mtime (f);
-
- if (mt != timestamp_nonexistent)
- {
- // On Windows what we found is the import library which we need
- // to make the first ad hoc member of libs{}.
- //
- if (tclass == "windows")
- {
- s = &targets.insert<libs> (
- d, dir_path (), p.name, nullptr, trace);
-
- if (s->member == nullptr)
- {
- libi& i (
- targets.insert<libi> (
- d, dir_path (), p.name, se, trace));
-
- if (i.path ().empty ())
- i.path (move (f));
-
- i.mtime (mt);
-
- // Presumably there is a DLL somewhere, we just don't know
- // where (and its possible we might have to look for one if we
- // decide we need to do rpath emulation for installed
- // libraries as well). We will represent this as empty path
- // but valid timestamp (aka "trust me, it's there").
- //
- s->mtime (mt);
- s->member = &i;
- }
- }
- else
- {
- s = &targets.insert<libs> (d, dir_path (), p.name, se, trace);
-
- if (s->path ().empty ())
- s->path (move (f));
-
- s->mtime (mt);
- }
- }
- else if (ext == nullptr && tsys == "mingw32")
- {
- // Above we searched for the import library (.dll.a) but if it's
- // not found, then we also search for the .dll (unless the
- // extension was specified explicitly) since we can link to it
- // directly. Note also that the resulting libs{} would end up
- // being the .dll.
- //
- se = &extension_pool.find ("dll");
- f = f.base (); // Remove .a from .dll.a.
- mt = file_mtime (f);
-
- if (mt != timestamp_nonexistent)
- {
- s = &targets.insert<libs> (d, dir_path (), p.name, se, trace);
-
- if (s->path ().empty ())
- s->path (move (f));
-
- s->mtime (mt);
- }
- }
- }
-
- // liba
- //
- // If we didn't find .dll.lib then we cannot assume .lib is static.
- //
- if (!an.empty () && (s != nullptr || cid != "msvc"))
- {
- f = d;
- f /= an;
-
- if ((mt = file_mtime (f)) != timestamp_nonexistent)
- {
- // Enter the target. Note that because the search paths are
- // normalized, the result is automatically normalized as well.
- //
- // Note that this target is outside any project which we treat
- // as out trees.
- //
- a = &targets.insert<liba> (d, dir_path (), p.name, ae, trace);
-
- if (a->path ().empty ())
- a->path (move (f));
-
- a->mtime (mt);
- }
- }
-
- // Alternative search for VC.
- //
- if (cid == "msvc")
- {
- const path& ld (cast<path> (rs["config.bin.ld"]));
-
- if (s == nullptr && !sn.empty ())
- s = msvc_search_shared (ld, d, p);
-
- if (a == nullptr && !an.empty ())
- a = msvc_search_static (ld, d, p);
- }
-
- if (a != nullptr || s != nullptr)
- {
- pd = &d;
- break;
- }
- }
-
- if (a == nullptr && s == nullptr)
- return nullptr;
-
- // Add the "using static/shared library" macro (used, for example, to
- // handle DLL export). The absence of either of these macros would mean
- // some other build system that cannot distinguish between the two.
- //
- auto add_macro = [] (target& t, const char* suffix)
- {
- // If there is already a value, don't add anything, we don't want to
- // be accumulating defines nor messing with custom values.
- //
- auto p (t.vars.insert ("cxx.export.poptions"));
-
- if (p.second)
- {
- // The "standard" macro name will be LIB<NAME>_{STATIC,SHARED},
- // where <name> is the target name. Here we want to strike a balance
- // between being unique and not too noisy.
- //
- string d ("-DLIB");
-
- auto upcase_sanitize = [] (char c) -> char
- {
- if (c == '-' || c == '+' || c == '.')
- return '_';
- else
- return ucase (c);
- };
-
- transform (t.name.begin (),
- t.name.end (),
- back_inserter (d),
- upcase_sanitize);
-
- d += '_';
- d += suffix;
-
- strings o;
- o.push_back (move (d));
- p.first.get () = move (o);
- }
- };
-
- if (a != nullptr)
- add_macro (*a, "STATIC");
-
- if (s != nullptr)
- add_macro (*s, "SHARED");
-
- if (l)
- {
- // Enter the target group.
- //
- lib& l (targets.insert<lib> (*pd, dir_path (), p.name, p.ext, trace));
-
- // It should automatically link-up to the members we have found.
- //
- assert (l.a == a);
- assert (l.s == s);
-
- // Set the bin.lib variable to indicate what's available.
- //
- const char* bl (a != nullptr
- ? (s != nullptr ? "both" : "static")
- : "shared");
- l.assign ("bin.lib") = bl;
-
- p.target = &l;
- }
- else
- p.target = p.is_a<liba> () ? static_cast<target*> (a) : s;
-
- return p.target;
- }
-
- match_result link::
- match (action a, target& t, const string& hint) const
- {
- tracer trace ("cxx::link::match");
-
- // @@ TODO:
- //
- // - if path already assigned, verify extension?
- //
- // @@ Q:
- //
- // - if there is no .o, are we going to check if the one derived
- // from target exist or can be built? A: No.
- // What if there is a library. Probably ok if static, not if shared,
- // (i.e., a utility library).
- //
-
- otype lt (link_type (t));
-
- // Scan prerequisites and see if we can work with what we've got.
- //
- bool seen_cxx (false), seen_c (false), seen_obj (false),
- seen_lib (false);
-
- for (prerequisite_member p: group_prerequisite_members (a, t))
- {
- if (p.is_a<cxx> ())
- {
- seen_cxx = seen_cxx || true;
- }
- else if (p.is_a<c> ())
- {
- seen_c = seen_c || true;
- }
- else if (p.is_a<obj> ())
- {
- seen_obj = seen_obj || true;
- }
- else if (p.is_a<obje> ())
- {
- if (lt != otype::e)
- fail << "obje{} as prerequisite of " << t;
-
- seen_obj = seen_obj || true;
- }
- else if (p.is_a<obja> ())
- {
- if (lt != otype::a)
- fail << "obja{} as prerequisite of " << t;
-
- seen_obj = seen_obj || true;
- }
- else if (p.is_a<objs> ())
- {
- if (lt != otype::s)
- fail << "objs{} as prerequisite of " << t;
-
- seen_obj = seen_obj || true;
- }
- else if (p.is_a<lib> () ||
- p.is_a<liba> () ||
- p.is_a<libs> ())
- {
- seen_lib = seen_lib || true;
- }
- }
-
- // We will only chain a C source if there is also a C++ source or we
- // were explicitly told to.
- //
- if (seen_c && !seen_cxx && hint < "cxx")
- {
- l4 ([&]{trace << "C prerequisite(s) without C++ or hint";});
- return nullptr;
- }
-
- // If we have any prerequisite libraries (which also means that
- // we match), search/import and pre-match them to implement the
- // "library meta-information protocol". Don't do this if we are
- // called from the install rule just to check if we would match.
- //
- if (seen_lib && lt != otype::e &&
- a.operation () != install_id && a.outer_operation () != install_id)
- {
- if (t.group != nullptr)
- t.group->prerequisite_targets.clear (); // lib{}'s
-
- optional<dir_paths> lib_paths; // Extract lazily.
-
- for (prerequisite_member p: group_prerequisite_members (a, t))
- {
- if (p.is_a<lib> () || p.is_a<liba> () || p.is_a<libs> ())
- {
- target* pt (nullptr);
-
- // Handle imported libraries.
- //
- if (p.proj () != nullptr)
- pt = search_library (lib_paths, p.prerequisite);
-
- if (pt == nullptr)
- {
- pt = &p.search ();
- match_only (a, *pt);
- }
-
- // If the prerequisite came from the lib{} group, then also
- // add it to lib's prerequisite_targets.
- //
- if (!p.prerequisite.belongs (t))
- t.group->prerequisite_targets.push_back (pt);
-
- t.prerequisite_targets.push_back (pt);
- }
- }
- }
-
- return seen_cxx || seen_c || seen_obj || seen_lib ? &t : nullptr;
- }
-
- recipe link::
- apply (action a, target& xt, const match_result&) const
- {
- tracer trace ("cxx::link::apply");
-
- file& t (static_cast<file&> (xt));
-
- scope& bs (t.base_scope ());
- scope& rs (*bs.root_scope ());
-
- const string& cid (cast<string> (rs["cxx.id"]));
- const string& tsys (cast<string> (rs["cxx.target.system"]));
- const string& tclass (cast<string> (rs["cxx.target.class"]));
-
- otype lt (link_type (t));
- lorder lo (link_order (bs, lt));
-
- // Derive file name from target name.
- //
- if (t.path ().empty ())
- {
- const char* p (nullptr);
- const char* e (nullptr);
-
- switch (lt)
- {
- case otype::e:
- {
- if (tclass == "windows")
- e = "exe";
- else
- e = "";
-
- break;
- }
- case otype::a:
- {
- // To be anally precise, let's use the ar id to decide how to name
- // the library in case, for example, someone wants to archive
- // VC-compiled object files with MinGW ar or vice versa.
- //
- if (cast<string> (rs["bin.ar.id"]) == "msvc")
- {
- e = "lib";
- }
- else
- {
- p = "lib";
- e = "a";
- }
-
- if (auto l = t["bin.libprefix"])
- p = cast<string> (l).c_str ();
-
- break;
- }
- case otype::s:
- {
- if (tclass == "macosx")
- {
- p = "lib";
- e = "dylib";
- }
- else if (tclass == "windows")
- {
- // On Windows libs{} is an ad hoc group. The libs{} itself is
- // the DLL and we add libi{} import library as its member (see
- // below).
- //
- if (tsys == "mingw32")
- p = "lib";
-
- e = "dll";
- }
- else
- {
- p = "lib";
- e = "so";
- }
-
- if (auto l = t["bin.libprefix"])
- p = cast<string> (l).c_str ();
-
- break;
- }
- }
-
- t.derive_path (e, p);
- }
-
- // Add ad hoc group members.
- //
- auto add_adhoc = [a, &bs] (target& t, const char* type) -> file&
- {
- const target_type& tt (*bs.find_target_type (type));
-
- if (t.member != nullptr) // Might already be there.
- assert (t.member->type () == tt);
- else
- t.member = &search (tt, t.dir, t.out, t.name, nullptr, nullptr);
-
- file& r (static_cast<file&> (*t.member));
- r.recipe (a, group_recipe);
- return r;
- };
-
- if (tclass == "windows")
- {
- // Import library.
- //
- if (lt == otype::s)
- {
- file& imp (add_adhoc (t, "libi"));
-
- // Usually on Windows the import library is called the same as the
- // DLL but with the .lib extension. Which means it clashes with the
- // static library. Instead of decorating the static library name
- // with ugly suffixes (as is customary), let's use the MinGW
- // approach (one must admit it's quite elegant) and call it
- // .dll.lib.
- //
- if (imp.path ().empty ())
- imp.derive_path (t.path (), tsys == "mingw32" ? "a" : "lib");
- }
-
- // PDB
- //
- if (lt != otype::a &&
- cid == "msvc" &&
- find_option ("/DEBUG", t, "cxx.loptions", true))
- {
- // Add after the import library if any.
- //
- file& pdb (add_adhoc (t.member == nullptr ? t : *t.member, "pdb"));
-
- // We call it foo.{exe,dll}.pdb rather than just foo.pdb because we
- // can have both foo.exe and foo.dll in the same directory.
- //
- if (pdb.path ().empty ())
- pdb.derive_path (t.path (), "pdb");
- }
- }
-
- t.prerequisite_targets.clear (); // See lib pre-match in match() above.
-
- // Inject dependency on the output directory.
- //
- inject_fsdir (a, t);
-
- optional<dir_paths> lib_paths; // Extract lazily.
-
- // Process prerequisites: do rule chaining for C and C++ source
- // files as well as search and match.
- //
- // When cleaning, ignore prerequisites that are not in the same
- // or a subdirectory of our project root.
- //
- const target_type& ott (lt == otype::e ? obje::static_type :
- lt == otype::a ? obja::static_type :
- objs::static_type);
-
- for (prerequisite_member p: group_prerequisite_members (a, t))
- {
- target* pt (nullptr);
-
- if (!p.is_a<c> () && !p.is_a<cxx> ())
- {
- // Handle imported libraries.
- //
- if (p.proj () != nullptr)
- pt = search_library (lib_paths, p.prerequisite);
-
- // The rest is the same basic logic as in search_and_match().
- //
- if (pt == nullptr)
- pt = &p.search ();
-
- if (a.operation () == clean_id && !pt->dir.sub (rs.out_path ()))
- continue; // Skip.
-
- // If this is the obj{} or lib{} target group, then pick the
- // appropriate member and make sure it is searched and matched.
- //
- if (obj* o = pt->is_a<obj> ())
- {
- switch (lt)
- {
- case otype::e: pt = o->e; break;
- case otype::a: pt = o->a; break;
- case otype::s: pt = o->s; break;
- }
-
- if (pt == nullptr)
- pt = &search (ott, p.key ());
- }
- else if (lib* l = pt->is_a<lib> ())
- {
- pt = &link_member (*l, lo);
- }
-
- build2::match (a, *pt);
- t.prerequisite_targets.push_back (pt);
- continue;
- }
-
- // Which scope shall we use to resolve the root? Unlikely, but
- // possible, the prerequisite is from a different project
- // altogether. So we are going to use the target's project.
- //
-
- // @@ Why are we creating the obj{} group if the source came from a
- // group?
- //
- bool group (!p.prerequisite.belongs (t)); // Group's prerequisite.
-
- const prerequisite_key& cp (p.key ()); // c(xx){} prerequisite key.
- const target_type& tt (group ? obj::static_type : ott);
-
- // Come up with the obj*{} target. The c(xx){} prerequisite directory
- // can be relative (to the scope) or absolute. If it is relative, then
- // use it as is. If absolute, then translate it to the corresponding
- // directory under out_root. While the c(xx){} directory is most
- // likely under src_root, it is also possible it is under out_root
- // (e.g., generated source).
- //
- dir_path d;
- {
- const dir_path& cpd (*cp.tk.dir);
-
- if (cpd.relative () || cpd.sub (rs.out_path ()))
- d = cpd;
- else
- {
- if (!cpd.sub (rs.src_path ()))
- fail << "out of project prerequisite " << cp <<
- info << "specify corresponding " << tt.name << "{} "
- << "target explicitly";
-
- d = rs.out_path () / cpd.leaf (rs.src_path ());
- }
- }
-
- // obj*{} is always in the out tree.
- //
- target& ot (
- search (tt, d, dir_path (), *cp.tk.name, nullptr, cp.scope));
-
- // If we are cleaning, check that this target is in the same or
- // a subdirectory of our project root.
- //
- if (a.operation () == clean_id && !ot.dir.sub (rs.out_path ()))
- {
- // If we shouldn't clean obj{}, then it is fair to assume
- // we shouldn't clean cxx{} either (generated source will
- // be in the same directory as obj{} and if not, well, go
- // find yourself another build system ;-)).
- //
- continue; // Skip.
- }
-
- // If we have created the obj{} target group, pick one of its
- // members; the rest would be primarily concerned with it.
- //
- if (group)
- {
- obj& o (static_cast<obj&> (ot));
-
- switch (lt)
- {
- case otype::e: pt = o.e; break;
- case otype::a: pt = o.a; break;
- case otype::s: pt = o.s; break;
- }
-
- if (pt == nullptr)
- pt = &search (ott, o.dir, o.out, o.name, o.ext, nullptr);
- }
- else
- pt = &ot;
-
- // If this obj*{} target already exists, then it needs to be
- // "compatible" with what we are doing here.
- //
- // This gets a bit tricky. We need to make sure the source files
- // are the same which we can only do by comparing the targets to
- // which they resolve. But we cannot search the ot's prerequisites
- // -- only the rule that matches can. Note, however, that if all
- // this works out, then our next step is to match the obj*{}
- // target. If things don't work out, then we fail, in which case
- // searching and matching speculatively doesn't really hurt.
- //
- bool found (false);
- for (prerequisite_member p1:
- reverse_group_prerequisite_members (a, *pt))
- {
- // Ignore some known target types (fsdir, headers, libraries).
- //
- if (p1.is_a<fsdir> () ||
- p1.is_a<h> () ||
- (p.is_a<cxx> () && (p1.is_a<hxx> () ||
- p1.is_a<ixx> () ||
- p1.is_a<txx> ())) ||
- p1.is_a<lib> () ||
- p1.is_a<liba> () ||
- p1.is_a<libs> ())
- {
- continue;
- }
-
- if (!p1.is_a<cxx> ())
- fail << "synthesized target for prerequisite " << cp
- << " would be incompatible with existing target " << *pt <<
- info << "unexpected existing prerequisite type " << p1 <<
- info << "specify corresponding obj{} target explicitly";
-
- if (!found)
- {
- build2::match (a, *pt); // Now p1 should be resolved.
-
- // Searching our own prerequisite is ok.
- //
- if (&p.search () != &p1.search ())
- fail << "synthesized target for prerequisite " << cp << " would "
- << "be incompatible with existing target " << *pt <<
- info << "existing prerequisite " << p1 << " does not match "
- << cp <<
- info << "specify corresponding " << tt.name << "{} target "
- << "explicitly";
-
- found = true;
- // Check the rest of the prerequisites.
- }
- }
-
- if (!found)
- {
- // Note: add the source to the group, not the member.
- //
- ot.prerequisites.emplace_back (p.as_prerequisite (trace));
-
- // Add our lib*{} prerequisites to the object file (see
- // cxx.export.poptions above for details).
- //
- // Note that we don't resolve lib{} to liba{}/libs{} here instead
- // leaving it to whoever (e.g., the compile rule) will be needing
- // cxx.export.*. One reason for doing it there is that the object
- // target might be specified explicitly by the user in which case
- // they will have to specify the set of lib{} prerequisites and it's
- // much cleaner to do as lib{} rather than liba{}/libs{}.
- //
- // Initially, we were only adding imported libraries, but there is a
- // problem with this approach: the non-imported library might depend
- // on the imported one(s) which we will never "see" unless we start
- // with this library.
- //
- for (prerequisite& p: group_prerequisites (t))
- {
- if (p.is_a<lib> () || p.is_a<liba> () || p.is_a<libs> ())
- ot.prerequisites.emplace_back (p);
- }
-
- build2::match (a, *pt);
- }
-
- t.prerequisite_targets.push_back (pt);
- }
-
- switch (a)
- {
- case perform_update_id: return &perform_update;
- case perform_clean_id: return &perform_clean;
- default: return noop_recipe; // Configure update.
- }
- }
-
- // Recursively append/hash prerequisite libraries of a static library.
- //
- static void
- append_libraries (strings& args, liba& a)
- {
- for (target* pt: a.prerequisite_targets)
- {
- if (liba* pa = pt->is_a<liba> ())
- {
- args.push_back (relative (pa->path ()).string ()); // string()&&
- append_libraries (args, *pa);
- }
- else if (libs* ps = pt->is_a<libs> ())
- args.push_back (relative (ps->path ()).string ()); // string()&&
- }
- }
-
- static void
- hash_libraries (sha256& cs, liba& a)
- {
- for (target* pt: a.prerequisite_targets)
- {
- if (liba* pa = pt->is_a<liba> ())
- {
- cs.append (pa->path ().string ());
- hash_libraries (cs, *pa);
- }
- else if (libs* ps = pt->is_a<libs> ())
- cs.append (ps->path ().string ());
- }
- }
-
- static void
- append_rpath_link (strings& args, libs& t)
- {
- for (target* pt: t.prerequisite_targets)
- {
- if (libs* ls = pt->is_a<libs> ())
- {
- args.push_back ("-Wl,-rpath-link," +
- ls->path ().directory ().string ());
- append_rpath_link (args, *ls);
- }
- }
- }
-
- // See windows-manifest.cxx.
- //
- path
- windows_manifest (file&, bool rpath_assembly);
-
- // See windows-rpath.cxx.
- //
- timestamp
- windows_rpath_timestamp (file&);
-
- void
- windows_rpath_assembly (file&, timestamp, bool scratch);
-
- const char*
- msvc_machine (const string& cpu); // msvc.cxx
-
- // Filter link.exe noise (msvc.cxx).
- //
- void
- msvc_filter_link (ifdstream&, const file&, otype);
-
- target_state link::
- perform_update (action a, target& xt)
- {
- tracer trace ("cxx::link::perform_update");
-
- file& t (static_cast<file&> (xt));
-
- otype lt (link_type (t));
-
- // Update prerequisites.
- //
- bool update (execute_prerequisites (a, t, t.mtime ()));
-
- scope& rs (t.root_scope ());
-
- const string& cid (cast<string> (rs["cxx.id"]));
- const string& tgt (cast<string> (rs["cxx.target"]));
- const string& tsys (cast<string> (rs["cxx.target.system"]));
- const string& tclass (cast<string> (rs["cxx.target.class"]));
-
- // If targeting Windows, take care of the manifest.
- //
- path manifest; // Manifest itself (msvc) or compiled object file.
- timestamp rpath_timestamp (timestamp_nonexistent); // DLLs timestamp.
-
- if (lt == otype::e && tclass == "windows")
- {
- // First determine if we need to add our rpath emulating assembly. The
- // assembly itself is generated later, after updating the target. Omit
- // it if we are updating for install.
- //
- if (a.outer_operation () != install_id)
- rpath_timestamp = windows_rpath_timestamp (t);
-
- path mf (
- windows_manifest (
- t,
- rpath_timestamp != timestamp_nonexistent));
-
- timestamp mt (file_mtime (mf));
-
- if (tsys == "mingw32")
- {
- // Compile the manifest into the object file with windres. While we
- // are going to synthesize an .rc file to pipe to windres' stdin, we
- // will still use .manifest to check if everything is up-to-date.
- //
- manifest = mf + ".o";
-
- if (mt > file_mtime (manifest))
- {
- path of (relative (manifest));
-
-          // @@ Would be good to add this to depdb (e.g., rc changes).
- //
- const char* args[] = {
- cast<path> (rs["config.bin.rc"]).string ().c_str (),
- "--input-format=rc",
- "--output-format=coff",
- "-o", of.string ().c_str (),
- nullptr};
-
- if (verb >= 3)
- print_process (args);
-
- try
- {
- process pr (args, -1);
-
- try
- {
- ofdstream os (pr.out_fd);
-
- // 1 is resource ID, 24 is RT_MANIFEST. We also need to escape
- // Windows path backslashes.
- //
- os << "1 24 \"";
-
- const string& s (mf.string ());
- for (size_t i (0), j;; i = j + 1)
- {
- j = s.find ('\\', i);
- os.write (s.c_str () + i,
- (j == string::npos ? s.size () : j) - i);
-
- if (j == string::npos)
- break;
-
- os.write ("\\\\", 2);
- }
-
- os << "\"" << endl;
-
- os.close ();
-
- if (!pr.wait ())
- throw failed (); // Assume diagnostics issued.
- }
- catch (const ofdstream::failure& e)
- {
- if (pr.wait ()) // Ignore if child failed.
- fail << "unable to pipe resource file to " << args[0]
- << ": " << e.what ();
- }
- }
- catch (const process_error& e)
- {
- error << "unable to execute " << args[0] << ": " << e.what ();
-
- if (e.child ())
- exit (1);
-
- throw failed ();
- }
-
- update = true; // Manifest changed, force update.
- }
- }
- else
- {
- manifest = move (mf); // Save for link.exe's /MANIFESTINPUT.
-
- if (mt > t.mtime ())
- update = true; // Manifest changed, force update.
- }
- }
-
- // Check/update the dependency database.
- //
- depdb dd (t.path () + ".d");
-
- // First should come the rule name/version.
- //
- if (dd.expect ("cxx.link 1") != nullptr)
- l4 ([&]{trace << "rule mismatch forcing update of " << t;});
-
- lookup ranlib;
-
- // Then the linker checksum (ar/ranlib or C++ compiler).
- //
- if (lt == otype::a)
- {
- ranlib = rs["config.bin.ranlib"];
-
- if (ranlib && ranlib->empty ()) // @@ BC LT [null].
- ranlib = lookup ();
-
- const char* rl (
- ranlib
- ? cast<string> (rs["bin.ranlib.checksum"]).c_str ()
- : "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855");
-
- if (dd.expect (cast<string> (rs["bin.ar.checksum"])) != nullptr)
- l4 ([&]{trace << "ar mismatch forcing update of " << t;});
-
- if (dd.expect (rl) != nullptr)
- l4 ([&]{trace << "ranlib mismatch forcing update of " << t;});
- }
- else
- {
- // For VC we use link.exe directly.
- //
- const string& cs (
- cast<string> (
- rs[cid == "msvc" ? "bin.ld.checksum" : "cxx.checksum"]));
-
- if (dd.expect (cs) != nullptr)
- l4 ([&]{trace << "linker mismatch forcing update of " << t;});
- }
-
- // Next check the target. While it might be incorporated into the linker
- // checksum, it also might not (e.g., MS link.exe).
- //
- if (dd.expect (tgt) != nullptr)
- l4 ([&]{trace << "target mismatch forcing update of " << t;});
-
- // Start building the command line. While we don't yet know whether we
- // will really need it, we need to hash it to find out. So the options
- // are to either replicate the exact process twice, first for hashing
- // then for building or to go ahead and start building and hash the
- // result. The first approach is probably more efficient while the
-      // second is simpler. Let's go with the simpler for now (actually it's
- // kind of a hybrid).
- //
- cstrings args {nullptr}; // Reserve one for config.bin.ar/config.cxx.
-
- // Storage.
- //
- string std;
- string soname1, soname2;
- strings sargs;
-
- if (lt == otype::a)
- {
- if (cid == "msvc") ;
- else
- {
- // If the user asked for ranlib, don't try to do its function with -s.
-          // Some ar implementations (e.g., the LLVM one) don't support
- // leading '-'.
- //
- args.push_back (ranlib ? "rc" : "rcs");
- }
- }
- else
- {
- if (cid == "msvc")
- {
- // We are using link.exe directly so we don't pass the C++ compiler
- // options.
- }
- else
- {
- append_options (args, t, "cxx.coptions");
- append_std (args, rs, cid, t, std);
- }
-
- append_options (args, t, "cxx.loptions");
-
- // Handle soname/rpath.
- //
- if (tclass == "windows")
- {
- // Limited emulation for Windows with no support for user-defined
- // rpaths.
- //
- auto l (t["bin.rpath"]);
-
- if (l && !l->empty ())
- fail << tgt << " does not support rpath";
- }
- else
- {
- // Set soname.
- //
- if (lt == otype::s)
- {
- const string& leaf (t.path ().leaf ().string ());
-
- if (tclass == "macosx")
- {
- // With Mac OS 10.5 (Leopard) Apple finally caved in and gave us
- // a way to emulate vanilla -rpath.
- //
- // It may seem natural to do something different on update for
- // install. However, if we don't make it @rpath, then the user
- // won't be able to use config.bin.rpath for installed libraries.
- //
- soname1 = "-install_name";
- soname2 = "@rpath/" + leaf;
- }
- else
- soname1 = "-Wl,-soname," + leaf;
-
- if (!soname1.empty ())
- args.push_back (soname1.c_str ());
-
- if (!soname2.empty ())
- args.push_back (soname2.c_str ());
- }
-
- // Add rpaths. We used to first add the ones specified by the user
- // so that they take precedence. But that caused problems if we have
- // old versions of the libraries sitting in the rpath location
- // (e.g., installed libraries). And if you think about this, it's
- // probably correct to prefer libraries that we explicitly imported
- // to the ones found via rpath.
- //
- // Note also that if this is update for install, then we don't add
- // rpath of the imported libraries (i.e., we assume they are also
- // installed).
- //
- for (target* pt: t.prerequisite_targets)
- {
- if (libs* ls = pt->is_a<libs> ())
- {
- if (a.outer_operation () != install_id)
- {
- sargs.push_back ("-Wl,-rpath," +
- ls->path ().directory ().string ());
- }
- // Use -rpath-link on targets that support it (Linux, FreeBSD).
- // Since with this option the paths are not stored in the
- // library, we have to do this recursively (in fact, we don't
- // really need it for top-level libraries).
- //
- else if (tclass == "linux" || tclass == "freebsd")
- append_rpath_link (sargs, *ls);
- }
- }
-
- if (auto l = t["bin.rpath"])
- for (const dir_path& p: cast<dir_paths> (l))
- sargs.push_back ("-Wl,-rpath," + p.string ());
- }
- }
-
- // All the options should now be in. Hash them and compare with the db.
- //
- {
- sha256 cs;
-
- for (size_t i (1); i != args.size (); ++i)
- cs.append (args[i]);
-
- for (size_t i (0); i != sargs.size (); ++i)
- cs.append (sargs[i]);
-
- if (dd.expect (cs.string ()) != nullptr)
- l4 ([&]{trace << "options mismatch forcing update of " << t;});
- }
-
- // Finally, hash and compare the list of input files.
- //
- // Should we capture actual files or their checksum? The only good
- // reason for capturing actual files is diagnostics: we will be able
- // to pinpoint exactly what is causing the update. On the other hand,
- // the checksum is faster and simpler. And we like simple.
- //
- {
- sha256 cs;
-
- for (target* pt: t.prerequisite_targets)
- {
- file* f;
- liba* a (nullptr);
- libs* s (nullptr);
-
- if ((f = pt->is_a<obje> ()) ||
- (f = pt->is_a<obja> ()) ||
- (f = pt->is_a<objs> ()) ||
- (lt != otype::a &&
- ((f = a = pt->is_a<liba> ()) ||
- (f = s = pt->is_a<libs> ()))))
- {
- // On Windows a shared library is a DLL with the import library as
- // a first ad hoc group member. MinGW though can link directly to
- // DLLs (see search_library() for details).
- //
- if (s != nullptr && tclass == "windows")
- {
- if (s->member != nullptr)
- f = static_cast<file*> (s->member);
- }
-
- cs.append (f->path ().string ());
-
- // If this is a static library, link all the libraries it depends
- // on, recursively.
- //
- if (a != nullptr)
- hash_libraries (cs, *a);
- }
- }
-
- // Treat it as input for both MinGW and VC.
- //
- if (!manifest.empty ())
- cs.append (manifest.string ());
-
- // Treat them as inputs, not options.
- //
- if (lt != otype::a)
- hash_options (cs, t, "cxx.libs");
-
- if (dd.expect (cs.string ()) != nullptr)
- l4 ([&]{trace << "file set mismatch forcing update of " << t;});
- }
-
- // If any of the above checks resulted in a mismatch (different linker,
- // options or input file set), or if the database is newer than the
- // target (interrupted update) then force the target update. Also
- // note this situation in the "from scratch" flag.
- //
- bool scratch (false);
- if (dd.writing () || dd.mtime () > t.mtime ())
- scratch = update = true;
-
- dd.close ();
-
- // If nothing changed, then we are done.
- //
- if (!update)
- return target_state::unchanged;
-
- // Ok, so we are updating. Finish building the command line.
- //
- string out, out1, out2; // Storage.
-
- // Translate paths to relative (to working directory) ones. This results
- // in easier to read diagnostics.
- //
- path relt (relative (t.path ()));
-
- switch (lt)
- {
- case otype::a:
- {
- args[0] = cast<path> (rs["config.bin.ar"]).string ().c_str ();
-
- if (cid == "msvc")
- {
- // lib.exe has /LIBPATH but it's not clear/documented what it's
- // used for. Perhaps for link-time code generation (/LTCG)? If
- // that's the case, then we may need to pass cxx.loptions.
- //
- args.push_back ("/NOLOGO");
-
- // Add /MACHINE.
- //
- args.push_back (
- msvc_machine (cast<string> (rs["cxx.target.cpu"])));
-
- out = "/OUT:" + relt.string ();
- args.push_back (out.c_str ());
- }
- else
- args.push_back (relt.string ().c_str ());
-
- break;
- }
- // The options are usually similar enough to handle them together.
- //
- case otype::e:
- case otype::s:
- {
- if (cid == "msvc")
- {
- // Using link.exe directly.
- //
- args[0] = cast<path> (rs["config.bin.ld"]).string ().c_str ();
- args.push_back ("/NOLOGO");
-
- if (lt == otype::s)
- args.push_back ("/DLL");
-
- // Add /MACHINE.
- //
- args.push_back (
- msvc_machine (cast<string> (rs["cxx.target.cpu"])));
-
- // Unless explicitly enabled with /INCREMENTAL, disable
- // incremental linking (it is implicitly enabled if /DEBUG is
- // specified). The reason is the .ilk file: its name cannot be
- // changed and if we have, say, foo.exe and foo.dll, then they
- // will end up stomping on each other's .ilk's.
- //
- // So the idea is to disable it by default but let the user
- // request it explicitly if they are sure their project doesn't
- // suffer from the above issue. We can also have something like
- // 'incremental' config initializer keyword for this.
- //
- // It might also be a good idea to ask Microsoft to add an option.
- //
- if (!find_option ("/INCREMENTAL", args, true))
- args.push_back ("/INCREMENTAL:NO");
-
- // If you look at the list of libraries Visual Studio links by
- // default, it includes everything and a couple of kitchen sinks
- // (winspool32.lib, ole32.lib, odbc32.lib, etc) while we want to
- // keep our low-level build as pure as possible. However, there
- // seem to be fairly essential libraries that are not linked by
- // link.exe by default (use /VERBOSE:LIB to see the list). For
- // example, MinGW by default links advapi32, shell32, user32, and
- // kernel32. And so we follow suit and make sure those are linked.
- // advapi32 and kernel32 are already on the default list and we
- // only need to add the other two.
- //
- // The way we are going to do it is via the /DEFAULTLIB option
- // rather than specifying the libraries as normal inputs (as VS
- // does). This way the user can override our actions with the
- // /NODEFAULTLIB option.
- //
- args.push_back ("/DEFAULTLIB:shell32.lib");
- args.push_back ("/DEFAULTLIB:user32.lib");
-
- // Take care of the manifest (will be empty for the DLL).
- //
- if (!manifest.empty ())
- {
- std = "/MANIFESTINPUT:"; // Repurpose storage for std.
- std += relative (manifest).string ();
- args.push_back ("/MANIFEST:EMBED");
- args.push_back (std.c_str ());
- }
-
- if (lt == otype::s)
- {
- // On Windows libs{} is the DLL and its first ad hoc group
- // member is the import library.
- //
- // This will also create the .exp export file. Its name will be
- // derived from the import library by changing the extension.
- // Lucky for us -- there is no option to name it.
- //
- auto imp (static_cast<file*> (t.member));
- out2 = "/IMPLIB:" + relative (imp->path ()).string ();
- args.push_back (out2.c_str ());
- }
-
- // If we have /DEBUG then name the .pdb file. It is either the
- // first (exe) or the second (dll) ad hoc group member.
- //
- if (find_option ("/DEBUG", args, true))
- {
- auto pdb (static_cast<file*> (
- lt == otype::e ? t.member : t.member->member));
- out1 = "/PDB:" + relative (pdb->path ()).string ();
- args.push_back (out1.c_str ());
- }
-
- // @@ An executable can have an import library and VS seems to
- // always name it. I wonder what would trigger its generation?
- // Could it be the presence of export symbols?
-
- out = "/OUT:" + relt.string ();
- args.push_back (out.c_str ());
- }
- else
- {
- args[0] = cast<path> (rs["config.cxx"]).string ().c_str ();
-
- // Add the option that triggers building a shared library and take
- // care of any extras (e.g., import library).
- //
- if (lt == otype::s)
- {
- if (tclass == "macosx")
- args.push_back ("-dynamiclib");
- else
- args.push_back ("-shared");
-
- if (tsys == "mingw32")
- {
- // On Windows libs{} is the DLL and its first ad hoc group
- // member is the import library.
- //
- auto imp (static_cast<file*> (t.member));
- out = "-Wl,--out-implib=" + relative (imp->path ()).string ();
- args.push_back (out.c_str ());
- }
- }
-
- args.push_back ("-o");
- args.push_back (relt.string ().c_str ());
- }
-
- break;
- }
- }
-
- for (target* pt: t.prerequisite_targets)
- {
- file* f;
- liba* a (nullptr);
- libs* s (nullptr);
-
- if ((f = pt->is_a<obje> ()) ||
- (f = pt->is_a<obja> ()) ||
- (f = pt->is_a<objs> ()) ||
- (lt != otype::a &&
- ((f = a = pt->is_a<liba> ()) ||
- (f = s = pt->is_a<libs> ()))))
- {
- // On Windows a shared library is a DLL with the import library as a
- // first ad hoc group member. MinGW though can link directly to DLLs
- // (see search_library() for details).
- //
- if (s != nullptr && tclass == "windows")
- {
- if (s->member != nullptr)
- f = static_cast<file*> (s->member);
- }
-
- sargs.push_back (relative (f->path ()).string ()); // string()&&
-
- // If this is a static library, link all the libraries it depends
- // on, recursively.
- //
- if (a != nullptr)
- append_libraries (sargs, *a);
- }
- }
-
- // For MinGW manifest is an object file.
- //
- if (!manifest.empty () && tsys == "mingw32")
- sargs.push_back (relative (manifest).string ());
-
- // Copy sargs to args. Why not do it as we go along pushing into sargs?
- // Because of potential reallocations.
- //
- for (size_t i (0); i != sargs.size (); ++i)
- args.push_back (sargs[i].c_str ());
-
- if (lt != otype::a)
- append_options (args, t, "cxx.libs");
-
- args.push_back (nullptr);
-
- if (verb >= 2)
- print_process (args);
- else if (verb)
- text << "ld " << t;
-
- try
- {
- // VC tools (both lib.exe and link.exe) send diagnostics to stdout.
- // Also, link.exe likes to print various gratuitous messages. So for
- // link.exe we redirect stdout to a pipe, filter that noise out, and
- // send the rest to stderr.
- //
- // For lib.exe (and any other insane compiler that may try to pull off
- // something like this) we are going to redirect stdout to stderr. For
- // sane compilers this should be harmless.
- //
- bool filter (cid == "msvc" && lt != otype::a);
-
- process pr (args.data (), 0, (filter ? -1 : 2));
-
- if (filter)
- {
- try
- {
- ifdstream is (pr.in_ofd, fdstream_mode::text, ifdstream::badbit);
-
- msvc_filter_link (is, t, lt);
-
- // If anything remains in the stream, send it all to stderr. Note
- // that the eof check is important: if the stream is at eof, this
- // and all subsequent writes to cerr will fail (and you won't see
- // a thing).
- //
- if (is.peek () != ifdstream::traits_type::eof ())
- cerr << is.rdbuf ();
-
- is.close ();
- }
- catch (const ifdstream::failure&) {} // Assume exits with error.
- }
-
- if (!pr.wait ())
- throw failed ();
- }
- catch (const process_error& e)
- {
- error << "unable to execute " << args[0] << ": " << e.what ();
-
- // In a multi-threaded program that fork()'ed but did not exec(),
- // it is unwise to try to do any kind of cleanup (like unwinding
- // the stack and running destructors).
- //
- if (e.child ())
- exit (1);
-
- throw failed ();
- }
-
- // Remove the target file if any of the subsequent actions fail. If we
- // don't do that, we will end up with a broken build that is up-to-date.
- //
- auto_rmfile rm (t.path ());
-
- if (ranlib)
- {
- const char* args[] = {
- cast<path> (ranlib).string ().c_str (),
- relt.string ().c_str (),
- nullptr};
-
- if (verb >= 2)
- print_process (args);
-
- try
- {
- process pr (args);
-
- if (!pr.wait ())
- throw failed ();
- }
- catch (const process_error& e)
- {
- error << "unable to execute " << args[0] << ": " << e.what ();
-
- if (e.child ())
- exit (1);
-
- throw failed ();
- }
- }
-
-      // For Windows generate rpath-emulating assembly (unless updating for
- // install).
- //
- if (lt == otype::e && tclass == "windows")
- {
- if (a.outer_operation () != install_id)
- windows_rpath_assembly (t, rpath_timestamp, scratch);
- }
-
- rm.cancel ();
-
- // Should we go to the filesystem and get the new mtime? We know the
- // file has been modified, so instead just use the current clock time.
- // It has the advantage of having the subseconds precision.
- //
- t.mtime (system_clock::now ());
- return target_state::changed;
- }
-
- target_state link::
- perform_clean (action a, target& xt)
- {
- file& t (static_cast<file&> (xt));
-
- scope& rs (t.root_scope ());
- const string& tsys (cast<string> (rs["cxx.target.system"]));
- const string& tclass (cast<string> (rs["cxx.target.class"]));
-
- initializer_list<const char*> e;
-
- switch (link_type (t))
- {
- case otype::a:
- {
- e = {".d"};
- break;
- }
- case otype::e:
- {
- if (tclass == "windows")
- {
- if (tsys == "mingw32")
- {
- e = {".d", "/.dlls", ".manifest.o", ".manifest"};
- }
- else
- {
- // Assuming it's VC or alike. Clean up .ilk in case the user
- // enabled incremental linking (note that .ilk replaces .exe).
- //
- e = {".d", "/.dlls", ".manifest", "-.ilk"};
- }
- }
- else
- e = {".d"};
-
- break;
- }
- case otype::s:
- {
- if (tclass == "windows" && tsys != "mingw32")
- {
- // Assuming it's VC or alike. Clean up .exp and .ilk.
- //
- e = {".d", ".exp", "-.ilk"};
- }
- else
- e = {".d"};
-
- break;
- }
- }
-
- return clean_extra (a, t, e);
- }
-
- link link::instance;
- }
-}
diff --git a/build2/cxx/module b/build2/cxx/module
index 37466ef..8c1a01f 100644
--- a/build2/cxx/module
+++ b/build2/cxx/module
@@ -15,6 +15,15 @@ namespace build2
namespace cxx
{
bool
+ config_init (scope&,
+ scope&,
+ const location&,
+ unique_ptr<module_base>&,
+ bool,
+ bool,
+ const variable_map&);
+
+ bool
init (scope&,
scope&,
const location&,
diff --git a/build2/cxx/module.cxx b/build2/cxx/module.cxx
index f66ef53..fd98114 100644
--- a/build2/cxx/module.cxx
+++ b/build2/cxx/module.cxx
@@ -4,23 +4,13 @@
#include <build2/cxx/module>
-#include <butl/triplet>
-
#include <build2/scope>
#include <build2/context>
#include <build2/diagnostics>
-#include <build2/config/utility>
-#include <build2/install/utility>
-
-#include <build2/bin/target>
+#include <build2/cc/module>
-#include <build2/cxx/link>
-#include <build2/cxx/guess>
#include <build2/cxx/target>
-#include <build2/cxx/compile>
-#include <build2/cxx/install>
-#include <build2/cxx/utility>
using namespace std;
using namespace butl;
@@ -29,355 +19,228 @@ namespace build2
{
namespace cxx
{
- bool
- init (scope& r,
- scope& b,
- const location& loc,
- unique_ptr<module_base>&,
- bool first,
- bool,
- const variable_map& config_hints)
- {
- tracer trace ("cxx::init");
- l5 ([&]{trace << "for " << b.out_path ();});
-
- // Enter module variables.
- //
- if (first)
- {
- auto& v (var_pool);
+ using cc::config_module;
- // Note: some overridable, some not.
- //
- v.insert<path> ("config.cxx", true);
- v.insert<strings> ("config.cxx.poptions", true);
- v.insert<strings> ("config.cxx.coptions", true);
- v.insert<strings> ("config.cxx.loptions", true);
- v.insert<strings> ("config.cxx.libs", true);
-
- v.insert<strings> ("cxx.poptions");
- v.insert<strings> ("cxx.coptions");
- v.insert<strings> ("cxx.loptions");
- v.insert<strings> ("cxx.libs");
-
- v.insert<strings> ("cxx.export.poptions");
- v.insert<strings> ("cxx.export.coptions");
- v.insert<strings> ("cxx.export.loptions");
- v.insert<strings> ("cxx.export.libs");
-
- v.insert<string> ("cxx.std", true);
- }
-
- // Configure.
- //
-
- assert (config_hints.empty ()); // We don't known any hints.
-
- // config.cxx.{p,c,l}options
- // config.cxx.libs
- //
- // These are optional. We also merge them into the corresponding
- // cxx.* variables.
- //
- // The merging part gets a bit tricky if this module has already
- // been loaded in one of the outer scopes. By doing the straight
- // append we would just be repeating the same options over and
- // over. So what we are going to do is only append to a value if
- // it came from this scope. Then the usage for merging becomes:
- //
- // cxx.coptions = <overridable options> # Note: '='.
- // using cxx
- // cxx.coptions += <overriding options> # Note: '+='.
- //
- b.assign ("cxx.poptions") += cast_null<strings> (
- config::optional (r, "config.cxx.poptions"));
-
- b.assign ("cxx.coptions") += cast_null<strings> (
- config::optional (r, "config.cxx.coptions"));
-
- b.assign ("cxx.loptions") += cast_null<strings> (
- config::optional (r, "config.cxx.loptions"));
+ class module: public cc::module
+ {
+ public:
+ explicit
+ module (data&& d): common (move (d)), cc::module (move (d)) {}
- b.assign ("cxx.libs") += cast_null<strings> (
- config::optional (r, "config.cxx.libs"));
+ bool
+ translate_std (string&, scope&, const value&) const override;
+ };
- // Configuration hints for the bin module. They will only be used on the
- // first loading of the bin module (for this project) so we only
- // populate them on our first loading.
- //
- variable_map bin_hints;
+ bool module::
+ translate_std (string& s, scope& r, const value& val) const
+ {
+ const string& v (cast<string> (val));
- // config.cxx
- //
- if (first)
+ if (cid == "msvc")
{
- auto p (config::required (r, "config.cxx", path ("g++")));
-
- // Figure out which compiler we are dealing with, its target, etc.
+ // C++ standard-wise, with VC you get what you get. The question is
+ // whether we should verify that the requested standard is provided by
+ // this VC version. And if so, from which version should we say VC
+ // supports 11, 14, and 17? We should probably be as loose as possible
+ // here since the author will always be able to tighten (but not
+ // loosen) this in the buildfile (i.e., detect unsupported versions).
//
- const path& cxx (cast<path> (p.first));
- compiler_info ci (guess (cxx, cast_null<strings> (r["cxx.coptions"])));
-
- // If this is a new value (e.g., we are configuring), then print the
- // report at verbosity level 2 and up (-v).
+ // For now we are not going to bother doing this for C++03.
//
- if (verb >= (p.second ? 2 : 3))
+ if (v != "98" && v != "03")
{
- text << "cxx " << project (r) << '@' << r.out_path () << '\n'
- << " cxx " << cxx << '\n'
- << " id " << ci.id << '\n'
- << " version " << ci.version.string << '\n'
- << " major " << ci.version.major << '\n'
- << " minor " << ci.version.minor << '\n'
- << " patch " << ci.version.patch << '\n'
- << " build " << ci.version.build << '\n'
- << " signature " << ci.signature << '\n'
- << " checksum " << ci.checksum << '\n'
- << " target " << ci.target;
- }
-
- r.assign<string> ("cxx.id") = ci.id.string ();
- r.assign<string> ("cxx.id.type") = move (ci.id.type);
- r.assign<string> ("cxx.id.variant") = move (ci.id.variant);
-
- r.assign<string> ("cxx.version") = move (ci.version.string);
- r.assign<uint64_t> ("cxx.version.major") = ci.version.major;
- r.assign<uint64_t> ("cxx.version.minor") = ci.version.minor;
- r.assign<uint64_t> ("cxx.version.patch") = ci.version.patch;
- r.assign<string> ("cxx.version.build") = move (ci.version.build);
+ uint64_t cver (cast<uint64_t> (r[x_version_major]));
- r.assign<string> ("cxx.signature") = move (ci.signature);
- r.assign<string> ("cxx.checksum") = move (ci.checksum);
-
- // While we still have the original, compiler-reported target, see if
- // we can derive a binutils program pattern.
- //
- // BTW, for GCC we also get gcc-{ar,ranlib} which add support for the
- // LTO plugin though it seems more recent GNU binutils (2.25) are able
- // to load the plugin when needed automatically. So it doesn't seem we
- // should bother trying to support this on our end (the way we could
- // do it is by passing config.bin.{ar,ranlib} as hints).
- //
- string pattern;
-
- if (cast<string> (r["cxx.id"]) == "msvc")
- {
- // If the compiler name is/starts with 'cl' (e.g., cl.exe, cl-14),
- // then replace it with '*' and use it as a pattern for lib, link,
- // etc.
+ // @@ Is mapping for 14 and 17 correct? Maybe Update 2 for 14?
//
- if (cxx.size () > 2)
+ if ((v == "11" && cver < 16) || // C++11 since VS2010/10.0.
+ (v == "14" && cver < 19) || // C++14 since VS2015/14.0.
+ (v == "17" && cver < 20)) // C++17 since VS20??/15.0.
{
- const string& l (cxx.leaf ().string ());
- size_t n (l.size ());
-
- if (n >= 2 &&
- (l[0] == 'c' || l[0] == 'C') &&
- (l[1] == 'l' || l[1] == 'L') &&
- (n == 2 || l[2] == '.' || l[2] == '-'))
- {
- path p (cxx.directory ());
- p /= "*";
- p += l.c_str () + 2;
- pattern = move (p).string ();
- }
+ fail << "C++" << v << " is not supported by "
+ << cast<string> (r[x_signature]) <<
+ info << "required by " << project (r) << '@' << r.out_path ();
}
}
+
+ return false;
+ }
+ else
+ {
+ // Translate 11 to 0x, 14 to 1y, and 17 to 1z for compatibility with
+ // older versions of the compilers.
+ //
+ s = "-std=";
+
+ if (v == "98")
+ s += "c++98";
+ else if (v == "03")
+ s += "c++03";
+ else if (v == "11")
+ s += "c++0x";
+ else if (v == "14")
+ s += "c++1y";
+ else if (v == "17")
+ s += "c++1z";
else
- {
- // When cross-compiling the whole toolchain is normally prefixed
- // with the target triplet, e.g., x86_64-w64-mingw32-{g++,ar,ld}.
- //
- const string& t (ci.target);
- size_t n (t.size ());
+ s += v; // In case the user specifies something like 'gnu++17'.
- if (cxx.size () > n + 1)
- {
- const string& l (cxx.leaf ().string ());
-
- if (l.size () > n + 1 && l.compare (0, n, t) == 0 && l[n] == '-')
- {
- path p (cxx.directory ());
- p /= t;
- p += "-*";
- pattern = move (p).string ();
- }
- }
- }
+ return true;
+ }
+ }
- if (!pattern.empty ())
- bin_hints.assign ("config.bin.pattern") = move (pattern);
+ bool
+ config_init (scope& r,
+ scope& b,
+ const location& loc,
+ unique_ptr<module_base>& m,
+ bool first,
+ bool,
+ const variable_map& hints)
+ {
+ tracer trace ("cxx::config_init");
+ l5 ([&]{trace << "for " << b.out_path ();});
- // Split/canonicalize the target.
+ if (first)
+ {
+ // Load cc.vars so that we can cache all the cc.* variables.
//
+ if (!cast_false<bool> (b["cc.vars.loaded"]))
+ load_module ("cc.vars", r, b, loc);
- // Did the user ask us to use config.sub?
+ // Enter all the variables and initialize the module data.
//
- if (ops.config_sub_specified ())
- {
- ci.target = run<string> (ops.config_sub (),
- ci.target.c_str (),
- [] (string& l) {return move (l);});
- l5 ([&]{trace << "config.sub target: '" << ci.target << "'";});
- }
-
- try
- {
- string canon;
- triplet t (ci.target, canon);
+ auto& v (var_pool);
- l5 ([&]{trace << "canonical target: '" << canon << "'; "
- << "class: " << t.class_;});
+ cc::config_data d {
+ cc::lang::cxx,
- // Pass the target we extracted from the C++ compiler as a config
- // hint to the bin module.
- //
- bin_hints.assign ("config.bin.target") = canon;
+ "cxx",
+ "c++",
+ "g++",
- // Enter as cxx.target.{cpu,vendor,system,version,class}.
+ // Note: some overridable, some not.
//
- r.assign<string> ("cxx.target") = move (canon);
- r.assign<string> ("cxx.target.cpu") = move (t.cpu);
- r.assign<string> ("cxx.target.vendor") = move (t.vendor);
- r.assign<string> ("cxx.target.system") = move (t.system);
- r.assign<string> ("cxx.target.version") = move (t.version);
- r.assign<string> ("cxx.target.class") = move (t.class_);
- }
- catch (const invalid_argument& e)
- {
- // This is where we suggest that the user specifies --config-sub to
- // help us out.
- //
- fail << "unable to parse compiler target '" << ci.target << "': "
- << e.what () <<
- info << "consider using the --config-sub option";
- }
- }
-
- const string& cid (cast<string> (r["cxx.id"]));
- const string& tsys (cast<string> (r["cxx.target.system"]));
-
- // Initialize the bin module. Only do this if it hasn't already been
- // loaded so that we don't overwrite user's bin.* settings.
- //
- if (!cast_false<bool> (b["bin.loaded"]))
- load_module ("bin", r, b, loc, false, bin_hints);
-
- // Verify bin's target matches ours.
- //
- {
- const string& bt (cast<string> (r["bin.target"]));
- const string& ct (cast<string> (r["cxx.target"]));
-
- if (bt != ct)
- fail (loc) << "bin and cxx module target platform mismatch" <<
- info << "bin.target is " << bt <<
- info << "cxx.target is " << ct;
+ v.insert<path> ("config.cxx", true),
+ v.insert<strings> ("config.cxx.poptions", true),
+ v.insert<strings> ("config.cxx.coptions", true),
+ v.insert<strings> ("config.cxx.loptions", true),
+ v.insert<strings> ("config.cxx.libs", true),
+
+ v.insert<strings> ("cxx.poptions"),
+ v.insert<strings> ("cxx.coptions"),
+ v.insert<strings> ("cxx.loptions"),
+ v.insert<strings> ("cxx.libs"),
+
+ v["cc.poptions"],
+ v["cc.coptions"],
+ v["cc.loptions"],
+ v["cc.libs"],
+
+ v.insert<strings> ("cxx.export.poptions"),
+ v.insert<strings> ("cxx.export.coptions"),
+ v.insert<strings> ("cxx.export.loptions"),
+ v.insert<strings> ("cxx.export.libs"),
+
+ v["cc.export.poptions"],
+ v["cc.export.coptions"],
+ v["cc.export.loptions"],
+ v["cc.export.libs"],
+
+ v.insert<string> ("cxx.std", true),
+
+ v.insert<string> ("cxx.id"),
+ v.insert<string> ("cxx.id.type"),
+ v.insert<string> ("cxx.id.variant"),
+
+ v.insert<string> ("cxx.version"),
+ v.insert<uint64_t> ("cxx.version.major"),
+ v.insert<uint64_t> ("cxx.version.minor"),
+ v.insert<uint64_t> ("cxx.version.patch"),
+ v.insert<string> ("cxx.version.build"),
+
+ v.insert<string> ("cxx.signature"),
+ v.insert<string> ("cxx.checksum"),
+
+ v.insert<string> ("cxx.target"),
+ v.insert<string> ("cxx.target.cpu"),
+ v.insert<string> ("cxx.target.vendor"),
+ v.insert<string> ("cxx.target.system"),
+ v.insert<string> ("cxx.target.version"),
+ v.insert<string> ("cxx.target.class")
+ };
+
+ assert (m == nullptr);
+ m.reset (new config_module (move (d)));
}
- // Load the bin.ar module unless we were asked to only build shared
- // libraries.
- //
- if (auto l = r["config.bin.lib"])
- {
- if (cast<string> (l) != "shared")
- {
- if (!cast_false<bool> (b["bin.ar.loaded"]))
- load_module ("bin.ar", r, b, loc, false, bin_hints);
- }
- }
+ static_cast<config_module&> (*m).init (r, b, loc, first, hints);
+ return true;
+ }
- // In the VC world you link things directly with link.exe so load the
- // bin.ld module.
- //
- if (cid == "msvc")
- {
- if (!cast_false<bool> (b["bin.ld.loaded"]))
- load_module ("bin.ld", r, b, loc, false, bin_hints);
- }
+ static const target_type* hdr[] =
+ {
+ &hxx::static_type,
+ &ixx::static_type,
+ &txx::static_type,
+ &h::static_type,
+ nullptr
+ };
+
+ static const target_type* inc[] =
+ {
+ &hxx::static_type,
+ &ixx::static_type,
+ &txx::static_type,
+ &cxx::static_type,
+ &h::static_type,
+ &c::static_type,
+ nullptr
+ };
- // If our target is MinGW, then we will need the resource compiler
- // (windres) in order to embed the manifest.
- //
- if (tsys == "mingw32")
- {
- if (!cast_false<bool> (b["bin.rc.loaded"]))
- load_module ("bin.rc", r, b, loc, false, bin_hints);
- }
+ bool
+ init (scope& r,
+ scope& b,
+ const location& loc,
+ unique_ptr<module_base>& m,
+ bool first,
+ bool,
+ const variable_map& hints)
+ {
+ tracer trace ("cxx::init");
+ l5 ([&]{trace << "for " << b.out_path ();});
- // Register target types.
+ // Load cxx.config.
//
- {
- auto& t (b.target_types);
-
- t.insert<h> ();
- t.insert<c> ();
+ if (!cast_false<bool> (b["cxx.config.loaded"]))
+ load_module ("cxx.config", r, b, loc, false, hints);
- t.insert<cxx> ();
- t.insert<hxx> ();
- t.insert<ixx> ();
- t.insert<txx> ();
- }
-
- // Register rules.
- //
+ if (first)
{
- using namespace bin;
-
- auto& r (b.rules);
-
- // We register for configure so that we detect unresolved imports
- // during configuration rather that later, e.g., during update.
- //
- // @@ Should we check if install module was loaded (see bin)?
- //
-
- r.insert<obje> (perform_update_id, "cxx.compile", compile::instance);
- r.insert<obje> (perform_clean_id, "cxx.compile", compile::instance);
- r.insert<obje> (configure_update_id, "cxx.compile", compile::instance);
+ config_module& cm (*r.modules.lookup<config_module> ("cxx.config"));
- r.insert<exe> (perform_update_id, "cxx.link", link::instance);
- r.insert<exe> (perform_clean_id, "cxx.link", link::instance);
- r.insert<exe> (configure_update_id, "cxx.link", link::instance);
+ cc::data d {
+ cm,
- r.insert<exe> (perform_install_id, "cxx.install", install::instance);
+ "cxx.compile",
+ "cxx.link",
+ "cxx.install",
- // Only register static object/library rules if the bin.ar module is
- // loaded (by us or by the user).
- //
- if (cast_false<bool> (b["bin.ar.loaded"]))
- {
- r.insert<obja> (perform_update_id, "cxx.compile", compile::instance);
- r.insert<obja> (perform_clean_id, "cxx.compile", compile::instance);
- r.insert<obja> (configure_update_id, "cxx.compile", compile::instance);
+ cast<string> (r[cm.x_id]),
+ cast<string> (r[cm.x_target]),
+ cast<string> (r[cm.x_target_system]),
+ cast<string> (r[cm.x_target_class]),
- r.insert<liba> (perform_update_id, "cxx.link", link::instance);
- r.insert<liba> (perform_clean_id, "cxx.link", link::instance);
- r.insert<liba> (configure_update_id, "cxx.link", link::instance);
+ cxx::static_type,
+ hdr,
+ inc
+ };
- r.insert<liba> (perform_install_id, "cxx.install", install::instance);
- }
-
- r.insert<objs> (perform_update_id, "cxx.compile", compile::instance);
- r.insert<objs> (perform_clean_id, "cxx.compile", compile::instance);
- r.insert<objs> (configure_update_id, "cxx.compile", compile::instance);
-
- r.insert<libs> (perform_update_id, "cxx.link", link::instance);
- r.insert<libs> (perform_clean_id, "cxx.link", link::instance);
- r.insert<libs> (configure_update_id, "cxx.link", link::instance);
-
- r.insert<libs> (perform_install_id, "cxx.install", install::instance);
+ assert (m == nullptr);
+ m.reset (new module (move (d)));
}
- // Configure "installability" of our target types.
- //
- using namespace install;
-
- install_path<hxx> (b, dir_path ("include")); // Into install.include.
- install_path<ixx> (b, dir_path ("include"));
- install_path<txx> (b, dir_path ("include"));
- install_path<h> (b, dir_path ("include"));
-
+ static_cast<module&> (*m).init (r, b, loc, first, hints);
return true;
}
}
diff --git a/build2/cxx/msvc.cxx b/build2/cxx/msvc.cxx
deleted file mode 100644
index 9798046..0000000
--- a/build2/cxx/msvc.cxx
+++ /dev/null
@@ -1,331 +0,0 @@
-// file : build2/cxx/msvc.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
-// license : MIT; see accompanying LICENSE file
-
-#include <iostream> // cerr
-
-#include <build2/scope>
-#include <build2/target>
-#include <build2/context>
-#include <build2/variable>
-#include <build2/filesystem>
-#include <build2/diagnostics>
-
-#include <build2/cxx/common>
-
-using namespace std;
-using namespace butl;
-
-namespace build2
-{
- namespace cxx
- {
- using namespace bin;
-
- // Translate the target triplet CPU to lib.exe/link.exe /MACHINE option.
- //
- const char*
- msvc_machine (const string& cpu)
- {
- const char* m (cpu == "i386" || cpu == "i686" ? "/MACHINE:x86" :
- cpu == "x86_64" ? "/MACHINE:x64" :
- cpu == "arm" ? "/MACHINE:ARM" :
- cpu == "arm64" ? "/MACHINE:ARM64" :
- nullptr);
-
- if (m == nullptr)
- fail << "unable to translate CPU " << cpu << " to /MACHINE";
-
- return m;
- }
-
- // Filter cl.exe and link.exe noise.
- //
- void
- msvc_filter_cl (ifdstream& is, const path& src)
- {
- // While it appears VC always prints the source name (event if the
- // file does not exist), let's do a sanity check.
- //
- string l;
- if (getline (is, l) && l != src.leaf ().string ())
- cerr << l << endl;
- }
-
- void
- msvc_filter_link (ifdstream& is, const file& t, otype lt)
- {
- // Filter lines until we encounter something we don't recognize. We also
- // have to assume the messages can be translated.
- //
- for (string l; getline (is, l); )
- {
- // " Creating library foo\foo.dll.lib and object foo\foo.dll.exp"
- //
- if (lt == otype::s && l.compare (0, 3, " ") == 0)
- {
- path imp (static_cast<file*> (t.member)->path ().leaf ());
-
- if (l.find (imp.string ()) != string::npos &&
- l.find (imp.base ().string () + ".exp") != string::npos)
- continue;
- }
-
- // /INCREMENTAL causes linker to sometimes issue messages but now I
- // can't quite reproduce it.
- //
-
- cerr << l << endl;
- break;
- }
- }
-
- // Extract system library search paths from MSVC.
- //
- void
- msvc_library_search_paths (scope&, const string&, dir_paths&)
- {
- // The linker doesn't seem to have any built-in paths and all of them
- // come from the LIB environment variable.
-
- // @@ VC: how are we going to do this? E.g., cl-14 does this internally.
- // cl.exe /Be prints LIB.
- //
- // Should we actually bother? LIB is normally used for system
- // libraries and its highly unlikely we will see an explicit import
- // for a library from one of those directories.
- //
- }
-
- // Inspect the file and determine if it is static or import library.
- // Return otype::e if it is neither (which we quietly ignore).
- //
- static otype
- library_type (const path& ld, const path& l)
- {
- // The are several reasonably reliable methods to tell whether it is a
- // static or import library. One is lib.exe /LIST -- if there aren't any
- // .obj members, then it is most likely an import library (it can also
- // be an empty static library in which case there won't be any members).
- // For an import library /LIST will print a bunch of .dll members.
- //
- // Another approach is dumpbin.exe (link.exe /DUMP) with /ARCHIVEMEMBERS
- // (similar to /LIST) and /LINKERMEMBER (looking for __impl__ symbols or
- // _IMPORT_DESCRIPTOR_).
- //
- // Note also, that apparently it is possible to have a hybrid library.
- //
- // While the lib.exe approach is probably the simplest, the problem is
- // it will require us loading the bin.ar module even if we are not
- // building any static libraries. On the other hand, if we are searching
- // for libraries then we have bin.ld. So we will use the link.exe /DUMP
- // /ARCHIVEMEMBERS.
- //
- const char* args[] = {ld.string ().c_str (),
- "/DUMP", // Must come first.
- "/NOLOGO",
- "/ARCHIVEMEMBERS",
- l.string ().c_str (),
- nullptr};
-
- // Link.exe seem to always dump everything to stdout but just in case
- // redirect stderr to stdout.
- //
- process pr (start_run (args, false));
-
- bool obj (false), dll (false);
- string s;
-
- try
- {
- ifdstream is (pr.in_ofd, fdstream_mode::skip, ifdstream::badbit);
-
- while (getline (is, s))
- {
- // Detect the one error we should let through.
- //
- if (s.compare (0, 18, "unable to execute ") == 0)
- break;
-
- // The lines we are interested in seem to have this form (though
- // presumably the "Archive member name at" part can be translated):
- //
- // Archive member name at 746: [...]hello.dll[/][ ]*
- // Archive member name at 8C70: [...]hello.lib.obj[/][ ]*
- //
- size_t n (s.size ());
-
- for (; n != 0 && s[n - 1] == ' '; --n) ; // Skip trailing spaces.
-
- if (n >= 7) // At least ": X.obj" or ": X.dll".
- {
- --n;
-
- if (s[n] == '/') // Skip trailing slash if one is there.
- --n;
-
- n -= 3; // Beginning of extension.
-
- if (s[n] == '.')
- {
- // Make sure there is ": ".
- //
- size_t p (s.rfind (':', n - 1));
-
- if (p != string::npos && s[p + 1] == ' ')
- {
- const char* e (s.c_str () + n + 1);
-
- if (casecmp (e, "obj", 3) == 0)
- obj = true;
-
- if (casecmp (e, "dll", 3) == 0)
- dll = true;
- }
- }
- }
- }
- }
- catch (const ifdstream::failure&)
- {
- // Presumably the child process failed. Let finish_run() deal with
- // that.
- }
-
- if (!finish_run (args, false, pr, s))
- return otype::e;
-
- if (obj && dll)
- {
- warn << l << " looks like hybrid static/import library, ignoring";
- return otype::e;
- }
-
- if (!obj && !dll)
- {
- warn << l << " looks like empty static or import library, ignoring";
- return otype::e;
- }
-
- return obj ? otype::a : otype::s;
- }
-
- template <typename T>
- static T*
- search_library (const path& ld,
- const dir_path& d,
- prerequisite& p,
- otype lt,
- const char* pfx,
- const char* sfx)
- {
- // Pretty similar logic to link::search_library().
- //
- tracer trace ("cxx::msvc_search_library");
-
- // Assemble the file path.
- //
- path f (d);
-
- if (*pfx != '\0')
- {
- f /= pfx;
- f += p.name;
- }
- else
- f /= p.name;
-
- if (*sfx != '\0')
- f += sfx;
-
- const string& e (
- p.ext == nullptr || p.is_a<lib> () // Only for liba/libs.
- ? extension_pool.find ("lib")
- : *p.ext);
-
- if (!e.empty ())
- {
- f += '.';
- f += e;
- }
-
- // Check if the file exists and is of the expected type.
- //
- timestamp mt (file_mtime (f));
-
- if (mt != timestamp_nonexistent && library_type (ld, f) == lt)
- {
- // Enter the target.
- //
- T& t (targets.insert<T> (d, dir_path (), p.name, &e, trace));
-
- if (t.path ().empty ())
- t.path (move (f));
-
- t.mtime (mt);
- return &t;
- }
-
- return nullptr;
- }
-
- liba*
- msvc_search_static (const path& ld, const dir_path& d, prerequisite& p)
- {
- liba* r (nullptr);
-
- auto search = [&r, &ld, &d, &p] (const char* pf, const char* sf) -> bool
- {
- r = search_library<liba> (ld, d, p, otype::a, pf, sf);
- return r != nullptr;
- };
-
- // Try:
- // foo.lib
- // libfoo.lib
- // foolib.lib
- // foo_static.lib
- //
- return
- search ("", "") ||
- search ("lib", "") ||
- search ("", "lib") ||
- search ("", "_static") ? r : nullptr;
- }
-
- libs*
- msvc_search_shared (const path& ld, const dir_path& d, prerequisite& p)
- {
- tracer trace ("cxx::msvc_search_shared");
-
- libs* r (nullptr);
-
- auto search = [&r, &ld, &d, &p, &trace] (
- const char* pf, const char* sf) -> bool
- {
- if (libi* i = search_library<libi> (ld, d, p, otype::s, pf, sf))
- {
- r = &targets.insert<libs> (d, dir_path (), p.name, nullptr, trace);
-
- if (r->member == nullptr)
- {
- r->mtime (i->mtime ());
- r->member = i;
- }
- }
-
- return r != nullptr;
- };
-
- // Try:
- // foo.lib
- // libfoo.lib
- // foodll.lib
- //
- return
- search ("", "") ||
- search ("lib", "") ||
- search ("", "dll") ? r : nullptr;
- }
- }
-}
diff --git a/build2/cxx/target b/build2/cxx/target
index 154ec24..0239c25 100644
--- a/build2/cxx/target
+++ b/build2/cxx/target
@@ -9,11 +9,15 @@
#include <build2/utility>
#include <build2/target>
+#include <build2/cc/target>
namespace build2
{
namespace cxx
{
+ using cc::h;
+ using cc::c;
+
class hxx: public file
{
public:
@@ -53,28 +57,6 @@ namespace build2
static const target_type static_type;
virtual const target_type& dynamic_type () const {return static_type;}
};
-
- //@@ TMP: should be in c-common or some such.
- //
- class h: public file
- {
- public:
- using file::file;
-
- public:
- static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
- };
-
- class c: public file
- {
- public:
- using file::file;
-
- public:
- static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
- };
}
}
diff --git a/build2/cxx/target.cxx b/build2/cxx/target.cxx
index 22ace50..30afd89 100644
--- a/build2/cxx/target.cxx
+++ b/build2/cxx/target.cxx
@@ -59,29 +59,5 @@ namespace build2
&search_file,
false
};
-
- extern const char h_ext_def[] = "h";
- const target_type h::static_type
- {
- "h",
- &file::static_type,
- &target_factory<h>,
- &target_extension_var<ext_var, h_ext_def>,
- nullptr,
- &search_file,
- false
- };
-
- extern const char c_ext_def[] = "c";
- const target_type c::static_type
- {
- "c",
- &file::static_type,
- &target_factory<c>,
- &target_extension_var<ext_var, c_ext_def>,
- nullptr,
- &search_file,
- false
- };
}
}
diff --git a/build2/cxx/utility b/build2/cxx/utility
deleted file mode 100644
index 7333af6..0000000
--- a/build2/cxx/utility
+++ /dev/null
@@ -1,42 +0,0 @@
-// file : build2/cxx/utility -*- C++ -*-
-// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
-// license : MIT; see accompanying LICENSE file
-
-#ifndef BUILD2_CXX_UTILITY
-#define BUILD2_CXX_UTILITY
-
-#include <build2/types>
-#include <build2/utility>
-
-#include <build2/target>
-
-#include <build2/cxx/common>
-
-namespace build2
-{
- namespace cxx
- {
- // T is either target or scope.
- //
- template <typename T>
- void
- append_std (cstrings&, scope& rs, const string& cid, T&, string& storage);
-
- template <typename T>
- void
- hash_std (sha256&, scope& rs, const string& cid, T&);
-
- // Append or hash library options from one of the cxx.export.* variables
- // recursively, prerequisite libraries first.
- //
- void
- append_lib_options (cstrings&, target&, const char* variable, lorder);
-
- void
- hash_lib_options (sha256&, target&, const char* variable, lorder);
- }
-}
-
-#include <build2/cxx/utility.ixx>
-
-#endif // BUILD2_CXX_UTILITY
diff --git a/build2/cxx/utility.cxx b/build2/cxx/utility.cxx
deleted file mode 100644
index 7aae6ac..0000000
--- a/build2/cxx/utility.cxx
+++ /dev/null
@@ -1,109 +0,0 @@
-// file : build2/cxx/utility.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
-// license : MIT; see accompanying LICENSE file
-
-#include <build2/cxx/utility>
-
-#include <build2/bin/target>
-
-using namespace std;
-
-namespace build2
-{
- namespace cxx
- {
- // Return true if there is an option (stored in s).
- //
- bool
- translate_std (scope& rs, const string& cid, const value& val, string& s)
- {
- const string& v (cast<string> (val));
-
- if (cid == "msvc")
- {
- // C++ standard-wise, with VC++ you get what you get. The question is
- // whether we should verify that the requested standard is provided by
- // this VC++ version. And if so, from which version should we say VC++
- // supports 11, 14, and 17? We should probably be as loose as possible
- // here since the author will always be able to tighten (but not
- // loosen) this in the buildfile (i.e., detect unsupported versions).
- //
- // For now we are not going to bother doing this for C++03.
- //
- if (v != "98" && v != "03")
- {
- uint64_t cver (cast<uint64_t> (rs["cxx.version.major"]));
-
- // @@ Is mapping for 14 and 17 correct? Maybe Update 2 for 14?
- //
- if ((v == "11" && cver < 16) || // C++11 since VS2010/10.0.
- (v == "14" && cver < 19) || // C++14 since VS2015/14.0.
- (v == "17" && cver < 20)) // C++17 since VS20??/15.0.
- {
- fail << "C++" << v << " is not supported by "
- << cast<string> (rs["cxx.signature"]) <<
- info << "required by " << rs.out_path ();
- }
- }
-
- return false;
- }
- else
- {
- // Translate 11 to 0x, 14 to 1y, and 17 to 1z for compatibility with
- // older versions of the compilers.
- //
- s = "-std=";
-
- if (v == "98")
- s += "c++98";
- else if (v == "03")
- s += "c++03";
- else if (v == "11")
- s += "c++0x";
- else if (v == "14")
- s += "c++1y";
- else if (v == "17")
- s += "c++1z";
- else
- s += v; // In case the user specifies something like 'gnu++17'.
-
- return true;
- }
- }
-
- void
- append_lib_options (cstrings& args, target& l, const char* var, lorder lo)
- {
- using namespace bin;
-
- for (target* t: l.prerequisite_targets)
- {
- if (lib* l = t->is_a<lib> ())
- t = &link_member (*l, lo); // Pick one of the members.
-
- if (t->is_a<liba> () || t->is_a<libs> ())
- append_lib_options (args, *t, var, lo);
- }
-
- append_options (args, l, var);
- }
-
- void
- hash_lib_options (sha256& csum, target& l, const char* var, lorder lo)
- {
- using namespace bin;
-
- for (target* t: l.prerequisite_targets)
- {
- if (lib* l = t->is_a<lib> ())
- t = &link_member (*l, lo); // Pick one of the members.
-
- if (t->is_a<liba> () || t->is_a<libs> ())
- hash_lib_options (csum, *t, var, lo);
- }
-
- hash_options (csum, l, var);
- }
- }
-}
diff --git a/build2/cxx/utility.ixx b/build2/cxx/utility.ixx
deleted file mode 100644
index c624e87..0000000
--- a/build2/cxx/utility.ixx
+++ /dev/null
@@ -1,33 +0,0 @@
-// file : build2/cxx/utility.ixx -*- C++ -*-
-// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
-// license : MIT; see accompanying LICENSE file
-
-namespace build2
-{
- namespace cxx
- {
- bool
- translate_std (scope&, const string&, const value&, string&);
-
- template <typename T>
- inline void
- append_std (cstrings& args, scope& rs, const string& cid, T& t, string& s)
- {
- if (auto l = t["cxx.std"])
- if (translate_std (rs, cid, *l, s))
- args.push_back (s.c_str ());
- }
-
- template <typename T>
- inline void
- hash_std (sha256& csum, scope& rs, const string& cid, T& t)
- {
- if (auto l = t["cxx.std"])
- {
- string s;
- if (translate_std (rs, cid, *l, s))
- csum.append (s);
- }
- }
- }
-}
diff --git a/build2/cxx/windows-manifest.cxx b/build2/cxx/windows-manifest.cxx
deleted file mode 100644
index 915610d..0000000
--- a/build2/cxx/windows-manifest.cxx
+++ /dev/null
@@ -1,136 +0,0 @@
-// file : build2/cxx/windows-manifest.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
-// license : MIT; see accompanying LICENSE file
-
-#include <build2/scope>
-#include <build2/target>
-#include <build2/context>
-#include <build2/variable>
-#include <build2/filesystem>
-#include <build2/diagnostics>
-
-using namespace std;
-using namespace butl;
-
-namespace build2
-{
- namespace cxx
- {
- // Translate the compiler target CPU value to the processorArchitecture
- // attribute value.
- //
- const char*
- windows_manifest_arch (const string& tcpu)
- {
- const char* pa (tcpu == "i386" || tcpu == "i686" ? "x86" :
- tcpu == "x86_64" ? "amd64" :
- nullptr);
-
- if (pa == nullptr)
- fail << "unable to translate CPU " << tcpu << " to manifest "
- << "processor architecture";
-
- return pa;
- }
-
- // Generate a Windows manifest and if necessary create/update the manifest
- // file corresponding to the exe{} target. Return the manifest file path.
- //
- path
- windows_manifest (file& t, bool rpath_assembly)
- {
- tracer trace ("cxx::windows_manifest");
-
- scope& rs (t.root_scope ());
-
- const char* pa (
- windows_manifest_arch (
- cast<string> (rs["cxx.target.cpu"])));
-
- string m;
-
- m += "<?xml version='1.0' encoding='UTF-8' standalone='yes'?>\n";
- m += "<assembly xmlns='urn:schemas-microsoft-com:asm.v1'\n";
- m += " manifestVersion='1.0'>\n";
-
- // Program name, version, etc.
- //
- string name (t.path ().leaf ().string ());
-
- m += " <assemblyIdentity name='"; m += name; m += "'\n";
- m += " type='win32'\n";
- m += " processorArchitecture='"; m += pa; m += "'\n";
- m += " version='0.0.0.0'/>\n";
-
- // Our rpath-emulating assembly.
- //
- if (rpath_assembly)
- {
- m += " <dependency>\n";
- m += " <dependentAssembly>\n";
- m += " <assemblyIdentity name='"; m += name; m += ".dlls'\n";
- m += " type='win32'\n";
- m += " processorArchitecture='"; m += pa; m += "'\n";
- m += " language='*'\n";
- m += " version='0.0.0.0'/>\n";
- m += " </dependentAssembly>\n";
- m += " </dependency>\n";
- }
-
- // UAC information. Without it Windows will try to guess, which, as you
- // can imagine, doesn't end well.
- //
- m += " <trustInfo xmlns='urn:schemas-microsoft-com:asm.v3'>\n";
- m += " <security>\n";
- m += " <requestedPrivileges>\n";
- m += " <requestedExecutionLevel level='asInvoker' uiAccess='false'/>\n";
- m += " </requestedPrivileges>\n";
- m += " </security>\n";
- m += " </trustInfo>\n";
-
- m += "</assembly>\n";
-
- // If the manifest file exists, compare to its content. If nothing
- // changed (common case), then we can avoid any further updates.
- //
- // The potentially faster alternative would be to hash it and store an
- // entry in depdb. This, however, gets a bit complicated since we will
- // need to avoid a race between the depdb and .manifest updates.
- //
- path mf (t.path () + ".manifest");
-
- if (file_exists (mf))
- {
- try
- {
- ifdstream ifs (mf);
- string s;
- getline (ifs, s, '\0');
-
- if (s == m)
- return mf;
- }
- catch (const ifdstream::failure&)
- {
- // Whatever the reason we failed for , let's rewrite the file.
- }
- }
-
- if (verb >= 3)
- text << "cat >" << mf;
-
- try
- {
- ofdstream ofs (mf);
- ofs << m;
- ofs.close ();
- }
- catch (const ofdstream::failure& e)
- {
- fail << "unable to write to " << m << ": " << e.what ();
- }
-
- return mf;
- }
- }
-}
diff --git a/build2/cxx/windows-rpath.cxx b/build2/cxx/windows-rpath.cxx
deleted file mode 100644
index b52315c..0000000
--- a/build2/cxx/windows-rpath.cxx
+++ /dev/null
@@ -1,274 +0,0 @@
-// file : build2/cxx/windows-rpath.cxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2016 Code Synthesis Ltd
-// license : MIT; see accompanying LICENSE file
-
-#include <errno.h> // E*
-
-#include <set>
-
-#include <build2/scope>
-#include <build2/context>
-#include <build2/variable>
-#include <build2/filesystem>
-#include <build2/diagnostics>
-
-#include <build2/bin/target>
-
-using namespace std;
-using namespace butl;
-
-namespace build2
-{
- namespace cxx
- {
- // Provide limited emulation of the rpath functionality on Windows using a
- // side-by-side assembly. In a nutshell, the idea is to create an assembly
- // with links to all the prerequisite DLLs.
- //
- // Note that currently our assemblies contain all the DLLs that the
- // executable depends on, recursively. The alternative approach could be
- // to also create assemblies for DLLs. This appears to be possible (but we
- // will have to use the resource ID 2 for such a manifest). And it will
- // probably be necessary for DLLs that are loaded dynamically with
- // LoadLibrary(). The tricky part is how such nested assemblies will be
- // found. Since we are effectively (from the loader's point of view)
- // copying the DLLs, we will also have to copy their assemblies (because
- // the loader looks for them in the same directory as the DLL). It's not
- // clear how well such nested assemblies are supported (e.g., in Wine).
- //
- using namespace bin;
-
- // Return the greatest (newest) timestamp of all the DLLs that we will be
- // adding to the assembly or timestamp_nonexistent if there aren't any.
- //
- timestamp
- windows_rpath_timestamp (file& t)
- {
- timestamp r (timestamp_nonexistent);
-
- for (target* pt: t.prerequisite_targets)
- {
- if (libs* ls = pt->is_a<libs> ())
- {
- // Skip installed DLLs.
- //
- if (ls->path ().empty ())
- continue;
-
- // What if the DLL is in the same directory as the executable, will
- // it still be found even if there is an assembly? On the other
- // hand, handling it as any other won't hurt us much.
- //
- timestamp t;
-
- if ((t = ls->mtime ()) > r)
- r = t;
-
- if ((t = windows_rpath_timestamp (*ls)) > r)
- r = t;
- }
- }
-
- return r;
- }
-
- // Like *_timestamp() but actually collect the DLLs.
- //
- static void
- rpath_dlls (set<libs*>& s, file& t)
- {
- for (target* pt: t.prerequisite_targets)
- {
- if (libs* ls = pt->is_a<libs> ())
- {
- // Skip installed DLLs.
- //
- if (ls->path ().empty ())
- continue;
-
- s.insert (ls);
- rpath_dlls (s, *ls);
- }
- }
- }
-
- const char*
- windows_manifest_arch (const string& tcpu); // windows-manifest.cxx
-
- // The ts argument should be the the DLLs timestamp returned by
- // *_timestamp().
- //
- // The scratch argument should be true if the DLL set has changed and we
- // need to regenerate everything from scratch. Otherwise, we try to avoid
- // unnecessary work by comparing the DLLs timestamp against the assembly
- // manifest file.
- //
- void
- windows_rpath_assembly (file& t, timestamp ts, bool scratch)
- {
- // Assembly paths and name.
- //
- dir_path ad (path_cast<dir_path> (t.path () + ".dlls"));
- string an (ad.leaf ().string ());
- path am (ad / path (an + ".manifest"));
-
- // First check if we actually need to do anything. Since most of the
- // time we won't, we don't want to combine it with the *_dlls() call
- // below which allocates memory, etc.
- //
- if (!scratch)
- {
- // The corner case here is when _timestamp() returns nonexistent
- // signalling that there aren't any DLLs but the assembly manifest
- // file exists. This, however, can only happen if we somehow managed
- // to transition from the "have DLLs" state to "no DLLs" without going
- // through the "from scratch" update. And this shouldn't happen
- // (famous last words before a core dump).
- //
- if (ts <= file_mtime (am))
- return;
- }
-
- scope& rs (t.root_scope ());
-
- // Next collect the set of DLLs that will be in our assembly. We need to
- // do this recursively which means we may end up with duplicates. Also,
- // it is possible that there aren't/no longer are any DLLs which means
- // we just need to clean things up.
- //
- bool empty (ts == timestamp_nonexistent);
-
- set<libs*> dlls;
- if (!empty)
- rpath_dlls (dlls, t);
-
- // Clean the assembly directory and make sure it exists. Maybe it would
- // have been faster to overwrite the existing manifest rather than
- // removing the old one and creating a new one. But this is definitely
- // simpler.
- //
- {
- rmdir_status s (build2::rmdir_r (ad, empty, 3));
-
- if (empty)
- return;
-
- if (s == rmdir_status::not_exist)
- mkdir (ad, 3);
- }
-
- const char* pa (
- windows_manifest_arch (
- cast<string> (rs["cxx.target.cpu"])));
-
- if (verb >= 3)
- text << "cat >" << am;
-
- try
- {
- ofdstream ofs (am);
-
- ofs << "<?xml version='1.0' encoding='UTF-8' standalone='yes'?>\n"
- << "<assembly xmlns='urn:schemas-microsoft-com:asm.v1'\n"
- << " manifestVersion='1.0'>\n"
- << " <assemblyIdentity name='" << an << "'\n"
- << " type='win32'\n"
- << " processorArchitecture='" << pa << "'\n"
- << " version='0.0.0.0'/>\n";
-
- scope& as (*rs.weak_scope ()); // Amalgamation scope.
-
- auto link = [&as, &ad] (const path& f, const path& l)
- {
- auto print = [&f, &l] (const char* cmd)
- {
- if (verb >= 3)
- text << cmd << ' ' << f << ' ' << l;
- };
-
- // First we try to create a symlink. If that fails (e.g., "Windows
- // happens"), then we resort to hard links. If that doesn't work
- // out either (e.g., not on the same filesystem), then we fall back
- // to copies. So things are going to get a bit nested.
- //
- try
- {
- // For the symlink use a relative target path if both paths are
- // part of the same amalgamation. This way if the amalgamation is
- // moved as a whole, the links will remain valid.
- //
- if (f.sub (as.out_path ()))
- mksymlink (f.relative (ad), l);
- else
- mksymlink (f, l);
-
- print ("ln -s");
- }
- catch (const system_error& e)
- {
- int c (e.code ().value ());
-
- if (c != EPERM && c != ENOSYS)
- {
- print ("ln -s");
- fail << "unable to create symlink " << l << ": " << e.what ();
- }
-
- try
- {
- mkhardlink (f, l);
- print ("ln");
- }
- catch (const system_error& e)
- {
- int c (e.code ().value ());
-
- if (c != EPERM && c != ENOSYS)
- {
- print ("ln");
- fail << "unable to create hardlink " << l << ": " << e.what ();
- }
-
- try
- {
- cpfile (f, l);
- print ("cp");
- }
- catch (const system_error& e)
- {
- print ("cp");
- fail << "unable to create copy " << l << ": " << e.what ();
- }
- }
- }
-
- };
-
- for (libs* dll: dlls)
- {
- const path& dp (dll->path ()); // DLL path.
- const path dn (dp.leaf ()); // DLL name.
- link (dp, ad / dn);
-
- // Link .pdb if there is one (second member of the ad hoc group).
- //
- if (dll->member != nullptr && dll->member->member != nullptr)
- {
- file& pdb (static_cast<file&> (*dll->member->member));
- link (pdb.path (), ad / pdb.path ().leaf ());
- }
-
- ofs << " <file name='" << dn.string () << "'/>\n";
- }
-
- ofs << "</assembly>\n";
-
- ofs.close ();
- }
- catch (const ofdstream::failure& e)
- {
- fail << "unable to write to " << am << ": " << e.what ();
- }
- }
- }
-}