diff options
Diffstat (limited to 'libbuild2/cc')
32 files changed, 5622 insertions, 2747 deletions
diff --git a/libbuild2/cc/buildfile b/libbuild2/cc/buildfile index e98e3de..e090e76 100644 --- a/libbuild2/cc/buildfile +++ b/libbuild2/cc/buildfile @@ -6,15 +6,24 @@ include ../ impl_libs = ../lib{build2} # Implied interface dependency. -import impl_libs += libpkgconf%lib{pkgconf} +libpkgconf = $config.build2.libpkgconf + +if $libpkgconf + import impl_libs += libpkgconf%lib{pkgconf} +else + import impl_libs += libpkg-config%lib{pkg-config} include ../bin/ intf_libs = ../bin/lib{build2-bin} -./: lib{build2-cc}: libul{build2-cc}: {hxx ixx txx cxx}{** -**.test...} \ - h{msvc-setup} \ +./: lib{build2-cc}: libul{build2-cc}: \ + {hxx ixx txx cxx}{** -pkgconfig-lib* -**.test...} \ + h{msvc-setup} \ $intf_libs $impl_libs +libul{build2-cc}: cxx{pkgconfig-libpkgconf}: include = $libpkgconf +libul{build2-cc}: cxx{pkgconfig-libpkg-config}: include = (!$libpkgconf) + # Unit tests. # exe{*.test}: @@ -38,6 +47,9 @@ for t: cxx{**.test...} obja{*}: cxx.poptions += -DLIBBUILD2_CC_STATIC_BUILD objs{*}: cxx.poptions += -DLIBBUILD2_CC_SHARED_BUILD +if $libpkgconf + cxx.poptions += -DBUILD2_LIBPKGCONF + if ($cxx.target.class == 'windows') cxx.libs += $regex.apply(advapi32 ole32 oleaut32, \ '(.+)', \ diff --git a/libbuild2/cc/common.cxx b/libbuild2/cc/common.cxx index 09a1752..07a4d48 100644 --- a/libbuild2/cc/common.cxx +++ b/libbuild2/cc/common.cxx @@ -39,6 +39,11 @@ namespace build2 // 3. dependency libs (prerequisite_targets, left to right, depth-first) // 4. dependency libs (*.libs variables). // + // If proc_opt_group is true, then pass to proc_opt the group rather than + // the member if a member was picked (according to linfo) form a group. + // This is useful when we only want to see the common options set on the + // group. + // // If either proc_opt or proc_lib return false, then any further // processing of this library or its dependencies is skipped. This can be // used to "prune" the graph traversal in case of duplicates. 
Note that @@ -49,19 +54,19 @@ namespace build2 // array that contains the current library dependency chain all the way to // the library passed to process_libraries(). The first element of this // array is NULL. If this argument is NULL, then this is a library without - // a target (e.g., -lpthread) and its name is in the second argument - // (which could be resolved to an absolute path or passed as an -l<name> - // option). Otherwise, (the first argument is not NULL), the second - // argument contains the target path (which can be empty in case of the - // unknown DLL path). + // a target (e.g., -lm, -pthread, etc) and its name is in the second + // argument (which could be resolved to an absolute path or passed as an + // -l<name>/-pthread option). Otherwise, (the first argument is not NULL), + // the second argument contains the target path (which can be empty in + // case of the unknown DLL path). // - // Initially, the second argument (library name) was a string (e.g., - // -lpthread) but there are cases where the library is identified with - // multiple options, such as -framework CoreServices (there are also cases - // like -Wl,--whole-archive -lfoo -lbar -Wl,--no-whole-archive). So now it - // is a vector_view that contains a fragment of options (from one of the - // *.libs variables) that corresponds to the library (or several - // libraries, as in the --whole-archive example above). + // Initially, the second argument (library name) was a string (e.g., -lm) + // but there are cases where the library is identified with multiple + // options, such as -framework CoreServices (there are also cases like + // -Wl,--whole-archive -lfoo -lbar -Wl,--no-whole-archive). So now it is a + // vector_view that contains a fragment of options (from one of the *.libs + // variables) that corresponds to the library (or several libraries, as in + // the --whole-archive example above). 
// // Storing a reference to elements of library name in proc_lib is legal // (they come either from the target's path or from one of the *.libs @@ -72,10 +77,18 @@ namespace build2 // not to pick the liba/libs{} member for installed libraries instead // passing the lib{} group itself. This can be used to match the semantics // of file_rule which, when matching prerequisites, does not pick the - // liba/libs{} member (naturally) but just matches the lib{} group. + // liba/libs{} member (naturally) but just matches the lib{} group. Note + // that currently this truly only works for installed lib{} since non- + // installed ones don't have cc.type set. See proc_opt_group for an + // alternative way to (potentially) achieve the desired semantics. // // Note that if top_li is present, then the target passed to proc_impl, - // proc_lib, and proc_opt is always a file. + // proc_lib, and proc_opt (unless proc_opt_group is true) is always a + // file. + // + // The dedup argument is part of the interface dependency deduplication + // functionality, similar to $x.deduplicate_export_libs(). Note, however, + // that here we do it "properly" (i.e., using group members, etc). // void common:: process_libraries ( @@ -83,7 +96,7 @@ namespace build2 const scope& top_bs, optional<linfo> top_li, const dir_paths& top_sysd, - const mtime_target& l, // liba/libs{} or lib{} + const mtime_target& l, // liba/libs{}, libux{}, or lib{} bool la, lflags lf, const function<bool (const target&, @@ -92,34 +105,68 @@ namespace build2 const small_vector<reference_wrapper< const string>, 2>&, // Library "name". lflags, // Link flags. - const string* type, // cc.type + const string* type, // whole cc.type bool sys)>& proc_lib, // System library? const function<bool (const target&, - const string& type, // cc.type + const string& lang, // lang from cc.type bool com, // cc. or x. bool exp)>& proc_opt, // *.export. - bool self /*= false*/, // Call proc_lib on l? 
- library_cache* cache, - small_vector<const target*, 24>* chain) const + bool self, // Call proc_lib on l? + bool proc_opt_group, // Call proc_opt on group instead of member? + library_cache* cache) const { library_cache cache_storage; if (cache == nullptr) cache = &cache_storage; - small_vector<const target*, 24> chain_storage; - if (chain == nullptr) - { - chain = &chain_storage; + small_vector<const target*, 32> chain; - if (proc_lib) - chain->push_back (nullptr); - } + if (proc_lib) + chain.push_back (nullptr); + + process_libraries_impl (a, top_bs, top_li, top_sysd, + nullptr, l, la, lf, + proc_impl, proc_lib, proc_opt, + self, proc_opt_group, + cache, &chain, nullptr); + } + void common:: + process_libraries_impl ( + action a, + const scope& top_bs, + optional<linfo> top_li, + const dir_paths& top_sysd, + const target* lg, + const mtime_target& l, + bool la, + lflags lf, + const function<bool (const target&, + bool la)>& proc_impl, + const function<bool (const target* const*, + const small_vector<reference_wrapper< + const string>, 2>&, + lflags, + const string* type, + bool sys)>& proc_lib, + const function<bool (const target&, + const string& lang, + bool com, + bool exp)>& proc_opt, + bool self, + bool proc_opt_group, + library_cache* cache, + small_vector<const target*, 32>* chain, + small_vector<const target*, 32>* dedup) const + { // Add the library to the chain. // if (self && proc_lib) chain->push_back (&l); + // We only lookup public variables so go straight for the public + // variable pool. + // auto& vp (top_bs.ctx.var_pool); do // Breakout loop. @@ -131,25 +178,45 @@ namespace build2 // performance we use lookup_original() directly and only look in the // target (so no target type/pattern-specific). // - const string* t ( + const string* pt ( cast_null<string> ( l.state[a].lookup_original (c_type, true /* target_only */).first)); + // cc.type value format is <lang>[,...]. + // + size_t p; + const string& t (pt != nullptr + ? 
((p = pt->find (',')) == string::npos + ? *pt + : string (*pt, 0, p)) + : string ()); + + // Why are we bothering with impl for binless libraries since all + // their dependencies are by definition interface? Well, for one, it + // could be that it is dynamically-binless (e.g., binless on some + // platforms or in some configurations and binful on/in others). In + // this case it would be helpful to have a uniform semantics so that, + // for example, *.libs are used for liba{} regardless of whether it is + // binless or not. On the other hand, having to specify both + // *.export.libs=-lm and *.libs=-lm (or *.export.impl_libs) for an + // always-binless library is sure not very intuitive. Not sure if we + // can win here. + // bool impl (proc_impl && proc_impl (l, la)); bool cc (false), same (false); - if (t != nullptr) + if (!t.empty ()) { - cc = (*t == "cc"); - same = (!cc && *t == x); + cc = (t == "cc"); + same = (!cc && t == x); } - const scope& bs (t == nullptr || cc ? top_bs : l.base_scope ()); + const scope& bs (t.empty () || cc ? top_bs : l.base_scope ()); lookup c_e_libs; lookup x_e_libs; - if (t != nullptr) + if (!t.empty ()) { // Note that we used to treat *.export.libs set on the liba/libs{} // members as *.libs overrides rather than as member-specific @@ -168,8 +235,6 @@ namespace build2 // // See also deduplicate_export_libs() if changing anything here. // - // @@ PERF: do target_only (helps a bit in non-installed case)? - // { const variable& v (impl ? c_export_impl_libs : c_export_libs); c_e_libs = l.lookup_original (v, false, &bs).first; @@ -180,7 +245,7 @@ namespace build2 const variable& v ( same ? (impl ? x_export_impl_libs : x_export_libs) - : vp[*t + (impl ? ".export.impl_libs" : ".export.libs")]); + : vp[t + (impl ? ".export.impl_libs" : ".export.libs")]); x_e_libs = l.lookup_original (v, false, &bs).first; } @@ -188,12 +253,14 @@ namespace build2 // if (proc_opt) { + const target& ol (proc_opt_group && lg != nullptr ? 
*lg : l); + // If all we know is it's a C-common library, then in both cases // we only look for cc.export.*. // if (cc) { - if (!proc_opt (l, *t, true, true)) break; + if (!proc_opt (ol, t, true, true)) break; } else { @@ -210,24 +277,24 @@ namespace build2 // // Note: options come from *.export.* variables. // - if (!proc_opt (l, *t, false, true) || - !proc_opt (l, *t, true, true)) break; + if (!proc_opt (ol, t, false, true) || + !proc_opt (ol, t, true, true)) break; } else { // For default export we use the same options as were used // to build the library. // - if (!proc_opt (l, *t, false, false) || - !proc_opt (l, *t, true, false)) break; + if (!proc_opt (ol, t, false, false) || + !proc_opt (ol, t, true, false)) break; } } else { // Interface: only add *.export.* (interface dependencies). // - if (!proc_opt (l, *t, false, true) || - !proc_opt (l, *t, true, true)) break; + if (!proc_opt (ol, t, false, true) || + !proc_opt (ol, t, true, true)) break; } } } @@ -268,12 +335,12 @@ namespace build2 const file* f; const path& p ((f = l.is_a<file> ()) ? f->path () : empty_path); - bool s (t != nullptr // If cc library (matched or imported). + bool s (pt != nullptr // If cc library (matched or imported). ? cast_false<bool> (l.vars[c_system]) : !p.empty () && sys (top_sysd, p.string ())); proc_lib_name = {p.string ()}; - if (!proc_lib (&chain->back (), proc_lib_name, lf, t, s)) + if (!proc_lib (&chain->back (), proc_lib_name, lf, pt, s)) break; } @@ -283,21 +350,21 @@ namespace build2 // Find system search directories corresponding to this library, i.e., // from its project and for its type (C, C++, etc). // - auto find_sysd = [&top_sysd, t, cc, same, &bs, &sysd, this] () + auto find_sysd = [&top_sysd, &vp, t, cc, same, &bs, &sysd, this] () { // Use the search dirs corresponding to this library scope/type. // - sysd = (t == nullptr || cc) + sysd = (t.empty () || cc) ? &top_sysd // Imported library, use importer's sysd. : &cast<dir_paths> ( bs.root_scope ()->vars[same ? 
x_sys_lib_dirs - : bs.ctx.var_pool[*t + ".sys_lib_dirs"]]); + : vp[t + ".sys_lib_dirs"]]); }; auto find_linfo = [top_li, t, cc, &bs, &l, &li] () { - li = (t == nullptr || cc) + li = (t.empty () || cc) ? top_li : optional<linfo> (link_info (bs, link_type (l).type)); // @@ PERF }; @@ -315,11 +382,16 @@ namespace build2 for (const prerequisite_target& pt: l.prerequisite_targets[a]) { // Note: adhoc prerequisites are not part of the library metadata - // protocol (and we should check for adhoc first to avoid races). + // protocol (and we should check for adhoc first to avoid races + // during execute). // - if (pt.adhoc || pt == nullptr) + if (pt.adhoc () || pt == nullptr) continue; + if (marked (pt)) + fail << "implicit dependency cycle detected involving library " + << l; + bool la; const file* f; @@ -327,13 +399,20 @@ namespace build2 (la = (f = pt->is_a<libux> ())) || ( f = pt->is_a<libs> ())) { + // See link_rule for details. + // + const target* g ((pt.include & include_group) != 0 + ? f->group + : nullptr); + if (sysd == nullptr) find_sysd (); if (!li) find_linfo (); - process_libraries (a, bs, *li, *sysd, - *f, la, pt.data, - proc_impl, proc_lib, proc_opt, true, - cache, chain); + process_libraries_impl (a, bs, *li, *sysd, + g, *f, la, pt.data /* lflags */, + proc_impl, proc_lib, proc_opt, + true /* self */, proc_opt_group, + cache, chain, nullptr); } } } @@ -344,7 +423,7 @@ namespace build2 // If it is not a C-common library, then it probably doesn't have any // of the *.libs. // - if (t != nullptr) + if (!t.empty ()) { optional<dir_paths> usrd; // Extract lazily. @@ -366,8 +445,8 @@ namespace build2 // Determine the length of the library name fragment as well as // whether it is a system library. 
Possible length values are: // - // 1 - just the argument itself (-lpthread) - // 2 - argument and next element (-l pthread, -framework CoreServices) + // 1 - just the argument itself (-lm, -pthread) + // 2 - argument and next element (-l m, -framework CoreServices) // 0 - unrecognized/until the end (-Wl,--whole-archive ...) // // See similar code in find_system_library(). @@ -398,9 +477,9 @@ namespace build2 { if (l[0] == '-') { - // -l<name>, -l <name> + // -l<name>, -l <name>, -pthread // - if (l[1] == 'l') + if (l[1] == 'l' || l == "-pthread") { n = l.size () == 2 ? 2 : 1; } @@ -427,11 +506,14 @@ namespace build2 return make_pair (n, s); }; - auto proc_int = [&l, cache, chain, - &proc_impl, &proc_lib, &proc_lib_name, &proc_opt, - &sysd, &usrd, - &find_sysd, &find_linfo, &sense_fragment, - &bs, a, &li, impl, this] (const lookup& lu) + auto proc_intf = [&l, proc_opt_group, cache, chain, + &proc_impl, &proc_lib, &proc_lib_name, &proc_opt, + &sysd, &usrd, + &find_sysd, &find_linfo, &sense_fragment, + &bs, a, &li, impl, this] ( + const lookup& lu, + small_vector<const target*, 32>* dedup, + size_t dedup_start) // Start of our deps. { const vector<name>* ns (cast_null<vector<name>> (lu)); if (ns == nullptr || ns->empty ()) @@ -441,12 +523,15 @@ namespace build2 { const name& n (*i); + // Note: see also recursively-binless logic in link_rule if + // changing anything in simple library handling. + // if (n.simple ()) { - // This is something like -lpthread or shell32.lib so should - // be a valid path. But it can also be an absolute library - // path (e.g., something that may come from our - // .{static/shared}.pc files). + // This is something like -lm or shell32.lib so should be a + // valid path. But it can also be an absolute library path + // (e.g., something that may come from our .{static/shared}.pc + // files). 
// if (proc_lib) { @@ -471,68 +556,145 @@ namespace build2 if (sysd == nullptr) find_sysd (); if (!li) find_linfo (); - const mtime_target& t ( - resolve_library (a, - bs, - n, - (n.pair ? (++i)->dir : dir_path ()), - *li, - *sysd, usrd, - cache)); + const mtime_target* t; + const target* g; - if (proc_lib) + const char* w (nullptr); + try { - // This can happen if the target is mentioned in - // *.export.libs (i.e., it is an interface dependency) but - // not in the library's prerequisites (i.e., it is not an - // implementation dependency). + pair<const mtime_target&, const target*> p ( + resolve_library (a, + bs, + n, + (n.pair ? (++i)->dir : dir_path ()), + *li, + *sysd, usrd, + cache)); + + t = &p.first; + g = p.second; + + // Deduplicate. // - // Note that we used to just check for path being assigned - // but on Windows import-installed DLLs may legally have - // empty paths. + // Note that dedup_start makes sure we only consider our + // interface dependencies while maintaining the "through" + // list. // - const char* w (nullptr); - if (t.ctx.phase == run_phase::match) + if (dedup != nullptr) { - size_t o ( - t.state[a].task_count.load (memory_order_consume) - - t.ctx.count_base ()); + if (find (dedup->begin () + dedup_start, + dedup->end (), + t) != dedup->end ()) + { + ++i; + continue; + } + + dedup->push_back (t); + } + } + catch (const non_existent_library& e) + { + // This is another manifestation of the "mentioned in + // *.export.libs but not in prerequisites" case (see below). + // + t = &e.target; + w = "unknown"; + } - if (o != target::offset_applied && - o != target::offset_executed) + // This can happen if the target is mentioned in *.export.libs + // (i.e., it is an interface dependency) but not in the + // library's prerequisites (i.e., it is not an implementation + // dependency). + // + // Note that we used to just check for path being assigned but + // on Windows import-installed DLLs may legally have empty + // paths. 
+ // + if (w != nullptr) + ; // See above. + else if (l.ctx.phase == run_phase::match) + { + // We allow not matching installed libraries if all we need + // is their options (see compile_rule::apply()). + // + if (proc_lib || t->base_scope ().root_scope () != nullptr) + { + if (!t->matched (a)) w = "not matched"; } - else if (t.mtime () == timestamp_unknown) - w = "out of date"; - - if (w != nullptr) - fail << (impl ? "implementation" : "interface") - << " dependency " << t << " is " << w << - info << "mentioned in *.export." << (impl ? "impl_" : "") - << "libs of target " << l << - info << "is it a prerequisite of " << l << "?"; + } + else + { + // Note that this check we only do if there is proc_lib + // (since it's valid to process library's options before + // updating it). + // + if (proc_lib) + { + if (t->mtime () == timestamp_unknown) + w = "out of date"; + } + } + + if (w != nullptr) + { + fail << (impl ? "implementation" : "interface") + << " dependency " << *t << " is " << w << + info << "mentioned in *.export." << (impl ? "impl_" : "") + << "libs of target " << l << + info << "is it a prerequisite of " << l << "?"; } // Process it recursively. // - // @@ Where can we get the link flags? Should we try to find - // them in the library's prerequisites? What about - // installed stuff? + bool u; + bool la ((u = t->is_a<libux> ()) || t->is_a<liba> ()); + lflags lf (0); + + // If this is a static library, see if we need to link it + // whole. // - process_libraries (a, bs, *li, *sysd, - t, t.is_a<liba> () || t.is_a<libux> (), 0, - proc_impl, proc_lib, proc_opt, true, - cache, chain); + if (la && proc_lib) + { + // Note: go straight for the public variable pool. + // + const variable& var (t->ctx.var_pool["bin.whole"]); + + // See the link rule for the lookup semantics. + // + lookup l ( + t->lookup_original (var, true /* target_only */).first); + + if (l ? 
cast<bool> (*l) : u) + lf |= lflag_whole; + } + + process_libraries_impl ( + a, bs, *li, *sysd, + g, *t, la, lf, + proc_impl, proc_lib, proc_opt, + true /* self */, proc_opt_group, + cache, chain, dedup); } ++i; } }; + auto proc_intf_storage = [&proc_intf] (const lookup& lu1, + const lookup& lu2 = lookup ()) + { + small_vector<const target*, 32> dedup_storage; + + if (lu1) proc_intf (lu1, &dedup_storage, 0); + if (lu2) proc_intf (lu2, &dedup_storage, 0); + }; + // Process libraries from *.libs (of type strings). // - auto proc_imp = [&proc_lib, &proc_lib_name, - &sense_fragment] (const lookup& lu) + auto proc_impl = [&proc_lib, &proc_lib_name, + &sense_fragment] (const lookup& lu) { const strings* ns (cast_null<strings> (lu)); if (ns == nullptr || ns->empty ()) @@ -540,8 +702,8 @@ namespace build2 for (auto i (ns->begin ()), e (ns->end ()); i != e; ) { - // This is something like -lpthread or shell32.lib so should be - // a valid path. + // This is something like -lm or shell32.lib so should be a + // valid path. // pair<size_t, bool> r (sense_fragment (*i)); @@ -564,10 +726,26 @@ namespace build2 // if (cc) { - if (c_e_libs) proc_int (c_e_libs); + if (impl) + { + if (c_e_libs) proc_intf (c_e_libs, nullptr, 0); + } + else + { + if (c_e_libs) + { + if (dedup != nullptr) + proc_intf (c_e_libs, dedup, dedup->size ()); + else + proc_intf_storage (c_e_libs); + } + } } else { + // Note: see also recursively-binless logic in link_rule if + // changing anything here. + if (impl) { // Interface and implementation: as discussed above, we can have @@ -575,8 +753,12 @@ namespace build2 // if (c_e_libs.defined () || x_e_libs.defined ()) { - if (c_e_libs) proc_int (c_e_libs); - if (x_e_libs) proc_int (x_e_libs); + // Why are we calling proc_intf() on *.impl_libs? Perhaps + // because proc_impl() expects strings, not names? Yes, and + // proc_intf() checks impl. 
+ // + if (c_e_libs) proc_intf (c_e_libs, nullptr, 0); + if (x_e_libs) proc_intf (x_e_libs, nullptr, 0); } else { @@ -590,9 +772,9 @@ namespace build2 // if (proc_lib) { - const variable& v (same ? x_libs : vp[*t + ".libs"]); - proc_imp (l.lookup_original (c_libs, false, &bs).first); - proc_imp (l.lookup_original (v, false, &bs).first); + const variable& v (same ? x_libs : vp[t + ".libs"]); + proc_impl (l.lookup_original (c_libs, false, &bs).first); + proc_impl (l.lookup_original (v, false, &bs).first); } } } @@ -600,8 +782,18 @@ namespace build2 { // Interface: only add *.export.* (interface dependencies). // - if (c_e_libs) proc_int (c_e_libs); - if (x_e_libs) proc_int (x_e_libs); + if (c_e_libs.defined () || x_e_libs.defined ()) + { + if (dedup != nullptr) + { + size_t s (dedup->size ()); // Start of our interface deps. + + if (c_e_libs) proc_intf (c_e_libs, dedup, s); + if (x_e_libs) proc_intf (x_e_libs, dedup, s); + } + else + proc_intf_storage (c_e_libs, x_e_libs); + } } } } @@ -628,9 +820,14 @@ namespace build2 // // If li is absent, then don't pick the liba/libs{} member, returning the // lib{} target itself. If li is present, then the returned target is - // always a file. + // always a file. The second half of the returned pair is the group, if + // the member was picked. + // + // Note: paths in sysd/usrd are expected to be absolute and normalized. // - const mtime_target& common:: + // Note: may throw non_existent_library. + // + pair<const mtime_target&, const target*> common:: resolve_library (action a, const scope& s, const name& cn, @@ -651,7 +848,8 @@ namespace build2 // large number of times (see Boost for an extreme example of this). // // Note also that for non-utility libraries we know that only the link - // order from linfo is used. + // order from linfo is used. While not caching it and always picking an + // alternative could also work, we cache it to avoid the lookup. 
// if (cache != nullptr) { @@ -671,7 +869,7 @@ namespace build2 })); if (i != cache->end ()) - return i->lib; + return pair<const mtime_target&, const target*> {i->lib, i->group}; } else cache = nullptr; // Do not cache. @@ -710,29 +908,36 @@ namespace build2 fail << "unable to find library " << pk; } - // If this is lib{}/libu*{}, pick appropriate member unless we were + // If this is lib{}/libul{}, pick appropriate member unless we were // instructed not to. // + const target* g (nullptr); if (li) { if (const libx* l = xt->is_a<libx> ()) + { + g = xt; xt = link_member (*l, a, *li); // Pick lib*{e,a,s}{}. + } } auto& t (xt->as<mtime_target> ()); if (cache != nullptr) - cache->push_back (library_cache_entry {lo, cn.type, cn.value, t}); + cache->push_back (library_cache_entry {lo, cn.type, cn.value, t, g}); - return t; + return pair<const mtime_target&, const target*> {t, g}; } - // Note that pk's scope should not be NULL (even if dir is absolute). + // Action should be absent if called during the load phase. Note that pk's + // scope should not be NULL (even if dir is absolute). + // + // Note: paths in sysd/usrd are expected to be absolute and normalized. // // Note: see similar logic in find_system_library(). // target* common:: - search_library (action act, + search_library (optional<action> act, const dir_paths& sysd, optional<dir_paths>& usrd, const prerequisite_key& p, @@ -740,7 +945,7 @@ namespace build2 { tracer trace (x, "search_library"); - assert (p.scope != nullptr); + assert (p.scope != nullptr && (!exist || act)); context& ctx (p.scope->ctx); const scope& rs (*p.scope->root_scope ()); @@ -857,6 +1062,21 @@ namespace build2 { context& ctx (p.scope->ctx); + // Whether to look for a binless variant using the common .pc file + // (see below). + // + // Normally we look for a binless version if the binful one was not + // found. 
However, sometimes we may find what looks like a binful + // library but on a closer examination realize that there is something + // wrong with it (for example, it's not a Windows import library). In + // such cases we want to omit looking for a binless library using the + // common .pc file since it most likely corresponds to the binful + // library (and we may end up in a infinite loop trying to resolve + // itself). + // + bool ba (true); + bool bs (true); + timestamp mt; // libs @@ -957,10 +1177,24 @@ namespace build2 if (tsys == "win32-msvc") { if (s == nullptr && !sn.empty ()) - s = msvc_search_shared (ld, d, p, exist); + { + pair<libs*, bool> r (msvc_search_shared (ld, d, p, exist)); + + if (r.first != nullptr) + s = r.first; + else if (!r.second) + bs = false; + } if (a == nullptr && !an.empty ()) - a = msvc_search_static (ld, d, p, exist); + { + pair<liba*, bool> r (msvc_search_static (ld, d, p, exist)); + + if (r.first != nullptr) + a = r.first; + else if (!r.second) + ba = false; + } } // Look for binary-less libraries via pkg-config .pc files. Note that @@ -977,7 +1211,10 @@ namespace build2 // is no binful variant. // pair<path, path> r ( - pkgconfig_search (d, p.proj, name, na && ns /* common */)); + pkgconfig_search (d, + p.proj, + name, + na && ns && ba && bs /* common */)); if (na && !r.first.empty ()) { @@ -1030,6 +1267,8 @@ namespace build2 // making it the only one to allow things to be overriden (e.g., // if build2 was moved or some such). // + // Note: build_install_lib is already normalized. + // usrd->insert (usrd->begin (), build_install_lib); } } @@ -1082,20 +1321,87 @@ namespace build2 if (exist) return r; - // If we cannot acquire the lock then this mean the target has already - // been matched and we assume all of this has already been done. + // Try to extract library information from pkg-config. We only add the + // default macro if we could not extract more precise information. 
The + // idea is that in .pc files that we generate, we copy those macros (or + // custom ones) from *.export.poptions. // - auto lock = [act] (const target* t) -> target_lock + // @@ Should we add .pc files as ad hoc members so pkconfig_save() can + // use their names when deriving -l-names (this would be especially + // helpful for binless libraries to get hold of prefix/suffix, etc). + // + auto load_pc = [this, &trace, + act, &p, &name, + &sysd, &usrd, + pd, &pc, lt, a, s] (pair<bool, bool> metaonly) { - auto l (t != nullptr ? build2::lock (act, *t, true) : target_lock ()); + l5 ([&]{trace << "loading pkg-config information during " + << (act ? "match" : "load") << " for " + << (a != nullptr ? "static " : "") + << (s != nullptr ? "shared " : "") + << "member(s) of " << *lt << "; metadata only: " + << metaonly.first << " " << metaonly.second;}); + + // Add the "using static/shared library" macro (used, for example, to + // handle DLL export). The absence of either of these macros would + // mean some other build system that cannot distinguish between the + // two (and no pkg-config information). + // + auto add_macro = [this] (target& t, const char* suffix) + { + // If there is already a value (either in cc.export or x.export), + // don't add anything: we don't want to be accumulating defines nor + // messing with custom values. And if we are adding, then use the + // generic cc.export. + // + // The only way we could already have this value is if this same + // library was also imported as a project (as opposed to installed). + // Unlikely but possible. In this case the values were set by the + // export stub and we shouldn't touch them. + // + if (!t.vars[x_export_poptions]) + { + auto p (t.vars.insert (c_export_poptions)); - if (l && l.offset == target::offset_matched) + if (p.second) + { + // The "standard" macro name will be LIB<NAME>_{STATIC,SHARED}, + // where <name> is the target name. 
Here we want to strike a + // balance between being unique and not too noisy. + // + string d ("-DLIB"); + + d += sanitize_identifier ( + ucase (const_cast<const string&> (t.name))); + + d += '_'; + d += suffix; + + strings o; + o.push_back (move (d)); + p.first = move (o); + } + } + }; + + if (pc.first.empty () && pc.second.empty ()) { - assert ((*t)[act].rule == &file_rule::rule_match); - l.unlock (); + if (!pkgconfig_load (act, *p.scope, + *lt, a, s, + p.proj, name, + *pd, sysd, *usrd, + metaonly)) + { + if (a != nullptr && !metaonly.first) add_macro (*a, "STATIC"); + if (s != nullptr && !metaonly.second) add_macro (*s, "SHARED"); + } } - - return l; + else + pkgconfig_load (act, *p.scope, + *lt, a, s, + pc, + *pd, sysd, *usrd, + metaonly); }; // Mark as a "cc" library (unless already marked) and set the system @@ -1116,6 +1422,82 @@ namespace build2 return p.second; }; + // Deal with the load phase case. The rest is already hairy enough so + // let's not try to weave this logic into that. + // + if (!act) + { + assert (ctx.phase == run_phase::load); + + // The overall idea here is to set everything up so that the default + // file_rule matches the returned targets, the same way as it would if + // multiple operations were executed for the match phase (see below). + // + // Note however, that there is no guarantee that we won't end up in + // the match phase code below even after loading things here. For + // example, the same library could be searched from pkgconfig_load() + // if specified as -l. And if we try to re-assign group members, then + // that would be a race condition. So we use the cc mark to detect + // this. + // + timestamp mt (timestamp_nonexistent); + if (a != nullptr) {lt->a = a; a->group = lt; mt = a->mtime ();} + if (s != nullptr) {lt->s = s; s->group = lt; mt = s->mtime ();} + + // @@ TODO: we currently always reload pkgconfig for lt (and below). 
+ // + mark_cc (*lt); + lt->mtime (mt); + + // We can only load metadata from here since we can only do this + // during the load phase. But it's also possible that a racing match + // phase already found and loaded this library without metadata. So + // looks like the only way is to load the metadata incrementally. We + // can base this decision on the presense/absense of cc.type and + // export.metadata. + // + pair<bool, bool> metaonly {false, false}; + + if (a != nullptr && !mark_cc (*a)) + { + if (a->vars[ctx.var_export_metadata]) + a = nullptr; + else + metaonly.first = true; + } + + if (s != nullptr && !mark_cc (*s)) + { + if (s->vars[ctx.var_export_metadata]) + s = nullptr; + else + metaonly.second = true; + } + + // Try to extract library information from pkg-config. + // + if (a != nullptr || s != nullptr) + load_pc (metaonly); + + return r; + } + + // If we cannot acquire the lock then this mean the target has already + // been matched and we assume all of this has already been done. + // + auto lock = [a = *act] (const target* t) -> target_lock + { + auto l (t != nullptr ? build2::lock (a, *t, true) : target_lock ()); + + if (l && l.offset == target::offset_matched) + { + assert ((*t)[a].rule == &file_rule::rule_match); + l.unlock (); + } + + return l; + }; + target_lock ll (lock (lt)); // Set lib{} group members to indicate what's available. Note that we @@ -1125,13 +1507,22 @@ namespace build2 timestamp mt (timestamp_nonexistent); if (ll) { - if (s != nullptr) {lt->s = s; mt = s->mtime ();} - if (a != nullptr) {lt->a = a; mt = a->mtime ();} - // Mark the group since sometimes we use it itself instead of one of - // the liba/libs{} members (see process_libraries() for details). + // the liba/libs{} members (see process_libraries_impl() for details). // - mark_cc (*lt); + // If it's already marked, then it could have been imported during + // load (see above). + // + // @@ TODO: we currently always reload pkgconfig for lt (and above). 
+ // Maybe pass NULL lt to pkgconfig_load() in this case? + // + if (mark_cc (*lt)) + { + if (a != nullptr) {lt->a = a; mt = a->mtime ();} + if (s != nullptr) {lt->s = s; mt = s->mtime ();} + } + else + ll.unlock (); } target_lock al (lock (a)); @@ -1140,81 +1531,20 @@ namespace build2 if (!al) a = nullptr; if (!sl) s = nullptr; - if (a != nullptr) a->group = lt; - if (s != nullptr) s->group = lt; - - // If the library already has cc.type, then assume it was either - // already imported or was matched by a rule. + // If the library already has cc.type, then assume it was either already + // imported (e.g., during load) or was matched by a rule. // if (a != nullptr && !mark_cc (*a)) a = nullptr; if (s != nullptr && !mark_cc (*s)) s = nullptr; - // Add the "using static/shared library" macro (used, for example, to - // handle DLL export). The absence of either of these macros would - // mean some other build system that cannot distinguish between the - // two (and no pkg-config information). - // - auto add_macro = [this] (target& t, const char* suffix) - { - // If there is already a value (either in cc.export or x.export), - // don't add anything: we don't want to be accumulating defines nor - // messing with custom values. And if we are adding, then use the - // generic cc.export. - // - // The only way we could already have this value is if this same - // library was also imported as a project (as opposed to installed). - // Unlikely but possible. In this case the values were set by the - // export stub and we shouldn't touch them. - // - if (!t.vars[x_export_poptions]) - { - auto p (t.vars.insert (c_export_poptions)); - - if (p.second) - { - // The "standard" macro name will be LIB<NAME>_{STATIC,SHARED}, - // where <name> is the target name. Here we want to strike a - // balance between being unique and not too noisy. 
- // - string d ("-DLIB"); - - d += sanitize_identifier ( - ucase (const_cast<const string&> (t.name))); - - d += '_'; - d += suffix; - - strings o; - o.push_back (move (d)); - p.first = move (o); - } - } - }; + if (a != nullptr) a->group = lt; + if (s != nullptr) s->group = lt; if (ll && (a != nullptr || s != nullptr)) { - // Try to extract library information from pkg-config. We only add the - // default macro if we could not extract more precise information. The - // idea is that in .pc files that we generate, we copy those macros - // (or custom ones) from *.export.poptions. - // - // @@ Should we add .pc files as ad hoc members so pkconfig_save() can - // use their names when deriving -l-names (this would be expecially - // helpful for binless libraries to get hold of prefix/suffix, etc). + // Try to extract library information from pkg-config. // - if (pc.first.empty () && pc.second.empty ()) - { - if (!pkgconfig_load (act, *p.scope, - *lt, a, s, - p.proj, name, - *pd, sysd, *usrd)) - { - if (a != nullptr) add_macro (*a, "STATIC"); - if (s != nullptr) add_macro (*s, "SHARED"); - } - } - else - pkgconfig_load (act, *p.scope, *lt, a, s, pc, *pd, sysd, *usrd); + load_pc ({false, false} /* metaonly */); } // If we have the lock (meaning this is the first time), set the matched @@ -1227,8 +1557,8 @@ namespace build2 // // Note also that these calls clear target data. // - if (al) match_rule (al, file_rule::rule_match); - if (sl) match_rule (sl, file_rule::rule_match); + if (a != nullptr) match_rule (al, file_rule::rule_match); + if (s != nullptr) match_rule (sl, file_rule::rule_match); if (ll) { match_rule (ll, file_rule::rule_match); @@ -1280,5 +1610,54 @@ namespace build2 return r; } + + void common:: + append_diag_color_options (cstrings& args) const + { + switch (cclass) + { + case compiler_class::msvc: + { + // Note: see init_diag logic if enabling anything here (probably + // need an "override disable" mode or some such). 
+ // + break; + } + case compiler_class::gcc: + { + // Enable/disable diagnostics color unless a custom option is + // specified. + // + // Supported from GCC 4.9 and (at least) from Clang 3.5. Clang + // supports -f[no]color-diagnostics in addition to the GCC's + // spelling. + // + if (ctype == compiler_type::gcc ? cmaj > 4 || (cmaj == 4 && cmin >= 9) : + ctype == compiler_type::clang ? cmaj > 3 || (cmaj == 3 && cmin >= 5) : + false) + { + if (!(find_option_prefix ("-fdiagnostics-color", args) || + find_option ("-fno-diagnostics-color", args) || + find_option ("-fdiagnostics-plain-output", args) || + (ctype == compiler_type::clang && + (find_option ("-fcolor-diagnostics", args) || + find_option ("-fno-color-diagnostics", args))))) + { + // Omit -fno-diagnostics-color if stderr is not a terminal (we + // know there will be no color in this case and the option will + // just add noise, for example, in build logs). + // + if (const char* o = ( + show_diag_color () ? "-fdiagnostics-color" : + stderr_term ? "-fno-diagnostics-color" : + nullptr)) + args.push_back (o); + } + } + + break; + } + } + } } } diff --git a/libbuild2/cc/common.hxx b/libbuild2/cc/common.hxx index 78442f8..eefcc0d 100644 --- a/libbuild2/cc/common.hxx +++ b/libbuild2/cc/common.hxx @@ -32,10 +32,12 @@ namespace build2 { lang x_lang; - const char* x; // Module name ("c", "cxx"). - const char* x_name; // Compiler name ("c", "c++"). - const char* x_default; // Compiler default ("gcc", "g++"). - const char* x_pext; // Preprocessed source extension (".i", ".ii"). + const char* x; // Module name ("c", "cxx"). + const char* x_name; // Compiler name ("c", "c++"). + const char* x_obj_name; // Same for Objective-X ("obj-c", "obj-c++"). + const char* x_default; // Compiler default ("gcc", "g++"). + const char* x_pext; // Preprocessed source extension (".i", ".ii"). + const char* x_obj_pext; // Same for Objective-X (".mi", ".mii"). // Array of modules that can hint us the toolchain, terminate with // NULL. 
@@ -102,6 +104,9 @@ namespace build2 const variable& c_export_libs; const variable& c_export_impl_libs; + const variable& c_pkgconfig_include; + const variable& c_pkgconfig_lib; + const variable& x_stdlib; // x.stdlib const variable& c_runtime; // cc.runtime @@ -153,10 +158,9 @@ namespace build2 struct data: config_data { - const char* x_compile; // Rule names. - const char* x_link; - const char* x_install; - const char* x_uninstall; + string x_compile; // Rule names. + string x_link; + string x_install; // Cached values for some commonly-used variables/values. // @@ -194,22 +198,53 @@ namespace build2 build2::cc::importable_headers* importable_headers; // The order of sys_*_dirs is the mode entries first, followed by the - // compiler built-in entries, and finished off with any extra entries - // (e.g., fallback directories such as /usr/local/*). + // extra entries (e.g., /usr/local/*), followed by the compiler built-in + // entries. + // + // Note that even if we wanted to, we wouldn't be able to support extra + // trailing (after built-in) directories since we would need a portable + // equivalent of -idirafter for both headers and libraries. // const dir_paths& sys_lib_dirs; // x.sys_lib_dirs const dir_paths& sys_hdr_dirs; // x.sys_hdr_dirs const dir_paths* sys_mod_dirs; // compiler_info::sys_mod_dirs - size_t sys_lib_dirs_mode; // Number of leading mode entries (0 if none). + size_t sys_lib_dirs_mode; // Number of mode entries (0 if none). size_t sys_hdr_dirs_mode; size_t sys_mod_dirs_mode; - size_t sys_lib_dirs_extra; // First trailing extra entry (size if none). + size_t sys_lib_dirs_extra; // Number of extra entries (0 if none). size_t sys_hdr_dirs_extra; + // Note that x_obj is patched in by the x.objx module. So it stays NULL + // if Objective-X compilation is not enabled. Similarly for x_asp except + // here we don't have duality and it's purely to signal (by the c.as-cpp + // module) that it's enabled. 
+ // const target_type& x_src; // Source target type (c{}, cxx{}). const target_type* x_mod; // Module target type (mxx{}), if any. + const target_type* x_obj; // Objective-X target type (m{}, mm{}). + const target_type* x_asp; // Assembler with CPP target type (S{}). + + // Check if an object (target, prerequisite, etc) is an Objective-X + // source. + // + template <typename T> + bool + x_objective (const T& t) const + { + return x_obj != nullptr && t.is_a (*x_obj); + } + + // Check if an object (target, prerequisite, etc) is an Assembler with + // C preprocessor source. + // + template <typename T> + bool + x_assembler_cpp (const T& t) const + { + return x_asp != nullptr && t.is_a (*x_asp); + } // Array of target types that are considered the X-language headers // (excluding h{} except for C). Keep them in the most likely to appear @@ -217,6 +252,8 @@ namespace build2 // const target_type* const* x_hdr; + // Check if an object (target, prerequisite, etc) is a header. + // template <typename T> bool x_header (const T& t, bool c_hdr = true) const @@ -240,7 +277,6 @@ namespace build2 const char* compile, const char* link, const char* install, - const char* uninstall, compiler_type ct, const string& cv, compiler_class cl, @@ -267,7 +303,6 @@ namespace build2 x_compile (compile), x_link (link), x_install (install), - x_uninstall (uninstall), ctype (ct), cvariant (cv), cclass (cl), cmaj (mj), cmin (mi), cvmaj (vmj), cvmin (vmi), @@ -283,7 +318,8 @@ namespace build2 sys_lib_dirs_mode (slm), sys_hdr_dirs_mode (shm), sys_mod_dirs_mode (smm), sys_lib_dirs_extra (sle), sys_hdr_dirs_extra (she), - x_src (src), x_mod (mod), x_hdr (hdr), x_inc (inc) {} + x_src (src), x_mod (mod), x_obj (nullptr), x_asp (nullptr), + x_hdr (hdr), x_inc (inc) {} }; class LIBBUILD2_CC_SYMEXPORT common: public data @@ -300,10 +336,16 @@ namespace build2 string type; // name::type string value; // name::value reference_wrapper<const mtime_target> lib; + const target* group; }; using library_cache = 
small_vector<library_cache_entry, 32>; + // The prerequisite_target::include bit that indicates a library + // member has been picked from the group. + // + static const uintptr_t include_group = 0x100; + void process_libraries ( action, @@ -319,8 +361,29 @@ namespace build2 lflags, const string*, bool)>&, const function<bool (const target&, const string&, bool, bool)>&, bool = false, - library_cache* = nullptr, - small_vector<const target*, 24>* = nullptr) const; + bool = false, + library_cache* = nullptr) const; + + void + process_libraries_impl ( + action, + const scope&, + optional<linfo>, + const dir_paths&, + const target*, + const mtime_target&, + bool, + lflags, + const function<bool (const target&, bool)>&, + const function<bool (const target* const*, + const small_vector<reference_wrapper<const string>, 2>&, + lflags, const string*, bool)>&, + const function<bool (const target&, const string&, bool, bool)>&, + bool, + bool, + library_cache*, + small_vector<const target*, 32>*, + small_vector<const target*, 32>*) const; const target* search_library (action a, @@ -347,7 +410,7 @@ namespace build2 } public: - const mtime_target& + pair<const mtime_target&, const target*> resolve_library (action, const scope&, const name&, @@ -357,6 +420,11 @@ namespace build2 optional<dir_paths>&, library_cache* = nullptr) const; + struct non_existent_library + { + const mtime_target& target; + }; + template <typename T> static ulock insert_library (context&, @@ -369,7 +437,7 @@ namespace build2 tracer&); target* - search_library (action, + search_library (optional<action>, const dir_paths&, optional<dir_paths>&, const prerequisite_key&, @@ -389,13 +457,16 @@ namespace build2 // Alternative search logic for VC (msvc.cxx). // - bin::liba* + // The second half is false if we should poison the binless search via + // the common .pc file. 
+ // + pair<bin::liba*, bool> msvc_search_static (const process_path&, const dir_path&, const prerequisite_key&, bool existing) const; - bin::libs* + pair<bin::libs*, bool> msvc_search_shared (const process_path&, const dir_path&, const prerequisite_key&, @@ -415,21 +486,28 @@ namespace build2 bool) const; void - pkgconfig_load (action, const scope&, + pkgconfig_load (optional<action>, const scope&, bin::lib&, bin::liba*, bin::libs*, const pair<path, path>&, const dir_path&, const dir_paths&, - const dir_paths&) const; + const dir_paths&, + pair<bool, bool>) const; bool - pkgconfig_load (action, const scope&, + pkgconfig_load (optional<action>, const scope&, bin::lib&, bin::liba*, bin::libs*, const optional<project_name>&, const string&, const dir_path&, const dir_paths&, - const dir_paths&) const; + const dir_paths&, + pair<bool, bool>) const; + + // Append compiler-specific diagnostics color options as necessary. + // + void + append_diag_color_options (cstrings&) const; }; } } diff --git a/libbuild2/cc/common.txx b/libbuild2/cc/common.txx index d14f966..8c80686 100644 --- a/libbuild2/cc/common.txx +++ b/libbuild2/cc/common.txx @@ -19,15 +19,18 @@ namespace build2 bool exist, tracer& trace) { - auto p (ctx.targets.insert_locked (T::static_type, - move (dir), - path_cast<dir_path> (out.effect), - name, - move (ext), - target_decl::implied, - trace)); + auto p (ctx.targets.insert_locked ( + T::static_type, + move (dir), + dir_path (out.effect_string ()).normalize (), + name, + move (ext), + target_decl::implied, + trace)); + + if (exist && p.second) + throw non_existent_library {p.first.template as<mtime_target> ()}; - assert (!exist || !p.second); r = &p.first.template as<T> (); return move (p.second); } diff --git a/libbuild2/cc/compile-rule.cxx b/libbuild2/cc/compile-rule.cxx index 93f05f1..fa46332 100644 --- a/libbuild2/cc/compile-rule.cxx +++ b/libbuild2/cc/compile-rule.cxx @@ -3,6 +3,7 @@ #include <libbuild2/cc/compile-rule.hxx> +#include <cerrno> #include 
<cstdlib> // exit() #include <cstring> // strlen(), strchr(), strncmp() @@ -16,6 +17,7 @@ #include <libbuild2/algorithm.hxx> #include <libbuild2/filesystem.hxx> // mtime() #include <libbuild2/diagnostics.hxx> +#include <libbuild2/make-parser.hxx> #include <libbuild2/bin/target.hxx> @@ -174,7 +176,7 @@ namespace build2 if (s == "includes") return preprocessed::includes; if (s == "modules") return preprocessed::modules; if (s == "all") return preprocessed::all; - throw invalid_argument ("invalid preprocessed value '" + s + "'"); + throw invalid_argument ("invalid preprocessed value '" + s + '\''); } // Return true if the compiler supports -isystem (GCC class) or @@ -228,11 +230,17 @@ namespace build2 return nullopt; } + // Note that we don't really need this for clean (where we only need + // unrefined unit type) so we could make this update-only. But let's keep + // it simple for now. Note that now we do need the source prerequisite + // type in clean to deal with Objective-X. + // struct compile_rule::match_data { - explicit - match_data (unit_type t, const prerequisite_member& s) - : type (t), src (s) {} + match_data (const compile_rule& r, + unit_type t, + const prerequisite_member& s) + : type (t), src (s), rule (r) {} unit_type type; preprocessed pp = preprocessed::none; @@ -245,39 +253,67 @@ namespace build2 path dd; // Dependency database path. size_t header_units = 0; // Number of imported header units. module_positions modules = {0, 0, 0}; // Positions of imported modules. + + const compile_rule& rule; + + target_state + operator() (action a, const target& t) + { + return rule.perform_update (a, t, *this); + } }; compile_rule:: - compile_rule (data&& d) + compile_rule (data&& d, const scope& rs) : common (move (d)), - rule_id (string (x) += ".compile 5") + rule_id (string (x) += ".compile 6") { - static_assert (sizeof (match_data) <= target::data_size, - "insufficient space"); + // Locate the header cache (see enter_header() for details). 
+ // + { + string mn (string (x) + ".config"); + + header_cache_ = rs.find_module<config_module> (mn); // Must be there. + + const scope* ws (rs.weak_scope ()); + if (ws != &rs) + { + const scope* s (&rs); + do + { + s = s->parent_scope ()->root_scope (); + + if (const auto* m = s->find_module<config_module> (mn)) + header_cache_ = m; + + } while (s != ws); + } + } } template <typename T> void compile_rule:: append_sys_hdr_options (T& args) const { - assert (sys_hdr_dirs_extra <= sys_hdr_dirs.size ()); + assert (sys_hdr_dirs_mode + sys_hdr_dirs_extra <= sys_hdr_dirs.size ()); // Note that the mode options are added as part of cmode. // auto b (sys_hdr_dirs.begin () + sys_hdr_dirs_mode); - auto m (sys_hdr_dirs.begin () + sys_hdr_dirs_extra); - auto e (sys_hdr_dirs.end ()); + auto x (b + sys_hdr_dirs_extra); + // Add extras. + // // Note: starting from 16.10, MSVC gained /external:I option though it // doesn't seem to affect the order, only "system-ness". // append_option_values ( args, - cclass == compiler_class::gcc ? "-idirafter" : + cclass == compiler_class::gcc ? "-isystem" : cclass == compiler_class::msvc ? (isystem (*this) ? "/external:I" : "/I") : "-I", - m, e, + b, x, [] (const dir_path& d) {return d.string ().c_str ();}); // For MSVC if we have no INCLUDE environment variable set, then we @@ -293,7 +329,7 @@ namespace build2 { append_option_values ( args, "/I", - b, m, + x, sys_hdr_dirs.end (), [] (const dir_path& d) {return d.string ().c_str ();}); } } @@ -336,10 +372,18 @@ namespace build2 case unit_type::module_impl: { o1 = "-x"; - switch (x_lang) + + if (x_assembler_cpp (md.src)) + o2 = "assembler-with-cpp"; + else { - case lang::c: o2 = "c"; break; - case lang::cxx: o2 = "c++"; break; + bool obj (x_objective (md.src)); + + switch (x_lang) + { + case lang::c: o2 = obj ? "objective-c" : "c"; break; + case lang::cxx: o2 = obj ? 
"objective-c++" : "c++"; break; + } } break; } @@ -406,7 +450,7 @@ namespace build2 } bool compile_rule:: - match (action a, target& t, const string&) const + match (action a, target& t) const { tracer trace (x, "compile_rule::match"); @@ -441,11 +485,13 @@ namespace build2 // if (ut == unit_type::module_header ? p.is_a (**x_hdr) || p.is_a<h> () : ut == unit_type::module_intf ? p.is_a (*x_mod) : - p.is_a (x_src)) + p.is_a (x_src) || + (x_asp != nullptr && p.is_a (*x_asp)) || + (x_obj != nullptr && p.is_a (*x_obj))) { // Save in the target's auxiliary storage. // - t.data (match_data (ut, p)); + t.data (a, match_data (*this, ut, p)); return true; } } @@ -456,13 +502,16 @@ namespace build2 // Append or hash library options from a pair of *.export.* variables // (first is x.* then cc.*) recursively, prerequisite libraries first. + // If common is true, then only append common options from the lib{} + // groups. // template <typename T> void compile_rule:: append_library_options (appended_libraries& ls, T& args, const scope& bs, const scope* is, // Internal scope. - action a, const file& l, bool la, linfo li, + action a, const file& l, bool la, + linfo li, bool common, library_cache* lib_cache) const { struct data @@ -476,7 +525,7 @@ namespace build2 // auto imp = [] (const target& l, bool la) {return la && l.is_a<libux> ();}; - auto opt = [&d, this] (const target& lt, + auto opt = [&d, this] (const target& l, // Note: could be lib{} const string& t, bool com, bool exp) { // Note that in our model *.export.poptions are always "interface", @@ -485,8 +534,6 @@ namespace build2 if (!exp) // Ignore libux. return true; - const file& l (lt.as<file> ()); - // Suppress duplicates. // // Compilation is the simple case: we can add the options on the first @@ -496,6 +543,8 @@ namespace build2 if (find (d.ls.begin (), d.ls.end (), &l) != d.ls.end ()) return false; + // Note: go straight for the public variable pool. + // const variable& var ( com ? 
c_export_poptions @@ -645,16 +694,24 @@ namespace build2 process_libraries (a, bs, li, sys_lib_dirs, l, la, 0, // lflags unused. - imp, nullptr, opt, false /* self */, lib_cache); + imp, nullptr, opt, + false /* self */, + common /* proc_opt_group */, + lib_cache); } void compile_rule:: append_library_options (appended_libraries& ls, strings& args, const scope& bs, - action a, const file& l, bool la, linfo li) const + action a, const file& l, bool la, + linfo li, + bool common, + bool original) const { - const scope* is (isystem (*this) ? effective_iscope (bs) : nullptr); - append_library_options (ls, args, bs, is, a, l, la, li, nullptr); + const scope* is (!original && isystem (*this) + ? effective_iscope (bs) + : nullptr); + append_library_options (ls, args, bs, is, a, l, la, li, common, nullptr); } template <typename T> @@ -695,7 +752,9 @@ namespace build2 append_library_options (ls, args, bs, iscope (), - a, *f, la, li, + a, *f, la, + li, + false /* common */, &lc); } } @@ -708,7 +767,7 @@ namespace build2 void compile_rule:: append_library_prefixes (appended_libraries& ls, prefix_map& pm, const scope& bs, - action a, target& t, linfo li) const + action a, const target& t, linfo li) const { struct data { @@ -731,14 +790,23 @@ namespace build2 if (find (d.ls.begin (), d.ls.end (), &l) != d.ls.end ()) return false; - const variable& var ( - com - ? c_export_poptions - : (t == x - ? x_export_poptions - : l.ctx.var_pool[t + ".export.poptions"])); - - append_prefixes (d.pm, l, var); + // If this target does not belong to any project (e.g, an "imported as + // installed" library), then it can't possibly generate any headers + // for us. + // + if (const scope* rs = l.base_scope ().root_scope ()) + { + // Note: go straight for the public variable pool. + // + const variable& var ( + com + ? c_export_poptions + : (t == x + ? 
x_export_poptions + : l.ctx.var_pool[t + ".export.poptions"])); + + append_prefixes (d.pm, *rs, l, var); + } if (com) d.ls.push_back (&l); @@ -770,75 +838,14 @@ namespace build2 process_libraries (a, bs, li, sys_lib_dirs, pt->as<file> (), la, 0, // lflags unused. - impf, nullptr, optf, false /* self */, + impf, nullptr, optf, + false /* self */, + false /* proc_opt_group */, &lib_cache); } } } - // Update the target during the match phase. Return true if it has changed - // or if the passed timestamp is not timestamp_unknown and is older than - // the target. - // - // This function is used to make sure header dependencies are up to date. - // - // There would normally be a lot of headers for every source file (think - // all the system headers) and just calling execute_direct() on all of - // them can get expensive. At the same time, most of these headers are - // existing files that we will never be updating (again, system headers, - // for example) and the rule that will match them is the fallback - // file_rule. That rule has an optimization: it returns noop_recipe (which - // causes the target state to be automatically set to unchanged) if the - // file is known to be up to date. So we do the update "smartly". - // - static bool - update (tracer& trace, action a, const target& t, timestamp ts) - { - const path_target* pt (t.is_a<path_target> ()); - - if (pt == nullptr) - ts = timestamp_unknown; - - target_state os (t.matched_state (a)); - - if (os == target_state::unchanged) - { - if (ts == timestamp_unknown) - return false; - else - { - // We expect the timestamp to be known (i.e., existing file). - // - timestamp mt (pt->mtime ()); - assert (mt != timestamp_unknown); - return mt > ts; - } - } - else - { - // We only want to return true if our call to execute() actually - // caused an update. In particular, the target could already have been - // in target_state::changed because of a dependency extraction run for - // some other source file. 
- // - // @@ MT perf: so we are going to switch the phase and execute for - // any generated header. - // - phase_switch ps (t.ctx, run_phase::execute); - target_state ns (execute_direct (a, t)); - - if (ns != os && ns != target_state::unchanged) - { - l6 ([&]{trace << "updated " << t - << "; old state " << os - << "; new state " << ns;}); - return true; - } - else - return ts != timestamp_unknown ? pt->newer (ts, ns) : false; - } - } - recipe compile_rule:: apply (action a, target& xt) const { @@ -846,7 +853,7 @@ namespace build2 file& t (xt.as<file> ()); // Either obj*{} or bmi*{}. - match_data& md (t.data<match_data> ()); + match_data& md (t.data<match_data> (a)); context& ctx (t.ctx); @@ -1000,6 +1007,12 @@ namespace build2 // to match it if we may need its modules or importable headers // (see search_modules(), make_header_sidebuild() for details). // + // Well, that was the case until we've added support for immediate + // importation of libraries, which happens during the load phase + // and natually leaves the library unmatched. While we could have + // returned from search_library() an indication of whether the + // library has been matched, this doesn't seem worth the trouble. + // if (p.proj ()) { pt = search_library (a, @@ -1007,8 +1020,10 @@ namespace build2 usr_lib_dirs, p.prerequisite); +#if 0 if (pt != nullptr && !modules) continue; +#endif } if (pt == nullptr) @@ -1063,8 +1078,13 @@ namespace build2 // match in link::apply() it will be safe unless someone is building // an obj?{} target directly. // + // @@ If for some reason unmatch fails, this messes up the for_install + // logic because we will update this library during match. Perhaps + // we should postpone updating them until execute if we failed to + // unmatch. See how we do this in ad hoc rule. + // pair<bool, target_state> mr ( - build2::match ( + match_complete ( a, *pt, pt->is_a<liba> () || pt->is_a<libs> () || pt->is_a<libux> () @@ -1098,6 +1118,8 @@ namespace build2 md.symexport = l ? 
cast<bool> (l) : symexport; } + // NOTE: see similar code in adhoc_buildscript_rule::apply(). + // Make sure the output directory exists. // // Is this the right thing to do? It does smell a bit, but then we do @@ -1198,15 +1220,6 @@ namespace build2 append_options (cs, t, c_coptions); append_options (cs, t, x_coptions); - - if (ot == otype::s) - { - // On Darwin, Win32 -fPIC is the default. - // - if (tclass == "linux" || tclass == "bsd") - cs.append ("-fPIC"); - } - append_options (cs, cmode); if (md.pp != preprocessed::all) @@ -1300,7 +1313,7 @@ namespace build2 // l5 ([&]{trace << "extracting headers from " << src;}); auto& is (tu.module_info.imports); - psrc = extract_headers (a, bs, t, li, src, md, dd, u, mt, is); + extract_headers (a, bs, t, li, src, md, dd, u, mt, is, psrc); is.clear (); // No longer needed. } @@ -1475,10 +1488,10 @@ namespace build2 // to keep re-validating the file on every subsequent dry-run as well // on the real run). // - if (u && dd.reading () && !ctx.dry_run) - dd.touch = true; + if (u && dd.reading () && !ctx.dry_run_option) + dd.touch = timestamp_unknown; - dd.close (); + dd.close (false /* mtime_check */); md.dd = move (dd.path); // If the preprocessed output is suitable for compilation, then pass @@ -1536,67 +1549,25 @@ namespace build2 switch (a) { - case perform_update_id: return [this] (action a, const target& t) - { - return perform_update (a, t); - }; - case perform_clean_id: return [this] (action a, const target& t) + case perform_update_id: return move (md); + case perform_clean_id: { - return perform_clean (a, t); - }; + return [this, srct = &md.src.type ()] (action a, const target& t) + { + return perform_clean (a, t, *srct); + }; + } default: return noop_recipe; // Configure update. } } - // Reverse-lookup target type(s) from extension. 
- // - small_vector<const target_type*, 2> compile_rule:: - map_extension (const scope& s, const string& n, const string& e) const - { - // We will just have to try all of the possible ones, in the "most - // likely to match" order. - // - auto test = [&s, &n, &e] (const target_type& tt) -> bool - { - // Call the extension derivation function. Here we know that it will - // only use the target type and name from the target key so we can - // pass bogus values for the rest. - // - target_key tk {&tt, nullptr, nullptr, &n, nullopt}; - - // This is like prerequisite search. - // - optional<string> de (tt.default_extension (tk, s, nullptr, true)); - - return de && *de == e; - }; - - small_vector<const target_type*, 2> r; - - for (const target_type* const* p (x_inc); *p != nullptr; ++p) - if (test (**p)) - r.push_back (*p); - - return r; - } - void compile_rule:: - append_prefixes (prefix_map& m, const target& t, const variable& var) const + append_prefixes (prefix_map& m, + const scope& rs, const target& t, + const variable& var) const { tracer trace (x, "compile_rule::append_prefixes"); - // If this target does not belong to any project (e.g, an "imported as - // installed" library), then it can't possibly generate any headers for - // us. - // - const scope& bs (t.base_scope ()); - const scope* rs (bs.root_scope ()); - if (rs == nullptr) - return; - - const dir_path& out_base (t.dir); - const dir_path& out_root (rs->out_path ()); - if (auto l = t[var]) { const auto& v (cast<strings> (l)); @@ -1654,113 +1625,8 @@ namespace build2 // If we are not inside our project root, then ignore. // - if (!d.sub (out_root)) - continue; - - // If the target directory is a sub-directory of the include - // directory, then the prefix is the difference between the - // two. Otherwise, leave it empty. - // - // The idea here is to make this "canonical" setup work auto- - // magically: - // - // 1. We include all files with a prefix, e.g., <foo/bar>. - // 2. 
The library target is in the foo/ sub-directory, e.g., - // /tmp/foo/. - // 3. The poptions variable contains -I/tmp. - // - dir_path p (out_base.sub (d) ? out_base.leaf (d) : dir_path ()); - - // We use the target's directory as out_base but that doesn't work - // well for targets that are stashed in subdirectories. So as a - // heuristics we are going to also enter the outer directories of - // the original prefix. It is, however, possible, that another -I - // option after this one will produce one of these outer prefixes as - // its original prefix in which case we should override it. - // - // So we are going to assign the original prefix priority value 0 - // (highest) and then increment it for each outer prefix. - // - auto enter = [&trace, &m] (dir_path p, dir_path d, size_t prio) - { - auto j (m.find (p)); - - if (j != m.end ()) - { - prefix_value& v (j->second); - - // We used to reject duplicates but it seems this can be - // reasonably expected to work according to the order of the - // -I options. - // - // Seeing that we normally have more "specific" -I paths first, - // (so that we don't pick up installed headers, etc), we ignore - // it. 
- // - if (v.directory == d) - { - if (v.priority > prio) - v.priority = prio; - } - else if (v.priority <= prio) - { - if (verb >= 4) - trace << "ignoring mapping for prefix '" << p << "'\n" - << " existing mapping to " << v.directory - << " priority " << v.priority << '\n' - << " another mapping to " << d - << " priority " << prio; - } - else - { - if (verb >= 4) - trace << "overriding mapping for prefix '" << p << "'\n" - << " existing mapping to " << v.directory - << " priority " << v.priority << '\n' - << " new mapping to " << d - << " priority " << prio; - - v.directory = move (d); - v.priority = prio; - } - } - else - { - l6 ([&]{trace << "'" << p << "' -> " << d << " priority " - << prio;}); - m.emplace (move (p), prefix_value {move (d), prio}); - } - }; - -#if 1 - // Enter all outer prefixes, including prefixless. - // - // The prefixless part is fuzzy but seems to be doing the right - // thing ignoring/overriding-wise, at least in cases where one of - // the competing -I paths is a subdirectory of another. But the - // proper solution will be to keep all the prefixless entries (by - // changing prefix_map to a multimap) since for them we have an - // extra check (target must be explicitly spelled out in a - // buildfile). - // - for (size_t prio (0);; ++prio) - { - bool e (p.empty ()); - enter ((e ? move (p) : p), (e ? move (d) : d), prio); - if (e) - break; - p = p.directory (); - } -#else - size_t prio (0); - for (bool e (false); !e; ++prio) - { - dir_path n (p.directory ()); - e = n.empty (); - enter ((e ? move (p) : p), (e ? move (d) : d), prio); - p = move (n); - } -#endif + if (d.sub (rs.out_path ())) + append_prefix (trace, m, t, move (d)); } } } @@ -1768,15 +1634,16 @@ namespace build2 auto compile_rule:: build_prefix_map (const scope& bs, action a, - target& t, + const target& t, linfo li) const -> prefix_map { prefix_map pm; // First process our own. 
// - append_prefixes (pm, t, x_poptions); - append_prefixes (pm, t, c_poptions); + const scope& rs (*bs.root_scope ()); + append_prefixes (pm, rs, t, x_poptions); + append_prefixes (pm, rs, t, c_poptions); // Then process the include directories from prerequisite libraries. // @@ -1786,68 +1653,6 @@ namespace build2 return pm; } - // Return the next make prerequisite starting from the specified - // position and update position to point to the start of the - // following prerequisite or l.size() if there are none left. - // - static string - next_make (const string& l, size_t& p) - { - size_t n (l.size ()); - - // Skip leading spaces. - // - for (; p != n && l[p] == ' '; p++) ; - - // Lines containing multiple prerequisites are 80 characters max. - // - string r; - r.reserve (n); - - // Scan the next prerequisite while watching out for escape sequences. - // - for (; p != n && l[p] != ' '; p++) - { - char c (l[p]); - - if (p + 1 != n) - { - if (c == '$') - { - // Got to be another (escaped) '$'. - // - if (l[p + 1] == '$') - ++p; - } - else if (c == '\\') - { - // This may or may not be an escape sequence depending on whether - // what follows is "escapable". - // - switch (c = l[++p]) - { - case '\\': break; - case ' ': break; - default: c = '\\'; --p; // Restore. - } - } - } - - r += c; - } - - // Skip trailing spaces. - // - for (; p != n && l[p] == ' '; p++) ; - - // Skip final '\'. - // - if (p == n - 1 && l[p] == '\\') - p++; - - return r; - } - // VC /showIncludes output. The first line is the file being compiled // (unless clang-cl; handled by our caller). Then we have the list of // headers, one per line, in this form (text can presumably be @@ -2043,7 +1848,7 @@ namespace build2 // Any unhandled io_error is handled by the caller as a generic module // mapper io error. Returning false terminates the communication. 
// - struct compile_rule::module_mapper_state //@@ gcc_module_mapper_state + struct compile_rule::gcc_module_mapper_state { size_t skip; // Number of depdb entries to skip. size_t header_units = 0; // Number of header units imported. @@ -2054,15 +1859,20 @@ namespace build2 optional<const build2::cc::translatable_headers*> translatable_headers; small_vector<string, 2> batch; // Reuse buffers. + size_t batch_n = 0; - module_mapper_state (size_t s, module_imports& i) + gcc_module_mapper_state (size_t s, module_imports& i) : skip (s), imports (i) {} }; - bool compile_rule:: - gcc_module_mapper (module_mapper_state& st, + // The module mapper is called on one line of input at a time. It should + // return nullopt if another line is expected (batch), false if the mapper + // interaction should be terminated, and true if it should be continued. + // + optional<bool> compile_rule:: + gcc_module_mapper (gcc_module_mapper_state& st, action a, const scope& bs, file& t, linfo li, - ifdstream& is, + const string& l, ofdstream& os, depdb& dd, bool& update, bool& bad_error, optional<prefix_map>& pfx_map, srcout_map& so_map) const @@ -2078,35 +1888,40 @@ namespace build2 // Read in the entire batch trying hard to reuse the buffers. // - auto& batch (st.batch); - size_t batch_n (0); + small_vector<string, 2>& batch (st.batch); + size_t& batch_n (st.batch_n); - for (;;) + // Add the next line. + // { if (batch.size () == batch_n) - batch.push_back (string ()); - - string& r (batch[batch_n]); - - if (eof (getline (is, r))) - break; + batch.push_back (l); + else + batch[batch_n] = l; batch_n++; + } - if (r.back () != ';') - break; + // Check if more is expected in this batch. + // + { + string& r (batch[batch_n - 1]); - // Strip the trailing `;` word. - // - r.pop_back (); - r.pop_back (); - } + if (r.back () == ';') + { + // Strip the trailing `;` word. 
+ // + r.pop_back (); + r.pop_back (); - if (batch_n == 0) // EOF - return false; + return nullopt; + } + } if (verb >= 3) { + // It doesn't feel like buffering this would be useful. + // // Note that we show `;` in requests/responses so that the result // could be replayed. // @@ -2128,23 +1943,211 @@ namespace build2 for (size_t i (0); i != batch_n; ++i) { string& r (batch[i]); + size_t rn (r.size ()); + + // The protocol uses a peculiar quoting/escaping scheme that can be + // summarized as follows (see the libcody documentation for details): + // + // - Words are seperated with spaces and/or tabs. + // + // - Words need not be quoted if they only containing characters from + // the [-+_/%.A-Za-z0-9] set. + // + // - Otherwise words need to be single-quoted. + // + // - Inside single-quoted words, the \n \t \' and \\ escape sequences + // are recognized. + // + // Note that we currently don't treat abutted quotes (as in a' 'b) as + // a single word (it doesn't seem plausible that we will ever receive + // something like this). + // + size_t b (0), e (0), n; bool q; // Next word. + + auto next = [&r, rn, &b, &e, &n, &q] () -> size_t + { + if (b != e) + b = e; + + // Skip leading whitespaces. + // + for (; b != rn && (r[b] == ' ' || r[b] == '\t'); ++b) ; + + if (b != rn) + { + q = (r[b] == '\''); + + // Find first trailing whitespace or closing quote. + // + for (e = b + 1; e != rn; ++e) + { + // Note that we deal with invalid quoting/escaping in unquote(). + // + switch (r[e]) + { + case ' ': + case '\t': + if (q) + continue; + else + break; + case '\'': + if (q) + { + ++e; // Include closing quote (hopefully). + break; + } + else + { + assert (false); // Abutted quote. + break; + } + case '\\': + if (++e != rn) // Skip next character (hopefully). 
+ continue; + else + break; + default: + continue; + } + + break; + } + + n = e - b; + } + else + { + q = false; + e = rn; + n = 0; + } + + return n; + }; + + // Unquote into tmp the current word returning false if malformed. + // + auto unquote = [&r, &b, &n, &q, &tmp] (bool clear = true) -> bool + { + if (q && n > 1) + { + size_t e (b + n - 1); + + if (r[b] == '\'' && r[e] == '\'') + { + if (clear) + tmp.clear (); + + size_t i (b + 1); + for (; i != e; ++i) + { + char c (r[i]); + if (c == '\\') + { + if (++i == e) + { + i = 0; + break; + } + + c = r[i]; + if (c == 'n') c = '\n'; + else if (c == 't') c = '\t'; + } + tmp += c; + } + + if (i == e) + return true; + } + } - // @@ TODO: quoting and escaping. + return false; + }; + +#if 0 +#define UNQUOTE(x, y) \ + r = x; rn = r.size (); b = e = 0; \ + assert (next () && unquote () && tmp == y) + + UNQUOTE ("'foo bar'", "foo bar"); + UNQUOTE (" 'foo bar' ", "foo bar"); + UNQUOTE ("'foo\\\\bar'", "foo\\bar"); + UNQUOTE ("'\\'foo bar'", "'foo bar"); + UNQUOTE ("'foo bar\\''", "foo bar'"); + UNQUOTE ("'\\'foo\\\\bar\\''", "'foo\\bar'"); + + fail << "all good"; +#endif + + // Escape if necessary the specified string and append to r. // - size_t b (0), e (0), n; // Next word. + auto escape = [&r] (const string& s) + { + size_t b (0), e, n (s.size ()); + while (b != n && (e = s.find_first_of ("\\'\n\t", b)) != string::npos) + { + r.append (s, b, e - b); // Preceding chunk. + + char c (s[e]); + r += '\\'; + r += (c == '\n' ? 'n' : c == '\t' ? 't' : c); + b = e + 1; + } + + if (b != n) + r.append (s, b, e); // Final chunk. + }; - auto next = [&r, &b, &e, &n] () -> size_t + // Quote and escape if necessary the specified string and append to r. 
+ // + auto quote = [&r, &escape] (const string& s) { - return (n = next_word (r, b, e, ' ', '\t')); + if (find_if (s.begin (), s.end (), + [] (char c) + { + return !((c >= 'a' && c <= 'z') || + (c >= '0' && c <= '9') || + (c >= 'A' && c <= 'Z') || + c == '-' || c == '_' || c == '/' || + c == '.' || c == '+' || c == '%'); + }) == s.end ()) + { + r += s; + } + else + { + r += '\''; + escape (s); + r += '\''; + } }; +#if 0 +#define QUOTE(x, y) \ + r.clear (); quote (x); \ + assert (r == y) + + QUOTE ("foo/Bar-7.h", "foo/Bar-7.h"); + + QUOTE ("foo bar", "'foo bar'"); + QUOTE ("foo\\bar", "'foo\\\\bar'"); + QUOTE ("'foo bar", "'\\'foo bar'"); + QUOTE ("foo bar'", "'foo bar\\''"); + QUOTE ("'foo\\bar'", "'\\'foo\\\\bar\\''"); + + fail << "all good"; +#endif + next (); // Request name. - auto name = [&r, b, n] (const char* c) -> bool + auto name = [&r, b, n, q] (const char* c) -> bool { // We can reasonably assume a command will never be quoted. // - return (r.compare (b, n, c) == 0 && + return (!q && + r.compare (b, n, c) == 0 && (r[n] == ' ' || r[n] == '\t' || r[n] == '\0')); }; @@ -2193,7 +2196,17 @@ namespace build2 if (next ()) { - path f (r, b, n); + path f; + if (!q) + f = path (r, b, n); + else if (unquote ()) + f = path (tmp); + else + { + r = "ERROR 'malformed quoting/escaping in request'"; + continue; + } + bool exists (true); // The TU path we pass to the compiler is always absolute so any @@ -2204,8 +2217,9 @@ namespace build2 // if (exists && f.relative ()) { - tmp.assign (r, b, n); - r = "ERROR relative header path '"; r += tmp; r += '\''; + r = "ERROR 'relative header path "; + escape (f.string ()); + r += '\''; continue; } @@ -2242,16 +2256,17 @@ namespace build2 try { pair<const file*, bool> er ( - enter_header (a, bs, t, li, - move (f), false /* cache */, false /* norm */, - pfx_map, so_map)); + enter_header ( + a, bs, t, li, + move (f), false /* cache */, false /* normalized */, + pfx_map, so_map)); ht = er.first; remapped = er.second; if (remapped) { 
- r = "ERROR remapping of headers not supported"; + r = "ERROR 'remapping of headers not supported'"; continue; } @@ -2261,14 +2276,14 @@ namespace build2 // diagnostics won't really add anything to the compiler's. So // let's only print it at -V or higher. // - if (ht == nullptr) + if (ht == nullptr) // f is still valid. { assert (!exists); // Sanity check. if (verb > 2) { diag_record dr; - dr << error << "header '" << f << "' not found and no " + dr << error << "header " << f << " not found and no " << "rule to generate it"; if (verb < 4) @@ -2309,8 +2324,10 @@ namespace build2 // messy, let's keep both (it would have been nicer to print // ours after the compiler's but that isn't easy). // - r = "ERROR unable to update header '"; - r += (ht != nullptr ? ht->path () : f).string (); + // Note: if ht is NULL, f is still valid. + // + r = "ERROR 'unable to update header "; + escape ((ht != nullptr ? ht->path () : f).string ()); r += '\''; continue; } @@ -2445,17 +2462,27 @@ namespace build2 // original (which we may need to normalize when we read // this mapping in extract_headers()). // - tmp = "@ "; tmp.append (r, b, n); tmp += ' '; tmp += bp; + // @@ This still breaks if the header path contains spaces. + // GCC bug 110153. + // + tmp = "@ "; + if (!q) tmp.append (r, b, n); + else unquote (false /* clear */); // Can't fail. + tmp += ' '; + tmp += bp; + dd.expect (tmp); st.header_units++; } - r = "PATHNAME "; r += bp; + r = "PATHNAME "; + quote (bp); } catch (const failed&) { r = "ERROR 'unable to update header unit for "; - r += hs; r += '\''; + escape (hs); + r += '\''; continue; } } @@ -2481,7 +2508,7 @@ namespace build2 // Truncate the response batch and terminate the communication (see // also libcody issue #22). // - tmp.assign (r, b, n); + tmp.assign (r, b, n); // Request name (unquoted). r = "ERROR '"; r += w; r += ' '; r += tmp; r += '\''; batch_n = i + 1; term = true; @@ -2497,6 +2524,9 @@ namespace build2 // Write the response batch. 
// + // @@ It's theoretically possible that we get blocked writing the + // response while the compiler gets blocked writing the diagnostics. + // for (size_t i (0);; ) { string& r (batch[i]); @@ -2517,6 +2547,8 @@ namespace build2 os.flush (); + batch_n = 0; // Start a new batch. + return !term; } @@ -2770,9 +2802,10 @@ namespace build2 if (exists) { pair<const file*, bool> r ( - enter_header (a, bs, t, li, - move (f), false /* cache */, false /* norm */, - pfx_map, so_map)); + enter_header ( + a, bs, t, li, + move (f), false /* cache */, false /* normalized */, + pfx_map, so_map)); if (!r.second) // Shouldn't be remapped. ht = r.first; @@ -2780,7 +2813,7 @@ namespace build2 if (ht != pts.back ()) { - ht = static_cast<const file*> (pts.back ().target); + ht = &pts.back ().target->as<file> (); rs = "ERROR expected header '" + ht->path ().string () + "' to be found instead"; bad_error = true; // We expect an error from the compiler. @@ -2797,9 +2830,10 @@ namespace build2 try { pair<const file*, bool> er ( - enter_header (a, bs, t, li, - move (f), false /* cache */, false /* norm */, - pfx_map, so_map)); + enter_header ( + a, bs, t, li, + move (f), false /* cache */, false /* normalized */, + pfx_map, so_map)); ht = er.first; remapped = er.second; @@ -2817,7 +2851,7 @@ namespace build2 // diagnostics won't really add anything to the compiler's. So // let's only print it at -V or higher. // - if (ht == nullptr) + if (ht == nullptr) // f is still valid. { assert (!exists); // Sanity check. @@ -2864,10 +2898,12 @@ namespace build2 // messy, let's keep both (it would have been nicer to print // ours after the compiler's but that isn't easy). // + // Note: if ht is NULL, f is still valid. + // rs = !exists ? string ("INCLUDE") : ("ERROR unable to update header '" + - (ht != nullptr ? ht->path () : f).string () + "'"); + (ht != nullptr ? 
ht->path () : f).string () + '\''); bad_error = true; break; @@ -2945,7 +2981,7 @@ namespace build2 } catch (const failed&) { - rs = "ERROR unable to update header unit '" + hp + "'"; + rs = "ERROR unable to update header unit '" + hp + '\''; bad_error = true; break; } @@ -2987,351 +3023,204 @@ namespace build2 } #endif - // Enter as a target a header file. Depending on the cache flag, the file - // is assumed to either have come from the depdb cache or from the - // compiler run. - // - // Return the header target and an indication of whether it was remapped - // or NULL if the header does not exist and cannot be generated. In the - // latter case the passed header path is guaranteed to be still valid but - // might have been adjusted (e.g., normalized, etc). + //atomic_count cache_hit {0}; + //atomic_count cache_mis {0}; + //atomic_count cache_cls {0}; + + // The fp path is only moved from on success. // // Note: this used to be a lambda inside extract_headers() so refer to the // body of that function for the overall picture. // pair<const file*, bool> compile_rule:: enter_header (action a, const scope& bs, file& t, linfo li, - path&& f, bool cache, bool norm, - optional<prefix_map>& pfx_map, srcout_map& so_map) const + path&& fp, bool cache, bool norm, + optional<prefix_map>& pfx_map, + const srcout_map& so_map) const { tracer trace (x, "compile_rule::enter_header"); - // Find or maybe insert the target. The directory is only moved from if - // insert is true. Note that it must be normalized. - // - auto find = [&trace, &t, this] (dir_path&& d, - path&& f, - bool insert) -> const file* + // It's reasonable to expect the same header to be included by multiple + // translation units, which means we will be re-doing this work over and + // over again. And it's not exactly cheap, taking up to 50% of an + // up-to-date check time on some projects. So we are going to cache the + // header path to target mapping. 
+ // + // While we pass quite a bit of specific "context" (target, base scope) + // to enter_file(), here is the analysis why the result will not depend + // on this context for the non-absent header (fp is absolute): + // + // 1. Let's start with the base scope (bs). Firstly, the base scope + // passed to map_extension() is the scope of the header (i.e., it is + // the scope of fp.directory()). Other than that, the target base + // scope is only passed to build_prefix_map() which is only called + // for the absent header (linfo is also only used here). + // + // 2. Next is the target (t). It is passed to build_prefix_map() but + // that doesn't matter for the same reason as in (1). Other than + // that, it is only passed to build2::search() which in turn passes + // it to target type-specific prerequisite search callback (see + // target_type::search) if one is not NULL. The target type in + // question here is one of the headers and we know all of them use + // the standard file_search() which ignores the passed target. + // + // 3. Finally, so_map could be used for an absolute fp. While we could + // simply not cache the result if it was used (second half of the + // result pair is true), there doesn't seem to be any harm in caching + // the remapped path->target mapping. In fact, if to think about it, + // there is no harm in caching the generated file mapping since it + // will be immediately generated and any subsequent inclusions we + // will "see" with an absolute path, which we can resolve from the + // cache. + // + // To put it another way, all we need to do is make sure that if we were + // to not return an existing cache entry, the call to enter_file() would + // have returned exactly the same path/target. + // + // @@ Could it be that the header is re-mapped in one config but not the + // other (e.g., when we do both in src and in out builds and we pick + // the generated header in src)? If so, that would lead to a + // divergence. 
I.e., we would cache the no-remap case first and then + // return it even though the re-map is necessary? Why can't we just + // check for re-mapping ourselves? A: the remapping logic in + // enter_file() is not exactly trivial. + // + // But on the other hand, I think we can assume that different + // configurations will end up with different caches. In other words, + // we can assume that for the same "cc amalgamation" we use only a + // single "version" of a header. Seems reasonable. + // + // Note also that while it would have been nice to have a unified cc + // cache, the map_extension() call is passed x_inc which is module- + // specific. In other words, we may end up mapping the same header to + // two different targets depending on whether it is included from, say, + // C or C++ translation unit. We could have used a unified cache for + // headers that were mapped using the fallback target type, which would + // cover the installed headers. Maybe, one day (it's also possible that + // separate caches reduce contention). + // + // Another related question is where we want to keep the cache: project, + // strong amalgamation, or weak amalgamation (like module sidebuilds). + // Some experimentation showed that weak has the best performance (which + // suggest that a unified cache will probably be a win). + // + // Note also that we don't need to clear this cache since we never clear + // the targets set. In other words, the only time targets are + // invalidated is when we destroy the build context, which also destroys + // the cache. + // + const config_module& hc (*header_cache_); + + // First check the cache. + // + config_module::header_key hk; + + bool e (fp.absolute ()); + if (e) { - // Split the file into its name part and extension. Here we can assume - // the name part is a valid filesystem name. 
- // - // Note that if the file has no extension, we record an empty - // extension rather than NULL (which would signify that the default - // extension should be added). - // - string e (f.extension ()); - string n (move (f).string ()); - - if (!e.empty ()) - n.resize (n.size () - e.size () - 1); // One for the dot. - - // See if this directory is part of any project and if so determine - // the target type. - // - // While at it also determine if this target is from the src or out - // tree of said project. - // - dir_path out; - - // It's possible the extension-to-target type mapping is ambiguous - // (usually because both C and X-language headers use the same .h - // extension). In this case we will first try to find one that matches - // an explicit target (similar logic to when insert is false). - // - small_vector<const target_type*, 2> tts; - - // Note that the path can be in out or src directory and the latter - // can be associated with multiple scopes. So strictly speaking we - // need to pick one that is "associated" with us. But that is still a - // TODO (see scope_map::find() for details) and so for now we just - // pick the first one (it's highly unlikely the source file extension - // mapping will differ based on the configuration). - // - { - const scope& bs (**t.ctx.scopes.find (d).first); - if (const scope* rs = bs.root_scope ()) - { - tts = map_extension (bs, n, e); - - if (!bs.out_eq_src () && d.sub (bs.src_path ())) - out = out_src (d, *rs); - } - } - - // If it is outside any project, or the project doesn't have such an - // extension, assume it is a plain old C header. - // - if (tts.empty ()) + if (!norm) { - // If the project doesn't "know" this extension then we can't - // possibly find an explicit target of this type. - // - if (!insert) - return nullptr; - - tts.push_back (&h::static_type); + normalize_external (fp, "header"); + norm = true; } - // Find or insert target. 
- // - // Note that in case of the target type ambiguity we first try to find - // an explicit target that resolves this ambiguity. - // - const target* r (nullptr); + hk.file = move (fp); + hk.hash = hash<path> () (hk.file); - if (!insert || tts.size () > 1) + slock l (hc.header_map_mutex); + auto i (hc.header_map.find (hk)); + if (i != hc.header_map.end ()) { - // Note that we skip any target type-specific searches (like for an - // existing file) and go straight for the target object since we - // need to find the target explicitly spelled out. - // - // Also, it doesn't feel like we should be able to resolve an - // absolute path with a spelled-out extension to multiple targets. - // - for (const target_type* tt: tts) - if ((r = t.ctx.targets.find (*tt, d, out, n, e, trace)) != nullptr) - break; - - // Note: we can't do this because of the in-source builds where - // there won't be explicit targets for non-generated headers. - // - // This should be harmless, however, since in our world generated - // headers are normally spelled-out as explicit targets. And if not, - // we will still get an error, just a bit less specific. - // -#if 0 - if (r == nullptr && insert) - { - f = d / n; - if (!e.empty ()) - { - f += '.'; - f += e; - } - - diag_record dr (fail); - dr << "mapping of header " << f << " to target type is ambiguous"; - for (const target_type* tt: tts) - dr << info << "could be " << tt->name << "{}"; - dr << info << "spell-out its target to resolve this ambiguity"; - } -#endif + //cache_hit.fetch_add (1, memory_order_relaxed); + return make_pair (i->second, false); } - // @@ OPT: move d, out, n - // - if (r == nullptr && insert) - r = &search (t, *tts[0], d, out, n, &e, nullptr); + fp = move (hk.file); - return static_cast<const file*> (r); - }; + //cache_mis.fetch_add (1, memory_order_relaxed); + } - // If it's not absolute then it either does not (yet) exist or is a - // relative ""-include (see init_args() for details). 
Reduce the second - // case to absolute. - // - // Note: we now always use absolute path to the translation unit so this - // no longer applies. But let's keep it for posterity. - // -#if 0 - if (f.relative () && rels.relative ()) + struct data { - // If the relative source path has a directory component, make sure - // it matches since ""-include will always start with that (none of - // the compilers we support try to normalize this path). Failed that - // we may end up searching for a generated header in a random - // (working) directory. - // - const string& fs (f.string ()); - const string& ss (rels.string ()); - - size_t p (path::traits::rfind_separator (ss)); - - if (p == string::npos || // No directory. - (fs.size () > p + 1 && - path::traits::compare (fs.c_str (), p, ss.c_str (), p) == 0)) - { - path t (work / f); // The rels path is relative to work. - - if (exists (t)) - f = move (t); - } - } -#endif + linfo li; + optional<prefix_map>& pfx_map; + } d {li, pfx_map}; + + // If it is outside any project, or the project doesn't have such an + // extension, assume it is a plain old C header. + // + auto r (enter_file ( + trace, "header", + a, bs, t, + fp, cache, norm, + [this] (const scope& bs, const string& n, const string& e) + { + return map_extension (bs, n, e, x_inc); + }, + h::static_type, + [this, &d] (action a, const scope& bs, const target& t) + -> const prefix_map& + { + if (!d.pfx_map) + d.pfx_map = build_prefix_map (bs, a, t, d.li); - const file* pt (nullptr); - bool remapped (false); + return *d.pfx_map; + }, + so_map)); - // If still relative then it does not exist. + // Cache. // - if (f.relative ()) + if (r.first != nullptr) { - // This is probably as often an error as an auto-generated file, so - // trace at level 4. - // - l4 ([&]{trace << "non-existent header '" << f << "'";}); - - f.normalize (); - - // The relative path might still contain '..' (e.g., ../foo.hxx; - // presumably ""-include'ed). 
We don't attempt to support auto- - // generated headers with such inclusion styles. - // - if (f.normalized ()) - { - if (!pfx_map) - pfx_map = build_prefix_map (bs, a, t, li); - - // First try the whole file. Then just the directory. - // - // @@ Has to be a separate map since the prefix can be the same as - // the file name. - // - // auto i (pfx_map->find (f)); - - // Find the most qualified prefix of which we are a sub-path. - // - if (!pfx_map->empty ()) - { - dir_path d (f.directory ()); - auto i (pfx_map->find_sup (d)); + hk.file = move (fp); - if (i != pfx_map->end ()) - { - // Note: value in pfx_map is not necessarily canonical. - // - dir_path pd (i->second.directory); - pd.canonicalize (); - - l4 ([&]{trace << "prefix '" << d << "' mapped to " << pd;}); - - // If this is a prefixless mapping, then only use it if we can - // resolve it to an existing target (i.e., it is explicitly - // spelled out in a buildfile). - // - // Note that at some point we will probably have a list of - // directories. - // - pt = find (pd / d, f.leaf (), !i->first.empty ()); - if (pt != nullptr) - { - f = pd / f; - l4 ([&]{trace << "mapped as auto-generated " << f;}); - } - else - l4 ([&]{trace << "no explicit target in " << pd;}); - } - else - l4 ([&]{trace << "no prefix map entry for '" << d << "'";}); - } - else - l4 ([&]{trace << "prefix map is empty";}); - } - } - else - { - // Normalize the path unless it comes from the depdb, in which case - // we've already done that (normally). This is also where we handle - // src-out remap (again, not needed if cached). + // Calculate the hash if we haven't yet and re-calculate it if the + // path has changed (header has been remapped). // - if (!cache || norm) - normalize_header (f); + if (!e || r.second) + hk.hash = hash<path> () (hk.file); - if (!cache) + const file* f; { - if (!so_map.empty ()) - { - // Find the most qualified prefix of which we are a sub-path. 
- // - auto i (so_map.find_sup (f)); - if (i != so_map.end ()) - { - // Ok, there is an out tree for this headers. Remap to a path - // from the out tree and see if there is a target for it. Note - // that the value in so_map is not necessarily canonical. - // - dir_path d (i->second); - d /= f.leaf (i->first).directory (); - d.canonicalize (); - - pt = find (move (d), f.leaf (), false); // d is not moved from. - - if (pt != nullptr) - { - path p (d / f.leaf ()); - l4 ([&]{trace << "remapping " << f << " to " << p;}); - f = move (p); - remapped = true; - } - } - } + ulock l (hc.header_map_mutex); + auto p (hc.header_map.emplace (move (hk), r.first)); + f = p.second ? nullptr : p.first->second; } - if (pt == nullptr) + if (f != nullptr) { - l6 ([&]{trace << "entering " << f;}); - pt = find (f.directory (), f.leaf (), true); + //cache_cls.fetch_add (1, memory_order_relaxed); + assert (r.first == f); } } - return make_pair (pt, remapped); + return r; } - // Update and add to the list of prerequisite targets a header or header - // unit target. - // - // Return the indication of whether it has changed or, if the passed - // timestamp is not timestamp_unknown, is older than the target. If the - // header does not exists nor can be generated (no rule), then issue - // diagnostics and fail if the fail argument is true and return nullopt - // otherwise. - // // Note: this used to be a lambda inside extract_headers() so refer to the // body of that function for the overall picture. // optional<bool> compile_rule:: inject_header (action a, file& t, - const file& pt, timestamp mt, bool f /* fail */) const + const file& pt, timestamp mt, bool fail) const { tracer trace (x, "compile_rule::inject_header"); - // Even if failing we still use try_match() in order to issue consistent - // (with extract_headers() below) diagnostics (rather than the generic - // "not rule to update ..."). 
- // - if (!try_match (a, pt).first) - { - if (!f) - return nullopt; - - diag_record dr; - dr << fail << "header " << pt << " not found and no rule to " - << "generate it"; - - if (verb < 4) - dr << info << "re-run with --verbose=4 for more information"; - } - - bool r (update (trace, a, pt, mt)); - - // Add to our prerequisite target list. - // - t.prerequisite_targets[a].push_back (&pt); - - return r; + return inject_file (trace, "header", a, t, pt, mt, fail); } - // Extract and inject header dependencies. Return the preprocessed source - // file as well as an indication if it is usable for compilation (see - // below for details). + // Extract and inject header dependencies. Return (in result) the + // preprocessed source file as well as an indication if it is usable for + // compilation (see below for details). Note that result is expected to + // be initialized to {entry (), false}. Not using return type due to + // GCC bug #107555. // // This is also the place where we handle header units which are a lot // more like auto-generated headers than modules. In particular, if a // header unit BMI is out-of-date, then we have to re-preprocess this // translation unit. // - pair<file_cache::entry, bool> compile_rule:: + void compile_rule:: extract_headers (action a, const scope& bs, file& t, @@ -3341,7 +3230,8 @@ namespace build2 depdb& dd, bool& update, timestamp mt, - module_imports& imports) const + module_imports& imports, + pair<file_cache::entry, bool>& result) const { tracer trace (x, "compile_rule::extract_headers"); @@ -3354,19 +3244,16 @@ namespace build2 file_cache::entry psrc; bool puse (true); - // If things go wrong (and they often do in this area), give the user a - // bit extra context. + // Preprocessed file extension. // - auto df = make_diag_frame ( - [&src](const diag_record& dr) - { - if (verb != 0) - dr << info << "while extracting header dependencies from " << src; - }); + const char* pext (x_assembler_cpp (src) ? ".Si" : + x_objective (src) ? 
x_obj_pext : + x_pext); // Preprocesor mode that preserves as much information as possible while // still performing inclusions. Also serves as a flag indicating whether - // this compiler uses the separate preprocess and compile setup. + // this (non-MSVC) compiler uses the separate preprocess and compile + // setup. // const char* pp (nullptr); @@ -3377,7 +3264,16 @@ namespace build2 // -fdirectives-only is available since GCC 4.3.0. // if (cmaj > 4 || (cmaj == 4 && cmin >= 3)) - pp = "-fdirectives-only"; + { + // Note that for assembler-with-cpp GCC currently forces full + // preprocessing in (what appears to be) an attempt to paper over + // a deeper issue (see GCC bug 109534). If/when that bug gets + // fixed, we can enable this on our side. Note also that Clang's + // -frewrite-includes appear to work correctly on such files. + // + if (!x_assembler_cpp (src)) + pp = "-fdirectives-only"; + } break; } @@ -3467,7 +3363,7 @@ namespace build2 // // GCC's -fdirective-only, on the other hand, processes all the // directives so they are gone from the preprocessed source. Here is - // what we are going to do to work around this: we will detect if any + // what we are going to do to work around this: we will sense if any // diagnostics has been written to stderr on the -E run. If that's the // case (but the compiler indicated success) then we assume they are // warnings and disable the use of the preprocessed output for @@ -3496,6 +3392,8 @@ namespace build2 // // So seeing that it is hard to trigger a legitimate VC preprocessor // warning, for now, we will just treat them as errors by adding /WX. + // BTW, another example of a plausible preprocessor warnings are C4819 + // and C4828 (character unrepresentable in source charset). 
// // Finally, if we are using the module mapper, then all this mess falls // away: we only run the compiler once, we let the diagnostics through, @@ -3503,7 +3401,9 @@ namespace build2 // not found, and there is no problem with outdated generated headers // since we update/remap them before the compiler has a chance to read // them. Overall, this "dependency mapper" approach is how it should - // have been done from the beginning. + // have been done from the beginning. Note: that's the ideal world, + // the reality is that the required mapper extensions are not (yet) + // in libcody/GCC. // Note: diagnostics sensing is currently only supported if dependency // info is written to a file (see above). @@ -3513,15 +3413,15 @@ namespace build2 // And here is another problem: if we have an already generated header // in src and the one in out does not yet exist, then the compiler will // pick the one in src and we won't even notice. Note that this is not - // only an issue with mixing in- and out-of-tree builds (which does feel + // only an issue with mixing in and out of source builds (which does feel // wrong but is oh so convenient): this is also a problem with // pre-generated headers, a technique we use to make installing the // generator by end-users optional by shipping pre-generated headers. // // This is a nasty problem that doesn't seem to have a perfect solution - // (except, perhaps, C++ modules). So what we are going to do is try to - // rectify the situation by detecting and automatically remapping such - // mis-inclusions. It works as follows. + // (except, perhaps, C++ modules and/or module mapper). So what we are + // going to do is try to rectify the situation by detecting and + // automatically remapping such mis-inclusions. It works as follows. // // First we will build a map of src/out pairs that were specified with // -I. 
Here, for performance and simplicity, we will assume that they @@ -3534,10 +3434,7 @@ namespace build2 // case, then we calculate a corresponding header in the out tree and, // (this is the most important part), check if there is a target for // this header in the out tree. This should be fairly accurate and not - // require anything explicit from the user except perhaps for a case - // where the header is generated out of nothing (so there is no need to - // explicitly mention its target in the buildfile). But this probably - // won't be very common. + // require anything explicit from the user. // // One tricky area in this setup are target groups: if the generated // sources are mentioned in the buildfile as a group, then there might @@ -3547,10 +3444,7 @@ namespace build2 // generated depending on the options (e.g., inline files might be // suppressed), headers are usually non-optional. // - // Note that we use path_map instead of dir_path_map to allow searching - // using path (file path). - // - srcout_map so_map; // path_map<dir_path> + srcout_map so_map; // Dynamic module mapper. // @@ -3558,13 +3452,13 @@ namespace build2 // The gen argument to init_args() is in/out. The caller signals whether // to force the generated header support and on return it signals - // whether this support is enabled. The first call to init_args is - // expected to have gen false. + // whether this support is enabled. If gen is false, then stderr is + // expected to be either discarded or merged with sdtout. // // Return NULL if the dependency information goes to stdout and a // pointer to the temporary file path otherwise. // - auto init_args = [a, &t, ot, li, reprocess, + auto init_args = [a, &t, ot, li, reprocess, pext, &src, &md, &psrc, &sense_diag, &mod_mapper, &bs, pp, &env, &args, &args_gen, &args_i, &out, &drm, &so_map, this] @@ -3630,17 +3524,13 @@ namespace build2 // Populate the src-out with the -I$out_base -I$src_base pairs. 
// { + srcout_builder builder (ctx, so_map); + // Try to be fast and efficient by reusing buffers as much as // possible. // string ds; - // Previous -I innermost scope if out_base plus the difference - // between the scope path and the -I path (normally empty). - // - const scope* s (nullptr); - dir_path p; - for (auto i (args.begin ()), e (args.end ()); i != e; ++i) { const char* o (*i); @@ -3665,7 +3555,7 @@ namespace build2 if (p == 0) { - s = nullptr; + builder.skip (); continue; } @@ -3698,68 +3588,14 @@ namespace build2 // if (!d.empty ()) { - // Ignore any paths containing '.', '..' components. Allow - // any directory separators though (think -I$src_root/foo - // on Windows). - // - if (d.absolute () && d.normalized (false)) - { - // If we have a candidate out_base, see if this is its - // src_base. - // - if (s != nullptr) - { - const dir_path& bp (s->src_path ()); - - if (d.sub (bp)) - { - if (p.empty () || d.leaf (bp) == p) - { - // We've got a pair. - // - so_map.emplace (move (d), s->out_path () / p); - s = nullptr; // Taken. - continue; - } - } - - // Not a pair. Fall through to consider as out_base. - // - s = nullptr; - } - - // See if this path is inside a project with an out-of- - // tree build and is in the out directory tree. - // - const scope& bs (ctx.scopes.find_out (d)); - if (bs.root_scope () != nullptr) - { - if (!bs.out_eq_src ()) - { - const dir_path& bp (bs.out_path ()); - - bool e; - if ((e = (d == bp)) || d.sub (bp)) - { - s = &bs; - if (e) - p.clear (); - else - p = d.leaf (bp); - } - } - } - } - else - s = nullptr; - - ds = move (d).string (); // Move the buffer out. + if (!builder.next (move (d))) + ds = move (d).string (); // Move the buffer back out. } else - s = nullptr; + builder.skip (); } else - s = nullptr; + builder.skip (); } } @@ -3806,19 +3642,41 @@ namespace build2 append_options (args, cmode); append_sys_hdr_options (args); // Extra system header dirs (last). 
- // See perform_update() for details on /external:W0, /EHsc, /MD. + // Note that for MSVC stderr is merged with stdout and is then + // parsed, so no append_diag_color_options() call. + + // See perform_update() for details on the choice of options. // + { + bool sc (find_option_prefixes ( + {"/source-charset:", "-source-charset:"}, args)); + bool ec (find_option_prefixes ( + {"/execution-charset:", "-execution-charset:"}, args)); + + if (!sc && !ec) + args.push_back ("/utf-8"); + else + { + if (!sc) + args.push_back ("/source-charset:UTF-8"); + + if (!ec) + args.push_back ("/execution-charset:UTF-8"); + } + } + if (cvariant != "clang" && isystem (*this)) { - if (find_option_prefix ("/external:I", args) && - !find_option_prefix ("/external:W", args)) + if (find_option_prefixes ({"/external:I", "-external:I"}, args) && + !find_option_prefixes ({"/external:W", "-external:W"}, args)) args.push_back ("/external:W0"); } - if (x_lang == lang::cxx && !find_option_prefix ("/EH", args)) + if (x_lang == lang::cxx && + !find_option_prefixes ({"/EH", "-EH"}, args)) args.push_back ("/EHsc"); - if (!find_option_prefixes ({"/MD", "/MT"}, args)) + if (!find_option_prefixes ({"/MD", "/MT", "-MD", "-MT"}, args)) args.push_back ("/MD"); args.push_back ("/P"); // Preprocess to file. @@ -3829,7 +3687,7 @@ namespace build2 msvc_sanitize_cl (args); - psrc = ctx.fcache.create (t.path () + x_pext, !modules); + psrc = ctx.fcache->create (t.path () + pext, !modules); if (fc) { @@ -3848,8 +3706,20 @@ namespace build2 } case compiler_class::gcc: { + append_options (args, cmode, + cmode.size () - (modules && clang ? 1 : 0)); + append_sys_hdr_options (args); // Extra system header dirs (last). + + // If not gen, then stderr is discarded. + // + if (gen) + append_diag_color_options (args); + // See perform_update() for details on the choice of options. 
// + if (!find_option_prefix ("-finput-charset=", args)) + args.push_back ("-finput-charset=UTF-8"); + if (ot == otype::s) { if (tclass == "linux" || tclass == "bsd") @@ -3878,10 +3748,6 @@ namespace build2 } } - append_options (args, cmode, - cmode.size () - (modules && clang ? 1 : 0)); - append_sys_hdr_options (args); // Extra system header dirs (last). - // Setup the dynamic module mapper if needed. // // Note that it's plausible in the future we will use it even if @@ -3973,7 +3839,7 @@ namespace build2 // Preprocessor output. // - psrc = ctx.fcache.create (t.path () + x_pext, !modules); + psrc = ctx.fcache->create (t.path () + pext, !modules); args.push_back ("-o"); args.push_back (psrc.path ().string ().c_str ()); } @@ -4099,15 +3965,12 @@ namespace build2 // to be inconvenient: some users like to re-run a failed build with // -s not to get "swamped" with errors. // - bool df (!ctx.match_only && !ctx.dry_run_option); - - const file* ht (enter_header (a, bs, t, li, - move (hp), cache, false /* norm */, - pfx_map, so_map).first); - if (ht == nullptr) + auto fail = [&ctx] (const auto& h) -> optional<bool> { + bool df (!ctx.match_only && !ctx.dry_run_option); + diag_record dr; - dr << error << "header '" << hp << "' not found and no rule to " + dr << error << "header " << h << " not found and no rule to " << "generate it"; if (df) @@ -4116,41 +3979,44 @@ namespace build2 if (verb < 4) dr << info << "re-run with --verbose=4 for more information"; - if (df) return nullopt; else dr << endf; - } + if (df) + return nullopt; + else + dr << endf; + }; - // If we are reading the cache, then it is possible the file has since - // been removed (think of a header in /usr/local/include that has been - // uninstalled and now we need to use one from /usr/include). This - // will lead to the match failure which we translate to a restart. 
- // - if (optional<bool> u = inject_header (a, t, *ht, mt, false /* fail */)) + if (const file* ht = enter_header ( + a, bs, t, li, + move (hp), cache, cache /* normalized */, + pfx_map, so_map).first) { - // Verify/add it to the dependency database. + // If we are reading the cache, then it is possible the file has + // since been removed (think of a header in /usr/local/include that + // has been uninstalled and now we need to use one from + // /usr/include). This will lead to the match failure which we + // translate to a restart. And, yes, this case will trip up + // inject_header(), not enter_header(). // - if (!cache) - dd.expect (ht->path ()); - - skip_count++; - return *u; - } - else if (!cache) - { - diag_record dr; - dr << error << "header " << *ht << " not found and no rule to " - << "generate it"; - - if (df) - dr << info << "failure deferred to compiler diagnostics"; - - if (verb < 4) - dr << info << "re-run with --verbose=4 for more information"; + if (optional<bool> u = inject_header (a, t, *ht, mt, false /*fail*/)) + { + // Verify/add it to the dependency database. + // + if (!cache) + dd.expect (ht->path ()); - if (df) return nullopt; else dr << endf; + skip_count++; + return *u; + } + else if (cache) + { + dd.write (); // Invalidate this line. + return true; + } + else + return fail (*ht); } - - dd.write (); // Invalidate this line. - return true; + else + return fail (hp); // hp is still valid. }; // As above but for a header unit. Note that currently it is only used @@ -4167,13 +4033,13 @@ namespace build2 const file* ht ( enter_header (a, bs, t, li, - move (hp), true /* cache */, true /* norm */, + move (hp), true /* cache */, false /* normalized */, pfx_map, so_map).first); - if (ht == nullptr) + if (ht == nullptr) // hp is still valid. 
{ diag_record dr; - dr << error << "header '" << hp << "' not found and no rule to " + dr << error << "header " << hp << " not found and no rule to " << "generate it"; if (df) @@ -4219,6 +4085,16 @@ namespace build2 const path* drmp (nullptr); // Points to drm.path () if active. + // If things go wrong (and they often do in this area), give the user a + // bit extra context. + // + auto df = make_diag_frame ( + [&src](const diag_record& dr) + { + if (verb != 0) + dr << info << "while extracting header dependencies from " << src; + }); + // If nothing so far has invalidated the dependency database, then try // the cached data before running the compiler. // @@ -4255,11 +4131,14 @@ namespace build2 // // See apply() for details on the extra MSVC check. // - return modules && (ctype != compiler_type::msvc || - md.type != unit_type::module_intf) - ? make_pair (ctx.fcache.create_existing (t.path () + x_pext), - true) - : make_pair (file_cache::entry (), false); + if (modules && (ctype != compiler_type::msvc || + md.type != unit_type::module_intf)) + { + result.first = ctx.fcache->create_existing (t.path () + pext); + result.second = true; + } + + return; } // This can be a header or a header unit (mapping). @@ -4312,7 +4191,7 @@ namespace build2 // Bail out early if we have deferred a failure. // - return make_pair (file_cache::entry (), false); + return; } } } @@ -4338,6 +4217,12 @@ namespace build2 process pr; + // We use the fdstream_mode::skip mode on stdout (cannot be used + // on both) and so dbuf must be destroyed (closed) first. + // + ifdstream is (ifdstream::badbit); + diag_buffer dbuf (ctx); + try { // Assume the preprocessed output (if produced) is usable @@ -4358,217 +4243,229 @@ namespace build2 // bool good_error (false), bad_error (false); - // If we have no generated header support, then suppress all - // diagnostics (if things go badly we will restart with this - // support). - // - if (drmp == nullptr) // Dependency info goes to stdout. 
+ if (mod_mapper) // Dependency info is implied by mapper requests. { - assert (!sense_diag); // Note: could support with fdselect(). + assert (gen && !sense_diag); // Not used in this mode. - // For VC with /P the dependency info and diagnostics all go - // to stderr so redirect it to stdout. + // Note that here we use the skip mode on the diagnostics + // stream which means we have to use own instance of stdout + // stream for the correct destruction order (see below). // - pr = process ( - cpath, - args.data (), - 0, - -1, - cclass == compiler_class::msvc ? 1 : gen ? 2 : -2, - nullptr, // CWD - env.empty () ? nullptr : env.data ()); - } - else // Dependency info goes to a temporary file. - { pr = process (cpath, - args.data (), - mod_mapper ? -1 : 0, - mod_mapper ? -1 : 2, // Send stdout to stderr. - gen ? 2 : sense_diag ? -1 : -2, + args, + -1, + -1, + diag_buffer::pipe (ctx), nullptr, // CWD env.empty () ? nullptr : env.data ()); - // Monitor for module mapper requests and/or diagnostics. If - // diagnostics is detected, mark the preprocessed output as - // unusable for compilation. - // - if (mod_mapper || sense_diag) + dbuf.open (args[0], + move (pr.in_efd), + fdstream_mode::non_blocking | + fdstream_mode::skip); + try { - module_mapper_state mm_state (skip_count, imports); + gcc_module_mapper_state mm_state (skip_count, imports); + + // Note that while we read both streams until eof in normal + // circumstances, we cannot use fdstream_mode::skip for the + // exception case on both of them: we may end up being + // blocked trying to read one stream while the process may + // be blocked writing to the other. So in case of an + // exception we only skip the diagnostics and close the + // mapper stream hard. The latter (together with closing of + // the stdin stream) should happen first so the order of + // the following variable is important. + // + // Note also that we open the stdin stream in the blocking + // mode. 
+ // + ifdstream is (move (pr.in_ofd), + fdstream_mode::non_blocking, + ifdstream::badbit); // stdout + ofdstream os (move (pr.out_fd)); // stdin (badbit|failbit) - const char* w (nullptr); - try + // Read until we reach EOF on all streams. + // + // Note that if dbuf is not opened, then we automatically + // get an inactive nullfd entry. + // + fdselect_set fds {is.fd (), dbuf.is.fd ()}; + fdselect_state& ist (fds[0]); + fdselect_state& dst (fds[1]); + + bool more (false); + for (string l; ist.fd != nullfd || dst.fd != nullfd; ) { - // For now we don't need to do both so let's use a simpler - // blocking implementation. Note that the module mapper - // also needs to be adjusted when switching to the - // non-blocking version. + // @@ Currently we will accept a (potentially truncated) + // line that ends with EOF rather than newline. // -#if 1 - assert (mod_mapper != sense_diag); - - if (mod_mapper) + if (ist.fd != nullfd && getline_non_blocking (is, l)) { - w = "module mapper request"; - - // Note: the order is important (see the non-blocking - // verison for details). 
- // - ifdstream is (move (pr.in_ofd), - fdstream_mode::skip, - ifdstream::badbit); - ofdstream os (move (pr.out_fd)); - - do + if (eof (is)) { - if (!gcc_module_mapper (mm_state, - a, bs, t, li, - is, os, - dd, update, bad_error, - pfx_map, so_map)) - break; + os.close (); + is.close (); - } while (!is.eof ()); + if (more) + throw_generic_ios_failure (EIO, "unexpected EOF"); - os.close (); - is.close (); - } - - if (sense_diag) - { - w = "diagnostics"; - ifdstream is (move (pr.in_efd), fdstream_mode::skip); - puse = puse && (is.peek () == ifdstream::traits_type::eof ()); - is.close (); - } -#else - fdselect_set fds; - auto add = [&fds] (const auto_fd& afd) -> fdselect_state* - { - int fd (afd.get ()); - fdmode (fd, fdstream_mode::non_blocking); - fds.push_back (fd); - return &fds.back (); - }; - - // Note that while we read both streams until eof in - // normal circumstances, we cannot use fdstream_mode::skip - // for the exception case on both of them: we may end up - // being blocked trying to read one stream while the - // process may be blocked writing to the other. So in case - // of an exception we only skip the diagnostics and close - // the mapper stream hard. The latter should happen first - // so the order of the following variable is important. - // - ifdstream es; - ofdstream os; - ifdstream is; - - fdselect_state* ds (nullptr); - if (sense_diag) - { - w = "diagnostics"; - ds = add (pr.in_efd); - es.open (move (pr.in_efd), fdstream_mode::skip); - } - - fdselect_state* ms (nullptr); - if (mod_mapper) - { - w = "module mapper request"; - ms = add (pr.in_ofd); - is.open (move (pr.in_ofd)); - os.open (move (pr.out_fd)); // Note: blocking. - } - - // Set each state pointer to NULL when the respective - // stream reaches eof. - // - while (ds != nullptr || ms != nullptr) - { - w = "output"; - ifdselect (fds); - - // First read out the diagnostics in case the mapper - // interaction produces more. 
To make sure we don't get - // blocked by full stderr, the mapper should only handle - // one request at a time. - // - if (ds != nullptr && ds->ready) + ist.fd = nullfd; + } + else { - w = "diagnostics"; - - for (char buf[4096];;) - { - streamsize c (sizeof (buf)); - streamsize n (es.readsome (buf, c)); + optional<bool> r ( + gcc_module_mapper (mm_state, + a, bs, t, li, + l, os, + dd, update, bad_error, + pfx_map, so_map)); - if (puse && n > 0) - puse = false; + more = !r.has_value (); - if (n < c) - break; - } - - if (es.eof ()) - { - es.close (); - ds->fd = nullfd; - ds = nullptr; - } - } - - if (ms != nullptr && ms->ready) - { - w = "module mapper request"; - - gcc_module_mapper (mm_state, - a, bs, t, li, - is, os, - dd, update, bad_error, - pfx_map, so_map); - if (is.eof ()) + if (more || *r) + l.clear (); + else { os.close (); is.close (); - ms->fd = nullfd; - ms = nullptr; + ist.fd = nullfd; } } + + continue; } -#endif - } - catch (const io_error& e) - { - if (pr.wait ()) - fail << "io error handling " << x_lang << " compiler " - << w << ": " << e; - // Fall through. + ifdselect (fds); + + if (dst.ready) + { + if (!dbuf.read ()) + dst.fd = nullfd; + } } - if (mod_mapper) - md.header_units += mm_state.header_units; + md.header_units += mm_state.header_units; + } + catch (const io_error& e) + { + // Note that diag_buffer handles its own io errors so this + // is about mapper stdin/stdout. + // + if (pr.wait ()) + fail << "io error handling " << x_lang << " compiler " + << "module mapper request: " << e; + + // Fall through. } // The idea is to reduce this to the stdout case. // - pr.wait (); - - // With -MG we want to read dependency info even if there is - // an error (in case an outdated header file caused it). But - // with the GCC module mapper an error is non-negotiable, so - // to speak, and so we want to skip all of that. 
In fact, we - // now write directly to depdb without generating and then + // We now write directly to depdb without generating and then // parsing an intermadiate dependency makefile. // - pr.in_ofd = (ctype == compiler_type::gcc && mod_mapper) - ? auto_fd (nullfd) - : fdopen (*drmp, fdopen_mode::in); + pr.wait (); + pr.in_ofd = nullfd; } + else + { + // If we have no generated header support, then suppress all + // diagnostics (if things go badly we will restart with this + // support). + // + if (drmp == nullptr) // Dependency info goes to stdout. + { + assert (!sense_diag); // Note: could support if necessary. + // For VC with /P the dependency info and diagnostics all go + // to stderr so redirect it to stdout. + // + int err ( + cclass == compiler_class::msvc ? 1 : // stdout + !gen ? -2 : // /dev/null + diag_buffer::pipe (ctx, sense_diag /* force */)); + + pr = process ( + cpath, + args, + 0, + -1, + err, + nullptr, // CWD + env.empty () ? nullptr : env.data ()); + + if (cclass != compiler_class::msvc && gen) + { + dbuf.open (args[0], + move (pr.in_efd), + fdstream_mode::non_blocking); // Skip on stdout. + } + } + else // Dependency info goes to temporary file. + { + // Since we only need to read from one stream (dbuf) let's + // use the simpler blocking setup. + // + int err ( + !gen && !sense_diag ? -2 : // /dev/null + diag_buffer::pipe (ctx, sense_diag /* force */)); + + pr = process (cpath, + args, + 0, + 2, // Send stdout to stderr. + err, + nullptr, // CWD + env.empty () ? nullptr : env.data ()); + + if (gen || sense_diag) + { + dbuf.open (args[0], move (pr.in_efd)); + dbuf.read (sense_diag /* force */); + } + + if (sense_diag) + { + if (!dbuf.buf.empty ()) + { + puse = false; + dbuf.buf.clear (); // Discard. + } + } + + // The idea is to reduce this to the stdout case. + // + // Note that with -MG we want to read dependency info even + // if there is an error (in case an outdated header file + // caused it). 
+ // + pr.wait (); + pr.in_ofd = fdopen (*drmp, fdopen_mode::in); + } + } + + // Read and process dependency information, if any. + // if (pr.in_ofd != nullfd) { + // We have two cases here: reading from stdout and potentially + // stderr (dbuf) or reading from file (see the process startup + // code above for details). If we have to read from two + // streams, then we have to use the non-blocking setup. But we + // cannot use the non-blocking setup uniformly because on + // Windows it's only suppored for pipes. So things are going + // to get a bit hairy. + // + // And there is another twist to this: for MSVC we redirect + // stderr to stdout since the header dependency information is + // part of the diagnostics. If, however, there is some real + // diagnostics, we need to pass it through, potentially with + // buffering. The way we achieve this is by later opening dbuf + // in the EOF state and using it to buffer or stream the + // diagnostics. + // + bool nb (dbuf.is.is_open ()); + // We may not read all the output (e.g., due to a restart). // Before we used to just close the file descriptor to signal // to the other end that we are not interested in the rest. @@ -4576,20 +4473,69 @@ namespace build2 // impolite and complains, loudly (broken pipe). So now we are // going to skip until the end. // - ifdstream is (move (pr.in_ofd), - fdstream_mode::text | fdstream_mode::skip, - ifdstream::badbit); + // Note that this means we are not using skip on dbuf (see + // above for the destruction order details). + // + { + fdstream_mode m (fdstream_mode::text | + fdstream_mode::skip); + + if (nb) + m |= fdstream_mode::non_blocking; + + is.open (move (pr.in_ofd), m); + } + + fdselect_set fds; + if (nb) + fds = {is.fd (), dbuf.is.fd ()}; size_t skip (skip_count); string l, l2; // Reuse. 
for (bool first (true), second (false); !restart; ) { - if (eof (getline (is, l))) + if (nb) { - if (bad_error && !l2.empty ()) - text << l2; + fdselect_state& ist (fds[0]); + fdselect_state& dst (fds[1]); - break; + // We read until we reach EOF on both streams. + // + if (ist.fd == nullfd && dst.fd == nullfd) + break; + + if (ist.fd != nullfd && getline_non_blocking (is, l)) + { + if (eof (is)) + { + ist.fd = nullfd; + continue; + } + + // Fall through to parse (and clear) the line. + } + else + { + ifdselect (fds); + + if (dst.ready) + { + if (!dbuf.read ()) + dst.fd = nullfd; + } + + continue; + } + } + else + { + if (eof (getline (is, l))) + { + if (bad_error && !l2.empty ()) // MSVC only (see below). + dbuf.write (l2, true /* newline */); + + break; + } } l6 ([&]{trace << "header dependency line '" << l << "'";}); @@ -4640,9 +4586,15 @@ namespace build2 else { l2 = l; - bad_error = true; + + if (!bad_error) + { + dbuf.open_eof (args[0]); + bad_error = true; + } } + l.clear (); continue; } @@ -4652,6 +4604,7 @@ namespace build2 } first = false; + l.clear (); continue; } @@ -4659,8 +4612,13 @@ namespace build2 if (f.empty ()) // Some other diagnostics. { - text << l; - bad_error = true; + if (!bad_error) + { + dbuf.open_eof (args[0]); + bad_error = true; + } + + dbuf.write (l, true /* newline */); break; } @@ -4754,12 +4712,9 @@ namespace build2 if (l.empty () || l[0] != '^' || l[1] != ':' || l[2] != ' ') { - // @@ Hm, we don't seem to redirect stderr to stdout - // for this class of compilers so I wonder why - // we are doing this? - // if (!l.empty ()) - text << l; + l5 ([&]{trace << "invalid header dependency line '" + << l << "'";}); bad_error = true; break; @@ -4774,22 +4729,37 @@ namespace build2 // "^: \". // if (l.size () == 4 && l[3] == '\\') + { + l.clear (); continue; + } else pos = 3; // Skip "^: ". // Fall through to the 'second' block. } - if (second) - { - second = false; - next_make (l, pos); // Skip the source file. 
- } - while (pos != l.size ()) { - string f (next_make (l, pos)); + string f ( + make_parser::next ( + l, pos, make_parser::type::prereq).first); + + if (pos != l.size () && l[pos] == ':') + { + l5 ([&]{trace << "invalid header dependency line '" + << l << "'";}); + bad_error = true; + break; + } + + // Skip the source file. + // + if (second) + { + second = false; + continue; + } // Skip until where we left off. // @@ -4833,19 +4803,56 @@ namespace build2 } if (bad_error || md.deferred_failure) + { + // Note that it may be tempting to finish reading out the + // diagnostics before bailing out. But that may end up in + // a deadlock if the process gets blocked trying to write + // to stdout. + // break; + } + + l.clear (); + } + + // We may bail out early from the above loop in case of a + // restart or error. Which means the stderr stream (dbuf) may + // still be open and we need to close it before closing the + // stdout stream (which may try to skip). + // + // In this case we may also end up with incomplete diagnostics + // so discard it. + // + // Generally, it may be tempting to start thinking if we + // should discard buffered diagnostics in other cases, such as + // restart. But remember that during serial execution it will + // go straight to stderr so for consistency (and simplicity) + // we should just print it unless there are good reasons not + // to (also remember that in the restartable modes we normally + // redirect stderr to /dev/null; see the process startup code + // for details). + // + if (dbuf.is.is_open ()) + { + dbuf.is.close (); + dbuf.buf.clear (); } // Bail out early if we have deferred a failure. // + // Let's ignore any buffered diagnostics in this case since + // it would appear after the deferred failure note. 
+ // if (md.deferred_failure) { is.close (); - return make_pair (file_cache::entry (), false); + return; } - // In case of VC, we are parsing stderr and if things go - // south, we need to copy the diagnostics for the user to see. + // In case of VC, we are parsing redirected stderr and if + // things go south, we need to copy the diagnostics for the + // user to see. Note that we should have already opened dbuf + // at EOF above. // if (bad_error && cclass == compiler_class::msvc) { @@ -4860,7 +4867,7 @@ namespace build2 l.compare (p.first, 4, "1083") != 0 && msvc_header_c1083 (l, p)) { - diag_stream_lock () << l << endl; + dbuf.write (l, true /* newline */); } } } @@ -4883,27 +4890,42 @@ namespace build2 if (pr.wait ()) { - if (!bad_error) // Ignore expected successes (we are done). { - if (!restart && psrc) - psrcw.close (); + diag_record dr; - continue; + if (bad_error) + dr << fail << "expected error exit status from " + << x_lang << " compiler"; + + if (dbuf.is_open ()) + dbuf.close (move (dr)); // Throws if error. } - fail << "expected error exit status from " << x_lang - << " compiler"; + // Ignore expected successes (we are done). + // + if (!restart && psrc) + psrcw.close (); + + continue; } else if (pr.exit->normal ()) { if (good_error) // Ignore expected errors (restart). + { + if (dbuf.is_open ()) + dbuf.close (); + continue; + } } // Fall through. } catch (const io_error& e) { + // Ignore buffered diagnostics (since reading it could be the + // cause of this failure). + // if (pr.wait ()) fail << "unable to read " << x_lang << " compiler header " << "dependency output: " << e; @@ -4912,18 +4934,23 @@ namespace build2 } assert (pr.exit && !*pr.exit); - const process_exit& e (*pr.exit); + const process_exit& pe (*pr.exit); // For normal exit we assume the child process issued some // diagnostics. 
// - if (e.normal ()) + if (pe.normal ()) { - // If this run was with the generated header support then we - // have issued diagnostics and it's time to give up. + // If this run was with the generated header support then it's + // time to give up. // if (gen) + { + if (dbuf.is_open ()) + dbuf.close (args, pe, 2 /* verbosity */); + throw failed (); + } // Just to recap, being here means something is wrong with the // source: it can be a missing generated header, it can be an @@ -4941,7 +4968,12 @@ namespace build2 // or will issue diagnostics. // if (restart) + { + if (dbuf.is_open ()) + dbuf.close (); + l6 ([&]{trace << "trying again without generated headers";}); + } else { // In some pathological situations we may end up switching @@ -4966,19 +4998,24 @@ namespace build2 // example, because we have removed all the partially // preprocessed source files). // - if (force_gen_skip && *force_gen_skip == skip_count) { - diag_record dr (fail); + diag_record dr; + if (force_gen_skip && *force_gen_skip == skip_count) + { + dr << + fail << "inconsistent " << x_lang << " compiler behavior" << + info << "run the following two commands to investigate"; - dr << "inconsistent " << x_lang << " compiler behavior" << - info << "run the following two commands to investigate"; + dr << info; + print_process (dr, args.data ()); // No pipes. - dr << info; - print_process (dr, args.data ()); // No pipes. + init_args ((gen = true)); + dr << info << ""; + print_process (dr, args.data ()); // No pipes. + } - init_args ((gen = true)); - dr << info << ""; - print_process (dr, args.data ()); // No pipes. + if (dbuf.is_open ()) + dbuf.close (move (dr)); // Throws if error. } restart = true; @@ -4989,7 +5026,15 @@ namespace build2 continue; } else - run_finish (args, pr); // Throws. 
+ { + if (dbuf.is_open ()) + { + dbuf.close (args, pe, 2 /* verbosity */); + throw failed (); + } + else + run_finish (args, pr, 2 /* verbosity */); + } } catch (const process_error& e) { @@ -5015,7 +5060,9 @@ namespace build2 dd.expect (""); puse = puse && !reprocess && psrc; - return make_pair (move (psrc), puse); + + result.first = move (psrc); + result.second = puse; } // Return the translation unit information (last argument) and its @@ -5128,19 +5175,41 @@ namespace build2 append_options (args, cmode); append_sys_hdr_options (args); - // See perform_update() for details on /external:W0, /EHsc, /MD. + // Note: no append_diag_color_options() call since the + // diagnostics is discarded. + + // See perform_update() for details on the choice of options. // + { + bool sc (find_option_prefixes ( + {"/source-charset:", "-source-charset:"}, args)); + bool ec (find_option_prefixes ( + {"/execution-charset:", "-execution-charset:"}, args)); + + if (!sc && !ec) + args.push_back ("/utf-8"); + else + { + if (!sc) + args.push_back ("/source-charset:UTF-8"); + + if (!ec) + args.push_back ("/execution-charset:UTF-8"); + } + } + if (cvariant != "clang" && isystem (*this)) { - if (find_option_prefix ("/external:I", args) && - !find_option_prefix ("/external:W", args)) + if (find_option_prefixes ({"/external:I", "-external:I"}, args) && + !find_option_prefixes ({"/external:W", "-external:W"}, args)) args.push_back ("/external:W0"); } - if (x_lang == lang::cxx && !find_option_prefix ("/EH", args)) + if (x_lang == lang::cxx && + !find_option_prefixes ({"/EH", "-EH"}, args)) args.push_back ("/EHsc"); - if (!find_option_prefixes ({"/MD", "/MT"}, args)) + if (!find_option_prefixes ({"/MD", "/MT", "-MD", "-MT"}, args)) args.push_back ("/MD"); args.push_back ("/E"); @@ -5154,6 +5223,18 @@ namespace build2 } case compiler_class::gcc: { + append_options (args, cmode, + cmode.size () - (modules && clang ? 
1 : 0)); + append_sys_hdr_options (args); + + // Note: no append_diag_color_options() call since the + // diagnostics is discarded. + + // See perform_update() for details on the choice of options. + // + if (!find_option_prefix ("-finput-charset=", args)) + args.push_back ("-finput-charset=UTF-8"); + if (ot == otype::s) { if (tclass == "linux" || tclass == "bsd") @@ -5182,10 +5263,6 @@ namespace build2 } } - append_options (args, cmode, - cmode.size () - (modules && clang ? 1 : 0)); - append_sys_hdr_options (args); - args.push_back ("-E"); append_lang_options (args, md); @@ -5194,12 +5271,36 @@ namespace build2 // if (ps) { - if (ctype == compiler_type::gcc) + switch (ctype) { - // Note that only these two *plus* -x do the trick. - // - args.push_back ("-fpreprocessed"); - args.push_back ("-fdirectives-only"); + case compiler_type::gcc: + { + // Note that only these two *plus* -x do the trick. + // + args.push_back ("-fpreprocessed"); + args.push_back ("-fdirectives-only"); + break; + } + case compiler_type::clang: + { + // See below for details. + // + if (ctype == compiler_type::clang && + cmaj >= (cvariant != "apple" ? 15 : 16)) + { + if (find_options ({"-pedantic", "-pedantic-errors", + "-Wpedantic", "-Werror=pedantic"}, + args)) + { + args.push_back ("-Wno-gnu-line-marker"); + } + } + + break; + } + case compiler_type::msvc: + case compiler_type::icc: + assert (false); } } @@ -5253,10 +5354,10 @@ namespace build2 print_process (args); // We don't want to see warnings multiple times so ignore all - // diagnostics. + // diagnostics (thus no need for diag_buffer). // pr = process (cpath, - args.data (), + args, 0, -1, -2, nullptr, // CWD env.empty () ? nullptr : env.data ()); @@ -5326,7 +5427,15 @@ namespace build2 // accurate (parts of the translation unit could have been // #ifdef'ed out; see __build2_preprocess). // - return reprocess ? 
string () : move (p.checksum); + // Also, don't use the checksum for header units since it ignores + // preprocessor directives and may therefore cause us to ignore a + // change to an exported macro. @@ TODO: maybe we should add a + // flag to the parser not to waste time calculating the checksum + // in these cases. + // + return reprocess || ut == unit_type::module_header + ? string () + : move (p.checksum); } // Fall through. @@ -5357,7 +5466,7 @@ namespace build2 info << "then run failing command to display compiler diagnostics"; } else - run_finish (args, pr); // Throws. + run_finish (args, pr, 2 /* verbosity */); // Throws. } catch (const process_error& e) { @@ -5770,10 +5879,11 @@ namespace build2 // 1. There is no good place in prerequisite_targets to store the // exported flag (no, using the marking facility across match/execute // is a bad idea). So what we are going to do is put re-exported - // bmi{}s at the back and store (in the target's data pad) the start - // position. One bad aspect about this part is that we assume those - // bmi{}s have been matched by the same rule. But let's not kid - // ourselves, there will be no other rule that matches bmi{}s. + // bmi{}s at the back and store (in the target's auxiliary data + // storage) the start position. One bad aspect about this part is + // that we assume those bmi{}s have been matched by the same + // rule. But let's not kid ourselves, there will be no other rule + // that matches bmi{}s. // // @@ I think now we could use prerequisite_targets::data for this? // @@ -6160,11 +6270,11 @@ namespace build2 // Hash (we know it's a file). // - cs.append (static_cast<const file&> (*bt).path ().string ()); + cs.append (bt->as<file> ().path ().string ()); // Copy over bmi{}s from our prerequisites weeding out duplicates. // - if (size_t j = bt->data<match_data> ().modules.start) + if (size_t j = bt->data<match_data> (a).modules.start) { // Hard to say whether we should reserve or not. 
We will probably // get quite a bit of duplications. @@ -6186,7 +6296,7 @@ namespace build2 }) == imports.end ()) { pts.push_back (et); - cs.append (static_cast<const file&> (*et).path ().string ()); + cs.append (et->as<file> ().path ().string ()); // Add to the list of imports for further duplicate suppression. // We could have stored reference to the name (e.g., in score) @@ -6226,6 +6336,9 @@ namespace build2 // cc.config module and that is within our amalgmantion seems like a // good place. // + // @@ TODO: maybe we should cache this in compile_rule ctor like we + // do for the header cache? + // const scope* as (&rs); { const scope* ws (as->weak_scope ()); @@ -6241,7 +6354,7 @@ namespace build2 // This is also the module that registers the scope operation // callback that cleans up the subproject. // - if (cast_false<bool> ((*s)["cc.core.vars.loaded"])) + if (cast_false<bool> (s->vars["cc.core.vars.loaded"])) as = s; } while (s != ws); @@ -6377,7 +6490,10 @@ namespace build2 ps.push_back (prerequisite (lt)); for (prerequisite_member p: group_prerequisite_members (a, lt)) { - if (include (a, lt, p) != include_type::normal) // Excluded/ad hoc. + // Ignore update=match. + // + lookup l; + if (include (a, lt, p, &l) != include_type::normal) // Excluded/ad hoc. continue; if (p.is_a<libx> () || @@ -6394,8 +6510,9 @@ namespace build2 move (mf), nullopt, // Use default extension. target_decl::implied, - trace)); - file& bt (static_cast<file&> (p.first)); + trace, + true /* skip_find */)); + file& bt (p.first.as<file> ()); // Note that this is racy and someone might have created this target // while we were preparing the prerequisite list. @@ -6526,7 +6643,9 @@ namespace build2 // process_libraries (a, bs, nullopt, sys_lib_dirs, *f, la, 0, // lflags unused. 
- imp, lib, nullptr, true /* self */, + imp, lib, nullptr, + true /* self */, + false /* proc_opt_group */, &lib_cache); if (lt != nullptr) @@ -6611,7 +6730,10 @@ namespace build2 // for (prerequisite_member p: group_prerequisite_members (a, t)) { - if (include (a, t, p) != include_type::normal) // Excluded/ad hoc. + // Ignore update=match. + // + lookup l; + if (include (a, t, p, &l) != include_type::normal) // Excluded/ad hoc. continue; if (p.is_a<libx> () || @@ -6629,8 +6751,9 @@ namespace build2 move (mf), nullopt, // Use default extension. target_decl::implied, - trace)); - file& bt (static_cast<file&> (p.first)); + trace, + true /* skip_find */)); + file& bt (p.first.as<file> ()); // Note that this is racy and someone might have created this target // while we were preparing the prerequisite list. @@ -6668,7 +6791,7 @@ namespace build2 // Filter cl.exe noise (msvc.cxx). // void - msvc_filter_cl (ifdstream&, const path& src); + msvc_filter_cl (diag_buffer&, const path& src); // Append header unit-related options. // @@ -6898,12 +7021,11 @@ namespace build2 } target_state compile_rule:: - perform_update (action a, const target& xt) const + perform_update (action a, const target& xt, match_data& md) const { const file& t (xt.as<file> ()); const path& tp (t.path ()); - match_data md (move (t.data<match_data> ())); unit_type ut (md.type); context& ctx (t.ctx); @@ -6926,9 +7048,6 @@ namespace build2 }, md.modules.copied)); // See search_modules() for details. - const file& s (pr.second); - const path* sp (&s.path ()); - // Force recompilation in case of a deferred failure even if nothing // changed. // @@ -6945,11 +7064,14 @@ namespace build2 return *pr.first; } + const file& s (pr.second); + const path* sp (&s.path ()); + // Make sure depdb is no older than any of our prerequisites (see md.mt // logic description above for details). Also save the sequence start // time if doing mtime checks (see the depdb::check_mtime() call below). 
// - timestamp start (depdb::mtime_check () + timestamp start (!ctx.dry_run && depdb::mtime_check () ? system_clock::now () : timestamp_unknown); @@ -7016,7 +7138,6 @@ namespace build2 small_vector<string, 2> module_args; // Module options storage. size_t out_i (0); // Index of the -o option. - size_t lang_n (0); // Number of lang options. switch (cclass) { @@ -7037,13 +7158,40 @@ namespace build2 if (md.pp != preprocessed::all) append_sys_hdr_options (args); // Extra system header dirs (last). + // Note: could be overridden in mode. + // + append_diag_color_options (args); + + // Set source/execution charsets to UTF-8 unless a custom charset + // is specified. + // + // Note that clang-cl supports /utf-8 and /*-charset. + // + { + bool sc (find_option_prefixes ( + {"/source-charset:", "-source-charset:"}, args)); + bool ec (find_option_prefixes ( + {"/execution-charset:", "-execution-charset:"}, args)); + + if (!sc && !ec) + args.push_back ("/utf-8"); + else + { + if (!sc) + args.push_back ("/source-charset:UTF-8"); + + if (!ec) + args.push_back ("/execution-charset:UTF-8"); + } + } + // If we have any /external:I options but no /external:Wn, then add // /external:W0 to emulate the -isystem semantics. // if (cvariant != "clang" && isystem (*this)) { - if (find_option_prefix ("/external:I", args) && - !find_option_prefix ("/external:W", args)) + if (find_option_prefixes ({"/external:I", "-external:I"}, args) && + !find_option_prefixes ({"/external:W", "-external:W"}, args)) args.push_back ("/external:W0"); } @@ -7057,7 +7205,9 @@ namespace build2 // For C looks like no /EH* (exceptions supported but no C++ objects // destroyed) is a reasonable default. // - if (x_lang == lang::cxx && !find_option_prefix ("/EH", args)) + + if (x_lang == lang::cxx && + !find_option_prefixes ({"/EH", "-EH"}, args)) args.push_back ("/EHsc"); // The runtime is a bit more interesting. At first it may seem like @@ -7079,7 +7229,7 @@ namespace build2 // unreasonable thing to do). 
So by default we will always use the // release runtime. // - if (!find_option_prefixes ({"/MD", "/MT"}, args)) + if (!find_option_prefixes ({"/MD", "/MT", "-MD", "-MT"}, args)) args.push_back ("/MD"); msvc_sanitize_cl (args); @@ -7104,7 +7254,7 @@ namespace build2 // // @@ MOD: TODO deal with absent relo. // - if (find_options ({"/Zi", "/ZI"}, args)) + if (find_options ({"/Zi", "/ZI", "-Zi", "-ZI"}, args)) { if (fc) args.push_back ("/Fd:"); @@ -7150,6 +7300,65 @@ namespace build2 } case compiler_class::gcc: { + append_options (args, cmode); + + // Clang 15 introduced the unqualified-std-cast-call warning which + // warns about unqualified calls to std::move() and std::forward() + // (because they can be "hijacked" via ADL). Surprisingly, this + // warning is enabled by default, as opposed to with -Wextra or at + // least -Wall. It has also proven to be quite disruptive, causing a + // large number of warnings in a large number of packages. So we are + // going to "remap" it to -Wextra for now and in the future may + // "relax" it to -Wall and potentially to being enabled by default. + // See GitHub issue #259 for background and details. + // + if (x_lang == lang::cxx && + ctype == compiler_type::clang && + cmaj >= 15) + { + bool w (false); // Seen -W[no-]unqualified-std-cast-call + optional<bool> extra; // Seen -W[no-]extra + + for (const char* s: reverse_iterate (args)) + { + if (s != nullptr) + { + if (strcmp (s, "-Wunqualified-std-cast-call") == 0 || + strcmp (s, "-Wno-unqualified-std-cast-call") == 0) + { + w = true; + break; + } + + if (!extra) // Last seen option wins. + { + if (strcmp (s, "-Wextra") == 0) extra = true; + else if (strcmp (s, "-Wno-extra") == 0) extra = false; + } + } + } + + if (!w && (!extra || !*extra)) + args.push_back ("-Wno-unqualified-std-cast-call"); + } + + if (md.pp != preprocessed::all) + append_sys_hdr_options (args); // Extra system header dirs (last). + + // Note: could be overridden in mode. 
+ // + append_diag_color_options (args); + + // Set the input charset to UTF-8 unless a custom one is specified. + // + // Note that the execution charset (-fexec-charset) is UTF-8 by + // default. + // + // Note that early versions of Clang only recognize uppercase UTF-8. + // + if (!find_option_prefix ("-finput-charset=", args)) + args.push_back ("-finput-charset=UTF-8"); + if (ot == otype::s) { // On Darwin, Win32 -fPIC is the default. @@ -7253,11 +7462,6 @@ namespace build2 } } - append_options (args, cmode); - - if (md.pp != preprocessed::all) - append_sys_hdr_options (args); // Extra system header dirs (last). - append_header_options (env, args, header_args, a, t, md, md.dd); append_module_options (env, args, module_args, a, t, md, md.dd); @@ -7332,7 +7536,7 @@ namespace build2 args.push_back ("-c"); } - lang_n = append_lang_options (args, md); + append_lang_options (args, md); if (md.pp == preprocessed::all) { @@ -7381,19 +7585,32 @@ namespace build2 // the source file, not its preprocessed version (so that it's easy to // copy and re-run, etc). Only at level 3 and above print the real deal. // + // @@ TODO: why don't we print env (here and/or below)? Also link rule. + // if (verb == 1) - text << x_name << ' ' << s; + { + const char* name (x_assembler_cpp (s) ? "as-cpp" : + x_objective (s) ? x_obj_name : + x_name); + + print_diag (name, s, t); + } else if (verb == 2) print_process (args); // If we have the (partially) preprocessed output, switch to that. // - bool psrc (md.psrc); + // But we remember the original source/position to restore later. + // + bool psrc (md.psrc); // Note: false if cc.reprocess. bool ptmp (psrc && md.psrc.temporary); + pair<size_t, const char*> osrc; if (psrc) { args.pop_back (); // nullptr + osrc.second = args.back (); args.pop_back (); // sp + osrc.first = args.size (); sp = &md.psrc.path (); @@ -7403,25 +7620,40 @@ namespace build2 { case compiler_type::gcc: { - // The -fpreprocessed is implied by .i/.ii. 
But not when compiling - // a header unit (there is no .hi/.hii). + // -fpreprocessed is implied by .i/.ii unless compiling a header + // unit (there is no .hi/.hii). Also, we would need to pop -x + // since it takes precedence over the extension, which would mess + // up our osrc logic. So in the end it feels like always passing + // explicit -fpreprocessed is the way to go. // - if (ut == unit_type::module_header) - args.push_back ("-fpreprocessed"); - else - // Pop -x since it takes precedence over the extension. - // - // @@ I wonder why bother and not just add -fpreprocessed? Are - // we trying to save an option or does something break? - // - for (; lang_n != 0; --lang_n) - args.pop_back (); - + // Also note that similarly there is no .Si for .S files. + // + args.push_back ("-fpreprocessed"); args.push_back ("-fdirectives-only"); break; } case compiler_type::clang: { + // Clang 15 and later with -pedantic warns about GNU-style line + // markers that it wrote itself in the -frewrite-includes output + // (llvm-project issue 63284). So we suppress this warning unless + // compiling from source. + // + // In Apple Clang this warning/option are absent in 14.0.3 (which + // is said to be based on vanilla Clang 15.0.5) for some reason + // (let's hope it's because they patched it out rather than due to + // a misleading __LIBCPP_VERSION value). + // + if (ctype == compiler_type::clang && + cmaj >= (cvariant != "apple" ? 15 : 16)) + { + if (find_options ({"-pedantic", "-pedantic-errors", + "-Wpedantic", "-Werror=pedantic"}, args)) + { + args.push_back ("-Wno-gnu-line-marker"); + } + } + // Note that without -x Clang will treat .i/.ii as fully // preprocessed. // @@ -7470,45 +7702,38 @@ namespace build2 file_cache::read psrcr (psrc ? md.psrc.open () : file_cache::read ()); // VC cl.exe sends diagnostics to stdout. It also prints the file - // name being compiled as the first line. 
So for cl.exe we redirect - // stdout to a pipe, filter that noise out, and send the rest to - // stderr. + // name being compiled as the first line. So for cl.exe we filter + // that noise out. // - // For other compilers redirect stdout to stderr, in case any of - // them tries to pull off something similar. For sane compilers this - // should be harmless. + // For other compilers also redirect stdout to stderr, in case any + // of them tries to pull off something similar. For sane compilers + // this should be harmless. // bool filter (ctype == compiler_type::msvc); process pr (cpath, - args.data (), - 0, (filter ? -1 : 2), 2, + args, + 0, 2, diag_buffer::pipe (ctx, filter /* force */), nullptr, // CWD env.empty () ? nullptr : env.data ()); - if (filter) - { - try - { - ifdstream is ( - move (pr.in_ofd), fdstream_mode::text, ifdstream::badbit); + diag_buffer dbuf (ctx, args[0], pr); - msvc_filter_cl (is, *sp); + if (filter) + msvc_filter_cl (dbuf, *sp); - // If anything remains in the stream, send it all to stderr. - // Note that the eof check is important: if the stream is at - // eof, this and all subsequent writes to the diagnostics stream - // will fail (and you won't see a thing). - // - if (is.peek () != ifdstream::traits_type::eof ()) - diag_stream_lock () << is.rdbuf (); + dbuf.read (); - is.close (); - } - catch (const io_error&) {} // Assume exits with error. + // Restore the original source if we switched to preprocessed. + // + if (psrc) + { + args.resize (osrc.first); + args.push_back (osrc.second); + args.push_back (nullptr); } - run_finish (args, pr); + run_finish (dbuf, args, pr, 1 /* verbosity */); } catch (const process_error& e) { @@ -7559,12 +7784,14 @@ namespace build2 try { process pr (cpath, - args.data (), - 0, 2, 2, + args, + 0, 2, diag_buffer::pipe (ctx), nullptr, // CWD env.empty () ? 
nullptr : env.data ()); - run_finish (args, pr); + diag_buffer dbuf (ctx, args[0], pr); + dbuf.read (); + run_finish (dbuf, args, pr, 1 /* verbosity */); } catch (const process_error& e) { @@ -7595,25 +7822,27 @@ namespace build2 } target_state compile_rule:: - perform_clean (action a, const target& xt) const + perform_clean (action a, const target& xt, const target_type& srct) const { const file& t (xt.as<file> ()); + // Preprocessed file extension. + // + const char* pext (x_assembler_cpp (srct) ? ".Si" : + x_objective (srct) ? x_obj_pext : + x_pext); + // Compressed preprocessed file extension. // - auto cpext = [this, &t, s = string ()] () mutable -> const char* - { - return (s = t.ctx.fcache.compressed_extension (x_pext)).c_str (); - }; + string cpext (t.ctx.fcache->compressed_extension (pext)); clean_extras extras; - switch (ctype) { - case compiler_type::gcc: extras = {".d", x_pext, cpext (), ".t"}; break; - case compiler_type::clang: extras = {".d", x_pext, cpext ()}; break; - case compiler_type::msvc: extras = {".d", x_pext, cpext (), ".idb", ".pdb"};break; - case compiler_type::icc: extras = {".d"}; break; + case compiler_type::gcc: extras = {".d", pext, cpext.c_str (), ".t"}; break; + case compiler_type::clang: extras = {".d", pext, cpext.c_str ()}; break; + case compiler_type::msvc: extras = {".d", pext, cpext.c_str (), ".idb", ".pdb"}; break; + case compiler_type::icc: extras = {".d"}; break; } return perform_clean_extra (a, t, extras); diff --git a/libbuild2/cc/compile-rule.hxx b/libbuild2/cc/compile-rule.hxx index daea600..a9a22c4 100644 --- a/libbuild2/cc/compile-rule.hxx +++ b/libbuild2/cc/compile-rule.hxx @@ -8,6 +8,7 @@ #include <libbuild2/utility.hxx> #include <libbuild2/rule.hxx> +#include <libbuild2/dyndep.hxx> #include <libbuild2/file-cache.hxx> #include <libbuild2/cc/types.hxx> @@ -21,6 +22,8 @@ namespace build2 namespace cc { + class config_module; + // The order is arranged so that their integral values indicate whether // one is a 
"stronger" than another. // @@ -37,22 +40,25 @@ namespace build2 }; class LIBBUILD2_CC_SYMEXPORT compile_rule: public simple_rule, - virtual common + virtual common, + dyndep_rule { public: - compile_rule (data&&); + struct match_data; + + compile_rule (data&&, const scope&); virtual bool - match (action, target&, const string&) const override; + match (action, target&) const override; virtual recipe apply (action, target&) const override; target_state - perform_update (action, const target&) const; + perform_update (action, const target&, match_data&) const; target_state - perform_clean (action, const target&) const; + perform_clean (action, const target&, const target_type&) const; public: using appended_libraries = small_vector<const target*, 256>; @@ -60,7 +66,8 @@ namespace build2 void append_library_options (appended_libraries&, strings&, const scope&, - action, const file&, bool, linfo) const; + action, const file&, bool, linfo, + bool, bool) const; optional<path> find_system_header (const path&) const; @@ -70,7 +77,6 @@ namespace build2 functions (function_family&, const char*); // functions.cxx private: - struct match_data; using environment = small_vector<const char*, 2>; template <typename T> @@ -82,7 +88,7 @@ namespace build2 append_library_options (appended_libraries&, T&, const scope&, const scope*, - action, const file&, bool, linfo, + action, const file&, bool, linfo, bool, library_cache*) const; template <typename T> @@ -91,66 +97,44 @@ namespace build2 const scope&, action, const target&, linfo) const; - // Mapping of include prefixes (e.g., foo in <foo/bar>) for auto- - // generated headers to directories where they will be generated. - // - // We are using a prefix map of directories (dir_path_map) instead of - // just a map in order to also cover sub-paths (e.g., <foo/more/bar> if - // we continue with the example). Specifically, we need to make sure we - // don't treat foobar as a sub-directory of foo. 
- // - // The priority is used to decide who should override whom. Lesser - // values are considered higher priority. See append_prefixes() for - // details. - // - // @@ The keys should be normalized. - // - struct prefix_value - { - dir_path directory; - size_t priority; - }; - using prefix_map = dir_path_map<prefix_value>; + using prefix_map = dyndep_rule::prefix_map; + using srcout_map = dyndep_rule::srcout_map; void - append_prefixes (prefix_map&, const target&, const variable&) const; + append_prefixes (prefix_map&, + const scope&, const target&, + const variable&) const; void append_library_prefixes (appended_libraries&, prefix_map&, const scope&, - action, target&, linfo) const; + action, const target&, linfo) const; prefix_map - build_prefix_map (const scope&, action, target&, linfo) const; + build_prefix_map (const scope&, action, const target&, linfo) const; - small_vector<const target_type*, 2> - map_extension (const scope&, const string&, const string&) const; - - // Src-to-out re-mapping. See extract_headers() for details. 
- // - using srcout_map = path_map<dir_path>; + struct gcc_module_mapper_state; - struct module_mapper_state; - - bool - gcc_module_mapper (module_mapper_state&, + optional<bool> + gcc_module_mapper (gcc_module_mapper_state&, action, const scope&, file&, linfo, - ifdstream&, ofdstream&, + const string&, ofdstream&, depdb&, bool&, bool&, optional<prefix_map>&, srcout_map&) const; pair<const file*, bool> enter_header (action, const scope&, file&, linfo, path&&, bool, bool, - optional<prefix_map>&, srcout_map&) const; + optional<prefix_map>&, const srcout_map&) const; optional<bool> inject_header (action, file&, const file&, timestamp, bool) const; - pair<file_cache::entry, bool> + void extract_headers (action, const scope&, file&, linfo, const file&, match_data&, - depdb&, bool&, timestamp, module_imports&) const; + depdb&, bool&, timestamp, module_imports&, + pair<file_cache::entry, bool>&) const; string parse_unit (action, file&, linfo, @@ -201,6 +185,7 @@ namespace build2 private: const string rule_id; + const config_module* header_cache_; }; } } diff --git a/libbuild2/cc/functions.cxx b/libbuild2/cc/functions.cxx index cafb7f0..9d408af 100644 --- a/libbuild2/cc/functions.cxx +++ b/libbuild2/cc/functions.cxx @@ -13,11 +13,10 @@ #include <libbuild2/cc/module.hxx> #include <libbuild2/cc/utility.hxx> +#include <libbuild2/functions-name.hxx> // to_target() + namespace build2 { - const target& - to_target (const scope&, name&&, name&&); // libbuild2/functions-name.cxx - namespace cc { using namespace bin; @@ -47,8 +46,13 @@ namespace build2 if (rs == nullptr) fail << f.name << " called out of project"; - if (bs->ctx.phase != run_phase::execute) - fail << f.name << " can only be called during execution"; + // Note that we also allow calling this during match since an ad hoc + // recipe with dynamic dependency extraction (depdb-dyndep) executes its + // depdb preamble during match (after matching all the prerequisites). 
+ // + if (bs->ctx.phase != run_phase::match && + bs->ctx.phase != run_phase::execute) + fail << f.name << " can only be called from recipe"; const module* m (rs->find_module<module> (d.x)); @@ -57,6 +61,9 @@ namespace build2 // We can assume these are present due to function's types signature. // + if (vs[0].null) + throw invalid_argument ("null value"); + names& ts_ns (vs[0].as<names> ()); // <targets> // In a somewhat hackish way strip the outer operation to match how we @@ -70,20 +77,40 @@ namespace build2 { name& n (*i), o; const target& t (to_target (*bs, move (n), move (n.pair ? *++i : o))); + + if (!t.matched (a)) + fail << t << " is not matched" << + info << "make sure this target is listed as prerequisite"; + d.f (r, vs, *m, *bs, a, t); } return value (move (r)); } - // Common thunk for $x.lib_*(<targets>, <otype> [, ...]) functions. + // Common thunk for $x.lib_*(...) functions. + // + // The two supported function signatures are: + // + // $x.lib_*(<targets>, <otype> [, ...]]) + // + // $x.lib_*(<targets>) + // + // For the first signature, the passed targets cannot be library groups + // (so they are always file-based) and linfo is always present. + // + // For the second signature, targets can only be utility libraries + // (including the libul{} group). + // + // If <otype> in the first signature is NULL, then it is treated as + // the second signature. // struct lib_thunk_data { const char* x; void (*f) (void*, strings&, const vector_view<value>&, const module&, const scope&, - action, const file&, bool, linfo); + action, const target&, bool, optional<linfo>); }; static value @@ -102,21 +129,27 @@ namespace build2 if (rs == nullptr) fail << f.name << " called out of project"; - if (bs->ctx.phase != run_phase::execute) - fail << f.name << " can only be called during execution"; + if (bs->ctx.phase != run_phase::match && // See above. 
+ bs->ctx.phase != run_phase::execute) + fail << f.name << " can only be called from recipe"; const module* m (rs->find_module<module> (d.x)); if (m == nullptr) fail << f.name << " called without " << d.x << " module loaded"; - // We can assume these are present due to function's types signature. + // We can assume this is present due to function's types signature. // + if (vs[0].null) + throw invalid_argument ("null value"); + names& ts_ns (vs[0].as<names> ()); // <targets> - names& ot_ns (vs[1].as<names> ()); // <otype> - linfo li; + optional<linfo> li; + if (vs.size () > 1 && !vs[1].null) { + names& ot_ns (vs[1].as<names> ()); // <otype> + string t (convert<string> (move (ot_ns))); const target_type* tt (bs->find_target_type (t)); @@ -162,17 +195,22 @@ namespace build2 name& n (*i), o; const target& t (to_target (*bs, move (n), move (n.pair ? *++i : o))); - const file* f; bool la (false); - - if ((la = (f = t.is_a<libux> ())) || - (la = (f = t.is_a<liba> ())) || - ( (f = t.is_a<libs> ()))) + if (li + ? ((la = t.is_a<libux> ()) || + (la = t.is_a<liba> ()) || + ( t.is_a<libs> ())) + : ((la = t.is_a<libux> ()) || + ( t.is_a<libul> ()))) { - d.f (ls, r, vs, *m, *bs, a, *f, la, li); + if (!t.matched (a)) + fail << t << " is not matched" << + info << "make sure this target is listed as prerequisite"; + + d.f (ls, r, vs, *m, *bs, a, t, la, li); } else - fail << t << " is not a library target"; + fail << t << " is not a library of expected type"; } return value (move (r)); @@ -199,33 +237,61 @@ namespace build2 void compile_rule:: functions (function_family& f, const char* x) { - // $<module>.lib_poptions(<lib-targets>, <otype>) + // $<module>.lib_poptions(<lib-targets>[, <otype>[, <original>]]) // // Return the preprocessor options that should be passed when compiling // sources that depend on the specified libraries. The second argument // is the output target type (obje, objs, etc). 
// + // The output target type may be omitted for utility libraries (libul{} + // or libu[eas]{}). In this case, only "common interface" options will + // be returned for lib{} dependencies. This is primarily useful for + // obtaining poptions to be passed to tools other than C/C++ compilers + // (for example, Qt moc). + // + // If <original> is true, then return the original -I options without + // performing any translation (for example, to -isystem or /external:I). + // This is the default if <otype> is omitted. To get the translation for + // the common interface options, pass [null] for <otype> and true for + // <original>. + // // Note that passing multiple targets at once is not a mere convenience: // this also allows for more effective duplicate suppression. // - // Note also that this function can only be called during execution - // after all the specified library targets have been matched. Normally - // it is used in ad hoc recipes to implement custom compilation. + // Note also that this function can only be called during execution (or, + // carefully, during match) after all the specified library targets have + // been matched. Normally it is used in ad hoc recipes to implement + // custom compilation. // // Note that this function is not pure. // f.insert (".lib_poptions", false). - insert<lib_thunk_data, names, names> ( + insert<lib_thunk_data, names, optional<names*>, optional<names>> ( &lib_thunk<appended_libraries>, lib_thunk_data { x, [] (void* ls, strings& r, - const vector_view<value>&, const module& m, const scope& bs, - action a, const file& l, bool la, linfo li) + const vector_view<value>& vs, const module& m, const scope& bs, + action a, const target& l, bool la, optional<linfo> li) { + // If this is libul{}, get the matched member (see bin::libul_rule + // for details). + // + const file& f ( + la || li + ? 
l.as<file> () + : (la = true, + l.prerequisite_targets[a].back ().target->as<file> ())); + + bool common (!li); + bool original (vs.size () > 2 ? convert<bool> (vs[2]) : !li); + + if (!li) + li = link_info (bs, link_type (f).type); + m.append_library_options ( *static_cast<appended_libraries*> (ls), r, - bs, a, l, la, li); + bs, a, f, la, *li, common, original); }}); // $<module>.find_system_header(<name>) @@ -289,9 +355,10 @@ namespace build2 // Note that passing multiple targets at once is not a mere convenience: // this also allows for more effective duplicate suppression. // - // Note also that this function can only be called during execution - // after all the specified library targets have been matched. Normally - // it is used in ad hoc recipes to implement custom linking. + // Note also that this function can only be called during execution (or, + // carefully, during match) after all the specified library targets have + // been matched. Normally it is used in ad hoc recipes to implement + // custom linking. // // Note that this function is not pure. 
// @@ -302,12 +369,15 @@ namespace build2 x, [] (void* ls, strings& r, const vector_view<value>& vs, const module& m, const scope& bs, - action a, const file& l, bool la, linfo li) + action a, const target& l, bool la, optional<linfo> li) { lflags lf (0); bool rel (true); if (vs.size () > 2) { + if (vs[2].null) + throw invalid_argument ("null value"); + for (const name& f: vs[2].as<names> ()) { string s (convert<string> (name (f))); @@ -326,7 +396,8 @@ namespace build2 m.append_libraries ( *static_cast<appended_libraries*> (ls), r, nullptr /* sha256 */, nullptr /* update */, timestamp_unknown, - bs, a, l, la, lf, li, nullopt /* for_install */, self, rel); + bs, a, l.as<file> (), la, lf, *li, + nullopt /* for_install */, self, rel); }}); // $<module>.lib_rpaths(<lib-targets>, <otype> [, <link> [, <self>]]) @@ -358,13 +429,12 @@ namespace build2 x, [] (void* ls, strings& r, const vector_view<value>& vs, const module& m, const scope& bs, - action a, const file& l, bool la, linfo li) + action a, const target& l, bool la, optional<linfo> li) { bool link (vs.size () > 2 ? convert<bool> (vs[2]) : false); bool self (vs.size () > 3 ? convert<bool> (vs[3]) : true); m.rpath_libraries (*static_cast<rpathed_libraries*> (ls), r, - bs, - a, l, la, li, link, self); + bs, a, l.as<file> (), la, *li, link, self); }}); // $cxx.obj_modules(<obj-targets>) @@ -422,7 +492,16 @@ namespace build2 // look for cc.export.libs and <module>.export.libs. // // 3. No member/group selection/linkup: we resolve *.export.libs on - // whatever is listed. + // whatever is listed (so no liba{}/libs{} overrides will be + // considered). + // + // Because of (2) and (3), this functionality should only be used on a + // controlled list of libraries (usually libraries that belong to the + // same family as this library). + // + // Note that a similar deduplication is also performed when processing + // the libraries. 
However, it may still make sense to do it once at the + // source for really severe cases (like Boost). // // Note that this function is not pure. // @@ -450,6 +529,9 @@ namespace build2 // We can assume the argument is present due to function's types // signature. // + if (vs[0].null) + throw invalid_argument ("null value"); + names& r (vs[0].as<names> ()); m->deduplicate_export_libs (*bs, vector<name> (r.begin (), r.end ()), diff --git a/libbuild2/cc/gcc.cxx b/libbuild2/cc/gcc.cxx index 30f2092..b553c8c 100644 --- a/libbuild2/cc/gcc.cxx +++ b/libbuild2/cc/gcc.cxx @@ -45,6 +45,13 @@ namespace build2 d = dir_path (o, 2, string::npos); else continue; + + // Ignore relative paths. Or maybe we should warn? + // + if (d.relative ()) + continue; + + d.normalize (); } catch (const invalid_path& e) { @@ -52,13 +59,29 @@ namespace build2 << o << "'"; } - // Ignore relative paths. Or maybe we should warn? - // - if (!d.relative ()) - r.push_back (move (d)); + r.push_back (move (d)); } } +#ifdef _WIN32 + // Some misconfigured MinGW GCC builds add absolute POSIX directories to + // their built-in search paths (e.g., /mingw/{include,lib}) which GCC then + // interprets as absolute paths relative to the current drive (so the set + // of built-in search paths starts depending on where we run things from). + // + // While that's definitely misguided, life is short and we don't want to + // waste it explaining this in long mailing list threads and telling + // people to complain to whomever built their GCC. So we will just + // recreate the behavior in a way that's consistent with GCC and let + // people discover this on their own. + // + static inline void + add_current_drive (string& s) + { + s.insert (0, work.string (), 0, 2); // Add e.g., `c:`. + } +#endif + // Extract system header search paths from GCC (gcc/g++) or compatible // (Clang, Intel) using the `-v -E </dev/null` method. 
// @@ -113,101 +136,94 @@ namespace build2 if (verb >= 3) print_process (env, args); + // Open pipe to stderr, redirect stdin and stdout to /dev/null. + // + process pr (run_start ( + env, + args, + -2, /* stdin */ + -2, /* stdout */ + -1 /* stderr */)); try { - //@@ TODO: why don't we use run_start() here? Because it's unable to - // open pipe for stderr and we need to change it first, for example, - // making the err parameter a file descriptor rather than a flag. - // + ifdstream is ( + move (pr.in_efd), fdstream_mode::skip, ifdstream::badbit); - // Open pipe to stderr, redirect stdin and stdout to /dev/null. + // Normally the system header paths appear between the following + // lines: // - process pr (xc, - args.data (), - -2, /* stdin */ - -2, /* stdout */ - -1, /* stderr */ - nullptr /* cwd */, - env.vars); - - try + // #include <...> search starts here: + // End of search list. + // + // The exact text depends on the current locale. What we can rely on + // is the presence of the "#include <...>" substring in the "opening" + // line and the fact that the paths are indented with a single space + // character, unlike the "closing" line. + // + // Note that on Mac OS we will also see some framework paths among + // system header paths, followed with a comment. For example: + // + // /Library/Frameworks (framework directory) + // + // For now we ignore framework paths and to filter them out we will + // only consider valid paths to existing directories, skipping those + // which we fail to normalize or stat. @@ Maybe this is a bit too + // loose, especially compared to gcc_library_search_dirs()? + // + string s; + for (bool found (false); getline (is, s); ) { - ifdstream is ( - move (pr.in_efd), fdstream_mode::skip, ifdstream::badbit); - - // Normally the system header paths appear between the following - // lines: - // - // #include <...> search starts here: - // End of search list. - // - // The exact text depends on the current locale. 
What we can rely on - // is the presence of the "#include <...>" substring in the - // "opening" line and the fact that the paths are indented with a - // single space character, unlike the "closing" line. - // - // Note that on Mac OS we will also see some framework paths among - // system header paths, followed with a comment. For example: - // - // /Library/Frameworks (framework directory) - // - // For now we ignore framework paths and to filter them out we will - // only consider valid paths to existing directories, skipping those - // which we fail to normalize or stat. - // - string s; - for (bool found (false); getline (is, s); ) + if (!found) + found = s.find ("#include <...>") != string::npos; + else { - if (!found) - found = s.find ("#include <...>") != string::npos; - else + if (s[0] != ' ') + break; + + dir_path d; + try { - if (s[0] != ' ') - break; - - try - { - dir_path d (s, 1, s.size () - 1); - - if (d.absolute () && exists (d, true) && - find (r.begin (), r.end (), d.normalize ()) == r.end ()) - r.emplace_back (move (d)); - } - catch (const invalid_path&) - { - // Skip this path. - } - } - } + string ds (s, 1, s.size () - 1); - is.close (); // Don't block. +#ifdef _WIN32 + if (path_traits::is_separator (ds[0])) + add_current_drive (ds); +#endif + d = dir_path (move (ds)); - if (!pr.wait ()) - { - // We have read stderr so better print some diagnostics. - // - diag_record dr (fail); + if (d.relative () || !exists (d, true)) + continue; - dr << "failed to extract " << x_lang << " header search paths" << - info << "command line: "; + d.normalize (); + } + catch (const invalid_path&) + { + continue; + } - print_process (dr, args); + if (find (r.begin (), r.end (), d) == r.end ()) + r.emplace_back (move (d)); } } - catch (const io_error&) + + is.close (); // Don't block. + + if (!run_wait (args, pr)) { - pr.wait (); - fail << "error reading " << x_lang << " compiler -v -E output"; + // We have read stderr so better print some diagnostics. 
+ // + diag_record dr (fail); + + dr << "failed to extract " << x_lang << " header search paths" << + info << "command line: "; + + print_process (dr, args); } } - catch (const process_error& e) + catch (const io_error&) { - error << "unable to execute " << args[0] << ": " << e; - - if (e.child) - exit (1); - - throw failed (); + run_wait (args, pr); + fail << "error reading " << x_lang << " compiler -v -E output"; } // It's highly unlikely not to have any system directories. More likely @@ -271,6 +287,9 @@ namespace build2 // Open pipe to stdout. // + // Note: this function is called in the serial load phase and so no + // diagnostics buffering is needed. + // process pr (run_start (env, args, 0, /* stdin */ @@ -305,7 +324,7 @@ namespace build2 // by that and let run_finish() deal with it. } - run_finish (args, pr); + run_finish (args, pr, 2 /* verbosity */); if (l.empty ()) fail << "unable to extract " << x_lang << " compiler system library " @@ -334,9 +353,35 @@ namespace build2 // for (string::size_type b (0);; e = l.find (d, (b = e + 1))) { - dir_path d (l, b, (e != string::npos ? e - b : e)); + dir_path d; + try + { + string ds (l, b, (e != string::npos ? e - b : e)); + + // Skip empty entries (sometimes found in random MinGW toolchains). 
+ // + if (!ds.empty ()) + { +#ifdef _WIN32 + if (path_traits::is_separator (ds[0])) + add_current_drive (ds); +#endif + + d = dir_path (move (ds)); + + if (d.relative ()) + throw invalid_path (move (d).string ()); + + d.normalize (); + } + } + catch (const invalid_path& e) + { + fail << "invalid directory '" << e.path << "'" << " in " + << args[0] << " -print-search-dirs output"; + } - if (find (r.begin (), r.end (), d.normalize ()) == r.end ()) + if (!d.empty () && find (r.begin (), r.end (), d) == r.end ()) r.emplace_back (move (d)); if (e == string::npos) diff --git a/libbuild2/cc/guess.cxx b/libbuild2/cc/guess.cxx index ff06c5f..a0ed34b 100644 --- a/libbuild2/cc/guess.cxx +++ b/libbuild2/cc/guess.cxx @@ -106,7 +106,7 @@ namespace build2 else if (id.compare (0, p, "icc" ) == 0) type = compiler_type::icc; else throw invalid_argument ( - "invalid compiler type '" + string (id, 0, p) + "'"); + "invalid compiler type '" + string (id, 0, p) + '\''); if (p != string::npos) { @@ -181,12 +181,12 @@ namespace build2 // could also be because there is something wrong with the compiler or // options but that we simply leave to blow up later). // - process pr (run_start (3 /* verbosity */, + process pr (run_start (3 /* verbosity */, xp, args, - -1 /* stdin */, - -1 /* stdout */, - false /* error */)); + -1 /* stdin */, + -1 /* stdout */, + 1 /* stderr (to stdout) */)); string l, r; try { @@ -222,7 +222,7 @@ namespace build2 // that. } - if (!run_finish_code (args.data (), pr, l)) + if (!run_finish_code (args.data (), pr, l, 2 /* verbosity */)) r = "none"; if (r.empty ()) @@ -262,6 +262,8 @@ namespace build2 " stdlib:=\"freebsd\" \n" "# elif defined(__NetBSD__) \n" " stdlib:=\"netbsd\" \n" +"# elif defined(__OpenBSD__) \n" +" stdlib:=\"openbsd\" \n" "# elif defined(__APPLE__) \n" " stdlib:=\"apple\" \n" "# elif defined(__EMSCRIPTEN__) \n" @@ -410,11 +412,13 @@ namespace build2 // // Note that Visual Studio versions prior to 15.0 are not supported. 
// + // Note also the directories are absolute and normalized. + // struct msvc_info { - dir_path msvc_dir; // VC directory (...\Tools\MSVC\<ver>\). - dir_path psdk_dir; // Platfor SDK version (under Include/, Lib/, etc). - string psdk_ver; // Platfor SDK directory (...\Windows Kits\<ver>\). + dir_path msvc_dir; // VC tools directory (...\Tools\MSVC\<ver>\). + dir_path psdk_dir; // Platform SDK directory (...\Windows Kits\<ver>\). + string psdk_ver; // Platform SDK version (under Include/, Lib/, etc). }; #if defined(_WIN32) && !defined(BUILD2_BOOTSTRAP) @@ -456,13 +460,16 @@ namespace build2 {0x87, 0xBF, 0xD5, 0x77, 0x83, 0x8F, 0x1D, 0x5C}}; // If cl is not empty, then find an installation that contains this cl.exe - // path. + // path. In this case the path must be absolute and normalized. // static optional<msvc_info> - find_msvc (const path& cl = path ()) + find_msvc (const path& cl = path ()) { using namespace butl; + assert (cl.empty () || + (cl.absolute () && cl.normalized (false /* sep */))); + msvc_info r; // Try to obtain the MSVC directory. @@ -528,7 +535,7 @@ namespace build2 // Note: we cannot use bstr_t due to the Clang 9.0 bug #42842. // BSTR p; - if (vs->ResolvePath (L"VC", &p) != S_OK) + if (vs->ResolvePath (L"VC", &p) != S_OK) return dir_path (); unique_ptr<wchar_t, bstr_deleter> deleter (p); @@ -634,36 +641,73 @@ namespace build2 return nullopt; } - // Read the VC version from the file and bail out on error. + // If cl.exe path is not specified, then deduce the default VC tools + // directory for this Visual Studio instance. Otherwise, extract the + // tools directory from this path. + // + // Note that in the latter case we could potentially avoid the above + // iterating over the VS instances, but let's make sure that the + // specified cl.exe path actually belongs to one of them as a sanity + // check. // - string vc_ver; // For example, 14.23.28105. + if (cl.empty ()) + { + // Read the VC version from the file and bail out on error. 
+ // + string vc_ver; // For example, 14.23.28105. - path vp ( - r.msvc_dir / - path ("Auxiliary\\Build\\Microsoft.VCToolsVersion.default.txt")); + path vp ( + r.msvc_dir / + path ("Auxiliary\\Build\\Microsoft.VCToolsVersion.default.txt")); - try - { - ifdstream is (vp); - vc_ver = trim (is.read_text ()); - } - catch (const io_error&) {} + try + { + ifdstream is (vp); + vc_ver = trim (is.read_text ()); + } + catch (const io_error&) {} - // Make sure that the VC version directory exists. - // - if (!vc_ver.empty ()) - try - { - ((r.msvc_dir /= "Tools") /= "MSVC") /= vc_ver; + if (vc_ver.empty ()) + return nullopt; + + // Make sure that the VC version directory exists. + // + try + { + ((r.msvc_dir /= "Tools") /= "MSVC") /= vc_ver; - if (!dir_exists (r.msvc_dir)) - r.msvc_dir.clear (); + if (!dir_exists (r.msvc_dir)) + return nullopt; + } + catch (const invalid_path&) {return nullopt;} + catch (const system_error&) {return nullopt;} } - catch (const invalid_path&) {} - catch (const system_error&) {} + else + { + (r.msvc_dir /= "Tools") /= "MSVC"; - if (r.msvc_dir.empty ()) - return nullopt; + // Extract the VC tools version from the cl.exe path and append it + // to r.msvc_dir. + // + if (!cl.sub (r.msvc_dir)) + return nullopt; + + // For example, 14.23.28105\bin\Hostx64\x64\cl.exe. + // + path p (cl.leaf (r.msvc_dir)); // Can't throw. + + auto i (p.begin ()); // Tools version. + if (i == p.end ()) + return nullopt; + + r.msvc_dir /= *i; // Can't throw. + + // For good measure, make sure that the tools version is not the + // last component in the cl.exe path. + // + if (++i == p.end ()) + return nullopt; + } } // Try to obtain the latest Platform SDK directory and version. 
@@ -717,7 +761,7 @@ namespace build2 // for (const dir_entry& de: dir_iterator (r.psdk_dir / dir_path ("Include"), - false /* ignore_dangling */)) + dir_iterator::no_follow)) { if (de.type () == entry_type::directory) { @@ -735,6 +779,16 @@ namespace build2 return nullopt; } + try + { + r.msvc_dir.normalize (); + r.psdk_dir.normalize (); + } + catch (const invalid_path&) + { + return nullopt; + } + return r; } #endif @@ -775,7 +829,8 @@ namespace build2 // Note: allowed to change pre if succeeds. // static guess_result - guess (const char* xm, + guess (context& ctx, + const char* xm, lang xl, const path& xc, const strings& x_mo, @@ -926,10 +981,12 @@ namespace build2 // We try to find the matching installation only for MSVC (for Clang // we extract this information from the compiler). // - if (xc.absolute () && - (pt == type::msvc && !pv)) + if (xc.absolute () && (pt == type::msvc && !pv)) { - if (optional<msvc_info> mi = find_msvc (xc)) + path cl (xc); // Absolute but may not be normalized. + cl.normalize (); // Can't throw since this is an existing path. + + if (optional<msvc_info> mi = find_msvc (cl)) { search_info = info_ptr ( new msvc_info (move (*mi)), msvc_info_deleter); @@ -965,7 +1022,7 @@ namespace build2 #endif string cache; - auto run = [&cs, &env, &args, &cache] ( + auto run = [&ctx, &cs, &env, &args, &cache] ( const char* o, auto&& f, bool checksum = false) -> guess_result @@ -973,9 +1030,10 @@ namespace build2 args[args.size () - 2] = o; cache.clear (); return build2::run<guess_result> ( + ctx, 3 /* verbosity */, env, - args.data (), + args, forward<decltype (f)> (f), false /* error */, false /* ignore_exit */, @@ -1022,7 +1080,7 @@ namespace build2 // The gcc -v output will have a last line in the form: // - // "gcc version X.Y[.Z][...] ..." + // "gcc version X[.Y[.Z]][...] ..." // // The "version" word can probably be translated. 
For example: // @@ -1034,6 +1092,7 @@ namespace build2 // gcc version 5.1.0 (Ubuntu 5.1.0-0ubuntu11~14.04.1) // gcc version 6.0.0 20160131 (experimental) (GCC) // gcc version 9.3-win32 20200320 (GCC) + // gcc version 10-win32 20220324 (GCC) // if (cache.empty ()) { @@ -1273,7 +1332,11 @@ namespace build2 // const char* evars[] = {"CL=", "_CL_=", nullptr}; - r = build2::run<guess_result> (3, process_env (xp, evars), f, false); + r = build2::run<guess_result> (ctx, + 3, + process_env (xp, evars), + f, + false); if (r.empty ()) { @@ -1424,10 +1487,12 @@ namespace build2 // And VC 16 seems to have the runtime version 14.1 (and not 14.2, as // one might expect; DLLs are still *140.dll but there are now _1 and _2 // variants for, say, msvcp140.dll). We will, however, call it 14.2 - // (which is the version of the "toolset") in our target triplet. + // (which is the version of the "toolset") in our target triplet. And we + // will call VC 17 14.3 (which is also the version of the "toolset"). // // year ver cl crt/dll toolset // + // 2022 17.X 19.3X 14.?/140 14.3X // 2019 16.X 19.2X 14.2/140 14.2X // 2017 15.9 19.16 14.1/140 14.16 // 2017 15.8 19.15 14.1/140 @@ -1446,7 +1511,8 @@ namespace build2 // // _MSC_VER is the numeric cl version, e.g., 1921 for 19.21. // - /**/ if (v.major == 19 && v.minor >= 20) return "14.2"; + /**/ if (v.major == 19 && v.minor >= 30) return "14.3"; + else if (v.major == 19 && v.minor >= 20) return "14.2"; else if (v.major == 19 && v.minor >= 10) return "14.1"; else if (v.major == 19 && v.minor == 0) return "14.0"; else if (v.major == 18 && v.minor == 0) return "12.0"; @@ -1470,8 +1536,8 @@ namespace build2 // Studio command prompt puts into INCLUDE) including any paths from the // compiler mode and their count. // - // Note that currently we don't add any ATL/MFC or WinRT paths (but could - // do that probably first checking if they exist/empty). 
+ // Note that currently we don't add any ATL/MFC paths (but could do that + // probably first checking if they exist/empty). // static pair<dir_paths, size_t> msvc_hdr (const msvc_info& mi, const strings& mo) @@ -1483,6 +1549,8 @@ namespace build2 msvc_extract_header_search_dirs (mo, r); size_t rn (r.size ()); + // Note: the resulting directories are normalized by construction. + // r.push_back (dir_path (mi.msvc_dir) /= "include"); // This path structure only appeared in Platform SDK 10 (if anyone wants @@ -1496,6 +1564,7 @@ namespace build2 r.push_back (dir_path (d) /= "ucrt" ); r.push_back (dir_path (d) /= "shared"); r.push_back (dir_path (d) /= "um" ); + r.push_back (dir_path (d) /= "winrt" ); } return make_pair (move (r), rn); @@ -1531,6 +1600,8 @@ namespace build2 msvc_extract_library_search_dirs (mo, r); size_t rn (r.size ()); + // Note: the resulting directories are normalized by construction. + // r.push_back ((dir_path (mi.msvc_dir) /= "lib") /= cpu); // This path structure only appeared in Platform SDK 10 (if anyone wants @@ -1585,7 +1656,8 @@ namespace build2 "LIB", "LINK", "_LINK_", nullptr}; static compiler_info - guess_msvc (const char* xm, + guess_msvc (context&, + const char* xm, lang xl, const path& xc, const string* xv, @@ -1608,6 +1680,7 @@ namespace build2 // "x86" // "x64" // "ARM" + // "ARM64" // compiler_version ver; { @@ -1671,9 +1744,10 @@ namespace build2 for (size_t b (0), e (0), n; (n = next_word (s, b, e, ' ', ',')) != 0; ) { - if (s.compare (b, n, "x64", 3) == 0 || - s.compare (b, n, "x86", 3) == 0 || - s.compare (b, n, "ARM", 3) == 0 || + if (s.compare (b, n, "x64", 3) == 0 || + s.compare (b, n, "x86", 3) == 0 || + s.compare (b, n, "ARM64", 5) == 0 || + s.compare (b, n, "ARM", 3) == 0 || s.compare (b, n, "80x86", 5) == 0) { cpu.assign (s, b, n); @@ -1684,15 +1758,15 @@ namespace build2 if (cpu.empty ()) fail << "unable to extract MSVC target CPU from " << "'" << s << "'"; - // Now we need to map x86, x64, and ARM to the target 
triplets. The - // problem is, there aren't any established ones so we got to invent - // them ourselves. Based on the discussion in + // Now we need to map x86, x64, ARM, and ARM64 to the target + // triplets. The problem is, there aren't any established ones so we + // got to invent them ourselves. Based on the discussion in // <libbutl/target-triplet.hxx>, we need something in the // CPU-VENDOR-OS-ABI form. // // The CPU part is fairly straightforward with x86 mapped to 'i386' - // (or maybe 'i686'), x64 to 'x86_64', and ARM to 'arm' (it could also - // include the version, e.g., 'amrv8'). + // (or maybe 'i686'), x64 to 'x86_64', ARM to 'arm' (it could also + // include the version, e.g., 'amrv8'), and ARM64 to 'aarch64'. // // The (toolchain) VENDOR is also straightforward: 'microsoft'. Why // not omit it? Two reasons: firstly, there are other compilers with @@ -1702,7 +1776,7 @@ namespace build2 // // OS-ABI is where things are not as clear cut. The OS part shouldn't // probably be just 'windows' since we have Win32 and WinCE. And - // WinRT. And Universal Windows Platform (UWP). So perhaps the + // WinRT. And Universal Windows Platform (UWP). So perhaps the // following values for OS: 'win32', 'wince', 'winrt', 'winup'. // // For 'win32' the ABI part could signal the Microsoft C/C++ runtime @@ -1727,9 +1801,10 @@ namespace build2 // Putting it all together, Visual Studio 2015 will then have the // following target triplets: // - // x86 i386-microsoft-win32-msvc14.0 - // x64 x86_64-microsoft-win32-msvc14.0 - // ARM arm-microsoft-winup-??? + // x86 i386-microsoft-win32-msvc14.0 + // x64 x86_64-microsoft-win32-msvc14.0 + // ARM arm-microsoft-winup-??? 
+ // ARM64 aarch64-microsoft-win32-msvc14.0 // if (cpu == "ARM") fail << "cl.exe ARM/WinRT/UWP target is not yet supported"; @@ -1739,6 +1814,8 @@ namespace build2 t = "x86_64-microsoft-win32-msvc"; else if (cpu == "x86" || cpu == "80x86") t = "i386-microsoft-win32-msvc"; + else if (cpu == "ARM64") + t = "aarch64-microsoft-win32-msvc"; else assert (false); @@ -1750,6 +1827,8 @@ namespace build2 else ot = t = *xt; + target_triplet tt (t); // Shouldn't fail. + // If we have the MSVC installation information, then this means we are // running out of the Visual Studio command prompt and will have to // supply PATH/INCLUDE/LIB/IFCPATH equivalents ourselves. @@ -1761,7 +1840,7 @@ namespace build2 if (const msvc_info* mi = static_cast<msvc_info*> (gr.info.get ())) { - const char* cpu (msvc_cpu (target_triplet (t).cpu)); + const char* cpu (msvc_cpu (tt.cpu)); lib_dirs = msvc_lib (*mi, x_mo, cpu); hdr_dirs = msvc_hdr (*mi, x_mo); @@ -1849,7 +1928,8 @@ namespace build2 "SDKROOT", "MACOSX_DEPLOYMENT_TARGET", nullptr}; static compiler_info - guess_gcc (const char* xm, + guess_gcc (context& ctx, + const char* xm, lang xl, const path& xc, const string* xv, @@ -1868,7 +1948,7 @@ namespace build2 // though language words can be translated and even rearranged (see // examples above). 
// - // "gcc version X.Y[.Z][...]" + // "gcc version X[.Y[.Z]][...]" // compiler_version ver; { @@ -1907,7 +1987,10 @@ namespace build2 // try { - semantic_version v (string (s, b, e - b), ".-+"); + semantic_version v (string (s, b, e - b), + semantic_version::allow_omit_minor | + semantic_version::allow_build, + ".-+"); ver.major = v.major; ver.minor = v.minor; ver.patch = v.patch; @@ -1959,7 +2042,7 @@ namespace build2 // auto f = [] (string& l, bool) {return move (l);}; - t = run<string> (3, xp, args.data (), f, false); + t = run<string> (ctx, 3, xp, args, f, false); if (t.empty ()) { @@ -1967,7 +2050,7 @@ namespace build2 << "falling back to -dumpmachine";}); args[args.size () - 2] = "-dumpmachine"; - t = run<string> (3, xp, args.data (), f, false); + t = run<string> (ctx, 3, xp, args, f, false); } if (t.empty ()) @@ -2110,9 +2193,9 @@ namespace build2 process pr (run_start (3 /* verbosity */, xp, args, - -2 /* stdin (/dev/null) */, - -1 /* stdout */, - false /* error (2>&1) */)); + -2 /* stdin (to /dev/null) */, + -1 /* stdout */, + 1 /* stderr (to stdout) */)); clang_msvc_info r; @@ -2264,7 +2347,7 @@ namespace build2 // that. 
} - if (!run_finish_code (args.data (), pr, l)) + if (!run_finish_code (args.data (), pr, l, 2 /* verbosity */)) fail << "unable to extract MSVC information from " << xp; if (const char* w = ( @@ -2298,7 +2381,8 @@ namespace build2 nullptr}; static compiler_info - guess_clang (const char* xm, + guess_clang (context& ctx, + const char* xm, lang xl, const path& xc, const string* xv, @@ -2439,8 +2523,8 @@ namespace build2 // https://gist.github.com/yamaya/2924292 // // Specifically, we now look in the libc++'s __config file for the - // _LIBCPP_VERSION and use the previous version as a conservative - // estimate (note that there could be multiple __config files with + // __LIBCPP_VERSION and use the previous version as a conservative + // estimate (NOTE that there could be multiple __config files with // potentially different versions so compile with -v to see which one // gets picked up). // @@ -2463,34 +2547,42 @@ namespace build2 // 12.0.0 -> 9.0 // 12.0.5 -> 10.0 (yes, seriously!) // 13.0.0 -> 11.0 + // 13.1.6 -> 12.0 + // 14.0.0 -> 12.0 (__LIBCPP_VERSION=130000) + // 14.0.3 -> 15.0.5 (__LIBCPP_VERSION=150006) + // 15.0.0 -> 16.0.1 (__LIBCPP_VERSION=160002) // uint64_t mj (var_ver->major); uint64_t mi (var_ver->minor); uint64_t pa (var_ver->patch); - if (mj >= 13) {mj = 11; mi = 0;} - else if (mj == 12 && (mi > 0 || pa >= 5)) {mj = 10; mi = 0;} - else if (mj == 12) {mj = 9; mi = 0;} - else if (mj == 11 && (mi > 0 || pa >= 3)) {mj = 8; mi = 0;} - else if (mj == 11) {mj = 7; mi = 0;} - else if (mj == 10) {mj = 6; mi = 0;} - else if (mj == 9 && mi >= 1) {mj = 5; mi = 0;} - else if (mj == 9) {mj = 4; mi = 0;} - else if (mj == 8) {mj = 3; mi = 9;} - else if (mj == 7 && mi >= 3) {mj = 3; mi = 8;} - else if (mj == 7) {mj = 3; mi = 7;} - else if (mj == 6 && mi >= 1) {mj = 3; mi = 5;} - else if (mj == 6) {mj = 3; mi = 4;} - else if (mj == 5 && mi >= 1) {mj = 3; mi = 3;} - else if (mj == 5) {mj = 3; mi = 2;} - else if (mj == 4 && mi >= 2) {mj = 3; mi = 1;} - else {mj = 3; 
mi = 0;} + + if (mj >= 15) {mj = 16; mi = 0; pa = 1;} + else if (mj == 14 && (mi > 0 || pa >= 3)) {mj = 15; mi = 0; pa = 5;} + else if (mj == 14 || (mj == 13 && mi >= 1)) {mj = 12; mi = 0; pa = 0;} + else if (mj == 13) {mj = 11; mi = 0; pa = 0;} + else if (mj == 12 && (mi > 0 || pa >= 5)) {mj = 10; mi = 0; pa = 0;} + else if (mj == 12) {mj = 9; mi = 0; pa = 0;} + else if (mj == 11 && (mi > 0 || pa >= 3)) {mj = 8; mi = 0; pa = 0;} + else if (mj == 11) {mj = 7; mi = 0; pa = 0;} + else if (mj == 10) {mj = 6; mi = 0; pa = 0;} + else if (mj == 9 && mi >= 1) {mj = 5; mi = 0; pa = 0;} + else if (mj == 9) {mj = 4; mi = 0; pa = 0;} + else if (mj == 8) {mj = 3; mi = 9; pa = 0;} + else if (mj == 7 && mi >= 3) {mj = 3; mi = 8; pa = 0;} + else if (mj == 7) {mj = 3; mi = 7; pa = 0;} + else if (mj == 6 && mi >= 1) {mj = 3; mi = 5; pa = 0;} + else if (mj == 6) {mj = 3; mi = 4; pa = 0;} + else if (mj == 5 && mi >= 1) {mj = 3; mi = 3; pa = 0;} + else if (mj == 5) {mj = 3; mi = 2; pa = 0;} + else if (mj == 4 && mi >= 2) {mj = 3; mi = 1; pa = 0;} + else {mj = 3; mi = 0; pa = 0;} ver = compiler_version { - to_string (mj) + '.' + to_string (mi) + ".0", + to_string (mj) + '.' + to_string (mi) + '.' + to_string (pa), mj, mi, - 0, + pa, ""}; } else if (emscr) @@ -2543,7 +2635,7 @@ namespace build2 // for LC_ALL. // auto f = [] (string& l, bool) {return move (l);}; - t = run<string> (3, xp, args.data (), f, false); + t = run<string> (ctx, 3, xp, args, f, false); if (t.empty ()) fail << "unable to extract target architecture from " << xc @@ -2771,7 +2863,8 @@ namespace build2 } static compiler_info - guess_icc (const char* xm, + guess_icc (context& ctx, + const char* xm, lang xl, const path& xc, const string* xv, @@ -2835,7 +2928,7 @@ namespace build2 // // @@ TODO: running without the mode options. 
// - s = run<string> (3, env, "-V", f, false); + s = run<string> (ctx, 3, env, "-V", f, false); if (s.empty ()) fail << "unable to extract signature from " << xc << " -V output"; @@ -2961,7 +3054,7 @@ namespace build2 // The -V output is sent to STDERR. // - t = run<string> (3, env, args.data (), f, false); + t = run<string> (ctx, 3, env, args, f, false); if (t.empty ()) fail << "unable to extract target architecture from " << xc @@ -3012,7 +3105,7 @@ namespace build2 // { auto f = [] (string& l, bool) {return move (l);}; - t = run<string> (3, xp, "-dumpmachine", f); + t = run<string> (ctx, 3, xp, "-dumpmachine", f); } if (t.empty ()) @@ -3093,7 +3186,8 @@ namespace build2 static global_cache<compiler_info> cache; const compiler_info& - guess (const char* xm, + guess (context& ctx, + const char* xm, lang xl, const string& ec, const path& xc, @@ -3167,7 +3261,7 @@ namespace build2 if (pre.type != invalid_compiler_type) { - gr = guess (xm, xl, xc, x_mo, xi, pre, cs); + gr = guess (ctx, xm, xl, xc, x_mo, xi, pre, cs); if (gr.empty ()) { @@ -3183,13 +3277,14 @@ namespace build2 } if (gr.empty ()) - gr = guess (xm, xl, xc, x_mo, xi, pre, cs); + gr = guess (ctx, xm, xl, xc, x_mo, xi, pre, cs); if (gr.empty ()) fail << "unable to guess " << xl << " compiler type of " << xc << info << "use config." << xm << ".id to specify explicitly"; compiler_info (*gf) ( + context&, const char*, lang, const path&, const string*, const string*, const strings&, const strings*, const strings*, @@ -3209,7 +3304,8 @@ namespace build2 case compiler_type::icc: gf = &guess_icc; break; } - compiler_info r (gf (xm, xl, xc, xv, xt, + compiler_info r (gf (ctx, + xm, xl, xc, xv, xt, x_mo, c_po, x_po, c_co, x_co, c_lo, x_lo, move (gr), cs)); @@ -3367,6 +3463,7 @@ namespace build2 // In the future we will probably have to maintain per-standard additions. // static const char* std_importable[] = { + "<initializer_list>", // Note: keep first (present in freestanding). 
"<algorithm>", "<any>", "<array>", @@ -3391,7 +3488,6 @@ namespace build2 "<fstream>", "<functional>", "<future>", - "<initializer_list>", "<iomanip>", "<ios>", "<iosfwd>", @@ -3490,6 +3586,9 @@ namespace build2 // is currently not provided by GCC. Though entering missing headers // should be harmless. // + // Plus, a freestanding implementation may only have a subset of such + // headers (see [compliance]). + // pair<const path, importable_headers::groups>* p; auto add_groups = [&p] (bool imp) { @@ -3511,29 +3610,39 @@ namespace build2 } else { + // While according to [compliance] a freestanding implementation + // should provide a subset of headers, including <initializer_list>, + // there seem to be cases where no headers are provided at all (see GH + // issue #219). So if we cannot find <initializer_list>, we just skip + // the whole thing. + // p = hs.insert_angle (sys_hdr_dirs, std_importable[0]); - assert (p != nullptr); - add_groups (true); + if (p != nullptr) + { + assert (p != nullptr); - dir_path d (p->first.directory ()); + add_groups (true); - auto add_header = [&hs, &d, &p, add_groups] (const char* f, bool imp) - { - path fp (d); - fp.combine (f + 1, strlen (f) - 2, '\0'); // Assuming simple. + dir_path d (p->first.directory ()); - p = &hs.insert_angle (move (fp), f); - add_groups (imp); - }; + auto add_header = [&hs, &d, &p, add_groups] (const char* f, bool imp) + { + path fp (d); + fp.combine (f + 1, strlen (f) - 2, '\0'); // Assuming simple. 
-        for (size_t i (1);
-             i != sizeof (std_importable) / sizeof (std_importable[0]);
-             ++i)
-          add_header (std_importable[i], true);
+            p = &hs.insert_angle (move (fp), f);
+            add_groups (imp);
+          };

-        for (const char* f: std_non_importable)
-          add_header (f, false);
+          for (size_t i (1);
+               i != sizeof (std_importable) / sizeof (std_importable[0]);
+               ++i)
+            add_header (std_importable[i], true);
+
+          for (const char* f: std_non_importable)
+            add_header (f, false);
+        }
      }
    }
  }
diff --git a/libbuild2/cc/guess.hxx b/libbuild2/cc/guess.hxx
index 53acc15..7cbbd87 100644
--- a/libbuild2/cc/guess.hxx
+++ b/libbuild2/cc/guess.hxx
@@ -253,7 +253,8 @@ namespace build2
    // that most of it will be the same, at least for C and C++.
    //
    const compiler_info&
-    guess (const char* xm, // Module (for var names in diagnostics).
+    guess (context&,
+           const char* xm, // Module (for var names in diagnostics).
           lang xl, // Language.
           const string& ec, // Environment checksum.
           const path& xc, // Compiler path.
diff --git a/libbuild2/cc/init.cxx b/libbuild2/cc/init.cxx
index affc4ab..33a1133 100644
--- a/libbuild2/cc/init.cxx
+++ b/libbuild2/cc/init.cxx
@@ -86,7 +86,10 @@ namespace build2
      // Enter variables.
      //
-      auto& vp (rs.var_pool ());
+      // All the variables we enter are qualified so go straight for the
+      // public variable pool.
+      //
+      auto& vp (rs.var_pool (true /* public */));

      auto v_t (variable_visibility::target);

@@ -113,6 +116,13 @@ namespace build2
      vp.insert<vector<name>> ("cc.export.libs");
      vp.insert<vector<name>> ("cc.export.impl_libs");

+      // Header (-I) and library (-L) search paths to use in the generated .pc
+      // files instead of the default install.{include,lib}. Relative paths
+      // are resolved as install paths.
+      //
+      vp.insert<dir_paths> ("cc.pkgconfig.include");
+      vp.insert<dir_paths> ("cc.pkgconfig.lib");
+
      // Hint variables (not overridable).
// vp.insert<string> ("config.cc.id", false); @@ -126,15 +136,20 @@ namespace build2 vp.insert<string> ("cc.runtime"); vp.insert<string> ("cc.stdlib"); - // Target type, for example, "C library" or "C++ library". Should be set - // on the target as a rule-specific variable by the matching rule to the - // name of the module (e.g., "c", "cxx"). Currenly only set for - // libraries and is used to decide which *.libs to use during static - // linking. - // - // It can also be the special "cc" value which means a C-common library - // but specific language is not known. Used in the import installed - // logic. + // Library target type in the <lang>[,<type>...] form where <lang> is + // "c" (C library), "cxx" (C++ library), or "cc" (C-common library but + // the specific language is not known). Currently recognized <type> + // values are "binless" (library is binless) and "recursively-binless" + // (library and all its prerequisite libraries are binless). Note that + // another indication of a binless library is an empty path, which could + // be easier/faster to check. Note also that there should be no + // whitespaces of any kind and <lang> is always first. + // + // This value should be set on the library target as a rule-specific + // variable by the matching rule. It is also saved in the generated + // pkg-config files. Currently <lang> is used to decide which *.libs to + // use during static linking. The "cc" language is used in the import + // installed logic. // // Note that this variable cannot be set via the target type/pattern- // specific mechanism (see process_libraries()). @@ -326,10 +341,11 @@ namespace build2 // if (!cast_false<bool> (rs["bin.config.loaded"])) { - // Prepare configuration hints. They are only used on the first load - // of bin.config so we only populate them on our first load. + // Prepare configuration hints (pretend it belongs to root scope). 
+ // They are only used on the first load of bin.config so we only + // populate them on our first load. // - variable_map h (rs.ctx); + variable_map h (rs); if (first) { diff --git a/libbuild2/cc/install-rule.cxx b/libbuild2/cc/install-rule.cxx index 560b8a7..dae65db 100644 --- a/libbuild2/cc/install-rule.cxx +++ b/libbuild2/cc/install-rule.cxx @@ -90,14 +90,16 @@ namespace build2 { return (x_header (p) || p.is_a (x_src) || - (x_mod != nullptr && p.is_a (*x_mod))); + (x_mod != nullptr && p.is_a (*x_mod)) || + (x_asp != nullptr && p.is_a (*x_asp)) || + (x_obj != nullptr && p.is_a (*x_obj))); }; if (t.is_a<exe> ()) { if (header_source (p)) pt = nullptr; - else if (p.type.see_through) + else if (p.type.see_through ()) { for (i.enter_group (); i.group (); ) { @@ -151,17 +153,29 @@ namespace build2 } bool install_rule:: - match (action a, target& t, const string& hint) const + match (action a, target& t, const string&, match_extra& me) const { - // @@ How do we split the hint between the two? - // - // We only want to handle installation if we are also the ones building // this target. So first run link's match(). // - return link_.match (a, t, hint) && file_rule::match (a, t, ""); + return link_.sub_match (x_link, update_id, a, t, me) && + file_rule::match (a, t); } + // Wrap the file_rule's recipe into a data-carrying recipe. + // + struct install_match_data + { + build2::recipe recipe; + link_rule::libs_paths libs_paths; + + target_state + operator() (action a, const target& t) + { + return recipe (a, t); + } + }; + recipe install_rule:: apply (action a, target& t) const { @@ -175,7 +189,7 @@ namespace build2 // Signal to the link rule that this is update for install. And if the // update has already been executed, verify it was done for install. 
// - auto& md (t.data<link_rule::match_data> ()); + auto& md (t.data<link_rule::match_data> (a.inner_action ())); if (md.for_install) { @@ -194,19 +208,18 @@ namespace build2 // storage if we are un/installing (used in the *_extra() functions // below). // - static_assert (sizeof (link_rule::libs_paths) <= target::data_size, - "insufficient space"); - if (file* f = t.is_a<libs> ()) { if (!f->path ().empty ()) // Not binless. { const string* p (cast_null<string> (t["bin.lib.prefix"])); const string* s (cast_null<string> (t["bin.lib.suffix"])); - t.data ( + + return install_match_data { + move (r), link_.derive_libs_paths (*f, p != nullptr ? p->c_str (): nullptr, - s != nullptr ? s->c_str (): nullptr)); + s != nullptr ? s->c_str (): nullptr)}; } } } @@ -224,11 +237,11 @@ namespace build2 // Here we may have a bunch of symlinks that we need to install. // const scope& rs (t.root_scope ()); - auto& lp (t.data<link_rule::libs_paths> ()); + auto& lp (t.data<install_match_data> (perform_install_id).libs_paths); - auto ln = [&rs, &id] (const path& f, const path& l) + auto ln = [&t, &rs, &id] (const path& f, const path& l) { - install_l (rs, id, f.leaf (), l.leaf (), 2 /* verbosity */); + install_l (rs, id, l.leaf (), t, f.leaf (), 2 /* verbosity */); return true; }; @@ -258,11 +271,11 @@ namespace build2 // Here we may have a bunch of symlinks that we need to uninstall. 
// const scope& rs (t.root_scope ()); - auto& lp (t.data<link_rule::libs_paths> ()); + auto& lp (t.data<install_match_data> (perform_uninstall_id).libs_paths); - auto rm = [&rs, &id] (const path& l) + auto rm = [&rs, &id] (const path& f, const path& l) { - return uninstall_f (rs, id, nullptr, l.leaf (), 2 /* verbosity */); + return uninstall_l (rs, id, l.leaf (), f.leaf (), 2 /* verbosity */); }; const path& lk (lp.link); @@ -270,10 +283,12 @@ namespace build2 const path& so (lp.soname); const path& in (lp.interm); - if (!lk.empty ()) r = rm (lk) || r; - if (!ld.empty ()) r = rm (ld) || r; - if (!so.empty ()) r = rm (so) || r; - if (!in.empty ()) r = rm (in) || r; + const path* f (lp.real); + + if (!in.empty ()) {r = rm (*f, in) || r; f = ∈} + if (!so.empty ()) {r = rm (*f, so) || r; f = &so;} + if (!ld.empty ()) {r = rm (*f, ld) || r; f = &ld;} + if (!lk.empty ()) {r = rm (*f, lk) || r; } } return r; @@ -325,14 +340,16 @@ namespace build2 { return (x_header (p) || p.is_a (x_src) || - (x_mod != nullptr && p.is_a (*x_mod))); + (x_mod != nullptr && p.is_a (*x_mod)) || + (x_asp != nullptr && p.is_a (*x_asp)) || + (x_obj != nullptr && p.is_a (*x_obj))); }; if (t.is_a<libue> ()) { if (header_source (p)) pt = nullptr; - else if (p.type.see_through) + else if (p.type.see_through ()) { for (i.enter_group (); i.group (); ) { @@ -372,12 +389,13 @@ namespace build2 } bool libux_install_rule:: - match (action a, target& t, const string& hint) const + match (action a, target& t, const string&, match_extra& me) const { // We only want to handle installation if we are also the ones building // this target. So first run link's match(). 
// - return link_.match (a, t, hint) && alias_rule::match (a, t, ""); + return link_.sub_match (x_link, update_id, a, t, me) && + alias_rule::match (a, t); } } } diff --git a/libbuild2/cc/install-rule.hxx b/libbuild2/cc/install-rule.hxx index acd1bd8..6998d63 100644 --- a/libbuild2/cc/install-rule.hxx +++ b/libbuild2/cc/install-rule.hxx @@ -38,8 +38,12 @@ namespace build2 filter (const scope*, action, const target&, prerequisite_iterator&) const override; + // Note: rule::match() override. + // virtual bool - match (action, target&, const string&) const override; + match (action, target&, const string&, match_extra&) const override; + + using file_rule::match; // Make Clang happy. virtual recipe apply (action, target&) const override; @@ -71,8 +75,12 @@ namespace build2 filter (const scope*, action, const target&, prerequisite_iterator&) const override; + // Note: rule::match() override. + // virtual bool - match (action, target&, const string&) const override; + match (action, target&, const string&, match_extra&) const override; + + using alias_rule::match; // Make Clang happy. private: const link_rule& link_; diff --git a/libbuild2/cc/lexer+raw-string-literal.test.testscript b/libbuild2/cc/lexer+raw-string-literal.test.testscript index bca489a..a6455eb 100644 --- a/libbuild2/cc/lexer+raw-string-literal.test.testscript +++ b/libbuild2/cc/lexer+raw-string-literal.test.testscript @@ -16,6 +16,7 @@ R"X(a b)X" R"X(a\ b)X" +R""(a)"" EOI <string literal> <string literal> @@ -24,6 +25,7 @@ EOI <string literal> <string literal> <string literal> +<string literal> EOO : prefix diff --git a/libbuild2/cc/lexer.cxx b/libbuild2/cc/lexer.cxx index beeb970..467c0b1 100644 --- a/libbuild2/cc/lexer.cxx +++ b/libbuild2/cc/lexer.cxx @@ -734,8 +734,8 @@ namespace build2 // R"<delimiter>(<raw_characters>)<delimiter>" // // Where <delimiter> is a potentially-empty character sequence made of - // any source character but parentheses, backslash and spaces. 
It can be - // at most 16 characters long. + // any source character but parentheses, backslash, and spaces (in + // particular, it can be `"`). It can be at most 16 characters long. // // Note that the <raw_characters> are not processed in any way, not even // for line continuations. @@ -750,7 +750,7 @@ namespace build2 { c = geth (); - if (eos (c) || c == '\"' || c == ')' || c == '\\' || c == ' ') + if (eos (c) || c == ')' || c == '\\' || c == ' ') fail (l) << "invalid raw string literal"; if (c == '(') diff --git a/libbuild2/cc/lexer.test.cxx b/libbuild2/cc/lexer.test.cxx index 0d7d12f..39e4279 100644 --- a/libbuild2/cc/lexer.test.cxx +++ b/libbuild2/cc/lexer.test.cxx @@ -6,6 +6,7 @@ #include <libbuild2/types.hxx> #include <libbuild2/utility.hxx> +#include <libbuild2/cc/types.hxx> #include <libbuild2/cc/lexer.hxx> #undef NDEBUG diff --git a/libbuild2/cc/link-rule.cxx b/libbuild2/cc/link-rule.cxx index fa9a1f1..e2bdf5d 100644 --- a/libbuild2/cc/link-rule.cxx +++ b/libbuild2/cc/link-rule.cxx @@ -20,6 +20,8 @@ #include <libbuild2/bin/target.hxx> #include <libbuild2/bin/utility.hxx> +#include <libbuild2/install/utility.hxx> + #include <libbuild2/cc/target.hxx> // c, pc* #include <libbuild2/cc/utility.hxx> @@ -156,7 +158,7 @@ namespace build2 { if (s[0] == '-') { - // -l<name>, -l <name> + // -l<name>, -l <name> (Note: not -pthread, which is system) // if (s[1] == 'l') { @@ -256,8 +258,6 @@ namespace build2 : common (move (d)), rule_id (string (x) += ".link 3") { - static_assert (sizeof (match_data) <= target::data_size, - "insufficient space"); } link_rule::match_result link_rule:: @@ -282,17 +282,25 @@ namespace build2 { // If excluded or ad hoc, then don't factor it into our tests. // - if (include (a, t, p) != include_type::normal) + // Note that here we don't validate the update operation override + // value (since we may not match). Instead we do this in apply(). + // + lookup l; + if (include (a, t, p, a.operation () == update_id ? 
&l : nullptr) != + include_type::normal) continue; if (p.is_a (x_src) || (x_mod != nullptr && p.is_a (*x_mod)) || + (x_asp != nullptr && p.is_a (*x_asp)) || + (x_obj != nullptr && p.is_a (*x_obj)) || // Header-only X library (or library with C source and X header). (library && x_header (p, false /* c_hdr */))) { r.seen_x = true; } - else if (p.is_a<c> () || + else if (p.is_a<c> () || + (x_obj != nullptr && p.is_a<m> ()) || // Header-only C library. (library && p.is_a<h> ())) { @@ -347,6 +355,11 @@ namespace build2 const target* pg (nullptr); const target* pt (p.search_existing ()); + auto search = [&t, &p] (const target_type& tt) + { + return search_existing (t.ctx, p.prerequisite.key (tt)); + }; + if (p.is_a<libul> ()) { if (pt != nullptr) @@ -369,23 +382,33 @@ namespace build2 { // It's possible we have no group but have a member so try that. // - const target_type& tt (ot == otype::a ? libua::static_type : - ot == otype::s ? libus::static_type : - libue::static_type); + if (ot != otype::e) + { + // We know this prerequisite member is a prerequisite since + // otherwise the above search would have returned the member + // target. + // + pt = search (ot == otype::a + ? libua::static_type + : libus::static_type); + } + else + { + // Similar semantics to bin::link_member(): prefer static over + // shared. + // + pt = search (libua::static_type); - // We know this prerequisite member is a prerequisite since - // otherwise the above search would have returned the member - // target. - // - pt = search_existing (t.ctx, p.prerequisite.key (tt)); + if (pt == nullptr) + pt = search (libus::static_type); + } } } else if (!p.is_a<libue> ()) { // See if we also/instead have a group. 
// - pg = search_existing (t.ctx, - p.prerequisite.key (libul::static_type)); + pg = search (libul::static_type); if (pt == nullptr) swap (pt, pg); @@ -426,7 +449,7 @@ namespace build2 } bool link_rule:: - match (action a, target& t, const string& hint) const + match (action a, target& t, const string& hint, match_extra&) const { // NOTE: may be called multiple times and for both inner and outer // operations (see the install rules). @@ -465,17 +488,22 @@ namespace build2 return false; } - if (!(r.seen_x || r.seen_c || r.seen_obj || r.seen_lib)) + // Sometimes we may need to have a binless library whose only purpose is + // to export dependencies on other libraries (potentially in a platform- + // specific manner; think the whole -pthread mess). So allow a library + // without any sources with a hint. + // + if (!(r.seen_x || r.seen_c || r.seen_obj || r.seen_lib || !hint.empty ())) { - l4 ([&]{trace << "no " << x_lang << ", C, or obj/lib prerequisite " - << "for target " << t;}); + l4 ([&]{trace << "no " << x_lang << ", C, obj/lib prerequisite or " + << "hint for target " << t;}); return false; } // We will only chain a C source if there is also an X source or we were // explicitly told to. // - if (r.seen_c && !r.seen_x && hint < x) + if (r.seen_c && !r.seen_x && hint.empty ()) { l4 ([&]{trace << "C prerequisite without " << x_lang << " or hint " << "for target " << t;}); @@ -813,6 +841,12 @@ namespace build2 // if (const libul* ul = pt->is_a<libul> ()) { + // @@ Isn't libul{} member already picked or am I missing something? + // If not, then we may need the same in recursive-binless logic. + // +#if 0 + assert (false); // @@ TMP (remove before 0.16.0 release) +#endif ux = &link_member (*ul, a, li)->as<libux> (); } else if ((ux = pt->is_a<libue> ()) || @@ -829,8 +863,20 @@ namespace build2 return nullptr; }; + // Given the cc.type value return true if the library is recursively + // binless. 
+ // + static inline bool + recursively_binless (const string& type) + { + size_t p (type.find ("recursively-binless")); + return (p != string::npos && + type[p - 1] == ',' && // <lang> is first. + (type[p += 19] == '\0' || type[p] == ',')); + } + recipe link_rule:: - apply (action a, target& xt) const + apply (action a, target& xt, match_extra&) const { tracer trace (x, "link_rule::apply"); @@ -840,7 +886,11 @@ namespace build2 // Note that for_install is signalled by install_rule and therefore // can only be relied upon during execute. // - match_data& md (t.data (match_data ())); + // Note that we don't really need to set it as target data: while there + // are calls to get it, they should only happen after the target has + // been matched. + // + match_data md (*this); const scope& bs (t.base_scope ()); const scope& rs (*bs.root_scope ()); @@ -849,11 +899,6 @@ namespace build2 otype ot (lt.type); linfo li (link_info (bs, ot)); - // Set the library type (C, C++, etc) as rule-specific variable. - // - if (lt.library ()) - t.state[a].assign (c_type) = string (x); - bool binless (lt.library ()); // Binary-less until proven otherwise. bool user_binless (lt.library () && cast_false<bool> (t[b_binless])); @@ -875,7 +920,7 @@ namespace build2 // We do libraries first in order to indicate that we will execute these // targets before matching any of the obj/bmi{}. This makes it safe for // compile::apply() to unmatch them and therefore not to hinder - // parallelism. + // parallelism (or mess up for-install'ness). // // We also create obj/bmi{} chain targets because we need to add // (similar to lib{}) all the bmi{} as prerequisites to all the other @@ -899,33 +944,100 @@ namespace build2 return a.operation () == clean_id && !pt.dir.sub (rs.out_path ()); }; + bool update_match (false); // Have update during match. 
+
      auto& pts (t.prerequisite_targets[a]);
      size_t start (pts.size ());

      for (prerequisite_member p: group_prerequisite_members (a, t))
      {
-        include_type pi (include (a, t, p));
+        // Note that we have to recognize update=match for *(update), not just
+        // perform(update). But only actually update for perform(update).
+        //
+        lookup l; // The `update` variable value, if any.
+        include_type pi (
+          include (a, t, p, a.operation () == update_id ? &l : nullptr));

        // We pre-allocate a NULL slot for each (potential; see clean)
        // prerequisite target.
        //
        pts.push_back (prerequisite_target (nullptr, pi));
-        const target*& pt (pts.back ());
+        auto& pto (pts.back ());
+
+        // Use bit 2 of prerequisite_target::include to signal update during
+        // match.
+        //
+        // Note that for now we only allow updating during match ad hoc and
+        // mark 3 (headers, etc; see below) prerequisites.
+        //
+        // By default we update during match headers and ad hoc sources (which
+        // are commonly marked as such because they are #include'ed).
+        //
+        optional<bool> um;
+
+        if (l)
+        {
+          const string& v (cast<string> (l));
+
+          if (v == "match")
+            um = true;
+          else if (v == "execute")
+            um = false;
+          else if (v != "false" && v != "true")
+          {
+            fail << "unrecognized update variable value '" << v
+                 << "' specified for prerequisite " << p.prerequisite;
+          }
+        }
+
+        // Skip excluded and ad hoc (unless updated during match) on this
+        // pass.
+        //
+        if (pi != include_type::normal)
+        {
+          if (a == perform_update_id && pi == include_type::adhoc)
+          {
+            // By default update ad hoc headers/sources during match (see
+            // above).
+ // +#if 1 + if (!um) + um = (p.is_a (x_src) || p.is_a<c> () || + (x_mod != nullptr && p.is_a (*x_mod)) || + (x_asp != nullptr && p.is_a (*x_asp)) || + (x_obj != nullptr && (p.is_a (*x_obj) || p.is_a<m> ())) || + x_header (p, true)); +#endif + + if (*um) + { + pto.target = &p.search (t); // mark 0 + pto.include |= prerequisite_target::include_udm; + update_match = true; + } + } - if (pi != include_type::normal) // Skip excluded and ad hoc. continue; + } + + const target*& pt (pto); - // Mark: - // 0 - lib + // Mark (2 bits): + // + // 0 - lib or update during match // 1 - src // 2 - mod - // 3 - obj/bmi and also lib not to be cleaned + // 3 - obj/bmi and also lib not to be cleaned (and other stuff) // - uint8_t m (0); + uint8_t mk (0); bool mod (x_mod != nullptr && p.is_a (*x_mod)); + bool hdr (false); - if (mod || p.is_a (x_src) || p.is_a<c> ()) + if (mod || + p.is_a (x_src) || p.is_a<c> () || + (x_asp != nullptr && p.is_a (*x_asp)) || + (x_obj != nullptr && (p.is_a (*x_obj) || p.is_a<m> ()))) { binless = binless && (mod ? user_binless : false); @@ -976,8 +1088,8 @@ namespace build2 // be the group -- we will pick a member in part 2 below. // pair<target&, ulock> r ( - search_locked ( - t, rtt, d, dir_path (), *cp.tk.name, nullptr, cp.scope)); + search_new_locked ( + ctx, rtt, d, dir_path (), *cp.tk.name, nullptr, cp.scope)); // If we shouldn't clean obj{}, then it is fair to assume we // shouldn't clean the source either (generated source will be in @@ -1013,7 +1125,7 @@ namespace build2 } pt = &r.first; - m = mod ? 2 : 1; + mk = mod ? 2 : 1; } else if (p.is_a<libx> () || p.is_a<liba> () || @@ -1022,12 +1134,8 @@ namespace build2 { // Handle imported libraries. // - // Note that since the search is rule-specific, we don't cache the - // target in the prerequisite. 
- // if (p.proj ()) - pt = search_library ( - a, sys_lib_dirs, usr_lib_dirs, p.prerequisite); + pt = search_library (a, sys_lib_dirs, usr_lib_dirs, p.prerequisite); // The rest is the same basic logic as in search_and_match(). // @@ -1035,13 +1143,17 @@ namespace build2 pt = &p.search (t); if (skip (*pt)) - m = 3; // Mark so it is not matched. + mk = 3; // Mark so it is not matched. // If this is the lib{}/libul{} group, then pick the appropriate - // member. + // member. Also note this in prerequisite_target::include (used + // by process_libraries()). // if (const libx* l = pt->is_a<libx> ()) + { pt = link_member (*l, a, li); + pto.include |= include_group; + } } else { @@ -1054,8 +1166,11 @@ namespace build2 // Windows module definition (.def). For other platforms (and for // static libraries) treat it as an ordinary prerequisite. // - else if (p.is_a<def> () && tclass == "windows" && ot != otype::a) + else if (p.is_a<def> ()) { + if (tclass != "windows" || ot == otype::a) + continue; + pt = &p.search (t); } // @@ -1065,11 +1180,14 @@ namespace build2 // else { - if (!p.is_a<objx> () && !p.is_a<bmix> () && !x_header (p, true)) + if (!p.is_a<objx> () && + !p.is_a<bmix> () && + !(hdr = x_header (p, true))) { // @@ Temporary hack until we get the default outer operation // for update. This allows operations like test and install to - // skip such tacked on stuff. + // skip such tacked on stuff. @@ This doesn't feel temporary + // anymore... // // Note that ad hoc inputs have to be explicitly marked with the // include=adhoc prerequisite-specific variable. @@ -1097,21 +1215,58 @@ namespace build2 !pt->is_a<hbmix> () && cast_false<bool> ((*pt)[b_binless]))); - m = 3; + mk = 3; } if (user_binless && !binless) fail << t << " cannot be binless due to " << p << " prerequisite"; - mark (pt, m); + // Upgrade update during match prerequisites to mark 0 (see above for + // details). 
+ // + if (a == perform_update_id) + { + // By default update headers during match (see above). + // +#if 1 + if (!um) + um = hdr; +#endif + + if (*um) + { + if (mk != 3) + fail << "unable to update during match prerequisite " << p << + info << "updating this type of prerequisites during match is " + << "not supported by this rule"; + + mk = 0; + pto.include |= prerequisite_target::include_udm; + update_match = true; + } + } + + mark (pt, mk); } - // Match lib{} (the only unmarked) in parallel and wait for completion. + // Match lib{} first and then update during match (the only unmarked) in + // parallel and wait for completion. We need to match libraries first + // because matching generated headers/sources may lead to matching some + // of the libraries (for example, if generation requires some of the + // metadata; think poptions needed by Qt moc). // - match_members (a, t, pts, start); + { + auto mask (prerequisite_target::include_udm); + + match_members (a, t, pts, start, {mask, 0}); + + if (update_match) + match_members (a, t, pts, start, {mask, mask}); + } // Check if we have any binful utility libraries. // + bool rec_binless (false); // Recursively-binless. if (binless) { if (const libux* l = find_binful (a, t, li)) @@ -1122,8 +1277,128 @@ namespace build2 fail << t << " cannot be binless due to binful " << *l << " prerequisite"; } + + // See if we are recursively-binless. + // + if (binless) + { + rec_binless = true; + + for (const target* pt: t.prerequisite_targets[a]) + { + if (pt == nullptr || unmark (pt) != 0) // See above. + continue; + + const file* ft; + if ((ft = pt->is_a<libs> ()) || + (ft = pt->is_a<liba> ()) || + (ft = pt->is_a<libux> ())) + { + if (ft->path ().empty ()) // Binless. + { + // The same lookup as in process_libraries(). 
+ // + if (const string* t = cast_null<string> ( + ft->state[a].lookup_original ( + c_type, true /* target_only */).first)) + { + if (recursively_binless (*t)) + continue; + } + } + + rec_binless = false; + break; + } + } + + // Another thing we must check is for the presence of any simple + // libraries (-lm, shell32.lib, etc) in *.export.libs. See + // process_libraries() for details. + // + if (rec_binless) + { + auto find = [&t, &bs] (const variable& v) -> lookup + { + return t.lookup_original (v, false, &bs).first; + }; + + auto has_simple = [] (lookup l) + { + if (const auto* ns = cast_null<vector<name>> (l)) + { + for (auto i (ns->begin ()), e (ns->end ()); i != e; ++i) + { + if (i->pair) + ++i; + else if (i->simple ()) // -l<name>, etc. + return true; + } + } + + return false; + }; + + if (lt.shared_library ()) // process_libraries()::impl == false + { + if (has_simple (find (x_export_libs)) || + has_simple (find (c_export_libs))) + rec_binless = false; + } + else // process_libraries()::impl == true + { + lookup x (find (x_export_impl_libs)); + lookup c (find (c_export_impl_libs)); + + if (x.defined () || c.defined ()) + { + if (has_simple (x) || has_simple (c)) + rec_binless = false; + } + else + { + // These are strings and we assume if either is defined and + // not empty, then we have simple libraries. + // + if (((x = find (x_libs)) && !x->empty ()) || + ((c = find (c_libs)) && !c->empty ())) + rec_binless = false; + } + } + } + } + } + + // Set the library type (C, C++, binless) as rule-specific variable. + // + if (lt.library ()) + { + string v (x); + + if (rec_binless) + v += ",recursively-binless"; + else if (binless) + v += ",binless"; + + t.state[a].assign (c_type) = move (v); } + // If we have any update during match prerequisites, now is the time to + // update them. 
Note that we have to do it before any further matches + // since they may rely on these prerequisites already being updated (for + // example, object file matches may need the headers to be already + // updated). We also must do it after matching all our prerequisite + // libraries since they may generate headers that we depend upon. + // + // Note that we ignore the result and whether it renders us out of date, + // leaving it to the common execute logic in perform_update(). + // + // Note also that update_during_match_prerequisites() spoils + // prerequisite_target::data. + // + if (update_match) + update_during_match_prerequisites (trace, a, t); + // Now that we know for sure whether we are binless, derive file name(s) // and add ad hoc group members. Note that for binless we still need the // .pc member (whose name depends on the libray prefix) so we take care @@ -1267,11 +1542,26 @@ namespace build2 if (wasm.path ().empty ()) wasm.derive_path (); + // We don't want to print this member at level 1 diagnostics. + // + wasm.state[a].assign (ctx.var_backlink) = names { + name ("group"), name ("false")}; + // If we have -pthread then we get additional .worker.js file // which is used for thread startup. In a somewhat hackish way we // represent it as an exe{} member to make sure it gets installed // next to the main .js file. // + // @@ Note that our recommendation is to pass -pthread in *.libs + // but checking that is not straightforward (it could come from + // one of the libraries that we are linking). We could have called + // append_libraries() (similar to $x.lib_libs()) and then looked + // there. But this is quite heavy handed and it's not clear this + // is worth the trouble since the -pthread support in Emscripten + // is quite high-touch (i.e., it's not like we can write a library + // that starts some threads and then run its test as on any other + // POSIX platform). 
+ // if (find_option ("-pthread", cmode) || find_option ("-pthread", t, c_loptions) || find_option ("-pthread", t, x_loptions)) @@ -1280,6 +1570,11 @@ namespace build2 if (worker.path ().empty ()) worker.derive_path (); + + // We don't want to print this member at level 1 diagnostics. + // + worker.state[a].assign (ctx.var_backlink) = names { + name ("group"), name ("false")}; } } @@ -1288,22 +1583,31 @@ namespace build2 // if (!binless && ot != otype::a && tsys == "win32-msvc") { - if (find_option ("/DEBUG", t, c_loptions, true) || - find_option ("/DEBUG", t, x_loptions, true)) + const string* o; + if ((o = find_option_prefix ("/DEBUG", t, c_loptions, true)) != nullptr || + (o = find_option_prefix ("/DEBUG", t, x_loptions, true)) != nullptr) { - const target_type& tt (*bs.find_target_type ("pdb")); + if (icasecmp (*o, "/DEBUG:NONE") != 0) + { + const target_type& tt (*bs.find_target_type ("pdb")); - // We call the target foo.{exe,dll}.pdb rather than just foo.pdb - // because we can have both foo.exe and foo.dll in the same - // directory. - // - file& pdb (add_adhoc_member<file> (t, tt, e)); + // We call the target foo.{exe,dll}.pdb rather than just + // foo.pdb because we can have both foo.exe and foo.dll in the + // same directory. + // + file& pdb (add_adhoc_member<file> (t, tt, e)); - // Note that the path is derived from the exe/dll path (so it - // will include the version in case of a dll). - // - if (pdb.path ().empty ()) - pdb.derive_path (t.path ()); + // Note that the path is derived from the exe/dll path (so it + // will include the version in case of a dll). + // + if (pdb.path ().empty ()) + pdb.derive_path (t.path ()); + + // We don't want to print this member at level 1 diagnostics. 
+ // + pdb.state[a].assign (ctx.var_backlink) = names { + name ("group"), name ("false")}; + } } } @@ -1325,6 +1629,13 @@ namespace build2 // we will use its bin.lib to decide what will be installed and in // perform_update() we will confirm that it is actually installed. // + // This, of course, works only if we actually have explicit lib{}. + // But the user could only have liba{} (common in testing frameworks + // that provide main()) or only libs{} (e.g., plugin that can also + // be linked). It's also theoretically possible to have both liba{} + // and libs{} but no lib{}, in which case it feels correct not to + // generate the common file at all. + // if (ot != otype::e) { // Note that here we always use the lib name prefix, even on @@ -1336,7 +1647,13 @@ namespace build2 // Note also that the order in which we are adding these members // is important (see add_addhoc_member() for details). // - if (ot == otype::a || !link_members (rs).a) + if (operator>= (t.group->decl, target_decl::implied) // @@ VC14 + ? ot == (link_members (rs).a ? otype::a : otype::s) + : search_existing (ctx, + ot == otype::a + ? libs::static_type + : liba::static_type, + t.dir, t.out, t.name) == nullptr) { auto& pc (add_adhoc_member<pc> (t)); @@ -1369,14 +1686,12 @@ namespace build2 // exists (windows_rpath_assembly() does take care to clean it up // if not used). // -#ifdef _WIN32 - target& dir = -#endif + target& dir ( add_adhoc_member (t, fsdir::static_type, path_cast<dir_path> (t.path () + ".dlls"), t.out, - string () /* name */); + string () /* name */)); // By default our backlinking logic will try to symlink the // directory and it can even be done on Windows using junctions. @@ -1390,9 +1705,15 @@ namespace build2 // Wine. So we only resort to copy-link'ing if we are running on // Windows. // + // We also don't want to print this member at level 1 diagnostics. 
+ // + dir.state[a].assign (ctx.var_backlink) = names { #ifdef _WIN32 - dir.state[a].assign (ctx.var_backlink) = "copy"; + name ("copy"), name ("false") +#else + name ("group"), name ("false") #endif + }; } } } @@ -1414,23 +1735,24 @@ namespace build2 continue; // New mark: + // 0 - already matched // 1 - completion // 2 - verification // - uint8_t m (unmark (pt)); + uint8_t mk (unmark (pt)); - if (m == 3) // obj/bmi or lib not to be cleaned + if (mk == 3) // obj/bmi or lib not to be cleaned { - m = 1; // Just completion. + mk = 1; // Just completion. // Note that if this is a library not to be cleaned, we keep it // marked for completion (see the next phase). } - else if (m == 1 || m == 2) // Source/module chain. + else if (mk == 1 || mk == 2) // Source/module chain. { - bool mod (m == 2); + bool mod (mk == 2); // p is_a x_mod - m = 1; + mk = 1; const target& rt (*pt); bool group (!p.prerequisite.belongs (t)); // Group's prerequisite. @@ -1462,7 +1784,21 @@ namespace build2 if (!pt->has_prerequisites () && (!group || !rt.has_prerequisites ())) { - prerequisites ps {p.as_prerequisite ()}; // Source. + prerequisites ps; + + // Add source. + // + // Remove the update variable (we may have stray update=execute + // that was specified together with the header). + // + { + prerequisite pc (p.as_prerequisite ()); + + if (!pc.vars.empty ()) + pc.vars.erase (*ctx.var_update); + + ps.push_back (move (pc)); + } // Add our lib*{} (see the export.* machinery for details) and // bmi*{} (both original and chained; see module search logic) @@ -1481,7 +1817,7 @@ namespace build2 // might depend on the imported one(s) which we will never "see" // unless we start with this library. // - // Note: have similar logic in make_module_sidebuild(). + // Note: have similar logic in make_{module,header}_sidebuild(). 
// size_t j (start); for (prerequisite_member p: group_prerequisite_members (a, t)) @@ -1567,7 +1903,11 @@ namespace build2 // Most of the time we will have just a single source so fast- // path that case. // - if (p1.is_a (mod ? *x_mod : x_src) || p1.is_a<c> ()) + if (mod + ? p1.is_a (*x_mod) + : (p1.is_a (x_src) || p1.is_a<c> () || + (x_asp != nullptr && p1.is_a (*x_asp)) || + (x_obj != nullptr && (p1.is_a (*x_obj) || p1.is_a<m> ())))) { src = true; continue; // Check the rest of the prerequisites. @@ -1580,8 +1920,12 @@ namespace build2 p1.is_a<libx> () || p1.is_a<liba> () || p1.is_a<libs> () || p1.is_a<libux> () || p1.is_a<bmi> () || p1.is_a<bmix> () || - (p.is_a (mod ? *x_mod : x_src) && x_header (p1)) || - (p.is_a<c> () && p1.is_a<h> ())) + ((mod || + p.is_a (x_src) || + (x_asp != nullptr && p.is_a (*x_asp)) || + (x_obj != nullptr && p.is_a (*x_obj))) && x_header (p1)) || + ((p.is_a<c> () || + (x_obj != nullptr && p.is_a<m> ())) && p1.is_a<h> ())) continue; fail << "synthesized dependency for prerequisite " << p @@ -1594,14 +1938,14 @@ namespace build2 if (!src) fail << "synthesized dependency for prerequisite " << p << " would be incompatible with existing target " << *pt << - info << "no existing c/" << x_name << " source prerequisite" << + info << "no existing c/" << x_lang << " source prerequisite" << info << "specify corresponding " << rtt.name << "{} " << "dependency explicitly"; - m = 2; // Needs verification. + mk = 2; // Needs verification. } } - else // lib*{} + else // lib*{} or update during match { // If this is a static library, see if we need to link it whole. // Note that we have to do it after match since we rely on the @@ -1610,6 +1954,8 @@ namespace build2 bool u; if ((u = pt->is_a<libux> ()) || pt->is_a<liba> ()) { + // Note: go straight for the public variable pool. + // const variable& var (ctx.var_pool["bin.whole"]); // @@ Cache. // See the bin module for the lookup semantics discussion. 
Note @@ -1619,7 +1965,7 @@ namespace build2 lookup l (p.prerequisite.vars[var]); if (!l.defined ()) - l = pt->lookup_original (var, true).first; + l = pt->lookup_original (var, true /* target_only */).first; if (!l.defined ()) { @@ -1638,7 +1984,7 @@ namespace build2 } } - mark (pt, m); + mark (pt, mk); } // Process prerequisites, pass 3: match everything and verify chains. @@ -1651,10 +1997,10 @@ namespace build2 i = start; for (prerequisite_member p: group_prerequisite_members (a, t)) { - bool adhoc (pts[i].adhoc); + bool adhoc (pts[i].adhoc ()); const target*& pt (pts[i++]); - uint8_t m; + uint8_t mk; if (pt == nullptr) { @@ -1664,10 +2010,15 @@ namespace build2 continue; pt = &p.search (t); - m = 1; // Mark for completion. + mk = 1; // Mark for completion. } - else if ((m = unmark (pt)) != 0) + else { + mk = unmark (pt); + + if (mk == 0) + continue; // Already matched. + // If this is a library not to be cleaned, we can finally blank it // out. // @@ -1679,7 +2030,7 @@ namespace build2 } match_async (a, *pt, ctx.count_busy (), t[a].task_count); - mark (pt, m); + mark (pt, mk); } wg.wait (); @@ -1694,15 +2045,15 @@ namespace build2 // Skipped or not marked for completion. // - uint8_t m; - if (pt == nullptr || (m = unmark (pt)) == 0) + uint8_t mk; + if (pt == nullptr || (mk = unmark (pt)) == 0) continue; - build2::match (a, *pt); + match_complete (a, *pt); // Nothing else to do if not marked for verification. // - if (m == 1) + if (mk == 1) continue; // Finish verifying the existing dependency (which is now matched) @@ -1714,7 +2065,11 @@ namespace build2 for (prerequisite_member p1: group_prerequisite_members (a, *pt)) { - if (p1.is_a (mod ? *x_mod : x_src) || p1.is_a<c> ()) + if (mod + ? p1.is_a (*x_mod) + : (p1.is_a (x_src) || p1.is_a<c> () || + (x_asp != nullptr && p1.is_a (*x_asp)) || + (x_obj != nullptr && (p1.is_a (*x_obj) || p1.is_a<m> ())))) { // Searching our own prerequisite is ok, p1 must already be // resolved. 
@@ -1750,14 +2105,11 @@ namespace build2 switch (a) { - case perform_update_id: return [this] (action a, const target& t) - { - return perform_update (a, t); - }; - case perform_clean_id: return [this] (action a, const target& t) - { - return perform_clean (a, t); - }; + // Keep the recipe (which is match_data) after execution to allow the + // install rule to examine it. + // + case perform_update_id: t.keep_data (a); // Fall through. + case perform_clean_id: return md; default: return noop_recipe; // Configure update. } } @@ -1804,7 +2156,7 @@ namespace build2 const target* const* lc, const small_vector<reference_wrapper<const string>, 2>& ns, lflags f, - const string* type, // cc.type + const string* type, // Whole cc.type in the <lang>[,...] form. bool) { // Note: see also make_header_sidebuild(). @@ -1825,6 +2177,13 @@ namespace build2 // that range of elements to the end of args. See GitHub issue #114 // for details. // + // One case where we can prune the graph is if the library is + // recursively-binless. It's tempting to wish that we can do the same + // just for binless, but alas that's not the case: we have to hoist + // its binful interface dependency because, for example, it must + // appear after the preceding static library of which this binless + // library is a dependency. + // // From the process_libraries() semantics we know that this callback // is always called and always after the options callbacks. // @@ -1836,8 +2195,13 @@ namespace build2 { // Hoist the elements corresponding to this library to the end. // Note that we cannot prune the traversal since we need to see the - // last occurrence of each library. + // last occurrence of each library, unless the library is + // recursively-binless (in which case there will be no need to + // hoist since there can be no libraries among the elements). 
// + if (type != nullptr && recursively_binless (*type)) + return false; + d.ls.hoist (d.args, *al); return true; } @@ -1883,9 +2247,12 @@ namespace build2 // install or both not. We can only do this if the library is build // by our link_rule. // - else if (d.for_install && type != nullptr && *type != "cc") + else if (d.for_install && + type != nullptr && + *type != "cc" && + type->compare (0, 3, "cc,") != 0) { - auto& md (l->data<link_rule::match_data> ()); + auto& md (l->data<link_rule::match_data> (d.a)); assert (md.for_install); // Must have been executed. // The user will get the target name from the context info. @@ -2043,6 +2410,8 @@ namespace build2 // if (const target* g = exp && l.is_a<libs> () ? l.group : &l) { + // Note: go straight for the public variable pool. + // const variable& var ( com ? (exp ? c_export_loptions : c_loptions) @@ -2061,7 +2430,9 @@ namespace build2 process_libraries (a, bs, li, sys_lib_dirs, l, la, - lf, imp, lib, opt, self, + lf, imp, lib, opt, + self, + false /* proc_opt_group */, lib_cache); } @@ -2075,9 +2446,14 @@ namespace build2 // Use -rpath-link only on targets that support it (Linux, *BSD). Note // that we don't really need it for top-level libraries. // + // Note that more recent versions of FreeBSD are using LLVM lld without + // any mentioning of -rpath-link in the man pages. 
+ // + auto have_link = [this] () {return tclass == "linux" || tclass == "bsd";}; + if (link) { - if (tclass != "linux" && tclass != "bsd") + if (!have_link ()) return; } @@ -2107,8 +2483,56 @@ namespace build2 { rpathed_libraries& ls; strings& args; - bool link; - } d {ls, args, link}; + bool rpath; + bool rpath_link; + } d {ls, args, false, false}; + + if (link) + d.rpath_link = true; + else + { + // While one would naturally expect -rpath to be a superset of + // -rpath-link, according to GNU ld: + // + // "The -rpath option is also used when locating shared objects which + // are needed by shared objects explicitly included in the link; see + // the description of the -rpath-link option. Searching -rpath in + // this way is only supported by native linkers and cross linkers + // which have been configured with the --with-sysroot option." + // + // So we check if this is cross-compilation and request both options + // if that's the case (we have no easy way of detecting whether the + // linker has been configured with the --with-sysroot option, whatever + // that means, so we will just assume the worst case). + // + d.rpath = true; + + if (have_link ()) + { + // Detecting cross-compilation is not as easy as it seems. Comparing + // complete target triplets proved too strict. For example, we may be + // running on x86_64-apple-darwin17.7.0 while the compiler is + // targeting x86_64-apple-darwin17.3.0. Also, there is the whole i?86 + // family of CPUs which, at least for linking, should probably be + // considered the same. 
+ // + const target_triplet& h (*bs.ctx.build_host); + const target_triplet& t (ctgt); + + auto x86 = [] (const string& c) + { + return (c.size () == 4 && + c[0] == 'i' && + (c[1] >= '3' && c[1] <= '6') && + c[2] == '8' && + c[3] == '6'); + }; + + if (t.system != h.system || + (t.cpu != h.cpu && !(x86 (t.cpu) && x86 (h.cpu)))) + d.rpath_link = true; + } + } auto lib = [&d, this] ( const target* const* lc, @@ -2130,13 +2554,22 @@ namespace build2 auto append = [&d] (const string& f) { - string o (d.link ? "-Wl,-rpath-link," : "-Wl,-rpath,"); - size_t p (path::traits_type::rfind_separator (f)); assert (p != string::npos); - o.append (f, 0, (p != 0 ? p : 1)); // Don't include trailing slash. - d.args.push_back (move (o)); + if (d.rpath) + { + string o ("-Wl,-rpath,"); + o.append (f, 0, (p != 0 ? p : 1)); // Don't include trailing slash. + d.args.push_back (move (o)); + } + + if (d.rpath_link) + { + string o ("-Wl,-rpath-link,"); + o.append (f, 0, (p != 0 ? p : 1)); + d.args.push_back (move (o)); + } }; if (l != nullptr) @@ -2213,7 +2646,10 @@ namespace build2 process_libraries (a, bs, li, sys_lib_dirs, l, la, 0 /* lflags */, - imp, lib, nullptr, false /* self */, lib_cache); + imp, lib, nullptr, + false /* self */, + false /* proc_opt_group */, + lib_cache); } void link_rule:: @@ -2275,7 +2711,7 @@ namespace build2 // Filter link.exe noise (msvc.cxx). // void - msvc_filter_link (ifdstream&, const file&, otype); + msvc_filter_link (diag_buffer&, const file&, otype); // Translate target CPU to the link.exe/lib.exe /MACHINE option. 
// @@ -2283,7 +2719,7 @@ namespace build2 msvc_machine (const string& cpu); // msvc.cxx target_state link_rule:: - perform_update (action a, const target& xt) const + perform_update (action a, const target& xt, match_data& md) const { tracer trace (x, "link_rule::perform_update"); @@ -2295,8 +2731,6 @@ namespace build2 const scope& bs (t.base_scope ()); const scope& rs (*bs.root_scope ()); - match_data& md (t.data<match_data> ()); - // Unless the outer install rule signalled that this is update for // install, signal back that we've performed plain update. // @@ -2325,14 +2759,33 @@ namespace build2 // Note that execute_prerequisites() blanks out all the ad hoc // prerequisites so we don't need to worry about them from now on. // + // There is an interesting trade-off between the straight and reverse + // execution. With straight we may end up with inaccurate progress if + // most of our library prerequisites (typically specified last) are + // already up to date. In this case, the progress will first increase + // slowly as we compile this target's source files and then jump + // straight to 100% as we "realize" that all the libraries (and all + // their prerequisites) are already up to date. + // + // Switching to reverse fixes this but messes up incremental building: + // now instead of starting to compile source files right away, we will + // first spend some time making sure all the libraries are up to date + // (which, in case of an error in the source code, will be a complete + // waste). + // + // There doesn't seem to be an easy way to distinguish between + // incremental and from-scratch builds and on balance fast incremental + // builds feel more important. 
+ // target_state ts; - if (optional<target_state> s = - execute_prerequisites (a, - t, - mt, - [] (const target&, size_t) {return false;})) + if (optional<target_state> s = execute_prerequisites ( + a, t, + mt, + [] (const target&, size_t) {return false;})) + { ts = *s; + } else { // An ad hoc prerequisite renders us out-of-date. Let's update from @@ -2346,7 +2799,7 @@ namespace build2 // those that don't match. Note that we have to do it after updating // prerequisites to keep the dependency counts straight. // - if (const variable* var_fi = ctx.var_pool.find ("for_install")) + if (const variable* var_fi = rs.var_pool ().find ("for_install")) { // Parallel prerequisites/prerequisite_targets loop. // @@ -2379,6 +2832,13 @@ namespace build2 // install or no install, the reason is unless and until we are updating // for install, we have no idea where-to things will be installed. // + // There is a further complication: we may have no intention of + // installing the library but still need to update it for install (see + // install_scope() for background). In which case we may still not have + // the installation directories. We handle this in pkconfig_save() by + // skipping the generation of .pc files (and letting the install rule + // complain if we do end up trying to install them). + // if (for_install && lt.library () && !lt.utility) { bool la (lt.static_library ()); @@ -2392,8 +2852,12 @@ namespace build2 if (!m->is_a (la ? pca::static_type : pcs::static_type)) { - if (t.group->matched (a)) + if (operator>= (t.group->decl, target_decl::implied) // @@ VC14 + ? t.group->matched (a) + : true) + { pkgconfig_save (a, t, la, true /* common */, binless); + } else // Mark as non-existent not to confuse the install rule. // @@ -2505,14 +2969,19 @@ namespace build2 try { + // We assume that what we write to stdin is small enough to + // fit into the pipe's buffer without blocking. 
+ // process pr (rc, args, - -1 /* stdin */, - 1 /* stdout */, - 2 /* stderr */, - nullptr /* cwd */, + -1 /* stdin */, + 1 /* stdout */, + diag_buffer::pipe (ctx) /* stderr */, + nullptr /* cwd */, env_ptrs.empty () ? nullptr : env_ptrs.data ()); + diag_buffer dbuf (ctx, args[0], pr); + try { ofdstream os (move (pr.out_fd)); @@ -2536,7 +3005,8 @@ namespace build2 // was caused by that and let run_finish() deal with it. } - run_finish (args, pr); + dbuf.read (); + run_finish (dbuf, args, pr, 2 /* verbosity */); } catch (const process_error& e) { @@ -2591,6 +3061,8 @@ namespace build2 { // For VC we use link.exe directly. // + // Note: go straight for the public variable pool. + // const string& cs ( cast<string> ( rs[tsys == "win32-msvc" @@ -2684,6 +3156,9 @@ namespace build2 // probably safe to assume that the two came from the same version // of binutils/LLVM. // + // @@ Note also that GNU ar deprecated -T in favor of --thin in + // version 2.38. + // if (lt.utility) { const string& id (cast<string> (rs["bin.ar.id"])); @@ -2797,10 +3272,72 @@ namespace build2 rpath_libraries (sargs, bs, a, t, li, for_install /* link */); lookup l; - if ((l = t["bin.rpath"]) && !l->empty ()) + { + // See if we need to make the specified paths relative using the + // $ORIGIN (Linux, BSD) or @loader_path (Mac OS) mechanisms. + // + optional<dir_path> origin; + if (for_install && cast_false<bool> (rs["install.relocatable"])) + { + // Note that both $ORIGIN and @loader_path will be expanded to + // the path of the binary that we are building (executable or + // shared library) as opposed to top-level executable. + // + path p (install::resolve_file (t)); + + // If the file is not installable then the install.relocatable + // semantics does not apply, naturally. 
+ // + if (!p.empty ()) + origin = p.directory (); + } + + bool origin_used (false); for (const dir_path& p: cast<dir_paths> (l)) - sargs.push_back ("-Wl,-rpath," + p.string ()); + { + string o ("-Wl,-rpath,"); + + // Note that we only rewrite absolute paths so if the user + // specified $ORIGIN or @loader_path manually, we will pass it + // through as is. + // + if (origin && p.absolute ()) + { + dir_path l; + try + { + l = p.relative (*origin); + } + catch (const invalid_path&) + { + fail << "unable to make rpath " << p << " relative to " + << *origin << + info << "required for relocatable installation"; + } + + o += (tclass == "macos" ? "@loader_path" : "$ORIGIN"); + + if (!l.empty ()) + { + o += path_traits::directory_separator; + o += l.string (); + } + + origin_used = true; + } + else + o += p.string (); + + sargs.push_back (move (o)); + } + + // According to the Internet, `-Wl,-z,origin` is not needed except + // potentially for older BSDs. + // + if (origin_used && tclass == "bsd") + sargs.push_back ("-Wl,-z,origin"); + } if ((l = t["bin.rpath_link"]) && !l->empty ()) { @@ -2834,25 +3371,24 @@ namespace build2 // Extra system library dirs (last). // - assert (sys_lib_dirs_extra <= sys_lib_dirs.size ()); + assert (sys_lib_dirs_mode + sys_lib_dirs_extra <= sys_lib_dirs.size ()); + + // Note that the mode options are added as part of cmode. + // + auto b (sys_lib_dirs.begin () + sys_lib_dirs_mode); + auto x (b + sys_lib_dirs_extra); if (tsys == "win32-msvc") { // If we have no LIB environment variable set, then we add all of // them. But we want extras to come first. // - // Note that the mode options are added as part of cmode. 
- // - auto b (sys_lib_dirs.begin () + sys_lib_dirs_mode); - auto m (sys_lib_dirs.begin () + sys_lib_dirs_extra); - auto e (sys_lib_dirs.end ()); - - for (auto i (m); i != e; ++i) + for (auto i (b); i != x; ++i) sargs1.push_back ("/LIBPATH:" + i->string ()); if (!getenv ("LIB")) { - for (auto i (b); i != m; ++i) + for (auto i (x), e (sys_lib_dirs.end ()); i != e; ++i) sargs1.push_back ("/LIBPATH:" + i->string ()); } @@ -2863,7 +3399,7 @@ namespace build2 append_option_values ( args, "-L", - sys_lib_dirs.begin () + sys_lib_dirs_extra, sys_lib_dirs.end (), + b, x, [] (const dir_path& d) {return d.string ().c_str ();}); } } @@ -3048,6 +3584,10 @@ namespace build2 // path relt (relative (tp)); + path reli; // Import library. + if (lt.shared_library () && (tsys == "win32-msvc" || tsys == "mingw32")) + reli = relative (find_adhoc_member<libi> (t)->path ()); + const process_path* ld (nullptr); if (lt.static_library ()) { @@ -3179,7 +3719,7 @@ namespace build2 // derived from the import library by changing the extension. // Lucky for us -- there is no option to name it. // - out2 += relative (find_adhoc_member<libi> (t)->path ()).string (); + out2 += reli.string (); } else { @@ -3192,14 +3732,17 @@ namespace build2 // If we have /DEBUG then name the .pdb file. It is an ad hoc group // member. 
// - if (find_option ("/DEBUG", args, true)) + if (const char* o = find_option_prefix ("/DEBUG", args, true)) { - const file& pdb ( - *find_adhoc_member<file> (t, *bs.find_target_type ("pdb"))); + if (icasecmp (o, "/DEBUG:NONE") != 0) + { + const file& pdb ( + *find_adhoc_member<file> (t, *bs.find_target_type ("pdb"))); - out1 = "/PDB:"; - out1 += relative (pdb.path ()).string (); - args.push_back (out1.c_str ()); + out1 = "/PDB:"; + out1 += relative (pdb.path ()).string (); + args.push_back (out1.c_str ()); + } } out = "/OUT:" + relt.string (); @@ -3213,6 +3756,8 @@ namespace build2 { ld = &cpath; + append_diag_color_options (args); + // Add the option that triggers building a shared library and // take care of any extras (e.g., import library). // @@ -3228,8 +3773,7 @@ namespace build2 // On Windows libs{} is the DLL and an ad hoc group member // is the import library. // - const file& imp (*find_adhoc_member<libi> (t)); - out = "-Wl,--out-implib=" + relative (imp.path ()).string (); + out = "-Wl,--out-implib=" + reli.string (); args.push_back (out.c_str ()); } } @@ -3387,12 +3931,25 @@ namespace build2 } if (verb == 1) - text << (lt.static_library () ? "ar " : "ld ") << t; + print_diag (lt.static_library () ? "ar" : "ld", t); else if (verb == 2) print_process (args); + // Do any necessary fixups to the command line to make it runnable. + // + // Notice the split in the diagnostics: at verbosity level 1 we print + // the "logical" command line while at level 2 and above -- what we are + // actually executing. + // + // We also need to save the original for the diag_buffer::close() call + // below if at verbosity level 1. + // + cstrings oargs; + // Adjust linker parallelism. // + // Note that we are not going to bother with oargs for this. 
+ // string jobs_arg; scheduler::alloc_guard jobs_extra; @@ -3412,7 +3969,7 @@ namespace build2 auto i (find_option_prefix ("-flto", args.rbegin (), args.rend ())); if (i != args.rend () && strcmp (*i, "-flto=auto") == 0) { - jobs_extra = scheduler::alloc_guard (ctx.sched, 0); + jobs_extra = scheduler::alloc_guard (*ctx.sched, 0); jobs_arg = "-flto=" + to_string (1 + jobs_extra.n); *i = jobs_arg.c_str (); } @@ -3431,7 +3988,7 @@ namespace build2 strcmp (*i, "-flto=thin") == 0 && !find_option_prefix ("-flto-jobs=", args)) { - jobs_extra = scheduler::alloc_guard (ctx.sched, 0); + jobs_extra = scheduler::alloc_guard (*ctx.sched, 0); jobs_arg = "-flto-jobs=" + to_string (1 + jobs_extra.n); args.insert (i.base (), jobs_arg.c_str ()); // After -flto=thin. } @@ -3443,12 +4000,6 @@ namespace build2 } } - // Do any necessary fixups to the command line to make it runnable. - // - // Notice the split in the diagnostics: at verbosity level 1 we print - // the "logical" command line while at level 2 and above -- what we are - // actually executing. - // // On Windows we need to deal with the command line length limit. The // best workaround seems to be passing (part of) the command line in an // "options file" ("response file" in Microsoft's terminology). Both @@ -3534,19 +4085,20 @@ namespace build2 fail << "unable to write to " << f << ": " << e; } + if (verb == 1) + oargs = args; + // Replace input arguments with @file. // targ = '@' + f.string (); args.resize (args_input); args.push_back (targ.c_str()); args.push_back (nullptr); - - //@@ TODO: leave .t file if linker failed and verb > 2? } } #endif - if (verb > 2) + if (verb >= 3) print_process (args); // Remove the target file if any of the subsequent (after the linker) @@ -3564,51 +4116,52 @@ namespace build2 { // VC tools (both lib.exe and link.exe) send diagnostics to stdout. // Also, link.exe likes to print various gratuitous messages. 
So for - // link.exe we redirect stdout to a pipe, filter that noise out, and - // send the rest to stderr. + // link.exe we filter that noise out. // // For lib.exe (and any other insane linker that may try to pull off // something like this) we are going to redirect stdout to stderr. // For sane compilers this should be harmless. // // Note that we don't need this for LLD's link.exe replacement which - // is quiet. + // is thankfully quiet. // bool filter (tsys == "win32-msvc" && !lt.static_library () && cast<string> (rs["bin.ld.id"]) != "msvc-lld"); process pr (*ld, - args.data (), - 0 /* stdin */, - (filter ? -1 : 2) /* stdout */, - 2 /* stderr */, - nullptr /* cwd */, + args, + 0 /* stdin */, + 2 /* stdout */, + diag_buffer::pipe (ctx, filter /* force */) /* stderr */, + nullptr /* cwd */, env_ptrs.empty () ? nullptr : env_ptrs.data ()); + diag_buffer dbuf (ctx, args[0], pr); + if (filter) + msvc_filter_link (dbuf, t, ot); + + dbuf.read (); + { - try - { - ifdstream is ( - move (pr.in_ofd), fdstream_mode::text, ifdstream::badbit); + bool e (pr.wait ()); - msvc_filter_link (is, t, ot); +#ifdef _WIN32 + // Keep the options file if we have shown it. + // + if (!e && verb >= 3) + trm.cancel (); +#endif - // If anything remains in the stream, send it all to stderr. - // Note that the eof check is important: if the stream is at - // eof, this and all subsequent writes to the diagnostics stream - // will fail (and you won't see a thing). - // - if (is.peek () != ifdstream::traits_type::eof ()) - diag_stream_lock () << is.rdbuf (); + dbuf.close (oargs.empty () ? args : oargs, + *pr.exit, + 1 /* verbosity */); - is.close (); - } - catch (const io_error&) {} // Assume exits with error. + if (!e) + throw failed (); } - run_finish (args, pr); jobs_extra.deallocate (); } catch (const process_error& e) @@ -3631,12 +4184,24 @@ namespace build2 throw failed (); } - // Clean up executable's import library (see above for details). 
+ // Clean up executable's import library (see above for details). And + // make sure we have an import library for a shared library. // - if (lt.executable () && tsys == "win32-msvc") + if (tsys == "win32-msvc") { - try_rmfile (relt + ".lib", true /* ignore_errors */); - try_rmfile (relt + ".exp", true /* ignore_errors */); + if (lt.executable ()) + { + try_rmfile (relt + ".lib", true /* ignore_errors */); + try_rmfile (relt + ".exp", true /* ignore_errors */); + } + else if (lt.shared_library ()) + { + if (!file_exists (reli, + false /* follow_symlinks */, + true /* ignore_error */)) + fail << "linker did not produce import library " << reli << + info << "perhaps this library does not export any symbols?"; + } } // Set executable bit on the .js file so that it can be run with a @@ -3668,10 +4233,13 @@ namespace build2 print_process (args); if (!ctx.dry_run) - run (rl, + { + run (ctx, + rl, args, - dir_path () /* cwd */, + 1 /* finish_verbosity */, env_ptrs.empty () ? nullptr : env_ptrs.data ()); + } } // For Windows generate (or clean up) rpath-emulating assembly. @@ -3776,12 +4344,11 @@ namespace build2 } target_state link_rule:: - perform_clean (action a, const target& xt) const + perform_clean (action a, const target& xt, match_data& md) const { const file& t (xt.as<file> ()); ltype lt (link_type (t)); - const match_data& md (t.data<match_data> ()); clean_extras extras; clean_adhoc_extras adhoc_extras; @@ -3854,5 +4421,25 @@ namespace build2 return perform_clean_extra (a, t, extras, adhoc_extras); } + + const target* link_rule:: + import (const prerequisite_key& pk, + const optional<string>&, + const location&) const + { + tracer trace (x, "link_rule::import"); + + // @@ TODO: do we want to make metadata loading optional? 
+ // + optional<dir_paths> usr_lib_dirs; + const target* r (search_library (nullopt /* action */, + sys_lib_dirs, usr_lib_dirs, + pk)); + + if (r == nullptr) + l4 ([&]{trace << "unable to find installed library " << pk;}); + + return r; + } } } diff --git a/libbuild2/cc/link-rule.hxx b/libbuild2/cc/link-rule.hxx index c6d06d2..9b491c2 100644 --- a/libbuild2/cc/link-rule.hxx +++ b/libbuild2/cc/link-rule.hxx @@ -18,11 +18,13 @@ namespace build2 { namespace cc { - class LIBBUILD2_CC_SYMEXPORT link_rule: public simple_rule, virtual common + class LIBBUILD2_CC_SYMEXPORT link_rule: public rule, virtual common { public: link_rule (data&&); + struct match_data; + struct match_result { bool seen_x = false; @@ -46,18 +48,21 @@ namespace build2 match (action, const target&, const target*, otype, bool) const; virtual bool - match (action, target&, const string&) const override; + match (action, target&, const string&, match_extra&) const override; virtual recipe - apply (action, target&) const override; + apply (action, target&, match_extra&) const override; target_state - perform_update (action, const target&) const; + perform_update (action, const target&, match_data&) const; target_state - perform_clean (action, const target&) const; + perform_clean (action, const target&, match_data&) const; - using simple_rule::match; // To make Clang happy. + virtual const target* + import (const prerequisite_key&, + const optional<string>&, + const location&) const override; public: // Library handling. @@ -228,9 +233,9 @@ namespace build2 static void functions (function_family&, const char*); // functions.cxx - private: - friend class install_rule; - friend class libux_install_rule; + // Implementation details. + // + public: // Shared library paths. // @@ -273,6 +278,9 @@ namespace build2 struct match_data { + explicit + match_data (const link_rule& r): rule (r) {} + // The "for install" condition is signalled to us by install_rule when // it is matched for the update operation. 
It also verifies that if we // have already been executed, then it was for install. @@ -307,10 +315,21 @@ namespace build2 size_t start; // Parallel prerequisites/prerequisite_targets start. link_rule::libs_paths libs_paths; + + const link_rule& rule; + + target_state + operator() (action a, const target& t) + { + return a == perform_update_id + ? rule.perform_update (a, t, *this) + : rule.perform_clean (a, t, *this); + } }; // Windows rpath emulation (windows-rpath.cxx). // + private: struct windows_dll { reference_wrapper<const string> dll; diff --git a/libbuild2/cc/module.cxx b/libbuild2/cc/module.cxx index 871cfb6..f33ddf4 100644 --- a/libbuild2/cc/module.cxx +++ b/libbuild2/cc/module.cxx @@ -30,6 +30,8 @@ namespace build2 { tracer trace (x, "guess_init"); + context& ctx (rs.ctx); + bool cc_loaded (cast_false<bool> (rs["cc.core.guess.loaded"])); // Adjust module priority (compiler). Also order cc module before us @@ -41,7 +43,10 @@ namespace build2 config::save_module (rs, x, 250); - auto& vp (rs.var_pool ()); + // All the variables we enter are qualified so go straight for the + // public variable pool. + // + auto& vp (rs.var_pool (true /* public */)); // Must already exist. // @@ -154,6 +159,7 @@ namespace build2 // we are now folding *.std options into mode options. // x_info = &build2::cc::guess ( + ctx, x, x_lang, rs.root_extra->environment_checksum, move (xc), @@ -180,7 +186,8 @@ namespace build2 if (config_sub) { - ct = run<string> (3, + ct = run<string> (ctx, + 3, *config_sub, xi.target.c_str (), [] (string& l, bool) {return move (l);}); @@ -265,9 +272,9 @@ namespace build2 // if (!cc_loaded) { - // Prepare configuration hints. + // Prepare configuration hints (pretend it belongs to root scope). // - variable_map h (rs.ctx); + variable_map h (rs); // Note that all these variables have already been registered. 
// @@ -376,7 +383,9 @@ namespace build2 // if (!cast_false<bool> (rs["cc.core.config.loaded"])) { - variable_map h (rs.ctx); + // Prepare configuration hints (pretend it belongs to root scope). + // + variable_map h (rs); if (!xi.bin_pattern.empty ()) h.assign ("config.bin.pattern") = xi.bin_pattern; @@ -640,8 +649,8 @@ namespace build2 sys_hdr_dirs_mode = hdr_dirs.second; sys_mod_dirs_mode = mod_dirs ? mod_dirs->second : 0; - sys_lib_dirs_extra = lib_dirs.first.size (); - sys_hdr_dirs_extra = hdr_dirs.first.size (); + sys_lib_dirs_extra = 0; + sys_hdr_dirs_extra = 0; #ifndef _WIN32 // Add /usr/local/{include,lib}. We definitely shouldn't do this if we @@ -657,11 +666,11 @@ namespace build2 // on the next invocation. // { - auto& is (hdr_dirs.first); + auto& hs (hdr_dirs.first); auto& ls (lib_dirs.first); - bool ui (find (is.begin (), is.end (), usr_inc) != is.end ()); - bool uli (find (is.begin (), is.end (), usr_loc_inc) != is.end ()); + bool ui (find (hs.begin (), hs.end (), usr_inc) != hs.end ()); + bool uli (find (hs.begin (), hs.end (), usr_loc_inc) != hs.end ()); #ifdef __APPLE__ // On Mac OS starting from 10.14 there is no longer /usr/include. @@ -686,7 +695,7 @@ namespace build2 // if (!ui && !uli) { - for (const dir_path& d: is) + for (const dir_path& d: hs) { if (path_match (d, a_usr_inc)) { @@ -700,18 +709,29 @@ namespace build2 { bool ull (find (ls.begin (), ls.end (), usr_loc_lib) != ls.end ()); - // Many platforms don't search in /usr/local/lib by default (but do - // for headers in /usr/local/include). So add it as the last option. + // Many platforms don't search in /usr/local/lib by default but do + // for headers in /usr/local/include. + // + // Note that customarily /usr/local/include is searched before + // /usr/include so we add /usr/local/lib before built-in entries + // (there isn't really a way to add it after since all we can do is + // specify it with -L). 
// if (!ull && exists (usr_loc_lib, true /* ignore_error */)) - ls.push_back (usr_loc_lib); + { + ls.insert (ls.begin () + sys_lib_dirs_mode, usr_loc_lib); + ++sys_lib_dirs_extra; + } // FreeBSD is at least consistent: it searches in neither. Quoting // its wiki: "FreeBSD can't even find libraries that it installed." // So let's help it a bit. // if (!uli && exists (usr_loc_inc, true /* ignore_error */)) - is.push_back (usr_loc_inc); + { + hs.insert (hs.begin () + sys_hdr_dirs_mode, usr_loc_inc); + ++sys_hdr_dirs_extra; + } } } #endif @@ -815,8 +835,11 @@ namespace build2 dr << "\n hdr dirs"; for (size_t i (0); i != incs.size (); ++i) { - if (i == sys_hdr_dirs_extra) + if ((sys_hdr_dirs_mode != 0 && i == sys_hdr_dirs_mode) || + (sys_hdr_dirs_extra != 0 && + i == sys_hdr_dirs_extra + sys_hdr_dirs_mode)) dr << "\n --"; + dr << "\n " << incs[i]; } } @@ -826,8 +849,11 @@ namespace build2 dr << "\n lib dirs"; for (size_t i (0); i != libs.size (); ++i) { - if (i == sys_lib_dirs_extra) + if ((sys_lib_dirs_mode != 0 && i == sys_lib_dirs_mode) || + (sys_lib_dirs_extra != 0 && + i == sys_lib_dirs_extra + sys_lib_dirs_mode)) dr << "\n --"; + dr << "\n " << libs[i]; } } @@ -953,6 +979,9 @@ namespace build2 { using namespace install; + // Note: not registering x_obj or x_asp (they are registered + // seperately by the respective optional submodules). 
+ // rs.insert_target_type (x_src); auto insert_hdr = [&rs, install_loaded] (const target_type& tt) @@ -1083,30 +1112,30 @@ namespace build2 { const install_rule& ir (*this); - r.insert<exe> (perform_install_id, x_install, ir); - r.insert<exe> (perform_uninstall_id, x_uninstall, ir); + r.insert<exe> (perform_install_id, x_install, ir); + r.insert<exe> (perform_uninstall_id, x_install, ir); - r.insert<liba> (perform_install_id, x_install, ir); - r.insert<liba> (perform_uninstall_id, x_uninstall, ir); + r.insert<liba> (perform_install_id, x_install, ir); + r.insert<liba> (perform_uninstall_id, x_install, ir); if (s) { - r.insert<libs> (perform_install_id, x_install, ir); - r.insert<libs> (perform_uninstall_id, x_uninstall, ir); + r.insert<libs> (perform_install_id, x_install, ir); + r.insert<libs> (perform_uninstall_id, x_install, ir); } const libux_install_rule& lr (*this); - r.insert<libue> (perform_install_id, x_install, lr); - r.insert<libue> (perform_uninstall_id, x_uninstall, lr); + r.insert<libue> (perform_install_id, x_install, lr); + r.insert<libue> (perform_uninstall_id, x_install, lr); - r.insert<libua> (perform_install_id, x_install, lr); - r.insert<libua> (perform_uninstall_id, x_uninstall, lr); + r.insert<libua> (perform_install_id, x_install, lr); + r.insert<libua> (perform_uninstall_id, x_install, lr); if (s) { - r.insert<libus> (perform_install_id, x_install, lr); - r.insert<libus> (perform_uninstall_id, x_uninstall, lr); + r.insert<libus> (perform_install_id, x_install, lr); + r.insert<libus> (perform_uninstall_id, x_install, lr); } } } diff --git a/libbuild2/cc/module.hxx b/libbuild2/cc/module.hxx index a91d723..2a8611b 100644 --- a/libbuild2/cc/module.hxx +++ b/libbuild2/cc/module.hxx @@ -4,6 +4,8 @@ #ifndef LIBBUILD2_CC_MODULE_HXX #define LIBBUILD2_CC_MODULE_HXX +#include <unordered_map> + #include <libbuild2/types.hxx> #include <libbuild2/utility.hxx> @@ -78,6 +80,37 @@ namespace build2 bool new_config = false; // See guess() and init() for 
details. + // Header cache (see compile_rule::enter_header()). + // + // We place it into the config module so that we have an option of + // sharing it for the entire weak amalgamation. + // + public: + // Keep the hash in the key. This way we can compute it outside of the + // lock. + // + struct header_key + { + path file; + size_t hash; + + friend bool + operator== (const header_key& x, const header_key& y) + { + return x.file == y.file; // Note: hash was already compared. + } + }; + + struct header_key_hasher + { + size_t operator() (const header_key& k) const {return k.hash;} + }; + + mutable shared_mutex header_map_mutex; + mutable std::unordered_map<header_key, + const file*, + header_key_hasher> header_map; + private: // Defined in gcc.cxx. // @@ -105,10 +138,10 @@ namespace build2 { public: explicit - module (data&& d) + module (data&& d, const scope& rs) : common (move (d)), link_rule (move (d)), - compile_rule (move (d)), + compile_rule (move (d), rs), install_rule (move (d), *this), libux_install_rule (move (d), *this) {} diff --git a/libbuild2/cc/msvc.cxx b/libbuild2/cc/msvc.cxx index f95cab0..3a7fd6f 100644 --- a/libbuild2/cc/msvc.cxx +++ b/libbuild2/cc/msvc.cxx @@ -164,18 +164,21 @@ namespace build2 // Filter cl.exe and link.exe noise. // + // Note: must be followed with the dbuf.read() call. + // void - msvc_filter_cl (ifdstream& is, const path& src) + msvc_filter_cl (diag_buffer& dbuf, const path& src) + try { // While it appears VC always prints the source name (event if the // file does not exist), let's do a sanity check. Also handle the // command line errors/warnings which come before the file name. 
// - for (string l; !eof (getline (is, l)); ) + for (string l; !eof (getline (dbuf.is, l)); ) { if (l != src.leaf ().string ()) { - diag_stream_lock () << l << endl; + dbuf.write (l, true /* newline */); if (msvc_sense_diag (l, 'D').first != string::npos) continue; @@ -184,14 +187,19 @@ namespace build2 break; } } + catch (const io_error& e) + { + fail << "unable to read from " << dbuf.args0 << " stderr: " << e; + } void - msvc_filter_link (ifdstream& is, const file& t, otype lt) + msvc_filter_link (diag_buffer& dbuf, const file& t, otype lt) + try { // Filter lines until we encounter something we don't recognize. We also // have to assume the messages can be translated. // - for (string l; getline (is, l); ) + for (string l; getline (dbuf.is, l); ) { // " Creating library foo\foo.dll.lib and object foo\foo.dll.exp" // @@ -216,12 +224,15 @@ namespace build2 // /INCREMENTAL causes linker to sometimes issue messages but now I // can't quite reproduce it. - // - diag_stream_lock () << l << endl; + dbuf.write (l, true /* newline */); break; } } + catch (const io_error& e) + { + fail << "unable to read from " << dbuf.args0 << " stderr: " << e; + } void msvc_extract_header_search_dirs (const strings& v, dir_paths& r) @@ -253,6 +264,13 @@ namespace build2 } else continue; + + // Ignore relative paths. Or maybe we should warn? + // + if (d.relative ()) + continue; + + d.normalize (); } catch (const invalid_path& e) { @@ -260,10 +278,7 @@ namespace build2 << o << "'"; } - // Ignore relative paths. Or maybe we should warn? - // - if (!d.relative ()) - r.push_back (move (d)); + r.push_back (move (d)); } } @@ -284,6 +299,13 @@ namespace build2 d = dir_path (o, 9, string::npos); else continue; + + // Ignore relative paths. Or maybe we should warn? + // + if (d.relative ()) + continue; + + d.normalize (); } catch (const invalid_path& e) { @@ -291,10 +313,7 @@ namespace build2 << o << "'"; } - // Ignore relative paths. Or maybe we should warn? 
- // - if (!d.relative ()) - r.push_back (move (d)); + r.push_back (move (d)); } } @@ -313,7 +332,7 @@ namespace build2 { try { - r.push_back (dir_path (move (d))); + r.push_back (dir_path (move (d)).normalize ()); } catch (const invalid_path&) { @@ -379,9 +398,22 @@ namespace build2 // Inspect the file and determine if it is static or import library. // Return otype::e if it is neither (which we quietly ignore). // + static global_cache<otype> library_type_cache; + static otype library_type (const process_path& ld, const path& l) { + string key; + { + sha256 cs; + cs.append (ld.effect_string ()); + cs.append (l.string ()); + key = cs.string (); + + if (const otype* r = library_type_cache.find (key)) + return *r; + } + // The are several reasonably reliable methods to tell whether it is a // static or import library. One is lib.exe /LIST -- if there aren't any // .obj members, then it is most likely an import library (it can also @@ -422,9 +454,9 @@ namespace build2 // process pr (run_start (ld, args, - 0 /* stdin */, - -1 /* stdout */, - false /* error */)); + 0 /* stdin */, + -1 /* stdout */, + 1 /* stderr (to stdout) */)); bool obj (false), dll (false); string s; @@ -447,14 +479,11 @@ namespace build2 // libhello\hello.lib.obj // hello-0.1.0-a.0.19700101000000.dll // - // Archive member name at 746: [...]hello.dll[/][ ]* - // Archive member name at 8C70: [...]hello.lib.obj[/][ ]* - // size_t n (s.size ()); for (; n != 0 && s[n - 1] == ' '; --n) ; // Skip trailing spaces. - if (n >= 7) // At least ": X.obj" or ": X.dll". + if (n >= 5) // At least "X.obj" or "X.dll". { n -= 4; // Beginning of extension. @@ -480,7 +509,7 @@ namespace build2 io = true; } - if (!run_finish_code (args, pr, s) || io) + if (!run_finish_code (args, pr, s, 2 /* verbosity */) || io) { diag_record dr; dr << warn << "unable to detect " << l << " library type, ignoring" << @@ -489,23 +518,25 @@ namespace build2 return otype::e; } - if (obj && dll) + otype r; + if (obj != dll) + r = obj ? 
otype::a : otype::s; + else { - warn << l << " looks like hybrid static/import library, ignoring"; - return otype::e; - } + if (obj && dll) + warn << l << " looks like hybrid static/import library, ignoring"; - if (!obj && !dll) - { - warn << l << " looks like empty static or import library, ignoring"; - return otype::e; + if (!obj && !dll) + warn << l << " looks like empty static or import library, ignoring"; + + r = otype::e; } - return obj ? otype::a : otype::s; + return library_type_cache.insert (move (key), r); } template <typename T> - static T* + static pair<T*, bool> msvc_search_library (const process_path& ld, const dir_path& d, const prerequisite_key& p, @@ -551,20 +582,26 @@ namespace build2 // timestamp mt (mtime (f)); - if (mt != timestamp_nonexistent && library_type (ld, f) == lt) + pair<T*, bool> r (nullptr, true); + + if (mt != timestamp_nonexistent) { - // Enter the target. - // - T* t; - common::insert_library (p.scope->ctx, t, name, d, ld, e, exist, trace); - t->path_mtime (move (f), mt); - return t; + if (library_type (ld, f) == lt) + { + // Enter the target. + // + common::insert_library ( + p.scope->ctx, r.first, name, d, ld, e, exist, trace); + r.first->path_mtime (move (f), mt); + } + else + r.second = false; // Don't search for binless. 
} - return nullptr; + return r; } - liba* common:: + pair<bin::liba*, bool> common:: msvc_search_static (const process_path& ld, const dir_path& d, const prerequisite_key& p, @@ -572,14 +609,21 @@ namespace build2 { tracer trace (x, "msvc_search_static"); - liba* r (nullptr); + liba* a (nullptr); + bool b (true); - auto search = [&r, &ld, &d, &p, exist, &trace] ( + auto search = [&a, &b, &ld, &d, &p, exist, &trace] ( const char* pf, const char* sf) -> bool { - r = msvc_search_library<liba> ( - ld, d, p, otype::a, pf, sf, exist, trace); - return r != nullptr; + pair<liba*, bool> r (msvc_search_library<liba> ( + ld, d, p, otype::a, pf, sf, exist, trace)); + + if (r.first != nullptr) + a = r.first; + else if (!r.second) + b = false; + + return a != nullptr; }; // Try: @@ -592,10 +636,10 @@ namespace build2 search ("", "") || search ("lib", "") || search ("", "lib") || - search ("", "_static") ? r : nullptr; + search ("", "_static") ? make_pair (a, true) : make_pair (nullptr, b); } - libs* common:: + pair<bin::libs*, bool> common:: msvc_search_shared (const process_path& ld, const dir_path& d, const prerequisite_key& pk, @@ -606,12 +650,14 @@ namespace build2 assert (pk.scope != nullptr); libs* s (nullptr); + bool b (true); - auto search = [&s, &ld, &d, &pk, exist, &trace] ( + auto search = [&s, &b, &ld, &d, &pk, exist, &trace] ( const char* pf, const char* sf) -> bool { - if (libi* i = msvc_search_library<libi> ( - ld, d, pk, otype::s, pf, sf, exist, trace)) + pair<libi*, bool> r (msvc_search_library<libi> ( + ld, d, pk, otype::s, pf, sf, exist, trace)); + if (r.first != nullptr) { ulock l ( insert_library ( @@ -619,6 +665,8 @@ namespace build2 if (!exist) { + libi* i (r.first); + if (l.owns_lock ()) { s->adhoc_member = i; // We are first. 
@@ -632,6 +680,8 @@ namespace build2 s->path_mtime (path (), i->mtime ()); } } + else if (!r.second) + b = false; return s != nullptr; }; @@ -644,7 +694,7 @@ namespace build2 return search ("", "") || search ("lib", "") || - search ("", "dll") ? s : nullptr; + search ("", "dll") ? make_pair (s, true) : make_pair (nullptr, b); } } } diff --git a/libbuild2/cc/pkgconfig-libpkg-config.cxx b/libbuild2/cc/pkgconfig-libpkg-config.cxx new file mode 100644 index 0000000..ecbc019 --- /dev/null +++ b/libbuild2/cc/pkgconfig-libpkg-config.cxx @@ -0,0 +1,271 @@ +// file : libbuild2/cc/pkgconfig-libpkg-config.cxx -*- C++ -*- +// license : MIT; see accompanying LICENSE file + +#ifndef BUILD2_BOOTSTRAP + +#include <libbuild2/cc/pkgconfig.hxx> + +#include <new> // std::bad_alloc + +#include <libbuild2/diagnostics.hxx> + +namespace build2 +{ + namespace cc + { + // The package dependency traversal depth limit. + // + static const int max_depth = 100; + + static void + error_handler (unsigned int, + const char* file, + size_t line, + const char* msg, + const pkg_config_client_t*, + const void*) + { + if (file != nullptr) + { + path_name n (file); + const location l (n, static_cast<uint64_t> (line)); + error (l) << msg; + } + else + error << msg; + } + + // Deleters. + // + struct fragments_deleter + { + void operator() (pkg_config_list_t* f) const + { + pkg_config_fragment_free (f); + } + }; + + // Convert fragments to strings. Skip the -I/-L options that refer to + // system directories. 
+ // + static strings + to_strings (const pkg_config_list_t& frags, + char type, + const pkg_config_list_t& sysdirs) + { + assert (type == 'I' || type == 'L'); + + strings r; + auto add = [&r] (const pkg_config_fragment_t* frag) + { + string s; + if (frag->type != '\0') + { + s += '-'; + s += frag->type; + } + + s += frag->data; + r.push_back (move (s)); + }; + + // Option that is separated from its value, for example: + // + // -I /usr/lib + // + const pkg_config_fragment_t* opt (nullptr); + + pkg_config_node_t *node; + LIBPKG_CONFIG_FOREACH_LIST_ENTRY(frags.head, node) + { + auto frag (static_cast<const pkg_config_fragment_t*> (node->data)); + + // Add the separated option and directory, unless the latest is a + // system one. + // + if (opt != nullptr) + { + assert (frag->type == '\0'); // See pkg_config_fragment_add(). + + if (!pkg_config_path_match_list (frag->data, &sysdirs)) + { + add (opt); + add (frag); + } + + opt = nullptr; + continue; + } + + // Skip the -I/-L option if it refers to a system directory. + // + if (frag->type == type) + { + // The option is separated from a value, that will (presumably) + // follow. + // + if (*frag->data == '\0') + { + opt = frag; + continue; + } + + if (pkg_config_path_match_list (frag->data, &sysdirs)) + continue; + } + + add (frag); + } + + if (opt != nullptr) // Add the dangling option. + add (opt); + + return r; + } + + // Note that some libpkg-config functions can potentially return NULL, + // failing to allocate the required memory block. However, we will not + // check the returned value for NULL as the library doesn't do so, prior + // to filling the allocated structures. So such a code complication on our + // side would be useless. Also, for some functions the NULL result has a + // special semantics, for example "not found". @@ TODO: can we fix this? + // This is now somewhat addressed, see the eflags argument in + // pkg_config_pkg_find(). 
+ // + pkgconfig:: + pkgconfig (path_type p, + const dir_paths& pc_dirs, + const dir_paths& sys_lib_dirs, + const dir_paths& sys_hdr_dirs) + : path (move (p)) + { + auto add_dirs = [] (pkg_config_list_t& dir_list, + const dir_paths& dirs, + bool suppress_dups) + { + for (const auto& d: dirs) + pkg_config_path_add (d.string ().c_str (), &dir_list, suppress_dups); + }; + + // Initialize the client handle. + // + // Note: omit initializing the filters from environment/defaults. + // + unique_ptr<pkg_config_client_t, void (*) (pkg_config_client_t*)> c ( + pkg_config_client_new (&error_handler, + nullptr /* handler_data */, + false /* init_filters */), + [] (pkg_config_client_t* c) {pkg_config_client_free (c);}); + + if (c == nullptr) + throw std::bad_alloc (); + + add_dirs (c->filter_libdirs, sys_lib_dirs, false /* suppress_dups */); + add_dirs (c->filter_includedirs, sys_hdr_dirs, false /* suppress_dups */); + + // Note that the loaded file directory is added to the (for now empty) + // .pc file search list. Also note that loading of the dependency + // packages is delayed until the flags retrieval, and their file + // directories are not added to the search list. + // + // @@ Hm, is there a way to force this resolution? But we may not + // need this (e.g., only loading from variables). + // + unsigned int e; + pkg_ = pkg_config_pkg_find (c.get (), path.string ().c_str (), &e); + + if (pkg_ == nullptr) + { + if (e == LIBPKG_CONFIG_ERRF_OK) + fail << "package '" << path << "' not found"; + else + // Diagnostics should have already been issued except for allocation + // errors. + // + fail << "unable to load package '" << path << "'"; + } + + // Add the .pc file search directories. + // + assert (c->dir_list.length == 1); // Package file directory (see above). 
+ add_dirs (c->dir_list, pc_dirs, true /* suppress_dups */); + + client_ = c.release (); + } + + void pkgconfig:: + free () + { + assert (client_ != nullptr && pkg_ != nullptr); + + pkg_config_pkg_unref (client_, pkg_); + pkg_config_client_free (client_); + } + + strings pkgconfig:: + cflags (bool stat) const + { + assert (client_ != nullptr); // Must not be empty. + + pkg_config_client_set_flags ( + client_, + // Walk through the private package dependencies (Requires.private) + // besides the public ones while collecting the flags. Note that we do + // this for both static and shared linking. @@ Hm, I wonder why...? + // + LIBPKG_CONFIG_PKG_PKGF_SEARCH_PRIVATE | + + // Collect flags from Cflags.private besides those from Cflags for the + // static linking. + // + (stat + ? LIBPKG_CONFIG_PKG_PKGF_ADD_PRIVATE_FRAGMENTS + : 0)); + + pkg_config_list_t f = LIBPKG_CONFIG_LIST_INITIALIZER; // Empty list. + int e (pkg_config_pkg_cflags (client_, pkg_, &f, max_depth)); + + if (e != LIBPKG_CONFIG_ERRF_OK) + throw failed (); // Assume the diagnostics is issued. + + unique_ptr<pkg_config_list_t, fragments_deleter> fd (&f); + return to_strings (f, 'I', client_->filter_includedirs); + } + + strings pkgconfig:: + libs (bool stat) const + { + assert (client_ != nullptr); // Must not be empty. + + pkg_config_client_set_flags ( + client_, + // Additionally collect flags from the private dependency packages + // (see above) and from the Libs.private value for the static linking. + // + (stat + ? LIBPKG_CONFIG_PKG_PKGF_SEARCH_PRIVATE | + LIBPKG_CONFIG_PKG_PKGF_ADD_PRIVATE_FRAGMENTS + : 0)); + + pkg_config_list_t f = LIBPKG_CONFIG_LIST_INITIALIZER; // Empty list. + int e (pkg_config_pkg_libs (client_, pkg_, &f, max_depth)); + + if (e != LIBPKG_CONFIG_ERRF_OK) + throw failed (); // Assume the diagnostics is issued. 
+ + unique_ptr<pkg_config_list_t, fragments_deleter> fd (&f); + return to_strings (f, 'L', client_->filter_libdirs); + } + + optional<string> pkgconfig:: + variable (const char* name) const + { + assert (client_ != nullptr); // Must not be empty. + + const char* r (pkg_config_tuple_find (client_, &pkg_->vars, name)); + return r != nullptr ? optional<string> (r) : nullopt; + } + } +} + +#endif // BUILD2_BOOTSTRAP diff --git a/libbuild2/cc/pkgconfig-libpkgconf.cxx b/libbuild2/cc/pkgconfig-libpkgconf.cxx new file mode 100644 index 0000000..f3754d3 --- /dev/null +++ b/libbuild2/cc/pkgconfig-libpkgconf.cxx @@ -0,0 +1,355 @@ +// file : libbuild2/cc/pkgconfig-libpkgconf.cxx -*- C++ -*- +// license : MIT; see accompanying LICENSE file + +#ifndef BUILD2_BOOTSTRAP + +#include <libbuild2/cc/pkgconfig.hxx> + +#include <libbuild2/diagnostics.hxx> + +// Note that the libpkgconf library did not used to provide the version macro +// that we could use to compile the code conditionally against different API +// versions. Thus, we need to sense the pkgconf_client_new() function +// signature ourselves to call it properly. +// +namespace details +{ + void* + pkgconf_cross_personality_default (); // Never called. +} + +using namespace details; + +template <typename H> +static inline pkgconf_client_t* +call_pkgconf_client_new (pkgconf_client_t* (*f) (H, void*), + H error_handler, + void* error_handler_data) +{ + return f (error_handler, error_handler_data); +} + +template <typename H, typename P> +static inline pkgconf_client_t* +call_pkgconf_client_new (pkgconf_client_t* (*f) (H, void*, P), + H error_handler, + void* error_handler_data) +{ + return f (error_handler, + error_handler_data, + ::pkgconf_cross_personality_default ()); +} + +namespace build2 +{ + namespace cc + { + // The libpkgconf library is not thread-safe, even on the pkgconf_client_t + // level (see issue #128 for details). 
While it seems that the obvious + // thread-safety issues are fixed, the default personality initialization, + // which is still not thread-safe. So let's keep the mutex for now not to + // introduce potential issues. + // + static mutex pkgconf_mutex; + + // The package dependency traversal depth limit. + // + static const int pkgconf_max_depth = 100; + + // Normally the error_handler() callback can be called multiple times to + // report a single error (once per message line), to produce a multi-line + // message like this: + // + // Package foo was not found in the pkg-config search path.\n + // Perhaps you should add the directory containing `foo.pc'\n + // to the PKG_CONFIG_PATH environment variable\n + // Package 'foo', required by 'bar', not found\n + // + // For the above example callback will be called 4 times. To suppress all + // the junk we will use PKGCONF_PKG_PKGF_SIMPLIFY_ERRORS to get just: + // + // Package 'foo', required by 'bar', not found\n + // + // Also disable merging options like -framework into a single fragment, if + // possible. + // + static const int pkgconf_flags = + PKGCONF_PKG_PKGF_SIMPLIFY_ERRORS + | PKGCONF_PKG_PKGF_SKIP_PROVIDES +#ifdef PKGCONF_PKG_PKGF_DONT_MERGE_SPECIAL_FRAGMENTS + | PKGCONF_PKG_PKGF_DONT_MERGE_SPECIAL_FRAGMENTS +#endif + ; + +#if defined(LIBPKGCONF_VERSION) && LIBPKGCONF_VERSION >= 10900 + static bool + pkgconf_error_handler (const char* msg, + const pkgconf_client_t*, + void*) +#else + static bool + pkgconf_error_handler (const char* msg, + const pkgconf_client_t*, + const void*) +#endif + { + error << runtime_error (msg); // Sanitize the message (trailing dot). + return true; + } + + // Deleters. Note that they are thread-safe. + // + struct fragments_deleter + { + void operator() (pkgconf_list_t* f) const {pkgconf_fragment_free (f);} + }; + + // Convert fragments to strings. Skip the -I/-L options that refer to system + // directories. 
+ // + static strings + to_strings (const pkgconf_list_t& frags, + char type, + const pkgconf_list_t& sysdirs) + { + assert (type == 'I' || type == 'L'); + + strings r; + + auto add = [&r] (const pkgconf_fragment_t* frag) + { + string s; + if (frag->type != '\0') + { + s += '-'; + s += frag->type; + } + + s += frag->data; + r.push_back (move (s)); + }; + + // Option that is separated from its value, for example: + // + // -I /usr/lib + // + const pkgconf_fragment_t* opt (nullptr); + + pkgconf_node_t *node; + PKGCONF_FOREACH_LIST_ENTRY(frags.head, node) + { + auto frag (static_cast<const pkgconf_fragment_t*> (node->data)); + + // Add the separated option and directory, unless the latest is a + // system one. + // + if (opt != nullptr) + { + // Note that we should restore the directory path that was + // (mis)interpreted as an option, for example: + // + // -I -Ifoo + // + // In the above example option '-I' is followed by directory + // '-Ifoo', which is represented by libpkgconf library as fragment + // 'foo' with type 'I'. + // + if (!pkgconf_path_match_list ( + frag->type == '\0' + ? frag->data + : (string ({'-', frag->type}) + frag->data).c_str (), + &sysdirs)) + { + add (opt); + add (frag); + } + + opt = nullptr; + continue; + } + + // Skip the -I/-L option if it refers to a system directory. + // + if (frag->type == type) + { + // The option is separated from a value, that will (presumably) + // follow. + // + if (*frag->data == '\0') + { + opt = frag; + continue; + } + + if (pkgconf_path_match_list (frag->data, &sysdirs)) + continue; + } + + add (frag); + } + + if (opt != nullptr) // Add the dangling option. + add (opt); + + return r; + } + + // Note that some libpkgconf functions can potentially return NULL, + // failing to allocate the required memory block. However, we will not + // check the returned value for NULL as the library doesn't do so, prior + // to filling the allocated structures. So such a code complication on our + // side would be useless. 
Also, for some functions the NULL result has a + // special semantics, for example "not found". + // + pkgconfig:: + pkgconfig (path_type p, + const dir_paths& pc_dirs, + const dir_paths& sys_lib_dirs, + const dir_paths& sys_hdr_dirs) + : path (move (p)) + { + auto add_dirs = [] (pkgconf_list_t& dir_list, + const dir_paths& dirs, + bool suppress_dups, + bool cleanup = false) + { + if (cleanup) + { + pkgconf_path_free (&dir_list); + dir_list = PKGCONF_LIST_INITIALIZER; + } + + for (const auto& d: dirs) + pkgconf_path_add (d.string ().c_str (), &dir_list, suppress_dups); + }; + + mlock l (pkgconf_mutex); + + // Initialize the client handle. + // + unique_ptr<pkgconf_client_t, void (*) (pkgconf_client_t*)> c ( + call_pkgconf_client_new (&pkgconf_client_new, + pkgconf_error_handler, + nullptr /* handler_data */), + [] (pkgconf_client_t* c) {pkgconf_client_free (c);}); + + pkgconf_client_set_flags (c.get (), pkgconf_flags); + + // Note that the system header and library directory lists are + // automatically pre-filled by the pkgconf_client_new() call (see + // above). We will re-create these lists from scratch. + // + add_dirs (c->filter_libdirs, + sys_lib_dirs, + false /* suppress_dups */, + true /* cleanup */); + + add_dirs (c->filter_includedirs, + sys_hdr_dirs, + false /* suppress_dups */, + true /* cleanup */); + + // Note that the loaded file directory is added to the (yet empty) + // search list. Also note that loading of the prerequisite packages is + // delayed until flags retrieval, and their file directories are not + // added to the search list. + // + pkg_ = pkgconf_pkg_find (c.get (), path.string ().c_str ()); + + if (pkg_ == nullptr) + fail << "package '" << path << "' not found or invalid"; + + // Add the .pc file search directories. + // + assert (c->dir_list.length == 1); // Package file directory (see above). 
+ add_dirs (c->dir_list, pc_dirs, true /* suppress_dups */); + + client_ = c.release (); + } + + void pkgconfig:: + free () + { + assert (pkg_ != nullptr); + + mlock l (pkgconf_mutex); + pkgconf_pkg_unref (client_, pkg_); + pkgconf_client_free (client_); + } + + strings pkgconfig:: + cflags (bool stat) const + { + assert (client_ != nullptr); // Must not be empty. + + mlock l (pkgconf_mutex); + + pkgconf_client_set_flags ( + client_, + pkgconf_flags | + + // Walk through the private package dependencies (Requires.private) + // besides the public ones while collecting the flags. Note that we do + // this for both static and shared linking. + // + PKGCONF_PKG_PKGF_SEARCH_PRIVATE | + + // Collect flags from Cflags.private besides those from Cflags for the + // static linking. + // + (stat + ? PKGCONF_PKG_PKGF_MERGE_PRIVATE_FRAGMENTS + : 0)); + + pkgconf_list_t f = PKGCONF_LIST_INITIALIZER; // Aggregate initialization. + int e (pkgconf_pkg_cflags (client_, pkg_, &f, pkgconf_max_depth)); + + if (e != PKGCONF_PKG_ERRF_OK) + throw failed (); // Assume the diagnostics is issued. + + unique_ptr<pkgconf_list_t, fragments_deleter> fd (&f); // Auto-deleter. + return to_strings (f, 'I', client_->filter_includedirs); + } + + strings pkgconfig:: + libs (bool stat) const + { + assert (client_ != nullptr); // Must not be empty. + + mlock l (pkgconf_mutex); + + pkgconf_client_set_flags ( + client_, + pkgconf_flags | + + // Additionally collect flags from the private dependency packages + // (see above) and from the Libs.private value for the static linking. + // + (stat + ? PKGCONF_PKG_PKGF_SEARCH_PRIVATE | + PKGCONF_PKG_PKGF_MERGE_PRIVATE_FRAGMENTS + : 0)); + + pkgconf_list_t f = PKGCONF_LIST_INITIALIZER; // Aggregate initialization. + int e (pkgconf_pkg_libs (client_, pkg_, &f, pkgconf_max_depth)); + + if (e != PKGCONF_PKG_ERRF_OK) + throw failed (); // Assume the diagnostics is issued. + + unique_ptr<pkgconf_list_t, fragments_deleter> fd (&f); // Auto-deleter. 
+ return to_strings (f, 'L', client_->filter_libdirs); + } + + optional<string> pkgconfig:: + variable (const char* name) const + { + assert (client_ != nullptr); // Must not be empty. + + mlock l (pkgconf_mutex); + const char* r (pkgconf_tuple_find (client_, &pkg_->vars, name)); + return r != nullptr ? optional<string> (r) : nullopt; + } + } +} + +#endif // BUILD2_BOOTSTRAP diff --git a/libbuild2/cc/pkgconfig.cxx b/libbuild2/cc/pkgconfig.cxx index 151473d..4fd7486 100644 --- a/libbuild2/cc/pkgconfig.cxx +++ b/libbuild2/cc/pkgconfig.cxx @@ -1,13 +1,6 @@ // file : libbuild2/cc/pkgconfig.cxx -*- C++ -*- // license : MIT; see accompanying LICENSE file -// In order not to complicate the bootstrap procedure with libpkgconf building -// exclude functionality that involves reading of .pc files. -// -#ifndef BUILD2_BOOTSTRAP -# include <libpkgconf/libpkgconf.h> -#endif - #include <libbuild2/scope.hxx> #include <libbuild2/target.hxx> #include <libbuild2/context.hxx> @@ -25,436 +18,25 @@ #include <libbuild2/cc/utility.hxx> #include <libbuild2/cc/common.hxx> +#include <libbuild2/cc/pkgconfig.hxx> #include <libbuild2/cc/compile-rule.hxx> #include <libbuild2/cc/link-rule.hxx> -#ifndef BUILD2_BOOTSTRAP - -// Note that the libpkgconf library doesn't provide the version macro that we -// could use to compile the code conditionally against different API versions. -// Thus, we need to sense the pkgconf_client_new() function signature -// ourselves to call it properly. -// -namespace details -{ - void* - pkgconf_cross_personality_default (); // Never called. 
-} - -using namespace details; - -template <typename H> -static inline pkgconf_client_t* -call_pkgconf_client_new (pkgconf_client_t* (*f) (H, void*), - H error_handler, - void* error_handler_data) -{ - return f (error_handler, error_handler_data); -} - -template <typename H, typename P> -static inline pkgconf_client_t* -call_pkgconf_client_new (pkgconf_client_t* (*f) (H, void*, P), - H error_handler, - void* error_handler_data) -{ - return f (error_handler, - error_handler_data, - ::pkgconf_cross_personality_default ()); -} - -#endif - -using namespace std; -using namespace butl; +using namespace std; // VC16 namespace build2 { -#ifndef BUILD2_BOOTSTRAP - - // Load package information from a .pc file. Filter out the -I/-L options - // that refer to system directories. This makes sure all the system search - // directories are "pushed" to the back which minimizes the chances of - // picking up wrong (e.g., old installed version) header/library. - // - // Note that the prerequisite package .pc files search order is as follows: - // - // - in directory of the specified file - // - in pc_dirs directories (in the natural order) - // - class pkgconf - { - public: - using path_type = build2::path; - - path_type path; - - public: - explicit - pkgconf (path_type, - const dir_paths& pc_dirs, - const dir_paths& sys_hdr_dirs, - const dir_paths& sys_lib_dirs); - - // Create a special empty object. Querying package information on such - // an object is illegal. - // - pkgconf () = default; - - ~pkgconf (); - - // Movable-only type. - // - pkgconf (pkgconf&& p) - : path (move (p.path)), - client_ (p.client_), - pkg_ (p.pkg_) - { - p.client_ = nullptr; - p.pkg_ = nullptr; - } - - pkgconf& - operator= (pkgconf&& p) - { - if (this != &p) - { - this->~pkgconf (); - new (this) pkgconf (move (p)); // Assume noexcept move-construction. 
- } - return *this; - } - - pkgconf (const pkgconf&) = delete; - pkgconf& operator= (const pkgconf&) = delete; - - strings - cflags (bool stat) const; - - strings - libs (bool stat) const; - - string - variable (const char*) const; - - string - variable (const string& s) const {return variable (s.c_str ());} - - private: - // Keep them as raw pointers not to deal with API thread-unsafety in - // deleters and introducing additional mutex locks. - // - pkgconf_client_t* client_ = nullptr; - pkgconf_pkg_t* pkg_ = nullptr; - }; - - // Currently the library is not thread-safe, even on the pkgconf_client_t - // level (see issue #128 for details). - // - // @@ An update: seems that the obvious thread-safety issues are fixed. - // However, let's keep mutex locking for now not to introduce potential - // issues before we make sure that there are no other ones. - // - static mutex pkgconf_mutex; - - // The package dependency traversal depth limit. - // - static const int pkgconf_max_depth = 100; - - // Normally the error_handler() callback can be called multiple times to - // report a single error (once per message line), to produce a multi-line - // message like this: - // - // Package foo was not found in the pkg-config search path.\n - // Perhaps you should add the directory containing `foo.pc'\n - // to the PKG_CONFIG_PATH environment variable\n - // Package 'foo', required by 'bar', not found\n - // - // For the above example callback will be called 4 times. To suppress all the - // junk we will use PKGCONF_PKG_PKGF_SIMPLIFY_ERRORS to get just: - // - // Package 'foo', required by 'bar', not found\n - // - // Also disable merging options like -framework into a single fragment, if - // possible. 
- // - static const int pkgconf_flags = - PKGCONF_PKG_PKGF_SIMPLIFY_ERRORS -#ifdef PKGCONF_PKG_PKGF_DONT_MERGE_SPECIAL_FRAGMENTS - | PKGCONF_PKG_PKGF_DONT_MERGE_SPECIAL_FRAGMENTS -#endif - ; - - static bool - pkgconf_error_handler (const char* msg, const pkgconf_client_t*, const void*) - { - error << runtime_error (msg); // Sanitize the message. - return true; - } - - // Deleters. Note that they are thread-safe. - // - struct fragments_deleter - { - void operator() (pkgconf_list_t* f) const {pkgconf_fragment_free (f);} - }; - - // Convert fragments to strings. Skip the -I/-L options that refer to system - // directories. - // - static strings - to_strings (const pkgconf_list_t& frags, - char type, - const pkgconf_list_t& sysdirs) - { - assert (type == 'I' || type == 'L'); - - strings r; - - auto add = [&r] (const pkgconf_fragment_t* frag) - { - string s; - if (frag->type != '\0') - { - s += '-'; - s += frag->type; - } - - s += frag->data; - r.push_back (move (s)); - }; - - // Option that is separated from its value, for example: - // - // -I /usr/lib - // - const pkgconf_fragment_t* opt (nullptr); - - pkgconf_node_t *node; - PKGCONF_FOREACH_LIST_ENTRY(frags.head, node) - { - auto frag (static_cast<const pkgconf_fragment_t*> (node->data)); - - // Add the separated option and directory, unless the latest is a system - // one. - // - if (opt != nullptr) - { - // Note that we should restore the directory path that was - // (mis)interpreted as an option, for example: - // - // -I -Ifoo - // - // In the above example option '-I' is followed by directory '-Ifoo', - // which is represented by libpkgconf library as fragment 'foo' with - // type 'I'. - // - if (!pkgconf_path_match_list ( - frag->type == '\0' - ? frag->data - : (string ({'-', frag->type}) + frag->data).c_str (), - &sysdirs)) - { - add (opt); - add (frag); - } - - opt = nullptr; - continue; - } - - // Skip the -I/-L option if it refers to a system directory. 
- // - if (frag->type == type) - { - // The option is separated from a value, that will (presumably) follow. - // - if (*frag->data == '\0') - { - opt = frag; - continue; - } - - if (pkgconf_path_match_list (frag->data, &sysdirs)) - continue; - } - - add (frag); - } - - if (opt != nullptr) // Add the dangling option. - add (opt); - - return r; - } - - // Note that some libpkgconf functions can potentially return NULL, failing - // to allocate the required memory block. However, we will not check the - // returned value for NULL as the library doesn't do so, prior to filling the - // allocated structures. So such a code complication on our side would be - // useless. Also, for some functions the NULL result has a special semantics, - // for example "not found". - // - pkgconf:: - pkgconf (path_type p, - const dir_paths& pc_dirs, - const dir_paths& sys_lib_dirs, - const dir_paths& sys_hdr_dirs) - : path (move (p)) - { - auto add_dirs = [] (pkgconf_list_t& dir_list, - const dir_paths& dirs, - bool suppress_dups, - bool cleanup = false) - { - if (cleanup) - { - pkgconf_path_free (&dir_list); - dir_list = PKGCONF_LIST_INITIALIZER; - } - - for (const auto& d: dirs) - pkgconf_path_add (d.string ().c_str (), &dir_list, suppress_dups); - }; - - mlock l (pkgconf_mutex); - - // Initialize the client handle. - // - unique_ptr<pkgconf_client_t, void (*) (pkgconf_client_t*)> c ( - call_pkgconf_client_new (&pkgconf_client_new, - pkgconf_error_handler, - nullptr /* handler_data */), - [] (pkgconf_client_t* c) {pkgconf_client_free (c);}); - - pkgconf_client_set_flags (c.get (), pkgconf_flags); - - // Note that the system header and library directory lists are - // automatically pre-filled by the pkgconf_client_new() call (see above). - // We will re-create these lists from scratch. 
- // - add_dirs (c->filter_libdirs, - sys_lib_dirs, - false /* suppress_dups */, - true /* cleanup */); - - add_dirs (c->filter_includedirs, - sys_hdr_dirs, - false /* suppress_dups */, - true /* cleanup */); - - // Note that the loaded file directory is added to the (yet empty) search - // list. Also note that loading of the prerequisite packages is delayed - // until flags retrieval, and their file directories are not added to the - // search list. - // - pkg_ = pkgconf_pkg_find (c.get (), path.string ().c_str ()); - - if (pkg_ == nullptr) - fail << "package '" << path << "' not found or invalid"; - - // Add the .pc file search directories. - // - assert (c->dir_list.length == 1); // Package file directory (see above). - add_dirs (c->dir_list, pc_dirs, true /* suppress_dups */); - - client_ = c.release (); - } - - pkgconf:: - ~pkgconf () - { - if (client_ != nullptr) // Not empty. - { - assert (pkg_ != nullptr); - - mlock l (pkgconf_mutex); - pkgconf_pkg_unref (client_, pkg_); - pkgconf_client_free (client_); - } - } - - strings pkgconf:: - cflags (bool stat) const - { - assert (client_ != nullptr); // Must not be empty. - - mlock l (pkgconf_mutex); - - pkgconf_client_set_flags ( - client_, - pkgconf_flags | - - // Walk through the private package dependencies (Requires.private) - // besides the public ones while collecting the flags. Note that we do - // this for both static and shared linking. - // - PKGCONF_PKG_PKGF_SEARCH_PRIVATE | - - // Collect flags from Cflags.private besides those from Cflags for the - // static linking. - // - (stat - ? PKGCONF_PKG_PKGF_MERGE_PRIVATE_FRAGMENTS - : 0)); - - pkgconf_list_t f = PKGCONF_LIST_INITIALIZER; // Aggregate initialization. - int e (pkgconf_pkg_cflags (client_, pkg_, &f, pkgconf_max_depth)); - - if (e != PKGCONF_PKG_ERRF_OK) - throw failed (); // Assume the diagnostics is issued. - - unique_ptr<pkgconf_list_t, fragments_deleter> fd (&f); // Auto-deleter. 
- return to_strings (f, 'I', client_->filter_includedirs); - } - - strings pkgconf:: - libs (bool stat) const - { - assert (client_ != nullptr); // Must not be empty. - - mlock l (pkgconf_mutex); - - pkgconf_client_set_flags ( - client_, - pkgconf_flags | - - // Additionally collect flags from the private dependency packages - // (see above) and from the Libs.private value for the static linking. - // - (stat - ? PKGCONF_PKG_PKGF_SEARCH_PRIVATE | - PKGCONF_PKG_PKGF_MERGE_PRIVATE_FRAGMENTS - : 0)); - - pkgconf_list_t f = PKGCONF_LIST_INITIALIZER; // Aggregate initialization. - int e (pkgconf_pkg_libs (client_, pkg_, &f, pkgconf_max_depth)); - - if (e != PKGCONF_PKG_ERRF_OK) - throw failed (); // Assume the diagnostics is issued. - - unique_ptr<pkgconf_list_t, fragments_deleter> fd (&f); // Auto-deleter. - return to_strings (f, 'L', client_->filter_libdirs); - } - - string pkgconf:: - variable (const char* name) const - { - assert (client_ != nullptr); // Must not be empty. - - mlock l (pkgconf_mutex); - const char* r (pkgconf_tuple_find (client_, &pkg_->vars, name)); - return r != nullptr ? string (r) : string (); - } - -#endif - namespace cc { using namespace bin; // In pkg-config backslashes, spaces, etc are escaped with a backslash. // + // @@ TODO: handle empty values (save as ''?) + // + // Note: may contain variable expansions (e.g, ${pcfiledir}) so unclear + // if can use quoting. + // static string escape (const string& s) { @@ -481,6 +63,35 @@ namespace build2 return r; } + // Resolve metadata value type from type name. Return in the second half + // of the pair whether this is a dir_path-based type. 
+ // + static pair<const value_type*, bool> + metadata_type (const string& tn) + { + bool d (false); + const value_type* r (nullptr); + + if (tn == "bool") r = &value_traits<bool>::value_type; + else if (tn == "int64") r = &value_traits<int64_t>::value_type; + else if (tn == "uint64") r = &value_traits<uint64_t>::value_type; + else if (tn == "string") r = &value_traits<string>::value_type; + else if (tn == "path") r = &value_traits<path>::value_type; + else if (tn == "dir_path") {r = &value_traits<dir_path>::value_type; d = true;} + else if (tn == "int64s") r = &value_traits<int64s>::value_type; + else if (tn == "uint64s") r = &value_traits<uint64s>::value_type; + else if (tn == "strings") r = &value_traits<strings>::value_type; + else if (tn == "paths") r = &value_traits<paths>::value_type; + else if (tn == "dir_paths") {r = &value_traits<dir_paths>::value_type; d = true;} + + return make_pair (r, d); + } + + // In order not to complicate the bootstrap procedure with libpkg-config + // building, exclude functionality that involves reading of .pc files. + // +#ifndef BUILD2_BOOTSTRAP + // Try to find a .pc file in the pkgconfig/ subdirectory of libd, trying // several names derived from stem. If not found, return false. If found, // load poptions, loptions, libs, and modules, set the corresponding @@ -497,9 +108,8 @@ namespace build2 // Also note that the bootstrapped version of build2 will not search for // .pc files, always returning false (see above for the reasoning). // -#ifndef BUILD2_BOOTSTRAP - // Derive pkgconf search directories from the specified library search + // Derive pkg-config search directories from the specified library search // directory passing them to the callback function for as long as it // returns false (e.g., not found). Return true if the callback returned // true. @@ -543,8 +153,8 @@ namespace build2 return false; } - // Search for the .pc files in the pkgconf directories that correspond to - // the specified library directory. 
If found, return static (first) and + // Search for the .pc files in the pkg-config directories that correspond + // to the specified library directory. If found, return static (first) and // shared (second) library .pc files. If common is false, then only // consider our .static/.shared files. // @@ -554,6 +164,8 @@ namespace build2 const string& stem, bool common) const { + tracer trace (x, "pkgconfig_search"); + // When it comes to looking for .pc files we have to decide where to // search (which directory(ies)) as well as what to search for (which // names). Suffix is our ".shared" or ".static" extension. @@ -575,28 +187,36 @@ namespace build2 // then you get something like zlib which calls it zlib.pc. So let's // just do it. // - f = dir; - f /= "lib"; - f += stem; - f += sfx; - f += ".pc"; - if (exists (f)) - return f; + // And as you think you've covered all the bases, someone decides to + // play with the case (libXau.* vs xau.pc). So let's also try the + // lower-case versions of the stem unless we are on a case-insensitive + // filesystem. + // + auto check = [&dir, & sfx, &f] (const string& n) + { + f = dir; + f /= n; + f += sfx; + f += ".pc"; + return exists (f); + }; - f = dir; - f /= stem; - f += sfx; - f += ".pc"; - if (exists (f)) + if (check ("lib" + stem) || check (stem)) return f; +#ifndef _WIN32 + string lstem (lcase (stem)); + + if (lstem != stem) + { + if (check ("lib" + lstem) || check (lstem)) + return f; + } +#endif + if (proj) { - f = dir; - f /= proj->string (); - f += sfx; - f += ".pc"; - if (exists (f)) + if (check (proj->string ())) return f; } @@ -636,15 +256,18 @@ namespace build2 if (pkgconfig_derive (libd, check)) { + l6 ([&]{trace << "found " << libd << stem << " in " + << (d.a.empty () ? 
d.a : d.s).directory ();}); + r.first = move (d.a); r.second = move (d.s); } return r; - }; + } bool common:: - pkgconfig_load (action a, + pkgconfig_load (optional<action> act, const scope& s, lib& lt, liba* at, @@ -653,7 +276,8 @@ namespace build2 const string& stem, const dir_path& libd, const dir_paths& top_sysd, - const dir_paths& top_usrd) const + const dir_paths& top_usrd, + pair<bool, bool> metaonly) const { assert (at != nullptr || st != nullptr); @@ -663,12 +287,16 @@ namespace build2 if (p.first.empty () && p.second.empty ()) return false; - pkgconfig_load (a, s, lt, at, st, p, libd, top_sysd, top_usrd); + pkgconfig_load ( + act, s, lt, at, st, p, libd, top_sysd, top_usrd, metaonly); return true; } + // Action should be absent if called during the load phase. If metaonly is + // true then only load the metadata. + // void common:: - pkgconfig_load (action a, + pkgconfig_load (optional<action> act, const scope& s, lib& lt, liba* at, @@ -676,7 +304,8 @@ namespace build2 const pair<path, path>& paths, const dir_path& libd, const dir_paths& top_sysd, - const dir_paths& top_usrd) const + const dir_paths& top_usrd, + pair<bool /* a */, bool /* s */> metaonly) const { tracer trace (x, "pkgconfig_load"); @@ -687,24 +316,66 @@ namespace build2 assert (!ap.empty () || !sp.empty ()); - // Extract --cflags and set them as lib?{}:export.poptions. Note that we - // still pass --static in case this is pkgconf which has Cflags.private. + // Append -I<dir> or -L<dir> option suppressing duplicates. // - auto parse_cflags = [&trace, this] (target& t, - const pkgconf& pc, - bool la) + auto append_dir = [] (strings& ops, string&& o) { + char c (o[1]); + + // @@ Should we normalize the path for good measure? But on the other + // hand, most of the time when it's not normalized, it will likely + // be "consistently-relative", e.g., something like + // ${prefix}/lib/../include. I guess let's wait and see for some + // real-world examples. 
+ //
+ // Well, we now support generating relocatable .pc files that have
+ // a bunch of -I${pcfiledir}/../../include and -L${pcfiledir}/.. .
+ //
+ // On the other hand, there could be symlinks involved and just
+ // normalize() may not be correct.
+ //
+ // Note that we do normalize -L paths in the usrd logic later
+ // (but not when setting as *.export.loptions).
+
+ for (const string& x: ops)
+ {
+ if (x.size () > 2 && x[0] == '-' && x[1] == c)
+ {
+ if (path_traits::compare (x.c_str () + 2, x.size () - 2,
+ o.c_str () + 2, o.size () - 2) == 0)
+ return; // Duplicate.
+ }
+ }
+
+ ops.push_back (move (o));
+ };
+
+ // Extract --cflags and set them as lib?{}:export.poptions.
+ //
+ auto parse_cflags = [&trace, this, &append_dir] (target& t,
+ const pkgconfig& pc,
+ bool la)
+ {
+ // Note that we normalize `-[IDU] <arg>` to `-[IDU]<arg>`.
+ //
 strings pops;
- bool arg (false);
- for (auto& o: pc.cflags (la))
+ char arg ('\0'); // Option with pending argument.
+ for (string& o: pc.cflags (la))
 {
 if (arg)
 {
 // Can only be an argument for -I, -D, -U options.
 //
- pops.push_back (move (o));
- arg = false;
+ o.insert (0, 1, arg);
+ o.insert (0, 1, '-');
+
+ if (arg == 'I')
+ append_dir (pops, move (o));
+ else
+ pops.push_back (move (o));
+
+ arg = '\0';
 continue;
 }
@@ -713,11 +384,17 @@ namespace build2
 // We only keep -I, -D and -U.
// if (n >= 2 && - o[0] == '-' && - (o[1] == 'I' || o[1] == 'D' || o[1] == 'U')) + o[0] == '-' && (o[1] == 'I' || o[1] == 'D' || o[1] == 'U')) { - pops.push_back (move (o)); - arg = (n == 2); + if (n > 2) + { + if (o[1] == 'I') + append_dir (pops, move (o)); + else + pops.push_back (move (o)); + } + else + arg = o[1]; continue; } @@ -726,7 +403,7 @@ namespace build2 } if (arg) - fail << "argument expected after " << pops.back () << + fail << "argument expected after -" << arg << info << "while parsing pkg-config --cflags " << pc.path; if (!pops.empty ()) @@ -746,12 +423,16 @@ namespace build2 // Parse --libs into loptions/libs (interface and implementation). If // ps is not NULL, add each resolved library target as a prerequisite. // - auto parse_libs = [a, &s, top_sysd, this] (target& t, - bool binless, - const pkgconf& pc, - bool la, - prerequisites* ps) + auto parse_libs = [this, + &append_dir, + act, &s, top_sysd] (target& t, + bool binless, + const pkgconfig& pc, + bool la, + prerequisites* ps) { + // Note that we normalize `-L <arg>` to `-L<arg>`. + // strings lops; vector<name> libs; @@ -760,22 +441,29 @@ namespace build2 // library is binless. But sometimes we may have other linker options, // for example, -Wl,... or -pthread. It's probably a bad idea to // ignore them. Also, theoretically, we could have just the library - // name/path. + // name/path. Note that (after some meditation) we consider -pthread + // a special form of -l. // // The tricky part, of course, is to know whether what follows after // an option we don't recognize is its argument or another option or // library. What we do at the moment is stop recognizing just library // names (without -l) after seeing an unknown option. // - bool arg (false), first (true), known (true), have_L; - for (auto& o: pc.libs (la)) + bool first (true), known (true), have_L (false); + + string self; // The library itself (-l of just name/path). + + char arg ('\0'); // Option with pending argument. 
+ for (string& o: pc.libs (la))
 {
 if (arg)
 {
- // Can only be an argument for an loption.
+ // Can only be an argument for an -L option.
 //
- lops.push_back (move (o));
- arg = false;
+ o.insert (0, 1, arg);
+ o.insert (0, 1, '-');
+ append_dir (lops, move (o));
+ arg = '\0';
 continue;
 }
@@ -785,44 +473,54 @@ namespace build2
 //
 if (n >= 2 && o[0] == '-' && o[1] == 'L')
 {
+ if (n > 2)
+ append_dir (lops, move (o));
+ else
+ arg = o[1];
 have_L = true;
- lops.push_back (move (o));
- arg = (n == 2);
 continue;
 }
- // See if that's -l or just the library name/path.
+ // See if that's -l, -pthread, or just the library name/path.
 //
- if ((known && o[0] != '-') ||
- (n > 2 && o[0] == '-' && o[1] == 'l'))
+ if ((known && n != 0 && o[0] != '-') ||
+ (n > 2 && o[0] == '-' && (o[1] == 'l' || o == "-pthread")))
 {
 // Unless binless, the first one is the library itself, which we
 // skip. Note that we don't verify this and theoretically it could
 // be some other library, but we haven't encountered such a beast
 // yet.
 //
+ // What we have encountered (e.g., in the Magick++ library) is the
+ // library itself repeated in Libs.private. So now we save it and
+ // filter all its subsequent occurrences.
+ //
+ // @@ To be safe we probably shouldn't rely on the position and
+ // filter out all occurrences of the library itself (by name?)
+ // and complain if none were encountered.
+ //
+ // Note also that the same situation can occur if we have a
+ // binful library for which we could not find the library
+ // binary and are treating it as binless. We now have a diag
+ // frame around the call to search_library() to help diagnose
+ // such situations.
+ // if (first) { first = false; if (!binless) + { + self = move (o); + continue; + } + } + else + { + if (!binless && o == self) continue; } - // @@ If by some reason this is the library itself (doesn't go - // first or libpkgconf parsed libs in some bizarre way) we will - // have a dependency cycle by trying to lock its target inside - // search_library() as by now it is already locked. To be safe - // we probably shouldn't rely on the position and filter out - // all occurrences of the library itself (by name?) and - // complain if none were encountered. - // - // Note also that the same situation can occur if we have a - // binful library for which we could not find the library - // binary and are treating it as binless. We now have a diag - // frame around the call to search_library() to help diagnose - // such situations. - // libs.push_back (name (move (o))); continue; } @@ -834,7 +532,7 @@ namespace build2 } if (arg) - fail << "argument expected after " << lops.back () << + fail << "argument expected after -" << arg << info << "while parsing pkg-config --libs " << pc.path; // Space-separated list of escaped library flags. @@ -842,7 +540,7 @@ namespace build2 auto lflags = [&pc, la] () -> string { string r; - for (const auto& o: pc.libs (la)) + for (const string& o: pc.libs (la)) { if (!r.empty ()) r += ' '; @@ -851,7 +549,7 @@ namespace build2 return r; }; - if (first && !binless) + if (!binless && self.empty ()) fail << "library expected in '" << lflags () << "'" << info << "while parsing pkg-config --libs " << pc.path; @@ -864,8 +562,8 @@ namespace build2 // import installed, or via a .pc file (which we could have generated // from the export stub). The exception is "runtime libraries" (which // are really the extension of libc or the operating system in case of - // Windows) such as -lm, -ldl, -lpthread, etc. Those we will detect - // and leave as -l*. + // Windows) such as -lm, -ldl, -lpthread (or its -pthread variant), + // etc. 
Those we will detect and leave as -l*. // // If we managed to resolve all the -l's (sans runtime), then we can // omit -L's for a nice and tidy command line. @@ -892,20 +590,28 @@ namespace build2 if (l[0] != '-') // e.g., just shell32.lib continue; else if (cmp ("advapi32") || + cmp ("authz") || cmp ("bcrypt") || + cmp ("comdlg32") || cmp ("crypt32") || - cmp ("dbgeng") || cmp ("d2d1") || cmp ("d3d", 3) || // d3d* + cmp ("dbgeng") || + cmp ("dbghelp") || + cmp ("dnsapi") || cmp ("dwmapi") || cmp ("dwrite") || cmp ("dxgi") || cmp ("dxguid") || cmp ("gdi32") || + cmp ("glu32") || cmp ("imagehlp") || cmp ("imm32") || + cmp ("iphlpapi") || cmp ("kernel32") || + cmp ("mincore") || cmp ("mpr") || + cmp ("msimg32") || cmp ("mswsock") || cmp ("msxml", 5) || // msxml* cmp ("netapi32") || @@ -913,6 +619,9 @@ namespace build2 cmp ("odbc32") || cmp ("ole32") || cmp ("oleaut32") || + cmp ("opengl32") || + cmp ("powrprof") || + cmp ("psapi") || cmp ("rpcrt4") || cmp ("secur32") || cmp ("shell32") || @@ -922,6 +631,8 @@ namespace build2 cmp ("userenv") || cmp ("uuid") || cmp ("version") || + cmp ("windowscodecs") || + cmp ("winhttp") || cmp ("winmm") || cmp ("winspool") || cmp ("ws2") || @@ -938,6 +649,11 @@ namespace build2 } continue; } + else if (tsys == "mingw32") + { + if (l == "-pthread") + continue; + } } else { @@ -947,6 +663,7 @@ namespace build2 l == "-lm" || l == "-ldl" || l == "-lrt" || + l == "-pthread" || l == "-lpthread") continue; @@ -982,18 +699,13 @@ namespace build2 { usrd = dir_paths (); - for (auto i (lops.begin ()); i != lops.end (); ++i) + for (const string& o: lops) { - const string& o (*i); - - if (o.size () >= 2 && o[0] == '-' && o[1] == 'L') + // Note: always in the -L<dir> form (see above). + // + if (o.size () > 2 && o[0] == '-' && o[1] == 'L') { - string p; - - if (o.size () == 2) - p = *++i; // We've verified it's there. 
- else - p = string (o, 2); + string p (o, 2); try { @@ -1004,6 +716,7 @@ namespace build2 << lflags () << "'" << info << "while parsing pkg-config --libs " << pc.path; + d.normalize (); usrd->push_back (move (d)); } catch (const invalid_path& e) @@ -1034,7 +747,7 @@ namespace build2 dr << info (f) << "while resolving pkg-config dependency " << l; }); - lt = search_library (a, top_sysd, usrd, pk); + lt = search_library (act, top_sysd, usrd, pk); } if (lt != nullptr) @@ -1083,24 +796,16 @@ namespace build2 { // Translate -L to /LIBPATH. // - for (auto i (lops.begin ()); i != lops.end (); ) + for (string& o: lops) { - string& o (*i); size_t n (o.size ()); - if (n >= 2 && o[0] == '-' && o[1] == 'L') + // Note: always in the -L<dir> form (see above). + // + if (n > 2 && o[0] == '-' && o[1] == 'L') { o.replace (0, 2, "/LIBPATH:"); - - if (n == 2) - { - o += *++i; // We've verified it's there. - i = lops.erase (i); - continue; - } } - - ++i; } } @@ -1124,6 +829,10 @@ namespace build2 // may escape things even on non-Windows platforms, for example, // spaces. So we use a slightly modified version of next_word(). // + // @@ TODO: handle quotes (e.g., empty values; see parse_metadata()). + // I wonder what we get here if something is quoted in the + // .pc file. + // auto next = [] (const string& s, size_t& b, size_t& e) -> string { string r; @@ -1159,17 +868,123 @@ namespace build2 return r; }; + // Parse the build2.metadata variable value and, if user is true, + // extract the user metadata, if any, and set extracted variables on the + // specified target. + // + auto parse_metadata = [&next] (target& t, + pkgconfig& pc, + const string& md, + bool user) + { + const location loc (pc.path); + + context& ctx (t.ctx); + + optional<uint64_t> ver; + optional<string> pfx; + + variable_pool* vp (nullptr); // Resolve lazily. 
+ + string s; + for (size_t b (0), e (0); !(s = next (md, b, e)).empty (); ) + { + if (!ver) + { + try + { + ver = value_traits<uint64_t>::convert (name (s), nullptr); + } + catch (const invalid_argument& e) + { + fail (loc) << "invalid version in build2.metadata variable: " + << e; + } + + if (*ver != 1) + fail (loc) << "unexpected metadata version " << *ver; + + if (!user) + return; + + continue; + } + + if (!pfx) + { + if (s.empty ()) + fail (loc) << "empty variable prefix in build2.metadata varible"; + + pfx = s; + continue; + } + + // The rest is variable name/type pairs. + // + size_t p (s.find ('/')); + + if (p == string::npos) + fail (loc) << "expected name/type pair instead of '" << s << "'"; + + string vn (s, 0, p); + string tn (s, p + 1); + + optional<string> val (pc.variable (vn)); + + if (!val) + fail (loc) << "metadata variable " << vn << " not set"; + + pair<const value_type*, bool> vt (metadata_type (tn)); + if (vt.first == nullptr) + fail (loc) << "unknown metadata type " << tn; + + names ns; + for (size_t b (0), e (0); !(s = next (*val, b, e)).empty (); ) + { + ns.push_back (vt.second + ? name (dir_path (move (s))) + : name (move (s))); + } + + // These should be public (qualified) variables so go straight for + // the public variable pool. + // + if (vp == nullptr) + vp = &ctx.var_pool.rw (); // Load phase if user==true. + + const variable& var (vp->insert (move (vn))); + + value& v (t.assign (var)); + v.assign (move (ns), &var); + typify (v, *vt.first, &var); + } + + if (!ver) + fail (loc) << "version expected in build2.metadata variable"; + + if (!pfx) + return; // No user metadata. + + // Set export.metadata to indicate the presence of user metadata. + // + t.assign (ctx.var_export_metadata) = names { + name (std::to_string (*ver)), name (move (*pfx))}; + }; + // Parse modules, enter them as targets, and add them to the // prerequisites. 
// auto parse_modules = [&trace, this, - &next, &s, <] (const pkgconf& pc, + &next, &s, <] (const pkgconfig& pc, prerequisites& ps) { - string val (pc.variable ("cxx_modules")); + optional<string> val (pc.variable ("cxx.modules")); + + if (!val) + return; string m; - for (size_t b (0), e (0); !(m = next (val, b, e)).empty (); ) + for (size_t b (0), e (0); !(m = next (*val, b, e)).empty (); ) { // The format is <name>=<path> with `..` used as a partition // separator (see pkgconfig_save() for details). @@ -1178,18 +993,26 @@ namespace build2 if (p == string::npos || p == 0 || // Empty name. p == m.size () - 1) // Empty path. - fail << "invalid module information in '" << val << "'" << - info << "while parsing pkg-config --variable=cxx_modules " + fail << "invalid module information in '" << *val << "'" << + info << "while parsing pkg-config --variable=cxx.modules " << pc.path; string mn (m, 0, p); path mp (m, p + 1, string::npos); + + // Must be absolute but may not be normalized due to a relocatable + // .pc file. We assume there are no symlink shenanigans that would + // require realize(). + // + if (!mp.normalized ()) + mp.normalize (); + path mf (mp.leaf ()); // Extract module properties, if any. // - string pp (pc.variable ("cxx_module_preprocessed." + mn)); - string se (pc.variable ("cxx_module_symexport." + mn)); + optional<string> pp (pc.variable ("cxx.module_preprocessed." + mn)); + optional<string> se (pc.variable ("cxx.module_symexport." + mn)); // Replace the partition separator. // @@ -1234,11 +1057,12 @@ namespace build2 // { value& v (mt.vars.assign (x_preprocessed)); // NULL - if (!pp.empty ()) v = move (pp); + if (pp) + v = move (*pp); } { - mt.vars.assign (x_symexport) = (se == "true"); + mt.vars.assign (x_symexport) = (se && *se == "true"); } tl.second.unlock (); @@ -1260,18 +1084,29 @@ namespace build2 // the prerequisites. 
       //
       auto parse_headers = [&trace, this,
-                            &next, &s, &lt] (const pkgconf& pc,
+                            &next, &s, &lt] (const pkgconfig& pc,
                                              const target_type& tt,
                                              const char* lang,
                                              prerequisites& ps)
       {
-        string var (string (lang) + "_importable_headers");
-        string val (pc.variable (var));
+        string var (string (lang) + ".importable_headers");
+        optional<string> val (pc.variable (var));
+
+        if (!val)
+          return;
 
         string h;
-        for (size_t b (0), e (0); !(h = next (val, b, e)).empty (); )
+        for (size_t b (0), e (0); !(h = next (*val, b, e)).empty (); )
        {
           path hp (move (h));
+
+          // Must be absolute but may not be normalized due to a relocatable
+          // .pc file. We assume there are no symlink shenanigans that would
+          // require realize().
+          //
+          if (!hp.normalized ())
+            hp.normalize ();
+
           path hf (hp.leaf ());
 
           auto tl (
@@ -1309,19 +1144,10 @@
         }
       };
 
-      // For now we only populate prerequisites for lib{}. To do it for
-      // liba{} would require weeding out duplicates that are already in
-      // lib{}.
+      // Load the information from the pkg-config files.
       //
-      // Currently, this information is only used by the modules machinery to
-      // resolve module names to module files (but we cannot only do this if
-      // modules are enabled since the same installed library can be used by
-      // multiple builds).
-      //
-      prerequisites prs;
-
-      pkgconf apc;
-      pkgconf spc;
+      pkgconfig apc;
+      pkgconfig spc;
 
       // Create the .pc files search directory list.
       //
@@ -1329,9 +1155,16 @@
       // Note that we rely on the "small function object" optimization here.
       //
-      auto add_pc_dir = [&pc_dirs] (dir_path&& d) -> bool
+      auto add_pc_dir = [&trace, &pc_dirs] (dir_path&& d) -> bool
       {
-        pc_dirs.emplace_back (move (d));
+        // Suppress duplicates.
+ // + if (find (pc_dirs.begin (), pc_dirs.end (), d) == pc_dirs.end ()) + { + l6 ([&]{trace << "search path " << d;}); + pc_dirs.emplace_back (move (d)); + } + return false; }; @@ -1341,18 +1174,115 @@ namespace build2 bool pa (at != nullptr && !ap.empty ()); if (pa || sp.empty ()) - apc = pkgconf (ap, pc_dirs, sys_lib_dirs, sys_hdr_dirs); + apc = pkgconfig (ap, pc_dirs, sys_lib_dirs, sys_hdr_dirs); bool ps (st != nullptr && !sp.empty ()); if (ps || ap.empty ()) - spc = pkgconf (sp, pc_dirs, sys_lib_dirs, sys_hdr_dirs); + spc = pkgconfig (sp, pc_dirs, sys_lib_dirs, sys_hdr_dirs); + + // Load the user metadata if we are in the load phase. Otherwise just + // determine if we have metadata. + // + // Note also that we are not failing here if the metadata was requested + // but not present (potentially only partially) letting the caller + // (i.e., the import machinery) verify that the export.metadata was set + // on the target being imported. This would also allow supporting + // optional metadata. + // + bool apc_meta (false); + bool spc_meta (false); + if (!act) + { + // We can only do it during the load phase. + // + assert (lt.ctx.phase == run_phase::load); + + pkgconfig& ipc (ps ? spc : apc); // As below. + + // Since it's not easy to say if things are the same, we load a copy + // into the group and each member, if any. + // + // @@ TODO: check if already loaded? Don't we have the same problem + // below with reloading the rest for lt? What if we passed NULL + // in this case (and I suppose another bool in metaonly)? 
+ // + if (optional<string> md = ipc.variable ("build2.metadata")) + parse_metadata (lt, ipc, *md, true); + + if (pa) + { + if (optional<string> md = apc.variable ("build2.metadata")) + { + parse_metadata (*at, apc, *md, true); + apc_meta = true; + } + } + + if (ps) + { + if (optional<string> md = spc.variable ("build2.metadata")) + { + parse_metadata (*st, spc, *md, true); + spc_meta = true; + } + } + + // If we only need metadata, then we are done. + // + if (at != nullptr && metaonly.first) + { + pa = false; + at = nullptr; + } + + if (st != nullptr && metaonly.second) + { + ps = false; + st = nullptr; + } + + if (at == nullptr && st == nullptr) + return; + } + else + { + if (pa) + { + if (optional<string> md = apc.variable ("build2.metadata")) + { + parse_metadata (*at, apc, *md, false); + apc_meta = true; + } + } + + if (ps) + { + if (optional<string> md = spc.variable ("build2.metadata")) + { + parse_metadata (*st, spc, *md, false); + spc_meta = true; + } + } + } // Sort out the interface dependencies (which we are setting on lib{}). // If we have the shared .pc variant, then we use that. Otherwise -- // static but extract without the --static option (see also the saving // logic). // - pkgconf& ipc (ps ? spc : apc); // Interface package info. + pkgconfig& ipc (ps ? spc : apc); // Interface package info. + bool ipc_meta (ps ? spc_meta : apc_meta); + + // For now we only populate prerequisites for lib{}. To do it for + // liba{} would require weeding out duplicates that are already in + // lib{}. + // + // Currently, this information is only used by the modules machinery to + // resolve module names to module files (but we cannot only do this if + // modules are enabled since the same installed library can be used by + // multiple builds). 
+ // + prerequisites prs; parse_libs ( lt, @@ -1370,12 +1300,30 @@ namespace build2 if (ps) parse_cflags (*st, spc, false); + // @@ TODO: we can now load cc.type if there is metadata (but need to + // return this rather than set, see search_library() for + // details). + + // Load the bin.whole flag (whole archive). + // + if (at != nullptr && (pa ? apc_meta : spc_meta)) + { + // Note that if unspecified we leave it unset letting the consumer + // override it, if necessary (see the bin.lib lookup semantics for + // details). + // + if (optional<string> v = (pa ? apc : spc).variable ("bin.whole")) + { + at->vars.assign ("bin.whole") = (*v == "true"); + } + } + // For now we assume static and shared variants export the same set of // modules/importable headers. While technically possible, having // different sets will most likely lead to all sorts of complications // (at least for installed libraries) and life is short. // - if (modules) + if (modules && ipc_meta) { parse_modules (ipc, prs); @@ -1403,7 +1351,7 @@ namespace build2 } bool common:: - pkgconfig_load (action, + pkgconfig_load (optional<action>, const scope&, lib&, liba*, @@ -1412,13 +1360,14 @@ namespace build2 const string&, const dir_path&, const dir_paths&, - const dir_paths&) const + const dir_paths&, + pair<bool, bool>) const { return false; } void common:: - pkgconfig_load (action, + pkgconfig_load (optional<action>, const scope&, lib&, liba*, @@ -1426,7 +1375,8 @@ namespace build2 const pair<path, path>&, const dir_path&, const dir_paths&, - const dir_paths&) const + const dir_paths&, + pair<bool, bool>) const { assert (false); // Should never be called. } @@ -1440,6 +1390,11 @@ namespace build2 // file must be generated based on the static library to get accurate // Libs.private. 
// + // The other things that we omit from the common variant are -l options + // for binless libraries (so that it's usable from other build systems) as + // well as metadata (which could become incomplete due the previous + // omissions; for example, importable headers metadata). + // void link_rule:: pkgconfig_save (action a, const file& l, @@ -1459,41 +1414,147 @@ namespace build2 /* */ pcs::static_type))); assert (t != nullptr); + const path& p (t->path ()); + + // If we are uninstalling, skip regenerating the file if it already + // exists (I think we could have skipped this even if it doesn't exist, + // but let's keep things close to the install case). + // + if (ctx.current_action ().outer_operation () == uninstall_id) + { + if (exists (p)) + return; + } + // This is the lib{} group if we are generating the common file and the // target itself otherwise. // - const file& g (common ? l.group->as<file> () : l); + const target& g (common ? *l.group : l); // By default we assume things go into install.{include, lib}. // + // If include.lib does not resolve, then assume this is update-for- + // install without actual install and remove the file if it exists. + // + // @@ Shouldn't we use target's install value rather than install.lib + // in case it gets installed into a custom location? I suppose one + // can now use cc.pkgconfig.lib to customize this. + // using install::resolve_dir; - dir_path idir (resolve_dir (g, cast<dir_path> (g["install.include"]))); - dir_path ldir (resolve_dir (g, cast<dir_path> (g["install.lib"]))); + small_vector<dir_path, 1> ldirs; - const path& p (t->path ()); + if (const dir_paths* ds = cast_null<dir_paths> (g[c_pkgconfig_lib])) + { + for (const dir_path& d: *ds) + { + bool f (ldirs.empty ()); - // If we are uninstalling, skip regenerating the file if it already - // exists (I think we could have skipped this even if it doesn't exist, - // but let's keep things close to the install case). 
- // - if (ctx.current_action ().outer_operation () == uninstall_id) + ldirs.push_back (resolve_dir (g, d, {}, !f /* fail_unknown */)); + + if (f && ldirs.back ().empty ()) + break; + } + } + else + ldirs.push_back (resolve_dir (g, + cast<dir_path> (g["install.lib"]), + {}, + false /* fail_unknown */)); + + if (!ldirs.empty () && ldirs.front ().empty ()) { - if (exists (p)) - return; + rmfile (ctx, p, 3 /* verbosity */); + return; } + small_vector<dir_path, 1> idirs; + + if (const dir_paths* ds = cast_null<dir_paths> (g[c_pkgconfig_include])) + { + for (const dir_path& d: *ds) + idirs.push_back (resolve_dir (g, d)); + } + else + idirs.push_back (resolve_dir (g, + cast<dir_path> (g["install.include"]))); + // Note that generation can take some time if we have a large number of // prerequisite libraries. // - if (verb) - text << "pc " << *t; - else if (verb >= 2) + if (verb >= 2) text << "cat >" << p; + else if (verb) + print_diag ("pc", g, *t); if (ctx.dry_run) return; + // See if we should be generating a relocatable .pc file and if so get + // its installation location. The plan is to make all absolute paths + // that we write relative to this location and prefix them with the + // built-in ${pcfiledir} variable (which supported by everybody: the + // original pkg-config, pkgconf, and our libpkg-config library). + // + dir_path rel_base; + if (cast_false<bool> (rs["install.relocatable"])) + { + path f (install::resolve_file (*t)); + if (!f.empty ()) // Shouldn't happen but who knows. + rel_base = f.directory (); + } + + // Note: reloc_*path() expect absolute and normalized paths. + // + // Note also that reloc_path() can be used on dir_path to get the path + // without the trailing slash. 
+ // + auto reloc_path = [&rel_base, + s = string ()] (const path& p, + const char* what) mutable + -> const string& + { + if (rel_base.empty ()) + return p.string (); + + try + { + s = p.relative (rel_base).string (); + } + catch (const invalid_path&) + { + fail << "unable to make " << what << " path " << p << " relative to " + << rel_base; + } + + if (!s.empty ()) s.insert (0, 1, path_traits::directory_separator); + s.insert (0, "${pcfiledir}"); + return s; + }; + + auto reloc_dir_path = [&rel_base, + s = string ()] (const dir_path& p, + const char* what) mutable + -> const string& + { + if (rel_base.empty ()) + return (s = p.representation ()); + + try + { + s = p.relative (rel_base).representation (); + } + catch (const invalid_path&) + { + fail << "unable to make " << what << " path " << p << " relative to " + << rel_base; + } + + if (!s.empty ()) s.insert (0, 1, path_traits::directory_separator); + s.insert (0, "${pcfiledir}"); + return s; + }; + auto_rmfile arm (p); try @@ -1511,6 +1572,20 @@ namespace build2 fail << "no version variable in project " << n << info << "while generating " << p; + // When comparing versions, pkg-config uses RPM semantics, which is + // basically comparing each all-digit/alpha fragments in order. + // This means, for example, a semver with a pre-release will be + // compared incorrectly (pre-release will be greater than the final + // version). We could detect if this project uses stdver and chop + // off any pre-release information (so, essentially only saving the + // major.minor.patch part). But that means such .pc files will + // contain inaccurate version information. And seeing that we don't + // recommend using pkg-config (rather primitive) package dependency + // support, having complete version information for documentation + // seems more important. + // + // @@ Maybe still makes sense to only save version.project_id? 
+ // const string& v (cast<string> (vl)); os << "Name: " << n << endl; @@ -1627,13 +1702,11 @@ namespace build2 return n; }; - // @@ TODO: support whole archive? - // - // Cflags. // os << "Cflags:"; - os << " -I" << escape (idir.string ()); + for (const dir_path& d: idirs) + os << " -I" << escape (reloc_path (d, "header search")); save_poptions (x_export_poptions); save_poptions (c_export_poptions); os << endl; @@ -1652,7 +1725,8 @@ namespace build2 // While we don't need it for a binless library itselt, it may be // necessary to resolve its binful dependencies. // - os << " -L" << escape (ldir.string ()); + for (const dir_path& d: ldirs) + os << " -L" << escape (reloc_path (d, "library search")); // Now process ourselves as if we were being linked to something (so // pretty similar to link_rule::append_libraries()). We also reuse @@ -1668,7 +1742,8 @@ namespace build2 appended_libraries* pls; // Previous. appended_libraries* ls; // Current. strings& args; - } d {os, nullptr, &ls, args}; + bool common; + } d {os, nullptr, &ls, args, common}; auto imp = [&priv] (const target&, bool la) {return priv && la;}; @@ -1712,7 +1787,17 @@ namespace build2 if (l != nullptr) { if (l->is_a<libs> () || l->is_a<liba> ()) // See through libux. - d.args.push_back (save_library_target (*l)); + { + // Omit binless libraries from the common .pc file (see + // above). + // + // Note that in this case we still want to recursively + // traverse such libraries since they may still link to some + // non-binless system libraries (-lm, etc). + // + if (!d.common || !l->path ().empty ()) + d.args.push_back (save_library_target (*l)); + } } else { @@ -1734,7 +1819,7 @@ namespace build2 //@@ TODO: should we filter -L similar to -I? //@@ TODO: how will the Libs/Libs.private work? - //@@ TODO: remember to use escape() + //@@ TODO: remember to use reloc_*() and escape(). 
if (d.pls != nullptr && d.pls->find (l) != nullptr) return true; @@ -1755,7 +1840,10 @@ namespace build2 library_cache lib_cache; process_libraries (a, bs, li, sys_lib_dirs, l, la, 0, // Link flags. - imp, lib, opt, !binless /* self */, &lib_cache); + imp, lib, opt, + !binless /* self */, + false /* proc_opt_group */, // @@ !priv? + &lib_cache); for (const string& a: args) os << ' ' << a; @@ -1777,11 +1865,325 @@ namespace build2 process_libraries (a, bs, li, sys_lib_dirs, l, la, 0, // Link flags. - imp, lib, opt, false /* self */, &lib_cache); + imp, lib, opt, + false /* self */, + false /* proc_opt_group */, // @@ !priv? + &lib_cache); for (const string& a: args) os << ' ' << a; os << endl; + + // See also bin.whole below. + } + } + + // Save metadata unless this is the common .pc file (see above). + // + if (common) + { + os.close (); + arm.cancel (); + return; + } + + // The build2.metadata variable is a general indication of the + // metadata being present. Its value is the metadata version + // optionally followed by the user metadata variable prefix and + // variable list (see below for details). Having only the version + // indicates the absense of user metadata. + // + // See if we have the user metadata. + // + lookup um (g[ctx.var_export_metadata]); // Target visibility. + + if (um && !um->empty ()) + { + const names& ns (cast<names> (um)); + + // First verify the version. + // + uint64_t ver; + try + { + // Note: does not change the passed name. + // + ver = value_traits<uint64_t>::convert ( + ns[0], ns[0].pair ? &ns[1] : nullptr); + } + catch (const invalid_argument& e) + { + fail << "invalid metadata version in library " << g << ": " << e; + } + + if (ver != 1) + fail << "unexpected metadata version " << ver << " in library " + << g; + + // Next verify the metadata variable prefix. 
+        //
+        if (ns.size () != 2 || !ns[1].simple ())
+          fail << "invalid metadata variable prefix in library " << g;
+
+        const string& pfx (ns[1].value);
+
+        // Now find all the target-specific variables with this prefix.
+        //
+        // If this is the common .pc file, then we only look in the group.
+        // Otherwise, in the member and the group.
+        //
+        // To allow setting different values for the for-install and
+        // development build cases (required when a library comes with
+        // additional "assets"), we recognize the special .for_install
+        // variable name suffix: if there are both <prefix>.<name> and
+        // <prefix>.<name>.for_install variables, then here we take the
+        // value from the latter. Note that we don't consider just
+        // <prefix>.for_install as special (so it's available to the user).
+        //
+        // We only expect a handful of variables so let's use a vector and
+        // linear search instead of a map.
+        //
+        struct binding
+        {
+          const string* name;  // Name to be saved (without .for_install).
+          const variable* var; // Actual variable (potentially .for_install).
+          const value* val;    // Actual value.
+        };
+        vector<binding> vars;
+
+        auto append = [&l, &pfx, &vars,
+                       tmp = string ()] (const target& t, bool dup) mutable
+        {
+          for (auto p (t.vars.lookup_namespace (pfx));
+               p.first != p.second;
+               ++p.first)
+          {
+            const variable* var (&p.first->first.get ());
+
+            // Handle .for_install.
+            //
+            // The plan is as follows: if this is .for_install, then just
+            // verify we also have the value without the suffix and skip
+            // it. Otherwise, check if there is also the .for_install variant
+            // and if so, use that instead. While we could probably do this
+            // more efficiently by remembering what we saw in vars, this is
+            // not performance-sensitive and so we keep it simple for now.
+ // + const string* name; + { + const string& v (var->name); + size_t n (v.size ()); + + if (n > pfx.size () + 1 + 12 && // <prefix>..for_install + v.compare (n - 12, 12, ".for_install") == 0) + { + tmp.assign (v, 0, n - 12); + + if (t.vars.find (tmp) == t.vars.end ()) + fail << v << " variant without " << tmp << " in library " + << l; + + continue; + } + else + { + name = &v; + + tmp = v; tmp += ".for_install"; + + auto i (t.vars.find (tmp)); + if (i != t.vars.end ()) + var = &i->first.get (); + } + } + + if (dup) + { + if (find_if (vars.begin (), vars.end (), + [name] (const binding& p) + { + return *p.name == *name; + }) != vars.end ()) + continue; + } + + // Re-lookup the value in order to apply target type/pattern + // specific prepends/appends. + // + lookup l (t[*var]); + assert (l.defined ()); + + vars.push_back (binding {name, var, l.value}); + } + }; + + append (g, false); + + if (!common) + { + if (l.group != nullptr) + append (*l.group, true); + } + + // First write the build2.metadata variable with the version, + // prefix, and all the variable names/types (which should not + // require any escaping). + // + os << endl + << "build2.metadata = " << ver << ' ' << pfx; + + for (const binding& b: vars) + { + const variable& var (*b.var); + const value& val (*b.val); + + // There is no notion of NULL in pkg-config variables and it's + // probably best not to conflate them with empty. + // + if (val.null) + fail << "null value in exported variable " << var + << " of library " << l; + + if (val.type == nullptr) + fail << "untyped value in exported variable " << var + << " of library " << l; + + // Tighten this to only a sensible subset of types (see + // parsing/serialization code for some of the potential problems). 
+ // + if (!metadata_type (val.type->name).first) + fail << "unsupported value type " << val.type->name + << " in exported variable " << var << " of library " << l; + + os << " \\" << endl + << *b.name << '/' << val.type->name; + } + + os << endl + << endl; + + // Now the variables themselves. + // + string s; // Reuse the buffer. + for (const binding& b: vars) + { + const variable& var (*b.var); + const value& val (*b.val); + + names ns; + names_view nv (reverse (val, ns, true /* reduce */)); + + os << *b.name << " ="; + + auto append = [&rel_base, + &reloc_path, + &reloc_dir_path, + &l, &var, &val, &s] (const name& v) + { + // If this is absolute path or dir_path, then attempt to + // relocate. Without that the result will not be relocatable. + // + if (v.simple ()) + { + path p; + if (!rel_base.empty () && + val.type != nullptr && + (val.type->is_a<path> () || val.type->is_a<paths> ()) && + (p = path (v.value)).absolute ()) + { + p.normalize (); + s += reloc_path (p, var.name.c_str ()); + } + else + s += v.value; + } + else if (v.directory ()) + { + if (!rel_base.empty () && v.dir.absolute ()) + { + dir_path p (v.dir); + p.normalize (); + s += reloc_dir_path (p, var.name.c_str ()); + } + else + s += v.dir.representation (); + } + else + // It seems like we shouldn't end up here due to the type + // check but let's keep it for good measure. + // + fail << "simple or directory value expected instead of '" + << v << "' in exported variable " << var << " of library " + << l; + }; + + for (auto i (nv.begin ()); i != nv.end (); ++i) + { + s.clear (); + append (*i); + + if (i->pair) + { + // @@ What if the value contains the pair character? Maybe + // quote the halves in this case? Note: need to handle in + // parse_metadata() above if enable here. Note: none of the + // types currently allowed use pairs. 
+#if 0 + s += i->pair; + append (*++i); +#else + fail << "pair in exported variable " << var << " of library " + << l; +#endif + } + + os << ' ' << escape (s); + } + + os << endl; + } + } + else + { + // No user metadata. + // + os << endl + << "build2.metadata = 1" << endl; + } + + // Save cc.type (see init() for the format documentation). + // + // Note that this value is set by link_rule and therefore should + // be there. + // + { + const string& t ( + cast<string> ( + l.state[a].lookup_original ( + c_type, true /* target_only */).first)); + + // If common, then only save the language (the rest could be + // static/shared-specific; strictly speaking even the language could + // be, but that seems far fetched). + // + os << endl + << "cc.type = " << (common ? string (t, 0, t.find (',')) : t) + << endl; + } + + // Save the bin.whole (whole archive) flag (see the link rule for + // details on the lookup semantics). + // + if (la) + { + // Note: go straight for the public variable pool. + // + if (cast_false<bool> (l.lookup_original ( + ctx.var_pool["bin.whole"], + true /* target_only */).first)) + { + os << endl + << "bin.whole = true" << endl; } } @@ -1838,7 +2240,7 @@ namespace build2 const target* mt (nullptr); for (const target* t: pt->prerequisite_targets[a]) { - if ((mt = t->is_a (*x_mod))) + if (t != nullptr && (mt = t->is_a (*x_mod))) break; } @@ -1888,7 +2290,7 @@ namespace build2 if (size_t n = mods.size ()) { os << endl - << "cxx_modules ="; + << "cxx.modules ="; // The partition separator (`:`) is not a valid character in the // variable name. In fact, from the pkg-config source we can see @@ -1906,33 +2308,35 @@ namespace build2 // Module names shouldn't require escaping. // os << (n != 1 ? " \\\n" : " ") - << m.name << '=' << escape (m.file.string ()); + << m.name << '=' + << escape (reloc_path (m.file, "module interface")); } os << endl; // Module-specific properties. 
The format is: // - // <lang>_module_<property>.<module> = <value> + // <lang>.module_<property>.<module> = <value> // for (const module& m: mods) { if (!m.preprocessed.empty ()) - os << "cxx_module_preprocessed." << m.name << " = " + os << "cxx.module_preprocessed." << m.name << " = " << m.preprocessed << endl; if (m.symexport) - os << "cxx_module_symexport." << m.name << " = true" << endl; + os << "cxx.module_symexport." << m.name << " = true" << endl; } } if (size_t n = c_hdrs.size ()) { os << endl - << "c_importable_headers ="; + << "c.importable_headers ="; for (const path& h: c_hdrs) - os << (n != 1 ? " \\\n" : " ") << escape (h.string ()); + os << (n != 1 ? " \\\n" : " ") + << escape (reloc_path (h, "header unit")); os << endl; } @@ -1940,10 +2344,11 @@ namespace build2 if (size_t n = x_hdrs.size ()) { os << endl - << x << "_importable_headers ="; + << x << ".importable_headers ="; for (const path& h: x_hdrs) - os << (n != 1 ? " \\\n" : " ") << escape (h.string ()); + os << (n != 1 ? " \\\n" : " ") + << escape (reloc_path (h, "header unit")); os << endl; } diff --git a/libbuild2/cc/pkgconfig.hxx b/libbuild2/cc/pkgconfig.hxx new file mode 100644 index 0000000..a1bcdee --- /dev/null +++ b/libbuild2/cc/pkgconfig.hxx @@ -0,0 +1,129 @@ +// file : libbuild2/cc/pkgconfig.hxx -*- C++ -*- +// license : MIT; see accompanying LICENSE file + +#ifndef LIBBUILD2_CC_PKGCONFIG_HXX +#define LIBBUILD2_CC_PKGCONFIG_HXX + +// In order not to complicate the bootstrap procedure with libpkg-config +// building, exclude functionality that involves reading of .pc files. +// +#ifndef BUILD2_BOOTSTRAP + +#ifndef BUILD2_LIBPKGCONF +# include <libpkg-config/pkg-config.h> +#else +# include <libpkgconf/libpkgconf.h> +#endif + +#include <libbuild2/types.hxx> +#include <libbuild2/utility.hxx> + +namespace build2 +{ + namespace cc + { + // Load package information from a .pc file. Filter out the -I/-L options + // that refer to system directories. 
This makes sure all the system search + // directories are "pushed" to the back which minimizes the chances of + // picking up wrong (e.g., old installed version) header/library. + // + // Note that the prerequisite package .pc files search order is as + // follows: + // + // - in the directory of the specified file + // - in pc_dirs directories (in the specified order) + // + // Issue diagnostics and throw failed on any errors. + // + class pkgconfig + { + public: + using path_type = build2::path; + + path_type path; + + public: + pkgconfig (path_type, + const dir_paths& pc_dirs, + const dir_paths& sys_hdr_dirs, + const dir_paths& sys_lib_dirs); + + // Create an unloaded/empty object. Querying package information on such + // an object is illegal. + // + pkgconfig () = default; + ~pkgconfig (); + + // Movable-only type. + // + pkgconfig (pkgconfig&&) noexcept; + pkgconfig& operator= (pkgconfig&&) noexcept; + + pkgconfig (const pkgconfig&) = delete; + pkgconfig& operator= (const pkgconfig&) = delete; + + strings + cflags (bool static_) const; + + strings + libs (bool static_) const; + + optional<string> + variable (const char*) const; + + optional<string> + variable (const string& s) const {return variable (s.c_str ());} + + private: + void + free (); + +#ifndef BUILD2_LIBPKGCONF + pkg_config_client_t* client_ = nullptr; + pkg_config_pkg_t* pkg_ = nullptr; +#else + pkgconf_client_t* client_ = nullptr; + pkgconf_pkg_t* pkg_ = nullptr; +#endif + }; + + inline pkgconfig:: + ~pkgconfig () + { + if (client_ != nullptr) // Not empty. + free (); + } + + inline pkgconfig:: + pkgconfig (pkgconfig&& p) noexcept + : path (move (p.path)), + client_ (p.client_), + pkg_ (p.pkg_) + { + p.client_ = nullptr; + p.pkg_ = nullptr; + } + + inline pkgconfig& pkgconfig:: + operator= (pkgconfig&& p) noexcept + { + if (this != &p) + { + if (client_ != nullptr) // Not empty. 
+ free (); + + path = move (p.path); + client_ = p.client_; + pkg_ = p.pkg_; + + p.client_ = nullptr; + p.pkg_ = nullptr; + } + return *this; + } + } +} + +#endif // BUILD2_BOOTSTRAP + +#endif // LIBBUILD2_CC_PKGCONFIG_HXX diff --git a/libbuild2/cc/target.cxx b/libbuild2/cc/target.cxx index b17e1ef..6c5d7c8 100644 --- a/libbuild2/cc/target.cxx +++ b/libbuild2/cc/target.cxx @@ -21,11 +21,10 @@ namespace build2 nullptr, nullptr, &target_search, - false + target_type::flag::none }; extern const char h_ext_def[] = "h"; - const target_type h::static_type { "h", @@ -36,11 +35,10 @@ namespace build2 &target_pattern_var<h_ext_def>, nullptr, &file_search, - false + target_type::flag::none }; extern const char c_ext_def[] = "c"; - const target_type c::static_type { "c", @@ -51,11 +49,38 @@ namespace build2 &target_pattern_var<c_ext_def>, nullptr, &file_search, - false + target_type::flag::none }; - extern const char pc_ext[] = "pc"; // VC14 rejects constexpr. + extern const char m_ext_def[] = "m"; + const target_type m::static_type + { + "m", + &cc::static_type, + &target_factory<m>, + nullptr, /* fixed_extension */ + &target_extension_var<m_ext_def>, + &target_pattern_var<m_ext_def>, + nullptr, + &file_search, + target_type::flag::none + }; + extern const char S_ext_def[] = "S"; + const target_type S::static_type + { + "S", + &cc::static_type, + &target_factory<S>, + nullptr, /* fixed_extension */ + &target_extension_var<S_ext_def>, + &target_pattern_var<S_ext_def>, + nullptr, + &file_search, + target_type::flag::none + }; + + extern const char pc_ext[] = "pc"; // VC14 rejects constexpr. const target_type pc::static_type { "pc", @@ -66,11 +91,10 @@ namespace build2 &target_pattern_fix<pc_ext>, &target_print_0_ext_verb, // Fixed extension, no use printing. &file_search, - false + target_type::flag::none }; extern const char pca_ext[] = "static.pc"; // VC14 rejects constexpr. 
- const target_type pca::static_type { "pca", @@ -81,11 +105,10 @@ namespace build2 &target_pattern_fix<pca_ext>, &target_print_0_ext_verb, // Fixed extension, no use printing. &file_search, - false + target_type::flag::none }; extern const char pcs_ext[] = "shared.pc"; // VC14 rejects constexpr. - const target_type pcs::static_type { "pcs", @@ -96,7 +119,7 @@ namespace build2 &target_pattern_fix<pcs_ext>, &target_print_0_ext_verb, // Fixed extension, no use printing. &file_search, - false + target_type::flag::none }; } } diff --git a/libbuild2/cc/target.hxx b/libbuild2/cc/target.hxx index 7067421..a078422 100644 --- a/libbuild2/cc/target.hxx +++ b/libbuild2/cc/target.hxx @@ -23,11 +23,14 @@ namespace build2 class LIBBUILD2_CC_SYMEXPORT cc: public file { public: - using file::file; + cc (context& c, dir_path d, dir_path o, string n) + : file (c, move (d), move (o), move (n)) + { + dynamic_type = &static_type; + } public: static const target_type static_type; - virtual const target_type& dynamic_type () const = 0; }; // There is hardly a c-family compilation without a C header inclusion. @@ -36,11 +39,14 @@ namespace build2 class LIBBUILD2_CC_SYMEXPORT h: public cc { public: - using cc::cc; + h (context& c, dir_path d, dir_path o, string n) + : cc (c, move (d), move (o), move (n)) + { + dynamic_type = &static_type; + } public: static const target_type static_type; - virtual const target_type& dynamic_type () const {return static_type;} }; // This one we define in cc but the target type is only registered by the @@ -52,11 +58,46 @@ namespace build2 class LIBBUILD2_CC_SYMEXPORT c: public cc { public: - using cc::cc; + c (context& ctx, dir_path d, dir_path o, string n) + : cc (ctx, move (d), move (o), move (n)) + { + dynamic_type = &static_type; + } + + public: + static const target_type static_type; + }; + + // Objective-C source file (the same rationale for having it here as for + // c{} above). 
+ // + class LIBBUILD2_CC_SYMEXPORT m: public cc + { + public: + m (context& c, dir_path d, dir_path o, string n) + : cc (c, move (d), move (o), move (n)) + { + dynamic_type = &static_type; + } + + public: + static const target_type static_type; + }; + + // Assembler with C preprocessor source file (the same rationale for + // having it here as for c{} above). + // + class LIBBUILD2_CC_SYMEXPORT S: public cc + { + public: + S (context& c, dir_path d, dir_path o, string n) + : cc (c, move (d), move (o), move (n)) + { + dynamic_type = &static_type; + } public: static const target_type static_type; - virtual const target_type& dynamic_type () const {return static_type;} }; // pkg-config file targets. @@ -64,31 +105,40 @@ namespace build2 class LIBBUILD2_CC_SYMEXPORT pc: public file // .pc (common) { public: - using file::file; + pc (context& c, dir_path d, dir_path o, string n) + : file (c, move (d), move (o), move (n)) + { + dynamic_type = &static_type; + } public: static const target_type static_type; - virtual const target_type& dynamic_type () const {return static_type;} }; class LIBBUILD2_CC_SYMEXPORT pca: public pc // .static.pc { public: - using pc::pc; + pca (context& c, dir_path d, dir_path o, string n) + : pc (c, move (d), move (o), move (n)) + { + dynamic_type = &static_type; + } public: static const target_type static_type; - virtual const target_type& dynamic_type () const {return static_type;} }; class LIBBUILD2_CC_SYMEXPORT pcs: public pc // .shared.pc { public: - using pc::pc; + pcs (context& c, dir_path d, dir_path o, string n) + : pc (c, move (d), move (o), move (n)) + { + dynamic_type = &static_type; + } public: static const target_type static_type; - virtual const target_type& dynamic_type () const {return static_type;} }; } } diff --git a/libbuild2/cc/types.cxx b/libbuild2/cc/types.cxx index 8ee4fa9..c6cfae9 100644 --- a/libbuild2/cc/types.cxx +++ b/libbuild2/cc/types.cxx @@ -6,6 +6,7 @@ #include <libbuild2/cc/utility.hxx> using namespace std; 
+using namespace butl; namespace build2 { @@ -123,6 +124,8 @@ namespace build2 size_t importable_headers:: insert_angle_pattern (const dir_paths& sys_hdr_dirs, const string& pat) { + tracer trace ("importable_headers::insert_angle_pattern"); + assert (pat.front () == '<' && pat.back () == '>' && path_pattern (pat)); // First see if it has already been inserted. @@ -172,7 +175,17 @@ namespace build2 try { - path_search (f, process, dir); + path_search ( + f, + process, + dir, + path_match_flags::follow_symlinks, + [&trace] (const dir_entry& de) + { + l5 ([&]{trace << "skipping inaccessible/dangling entry " + << de.base () / de.path ();}); + return true; + }); } catch (const system_error& e) { diff --git a/libbuild2/cc/types.hxx b/libbuild2/cc/types.hxx index c5b35f5..93f31bc 100644 --- a/libbuild2/cc/types.hxx +++ b/libbuild2/cc/types.hxx @@ -175,6 +175,10 @@ namespace build2 const target_type& bmi; const target_type& hbmi; }; + + // "Unhide" operator<< from the build2 namespace. + // + using build2::operator<<; } } diff --git a/libbuild2/cc/utility.cxx b/libbuild2/cc/utility.cxx index ffe3e03..e02f85a 100644 --- a/libbuild2/cc/utility.cxx +++ b/libbuild2/cc/utility.cxx @@ -3,10 +3,6 @@ #include <libbuild2/cc/utility.hxx> -#include <libbuild2/file.hxx> - -using namespace std; - namespace build2 { namespace cc @@ -17,58 +13,5 @@ namespace build2 const dir_path module_build_dir (dir_path (module_dir) /= "build"); const dir_path module_build_modules_dir ( dir_path (module_build_dir) /= "modules"); - - void - normalize_header (path& f) - { - // Interestingly, on most paltforms and with most compilers (Clang on - // Linux being a notable exception) most system/compiler headers are - // already normalized. - // - path_abnormality a (f.abnormalities ()); - if (a != path_abnormality::none) - { - // While we can reasonably expect this path to exit, things do go - // south from time to time (like compiling under wine with file - // wlantypes.h included as WlanTypes.h). 
- // - try - { - // If we have any parent components, then we have to verify the - // normalized path matches realized. - // - path r; - if ((a & path_abnormality::parent) == path_abnormality::parent) - { - r = f; - r.realize (); - } - - try - { - f.normalize (); - - // Note that we might still need to resolve symlinks in the - // normalized path. - // - if (!r.empty () && f != r && path (f).realize () != r) - f = move (r); - } - catch (const invalid_path&) - { - assert (!r.empty ()); // Shouldn't have failed if no `..`. - f = move (r); // Fallback to realize. - } - } - catch (const invalid_path&) - { - fail << "invalid header path '" << f.string () << "'"; - } - catch (const system_error& e) - { - fail << "invalid header path '" << f.string () << "': " << e; - } - } - } } } diff --git a/libbuild2/cc/utility.hxx b/libbuild2/cc/utility.hxx index 42e53e3..6ba4a20 100644 --- a/libbuild2/cc/utility.hxx +++ b/libbuild2/cc/utility.hxx @@ -9,6 +9,7 @@ #include <libbuild2/utility.hxx> #include <libbuild2/target.hxx> +#include <libbuild2/filesystem.hxx> #include <libbuild2/bin/target.hxx> #include <libbuild2/bin/utility.hxx> @@ -51,29 +52,11 @@ namespace build2 // Normalize an absolute path to an existing header. // - // We used to just normalize the path but that could result in an invalid - // path (e.g., for some system/compiler headers on CentOS 7 with Clang - // 3.4) because of the symlinks (if a directory component is a symlink, - // then any following `..` are resolved relative to the target; see - // path::normalize() for background). - // - // Initially, to fix this, we realized (i.e., realpath(3)) it instead. - // But that turned out also not to be quite right since now we have all - // the symlinks resolved: conceptually it feels correct to keep the - // original header names since that's how the user chose to arrange things - // and practically this is how the compilers see/report them (e.g., the - // GCC module mapper). 
- // - // So now we have a pretty elaborate scheme where we try to use the - // normalized path if possible and fallback to realized. Normalized paths - // will work for situations where `..` does not cross symlink boundaries, - // which is the sane case. And for the insane case we only really care - // about out-of-project files (i.e., system/compiler headers). In other - // words, if you have the insane case inside your project, then you are on - // your own. - // - void - normalize_header (path&); + inline void + normalize_header (path& f) + { + normalize_external (f, "header"); + } } } diff --git a/libbuild2/cc/windows-rpath.cxx b/libbuild2/cc/windows-rpath.cxx index 2d90ace..bd5a928 100644 --- a/libbuild2/cc/windows-rpath.cxx +++ b/libbuild2/cc/windows-rpath.cxx @@ -128,7 +128,9 @@ namespace build2 library_cache lib_cache; for (const prerequisite_target& pt: t.prerequisite_targets[a]) { - if (pt.adhoc || pt == nullptr) + // Note: during execute so check for ad hoc first to avoid data races. + // + if (pt.adhoc () || pt == nullptr) continue; bool la; @@ -139,7 +141,9 @@ namespace build2 ( f = pt->is_a<libs> ())) process_libraries (a, bs, li, sys_lib_dirs, *f, la, pt.data, - imp, lib, nullptr, true /* self */, + imp, lib, nullptr, + true /* self */, + false /* proc_opt_group */, &lib_cache); } @@ -253,7 +257,9 @@ namespace build2 library_cache lib_cache; for (const prerequisite_target& pt: t.prerequisite_targets[a]) { - if (pt.adhoc || pt == nullptr) + // Note: during execute so check for ad hoc first to avoid data races. + // + if (pt.adhoc () || pt == nullptr) continue; bool la; @@ -264,7 +270,9 @@ namespace build2 ( f = pt->is_a<libs> ())) process_libraries (a, bs, li, sys_lib_dirs, *f, la, pt.data, - imp, lib, nullptr, true /* self */, + imp, lib, nullptr, + true /* self */, + false /* proc_opt_group */, &lib_cache); } @@ -361,11 +369,16 @@ namespace build2 // of the same amalgamation. 
This way if the amalgamation is moved // as a whole, the links will remain valid. // + // Note: mkanylink() is from libbutl and thus doesn't handle the + // dry-run mode. + // try { - switch (mkanylink (f, l, - true /* copy */, - f.sub (as.out_path ()) /* relative */)) + switch (as.ctx.dry_run + ? entry_type::symlink + : mkanylink (f, l, + true /* copy */, + f.sub (as.out_path ()) /* relative */)) { case entry_type::regular: print ("cp"); break; case entry_type::symlink: print ("ln -s"); break; |