| author | Boris Kolpackov <boris@codesynthesis.com> | 2015-06-26 17:25:10 +0200 |
|---|---|---|
| committer | Boris Kolpackov <boris@codesynthesis.com> | 2015-06-26 17:25:10 +0200 |
| commit | 18568ff0ff3dce89d694b494c5dfc9a32e63c9e6 (patch) | |
| tree | cd36895fdab3b30b8b61fcc3e12f8a92bb292203 | |
| parent | 95239b7c5404965d4f5ef997b5b75bf542a25192 (diff) | |
Part two of dependency injection with auto-generation support
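In brief, this part adds a primitive for executing a target directly, bypassing the normal dependents-count and execution-order machinery, and then uses it both to resolve group members (build/algorithm.cxx) and to update potentially auto-generated prerequisites during C++ header dependency extraction (build/cxx/rule.cxx). The new primitive, as added inline in build/algorithm.ixx (excerpted from the diff below):

```cpp
// Excerpt from build/algorithm.ixx as added by this commit (it lives in
// namespace build). Unlike execute(), no dependents count is decremented
// and the postponed state is never returned.
//
inline target_state
execute_direct (action a, target& t)
{
  switch (t.state)
  {
  case target_state::unchanged:
  case target_state::changed:
    return t.state;             // Already executed; return the recorded state.
  default:
    return execute_impl (a, t); // Execute right now.
  }
}
```

build/algorithm.cxx switches the group-member resolution code from execute_impl() to this function, and cxx::compile uses it to make sure the source file and any auto-generated headers are up to date during dependency extraction.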
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | build/algorithm | 11 |
| -rw-r--r-- | build/algorithm.cxx | 4 |
| -rw-r--r-- | build/algorithm.ixx | 11 |
| -rw-r--r-- | build/cli/rule.cxx | 147 |
| -rw-r--r-- | build/config/utility | 8 |
| -rw-r--r-- | build/cxx/rule | 4 |
| -rw-r--r-- | build/cxx/rule.cxx | 425 |
| -rw-r--r-- | build/rule.cxx | 11 |
| -rw-r--r-- | build/target | 3 |
| -rw-r--r-- | build/target.cxx | 11 |
| -rw-r--r-- | tests/cli/lib/libtest/test/base.cli | 7 |
| -rw-r--r-- | tests/cli/lib/libtest/test/buildfile | 11 |
| -rw-r--r-- | tests/cli/lib/libtest/test/test.cli | 5 |
13 files changed, 416 insertions, 242 deletions
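The bulk of the change is in build/cxx/rule.cxx: header dependency extraction (running the compiler with -M) is now wrapped in a restart loop so that auto-generated headers can be created on demand. Whenever updating an extracted header actually changes it, the compiler is re-run (the regenerated header may include yet another non-existent generated header), and a skip counter avoids re-processing the prefix of prerequisites already handled on earlier passes. A much-simplified sketch of that loop follows; `run_m` and `update` are hypothetical stand-ins for the real process and build-state machinery:

```cpp
#include <cstddef>
#include <string>
#include <vector>

// Simplified sketch of the restart logic in cxx::compile's dependency
// extraction. RunM yields the headers reported by `g++ -M` for the source
// file; Update brings one header up to date and returns true if it was
// actually changed (e.g., auto-generated). Both are assumed helpers, not
// part of the build codebase.
//
template <typename RunM, typename Update>
std::vector<std::string>
extract_headers (RunM run_m, Update update)
{
  std::vector<std::string> prereqs;
  std::size_t skip_count (0); // Headers already processed on earlier passes.

  for (bool restart (true); restart; )
  {
    restart = false;
    std::size_t skip (skip_count);

    for (const std::string& h: run_m ())
    {
      // The already-seen prefix of the -M output cannot change between
      // passes (none of the files it could depend on were touched), so
      // skip straight past it.
      //
      if (skip != 0)
      {
        --skip;
        continue;
      }

      bool changed (update (h)); // May generate the header.

      prereqs.push_back (h);
      ++skip_count;

      if (changed)
      {
        // The regenerated header may include further non-existent
        // headers, so redo the extraction from scratch.
        //
        restart = true;
        break;
      }
    }
  }

  return prereqs;
}
```

In the real rule, the update step corresponds to match() plus execute_direct() on the header target, with a fast path for headers matched by the fallback path_rule (existing files, such as system headers, that will never be updated).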
diff --git a/build/algorithm b/build/algorithm index 59021f5..ad53b95 100644 --- a/build/algorithm +++ b/build/algorithm @@ -66,7 +66,7 @@ namespace build // in the same or a subdirectory of dir. // void - search_and_match (action, target&, const dir_path& dir); + search_and_match (action, target&, const dir_path&); // Unless already available, match, and, if necessary, execute // (not yet implemented) the group in order to obtain its members @@ -90,6 +90,15 @@ namespace build target_state execute (action, target&); + // A special version of the above that should be used for "direct" + // and "now" execution, that is, side-stepping the normal target- + // prerequisite relationship (so no dependents count is decremented) + // and execution order (so this function will never return postponed + // target state). + // + target_state + execute_direct (action, target&); + // The default prerequisite execute implementation. It calls execute() // on each non-ignored (non-NULL) prerequisite target in a loop. If this // target is a member of a group, then it first does this to the group's diff --git a/build/algorithm.cxx b/build/algorithm.cxx index f4da3bd..4f30944 100644 --- a/build/algorithm.cxx +++ b/build/algorithm.cxx @@ -196,13 +196,13 @@ namespace build g.recipe (a, p.first->apply (a, g, p.second)); } - // Note the we use execute_impl() rather than execute() here + // Note that we use execute_direct() rather than execute() here // to sidestep the dependents count logic. In this context, // this is by definition the first attempt to execute this // rule (otherwise we would have already known the members // list) and we really do need to execute it now. // - execute_impl (a, g); + execute_direct (a, g); r = g.members (a); assert (r.members != nullptr); // What "next step" did the group expect? diff --git a/build/algorithm.ixx b/build/algorithm.ixx index 3a6f183..7300848 100644 --- a/build/algorithm.ixx +++ b/build/algorithm.ixx @@ -83,4 +83,15 @@ namespace build } } } + + inline target_state + execute_direct (action a, target& t) + { + switch (t.state) + { + case target_state::unchanged: + case target_state::changed: return t.state; + default: return execute_impl (a, t); + } + } } diff --git a/build/cli/rule.cxx b/build/cli/rule.cxx index abcbb70..6f8b648 100644 --- a/build/cli/rule.cxx +++ b/build/cli/rule.cxx @@ -8,6 +8,7 @@ #include <build/scope> #include <build/target> +#include <build/context> #include <build/algorithm> #include <build/diagnostics> @@ -217,68 +218,95 @@ namespace build // cli* s (execute_prerequisites<cli> (a, t, t.h ()->mtime ())); + target_state ts; + if (s == nullptr) - return target_state::unchanged; + ts = target_state::unchanged; + else + { + // Translate source path to relative (to working directory). This + // results in easier to read diagnostics. + // + path relo (relative (t.dir)); + path rels (relative (s->path ())); - // Translate source path to relative (to working directory). This - // results in easier to read diagnostics. - // - path relo (relative (t.dir)); - path rels (relative (s->path ())); + scope& rs (t.root_scope ()); + const string& cli (rs["config.cli"].as<const string&> ()); - scope& rs (t.root_scope ()); - const string& cli (rs["config.cli"].as<const string&> ()); + vector<const char*> args {cli.c_str ()}; - vector<const char*> args {cli.c_str ()}; + // See if we need to pass any --?xx-suffix options. 
+ // + append_extension (args, *t.h (), "--hxx-suffix", "hxx"); + append_extension (args, *t.c (), "--cxx-suffix", "cxx"); + if (t.i () != nullptr) + append_extension (args, *t.i (), "--ixx-suffix", "ixx"); - // See if we need to pass any --?xx-suffix options. - // - append_extension (args, *t.h (), "--hxx-suffix", "hxx"); - append_extension (args, *t.c (), "--cxx-suffix", "cxx"); - if (t.i () != nullptr) - append_extension (args, *t.i (), "--ixx-suffix", "ixx"); + append_options (args, t, "cli.options"); - append_options (args, t, "cli.options"); + if (!relo.empty ()) + { + args.push_back ("-o"); + args.push_back (relo.string ().c_str ()); + } - if (!relo.empty ()) - { - args.push_back ("-o"); - args.push_back (relo.string ().c_str ()); - } + args.push_back (rels.string ().c_str ()); + args.push_back (nullptr); - args.push_back (rels.string ().c_str ()); - args.push_back (nullptr); + if (verb) + print_process (args); + else + text << "cli " << *s; - if (verb) - print_process (args); - else - text << "cli " << *s; + try + { + process pr (args.data ()); - try - { - process pr (args.data ()); + if (!pr.wait ()) + throw failed (); - if (!pr.wait ()) - throw failed (); + timestamp s (system_clock::now ()); - timestamp ts (system_clock::now ()); + // Update member timestamps. + // + t.h ()->mtime (s); + t.c ()->mtime (s); + if (t.i () != nullptr) + t.i ()->mtime (s); - t.h ()->mtime (ts); - t.c ()->mtime (ts); - if (t.i () != nullptr) - t.i ()->mtime (ts); + ts = target_state::changed; + } + catch (const process_error& e) + { + error << "unable to execute " << args[0] << ": " << e.what (); - return target_state::changed; + if (e.child ()) + exit (1); + + throw failed (); + } } - catch (const process_error& e) - { - error << "unable to execute " << args[0] << ": " << e.what (); - if (e.child ()) - exit (1); + // Update member recipes. Without that the state update below + // won't stick. + // + if (!t.h ()->recipe (a)) + t.h ()->recipe (a, &delegate); + + if (!t.c ()->recipe (a)) + t.c ()->recipe (a, &delegate); - throw failed (); - } + if (t.i () != nullptr && !t.i ()->recipe (a)) + t.i ()->recipe (a, &delegate); + + // Update member states. + // + t.h ()->state = ts; + t.c ()->state = ts; + if (t.i () != nullptr) + t.i ()->state = ts; + + return ts; } target_state compile:: @@ -286,19 +314,30 @@ namespace build { cli_cxx& t (static_cast<cli_cxx&> (xt)); - target_state ts (target_state::unchanged); + // The reverse order of update: first delete the files, then clean + // prerequisites. Also update timestamp in case there are operations + // after us that could use the information. + // + // + bool r (false); + + if (t.i () != nullptr) + { + r = rmfile (t.i ()->path (), *t.i ()) || r; + t.i ()->mtime (timestamp_nonexistent); + } - if (t.i () != nullptr && - build::perform_clean (a, *t.i ()) == target_state::changed) - ts = target_state::changed; + r = rmfile (t.c ()->path (), *t.c ()) || r; + t.c ()->mtime (timestamp_nonexistent); - if (build::perform_clean (a, *t.c ()) == target_state::changed) - ts = target_state::changed; + r = rmfile (t.h ()->path (), *t.h ()) || r; + t.h ()->mtime (timestamp_nonexistent); - if (build::perform_clean (a, *t.h ()) == target_state::changed) - ts = target_state::changed; + // Clean prerequisites. + // + target_state ts (reverse_execute_prerequisites (a, t)); - return ts; + return r ? 
target_state::changed : ts; } target_state compile:: diff --git a/build/config/utility b/build/config/utility index 403a91e..e59f0e5 100644 --- a/build/config/utility +++ b/build/config/utility @@ -53,11 +53,13 @@ namespace build { for (const name& n: val.template as<const list_value&> ()) { - if (!n.simple ()) + if (n.simple ()) + args.push_back (n.value.c_str ()); + else if (n.directory ()) + args.push_back (n.dir.string ().c_str ()); + else fail << "expected option instead of " << n << info << "in variable " << var; - - args.push_back (n.value.c_str ()); } } } diff --git a/build/cxx/rule b/build/cxx/rule index 8c575fe..8b42fc2 100644 --- a/build/cxx/rule +++ b/build/cxx/rule @@ -30,10 +30,6 @@ namespace build static target_state perform_update (action, target&); - - private: - void - inject_prerequisites (action, target&, const cxx&, scope&) const; }; class link: public rule diff --git a/build/cxx/rule.cxx b/build/cxx/rule.cxx index 47f925d..8d91e1c 100644 --- a/build/cxx/rule.cxx +++ b/build/cxx/rule.cxx @@ -108,6 +108,9 @@ namespace build return nullptr; } + static void + inject_prerequisites (action, target&, cxx&, scope&); + recipe compile:: apply (action a, target& xt, void* v) const { @@ -149,10 +152,11 @@ namespace build t.prerequisite_targets.push_back (&pt); } - // Inject additional prerequisites. For now we only do it for - // update and default. + // Inject additional prerequisites. We only do it for update + // since chances are we will have to update some of our + // prerequisites in the process (auto-generated source code). // - if (a.operation () == update_id || a.operation () == default_id) + if (a.operation () == update_id) { // The cached prerequisite target (sp.target) should be the // same as what is in t.prerequisite_targets since we used @@ -161,8 +165,7 @@ namespace build prerequisite& sp (*static_cast<prerequisite*> (v)); cxx& st (dynamic_cast<cxx&> (*sp.target)); - if (st.mtime () != timestamp_nonexistent) - inject_prerequisites (a, t, st, sp.scope); + inject_prerequisites (a, t, st, sp.scope); } switch (a) @@ -240,7 +243,7 @@ namespace build if (++i == e) break; // Let the compiler complain. - d = dir_path (i->value); + d = i->simple () ? dir_path (i->value) : i->dir; } else if (i->value.compare (0, 2, "-I") == 0) d = dir_path (i->value, 2, string::npos); @@ -370,8 +373,8 @@ namespace build return r; } - void compile:: - inject_prerequisites (action a, target& t, const cxx& s, scope& ds) const + static void + inject_prerequisites (action a, target& t, cxx& s, scope& ds) { tracer trace ("cxx::compile::inject_prerequisites"); @@ -408,212 +411,298 @@ namespace build args.push_back ("-MQ"); // Quoted target name. args.push_back ("*"); // Old versions can't handle empty target name. - // We are using absolute source file path in order to get - // absolute paths in the result. Any relative paths in the - // result are non-existent generated headers. + // We are using absolute source file path in order to get absolute + // paths in the result. Any relative paths in the result are non- + // existent, potentially auto-generated headers. // // @@ We will also have to use absolute -I paths to guarantee - // that. + // that. Or just detect relative paths and error out? // args.push_back (s.path ().string ().c_str ()); - args.push_back (nullptr); - if (verb >= 2) - print_process (args); - level5 ([&]{trace << "target: " << t;}); - try - { - process pr (args.data (), false, false, true); - ifdstream is (pr.in_ofd); - prefix_map pm; // Build it lazily. 
+ // Build the prefix map lazily only if we have non-existent files. + // Also reuse it over restarts since it doesn't change. + // + prefix_map pm; + + // If any prerequisites that we have extracted changed, then we + // have to redo the whole thing. The reason for this is auto- + // generated headers: the updated header may now include a yet- + // non-existent header. Unless we discover this and generate it + // (which, BTW, will trigger another restart since that header, + // in turn, can also include auto-generated headers), we will + // end up with an error during compilation proper. + // + // One complication with this restart logic is that we will see + // a "prefix" of prerequsites that we have already processed + // (i.e., they are already in our prerequsite_targets list) and + // we don't want to keep redoing this over and over again. One + // thing to note, however, is that the prefix that we have seen + // on the previous run must appear exactly the same in the + // subsequent run. The reason for this is that none of the files + // that it can possibly be based on have changed and thus it + // should be exactly the same. To put it another way, the + // presence or absence of a file in the dependency output can + // only depend on the previous files (assuming the compiler + // outputs them as it encounters them and it is hard to think + // of a reason why would someone do otherwise). And we have + // already made sure that all those files are up to date. And + // here is the way we are going to exploit this: we are going + // to keep track of how many prerequsites we have processed so + // far and on restart skip right to the next one. + // + // Also, before we do all that, make sure the source file itself + // if up to date. + // + execute_direct (a, s); - for (bool first (true), second (true); !is.eof (); ) - { - string l; - getline (is, l); + size_t skip_count (0); + for (bool restart (true); restart; ) + { + restart = false; - if (is.fail () && !is.eof ()) - fail << "io error while parsing g++ -M output"; + if (verb >= 2) + print_process (args); - size_t pos (0); + try + { + process pr (args.data (), false, false, true); + ifdstream is (pr.in_ofd); - if (first) + size_t skip (skip_count); + for (bool first (true), second (true); !(restart || is.eof ()); ) { - // Empty output should mean the wait() call below will return - // false. - // - if (l.empty ()) - break; + string l; + getline (is, l); - assert (l[0] == '*' && l[1] == ':' && l[2] == ' '); + if (is.fail () && !is.eof ()) + fail << "io error while parsing g++ -M output"; - first = false; + size_t pos (0); - // While normally we would have the source file on the - // first line, if too long, it will be move to the next - // line and all we will have on this line is "*: \". - // - if (l.size () == 4 && l[3] == '\\') - continue; - else - pos = 3; // Skip "*: ". + if (first) + { + // Empty output should mean the wait() call below will return + // false. + // + if (l.empty ()) + break; - // Fall through to the 'second' block. - } + assert (l[0] == '*' && l[1] == ':' && l[2] == ' '); - if (second) - { - second = false; - next (l, pos); // Skip the source file. - } + first = false; - auto g ( - make_exception_guard ( - [](const target& s) - { - info << "while extracting dependencies from " << s; - }, - s)); + // While normally we would have the source file on the + // first line, if too long, it will be moved to the next + // line and all we will have on this line is "*: \". 
+ // + if (l.size () == 4 && l[3] == '\\') + continue; + else + pos = 3; // Skip "*: ". - while (pos != l.size ()) - { - path f (next (l, pos)); - f.normalize (); + // Fall through to the 'second' block. + } - if (!f.absolute ()) + if (second) { - // This is probably as often an error as an auto-generated - // file, so trace at level 3. - // - level3 ([&]{trace << "non-existent header '" << f << "'";}); + second = false; + next (l, pos); // Skip the source file. + } - // If we already did it and build_prefix_map() returned empty, - // then we would have failed below. - // - if (pm.empty ()) - pm = build_prefix_map (t); + // If things go wrong (and they often do in this area), give + // the user a bit extra context. + // + auto g ( + make_exception_guard ( + [](target& s) + { + info << "while extracting dependencies from " << s; + }, + s)); - // First try the whole file. Then just the directory. - // - // @@ Has to be a separate map since the prefix can be - // the same as the file name. - // - // auto i (pm.find (f)); + while (pos != l.size ()) + { + string fs (next (l, pos)); - // Find the most qualified prefix of which we are a - // sub-path. + // Skip until where we left off. // - auto i (pm.end ()); + if (skip != 0) + { + skip--; + continue; + } - if (!pm.empty ()) + path f (move (fs)); + f.normalize (); + + if (!f.absolute ()) { - const dir_path& d (f.directory ()); - i = pm.upper_bound (d); - --i; // Greatest less than. + // This is probably as often an error as an auto-generated + // file, so trace at level 3. + // + level3 ([&]{trace << "non-existent header '" << f << "'";}); + + // If we already did it and build_prefix_map() returned empty, + // then we would have failed below. + // + if (pm.empty ()) + pm = build_prefix_map (t); + + // First try the whole file. Then just the directory. + // + // @@ Has to be a separate map since the prefix can be + // the same as the file name. + // + // auto i (pm.find (f)); + + // Find the most qualified prefix of which we are a + // sub-path. + // + auto i (pm.end ()); + + if (!pm.empty ()) + { + const dir_path& d (f.directory ()); + i = pm.upper_bound (d); + --i; // Greatest less than. - if (!d.sub (i->first)) // We might still not be a sub. - i = pm.end (); - } + if (!d.sub (i->first)) // We might still not be a sub. + i = pm.end (); + } - if (i == pm.end ()) - fail << "unable to map presumably auto-generated header '" - << f << "' to a project"; + if (i == pm.end ()) + fail << "unable to map presumably auto-generated header '" + << f << "' to a project"; - f = i->second / f; - } + f = i->second / f; + } - level5 ([&]{trace << "injecting " << f;}); + level5 ([&]{trace << "injecting " << f;}); - // Split the name into its directory part, the name part, and - // extension. Here we can assume the name part is a valid - // filesystem name. - // - // Note that if the file has no extension, we record an empty - // extension rather than NULL (which would signify that the - // default extension should be added). - // - dir_path d (f.directory ()); - string n (f.leaf ().base ().string ()); - const char* es (f.extension ()); - const string* e (&extension_pool.find (es != nullptr ? es : "")); + // Split the name into its directory part, the name part, and + // extension. Here we can assume the name part is a valid + // filesystem name. + // + // Note that if the file has no extension, we record an empty + // extension rather than NULL (which would signify that the + // default extension should be added). 
+ // + dir_path d (f.directory ()); + string n (f.leaf ().base ().string ()); + const char* es (f.extension ()); + const string* e (&extension_pool.find (es != nullptr ? es : "")); - // Find or insert target. - // - // If there is no extension (e.g., standard C++ headers), - // then assume it is a header. Otherwise, let the standard - // mechanism derive the type from the extension. @@ TODO. - // + // Determine the target type. + // + const target_type* tt (nullptr); + + // See if this directory is part of any project out_root + // hierarchy. Note that this will miss all the headers + // that come from src_root (so they will be treated as + // generic C headers below). Generally, we don't have + // the ability to determine that some file belongs to + // src_root of some project. But that's not a problem + // for our purposes: it is only important for us to + // accurately determine target types for headers that + // could be auto-generated. + // + if (scope* r = scopes.find (d).root_scope ()) + { + // Get cached (or build) a map of the extensions for the + // C/C++ files this project is using. + // + const ext_map& m (build_ext_map (*r)); + + auto i (m.find (e)); + if (i != m.end ()) + tt = i->second; + } - // Determine the target type. - // - const target_type* tt (nullptr); - - // See if this directory is part of any project out_root - // hierarchy. Note that this will miss all the headers - // that come from src_root (so they will be treated as - // generic C headers below). Generally, we don't have - // the ability to determine that some file belongs to - // src_root of some project. But that's not a problem - // for our purposes: it is only important for us to - // accurately determine target types for headers that - // could be auto-generated. - // - if (scope* r = scopes.find (d).root_scope ()) - { - // Get cahed (or build) a map of the extensions for the - // C/C++ files this project is using. + // If it is outside any project, or the project doesn't have + // such an extension, assume it is a plain old C header. // - const ext_map& m (build_ext_map (*r)); + if (tt == nullptr) + tt = &h::static_type; - auto i (m.find (e)); - if (i != m.end ()) - tt = i->second; - } + // Find or insert target. + // + path_target& pt ( + static_cast<path_target&> (search (*tt, d, n, e, &ds))); - // If it is outside any project, or the project doesn't have - // such an extension, assume it is a plain old C header. - // - if (tt == nullptr) - tt = &h::static_type; + // Assign path. + // + if (pt.path ().empty ()) + pt.path (move (f)); - path_target& pt ( - static_cast<path_target&> (search (*tt, d, n, e, &ds))); + // Match to a rule. + // + build::match (a, pt); - // Assign path. - // - if (pt.path ().empty ()) - pt.path (move (f)); + // Update it. + // + // There would normally be a lot of headers for every source + // file (think all the system headers) and this can get + // expensive. At the same time, most of these headers are + // existing files that we will never be updating (again, + // system headers, for example) and the rule that will match + // them is fallback path_rule. So we are going to do a little + // fast-path optimization by detecting this common case. + // + recipe_function* const* recipe ( + pt.recipe (a).target<recipe_function*> ()); - // Match to a rule. - // - build::match (a, pt); + if (recipe == nullptr || *recipe != &path_rule::perform_update) + { + // We only want to restart if our call to execute() actually + // caused an update. 
In particular, the target could already + // have been in target_state::changed because of a dependency + // extraction run for some other source file. + // + target_state os (pt.state); + execute_direct (a, pt); + + if (pt.state != os && pt.state != target_state::unchanged) + { + level5 ([&]{trace << "updated " << pt << ", restarting";}); + restart = true; + } + } - // Add to our prerequisite target list. - // - t.prerequisite_targets.push_back (&pt); + // Add to our prerequisite target list. + // + t.prerequisite_targets.push_back (&pt); + skip_count++; + } } - } - // We assume the child process issued some diagnostics. - // - if (!pr.wait ()) - throw failed (); - } - catch (const process_error& e) - { - error << "unable to execute " << args[0] << ": " << e.what (); + // We may not have read all the output (e.g., due to a restart), + // so close the file descriptor before waiting to avoid blocking + // the other end. + // + is.close (); - // In a multi-threaded program that fork()'ed but did not exec(), - // it is unwise to try to do any kind of cleanup (like unwinding - // the stack and running destructors). - // - if (e.child ()) - exit (1); + // We assume the child process issued some diagnostics. + // + if (!pr.wait ()) + throw failed (); + } + catch (const process_error& e) + { + error << "unable to execute " << args[0] << ": " << e.what (); - throw failed (); + // In a multi-threaded program that fork()'ed but did not exec(), + // it is unwise to try to do any kind of cleanup (like unwinding + // the stack and running destructors). + // + if (e.child ()) + exit (1); + + throw failed (); + } } } diff --git a/build/rule.cxx b/build/rule.cxx index baed3ba..34aae9d 100644 --- a/build/rule.cxx +++ b/build/rule.cxx @@ -39,7 +39,7 @@ namespace build // are not doing anything for this action so not checking if the file // exists seems harmless. What about, say, configure_update? Again, // whether we match or not, there is nothing to be done for this - // action. And who knows, maybe the file doesn't exist during + // action. And, who knows, maybe the file doesn't exist during // configure_update but will magically appear during perform_update. // So the overall guideline seems to be this: if we don't do anything // for the action (other than performing it on the prerequisites), @@ -160,10 +160,6 @@ namespace build recipe fsdir_rule:: apply (action a, target& t, void*) const { - // Inject dependency on the parent directory. - // - inject_parent_fsdir (a, t); - switch (a.operation ()) { // For default, we don't do anything other than letting our @@ -171,6 +167,11 @@ namespace build // case default_id: case update_id: + // Inject dependency on the parent directory. Note that we + // don't do it for clean since we shouldn't be removing it. + // + inject_parent_fsdir (a, t); + search_and_match (a, t); break; // For clean, ignore prerequisites that are not in the same or a diff --git a/build/target b/build/target index 1de521e..8161438 100644 --- a/build/target +++ b/build/target @@ -35,6 +35,9 @@ namespace build // enum class target_state {unknown, postponed, unchanged, changed, failed}; + std::ostream& + operator<< (std::ostream&, target_state); + // Recipe. 
// // The returned target state should be changed, unchanged, or diff --git a/build/target.cxx b/build/target.cxx index 3b030ee..6cd1fc9 100644 --- a/build/target.cxx +++ b/build/target.cxx @@ -17,6 +17,17 @@ using namespace std; namespace build { + // target_state + // + static const char* target_state_[] = { + "unknown", "postponed", "unchanged", "changed", "failed"}; + + ostream& + operator<< (ostream& os, target_state ts) + { + return os << target_state_[static_cast<size_t> (ts)]; + } + // recipe // const recipe empty_recipe; diff --git a/tests/cli/lib/libtest/test/base.cli b/tests/cli/lib/libtest/test/base.cli new file mode 100644 index 0000000..bbf9999 --- /dev/null +++ b/tests/cli/lib/libtest/test/base.cli @@ -0,0 +1,7 @@ +namespace test +{ + class base + { + bool --version; + }; +} diff --git a/tests/cli/lib/libtest/test/buildfile b/tests/cli/lib/libtest/test/buildfile index 5325f14..61841e7 100644 --- a/tests/cli/lib/libtest/test/buildfile +++ b/tests/cli/lib/libtest/test/buildfile @@ -1,13 +1,18 @@ -lib{test}: cxx{utility} cxx{test} extra/cxx{test} +lib{test}: cxx{utility} cxx{test base} extra/cxx{test} cxx{test} hxx{test}: cli{test} +cxx{base} hxx{base}: cli{base} + +cli.options += -I $src_root --include-with-brackets extra/: { cxx{test} hxx{test}: cli{test} - cli.options += --cli-namespace test::extra::cli + cli.options += --include-prefix test/extra --guard-prefix TEST_EXTRA \ + --cli-namespace test::extra::cli } cxx.poptions += -I$out_root -I$src_root lib{test}: cxx.export.poptions = -I$out_root -I$src_root -cli.options += --cli-namespace test::cli +cli.options += --include-prefix test --guard-prefix TEST \ +--cli-namespace test::cli diff --git a/tests/cli/lib/libtest/test/test.cli b/tests/cli/lib/libtest/test/test.cli index 8bc51e8..f19d56c 100644 --- a/tests/cli/lib/libtest/test/test.cli +++ b/tests/cli/lib/libtest/test/test.cli @@ -1,8 +1,9 @@ +include <test/base.cli>; + namespace test { - class options + class options: base { bool --help; - bool --version; }; } |
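One more detail worth calling out: when the -M output contains a relative (non-existent, presumably auto-generated) header, cxx::compile maps it to a project using a prefix map and a "greatest element less than the directory" lookup (upper_bound() followed by a decrement, plus a sub-path check). The following standalone illustration shows that lookup technique on a plain std::map with string keys; the names and sample values are illustrative, not taken from the build codebase:

```cpp
#include <iostream>
#include <map>
#include <string>

// Map from include prefixes (as they appear in the -M output) to project
// output directories. Keys and values here are made-up examples.
//
using prefix_map = std::map<std::string, std::string>;

// Look up the entry whose key is a prefix of dir, mimicking the
// upper_bound()/"greatest less than" lookup in cxx::compile.
//
const std::string*
find_project (const prefix_map& pm, const std::string& dir)
{
  if (pm.empty ())
    return nullptr;

  auto i (pm.upper_bound (dir)); // First key strictly greater than dir.

  if (i == pm.begin ())
    return nullptr;              // Everything sorts after dir.

  --i;                           // Greatest key less than (or equal to) dir.

  // We might still not be a sub-path of this prefix.
  //
  return dir.compare (0, i->first.size (), i->first) == 0
    ? &i->second
    : nullptr;
}

int main ()
{
  prefix_map pm {
    {"test/",       "/tmp/out/libtest/test/"},
    {"test/extra/", "/tmp/out/libtest/test/extra/"}};

  if (const std::string* d = find_project (pm, "test/extra/"))
    std::cout << "test/extra/ maps to " << *d << '\n';
}
```

The sub-path check after the decrement is still needed because the greatest key sorting before a directory is not necessarily a prefix of it, which is what the "We might still not be a sub" comment in the diff refers to.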