aboutsummaryrefslogtreecommitdiff
path: root/libbuild2
diff options
context:
space:
mode:
authorBoris Kolpackov <boris@codesynthesis.com>2019-10-23 12:21:08 +0200
committerBoris Kolpackov <boris@codesynthesis.com>2019-10-23 12:27:19 +0200
commit6d700e3601a3469981995fd364d1a1ff7f158e5e (patch)
treec566f2ef764d48b369f18b0ac42ca723f1de57d9 /libbuild2
parent33e665c41928824a0410f0328a1fe9873381faaf (diff)
Un-tune scheduler when building build system modules
Diffstat (limited to 'libbuild2')
-rw-r--r--libbuild2/context.hxx3
-rw-r--r--libbuild2/install/operation.cxx4
-rw-r--r--libbuild2/module.cxx16
-rw-r--r--libbuild2/operation.cxx159
-rw-r--r--libbuild2/operation.hxx7
-rw-r--r--libbuild2/scheduler.cxx29
-rw-r--r--libbuild2/scheduler.hxx47
-rw-r--r--libbuild2/test/operation.cxx2
8 files changed, 167 insertions, 100 deletions
diff --git a/libbuild2/context.hxx b/libbuild2/context.hxx
index 0a2deea..e720321 100644
--- a/libbuild2/context.hxx
+++ b/libbuild2/context.hxx
@@ -131,6 +131,9 @@ namespace build2
// future we can do save/restore but then we would need some indication that
// we have switched to another task).
//
+ // Note that sharing the same scheduler between multiple top-level contexts
+ // can currently be problematic due to operation-specific scheduler tuning.
+ //
// The loaded_modules state (module.hxx) is shared among all the contexts
// (there is no way to have multiple shared library loading "contexts") and
// is protected by loaded_modules_lock. A nested context should normally
diff --git a/libbuild2/install/operation.cxx b/libbuild2/install/operation.cxx
index a2ad7d0..4515f14 100644
--- a/libbuild2/install/operation.cxx
+++ b/libbuild2/install/operation.cxx
@@ -39,7 +39,7 @@ namespace build2
"installed",
"has nothing to install", // We cannot "be installed".
execution_mode::first,
- 0,
+ 0 /* concurrency */, // Run serially.
&install_pre,
nullptr
};
@@ -62,7 +62,7 @@ namespace build2
"uninstalled",
"is not installed",
execution_mode::last,
- 0,
+ 0 /* concurrency */, // Run serially.
&install_pre,
nullptr
};
diff --git a/libbuild2/module.cxx b/libbuild2/module.cxx
index 45cbd30..b73ddb3 100644
--- a/libbuild2/module.cxx
+++ b/libbuild2/module.cxx
@@ -243,11 +243,25 @@ namespace build2
diag_frame::stack_guard diag_cutoff (nullptr);
auto df = make_diag_frame (
- [&loc, &mod](const diag_record& dr)
+ [&loc, &mod] (const diag_record& dr)
{
dr << info (loc) << "while loading build system module " << mod;
});
+ // Un-tune the scheduler.
+ //
+ // Note that we can only do this if we are running serially because
+ // otherwise we cannot guarantee the scheduler is idle (we could
+ // have waiting threads from the outer context). This is fine for
+ // now since the only two tuning levels we use are serial and full
+ // concurrency (turns out currently we don't really need this: we
+ // will always be called during load or match phases and we always
+ // do parallel match; but let's keep it in case things change).
+ //
+ auto sched_tune (ctx.sched.serial ()
+ ? scheduler::tune_guard (ctx.sched, 0)
+ : scheduler::tune_guard ());
+
// Note that for now we suppress progress since it would clash with
// the progress of what we are already doing (maybe in the future we
// can do save/restore but then we would need some sort of
diff --git a/libbuild2/operation.cxx b/libbuild2/operation.cxx
index 0bf87d5..a9b6107 100644
--- a/libbuild2/operation.cxx
+++ b/libbuild2/operation.cxx
@@ -271,100 +271,105 @@ namespace build2
if (ctx.current_mode == execution_mode::last)
reverse (ts.begin (), ts.end ());
- // Tune the scheduler.
- //
- switch (ctx.current_inner_oif->concurrency)
- {
- case 0: ctx.sched.tune (1); break; // Run serially.
- case 1: break; // Run as is.
- default: assert (false); // Not yet supported.
- }
-
phase_lock pl (ctx, run_phase::execute); // Never switched.
- // Set the dry-run flag.
- //
- ctx.dry_run = ctx.dry_run_option;
-
- // Setup progress reporting if requested.
- //
- string what; // Note: must outlive monitor_guard.
- scheduler::monitor_guard mg;
-
- if (prog && show_progress (1 /* max_verb */))
{
- size_t init (ctx.target_count.load (memory_order_relaxed));
- size_t incr (init > 100 ? init / 100 : 1); // 1%.
+ // Tune the scheduler.
+ //
+ using tune_guard = scheduler::tune_guard;
+ tune_guard sched_tune;
- if (init != incr)
+ switch (ctx.current_inner_oif->concurrency)
{
- what = "% of targets " + diag_did (ctx, a);
+ case 0: sched_tune = tune_guard (ctx.sched, 1); break; // Run serially.
+ case 1: break; // Run as is.
+ default: assert (false); // Not supported.
+ }
- mg = ctx.sched.monitor (
- ctx.target_count,
- init - incr,
- [init, incr, &what, &ctx] (size_t c) -> size_t
- {
- size_t p ((init - c) * 100 / init);
- size_t s (ctx.skip_count.load (memory_order_relaxed));
+ // Set the dry-run flag.
+ //
+ ctx.dry_run = ctx.dry_run_option;
- diag_progress_lock pl;
- diag_progress = ' ';
- diag_progress += to_string (p);
- diag_progress += what;
+ // Setup progress reporting if requested.
+ //
+ string what; // Note: must outlive monitor_guard.
+ scheduler::monitor_guard mg;
- if (s != 0)
- {
- diag_progress += " (";
- diag_progress += to_string (s);
- diag_progress += " skipped)";
- }
+ if (prog && show_progress (1 /* max_verb */))
+ {
+ size_t init (ctx.target_count.load (memory_order_relaxed));
+ size_t incr (init > 100 ? init / 100 : 1); // 1%.
- return c - incr;
- });
- }
- }
+ if (init != incr)
+ {
+ what = "% of targets " + diag_did (ctx, a);
- // Similar logic to execute_members(): first start asynchronous execution
- // of all the top-level targets.
- //
- {
- atomic_count task_count (0);
- wait_guard wg (ctx, task_count);
+ mg = ctx.sched.monitor (
+ ctx.target_count,
+ init - incr,
+ [init, incr, &what, &ctx] (size_t c) -> size_t
+ {
+ size_t p ((init - c) * 100 / init);
+ size_t s (ctx.skip_count.load (memory_order_relaxed));
+
+ diag_progress_lock pl;
+ diag_progress = ' ';
+ diag_progress += to_string (p);
+ diag_progress += what;
+
+ if (s != 0)
+ {
+ diag_progress += " (";
+ diag_progress += to_string (s);
+ diag_progress += " skipped)";
+ }
+
+ return c - incr;
+ });
+ }
+ }
- for (const action_target& at: ts)
+ // Similar logic to execute_members(): first start asynchronous
+ // execution of all the top-level targets.
+ //
{
- const target& t (at.as_target ());
+ atomic_count task_count (0);
+ wait_guard wg (ctx, task_count);
- l5 ([&]{trace << diag_doing (a, t);});
+ for (const action_target& at: ts)
+ {
+ const target& t (at.as_target ());
- target_state s (execute_async (a, t, 0, task_count, false));
+ l5 ([&]{trace << diag_doing (a, t);});
- // Bail out if the target has failed and we weren't instructed to keep
- // going.
- //
- if (s == target_state::failed && !ctx.keep_going)
- break;
- }
+ target_state s (execute_async (a, t, 0, task_count, false));
- wg.wait ();
- }
+ // Bail out if the target has failed and we weren't instructed to
+ // keep going.
+ //
+ if (s == target_state::failed && !ctx.keep_going)
+ break;
+ }
- // We are now running serially.
- //
+ wg.wait ();
+ }
- ctx.sched.tune (0); // Restore original scheduler settings.
+ // We are now running serially.
+ //
- // Clear the dry-run flag.
- //
- ctx.dry_run = false;
+ // Clear the dry-run flag.
+ //
+ ctx.dry_run = false;
- // Clear the progress if present.
- //
- if (mg)
- {
- diag_progress_lock pl;
- diag_progress.clear ();
+ // Clear the progress if present.
+ //
+ if (mg)
+ {
+ diag_progress_lock pl;
+ diag_progress.clear ();
+ }
+
+ // Restore original scheduler settings.
}
// Print skip count if not zero. Note that we print it regardless of the
@@ -582,7 +587,7 @@ namespace build2
"",
"",
execution_mode::first,
- 1,
+ 1 /* concurrency */,
nullptr,
nullptr
};
@@ -606,7 +611,7 @@ namespace build2
"updated",
"is up to date",
execution_mode::first,
- 1,
+ 1 /* concurrency */,
nullptr,
nullptr
};
@@ -620,7 +625,7 @@ namespace build2
"cleaned",
"is clean",
execution_mode::last,
- 1,
+ 1 /* concurrency */,
nullptr,
nullptr
};
diff --git a/libbuild2/operation.hxx b/libbuild2/operation.hxx
index 520b37b..125c692 100644
--- a/libbuild2/operation.hxx
+++ b/libbuild2/operation.hxx
@@ -221,8 +221,11 @@ namespace build2
const execution_mode mode;
- // This is the operation's concurrency multiplier. 0 means run serially,
- // 1 means run at hardware concurrency (unless overridden by the user).
+ // This is the operation's concurrency multiplier. 0 means run serially, 1
+ // means run at hardware concurrency (or the concurrency specified by the
+ // user).
+ //
+ // Note: 0 and 1 are currently the only valid values.
//
const size_t concurrency;
diff --git a/libbuild2/scheduler.cxx b/libbuild2/scheduler.cxx
index 3c06176..bd2694c 100644
--- a/libbuild2/scheduler.cxx
+++ b/libbuild2/scheduler.cxx
@@ -125,7 +125,7 @@ namespace build2
else if (active_ == 0 && external_ == 0)
{
// Note that we tried to handle this directly in this thread but that
- // wouldn't work for the phase lock case where we cal; deactivate and
+ // wouldn't work for the phase lock case where we call deactivate and
// then go wait on a condition variable: we would be doing deadlock
// detection while holding the lock that prevents other threads from
// making progress! So it has to be a separate monitoring thread.
@@ -391,24 +391,35 @@ namespace build2
dead_thread_ = thread (deadlock_monitor, this);
}
- void scheduler::
+ size_t scheduler::
tune (size_t max_active)
{
// Note that if we tune a parallel scheduler to run serially, we will
// still have the deadlock monitoring thread running.
+ // With multiple initial active threads we will need to make changes to
+ // max_active_ visible to other threads, which we currently say can be
+ // accessed between startup and shutdown without a lock.
+ //
+ assert (init_active_ == 1);
+
if (max_active == 0)
max_active = orig_max_active_;
- assert (max_active >= init_active_ &&
- max_active <= orig_max_active_);
+ if (max_active != max_active_)
+ {
+ assert (max_active >= init_active_ &&
+ max_active <= orig_max_active_);
+
+ // The scheduler must not be active though some threads might still be
+ // coming off from finishing a task. So we busy-wait for them.
+ //
+ lock l (wait_idle ());
- // The scheduler must not be active though some threads might still be
- // comming off from finishing a task. So we busy-wait for them.
- //
- lock l (wait_idle ());
+ swap (max_active_, max_active);
+ }
- max_active_ = max_active;
+ return max_active == orig_max_active_ ? 0 : max_active;
}
auto scheduler::
diff --git a/libbuild2/scheduler.hxx b/libbuild2/scheduler.hxx
index 0107753..4495cb9 100644
--- a/libbuild2/scheduler.hxx
+++ b/libbuild2/scheduler.hxx
@@ -209,17 +209,46 @@ namespace build2
// Tune a started up scheduler.
//
- // Currently one cannot increase the number of max_active. Pass 0 to
- // restore the initial value.
+ // Currently one cannot increase the number of (initial) max_active, only
+ // decrease it. Pass 0 to restore the initial value. Returns the old
+ // value (0 if it is initial).
//
// Note that tuning can only be done while the scheduler is inactive, that
- // is, no threads are executing a task or are suspended. For example, in a
+ // is, no threads are executing or waiting on a task. For example, in a
// setup with a single initial active thread that would be after a return
- // from the top-level wait() call.
+ // from the top-level wait() call. Tuning the scheduler with more than one
+ // initial active thread is currently not supported.
//
- void
+ size_t
tune (size_t max_active);
+ struct tune_guard
+ {
+ tune_guard (): s_ (nullptr) {}
+ tune_guard (scheduler& s, size_t ma): s_ (&s), o_ (s_->tune (ma)) {}
+ tune_guard (tune_guard&& x): s_ (x.s_), o_ (x.o_) {x.s_ = nullptr;}
+ tune_guard& operator= (tune_guard&& x)
+ {
+ if (&x != this)
+ {
+ s_ = x.s_;
+ o_ = x.o_;
+ x.s_ = nullptr;
+ }
+ return *this;
+ }
+
+ ~tune_guard ()
+ {
+ if (s_ != nullptr)
+ s_->tune (o_);
+ }
+
+ private:
+ scheduler* s_;
+ size_t o_;
+ };
+
// Return true if the scheduler is configured to run tasks serially.
//
// Note: can only be called from threads that have observed startup.
@@ -415,8 +444,10 @@ namespace build2
// active <= max_active
// (init_active + helpers) <= max_threads (soft; see activate_helper())
//
- // Note that the first three are immutable between startup() and
- // shutdown() so can be accessed without a lock (but see join()).
+ // Note that the first three are immutable between the startup() and
+ // shutdown() calls so can be accessed without a lock (but see join();
+ // the exception is max_active_, which can be changed by tune(), though
+ // only when the scheduler is idle).
//
size_t init_active_ = 0; // Initially active threads.
size_t max_active_ = 0; // Maximum number of active threads.
@@ -430,7 +461,7 @@ namespace build2
//
size_t active_ = 0; // Active master threads executing a task.
size_t idle_ = 0; // Idle helper threads waiting for a task.
- size_t waiting_ = 0; // Suspended master threads waiting for their tasks.
+ size_t waiting_ = 0; // Suspended master threads waiting on their tasks.
size_t ready_ = 0; // Ready master thread waiting to become active.
size_t starting_ = 0; // Helper threads starting up.
diff --git a/libbuild2/test/operation.cxx b/libbuild2/test/operation.cxx
index 0ca8da0..061b2cc 100644
--- a/libbuild2/test/operation.cxx
+++ b/libbuild2/test/operation.cxx
@@ -33,7 +33,7 @@ namespace build2
"tested",
"has nothing to test", // We cannot "be tested".
execution_mode::first,
- 1,
+ 1 /* concurrency */,
&test_pre,
nullptr
};