author    Boris Kolpackov <boris@codesynthesis.com>  2022-04-18 07:38:22 +0200
committer Boris Kolpackov <boris@codesynthesis.com>  2022-04-18 07:38:22 +0200
commit    912ac87012ffc2fd0c6fb21823a0244c787ce5ba (patch)
tree      4861fd4b2bcdcfb61b97f215e3a24efaca25af57 /libbuild2
parent    56e38cffbd730e66261d9da29c8da91d39ebb167 (diff)
Avoid locking target set if in load phase
Diffstat (limited to 'libbuild2')
-rw-r--r--  libbuild2/algorithm.cxx  38
-rw-r--r--  libbuild2/bash/rule.cxx   3
-rw-r--r--  libbuild2/file.cxx        3
-rw-r--r--  libbuild2/target.cxx     31
-rw-r--r--  libbuild2/target.hxx     20
5 files changed, 65 insertions, 30 deletions
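
Note: the heart of this change is a conditional-locking pattern: the load phase is serial, so no other thread can be touching the target set and the locks guarding it can be constructed with defer_lock and only actually acquired outside of load. Below is a minimal standalone sketch of that pattern, assuming std::shared_mutex and std::shared_lock stand in for build2's mutex_, slock, and ulock; the phase enum, map, and names are illustrative, not the real declarations.

#include <iostream>
#include <map>
#include <mutex>
#include <shared_mutex>
#include <string>

enum class run_phase {load, match, execute};

struct target_set_sketch
{
  run_phase phase = run_phase::load;   // Load is single-threaded.
  mutable std::shared_mutex mutex_;
  std::map<std::string, int> map_;

  // Read-only lookup: acquire the shared lock only if we are past the
  // load phase; during load all access is serial, so locking is wasted
  // work.
  //
  const int*
  find (const std::string& k) const
  {
    std::shared_lock<std::shared_mutex> sl (mutex_, std::defer_lock);
    if (phase != run_phase::load)
      sl.lock ();

    auto i (map_.find (k));
    return i != map_.end () ? &i->second : nullptr;
  }
};

int main ()
{
  target_set_sketch ts;
  ts.map_.emplace ("hello", 1);
  if (const int* v = ts.find ("hello"))
    std::cout << *v << '\n';
}
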
diff --git a/libbuild2/algorithm.cxx b/libbuild2/algorithm.cxx
index 5011b26..8ef88e4 100644
--- a/libbuild2/algorithm.cxx
+++ b/libbuild2/algorithm.cxx
@@ -332,22 +332,28 @@ namespace build2
if (*mp != nullptr) // Might already be there.
return **mp;
- pair<target&, ulock> r (
- t.ctx.targets.insert_locked (tt,
- move (dir),
- move (out),
- move (n),
- nullopt /* ext */,
- target_decl::implied,
- trace));
-
- assert (r.second);
-
- target& m (r.first);
- *mp = &m;
- m.group = &t;
-
- return m;
+ target* m (nullptr);
+ {
+ pair<target&, ulock> r (
+ t.ctx.targets.insert_locked (tt,
+ move (dir),
+ move (out),
+ move (n),
+ nullopt /* ext */,
+ target_decl::implied,
+ trace));
+
+ if (r.second) // Inserted.
+ {
+ m = &r.first;
+ m->group = &t;
+ }
+ }
+
+ assert (m != nullptr);
+ *mp = m;
+
+ return *m;
};
// Return the matching rule or NULL if no match and try_match is true.
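
Note: the rewritten algorithm.cxx fragment also confines the pair returned by insert_locked() to a nested scope, so the exclusive lock it carries is released as soon as the new member's group pointer is set instead of living to the end of the enclosing lambda. A tiny sketch of that RAII scoping idea, with a hypothetical guarded counter standing in for the target set:

#include <mutex>

std::mutex m;
int counter = 0;

int
bump ()
{
  int v;
  {
    std::unique_lock<std::mutex> l (m); // Exclusive section starts here.
    v = ++counter;
  }                                     // Lock released at end of scope...

  // ...so any further work here no longer blocks other threads.
  return v;
}

int main () {return bump () == 1 ? 0 : 1;}
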
diff --git a/libbuild2/bash/rule.cxx b/libbuild2/bash/rule.cxx
index 3048d3c..db3cc3e 100644
--- a/libbuild2/bash/rule.cxx
+++ b/libbuild2/bash/rule.cxx
@@ -161,6 +161,9 @@ namespace build2
if (mt != timestamp_nonexistent)
{
+ // @@ Do we actually need _locked(), isn't path_mtime()
+ // atomic?
+ //
auto rp (t.ctx.targets.insert_locked (bash::static_type,
ap.directory (),
dir_path () /* out */,
diff --git a/libbuild2/file.cxx b/libbuild2/file.cxx
index d789d20..31db943 100644
--- a/libbuild2/file.cxx
+++ b/libbuild2/file.cxx
@@ -2880,6 +2880,9 @@ namespace build2
if (!t || *t == nullptr)
{
+ // Note: we need the lock because process_path() call below is not
+ // MT-safe.
+ //
pair<target&, ulock> r (insert_target (trace, ctx, tt, p));
t = &r.first;
diff --git a/libbuild2/target.cxx b/libbuild2/target.cxx
index 1806e61..7eaa3a6 100644
--- a/libbuild2/target.cxx
+++ b/libbuild2/target.cxx
@@ -631,7 +631,9 @@ namespace build2
const target* target_set::
find (const target_key& k, tracer& trace) const
{
- slock sl (mutex_);
+ bool load (ctx.phase == run_phase::load);
+
+ slock sl (mutex_, defer_lock); if (!load) sl.lock ();
map_type::const_iterator i (map_.find (k));
if (i == map_.end ())
@@ -650,14 +652,18 @@ namespace build2
// Between us releasing the shared lock and acquiring unique the
// extension could change and possibly a new target that matches the
// key could be inserted. In this case we simply re-run find ().
+ // Naturally, can't happen during load.
//
- sl.unlock ();
- ul = ulock (mutex_);
-
- if (ext) // Someone set the extension.
+ if (!load)
{
- ul.unlock ();
- return find (k, trace);
+ sl.unlock ();
+ ul = ulock (mutex_);
+
+ if (ext) // Someone set the extension.
+ {
+ ul.unlock ();
+ return find (k, trace);
+ }
}
}
@@ -691,7 +697,8 @@ namespace build2
string name,
optional<string> ext,
target_decl decl,
- tracer& trace)
+ tracer& trace,
+ bool need_lock)
{
target_key tk {&tt, &dir, &out, &name, move (ext)};
target* t (const_cast<target*> (find (tk, trace)));
@@ -715,7 +722,9 @@ namespace build2
// case we proceed pretty much like find() except already under the
// exclusive lock.
//
- ulock ul (mutex_);
+ ulock ul (mutex_, defer_lock);
+ if (ctx.phase != run_phase::load || need_lock)
+ ul.lock ();
auto p (map_.emplace (target_key {&tt, &t->dir, &t->out, &t->name, e},
unique_ptr<target> (t)));
@@ -728,6 +737,10 @@ namespace build2
t->decl = decl;
t->state.inner.target_ = t;
t->state.outer.target_ = t;
+
+ if (ctx.phase != run_phase::load && !need_lock)
+ ul.unlock ();
+
return pair<target&, ulock> (*t, move (ul));
}
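
Note: the find() hunk above keeps the existing lock-upgrade dance for the multi-threaded phases: the shared lock is released, the unique lock is acquired, and because another thread may have set the extension (or inserted a matching target) in the window between the two, the lookup has to be rechecked. A generic sketch of that release, reacquire, recheck pattern, using a hypothetical lazily cached value in place of the target's extension:

#include <optional>
#include <shared_mutex>
#include <string>

std::shared_mutex mtx;
std::optional<std::string> cached; // Filled in lazily by some thread.

std::string
get ()
{
  std::shared_lock<std::shared_mutex> sl (mtx);

  if (cached)
    return *cached;

  // Upgrade: drop the shared lock, take the unique lock. Another thread
  // may have set the value in between, so recheck before computing.
  //
  sl.unlock ();
  std::unique_lock<std::shared_mutex> ul (mtx);

  if (cached) // Someone beat us to it: reuse their result.
    return *cached;

  cached = "computed";
  return *cached;
}

int main () {return get () == "computed" ? 0 : 1;}
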
diff --git a/libbuild2/target.hxx b/libbuild2/target.hxx
index e6622f4..696d5d0 100644
--- a/libbuild2/target.hxx
+++ b/libbuild2/target.hxx
@@ -1522,7 +1522,7 @@ namespace build2
const dir_path& out,
const string& name) const
{
- slock l (mutex_);
+ slock l (mutex_, defer_lock); if (ctx.phase != run_phase::load) l.lock ();
auto i (map_.find (target_key {&type, &dir, &out, &name, nullopt}));
return i != map_.end () ? i->second.get () : nullptr;
}
@@ -1536,7 +1536,12 @@ namespace build2
// If the target was inserted, keep the map exclusive-locked and return
// the lock. In this case, the target is effectively still being created
- // since nobody can see it until the lock is released.
+ // since nobody can see it until the lock is released. Note that there
+ // is normally quite a bit of contention around this map so make sure to
+ // not hold the lock longer than absolutely necessary.
+ //
+ // If need_lock is false, then release the lock (the target insertion is
+ // indicated by the presence of the associated mutex).
//
pair<target&, ulock>
insert_locked (const target_type&,
@@ -1545,8 +1550,12 @@ namespace build2
string name,
optional<string> ext,
target_decl,
- tracer&);
+ tracer&,
+ bool need_lock = true);
+ // As above but instead of the lock return an indication of whether the
+ // target was inserted.
+ //
pair<target&, bool>
insert (const target_type& tt,
dir_path dir,
@@ -1562,9 +1571,10 @@ namespace build2
move (name),
move (ext),
decl,
- t));
+ t,
+ false));
- return pair<target&, bool> (p.first, p.second.owns_lock ()); // Clang 3.7
+ return pair<target&, bool> (p.first, p.second.mutex () != nullptr);
}
// Note that the following versions always enter implied targets.
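
Note: this last hunk is why insert() switches its "was it inserted?" test from owns_lock() to mutex() != nullptr. With need_lock = false the lock is unlocked (or never taken during load) before insert_locked() returns, so ownership no longer distinguishes insertion from a plain lookup; but a std::unique_lock keeps its associated mutex pointer after unlock(), while the lock returned for an existing target presumably has no associated mutex (that path is not shown in this diff). A small check of that standard-library distinction, not build2 code:

#include <cassert>
#include <mutex>
#include <shared_mutex>

int main ()
{
  std::shared_mutex m;

  // Deferred, locked, then unlocked: ownership is gone but the
  // association with the mutex remains.
  //
  std::unique_lock<std::shared_mutex> inserted (m, std::defer_lock);
  inserted.lock ();
  inserted.unlock ();
  assert (!inserted.owns_lock () && inserted.mutex () == &m);

  // Default-constructed: no associated mutex at all.
  //
  std::unique_lock<std::shared_mutex> found;
  assert (!found.owns_lock () && found.mutex () == nullptr);
}
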