Signed-off-by: Paul E. McKenney <pau...@linux.vnet.ibm.com>
---
include/linux/cgroup.h | 1 -
1 files changed, 0 insertions(+), 1 deletions(-)
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index a73e1ce..c9bbcb2 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -489,7 +489,6 @@ static inline struct cgroup_subsys_state *task_subsys_state(
{
return rcu_dereference_check(task->cgroups->subsys[subsys_id],
rcu_read_lock_held() ||
- !rcu_scheduler_active ||
cgroup_lock_is_held());
}
--
1.6.6
Signed-off-by: Paul E. McKenney <pau...@linux.vnet.ibm.com>
---
include/linux/rcupdate.h | 26 +++++++++++++++++---------
1 files changed, 17 insertions(+), 9 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index c843736..af51d5f 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -104,12 +104,14 @@ extern struct lockdep_map rcu_sched_lock_map;
* an RCU read-side critical section. In absence of CONFIG_PROVE_LOCKING,
* this assumes we are in an RCU read-side critical section unless it can
* prove otherwise.
+ *
+ * Check rcu_scheduler_active to prevent false positives during boot.
*/
static inline int rcu_read_lock_held(void)
{
- if (debug_locks)
- return lock_is_held(&rcu_lock_map);
- return 1;
+ if (!rcu_scheduler_active || !debug_locks)
+ return 1;
+ return lock_is_held(&rcu_lock_map);
}
/**
@@ -119,12 +121,14 @@ static inline int rcu_read_lock_held(void)
* an RCU-bh read-side critical section. In absence of CONFIG_PROVE_LOCKING,
* this assumes we are in an RCU-bh read-side critical section unless it can
* prove otherwise.
+ *
+ * Check rcu_scheduler_active to prevent false positives during boot.
*/
static inline int rcu_read_lock_bh_held(void)
{
- if (debug_locks)
- return lock_is_held(&rcu_bh_lock_map);
- return 1;
+ if (!rcu_scheduler_active || !debug_locks)
+ return 1;
+ return lock_is_held(&rcu_bh_lock_map);
}
/**
@@ -135,14 +139,18 @@ static inline int rcu_read_lock_bh_held(void)
* this assumes we are in an RCU-sched read-side critical section unless it
* can prove otherwise. Note that disabling of preemption (including
* disabling irqs) counts as an RCU-sched read-side critical section.
+ *
+ * Check rcu_scheduler_active to prevent false positives during boot.
*/
static inline int rcu_read_lock_sched_held(void)
{
int lockdep_opinion = 0;
+ if (!rcu_scheduler_active || !debug_locks)
+ return 1;
if (debug_locks)
lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
- return lockdep_opinion || preempt_count() != 0 || !rcu_scheduler_active;
+ return lockdep_opinion || preempt_count() != 0;
}
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
@@ -166,7 +174,7 @@ static inline int rcu_read_lock_bh_held(void)
static inline int rcu_read_lock_sched_held(void)
{
- return preempt_count() != 0 || !rcu_scheduler_active;
+ return !rcu_scheduler_active || preempt_count() != 0;
}
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
@@ -184,7 +192,7 @@ static inline int rcu_read_lock_sched_held(void)
*/
#define rcu_dereference_check(p, c) \
({ \
- if (debug_locks && !(c)) \
+ if (rcu_scheduler_active && debug_locks && !(c)) \
lockdep_rcu_dereference(__FILE__, __LINE__); \
rcu_dereference_raw(p); \
})
> + if (!rcu_scheduler_active || !debug_locks)
> + return 1;
> + return lock_is_held(&rcu_lock_map);
> + if (!rcu_scheduler_active || !debug_locks)
> + return 1;
> + return lock_is_held(&rcu_bh_lock_map);
i guess there could be a common helper here?
Also, could we clear rcu_scheduler_active when we clear debug_locks? That way
only a single test is needed, a generic 'is lock debugging active'.
(Which test should probably be unlikely() as well?)
Ingo
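For illustration, such a common helper might look like the following sketch (the helper name is assumed, not from the thread):

/* Hypothetical common helper; name is assumed, not from the thread. */
static inline int rcu_lock_held_common(struct lockdep_map *map)
{
	if (!rcu_scheduler_active || !debug_locks)
		return 1;
	return lock_is_held(map);
}

static inline int rcu_read_lock_held(void)
{
	return rcu_lock_held_common(&rcu_lock_map);
}

static inline int rcu_read_lock_bh_held(void)
{
	return rcu_lock_held_common(&rcu_bh_lock_map);
}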
Will do!
> Also, could we clear rcu_scheduler_active when we clear debug_locks? That way
> only a single test is needed, a generic 'is lock debugging active'.
Doing that will break synchronize_rcu(), which returns immediately if
!rcu_scheduler_active.
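(Simplified sketch of that early-boot fast path, not the exact kernel code, to show why the flag must stay set once the scheduler is running:)

void synchronize_rcu(void)
{
	struct rcu_synchronize rcu;

	if (!rcu_scheduler_active)
		return;	/* early boot: only one task, grace period is trivial */

	init_completion(&rcu.completion);
	/* Wake me after a real grace period has elapsed. */
	call_rcu(&rcu.head, wakeme_after_rcu);
	wait_for_completion(&rcu.completion);
}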
> (Which test should probably be unlikely() as well?)
Good point, will fix.
Thanx, Paul
> On Tue, Mar 02, 2010 at 01:20:59PM +0100, Ingo Molnar wrote:
> >
> > * Paul E. McKenney <pau...@linux.vnet.ibm.com> wrote:
> >
> > > + if (!rcu_scheduler_active || !debug_locks)
> > > + return 1;
> > > + return lock_is_held(&rcu_lock_map);
> >
> > > + if (!rcu_scheduler_active || !debug_locks)
> > > + return 1;
> > > + return lock_is_held(&rcu_bh_lock_map);
> >
> > i guess there could be a common helper here?
>
> Will do!
>
> > Also, could we clear rcu_scheduler_active when we clear debug_locks? That way
> > only a single test is needed, a generic 'is lock debugging active'.
>
> Doing that will break synchronize_rcu(), which returns immediately if
> !rcu_scheduler_active.
Ok - then have a debug_rcu flag which is cleared appropriately - so that the
fastpath impact is reduced?
Ingo
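For illustration, that suggestion could be sketched along these lines (the flag name is assumed, not from the thread):

/* Sketch only; the flag name is assumed, not from the thread.  The flag
 * would be set in rcu_scheduler_starting() and cleared wherever
 * debug_locks is cleared, leaving rcu_scheduler_active itself intact
 * for synchronize_rcu(). */
int debug_rcu __read_mostly;

static inline int rcu_read_lock_held(void)
{
	if (unlikely(!debug_rcu))
		return 1;		/* boot, or lockdep has given up */
	return lock_is_held(&rcu_lock_map);
}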
Good point, will do!
Thanx, Paul