author     John Stultz <john.stultz@linaro.org>    2013-10-08 02:51:59 +0400
committer  Ingo Molnar <mingo@kernel.org>          2013-11-06 15:40:26 +0400
commit     1ca7d67cf5d5a2aef26a8d9afd789006fa098347 (patch)
tree       8f4f7d1f189d7a08983ab5ef522330f08f337459 /include/linux
parent     827da44c61419f29ae3be198c342e2147f1a10cb (diff)
download   linux-1ca7d67cf5d5a2aef26a8d9afd789006fa098347.tar.xz
seqcount: Add lockdep functionality to seqcount/seqlock structures
Currently seqlocks and seqcounts don't support lockdep. After running across a seqcount-related deadlock in the timekeeping code, I used a less-refined and more focused variant of this patch to narrow down the cause of the issue.

This is a first-pass attempt to properly enable lockdep functionality on seqlocks and seqcounts. Since seqcounts are used in the vdso gettimeofday code, I've provided non-lockdep accessors for those needs. I've also handled one case where there were nested seqlock writers, and there may be more edge cases.

Comments and feedback would be appreciated!

Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Li Zefan <lizefan@huawei.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: netdev@vger.kernel.org
Link: http://lkml.kernel.org/r/1381186321-4906-3-git-send-email-john.stultz@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
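For illustration, a minimal sketch of the API this patch instruments; this is not part of the commit, and names such as hypo_state are hypothetical. With the patch applied, seqcount_init() registers a lock_class_key so lockdep can track the write side, and read_seqcount_begin() validates the read side:

    #include <linux/seqlock.h>
    #include <linux/types.h>

    struct hypo_state {
            seqcount_t seq;
            u64 a, b;
    };

    static void hypo_init(struct hypo_state *st)
    {
            seqcount_init(&st->seq);   /* per-call-site lockdep class */
    }

    /* Writer: callers must provide their own mutual exclusion. */
    static void hypo_update(struct hypo_state *st, u64 a, u64 b)
    {
            write_seqcount_begin(&st->seq);   /* now also a lockdep acquire */
            st->a = a;
            st->b = b;
            write_seqcount_end(&st->seq);     /* lockdep release */
    }

    /* Reader: retries while a writer is (or was) active. */
    static u64 hypo_read(struct hypo_state *st)
    {
            unsigned seq;
            u64 sum;

            do {
                    seq = read_seqcount_begin(&st->seq); /* lockdep-checked */
                    sum = st->a + st->b;
            } while (read_seqcount_retry(&st->seq, seq));

            return sum;
    }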
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/init_task.h   8
-rw-r--r--  include/linux/lockdep.h     8
-rw-r--r--  include/linux/seqlock.h    79
3 files changed, 82 insertions(+), 13 deletions(-)
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 5cd0f0949927..b0ed422e4e4a 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -32,10 +32,10 @@ extern struct fs_struct init_fs;
#endif
#ifdef CONFIG_CPUSETS
-#define INIT_CPUSET_SEQ \
- .mems_allowed_seq = SEQCNT_ZERO,
+#define INIT_CPUSET_SEQ(tsk) \
+ .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
#else
-#define INIT_CPUSET_SEQ
+#define INIT_CPUSET_SEQ(tsk)
#endif
#define INIT_SIGNALS(sig) { \
@@ -220,7 +220,7 @@ extern struct task_group root_task_group;
INIT_FTRACE_GRAPH \
INIT_TRACE_RECURSION \
INIT_TASK_RCU_PREEMPT(tsk) \
- INIT_CPUSET_SEQ \
+ INIT_CPUSET_SEQ(tsk) \
INIT_VTIME(tsk) \
}
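The init_task.h change above tracks the new SEQCNT_ZERO() signature: the initializer now takes the name of the seqcount being initialized, so the embedded lockdep map gets a meaningful class name. A minimal sketch of a static definition under the new API (foo_seq is a hypothetical name):

    #include <linux/seqlock.h>

    /* Hypothetical example: the initializer now names its own instance. */
    static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);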
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index cfc2f119779a..92b1bfc5da60 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -497,6 +497,10 @@ static inline void print_irqtrace_events(struct task_struct *curr)
#define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, n, i) lock_release(l, n, i)
+#define seqcount_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
+#define seqcount_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
+#define seqcount_release(l, n, i) lock_release(l, n, i)
+
#define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, n, i) lock_release(l, n, i)
@@ -504,11 +508,11 @@ static inline void print_irqtrace_events(struct task_struct *curr)
#define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i)
-# define rwsem_release(l, n, i) lock_release(l, n, i)
+#define rwsem_release(l, n, i) lock_release(l, n, i)
#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
-# define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
+#define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock) \
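The new seqcount hooks reuse lockdep's generic lock_acquire() machinery: writers are annotated as exclusive holders and readers as recursive-shared ones, mirroring the rwlock macros above. A read-side check therefore reduces to an acquire/release pair, which is the pattern the seqlock.h hunk below applies in seqcount_lockdep_reader_access(). A sketch of that pattern, assuming CONFIG_DEBUG_LOCK_ALLOC so dep_map exists (the function name here is hypothetical):

    #include <linux/lockdep.h>
    #include <linux/seqlock.h>

    /* Sketch: ask lockdep to validate entering a read-side critical
     * section, then back out immediately; no state is left behind. */
    static inline void hypo_check_reader(seqcount_t *s)
    {
            seqcount_acquire_read(&s->dep_map, 0, 0, _RET_IP_);
            seqcount_release(&s->dep_map, 1, _RET_IP_);
    }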
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 21a209336e79..1e8a8b6e837d 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -34,6 +34,7 @@
#include <linux/spinlock.h>
#include <linux/preempt.h>
+#include <linux/lockdep.h>
#include <asm/processor.h>
/*
@@ -44,10 +45,50 @@
*/
typedef struct seqcount {
unsigned sequence;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
} seqcount_t;
-#define SEQCNT_ZERO { 0 }
-#define seqcount_init(x) do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)
+static inline void __seqcount_init(seqcount_t *s, const char *name,
+ struct lock_class_key *key)
+{
+ /*
+ * Make sure we are not reinitializing a held lock:
+ */
+ lockdep_init_map(&s->dep_map, name, key, 0);
+ s->sequence = 0;
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define SEQCOUNT_DEP_MAP_INIT(lockname) \
+ .dep_map = { .name = #lockname } \
+
+# define seqcount_init(s) \
+ do { \
+ static struct lock_class_key __key; \
+ __seqcount_init((s), #s, &__key); \
+ } while (0)
+
+static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
+{
+ seqcount_t *l = (seqcount_t *)s;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
+ seqcount_release(&l->dep_map, 1, _RET_IP_);
+ local_irq_restore(flags);
+}
+
+#else
+# define SEQCOUNT_DEP_MAP_INIT(lockname)
+# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
+# define seqcount_lockdep_reader_access(x)
+#endif
+
+#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)}
+
/**
* __read_seqcount_begin - begin a seq-read critical section (without barrier)
@@ -76,6 +117,22 @@ repeat:
}
/**
+ * read_seqcount_begin_no_lockdep - start seq-read critical section w/o lockdep
+ * @s: pointer to seqcount_t
+ * Returns: count to be passed to read_seqcount_retry
+ *
+ * read_seqcount_begin_no_lockdep opens a read critical section of the given
+ * seqcount, but without any lockdep checking. The validity of the critical
+ * section is tested by calling read_seqcount_retry().
+ */
+static inline unsigned read_seqcount_begin_no_lockdep(const seqcount_t *s)
+{
+ unsigned ret = __read_seqcount_begin(s);
+ smp_rmb();
+ return ret;
+}
+
+/**
* read_seqcount_begin - begin a seq-read critical section
* @s: pointer to seqcount_t
* Returns: count to be passed to read_seqcount_retry
@@ -86,9 +143,8 @@ repeat:
*/
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
- unsigned ret = __read_seqcount_begin(s);
- smp_rmb();
- return ret;
+ seqcount_lockdep_reader_access(s);
+ return read_seqcount_begin_no_lockdep(s);
}
/**
@@ -108,6 +164,8 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s)
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
unsigned ret = ACCESS_ONCE(s->sequence);
+
+ seqcount_lockdep_reader_access(s);
smp_rmb();
return ret & ~1;
}
@@ -152,14 +210,21 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
* Sequence counter only version assumes that callers are using their
* own mutexing.
*/
-static inline void write_seqcount_begin(seqcount_t *s)
+static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
s->sequence++;
smp_wmb();
+ seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
+}
+
+static inline void write_seqcount_begin(seqcount_t *s)
+{
+ write_seqcount_begin_nested(s, 0);
}
static inline void write_seqcount_end(seqcount_t *s)
{
+ seqcount_release(&s->dep_map, 1, _RET_IP_);
smp_wmb();
s->sequence++;
}
@@ -188,7 +253,7 @@ typedef struct {
*/
#define __SEQLOCK_UNLOCKED(lockname) \
{ \
- .seqcount = SEQCNT_ZERO, \
+ .seqcount = SEQCNT_ZERO(lockname), \
.lock = __SPIN_LOCK_UNLOCKED(lockname) \
}
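Finally, the nested-writer case mentioned in the commit message maps onto write_seqcount_begin_nested() above, in the same way other lock types use *_nested annotations. A minimal sketch, assuming two instances of the same hypothetical structure whose writers legitimately nest:

    #include <linux/lockdep.h>
    #include <linux/seqlock.h>

    struct hypo_node {
            seqcount_t seq;
            /* ... data guarded by seq ... */
    };

    /* Both seqcounts share a lock class when initialized from the same
     * seqcount_init() call site, so a plain write_seqcount_begin() on
     * the child would look like same-class recursion to lockdep. */
    static void hypo_update_pair(struct hypo_node *parent,
                                 struct hypo_node *child)
    {
            write_seqcount_begin(&parent->seq);
            /* Tell lockdep this one-level nesting is intentional. */
            write_seqcount_begin_nested(&child->seq, SINGLE_DEPTH_NESTING);
            /* ... update parent and child ... */
            write_seqcount_end(&child->seq);
            write_seqcount_end(&parent->seq);
    }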