Diffstat (limited to 'include/linux')
-rw-r--r-- | include/linux/refcount.h | 131 |
1 file changed, 75 insertions(+), 56 deletions(-)
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index e719b5b1220e..e3b218d669ce 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -47,8 +47,8 @@ static inline unsigned int refcount_read(const refcount_t *r)
 #ifdef CONFIG_REFCOUNT_FULL
 #include <linux/bug.h>
 
-#define REFCOUNT_MAX		(UINT_MAX - 1)
-#define REFCOUNT_SATURATED	UINT_MAX
+#define REFCOUNT_MAX		INT_MAX
+#define REFCOUNT_SATURATED	(INT_MIN / 2)
 
 /*
  * Variant of atomic_t specialized for reference counts.
@@ -56,9 +56,47 @@ static inline unsigned int refcount_read(const refcount_t *r)
  * The interface matches the atomic_t interface (to aid in porting) but only
  * provides the few functions one should use for reference counting.
  *
- * It differs in that the counter saturates at REFCOUNT_SATURATED and will not
- * move once there. This avoids wrapping the counter and causing 'spurious'
- * use-after-free issues.
+ * Saturation semantics
+ * ====================
+ *
+ * refcount_t differs from atomic_t in that the counter saturates at
+ * REFCOUNT_SATURATED and will not move once there. This avoids wrapping the
+ * counter and causing 'spurious' use-after-free issues. In order to avoid the
+ * cost associated with introducing cmpxchg() loops into all of the saturating
+ * operations, we temporarily allow the counter to take on an unchecked value
+ * and then explicitly set it to REFCOUNT_SATURATED on detecting that underflow
+ * or overflow has occurred. Although this is racy when multiple threads
+ * access the refcount concurrently, by placing REFCOUNT_SATURATED roughly
+ * equidistant from 0 and INT_MAX we minimise the scope for error:
+ *
+ *                                 INT_MAX     REFCOUNT_SATURATED   UINT_MAX
+ *   0                          (0x7fff_ffff)    (0xc000_0000)    (0xffff_ffff)
+ *   +--------------------------------+----------------+----------------+
+ *                                     <---------- bad value! ---------->
+ *
+ * (in a signed view of the world, the "bad value" range corresponds to a
+ * negative counter value).
+ *
+ * As an example, consider a refcount_inc() operation that causes the counter
+ * to overflow:
+ *
+ *	int old = atomic_fetch_add_relaxed(r);
+ *	// old is INT_MAX, refcount now INT_MIN (0x8000_0000)
+ *	if (old < 0)
+ *		atomic_set(r, REFCOUNT_SATURATED);
+ *
+ * If another thread also performs a refcount_inc() operation between the two
+ * atomic operations, then the count will continue to edge closer to 0. If it
+ * reaches a value of 1 before /any/ of the threads reset it to the saturated
+ * value, then a concurrent refcount_dec_and_test() may erroneously free the
+ * underlying object. Given the precise timing details involved with the
+ * round-robin scheduling of each thread manipulating the refcount and the need
+ * to hit the race multiple times in succession, there doesn't appear to be a
+ * practical avenue of attack even if using refcount_add() operations with
+ * larger increments.
+ *
+ * Memory ordering
+ * ===============
  *
  * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
  * and provide only what is strictly required for refcounts.
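
The "add first, saturate afterwards" scheme described in the new comment block can be seen in isolation with a small userspace sketch. This is not the kernel code: it uses C11 <stdatomic.h> instead of atomic_t, and the names sat_t, sat_inc() and SAT_SATURATED are invented for the example only.

    /*
     * Standalone illustration of the saturation scheme above; NOT kernel code.
     */
    #include <stdatomic.h>
    #include <limits.h>
    #include <stdio.h>

    #define SAT_SATURATED	(INT_MIN / 2)	/* mirrors REFCOUNT_SATURATED */

    typedef struct { atomic_int refs; } sat_t;

    static void sat_inc(sat_t *r)
    {
    	/* Unconditional, unchecked increment first... */
    	int old = atomic_fetch_add_explicit(&r->refs, 1, memory_order_relaxed);

    	/*
    	 * ...then pin the counter if the pre-increment value was already in
    	 * the "bad value" range or sat right at the overflow boundary. The
    	 * kernel patch instead tests the sign of old + i, relying on the
    	 * kernel's wrapping signed arithmetic.
    	 */
    	if (old < 0 || old == INT_MAX)
    		atomic_store(&r->refs, SAT_SATURATED);
    }

    int main(void)
    {
    	sat_t r = { INT_MAX };	/* counter already at the overflow boundary */

    	sat_inc(&r);		/* would wrap to INT_MIN; gets saturated */
    	printf("count = %d, SAT_SATURATED = %d\n",
    	       atomic_load(&r.refs), SAT_SATURATED);
    	return 0;
    }

Built with any C11 compiler, this prints the counter pinned at the saturation value rather than the small positive count a silently wrapped increment would eventually reach.
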
@@ -109,25 +147,19 @@ static inline unsigned int refcount_read(const refcount_t *r)
  */
 static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
 {
-	unsigned int new, val = atomic_read(&r->refs);
+	int old = refcount_read(r);
 
 	do {
-		if (!val)
-			return false;
-
-		if (unlikely(val == REFCOUNT_SATURATED))
-			return true;
-
-		new = val + i;
-		if (new < val)
-			new = REFCOUNT_SATURATED;
+		if (!old)
+			break;
+	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));
 
-	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
-
-	WARN_ONCE(new == REFCOUNT_SATURATED,
-		  "refcount_t: saturated; leaking memory.\n");
+	if (unlikely(old < 0 || old + i < 0)) {
+		refcount_set(r, REFCOUNT_SATURATED);
+		WARN_ONCE(1, "refcount_t: saturated; leaking memory.\n");
+	}
 
-	return true;
+	return old;
 }
 
 /**
@@ -148,7 +180,13 @@ static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
  */
 static inline void refcount_add(int i, refcount_t *r)
 {
-	WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
+	int old = atomic_fetch_add_relaxed(i, &r->refs);
+
+	WARN_ONCE(!old, "refcount_t: addition on 0; use-after-free.\n");
+	if (unlikely(old <= 0 || old + i <= 0)) {
+		refcount_set(r, REFCOUNT_SATURATED);
+		WARN_ONCE(old, "refcount_t: saturated; leaking memory.\n");
+	}
 }
 
 /**
@@ -166,23 +204,7 @@ static inline void refcount_add(int i, refcount_t *r)
  */
 static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
 {
-	unsigned int new, val = atomic_read(&r->refs);
-
-	do {
-		new = val + 1;
-
-		if (!val)
-			return false;
-
-		if (unlikely(!new))
-			return true;
-
-	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
-
-	WARN_ONCE(new == REFCOUNT_SATURATED,
-		  "refcount_t: saturated; leaking memory.\n");
-
-	return true;
+	return refcount_add_not_zero(1, r);
 }
 
 /**
@@ -199,7 +221,7 @@ static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
  */
 static inline void refcount_inc(refcount_t *r)
 {
-	WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
+	refcount_add(1, r);
 }
 
 /**
@@ -224,26 +246,19 @@ static inline void refcount_inc(refcount_t *r)
  */
 static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
 {
-	unsigned int new, val = atomic_read(&r->refs);
-
-	do {
-		if (unlikely(val == REFCOUNT_SATURATED))
-			return false;
+	int old = atomic_fetch_sub_release(i, &r->refs);
 
-		new = val - i;
-		if (new > val) {
-			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
-			return false;
-		}
-
-	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
-
-	if (!new) {
+	if (old == i) {
 		smp_acquire__after_ctrl_dep();
 		return true;
 	}
 
-	return false;
+	if (unlikely(old < 0 || old - i < 0)) {
+		refcount_set(r, REFCOUNT_SATURATED);
+		WARN_ONCE(1, "refcount_t: underflow; use-after-free.\n");
+	}
+
+	return false;
 }
 
 /**
@@ -276,9 +291,13 @@ static inline __must_check bool refcount_dec_and_test(refcount_t *r)
  */
 static inline void refcount_dec(refcount_t *r)
 {
-	WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
-}
+	int old = atomic_fetch_sub_release(1, &r->refs);
 
+	if (unlikely(old <= 1)) {
+		refcount_set(r, REFCOUNT_SATURATED);
+		WARN_ONCE(1, "refcount_t: decrement hit 0; leaking memory.\n");
+	}
+}
 #else /* CONFIG_REFCOUNT_FULL */
 
 #define REFCOUNT_MAX		INT_MAX
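
The decrement side follows the same pattern as the hunks above. The companion sketch below is again C11 userspace code with invented names (sat_t, sat_dec_and_test, SAT_SATURATED), not the kernel implementation. It mirrors the structure of the new refcount_sub_and_test() with i == 1: an unconditional release-ordered subtract, an acquire fence only on the path that reports "last reference dropped", and post-hoc saturation on underflow.

    /*
     * Companion sketch for the decrement side; NOT kernel code.
     */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <limits.h>
    #include <stdio.h>

    #define SAT_SATURATED	(INT_MIN / 2)

    typedef struct { atomic_int refs; } sat_t;

    static bool sat_dec_and_test(sat_t *r)
    {
    	int old = atomic_fetch_sub_explicit(&r->refs, 1, memory_order_release);

    	if (old == 1) {
    		/* Stands in for smp_acquire__after_ctrl_dep(): the thread that
    		 * drops the last reference must observe all earlier puts
    		 * before it frees the object. */
    		atomic_thread_fence(memory_order_acquire);
    		return true;
    	}

    	/* i == 1 specialisation of the kernel's (old < 0 || old - i < 0)
    	 * underflow check: decrementing 0 or a negative count saturates. */
    	if (old <= 0)
    		atomic_store(&r->refs, SAT_SATURATED);

    	return false;
    }

    int main(void)
    {
    	sat_t r = { 2 };

    	printf("first put frees?  %d\n", sat_dec_and_test(&r));	/* 0 */
    	printf("second put frees? %d\n", sat_dec_and_test(&r));	/* 1 */

    	bool freed = sat_dec_and_test(&r);	/* underflow: count saturates */
    	printf("extra put frees?  %d, count = %d\n",
    	       freed, atomic_load(&r.refs));
    	return 0;
    }

The third call demonstrates why the unconditional fetch-and-subtract is safe: the over-decrement briefly drives the counter negative, but it is then pinned at the saturation value instead of being left where a later increment could walk it back to a plausible-looking small count.
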