author     Takashi Iwai <tiwai@suse.de>  2019-07-19 11:55:05 +0300
committer  Takashi Iwai <tiwai@suse.de>  2019-07-19 16:47:00 +0300
commit     0e279dcea0ec897af1c979ebee4ec92b461793f5 (patch)
tree       404599ef22d243be51004f7252fc6188aa22f70c
parent     70256b42caaf3e13c2932c2be7903a73fbe8bb8b (diff)
download   linux-0e279dcea0ec897af1c979ebee4ec92b461793f5.tar.xz
ALSA: pcm: Fix refcount_inc() on zero usage
The recent rewrite of the PCM link lock management introduced a refcount in the snd_pcm_group object, managed via the kernel refcount_t API. This causes unexpected kernel warnings when the kernel is built with CONFIG_REFCOUNT_FULL=y. As the warning line indicates, the problem is that we start with refcount=0 and call refcount_inc() when adding each PCM link, while the refcount_t API does not allow refcount_inc() on a zero count.

To adapt to proper refcount_t usage, this patch changes the logic slightly:
- the initial refcount is 1, accounting for the single list entry;
- the refcount is incremented / decremented on each PCM link addition and deletion;
- ... which lets us rely on the refcount alone as the release condition.

Fixes: f57f3df03a8e ("ALSA: pcm: More fine-grained PCM link locking")
BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=204221
Reported-and-tested-by: Duncan Overbruck <kernel@duncano.de>
Cc: <stable@vger.kernel.org>
Signed-off-by: Takashi Iwai <tiwai@suse.de>
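For illustration, here is a minimal userspace sketch of the lifecycle the patch establishes, using a plain counter in place of the kernel's refcount_t; the struct and helper names are hypothetical and not the ALSA API. The group starts at 1 for its single owner, every additional link takes a reference, every unlink drops one, and the group is freed exactly when the count reaches zero.

/* Minimal userspace model of the refcount lifecycle described above.
 * Hypothetical names; not the kernel's snd_pcm_group / refcount_t API. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct group {
	int refs;	/* stands in for refcount_t */
};

static struct group *group_new(void)
{
	struct group *g = calloc(1, sizeof(*g));

	assert(g);
	g->refs = 1;	/* initial count covers the single owner */
	return g;
}

/* Taking a reference on a zero count would mean the object is already
 * released; refcount_t warns about exactly this pattern. */
static void group_get(struct group *g)
{
	assert(g->refs > 0);
	g->refs++;
}

/* Drop a reference; free the group when the last one goes away. */
static void group_put(struct group *g)
{
	assert(g->refs > 0);
	if (--g->refs == 0) {
		printf("last reference dropped, freeing group\n");
		free(g);
	}
}

int main(void)
{
	struct group *g = group_new();	/* refs = 1 */

	group_get(g);	/* a second stream joins the link: refs = 2 */
	group_put(g);	/* that stream unlinks again: refs = 1 */
	group_put(g);	/* the last user goes away: group is freed */
	return 0;
}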
-rw-r--r--  sound/core/pcm_native.c | 9
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 860543a4c840..12dd9b318db1 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -77,7 +77,7 @@ void snd_pcm_group_init(struct snd_pcm_group *group)
spin_lock_init(&group->lock);
mutex_init(&group->mutex);
INIT_LIST_HEAD(&group->substreams);
- refcount_set(&group->refs, 0);
+ refcount_set(&group->refs, 1);
}
/* define group lock helpers */
@@ -1096,8 +1096,7 @@ static void snd_pcm_group_unref(struct snd_pcm_group *group,
if (!group)
return;
- do_free = refcount_dec_and_test(&group->refs) &&
- list_empty(&group->substreams);
+ do_free = refcount_dec_and_test(&group->refs);
snd_pcm_group_unlock(group, substream->pcm->nonatomic);
if (do_free)
kfree(group);
@@ -2020,6 +2019,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
snd_pcm_group_lock_irq(target_group, nonatomic);
snd_pcm_stream_lock(substream1);
snd_pcm_group_assign(substream1, target_group);
+ refcount_inc(&target_group->refs);
snd_pcm_stream_unlock(substream1);
snd_pcm_group_unlock_irq(target_group, nonatomic);
_end:
@@ -2056,13 +2056,14 @@ static int snd_pcm_unlink(struct snd_pcm_substream *substream)
snd_pcm_group_lock_irq(group, nonatomic);
relink_to_local(substream);
+ refcount_dec(&group->refs);
/* detach the last stream, too */
if (list_is_singular(&group->substreams)) {
relink_to_local(list_first_entry(&group->substreams,
struct snd_pcm_substream,
link_list));
- do_free = !refcount_read(&group->refs);
+ do_free = refcount_dec_and_test(&group->refs);
}
snd_pcm_group_unlock_irq(group, nonatomic);