author      Takashi Iwai <tiwai@suse.de>    2022-03-22 20:07:19 +0300
committer   Takashi Iwai <tiwai@suse.de>    2022-03-22 22:56:42 +0300
commit      3c3201f8c7bb77eb53b08a3ca8d9a4ddc500b4c0 (patch)
tree        cdc35dbfdb8c8f75bf2d2ba62912327d616f570d /sound/core
parent      dca947d4d26dbf925a64a6cfb2ddbc035e831a3d (diff)
download    linux-3c3201f8c7bb77eb53b08a3ca8d9a4ddc500b4c0.tar.xz
ALSA: pcm: Fix races among concurrent prepare and hw_params/hw_free calls
Like the previous fixes to the hw_params and hw_free ioctl races, we
need to serialize the concurrent prepare ioctl calls against hw_params
and hw_free, too.
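To make the racing call pattern concrete, here is a hypothetical userspace
sketch (not a reproducer from this report; the device path
/dev/snd/pcmC0D0p and the loop counts are arbitrary assumptions) that
simply issues the prepare and hw_free ioctls concurrently on the same
substream, which is the pattern this series serializes:

/*
 * Hypothetical illustration only: without a prior HW_PARAMS these ioctls
 * just return an error, so this does not reproduce the bug by itself;
 * it only shows the two paths being entered concurrently.
 */
#include <fcntl.h>
#include <pthread.h>
#include <sound/asound.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int fd;

static void *prepare_loop(void *arg)
{
	int i;

	for (i = 0; i < 1000; i++)
		ioctl(fd, SNDRV_PCM_IOCTL_PREPARE);	/* prepare path */
	return NULL;
}

int main(void)
{
	pthread_t t;
	int i;

	fd = open("/dev/snd/pcmC0D0p", O_RDWR);	/* device path is an assumption */
	if (fd < 0)
		return 1;

	pthread_create(&t, NULL, prepare_loop, NULL);
	for (i = 0; i < 1000; i++)
		ioctl(fd, SNDRV_PCM_IOCTL_HW_FREE);	/* hw_free path */
	pthread_join(t, NULL);

	close(fd);
	return 0;
}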
This patch implements the locking with the existing
runtime->buffer_mutex for the prepare ioctl. Unlike the previous case
of snd_pcm_hw_params() and snd_pcm_hw_free(), snd_pcm_prepare() is
performed on the linked streams, hence the lock can't simply be
applied at the top level. To track the lock in each linked substream,
we modify snd_pcm_action_group() slightly and apply the buffer_mutex
there for the stream_lock=false case (where formerly no lock was
applied).
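For illustration, the following is a minimal userspace model of that
locking scheme (plain pthread mutexes instead of the kernel's
mutex_lock_nested(); the names stream, prepare_group() and
hw_free_stream() are invented for this sketch and do not exist in the
kernel): a group-wide prepare takes the buffer mutex of every linked
stream, while hw_free takes only its own, so the two can never touch
the same buffer concurrently.

/*
 * Minimal userspace sketch of the serialization idea; names and
 * structure are invented for illustration, this is not kernel code.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct stream {
	pthread_mutex_t buffer_mutex;	/* models runtime->buffer_mutex */
	char *buffer;			/* models the hw buffer */
};

#define GROUP_SIZE 2
static struct stream group[GROUP_SIZE];	/* models linked substreams */

/* prepare acts on all linked streams: take every buffer_mutex first */
static void prepare_group(void)
{
	int i;

	for (i = 0; i < GROUP_SIZE; i++)
		pthread_mutex_lock(&group[i].buffer_mutex);

	for (i = 0; i < GROUP_SIZE; i++) {
		/* safe: hw_free cannot run concurrently on these streams */
		if (group[i].buffer)
			group[i].buffer[0] = 0;
	}

	for (i = GROUP_SIZE - 1; i >= 0; i--)
		pthread_mutex_unlock(&group[i].buffer_mutex);
}

/* hw_free acts on a single stream: take only its own buffer_mutex */
static void hw_free_stream(struct stream *s)
{
	pthread_mutex_lock(&s->buffer_mutex);
	free(s->buffer);
	s->buffer = NULL;
	pthread_mutex_unlock(&s->buffer_mutex);
}

static void *prepare_thread(void *arg)
{
	prepare_group();
	return NULL;
}

int main(void)
{
	pthread_t t;
	int i;

	for (i = 0; i < GROUP_SIZE; i++) {
		pthread_mutex_init(&group[i].buffer_mutex, NULL);
		group[i].buffer = malloc(4096);
	}

	/* concurrent prepare vs. hw_free on stream 0: now serialized */
	pthread_create(&t, NULL, prepare_thread, NULL);
	hw_free_stream(&group[0]);
	pthread_join(t, NULL);

	puts("no use-after-free: prepare and hw_free were serialized");
	return 0;
}

In the kernel, the per-substream buffer_mutex is taken with
mutex_lock_nested() and an increasing depth so that lockdep accepts
several locks of the same class being held at once; that is what the
new stream_lock=false branch of snd_pcm_action_group() in the diff
below does.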
Cc: <stable@vger.kernel.org>
Reviewed-by: Jaroslav Kysela <perex@perex.cz>
Link: https://lore.kernel.org/r/20220322170720.3529-4-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
Diffstat (limited to 'sound/core')
-rw-r--r--	sound/core/pcm_native.c	32
1 file changed, 18 insertions, 14 deletions
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 266895374b83..0e4fbf5fd87b 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1190,15 +1190,17 @@ struct action_ops {
 static int snd_pcm_action_group(const struct action_ops *ops,
 				struct snd_pcm_substream *substream,
 				snd_pcm_state_t state,
-				bool do_lock)
+				bool stream_lock)
 {
 	struct snd_pcm_substream *s = NULL;
 	struct snd_pcm_substream *s1;
 	int res = 0, depth = 1;
 
 	snd_pcm_group_for_each_entry(s, substream) {
-		if (do_lock && s != substream) {
-			if (s->pcm->nonatomic)
+		if (s != substream) {
+			if (!stream_lock)
+				mutex_lock_nested(&s->runtime->buffer_mutex, depth);
+			else if (s->pcm->nonatomic)
 				mutex_lock_nested(&s->self_group.mutex, depth);
 			else
 				spin_lock_nested(&s->self_group.lock, depth);
@@ -1226,18 +1228,18 @@ static int snd_pcm_action_group(const struct action_ops *ops,
 		ops->post_action(s, state);
 	}
  _unlock:
-	if (do_lock) {
-		/* unlock streams */
-		snd_pcm_group_for_each_entry(s1, substream) {
-			if (s1 != substream) {
-				if (s1->pcm->nonatomic)
-					mutex_unlock(&s1->self_group.mutex);
-				else
-					spin_unlock(&s1->self_group.lock);
-			}
-			if (s1 == s)	/* end */
-				break;
+	/* unlock streams */
+	snd_pcm_group_for_each_entry(s1, substream) {
+		if (s1 != substream) {
+			if (!stream_lock)
+				mutex_unlock(&s1->runtime->buffer_mutex);
+			else if (s1->pcm->nonatomic)
+				mutex_unlock(&s1->self_group.mutex);
+			else
+				spin_unlock(&s1->self_group.lock);
 		}
+		if (s1 == s)	/* end */
+			break;
 	}
 	return res;
 }
@@ -1367,10 +1369,12 @@ static int snd_pcm_action_nonatomic(const struct action_ops *ops,
 
 	/* Guarantee the group members won't change during non-atomic action */
 	down_read(&snd_pcm_link_rwsem);
+	mutex_lock(&substream->runtime->buffer_mutex);
 	if (snd_pcm_stream_linked(substream))
 		res = snd_pcm_action_group(ops, substream, state, false);
 	else
 		res = snd_pcm_action_single(ops, substream, state);
+	mutex_unlock(&substream->runtime->buffer_mutex);
 	up_read(&snd_pcm_link_rwsem);
 	return res;
 }