author     Takashi Iwai <tiwai@suse.de>    2019-03-28 17:55:08 +0300
committer  Takashi Iwai <tiwai@suse.de>    2019-04-09 18:22:46 +0300
commit     4b24b960b10b6a4e30beba3ce097fa867b4a085f (patch)
tree       5125bbf3b611a39fb50413c41cdd7c310cd85d36 /sound/core/seq
parent     fd7ae83de11a597cbe15770382f101c784a79b1c (diff)
download   linux-4b24b960b10b6a4e30beba3ce097fa867b4a085f.tar.xz
ALSA: seq: Align temporary re-locking with irqsave version
In a few places in the sequencer core, we temporarily unlock / re-lock the pool spin lock while waiting for an allocation in blocking mode. These spin_unlock_irq() / spin_lock_irq() pairs are called even though spin_lock_irqsave() is used initially (and spin_unlock_irqrestore() again at the end of the function). This is likely OK for now, but it's a bit confusing and error-prone. This patch replaces these temporary re-locking lines with the irqsave variants to make the lock/unlock sequence more consistent.

Signed-off-by: Takashi Iwai <tiwai@suse.de>
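For illustration, here is a minimal kernel-style C sketch of the blocking wait pattern the patch makes consistent. All identifiers (my_lock, my_sleep, my_cond, wait_for_cond) are hypothetical and not taken from the ALSA sequencer code; the point is only that the temporary unlock / re-lock around schedule() uses the same irqsave/irqrestore pair as the outer locking.

/*
 * Sketch of the wait loop with the irqsave variants used consistently.
 * Names are hypothetical; this only illustrates the locking pattern.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static DEFINE_SPINLOCK(my_lock);
static DECLARE_WAIT_QUEUE_HEAD(my_sleep);
static bool my_cond;

static int wait_for_cond(void)
{
	wait_queue_entry_t wait;
	unsigned long flags;

	init_waitqueue_entry(&wait, current);

	spin_lock_irqsave(&my_lock, flags);	/* outer lock: irqsave */
	while (!my_cond) {
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&my_sleep, &wait);
		/*
		 * Temporarily drop the lock around schedule().  Using the
		 * irqrestore / irqsave pair here (instead of the plain
		 * _irq pair) keeps the saved IRQ state in 'flags'
		 * consistent with the outer lock/unlock.
		 */
		spin_unlock_irqrestore(&my_lock, flags);
		schedule();
		spin_lock_irqsave(&my_lock, flags);
		remove_wait_queue(&my_sleep, &wait);
		if (signal_pending(current)) {
			spin_unlock_irqrestore(&my_lock, flags);
			return -ERESTARTSYS;
		}
	}
	spin_unlock_irqrestore(&my_lock, flags);
	return 0;
}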
Diffstat (limited to 'sound/core/seq')
-rw-r--r--  sound/core/seq/seq_fifo.c    | 4
-rw-r--r--  sound/core/seq/seq_memory.c  | 4
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index 72c0302a55d2..613ae10d33b8 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -195,9 +195,9 @@ int snd_seq_fifo_cell_out(struct snd_seq_fifo *f,
}
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&f->input_sleep, &wait);
- spin_unlock_irq(&f->lock);
+ spin_unlock_irqrestore(&f->lock, flags);
schedule();
- spin_lock_irq(&f->lock);
+ spin_lock_irqsave(&f->lock, flags);
remove_wait_queue(&f->input_sleep, &wait);
if (signal_pending(current)) {
spin_unlock_irqrestore(&f->lock, flags);
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index 6ea4d8a5a71e..ae0b8971f6ce 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -244,13 +244,13 @@ static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&pool->output_sleep, &wait);
- spin_unlock_irq(&pool->lock);
+ spin_unlock_irqrestore(&pool->lock, flags);
if (mutexp)
mutex_unlock(mutexp);
schedule();
if (mutexp)
mutex_lock(mutexp);
- spin_lock_irq(&pool->lock);
+ spin_lock_irqsave(&pool->lock, flags);
remove_wait_queue(&pool->output_sleep, &wait);
/* interrupted? */
if (signal_pending(current)) {
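A note on why the irqsave variant matters in these paths: spin_unlock_irq() unconditionally re-enables local interrupts, whereas spin_unlock_irqrestore() only restores whatever interrupt state was captured by the matching spin_lock_irqsave(). As long as these functions are only entered with interrupts enabled, the two behave identically, which is why the old code was "likely OK for now"; but if they were ever reached with interrupts already disabled, the plain _irq pair would silently turn interrupts back on, which is the error-prone case the commit message refers to.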