// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Misc and compatibility things
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*/
#include <linux/init.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/fs.h>
#include <sound/core.h>
/*
 * release_and_free_resource - release an I/O resource region and free it.
 * @res: the resource to drop; a NULL pointer is silently ignored.
 */
void release_and_free_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
}
EXPORT_SYMBOL(release_and_free_resource);
#ifdef CONFIG_PCI
#include <linux/pci.h>
/**
 * snd_pci_quirk_lookup_id - look up a PCI SSID quirk list
 * @vendor: PCI SSV id
 * @device: PCI SSD id
 * @list: quirk list, terminated by a null entry
 *
 * Look through the given quirk list and finds a matching entry
 * with the same PCI SSID. When subdevice is 0, all subdevice
 * values may match.
 *
 * Returns the matched entry pointer, or NULL if nothing matched.
 */
const struct snd_pci_quirk *
snd_pci_quirk_lookup_id(u16 vendor, u16 device,
			const struct snd_pci_quirk *list)
{
	const struct snd_pci_quirk *entry;

	/* the list terminator has both subvendor and subdevice zero */
	for (entry = list; entry->subvendor || entry->subdevice; entry++) {
		if (entry->subvendor != vendor)
			continue;
		/* subdevice == 0 acts as a wildcard for any subdevice */
		if (!entry->subdevice)
			return entry;
		if ((device & entry->subdevice_mask) == entry->subdevice)
			return entry;
	}
	return NULL;
}
EXPORT_SYMBOL(snd_pci_quirk_lookup_id);
/**
* snd_pci_quirk_lookup - look up a PCI SSID quirk list
* @pci: pci_dev handle
* @list: quirk list, terminated by a null entry
*
* Look through the given quirk list and finds a matching entry
* with the same PCI SSID. When subdevice is 0, all subdevice
* values may match.
*
* Returns the matched entry pointer, or NULL if nothing matched.
*/
const struct snd_pci_quirk *
snd_pci_quirk_lookup(struct pci_dev *pci, const struct snd_pci_quirk *list)
{
if (!pci)
return NULL;
return snd_pci_quirk_lookup_id(pci->subsystem_vendor,
pci->subsystem_device,
list);
}
EXPORT_SYMBOL(snd_pci_quirk_lookup);
#endif
/*
* Deferred async signal helpers
*
* Below are a few helper functions to wrap the async signal handling
* in the deferred work. The main purpose is to avoid the messy deadlock
* around tasklist_lock and co at the kill_fasync() invocation.
* fasync_helper() and kill_fasync() are replaced with snd_fasync_helper()
* and snd_kill_fasync(), respectively. In addition, snd_fasync_free() has
* to be called at releasing the relevant file object.
*/
/*
 * Per-user async notification state.  One instance wraps a
 * fasync_struct; when a signal delivery is requested it is queued on
 * snd_fasync_list and delivered later from the work handler.
 */
struct snd_fasync {
	struct fasync_struct *fasync;	/* the wrapped fasync entry */
	int signal;			/* signal number for kill_fasync() */
	int poll;			/* poll band for kill_fasync() */
	int on;				/* set by snd_fasync_helper(); 0 suppresses delivery */
	struct list_head list;		/* link on snd_fasync_list while pending */
};

/* guards snd_fasync_list and the fields of queued snd_fasync objects */
static DEFINE_SPINLOCK(snd_fasync_lock);
/* entries with a pending kill_fasync(), consumed by snd_fasync_work_fn() */
static LIST_HEAD(snd_fasync_list);
/*
 * Work handler: drain snd_fasync_list and deliver each pending signal.
 * kill_fasync() is invoked with snd_fasync_lock dropped so that the
 * deferred delivery does not hold our spinlock across the signal path
 * (avoiding the tasklist_lock deadlock described above).
 */
static void snd_fasync_work_fn(struct work_struct *work)
{
	struct snd_fasync *fasync;
	spin_lock_irq(&snd_fasync_lock);
	while (!list_empty(&snd_fasync_list)) {
		fasync = list_first_entry(&snd_fasync_list, struct snd_fasync, list);
		/* list_del_init() so a concurrent list_move() can re-queue it */
		list_del_init(&fasync->list);
		spin_unlock_irq(&snd_fasync_lock);
		/* ->on may have been cleared by snd_fasync_free() meanwhile */
		if (fasync->on)
			kill_fasync(&fasync->fasync, fasync->signal, fasync->poll);
		/* re-take the lock before re-checking the list */
		spin_lock_irq(&snd_fasync_lock);
	}
	spin_unlock_irq(&snd_fasync_lock);
}
static DECLARE_WORK(snd_fasync_work, snd_fasync_work_fn);
/*
 * snd_fasync_helper - deferred-safe replacement for fasync_helper().
 * @fd, @file, @on: as for fasync_helper()
 * @fasyncp: points to the caller's snd_fasync slot; allocated on first
 *           enable and reused afterwards (freed via snd_fasync_free())
 *
 * Returns 0 or a negative error code (-ENOMEM on allocation failure),
 * or the result of fasync_helper().
 */
int snd_fasync_helper(int fd, struct file *file, int on,
		      struct snd_fasync **fasyncp)
{
	struct snd_fasync *entry = NULL;

	/* pre-allocate outside the lock; only needed when enabling */
	if (on) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			return -ENOMEM;
		INIT_LIST_HEAD(&entry->list);
	}

	spin_lock_irq(&snd_fasync_lock);
	if (!*fasyncp) {
		if (!entry) {
			/* disabling a never-installed entry: nothing to do */
			spin_unlock_irq(&snd_fasync_lock);
			return 0;
		}
		*fasyncp = entry;
	} else {
		/* slot already populated; drop the speculative allocation */
		kfree(entry);
		entry = *fasyncp;
	}
	entry->on = on;
	spin_unlock_irq(&snd_fasync_lock);
	return fasync_helper(fd, file, on, &entry->fasync);
}
EXPORT_SYMBOL_GPL(snd_fasync_helper);
/*
 * snd_kill_fasync - deferred-safe replacement for kill_fasync().
 * @fasync: the snd_fasync set up via snd_fasync_helper(); may be NULL
 * @signal: signal number to deliver
 * @poll: poll band argument
 *
 * Records the request and queues the entry for the work handler
 * instead of calling kill_fasync() directly (irq-safe; callable from
 * atomic context).
 */
void snd_kill_fasync(struct snd_fasync *fasync, int signal, int poll)
{
	unsigned long flags;
	/* nothing installed, or delivery currently disabled */
	if (!fasync || !fasync->on)
		return;
	spin_lock_irqsave(&snd_fasync_lock, flags);
	fasync->signal = signal;
	fasync->poll = poll;
	/* list_move() also handles an entry that is already queued */
	list_move(&fasync->list, &snd_fasync_list);
	/* scheduled inside the lock so the work sees a consistent list */
	schedule_work(&snd_fasync_work);
	spin_unlock_irqrestore(&snd_fasync_lock, flags);
}
EXPORT_SYMBOL_GPL(snd_kill_fasync);
/*
 * snd_fasync_free - release a snd_fasync set up via snd_fasync_helper().
 * @fasync: the entry to free; NULL is silently ignored.
 *
 * Clears the enable flag and drains the work queue before freeing, so
 * no in-flight deferred kill_fasync() can touch the stale object.
 */
void snd_fasync_free(struct snd_fasync *fasync)
{
	if (fasync) {
		fasync->on = 0;
		flush_work(&snd_fasync_work);
		kfree(fasync);
	}
}
EXPORT_SYMBOL_GPL(snd_fasync_free);