-rw-r--r--  drivers/firewire/core-card.c   |   4
-rw-r--r--  drivers/firewire/core-cdev.c   |  71
-rw-r--r--  drivers/firewire/core-iso.c    | 102
-rw-r--r--  drivers/firewire/core.h        |  14
-rw-r--r--  drivers/firewire/ohci.c        | 234
-rw-r--r--  include/linux/firewire.h       |  36
-rw-r--r--  sound/firewire/amdtp-stream.c  |  31
7 files changed, 266 insertions, 226 deletions
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 0462d7b9e547..a754c6366b97 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -704,8 +704,8 @@ static int dummy_enable_phys_dma(struct fw_card *card,
return -ENODEV;
}
-static struct fw_iso_context *dummy_allocate_iso_context(struct fw_card *card,
- int type, int channel, size_t header_size)
+static struct fw_iso_context *dummy_allocate_iso_context(struct fw_card *card, int type,
+ int channel, size_t header_size, size_t header_storage_size)
{
return ERR_PTR(-ENODEV);
}
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 49dc1612c691..9e964fdd175c 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -63,10 +63,10 @@ struct client {
u64 bus_reset_closure;
struct fw_iso_context *iso_context;
+ struct mutex iso_context_mutex;
u64 iso_closure;
struct fw_iso_buffer buffer;
unsigned long vm_start;
- bool buffer_is_mapped;
struct list_head phy_receiver_link;
u64 phy_receiver_closure;
@@ -306,6 +306,7 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
INIT_LIST_HEAD(&client->phy_receiver_link);
INIT_LIST_HEAD(&client->link);
kref_init(&client->kref);
+ mutex_init(&client->iso_context_mutex);
file->private_data = client;
@@ -1025,25 +1026,10 @@ static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context)
return DMA_FROM_DEVICE;
}
-static struct fw_iso_context *fw_iso_mc_context_create(struct fw_card *card,
- fw_iso_mc_callback_t callback,
- void *callback_data)
-{
- struct fw_iso_context *ctx;
-
- ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL,
- 0, 0, 0, NULL, callback_data);
- if (!IS_ERR(ctx))
- ctx->callback.mc = callback;
-
- return ctx;
-}
-
static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
struct fw_iso_context *context;
- union fw_iso_callback cb;
int ret;
BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
@@ -1055,20 +1041,15 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
case FW_ISO_CONTEXT_TRANSMIT:
if (a->speed > SCODE_3200 || a->channel > 63)
return -EINVAL;
-
- cb.sc = iso_callback;
break;
case FW_ISO_CONTEXT_RECEIVE:
if (a->header_size < 4 || (a->header_size & 3) ||
a->channel > 63)
return -EINVAL;
-
- cb.sc = iso_callback;
break;
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
- cb.mc = iso_mc_callback;
break;
default:
@@ -1076,38 +1057,36 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
}
if (a->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
- context = fw_iso_mc_context_create(client->device->card, cb.mc,
- client);
+ context = fw_iso_mc_context_create(client->device->card, iso_mc_callback, client);
else
- context = fw_iso_context_create(client->device->card, a->type,
- a->channel, a->speed,
- a->header_size, cb.sc, client);
+ context = fw_iso_context_create(client->device->card, a->type, a->channel, a->speed,
+ a->header_size, iso_callback, client);
if (IS_ERR(context))
return PTR_ERR(context);
if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW)
- context->drop_overflow_headers = true;
+ context->flags |= FW_ISO_CONTEXT_FLAG_DROP_OVERFLOW_HEADERS;
// We only support one context at this time.
- guard(spinlock_irq)(&client->lock);
-
- if (client->iso_context != NULL) {
- fw_iso_context_destroy(context);
-
- return -EBUSY;
- }
- if (!client->buffer_is_mapped) {
- ret = fw_iso_buffer_map_dma(&client->buffer,
- client->device->card,
- iso_dma_direction(context));
- if (ret < 0) {
+ scoped_guard(mutex, &client->iso_context_mutex) {
+ if (client->iso_context != NULL) {
fw_iso_context_destroy(context);
- return ret;
+ return -EBUSY;
+ }
+ // The DMA mapping is performed here if the buffer has already been allocated by the
+ // mmap(2) system call. If not, it is deferred to that system call.
+ if (client->buffer.pages && !client->buffer.dma_addrs) {
+ ret = fw_iso_buffer_map_dma(&client->buffer, client->device->card,
+ iso_dma_direction(context));
+ if (ret < 0) {
+ fw_iso_context_destroy(context);
+
+ return ret;
+ }
}
- client->buffer_is_mapped = true;
+ client->iso_closure = a->closure;
+ client->iso_context = context;
}
- client->iso_closure = a->closure;
- client->iso_context = context;
a->handle = 0;
@@ -1826,13 +1805,14 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
if (ret < 0)
return ret;
- scoped_guard(spinlock_irq, &client->lock) {
+ scoped_guard(mutex, &client->iso_context_mutex) {
+ // The direction of DMA can be determined only if the isochronous context has already been
+ // allocated. If not, the DMA mapping operation is postponed until the context is allocated.
if (client->iso_context) {
ret = fw_iso_buffer_map_dma(&client->buffer, client->device->card,
iso_dma_direction(client->iso_context));
if (ret < 0)
goto fail;
- client->buffer_is_mapped = true;
}
}
@@ -1879,6 +1859,7 @@ static int fw_device_op_release(struct inode *inode, struct file *file)
if (client->iso_context)
fw_iso_context_destroy(client->iso_context);
+ mutex_destroy(&client->iso_context_mutex);
if (client->buffer.pages)
fw_iso_buffer_destroy(&client->buffer, client->device->card);
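The core-cdev.c changes above serialize isochronous context creation with a dedicated iso_context_mutex and rely on the scope-based guards from <linux/cleanup.h>, which is why the early returns inside ioctl_create_iso_context() need no explicit unlock. A minimal sketch of that pattern, using hypothetical names rather than the driver's own:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);	/* hypothetical lock */
static void *example_resource;		/* hypothetical state protected by the lock */

static int example_install(void *candidate)
{
	/* The mutex is released automatically whenever the scope is left,
	 * including at the early return below. */
	scoped_guard(mutex, &example_mutex) {
		if (example_resource)
			return -EBUSY;
		example_resource = candidate;
	}
	return 0;
}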
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index a67493862c85..3190b2ca1298 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -30,48 +30,57 @@
int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count)
{
- int i;
+ struct page **page_array __free(kfree) = kcalloc(page_count, sizeof(page_array[0]), GFP_KERNEL);
- buffer->page_count = 0;
- buffer->page_count_mapped = 0;
- buffer->pages = kmalloc_array(page_count, sizeof(buffer->pages[0]),
- GFP_KERNEL);
- if (buffer->pages == NULL)
+ if (!page_array)
return -ENOMEM;
- for (i = 0; i < page_count; i++) {
- buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
- if (buffer->pages[i] == NULL)
- break;
- }
- buffer->page_count = i;
- if (i < page_count) {
- fw_iso_buffer_destroy(buffer, NULL);
+ // Retrieve noncontiguous pages. Each descriptor for 1394 OHCI isochronous DMA contexts
+ // carries its own address and length pair; pages are used here for the convenience of
+ // mapping them into the virtual address space of the user process.
+ unsigned long nr_populated = alloc_pages_bulk(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO,
+ page_count, page_array);
+ if (nr_populated != page_count) {
+ // Assuming the above call fills page_array sequentially from the beginning.
+ release_pages(page_array, nr_populated);
return -ENOMEM;
}
+ buffer->page_count = page_count;
+ buffer->pages = no_free_ptr(page_array);
+
return 0;
}
int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
enum dma_data_direction direction)
{
- dma_addr_t address;
+ dma_addr_t *dma_addrs __free(kfree) = kcalloc(buffer->page_count, sizeof(dma_addrs[0]),
+ GFP_KERNEL);
int i;
- buffer->direction = direction;
+ if (!dma_addrs)
+ return -ENOMEM;
+ // Retrieve DMA mapping addresses for the pages. They are not contiguous. Maintain the cache
+ // coherency for the pages by hand.
for (i = 0; i < buffer->page_count; i++) {
- address = dma_map_page(card->device, buffer->pages[i],
- 0, PAGE_SIZE, direction);
- if (dma_mapping_error(card->device, address))
+ // Alternatively, dma_map_phys() with the physical address of each page could be used here.
+ dma_addr_t dma_addr = dma_map_page(card->device, buffer->pages[i], 0, PAGE_SIZE,
+ direction);
+ if (dma_mapping_error(card->device, dma_addr))
break;
- set_page_private(buffer->pages[i], address);
+ dma_addrs[i] = dma_addr;
}
- buffer->page_count_mapped = i;
- if (i < buffer->page_count)
+ if (i < buffer->page_count) {
+ while (i-- > 0)
+ dma_unmap_page(card->device, dma_addrs[i], PAGE_SIZE, buffer->direction);
return -ENOMEM;
+ }
+
+ buffer->direction = direction;
+ buffer->dma_addrs = no_free_ptr(dma_addrs);
return 0;
}
@@ -96,34 +105,31 @@ EXPORT_SYMBOL(fw_iso_buffer_init);
void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
struct fw_card *card)
{
- int i;
- dma_addr_t address;
+ if (buffer->dma_addrs) {
+ for (int i = 0; i < buffer->page_count; ++i) {
+ dma_addr_t dma_addr = buffer->dma_addrs[i];
+ dma_unmap_page(card->device, dma_addr, PAGE_SIZE, buffer->direction);
+ }
+ kfree(buffer->dma_addrs);
+ buffer->dma_addrs = NULL;
+ }
- for (i = 0; i < buffer->page_count_mapped; i++) {
- address = page_private(buffer->pages[i]);
- dma_unmap_page(card->device, address,
- PAGE_SIZE, buffer->direction);
+ if (buffer->pages) {
+ release_pages(buffer->pages, buffer->page_count);
+ kfree(buffer->pages);
+ buffer->pages = NULL;
}
- for (i = 0; i < buffer->page_count; i++)
- __free_page(buffer->pages[i]);
- kfree(buffer->pages);
- buffer->pages = NULL;
buffer->page_count = 0;
- buffer->page_count_mapped = 0;
}
EXPORT_SYMBOL(fw_iso_buffer_destroy);
/* Convert DMA address to offset into virtually contiguous buffer. */
size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed)
{
- size_t i;
- dma_addr_t address;
- ssize_t offset;
-
- for (i = 0; i < buffer->page_count; i++) {
- address = page_private(buffer->pages[i]);
- offset = (ssize_t)completed - (ssize_t)address;
+ for (int i = 0; i < buffer->page_count; i++) {
+ dma_addr_t dma_addr = buffer->dma_addrs[i];
+ ssize_t offset = (ssize_t)completed - (ssize_t)dma_addr;
if (offset > 0 && offset <= PAGE_SIZE)
return (i << PAGE_SHIFT) + offset;
}
@@ -131,14 +137,14 @@ size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed)
return 0;
}
-struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
- int type, int channel, int speed, size_t header_size,
- fw_iso_callback_t callback, void *callback_data)
+struct fw_iso_context *__fw_iso_context_create(struct fw_card *card, int type, int channel,
+ int speed, size_t header_size, size_t header_storage_size,
+ union fw_iso_callback callback, void *callback_data)
{
struct fw_iso_context *ctx;
- ctx = card->driver->allocate_iso_context(card,
- type, channel, header_size);
+ ctx = card->driver->allocate_iso_context(card, type, channel, header_size,
+ header_storage_size);
if (IS_ERR(ctx))
return ctx;
@@ -146,8 +152,10 @@ struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
ctx->type = type;
ctx->channel = channel;
ctx->speed = speed;
+ ctx->flags = 0;
ctx->header_size = header_size;
- ctx->callback.sc = callback;
+ ctx->header_storage_size = header_storage_size;
+ ctx->callback = callback;
ctx->callback_data = callback_data;
trace_isoc_outbound_allocate(ctx, channel, speed);
@@ -156,7 +164,7 @@ struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
return ctx;
}
-EXPORT_SYMBOL(fw_iso_context_create);
+EXPORT_SYMBOL(__fw_iso_context_create);
void fw_iso_context_destroy(struct fw_iso_context *ctx)
{
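The reworked fw_iso_buffer_alloc() and fw_iso_buffer_map_dma() above lean on __free() and no_free_ptr() from <linux/cleanup.h> so that temporary arrays are released automatically on error paths. A minimal sketch of the pattern with a hypothetical helper:

#include <linux/cleanup.h>
#include <linux/slab.h>

/* Hypothetical helper: allocate and initialize an array, transferring ownership
 * to the caller only when every element was filled in successfully. */
static u32 *example_alloc_table(size_t count)
{
	u32 *table __free(kfree) = kcalloc(count, sizeof(*table), GFP_KERNEL);

	if (!table)
		return NULL;

	for (size_t i = 0; i < count; ++i)
		table[i] = i;

	/* no_free_ptr() disarms the automatic kfree(); any earlier return
	 * would have freed the allocation implicitly. */
	return no_free_ptr(table);
}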
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 41fb39d9a4e6..8b49d7480c37 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -100,8 +100,8 @@ struct fw_card_driver {
void (*write_csr)(struct fw_card *card, int csr_offset, u32 value);
struct fw_iso_context *
- (*allocate_iso_context)(struct fw_card *card,
- int type, int channel, size_t header_size);
+ (*allocate_iso_context)(struct fw_card *card, int type, int channel, size_t header_size,
+ size_t header_storage_size);
void (*free_iso_context)(struct fw_iso_context *ctx);
int (*start_iso)(struct fw_iso_context *ctx,
@@ -166,12 +166,22 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count);
int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
enum dma_data_direction direction);
+size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed);
static inline void fw_iso_context_init_work(struct fw_iso_context *ctx, work_func_t func)
{
INIT_WORK(&ctx->work, func);
}
+static inline struct fw_iso_context *fw_iso_mc_context_create(struct fw_card *card,
+ fw_iso_mc_callback_t callback, void *callback_data)
+{
+ union fw_iso_callback cb = { .mc = callback };
+
+ return __fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL, 0, 0, 0, 0, cb,
+ callback_data);
+}
+
/* -topology */
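fw_iso_buffer_map_dma(), declared above, and ar_context_init() in ohci.c below both map one page at a time and unwind the mappings already made if any page fails. A minimal sketch of that per-page mapping pattern, with hypothetical names, assuming the generic DMA mapping API:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/mm.h>

/* Hypothetical helper: map each page for DMA, or undo everything on failure. */
static int example_map_pages(struct device *dev, struct page **pages, dma_addr_t *dma_addrs,
			     int page_count, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < page_count; i++) {
		dma_addr_t dma_addr = dma_map_page(dev, pages[i], 0, PAGE_SIZE, dir);

		if (dma_mapping_error(dev, dma_addr))
			break;
		dma_addrs[i] = dma_addr;
	}
	if (i < page_count) {
		while (i-- > 0)
			dma_unmap_page(dev, dma_addrs[i], PAGE_SIZE, dir);
		return -ENOMEM;
	}

	return 0;
}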
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index e3e78dc42530..1c868c1e4a49 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -86,7 +86,7 @@ struct descriptor {
#define AR_BUFFER_SIZE (32*1024)
#define AR_BUFFERS_MIN DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE)
/* we need at least two pages for proper list management */
-#define AR_BUFFERS (AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2)
+#define AR_BUFFERS MAX(2, AR_BUFFERS_MIN)
#define MAX_ASYNC_PAYLOAD 4096
#define MAX_AR_PACKET_SIZE (16 + MAX_ASYNC_PAYLOAD + 4)
@@ -96,6 +96,7 @@ struct ar_context {
struct fw_ohci *ohci;
struct page *pages[AR_BUFFERS];
void *buffer;
+ dma_addr_t dma_addrs[AR_BUFFERS];
struct descriptor *descriptors;
dma_addr_t descriptors_bus;
void *pointer;
@@ -167,14 +168,20 @@ struct at_context {
struct iso_context {
struct fw_iso_context base;
struct context context;
- void *header;
- size_t header_length;
unsigned long flushing_completions;
- u32 mc_buffer_bus;
- u16 mc_completed;
- u16 last_timestamp;
u8 sync;
u8 tags;
+ union {
+ struct {
+ u16 last_timestamp;
+ size_t header_length;
+ void *header;
+ } sc;
+ struct {
+ u32 buffer_bus;
+ u16 completed;
+ } mc;
+ };
};
#define CONFIG_ROM_SIZE (CSR_CONFIG_ROM_END - CSR_CONFIG_ROM)
@@ -513,11 +520,6 @@ static int ohci_update_phy_reg(struct fw_card *card, int addr,
return update_phy_reg(ohci, addr, clear_bits, set_bits);
}
-static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i)
-{
- return page_private(ctx->pages[i]);
-}
-
static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
{
struct descriptor *d;
@@ -539,18 +541,22 @@ static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
static void ar_context_release(struct ar_context *ctx)
{
struct device *dev = ctx->ohci->card.device;
- unsigned int i;
if (!ctx->buffer)
return;
+ for (int i = 0; i < AR_BUFFERS; ++i) {
+ dma_addr_t dma_addr = ctx->dma_addrs[i];
+ if (dma_addr)
+ dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
+ }
+ memset(ctx->dma_addrs, 0, sizeof(ctx->dma_addrs));
+
vunmap(ctx->buffer);
+ ctx->buffer = NULL;
- for (i = 0; i < AR_BUFFERS; i++) {
- if (ctx->pages[i])
- dma_free_pages(dev, PAGE_SIZE, ctx->pages[i],
- ar_buffer_bus(ctx, i), DMA_FROM_DEVICE);
- }
+ release_pages(ctx->pages, AR_BUFFERS);
+ memset(ctx->pages, 0, sizeof(ctx->pages));
}
static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
@@ -643,14 +649,12 @@ static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
i = ar_first_buffer_index(ctx);
while (i != end_buffer_index) {
- dma_sync_single_for_cpu(ctx->ohci->card.device,
- ar_buffer_bus(ctx, i),
- PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(ctx->ohci->card.device, ctx->dma_addrs[i], PAGE_SIZE,
+ DMA_FROM_DEVICE);
i = ar_next_buffer_index(i);
}
if (end_buffer_offset > 0)
- dma_sync_single_for_cpu(ctx->ohci->card.device,
- ar_buffer_bus(ctx, i),
+ dma_sync_single_for_cpu(ctx->ohci->card.device, ctx->dma_addrs[i],
end_buffer_offset, DMA_FROM_DEVICE);
}
@@ -791,9 +795,8 @@ static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
i = ar_first_buffer_index(ctx);
while (i != end_buffer) {
- dma_sync_single_for_device(ctx->ohci->card.device,
- ar_buffer_bus(ctx, i),
- PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_sync_single_for_device(ctx->ohci->card.device, ctx->dma_addrs[i], PAGE_SIZE,
+ DMA_FROM_DEVICE);
ar_context_link_page(ctx, i);
i = ar_next_buffer_index(i);
}
@@ -845,31 +848,57 @@ static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
{
struct device *dev = ohci->card.device;
unsigned int i;
- dma_addr_t dma_addr;
struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
+ dma_addr_t dma_addrs[AR_BUFFERS];
+ void *vaddr;
struct descriptor *d;
ctx->regs = regs;
ctx->ohci = ohci;
INIT_WORK(&ctx->work, ohci_ar_context_work);
- for (i = 0; i < AR_BUFFERS; i++) {
- ctx->pages[i] = dma_alloc_pages(dev, PAGE_SIZE, &dma_addr,
- DMA_FROM_DEVICE, GFP_KERNEL);
- if (!ctx->pages[i])
- goto out_of_memory;
- set_page_private(ctx->pages[i], dma_addr);
- dma_sync_single_for_device(dev, dma_addr, PAGE_SIZE,
- DMA_FROM_DEVICE);
+ // Retrieve noncontiguous pages. Each descriptor for 1394 OHCI AR DMA contexts carries its
+ // own address and length pair. Pages are used here so that a contiguous address range can
+ // be constructed in the kernel virtual address space.
+ unsigned long nr_populated = alloc_pages_bulk(GFP_KERNEL | GFP_DMA32, AR_BUFFERS, pages);
+
+ if (nr_populated != AR_BUFFERS) {
+ release_pages(pages, nr_populated);
+ return -ENOMEM;
}
- for (i = 0; i < AR_BUFFERS; i++)
- pages[i] = ctx->pages[i];
+ // Map the pages into contiguous kernel virtual addresses so that packet data spanning
+ // pages can be accessed as a contiguous region, especially across the last and first
+ // pages.
for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
- pages[AR_BUFFERS + i] = ctx->pages[i];
- ctx->buffer = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
- if (!ctx->buffer)
- goto out_of_memory;
+ pages[AR_BUFFERS + i] = pages[i];
+ vaddr = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
+ if (!vaddr) {
+ release_pages(pages, nr_populated);
+ return -ENOMEM;
+ }
+
+ // Retrieve DMA mapping addresses for the pages. They are not contiguous. Maintain the cache
+ // coherency for the pages by hand.
+ for (i = 0; i < AR_BUFFERS; i++) {
+ // Alternatively, dma_map_phys() with the physical address of each page could be used here.
+ dma_addr_t dma_addr = dma_map_page(dev, pages[i], 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dma_addr))
+ break;
+ dma_addrs[i] = dma_addr;
+ dma_sync_single_for_device(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
+ }
+ if (i < AR_BUFFERS) {
+ while (i-- > 0)
+ dma_unmap_page(dev, dma_addrs[i], PAGE_SIZE, DMA_FROM_DEVICE);
+ vunmap(vaddr);
+ release_pages(pages, nr_populated);
+ return -ENOMEM;
+ }
+
+ memcpy(ctx->dma_addrs, dma_addrs, sizeof(ctx->dma_addrs));
+ ctx->buffer = vaddr;
+ memcpy(ctx->pages, pages, sizeof(ctx->pages));
ctx->descriptors = ohci->misc_buffer + descriptors_offset;
ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset;
@@ -880,17 +909,12 @@ static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
DESCRIPTOR_STATUS |
DESCRIPTOR_BRANCH_ALWAYS);
- d->data_address = cpu_to_le32(ar_buffer_bus(ctx, i));
+ d->data_address = cpu_to_le32(ctx->dma_addrs[i]);
d->branch_address = cpu_to_le32(ctx->descriptors_bus +
ar_next_buffer_index(i) * sizeof(struct descriptor));
}
return 0;
-
-out_of_memory:
- ar_context_release(ctx);
-
- return -ENOMEM;
}
static void ar_context_run(struct ar_context *ctx)
@@ -2717,29 +2741,28 @@ static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
static void flush_iso_completions(struct iso_context *ctx, enum fw_iso_context_completions_cause cause)
{
- trace_isoc_inbound_single_completions(&ctx->base, ctx->last_timestamp, cause, ctx->header,
- ctx->header_length);
- trace_isoc_outbound_completions(&ctx->base, ctx->last_timestamp, cause, ctx->header,
- ctx->header_length);
+ trace_isoc_inbound_single_completions(&ctx->base, ctx->sc.last_timestamp, cause,
+ ctx->sc.header, ctx->sc.header_length);
+ trace_isoc_outbound_completions(&ctx->base, ctx->sc.last_timestamp, cause, ctx->sc.header,
+ ctx->sc.header_length);
- ctx->base.callback.sc(&ctx->base, ctx->last_timestamp,
- ctx->header_length, ctx->header,
- ctx->base.callback_data);
- ctx->header_length = 0;
+ ctx->base.callback.sc(&ctx->base, ctx->sc.last_timestamp, ctx->sc.header_length,
+ ctx->sc.header, ctx->base.callback_data);
+ ctx->sc.header_length = 0;
}
static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr)
{
u32 *ctx_hdr;
- if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) {
- if (ctx->base.drop_overflow_headers)
+ if (ctx->sc.header_length + ctx->base.header_size > ctx->base.header_storage_size) {
+ if (ctx->base.flags & FW_ISO_CONTEXT_FLAG_DROP_OVERFLOW_HEADERS)
return;
flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW);
}
- ctx_hdr = ctx->header + ctx->header_length;
- ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]);
+ ctx_hdr = ctx->sc.header + ctx->sc.header_length;
+ ctx->sc.last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]);
/*
* The two iso header quadlets are byteswapped to little
@@ -2752,7 +2775,7 @@ static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr)
ctx_hdr[1] = swab32(dma_hdr[0]); /* timestamp */
if (ctx->base.header_size > 8)
memcpy(&ctx_hdr[2], &dma_hdr[2], ctx->base.header_size - 8);
- ctx->header_length += ctx->base.header_size;
+ ctx->sc.header_length += ctx->base.header_size;
}
static int handle_ir_packet_per_buffer(struct context *context,
@@ -2805,8 +2828,8 @@ static int handle_ir_buffer_fill(struct context *context,
buffer_dma = le32_to_cpu(last->data_address);
if (completed > 0) {
- ctx->mc_buffer_bus = buffer_dma;
- ctx->mc_completed = completed;
+ ctx->mc.buffer_bus = buffer_dma;
+ ctx->mc.completed = completed;
}
if (res_count != 0)
@@ -2825,7 +2848,7 @@ static int handle_ir_buffer_fill(struct context *context,
ctx->base.callback.mc(&ctx->base,
buffer_dma + completed,
ctx->base.callback_data);
- ctx->mc_completed = 0;
+ ctx->mc.completed = 0;
}
return 1;
@@ -2834,17 +2857,16 @@ static int handle_ir_buffer_fill(struct context *context,
static void flush_ir_buffer_fill(struct iso_context *ctx)
{
dma_sync_single_range_for_cpu(ctx->context.ohci->card.device,
- ctx->mc_buffer_bus & PAGE_MASK,
- ctx->mc_buffer_bus & ~PAGE_MASK,
- ctx->mc_completed, DMA_FROM_DEVICE);
+ ctx->mc.buffer_bus & PAGE_MASK,
+ ctx->mc.buffer_bus & ~PAGE_MASK,
+ ctx->mc.completed, DMA_FROM_DEVICE);
- trace_isoc_inbound_multiple_completions(&ctx->base, ctx->mc_completed,
+ trace_isoc_inbound_multiple_completions(&ctx->base, ctx->mc.completed,
FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH);
- ctx->base.callback.mc(&ctx->base,
- ctx->mc_buffer_bus + ctx->mc_completed,
+ ctx->base.callback.mc(&ctx->base, ctx->mc.buffer_bus + ctx->mc.completed,
ctx->base.callback_data);
- ctx->mc_completed = 0;
+ ctx->mc.completed = 0;
}
static inline void sync_it_packet_for_cpu(struct context *context,
@@ -2902,18 +2924,18 @@ static int handle_it_packet(struct context *context,
sync_it_packet_for_cpu(context, d);
- if (ctx->header_length + 4 > PAGE_SIZE) {
- if (ctx->base.drop_overflow_headers)
+ if (ctx->sc.header_length + 4 > ctx->base.header_storage_size) {
+ if (ctx->base.flags & FW_ISO_CONTEXT_FLAG_DROP_OVERFLOW_HEADERS)
return 1;
flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW);
}
- ctx_hdr = ctx->header + ctx->header_length;
- ctx->last_timestamp = le16_to_cpu(last->res_count);
+ ctx_hdr = ctx->sc.header + ctx->sc.header_length;
+ ctx->sc.last_timestamp = le16_to_cpu(last->res_count);
/* Present this value as big-endian to match the receive code */
*ctx_hdr = cpu_to_be32((le16_to_cpu(pd->transfer_status) << 16) |
le16_to_cpu(pd->res_count));
- ctx->header_length += 4;
+ ctx->sc.header_length += 4;
if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT);
@@ -2932,10 +2954,11 @@ static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels)
ohci->mc_channels = channels;
}
-static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
- int type, int channel, size_t header_size)
+static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, int type, int channel,
+ size_t header_size, size_t header_storage_size)
{
struct fw_ohci *ohci = fw_ohci(card);
+ void *header __free(kvfree) = NULL;
struct iso_context *ctx;
descriptor_callback_t callback;
u64 *channels;
@@ -2990,26 +3013,29 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
}
memset(ctx, 0, sizeof(*ctx));
- ctx->header_length = 0;
- ctx->header = (void *) __get_free_page(GFP_KERNEL);
- if (ctx->header == NULL) {
- ret = -ENOMEM;
- goto out;
+
+ if (type != FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) {
+ ctx->sc.header_length = 0;
+ header = kvmalloc(header_storage_size, GFP_KERNEL);
+ if (!header) {
+ ret = -ENOMEM;
+ goto out;
+ }
}
+
ret = context_init(&ctx->context, ohci, regs, callback);
if (ret < 0)
- goto out_with_header;
+ goto out;
fw_iso_context_init_work(&ctx->base, ohci_isoc_context_work);
- if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) {
+ if (type != FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) {
+ ctx->sc.header = no_free_ptr(header);
+ } else {
set_multichannel_mask(ohci, 0);
- ctx->mc_completed = 0;
+ ctx->mc.completed = 0;
}
return &ctx->base;
-
- out_with_header:
- free_page((unsigned long)ctx->header);
out:
scoped_guard(spinlock_irq, &ohci->lock) {
switch (type) {
@@ -3109,7 +3135,11 @@ static void ohci_free_iso_context(struct fw_iso_context *base)
ohci_stop_iso(base);
context_release(&ctx->context);
- free_page((unsigned long)ctx->header);
+
+ if (base->type != FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) {
+ kvfree(ctx->sc.header);
+ ctx->sc.header = NULL;
+ }
guard(spinlock_irqsave)(&ohci->lock);
@@ -3184,7 +3214,7 @@ static int queue_iso_transmit(struct iso_context *ctx,
struct descriptor *d, *last, *pd;
struct fw_iso_packet *p;
__le32 *header;
- dma_addr_t d_bus, page_bus;
+ dma_addr_t d_bus;
u32 z, header_z, payload_z, irq;
u32 payload_index, payload_end_index, next_page_index;
int page, end_page, i, length, offset;
@@ -3254,11 +3284,11 @@ static int queue_iso_transmit(struct iso_context *ctx,
min(next_page_index, payload_end_index) - payload_index;
pd[i].req_count = cpu_to_le16(length);
- page_bus = page_private(buffer->pages[page]);
- pd[i].data_address = cpu_to_le32(page_bus + offset);
+ dma_addr_t dma_addr = buffer->dma_addrs[page];
+ pd[i].data_address = cpu_to_le32(dma_addr + offset);
dma_sync_single_range_for_device(ctx->context.ohci->card.device,
- page_bus, offset, length,
+ dma_addr, offset, length,
DMA_TO_DEVICE);
payload_index += length;
@@ -3287,7 +3317,7 @@ static int queue_iso_packet_per_buffer(struct iso_context *ctx,
{
struct device *device = ctx->context.ohci->card.device;
struct descriptor *d, *pd;
- dma_addr_t d_bus, page_bus;
+ dma_addr_t d_bus;
u32 z, header_z, rest;
int i, j, length;
int page, offset, packet_count, header_size, payload_per_buffer;
@@ -3337,10 +3367,10 @@ static int queue_iso_packet_per_buffer(struct iso_context *ctx,
pd->res_count = pd->req_count;
pd->transfer_status = 0;
- page_bus = page_private(buffer->pages[page]);
- pd->data_address = cpu_to_le32(page_bus + offset);
+ dma_addr_t dma_addr = buffer->dma_addrs[page];
+ pd->data_address = cpu_to_le32(dma_addr + offset);
- dma_sync_single_range_for_device(device, page_bus,
+ dma_sync_single_range_for_device(device, dma_addr,
offset, length,
DMA_FROM_DEVICE);
@@ -3367,7 +3397,7 @@ static int queue_iso_buffer_fill(struct iso_context *ctx,
unsigned long payload)
{
struct descriptor *d;
- dma_addr_t d_bus, page_bus;
+ dma_addr_t d_bus;
int page, offset, rest, z, i, length;
page = payload >> PAGE_SHIFT;
@@ -3400,11 +3430,11 @@ static int queue_iso_buffer_fill(struct iso_context *ctx,
d->res_count = d->req_count;
d->transfer_status = 0;
- page_bus = page_private(buffer->pages[page]);
- d->data_address = cpu_to_le32(page_bus + offset);
+ dma_addr_t dma_addr = buffer->dma_addrs[page];
+ d->data_address = cpu_to_le32(dma_addr + offset);
dma_sync_single_range_for_device(ctx->context.ohci->card.device,
- page_bus, offset, length,
+ dma_addr, offset, length,
DMA_FROM_DEVICE);
rest -= length;
@@ -3457,11 +3487,11 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base)
switch (base->type) {
case FW_ISO_CONTEXT_TRANSMIT:
case FW_ISO_CONTEXT_RECEIVE:
- if (ctx->header_length != 0)
+ if (ctx->sc.header_length != 0)
flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH);
break;
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
- if (ctx->mc_completed != 0)
+ if (ctx->mc.completed != 0)
flush_ir_buffer_fill(ctx);
break;
default:
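One detail of ar_context_init() above worth spelling out is the wraparound mapping: the first AR_WRAPAROUND_PAGES pages are handed to vmap() a second time behind the last page, so an asynchronous packet that crosses the end of the ring can still be read through one contiguous kernel virtual address range. A minimal sketch of the technique with hypothetical constants:

#include <linux/mm.h>
#include <linux/vmalloc.h>

#define EXAMPLE_RING_PAGES	8	/* hypothetical ring size */
#define EXAMPLE_WRAP_PAGES	1	/* pages aliased again at the end for wraparound */

/* Map a page ring so that data wrapping from the last page into the first
 * appears contiguous in kernel virtual address space. */
static void *example_map_ring(struct page **pages)
{
	struct page *vmap_pages[EXAMPLE_RING_PAGES + EXAMPLE_WRAP_PAGES];
	unsigned int i;

	for (i = 0; i < EXAMPLE_RING_PAGES; i++)
		vmap_pages[i] = pages[i];
	/* Alias the first page(s) once more behind the last one. */
	for (i = 0; i < EXAMPLE_WRAP_PAGES; i++)
		vmap_pages[EXAMPLE_RING_PAGES + i] = pages[i];

	return vmap(vmap_pages, EXAMPLE_RING_PAGES + EXAMPLE_WRAP_PAGES, VM_MAP, PAGE_KERNEL);
}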
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 6143b7d28eac..986d712e4d94 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -526,14 +526,13 @@ struct fw_iso_packet {
struct fw_iso_buffer {
enum dma_data_direction direction;
struct page **pages;
+ dma_addr_t *dma_addrs;
int page_count;
- int page_count_mapped;
};
int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
int page_count, enum dma_data_direction direction);
void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
-size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed);
struct fw_iso_context;
typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
@@ -547,21 +546,26 @@ union fw_iso_callback {
fw_iso_mc_callback_t mc;
};
+enum fw_iso_context_flag {
+ FW_ISO_CONTEXT_FLAG_DROP_OVERFLOW_HEADERS = BIT(0),
+};
+
struct fw_iso_context {
struct fw_card *card;
struct work_struct work;
int type;
int channel;
int speed;
- bool drop_overflow_headers;
+ int flags;
size_t header_size;
+ size_t header_storage_size;
union fw_iso_callback callback;
void *callback_data;
};
-struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
- int type, int channel, int speed, size_t header_size,
- fw_iso_callback_t callback, void *callback_data);
+struct fw_iso_context *__fw_iso_context_create(struct fw_card *card, int type, int channel,
+ int speed, size_t header_size, size_t header_storage_size,
+ union fw_iso_callback callback, void *callback_data);
int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels);
int fw_iso_context_queue(struct fw_iso_context *ctx,
struct fw_iso_packet *packet,
@@ -570,6 +574,26 @@ int fw_iso_context_queue(struct fw_iso_context *ctx,
void fw_iso_context_queue_flush(struct fw_iso_context *ctx);
int fw_iso_context_flush_completions(struct fw_iso_context *ctx);
+static inline struct fw_iso_context *fw_iso_context_create(struct fw_card *card, int type,
+ int channel, int speed, size_t header_size, fw_iso_callback_t callback,
+ void *callback_data)
+{
+ union fw_iso_callback cb = { .sc = callback };
+
+ return __fw_iso_context_create(card, type, channel, speed, header_size, PAGE_SIZE, cb,
+ callback_data);
+}
+
+static inline struct fw_iso_context *fw_iso_context_create_with_header_storage_size(
+ struct fw_card *card, int type, int channel, int speed, size_t header_size,
+ size_t header_storage_size, fw_iso_callback_t callback, void *callback_data)
+{
+ union fw_iso_callback cb = { .sc = callback };
+
+ return __fw_iso_context_create(card, type, channel, speed, header_size, header_storage_size,
+ cb, callback_data);
+}
+
/**
* fw_iso_context_schedule_flush_completions() - schedule work item to process isochronous context.
* @ctx: the isochronous context
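The new fw_iso_context_create_with_header_storage_size() wrapper above lets a driver size the header accumulation buffer from its own queue depth instead of the historical PAGE_SIZE; a minimal usage sketch with hypothetical values (the in-tree caller is amdtp-stream.c below):

#include <linux/firewire.h>

/* Hypothetical example: accumulate headers for a whole queue before the callback fires. */
static struct fw_iso_context *example_create_rx_context(struct fw_card *card, int channel,
							 int speed, fw_iso_callback_t callback,
							 void *callback_data)
{
	size_t header_size = 8;		/* isochronous header plus timestamp */
	unsigned int queue_size = 64;	/* hypothetical number of queued packets */

	return fw_iso_context_create_with_header_storage_size(card, FW_ISO_CONTEXT_RECEIVE,
			channel, speed, header_size, header_size * queue_size,
			callback, callback_data);
}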
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index 5cdc34877fc1..223c880af802 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -191,8 +191,6 @@ int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
struct snd_pcm_runtime *runtime)
{
struct snd_pcm_hardware *hw = &runtime->hw;
- unsigned int ctx_header_size;
- unsigned int maximum_usec_per_period;
int err;
hw->info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
@@ -212,21 +210,6 @@ int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
hw->period_bytes_max = hw->period_bytes_min * 2048;
hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;
- // Linux driver for 1394 OHCI controller voluntarily flushes isoc
- // context when total size of accumulated context header reaches
- // PAGE_SIZE. This kicks work for the isoc context and brings
- // callback in the middle of scheduled interrupts.
- // Although AMDTP streams in the same domain use the same events per
- // IRQ, use the largest size of context header between IT/IR contexts.
- // Here, use the value of context header in IR context is for both
- // contexts.
- if (!(s->flags & CIP_NO_HEADER))
- ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
- else
- ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
- maximum_usec_per_period = USEC_PER_SEC * PAGE_SIZE /
- CYCLES_PER_SECOND / ctx_header_size;
-
// In IEC 61883-6, one isoc packet can transfer events up to the value
// of syt interval. This comes from the interval of isoc cycle. As 1394
// OHCI controller can generate hardware IRQ per isoc packet, the
@@ -239,9 +222,10 @@ int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
// Due to the above protocol design, the minimum PCM frames per
// interrupt should be double of the value of syt interval, thus it is
// 250 usec.
+ // There is no strict limit, but cap it at 250 msec to avoid excessive resource consumption.
err = snd_pcm_hw_constraint_minmax(runtime,
SNDRV_PCM_HW_PARAM_PERIOD_TIME,
- 250, maximum_usec_per_period);
+ 250, USEC_PER_SEC / 4);
if (err < 0)
goto end;
@@ -261,6 +245,7 @@ int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
SNDRV_PCM_HW_PARAM_RATE, -1);
if (err < 0)
goto end;
+
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
apply_constraint_to_size, NULL,
SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
@@ -1715,7 +1700,9 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
} else {
dir = DMA_TO_DEVICE;
type = FW_ISO_CONTEXT_TRANSMIT;
- ctx_header_size = 0; // No effect for IT context.
+ // Although it has no effect for an IT context, this value is required to compute the size
+ // of the header storage correctly.
+ ctx_header_size = sizeof(__be32);
}
max_ctx_payload_size = amdtp_stream_get_max_ctx_payload_size(s);
@@ -1724,9 +1711,9 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
return err;
s->queue_size = queue_size;
- s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
- type, channel, speed, ctx_header_size,
- amdtp_stream_first_callback, s);
+ s->context = fw_iso_context_create_with_header_storage_size(
+ fw_parent_device(s->unit)->card, type, channel, speed, ctx_header_size,
+ ctx_header_size * queue_size, amdtp_stream_first_callback, s);
if (IS_ERR(s->context)) {
err = PTR_ERR(s->context);
if (err == -EBUSY)