-rw-r--r--  drivers/firewire/core-device.c |  3
-rw-r--r--  drivers/firewire/ohci.c        |  6
-rw-r--r--  drivers/firewire/sbp2.c        | 67
3 files changed, 35 insertions, 41 deletions
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 2c6d5e118ac1..f9e3aee6a211 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -115,6 +115,9 @@ static int textual_leaf_to_string(const u32 *block, char *buf, size_t size)
  *
  * The string is taken from a minimal ASCII text descriptor leaf after
  * the immediate entry with @key.  The string is zero-terminated.
+ * An overlong string is silently truncated such that it and the
+ * zero byte fit into @size.
+ *
  * Returns strlen(buf) or a negative error code.
  */
 int fw_csr_string(const u32 *directory, int key, char *buf, size_t size)
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index a66a3217f1d9..aff9018d0658 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -689,8 +689,7 @@ static void ar_context_release(struct ar_context *ctx)
 {
 	unsigned int i;
 
-	if (ctx->buffer)
-		vm_unmap_ram(ctx->buffer, AR_BUFFERS + AR_WRAPAROUND_PAGES);
+	vunmap(ctx->buffer);
 
 	for (i = 0; i < AR_BUFFERS; i++)
 		if (ctx->pages[i]) {
@@ -1018,8 +1017,7 @@ static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
 		pages[i]              = ctx->pages[i];
 	for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
 		pages[AR_BUFFERS + i] = ctx->pages[i];
-	ctx->buffer = vm_map_ram(pages, AR_BUFFERS + AR_WRAPAROUND_PAGES,
-				 -1, PAGE_KERNEL);
+	ctx->buffer = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
 	if (!ctx->buffer)
 		goto out_of_memory;
 
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 7aef911fdc71..64ac8f8f5098 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -174,6 +174,7 @@ struct sbp2_target {
 	unsigned int mgt_orb_timeout;
 	unsigned int max_payload;
 
+	spinlock_t lock;
 	int dont_block;	/* counter for each logical unit */
 	int blocked;	/* ditto */
 };
@@ -270,6 +271,7 @@ struct sbp2_orb {
 	dma_addr_t request_bus;
 	int rcode;
 	void (*callback)(struct sbp2_orb * orb, struct sbp2_status * status);
+	struct sbp2_logical_unit *lu;
 	struct list_head link;
 };
 
@@ -321,7 +323,6 @@ struct sbp2_command_orb {
 		u8 command_block[SBP2_MAX_CDB_SIZE];
 	} request;
 	struct scsi_cmnd *cmd;
-	struct sbp2_logical_unit *lu;
 
 	struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8)));
 	dma_addr_t page_table_bus;
@@ -444,7 +445,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
 	}
 
 	/* Lookup the orb corresponding to this status write. */
-	spin_lock_irqsave(&card->lock, flags);
+	spin_lock_irqsave(&lu->tgt->lock, flags);
 	list_for_each_entry(orb, &lu->orb_list, link) {
 		if (STATUS_GET_ORB_HIGH(status) == 0 &&
 		    STATUS_GET_ORB_LOW(status) == orb->request_bus) {
@@ -453,7 +454,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
 			break;
 		}
 	}
-	spin_unlock_irqrestore(&card->lock, flags);
+	spin_unlock_irqrestore(&lu->tgt->lock, flags);
 
 	if (&orb->link != &lu->orb_list) {
 		orb->callback(orb, &status);
@@ -480,18 +481,18 @@ static void complete_transaction(struct fw_card *card, int rcode,
 	 * been set and only does the cleanup if the transaction
 	 * failed and we didn't already get a status write.
 	 */
-	spin_lock_irqsave(&card->lock, flags);
+	spin_lock_irqsave(&orb->lu->tgt->lock, flags);
 
 	if (orb->rcode == -1)
 		orb->rcode = rcode;
 	if (orb->rcode != RCODE_COMPLETE) {
 		list_del(&orb->link);
-		spin_unlock_irqrestore(&card->lock, flags);
+		spin_unlock_irqrestore(&orb->lu->tgt->lock, flags);
 
 		orb->callback(orb, NULL);
 		kref_put(&orb->kref, free_orb); /* orb callback reference */
 	} else {
-		spin_unlock_irqrestore(&card->lock, flags);
+		spin_unlock_irqrestore(&orb->lu->tgt->lock, flags);
 	}
 
 	kref_put(&orb->kref, free_orb); /* transaction callback reference */
@@ -507,9 +508,10 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
 	orb_pointer.high = 0;
 	orb_pointer.low  = cpu_to_be32(orb->request_bus);
 
-	spin_lock_irqsave(&device->card->lock, flags);
+	orb->lu = lu;
+	spin_lock_irqsave(&lu->tgt->lock, flags);
 	list_add_tail(&orb->link, &lu->orb_list);
-	spin_unlock_irqrestore(&device->card->lock, flags);
+	spin_unlock_irqrestore(&lu->tgt->lock, flags);
 
 	kref_get(&orb->kref); /* transaction callback reference */
 	kref_get(&orb->kref); /* orb callback reference */
@@ -524,13 +526,12 @@ static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
 	struct fw_device *device = target_parent_device(lu->tgt);
 	struct sbp2_orb *orb, *next;
 	struct list_head list;
-	unsigned long flags;
 	int retval = -ENOENT;
 
 	INIT_LIST_HEAD(&list);
-	spin_lock_irqsave(&device->card->lock, flags);
+	spin_lock_irq(&lu->tgt->lock);
 	list_splice_init(&lu->orb_list, &list);
-	spin_unlock_irqrestore(&device->card->lock, flags);
+	spin_unlock_irq(&lu->tgt->lock);
 
 	list_for_each_entry_safe(orb, next, &list, link) {
 		retval = 0;
@@ -687,16 +688,11 @@ static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu)
 			&d, 4, complete_agent_reset_write_no_wait, t);
 }
 
-static inline void sbp2_allow_block(struct sbp2_logical_unit *lu)
+static inline void sbp2_allow_block(struct sbp2_target *tgt)
 {
-	/*
-	 * We may access dont_block without taking card->lock here:
-	 * All callers of sbp2_allow_block() and all callers of sbp2_unblock()
-	 * are currently serialized against each other.
-	 * And a wrong result in sbp2_conditionally_block()'s access of
-	 * dont_block is rather harmless, it simply misses its first chance.
-	 */
-	--lu->tgt->dont_block;
+	spin_lock_irq(&tgt->lock);
+	--tgt->dont_block;
+	spin_unlock_irq(&tgt->lock);
 }
 
 /*
@@ -705,7 +701,7 @@ static inline void sbp2_allow_block(struct sbp2_logical_unit *lu)
  *     logical units have been finished (indicated by dont_block == 0).
  *   - lu->generation is stale.
  *
- * Note, scsi_block_requests() must be called while holding card->lock,
+ * Note, scsi_block_requests() must be called while holding tgt->lock,
  * otherwise it might foil sbp2_[conditionally_]unblock()'s attempt to
  * unblock the target.
  */
@@ -717,20 +713,20 @@ static void sbp2_conditionally_block(struct sbp2_logical_unit *lu)
 		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
 	unsigned long flags;
 
-	spin_lock_irqsave(&card->lock, flags);
+	spin_lock_irqsave(&tgt->lock, flags);
 	if (!tgt->dont_block && !lu->blocked &&
 	    lu->generation != card->generation) {
 		lu->blocked = true;
 		if (++tgt->blocked == 1)
 			scsi_block_requests(shost);
 	}
-	spin_unlock_irqrestore(&card->lock, flags);
+	spin_unlock_irqrestore(&tgt->lock, flags);
 }
 
 /*
  * Unblocks lu->tgt as soon as all its logical units can be unblocked.
  * Note, it is harmless to run scsi_unblock_requests() outside the
- * card->lock protected section.  On the other hand, running it inside
+ * tgt->lock protected section.  On the other hand, running it inside
  * the section might clash with shost->host_lock.
  */
 static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
@@ -739,15 +735,14 @@ static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
 	struct fw_card *card = target_parent_device(tgt)->card;
 	struct Scsi_Host *shost =
 		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
-	unsigned long flags;
 	bool unblock = false;
 
-	spin_lock_irqsave(&card->lock, flags);
+	spin_lock_irq(&tgt->lock);
 	if (lu->blocked && lu->generation == card->generation) {
 		lu->blocked = false;
 		unblock = --tgt->blocked == 0;
 	}
-	spin_unlock_irqrestore(&card->lock, flags);
+	spin_unlock_irq(&tgt->lock);
 
 	if (unblock)
 		scsi_unblock_requests(shost);
@@ -756,19 +751,17 @@ static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
 /*
  * Prevents future blocking of tgt and unblocks it.
  * Note, it is harmless to run scsi_unblock_requests() outside the
- * card->lock protected section.  On the other hand, running it inside
+ * tgt->lock protected section.  On the other hand, running it inside
 * the section might clash with shost->host_lock.
  */
 static void sbp2_unblock(struct sbp2_target *tgt)
 {
-	struct fw_card *card = target_parent_device(tgt)->card;
 	struct Scsi_Host *shost =
 		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
-	unsigned long flags;
 
-	spin_lock_irqsave(&card->lock, flags);
+	spin_lock_irq(&tgt->lock);
 	++tgt->dont_block;
-	spin_unlock_irqrestore(&card->lock, flags);
+	spin_unlock_irq(&tgt->lock);
 
 	scsi_unblock_requests(shost);
 }
@@ -904,7 +897,7 @@ static void sbp2_login(struct work_struct *work)
 	/* No error during __scsi_add_device() */
 	lu->has_sdev = true;
 	scsi_device_put(sdev);
-	sbp2_allow_block(lu);
+	sbp2_allow_block(tgt);
 
 	return;
 
@@ -1163,6 +1156,7 @@ static int sbp2_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
 	dev_set_drvdata(&unit->device, tgt);
 	tgt->unit = unit;
 	INIT_LIST_HEAD(&tgt->lu_list);
+	spin_lock_init(&tgt->lock);
 	tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
 
 	if (fw_device_enable_phys_dma(device) < 0)
@@ -1359,12 +1353,12 @@ static void complete_command_orb(struct sbp2_orb *base_orb,
 {
 	struct sbp2_command_orb *orb =
 		container_of(base_orb, struct sbp2_command_orb, base);
-	struct fw_device *device = target_parent_device(orb->lu->tgt);
+	struct fw_device *device = target_parent_device(base_orb->lu->tgt);
 	int result;
 
 	if (status != NULL) {
 		if (STATUS_GET_DEAD(*status))
-			sbp2_agent_reset_no_wait(orb->lu);
+			sbp2_agent_reset_no_wait(base_orb->lu);
 
 		switch (STATUS_GET_RESPONSE(*status)) {
 		case SBP2_STATUS_REQUEST_COMPLETE:
@@ -1390,7 +1384,7 @@ static void complete_command_orb(struct sbp2_orb *base_orb,
 		 * or when sending the write (less likely).
 		 */
 		result = DID_BUS_BUSY << 16;
-		sbp2_conditionally_block(orb->lu);
+		sbp2_conditionally_block(base_orb->lu);
 	}
 
 	dma_unmap_single(device->card->device, orb->base.request_bus,
@@ -1487,7 +1481,6 @@ static int sbp2_scsi_queuecommand(struct Scsi_Host *shost,
 	/* Initialize rcode to something not RCODE_COMPLETE. */
 	orb->base.rcode = -1;
 	kref_init(&orb->base.kref);
-	orb->lu = lu;
 	orb->cmd = cmd;
 	orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL);
 	orb->request.misc = cpu_to_be32(
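
Reviewer note, not part of the patch: the core-device.c hunk only documents behavior fw_csr_string() already has. A minimal sketch of a caller relying on that truncation guarantee, assuming a unit directory and CSR_MODEL as the key; the helper name is hypothetical:

#include <linux/firewire.h>

/* fw_csr_string() copies at most size - 1 characters and always
 * NUL-terminates, so a fixed-size buffer is safe even for overlong
 * descriptor leaves. */
static void demo_print_model(struct fw_unit *unit)
{
	char model[32];		/* longer strings are silently truncated */

	if (fw_csr_string(unit->directory, CSR_MODEL, model, sizeof(model)) >= 0)
		dev_info(&unit->device, "model: %s\n", model);
}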
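
For the ohci.c hunks: vmap() maps an arbitrary array of pages (here with the wrap-around pages listed a second time) into one virtually contiguous kernel range, and vunmap(NULL) is a no-op, which is why the explicit NULL check can go away. A standalone sketch of that pattern with hypothetical demo_* names, not the driver's own code:

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Map 'count' pages as one contiguous kernel virtual range. */
static void *demo_map(struct page **pages, unsigned int count)
{
	/* VM_MAP marks the area as a plain vmap() mapping; PAGE_KERNEL
	 * gives ordinary cached kernel read/write permissions. */
	return vmap(pages, count, VM_MAP, PAGE_KERNEL);
}

static void demo_unmap(void *buffer)
{
	vunmap(buffer);		/* safe even if buffer is NULL */
}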
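
For the sbp2.c hunks: uses of the global card->lock are replaced by a per-target spinlock that protects lu->orb_list and the dont_block/blocked counters. A hedged sketch of the resulting pattern; the struct and function names are illustrative, not the driver's:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_target {
	spinlock_t lock;		/* guards orb_list and the counters */
	struct list_head orb_list;
	int dont_block;
	int blocked;
};

static void demo_target_init(struct demo_target *tgt)
{
	spin_lock_init(&tgt->lock);	/* mirrors the new sbp2_probe() line */
	INIT_LIST_HEAD(&tgt->orb_list);
}

/* Completion paths may run with interrupts already disabled, so they
 * keep the irqsave/irqrestore variants, as sbp2_status_write() and
 * complete_transaction() do in the patch. */
static void demo_complete(struct demo_target *tgt, struct list_head *link)
{
	unsigned long flags;

	spin_lock_irqsave(&tgt->lock, flags);
	list_del(link);
	spin_unlock_irqrestore(&tgt->lock, flags);
}

/* Paths that only run in process context can take the cheaper
 * spin_lock_irq()/spin_unlock_irq() pair, as the patch does in
 * sbp2_allow_block(), sbp2_unblock(), and sbp2_cancel_orbs(). */
static void demo_allow_block(struct demo_target *tgt)
{
	spin_lock_irq(&tgt->lock);
	--tgt->dont_block;
	spin_unlock_irq(&tgt->lock);
}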