Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/aoe/aoe.h              |   4
-rw-r--r--  drivers/block/aoe/aoeblk.c           | 100
-rw-r--r--  drivers/block/aoe/aoecmd.c           |   4
-rw-r--r--  drivers/block/aoe/aoedev.c           |  10
-rw-r--r--  drivers/block/cciss.c                |   7
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c     |   2
-rw-r--r--  drivers/block/mg_disk.c              |   2
-rw-r--r--  drivers/block/mtip32xx/Kconfig       |   2
-rw-r--r--  drivers/block/nvme-core.c            | 585
-rw-r--r--  drivers/block/nvme-scsi.c            |  24
-rw-r--r--  drivers/block/osdblk.c               |   2
-rw-r--r--  drivers/block/pktcdvd.c              | 278
-rw-r--r--  drivers/block/rbd.c                  |  52
-rw-r--r--  drivers/block/swim.c                 |   2
-rw-r--r--  drivers/block/xen-blkback/xenbus.c   |   2
15 files changed, 694 insertions, 382 deletions
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 025c41d3cb33..14a9d1912318 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -1,5 +1,5 @@
/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
-#define VERSION "83"
+#define VERSION "85"
#define AOE_MAJOR 152
#define DEVICE_NAME "aoe"
@@ -169,6 +169,7 @@ struct aoedev {
ulong ref;
struct work_struct work;/* disk create work struct */
struct gendisk *gd;
+ struct dentry *debugfs;
struct request_queue *blkq;
struct hd_geometry geo;
sector_t ssize;
@@ -206,6 +207,7 @@ struct ktstate {
int aoeblk_init(void);
void aoeblk_exit(void);
void aoeblk_gdalloc(void *);
+void aoedisk_rm_debugfs(struct aoedev *d);
void aoedisk_rm_sysfs(struct aoedev *d);
int aoechr_init(void);
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 916d9ed5c8aa..dd73e1ff1759 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */
+/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
/*
* aoeblk.c
* block device routines
@@ -17,11 +17,13 @@
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
+#include <linux/debugfs.h>
#include <scsi/sg.h>
#include "aoe.h"
static DEFINE_MUTEX(aoeblk_mutex);
static struct kmem_cache *buf_pool_cache;
+static struct dentry *aoe_debugfs_dir;
/* GPFS needs a larger value than the default. */
static int aoe_maxsectors;
@@ -108,6 +110,55 @@ static ssize_t aoedisk_show_payload(struct device *dev,
return snprintf(page, PAGE_SIZE, "%lu\n", d->maxbcnt);
}
+static int aoedisk_debugfs_show(struct seq_file *s, void *ignored)
+{
+ struct aoedev *d;
+ struct aoetgt **t, **te;
+ struct aoeif *ifp, *ife;
+ unsigned long flags;
+ char c;
+
+ d = s->private;
+ seq_printf(s, "rttavg: %d rttdev: %d\n",
+ d->rttavg >> RTTSCALE,
+ d->rttdev >> RTTDSCALE);
+ seq_printf(s, "nskbpool: %d\n", skb_queue_len(&d->skbpool));
+ seq_printf(s, "kicked: %ld\n", d->kicked);
+ seq_printf(s, "maxbcnt: %ld\n", d->maxbcnt);
+ seq_printf(s, "ref: %ld\n", d->ref);
+
+ spin_lock_irqsave(&d->lock, flags);
+ t = d->targets;
+ te = t + d->ntargets;
+ for (; t < te && *t; t++) {
+ c = '\t';
+ seq_printf(s, "falloc: %ld\n", (*t)->falloc);
+ seq_printf(s, "ffree: %p\n",
+ list_empty(&(*t)->ffree) ? NULL : (*t)->ffree.next);
+ seq_printf(s, "%pm:%d:%d:%d\n", (*t)->addr, (*t)->nout,
+ (*t)->maxout, (*t)->nframes);
+ seq_printf(s, "\tssthresh:%d\n", (*t)->ssthresh);
+ seq_printf(s, "\ttaint:%d\n", (*t)->taint);
+ seq_printf(s, "\tr:%d\n", (*t)->rpkts);
+ seq_printf(s, "\tw:%d\n", (*t)->wpkts);
+ ifp = (*t)->ifs;
+ ife = ifp + ARRAY_SIZE((*t)->ifs);
+ for (; ifp < ife && ifp->nd; ifp++) {
+ seq_printf(s, "%c%s", c, ifp->nd->name);
+ c = ',';
+ }
+ seq_puts(s, "\n");
+ }
+ spin_unlock_irqrestore(&d->lock, flags);
+
+ return 0;
+}
+
+static int aoe_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, aoedisk_debugfs_show, inode->i_private);
+}
+
static DEVICE_ATTR(state, S_IRUGO, aoedisk_show_state, NULL);
static DEVICE_ATTR(mac, S_IRUGO, aoedisk_show_mac, NULL);
static DEVICE_ATTR(netif, S_IRUGO, aoedisk_show_netif, NULL);
@@ -130,6 +181,44 @@ static const struct attribute_group attr_group = {
.attrs = aoe_attrs,
};
+static const struct file_operations aoe_debugfs_fops = {
+ .open = aoe_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void
+aoedisk_add_debugfs(struct aoedev *d)
+{
+ struct dentry *entry;
+ char *p;
+
+ if (aoe_debugfs_dir == NULL)
+ return;
+ p = strchr(d->gd->disk_name, '/');
+ if (p == NULL)
+ p = d->gd->disk_name;
+ else
+ p++;
+ BUG_ON(*p == '\0');
+ entry = debugfs_create_file(p, 0444, aoe_debugfs_dir, d,
+ &aoe_debugfs_fops);
+ if (IS_ERR_OR_NULL(entry)) {
+ pr_info("aoe: cannot create debugfs file for %s\n",
+ d->gd->disk_name);
+ return;
+ }
+ BUG_ON(d->debugfs);
+ d->debugfs = entry;
+}
+void
+aoedisk_rm_debugfs(struct aoedev *d)
+{
+ debugfs_remove(d->debugfs);
+ d->debugfs = NULL;
+}
+
static int
aoedisk_add_sysfs(struct aoedev *d)
{
@@ -330,6 +419,7 @@ aoeblk_gdalloc(void *vp)
add_disk(gd);
aoedisk_add_sysfs(d);
+ aoedisk_add_debugfs(d);
spin_lock_irqsave(&d->lock, flags);
WARN_ON(!(d->flags & DEVFL_GD_NOW));
@@ -351,6 +441,8 @@ err:
void
aoeblk_exit(void)
{
+ debugfs_remove_recursive(aoe_debugfs_dir);
+ aoe_debugfs_dir = NULL;
kmem_cache_destroy(buf_pool_cache);
}
@@ -362,7 +454,11 @@ aoeblk_init(void)
0, 0, NULL);
if (buf_pool_cache == NULL)
return -ENOMEM;
-
+ aoe_debugfs_dir = debugfs_create_dir("aoe", NULL);
+ if (IS_ERR_OR_NULL(aoe_debugfs_dir)) {
+ pr_info("aoe: cannot create debugfs directory\n");
+ aoe_debugfs_dir = NULL;
+ }
return 0;
}
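
The debugfs hookup above is the stock single_open()/seq_file pattern: the show callback renders the whole file into the seq_file, and inode->i_private carries the per-device pointer handed to debugfs_create_file(). A minimal sketch of the same wiring, with illustrative "mydrv" names that are not from this patch:

    /* Hedged sketch of the single_open debugfs pattern; mydrv_* names are hypothetical. */
    static int mydrv_debugfs_show(struct seq_file *s, void *unused)
    {
            struct mydrv_dev *d = s->private;   /* set from inode->i_private below */

            seq_printf(s, "state: %d\n", d->state);
            return 0;
    }

    static int mydrv_debugfs_open(struct inode *inode, struct file *file)
    {
            return single_open(file, mydrv_debugfs_show, inode->i_private);
    }

    static const struct file_operations mydrv_debugfs_fops = {
            .open    = mydrv_debugfs_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };

    /* entry = debugfs_create_file("state", 0444, parent_dir, d, &mydrv_debugfs_fops); */

As in aoeblk_init() above, the parent directory comes from debugfs_create_dir() and both results are checked with IS_ERR_OR_NULL(), since debugfs may be compiled out.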
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 4d45dba7fb8f..d2515435e23f 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -380,7 +380,6 @@ aoecmd_ata_rw(struct aoedev *d)
{
struct frame *f;
struct buf *buf;
- struct aoetgt *t;
struct sk_buff *skb;
struct sk_buff_head queue;
ulong bcnt, fbcnt;
@@ -391,7 +390,6 @@ aoecmd_ata_rw(struct aoedev *d)
f = newframe(d);
if (f == NULL)
return 0;
- t = *d->tgt;
bcnt = d->maxbcnt;
if (bcnt == 0)
bcnt = DEFAULTBCNT;
@@ -485,7 +483,6 @@ resend(struct aoedev *d, struct frame *f)
struct sk_buff *skb;
struct sk_buff_head queue;
struct aoe_hdr *h;
- struct aoe_atahdr *ah;
struct aoetgt *t;
char buf[128];
u32 n;
@@ -500,7 +497,6 @@ resend(struct aoedev *d, struct frame *f)
return;
}
h = (struct aoe_hdr *) skb_mac_header(skb);
- ah = (struct aoe_atahdr *) (h+1);
if (!(f->flags & FFL_PROBE)) {
snprintf(buf, sizeof(buf),
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 784c92e038d1..e774c50b6842 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -12,6 +12,7 @@
#include <linux/bitmap.h>
#include <linux/kdev_t.h>
#include <linux/moduleparam.h>
+#include <linux/string.h>
#include "aoe.h"
static void dummy_timer(ulong);
@@ -241,16 +242,12 @@ aoedev_downdev(struct aoedev *d)
static int
user_req(char *s, size_t slen, struct aoedev *d)
{
- char *p;
+ const char *p;
size_t lim;
if (!d->gd)
return 0;
- p = strrchr(d->gd->disk_name, '/');
- if (!p)
- p = d->gd->disk_name;
- else
- p += 1;
+ p = kbasename(d->gd->disk_name);
lim = sizeof(d->gd->disk_name);
lim -= p - d->gd->disk_name;
if (slen < lim)
@@ -278,6 +275,7 @@ freedev(struct aoedev *d)
del_timer_sync(&d->timer);
if (d->gd) {
+ aoedisk_rm_debugfs(d);
aoedisk_rm_sysfs(d);
del_gendisk(d->gd);
put_disk(d->gd);
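
The user_req() change swaps four lines of open-coded basename logic for kbasename() from <linux/string.h>. Its behavior, approximately (a sketch, not the kernel's exact implementation):

    /* Roughly what kbasename() does; the real helper lives in <linux/string.h>. */
    static inline const char *kbasename_sketch(const char *path)
    {
            const char *tail = strrchr(path, '/');
            return tail ? tail + 1 : path;
    }
    /* kbasename("etherd/e1.0") -> "e1.0";  kbasename("sda") -> "sda" */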
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 62b6c2cc80b5..d2d95ff5353b 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -4258,6 +4258,13 @@ static void cciss_find_board_params(ctlr_info_t *h)
h->nr_cmds = h->max_commands - 4 - cciss_tape_cmds;
h->maxsgentries = readl(&(h->cfgtable->MaxSGElements));
/*
+ * The P600 may exhibit poor performance under some workloads
+ * if we use the value in the configuration table. Limit this
+ * controller to MAXSGENTRIES (32) instead.
+ */
+ if (h->board_id == 0x3225103C)
+ h->maxsgentries = MAXSGENTRIES;
+ /*
* Limit in-command s/g elements to 32 to save dma'able memory.
* However spec says if 0, use 31
*/
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 64fbb8385cdc..b12c11ec4bd2 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -393,7 +393,7 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
* we must not block on IO to ourselves.
* Context is receiver thread or dmsetup. */
bytes = sizeof(struct page *)*want;
- new_pages = kzalloc(bytes, GFP_NOIO);
+ new_pages = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN);
if (!new_pages) {
new_pages = __vmalloc(bytes,
GFP_NOIO | __GFP_HIGHMEM | __GFP_ZERO,
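
Adding __GFP_NOWARN here just silences the allocation-failure warning for the kzalloc() attempt, because the __vmalloc() fallback makes that failure expected and recoverable. The general try-kmalloc-then-vmalloc shape, sketched (the free side must check which allocator won; drbd's own free path differs in detail):

    /* Hedged sketch of the kmalloc-first, vmalloc-fallback pattern. */
    void *buf = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN);
    if (!buf)
            buf = __vmalloc(bytes, GFP_NOIO | __GFP_HIGHMEM | __GFP_ZERO,
                            PAGE_KERNEL);
    /* ... */
    if (is_vmalloc_addr(buf))
            vfree(buf);
    else
            kfree(buf);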
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index a56cfcd5d648..77a60bedd7a3 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -636,7 +636,7 @@ ok_to_write:
mg_request(host->breq);
}
-void mg_times_out(unsigned long data)
+static void mg_times_out(unsigned long data)
{
struct mg_host *host = (struct mg_host *)data;
char *name;
diff --git a/drivers/block/mtip32xx/Kconfig b/drivers/block/mtip32xx/Kconfig
index 1fca1f996b45..0ba837fc62a8 100644
--- a/drivers/block/mtip32xx/Kconfig
+++ b/drivers/block/mtip32xx/Kconfig
@@ -4,6 +4,6 @@
config BLK_DEV_PCIESSD_MTIP32XX
tristate "Block Device Driver for Micron PCIe SSDs"
- depends on PCI && GENERIC_HARDIRQS
+ depends on PCI
help
This enables the block driver for Micron PCIe SSDs.
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index ce79a590b45b..da52092980e2 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -36,6 +36,7 @@
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poison.h>
+#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -79,7 +80,9 @@ struct nvme_queue {
u16 sq_head;
u16 sq_tail;
u16 cq_head;
- u16 cq_phase;
+ u8 cq_phase;
+ u8 cqe_seen;
+ u8 q_suspended;
unsigned long cmdid_data[];
};
@@ -115,6 +118,11 @@ static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
}
+static unsigned nvme_queue_extra(int depth)
+{
+ return DIV_ROUND_UP(depth, 8) + (depth * sizeof(struct nvme_cmd_info));
+}
+
/**
* alloc_cmdid() - Allocate a Command ID
* @nvmeq: The queue that will be used for this command
@@ -285,6 +293,7 @@ nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
iod->npages = -1;
iod->length = nbytes;
iod->nents = 0;
+ iod->start_time = jiffies;
}
return iod;
@@ -308,6 +317,30 @@ void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
kfree(iod);
}
+static void nvme_start_io_acct(struct bio *bio)
+{
+ struct gendisk *disk = bio->bi_bdev->bd_disk;
+ const int rw = bio_data_dir(bio);
+ int cpu = part_stat_lock();
+ part_round_stats(cpu, &disk->part0);
+ part_stat_inc(cpu, &disk->part0, ios[rw]);
+ part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio));
+ part_inc_in_flight(&disk->part0, rw);
+ part_stat_unlock();
+}
+
+static void nvme_end_io_acct(struct bio *bio, unsigned long start_time)
+{
+ struct gendisk *disk = bio->bi_bdev->bd_disk;
+ const int rw = bio_data_dir(bio);
+ unsigned long duration = jiffies - start_time;
+ int cpu = part_stat_lock();
+ part_stat_add(cpu, &disk->part0, ticks[rw], duration);
+ part_round_stats(cpu, &disk->part0);
+ part_dec_in_flight(&disk->part0, rw);
+ part_stat_unlock();
+}
+
static void bio_completion(struct nvme_dev *dev, void *ctx,
struct nvme_completion *cqe)
{
@@ -315,9 +348,11 @@ static void bio_completion(struct nvme_dev *dev, void *ctx,
struct bio *bio = iod->private;
u16 status = le16_to_cpup(&cqe->status) >> 1;
- if (iod->nents)
+ if (iod->nents) {
dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ nvme_end_io_acct(bio, iod->start_time);
+ }
nvme_free_iod(dev, iod);
if (status)
bio_endio(bio, -EIO);
@@ -422,10 +457,8 @@ static void nvme_bio_pair_endio(struct bio *bio, int err)
if (atomic_dec_and_test(&bp->cnt)) {
bio_endio(bp->parent, bp->err);
- if (bp->bv1)
- kfree(bp->bv1);
- if (bp->bv2)
- kfree(bp->bv2);
+ kfree(bp->bv1);
+ kfree(bp->bv2);
kfree(bp);
}
}
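
This cleanup works because the kernel guarantees kfree(NULL) is a no-op, so the guard is redundant:

    /* Equivalent forms: */
    if (p)
            kfree(p);
    /* ...is the same as... */
    kfree(p);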
@@ -695,6 +728,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
cmnd->rw.control = cpu_to_le16(control);
cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+ nvme_start_io_acct(bio);
if (++nvmeq->sq_tail == nvmeq->q_depth)
nvmeq->sq_tail = 0;
writel(nvmeq->sq_tail, nvmeq->q_db);
@@ -709,26 +743,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
return result;
}
-static void nvme_make_request(struct request_queue *q, struct bio *bio)
-{
- struct nvme_ns *ns = q->queuedata;
- struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
- int result = -EBUSY;
-
- spin_lock_irq(&nvmeq->q_lock);
- if (bio_list_empty(&nvmeq->sq_cong))
- result = nvme_submit_bio_queue(nvmeq, ns, bio);
- if (unlikely(result)) {
- if (bio_list_empty(&nvmeq->sq_cong))
- add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
- bio_list_add(&nvmeq->sq_cong, bio);
- }
-
- spin_unlock_irq(&nvmeq->q_lock);
- put_nvmeq(nvmeq);
-}
-
-static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
+static int nvme_process_cq(struct nvme_queue *nvmeq)
{
u16 head, phase;
@@ -758,13 +773,40 @@ static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
* a big problem.
*/
if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
- return IRQ_NONE;
+ return 0;
writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
nvmeq->cq_head = head;
nvmeq->cq_phase = phase;
- return IRQ_HANDLED;
+ nvmeq->cqe_seen = 1;
+ return 1;
+}
+
+static void nvme_make_request(struct request_queue *q, struct bio *bio)
+{
+ struct nvme_ns *ns = q->queuedata;
+ struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
+ int result = -EBUSY;
+
+ if (!nvmeq) {
+ put_nvmeq(NULL);
+ bio_endio(bio, -EIO);
+ return;
+ }
+
+ spin_lock_irq(&nvmeq->q_lock);
+ if (!nvmeq->q_suspended && bio_list_empty(&nvmeq->sq_cong))
+ result = nvme_submit_bio_queue(nvmeq, ns, bio);
+ if (unlikely(result)) {
+ if (bio_list_empty(&nvmeq->sq_cong))
+ add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+ bio_list_add(&nvmeq->sq_cong, bio);
+ }
+
+ nvme_process_cq(nvmeq);
+ spin_unlock_irq(&nvmeq->q_lock);
+ put_nvmeq(nvmeq);
}
static irqreturn_t nvme_irq(int irq, void *data)
@@ -772,7 +814,9 @@ static irqreturn_t nvme_irq(int irq, void *data)
irqreturn_t result;
struct nvme_queue *nvmeq = data;
spin_lock(&nvmeq->q_lock);
- result = nvme_process_cq(nvmeq);
+ nvme_process_cq(nvmeq);
+ result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE;
+ nvmeq->cqe_seen = 0;
spin_unlock(&nvmeq->q_lock);
return result;
}
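
Background for the new cq_phase and cqe_seen fields: an NVMe completion queue is a ring in which the controller flips a one-bit phase tag in each entry's status field on every pass, so the host can detect new entries without a device-side head pointer. nvme_process_cq() above consumes entries with essentially this loop (condensed; field names follow the driver):

    /* Condensed sketch of NVMe CQ phase-bit consumption, after nvme_process_cq(). */
    head = nvmeq->cq_head;
    phase = nvmeq->cq_phase;
    for (;;) {
            struct nvme_completion cqe = nvmeq->cqes[head];
            if ((le16_to_cpu(cqe.status) & 1) != phase)
                    break;                  /* entry not yet written by the device */
            if (++head == nvmeq->q_depth) {
                    head = 0;
                    phase = !phase;         /* wrapped: expect the inverted tag */
            }
            /* ... look up and complete the command named by cqe.command_id ... */
    }

Splitting the return value out of the IRQ path lets nvme_make_request() poll the queue opportunistically, while the interrupt handler still reports IRQ_HANDLED only when a completion was actually seen (cqe_seen).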
@@ -986,8 +1030,15 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
}
}
-static void nvme_free_queue_mem(struct nvme_queue *nvmeq)
+static void nvme_free_queue(struct nvme_queue *nvmeq)
{
+ spin_lock_irq(&nvmeq->q_lock);
+ while (bio_list_peek(&nvmeq->sq_cong)) {
+ struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
+ bio_endio(bio, -EIO);
+ }
+ spin_unlock_irq(&nvmeq->q_lock);
+
dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
@@ -995,17 +1046,28 @@ static void nvme_free_queue_mem(struct nvme_queue *nvmeq)
kfree(nvmeq);
}
-static void nvme_free_queue(struct nvme_dev *dev, int qid)
+static void nvme_free_queues(struct nvme_dev *dev)
+{
+ int i;
+
+ for (i = dev->queue_count - 1; i >= 0; i--) {
+ nvme_free_queue(dev->queues[i]);
+ dev->queue_count--;
+ dev->queues[i] = NULL;
+ }
+}
+
+static void nvme_disable_queue(struct nvme_dev *dev, int qid)
{
struct nvme_queue *nvmeq = dev->queues[qid];
int vector = dev->entry[nvmeq->cq_vector].vector;
spin_lock_irq(&nvmeq->q_lock);
- nvme_cancel_ios(nvmeq, false);
- while (bio_list_peek(&nvmeq->sq_cong)) {
- struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
- bio_endio(bio, -EIO);
+ if (nvmeq->q_suspended) {
+ spin_unlock_irq(&nvmeq->q_lock);
+ return;
}
+ nvmeq->q_suspended = 1;
spin_unlock_irq(&nvmeq->q_lock);
irq_set_affinity_hint(vector, NULL);
@@ -1017,15 +1079,17 @@ static void nvme_free_queue(struct nvme_dev *dev, int qid)
adapter_delete_cq(dev, qid);
}
- nvme_free_queue_mem(nvmeq);
+ spin_lock_irq(&nvmeq->q_lock);
+ nvme_process_cq(nvmeq);
+ nvme_cancel_ios(nvmeq, false);
+ spin_unlock_irq(&nvmeq->q_lock);
}
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
int depth, int vector)
{
struct device *dmadev = &dev->pci_dev->dev;
- unsigned extra = DIV_ROUND_UP(depth, 8) + (depth *
- sizeof(struct nvme_cmd_info));
+ unsigned extra = nvme_queue_extra(depth);
struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
if (!nvmeq)
return NULL;
@@ -1052,6 +1116,8 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
nvmeq->q_depth = depth;
nvmeq->cq_vector = vector;
+ nvmeq->q_suspended = 1;
+ dev->queue_count++;
return nvmeq;
@@ -1075,18 +1141,29 @@ static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
}
-static struct nvme_queue *nvme_create_queue(struct nvme_dev *dev, int qid,
- int cq_size, int vector)
+static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
- int result;
- struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);
+ struct nvme_dev *dev = nvmeq->dev;
+ unsigned extra = nvme_queue_extra(nvmeq->q_depth);
- if (!nvmeq)
- return ERR_PTR(-ENOMEM);
+ nvmeq->sq_tail = 0;
+ nvmeq->cq_head = 0;
+ nvmeq->cq_phase = 1;
+ nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
+ memset(nvmeq->cmdid_data, 0, extra);
+ memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
+ nvme_cancel_ios(nvmeq, false);
+ nvmeq->q_suspended = 0;
+}
+
+static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
+{
+ struct nvme_dev *dev = nvmeq->dev;
+ int result;
result = adapter_alloc_cq(dev, qid, nvmeq);
if (result < 0)
- goto free_nvmeq;
+ return result;
result = adapter_alloc_sq(dev, qid, nvmeq);
if (result < 0)
@@ -1096,19 +1173,17 @@ static struct nvme_queue *nvme_create_queue(struct nvme_dev *dev, int qid,
if (result < 0)
goto release_sq;
- return nvmeq;
+ spin_lock(&nvmeq->q_lock);
+ nvme_init_queue(nvmeq, qid);
+ spin_unlock(&nvmeq->q_lock);
+
+ return result;
release_sq:
adapter_delete_sq(dev, qid);
release_cq:
adapter_delete_cq(dev, qid);
- free_nvmeq:
- dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
- (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
- dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
- nvmeq->sq_cmds, nvmeq->sq_dma_addr);
- kfree(nvmeq);
- return ERR_PTR(result);
+ return result;
}
static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled)
@@ -1152,6 +1227,30 @@ static int nvme_enable_ctrl(struct nvme_dev *dev, u64 cap)
return nvme_wait_ready(dev, cap, true);
}
+static int nvme_shutdown_ctrl(struct nvme_dev *dev)
+{
+ unsigned long timeout;
+ u32 cc;
+
+ cc = (readl(&dev->bar->cc) & ~NVME_CC_SHN_MASK) | NVME_CC_SHN_NORMAL;
+ writel(cc, &dev->bar->cc);
+
+ timeout = 2 * HZ + jiffies;
+ while ((readl(&dev->bar->csts) & NVME_CSTS_SHST_MASK) !=
+ NVME_CSTS_SHST_CMPLT) {
+ msleep(100);
+ if (fatal_signal_pending(current))
+ return -EINTR;
+ if (time_after(jiffies, timeout)) {
+ dev_err(&dev->pci_dev->dev,
+ "Device shutdown incomplete; abort shutdown\n");
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
int result;
@@ -1159,16 +1258,17 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
u64 cap = readq(&dev->bar->cap);
struct nvme_queue *nvmeq;
- dev->dbs = ((void __iomem *)dev->bar) + 4096;
- dev->db_stride = NVME_CAP_STRIDE(cap);
-
result = nvme_disable_ctrl(dev, cap);
if (result < 0)
return result;
- nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
- if (!nvmeq)
- return -ENOMEM;
+ nvmeq = dev->queues[0];
+ if (!nvmeq) {
+ nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
+ if (!nvmeq)
+ return -ENOMEM;
+ dev->queues[0] = nvmeq;
+ }
aqa = nvmeq->q_depth - 1;
aqa |= aqa << 16;
@@ -1185,17 +1285,15 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
result = nvme_enable_ctrl(dev, cap);
if (result)
- goto free_q;
+ return result;
result = queue_request_irq(dev, nvmeq, "nvme admin");
if (result)
- goto free_q;
-
- dev->queues[0] = nvmeq;
- return result;
+ return result;
- free_q:
- nvme_free_queue_mem(nvmeq);
+ spin_lock(&nvmeq->q_lock);
+ nvme_init_queue(nvmeq, 0);
+ spin_unlock(&nvmeq->q_lock);
return result;
}
@@ -1314,7 +1412,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
c.rw.appmask = cpu_to_le16(io.appmask);
if (meta_len) {
- meta_iod = nvme_map_user_pages(dev, io.opcode & 1, io.metadata, meta_len);
+ meta_iod = nvme_map_user_pages(dev, io.opcode & 1, io.metadata,
+ meta_len);
if (IS_ERR(meta_iod)) {
status = PTR_ERR(meta_iod);
meta_iod = NULL;
@@ -1356,6 +1455,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
put_nvmeq(nvmeq);
if (length != (io.nblocks + 1) << ns->lba_shift)
status = -ENOMEM;
+ else if (!nvmeq || nvmeq->q_suspended)
+ status = -EBUSY;
else
status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
@@ -1453,6 +1554,7 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
switch (cmd) {
case NVME_IOCTL_ID:
+ force_successful_syscall_return();
return ns->ns_id;
case NVME_IOCTL_ADMIN_CMD:
return nvme_user_admin_cmd(ns->dev, (void __user *)arg);
@@ -1506,10 +1608,12 @@ static int nvme_kthread(void *data)
if (!nvmeq)
continue;
spin_lock_irq(&nvmeq->q_lock);
- if (nvme_process_cq(nvmeq))
- printk("process_cq did something\n");
+ if (nvmeq->q_suspended)
+ goto unlock;
+ nvme_process_cq(nvmeq);
nvme_cancel_ios(nvmeq, true);
nvme_resubmit_bios(nvmeq);
+ unlock:
spin_unlock_irq(&nvmeq->q_lock);
}
}
@@ -1556,7 +1660,7 @@ static void nvme_config_discard(struct nvme_ns *ns)
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}
-static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
+static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
{
struct nvme_ns *ns;
@@ -1631,14 +1735,19 @@ static int set_queue_count(struct nvme_dev *dev, int count)
status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
&result);
if (status)
- return -EIO;
+ return status < 0 ? -EIO : -EBUSY;
return min(result & 0xffff, result >> 16) + 1;
}
+static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
+{
+ return 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
+}
+
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
struct pci_dev *pdev = dev->pci_dev;
- int result, cpu, i, nr_io_queues, db_bar_size, q_depth, q_count;
+ int result, cpu, i, vecs, nr_io_queues, size, q_depth;
nr_io_queues = num_online_cpus();
result = set_queue_count(dev, nr_io_queues);
@@ -1647,53 +1756,80 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (result < nr_io_queues)
nr_io_queues = result;
- q_count = nr_io_queues;
- /* Deregister the admin queue's interrupt */
- free_irq(dev->entry[0].vector, dev->queues[0]);
-
- db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
- if (db_bar_size > 8192) {
+ size = db_bar_size(dev, nr_io_queues);
+ if (size > 8192) {
iounmap(dev->bar);
- dev->bar = ioremap(pci_resource_start(pdev, 0), db_bar_size);
+ do {
+ dev->bar = ioremap(pci_resource_start(pdev, 0), size);
+ if (dev->bar)
+ break;
+ if (!--nr_io_queues)
+ return -ENOMEM;
+ size = db_bar_size(dev, nr_io_queues);
+ } while (1);
dev->dbs = ((void __iomem *)dev->bar) + 4096;
dev->queues[0]->q_db = dev->dbs;
}
- for (i = 0; i < nr_io_queues; i++)
+ /* Deregister the admin queue's interrupt */
+ free_irq(dev->entry[0].vector, dev->queues[0]);
+
+ vecs = nr_io_queues;
+ for (i = 0; i < vecs; i++)
dev->entry[i].entry = i;
for (;;) {
- result = pci_enable_msix(pdev, dev->entry, nr_io_queues);
- if (result == 0) {
- break;
- } else if (result > 0) {
- nr_io_queues = result;
- continue;
- } else {
- nr_io_queues = 0;
+ result = pci_enable_msix(pdev, dev->entry, vecs);
+ if (result <= 0)
break;
- }
+ vecs = result;
}
- if (nr_io_queues == 0) {
- nr_io_queues = q_count;
+ if (result < 0) {
+ vecs = nr_io_queues;
+ if (vecs > 32)
+ vecs = 32;
for (;;) {
- result = pci_enable_msi_block(pdev, nr_io_queues);
+ result = pci_enable_msi_block(pdev, vecs);
if (result == 0) {
- for (i = 0; i < nr_io_queues; i++)
+ for (i = 0; i < vecs; i++)
dev->entry[i].vector = i + pdev->irq;
break;
- } else if (result > 0) {
- nr_io_queues = result;
- continue;
- } else {
- nr_io_queues = 1;
+ } else if (result < 0) {
+ vecs = 1;
break;
}
+ vecs = result;
}
}
+ /*
+ * Should investigate if there's a performance win from allocating
+ * more queues than interrupt vectors; it might allow the submission
+ * path to scale better, even if the receive path is limited by the
+ * number of interrupts.
+ */
+ nr_io_queues = vecs;
+
result = queue_request_irq(dev, dev->queues[0], "nvme admin");
- /* XXX: handle failure here */
+ if (result) {
+ dev->queues[0]->q_suspended = 1;
+ goto free_queues;
+ }
+
+ /* Free previously allocated queues that are no longer usable */
+ spin_lock(&dev_list_lock);
+ for (i = dev->queue_count - 1; i > nr_io_queues; i--) {
+ struct nvme_queue *nvmeq = dev->queues[i];
+
+ spin_lock(&nvmeq->q_lock);
+ nvme_cancel_ios(nvmeq, false);
+ spin_unlock(&nvmeq->q_lock);
+
+ nvme_free_queue(nvmeq);
+ dev->queue_count--;
+ dev->queues[i] = NULL;
+ }
+ spin_unlock(&dev_list_lock);
cpu = cpumask_first(cpu_online_mask);
for (i = 0; i < nr_io_queues; i++) {
@@ -1703,11 +1839,12 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1,
NVME_Q_DEPTH);
- for (i = 0; i < nr_io_queues; i++) {
- dev->queues[i + 1] = nvme_create_queue(dev, i + 1, q_depth, i);
- if (IS_ERR(dev->queues[i + 1]))
- return PTR_ERR(dev->queues[i + 1]);
- dev->queue_count++;
+ for (i = dev->queue_count - 1; i < nr_io_queues; i++) {
+ dev->queues[i + 1] = nvme_alloc_queue(dev, i + 1, q_depth, i);
+ if (!dev->queues[i + 1]) {
+ result = -ENOMEM;
+ goto free_queues;
+ }
}
for (; i < num_possible_cpus(); i++) {
@@ -1715,15 +1852,20 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
dev->queues[i + 1] = dev->queues[target + 1];
}
- return 0;
-}
+ for (i = 1; i < dev->queue_count; i++) {
+ result = nvme_create_queue(dev->queues[i], i);
+ if (result) {
+ for (--i; i > 0; i--)
+ nvme_disable_queue(dev, i);
+ goto free_queues;
+ }
+ }
-static void nvme_free_queues(struct nvme_dev *dev)
-{
- int i;
+ return 0;
- for (i = dev->queue_count - 1; i >= 0; i--)
- nvme_free_queue(dev, i);
+ free_queues:
+ nvme_free_queues(dev);
+ return result;
}
/*
@@ -1734,7 +1876,8 @@ static void nvme_free_queues(struct nvme_dev *dev)
*/
static int nvme_dev_add(struct nvme_dev *dev)
{
- int res, nn, i;
+ int res;
+ unsigned nn, i;
struct nvme_ns *ns;
struct nvme_id_ctrl *ctrl;
struct nvme_id_ns *id_ns;
@@ -1742,10 +1885,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
dma_addr_t dma_addr;
int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
- res = nvme_setup_io_queues(dev);
- if (res)
- return res;
-
mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
GFP_KERNEL);
if (!mem)
@@ -1796,23 +1935,86 @@ static int nvme_dev_add(struct nvme_dev *dev)
return res;
}
-static int nvme_dev_remove(struct nvme_dev *dev)
+static int nvme_dev_map(struct nvme_dev *dev)
{
- struct nvme_ns *ns, *next;
+ int bars, result = -ENOMEM;
+ struct pci_dev *pdev = dev->pci_dev;
+
+ if (pci_enable_device_mem(pdev))
+ return result;
+
+ dev->entry[0].vector = pdev->irq;
+ pci_set_master(pdev);
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ if (pci_request_selected_regions(pdev, bars, "nvme"))
+ goto disable_pci;
+
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ else
+ goto disable_pci;
+
+ pci_set_drvdata(pdev, dev);
+ dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
+ if (!dev->bar)
+ goto disable;
+
+ dev->db_stride = NVME_CAP_STRIDE(readq(&dev->bar->cap));
+ dev->dbs = ((void __iomem *)dev->bar) + 4096;
+
+ return 0;
+
+ disable:
+ pci_release_regions(pdev);
+ disable_pci:
+ pci_disable_device(pdev);
+ return result;
+}
+
+static void nvme_dev_unmap(struct nvme_dev *dev)
+{
+ if (dev->pci_dev->msi_enabled)
+ pci_disable_msi(dev->pci_dev);
+ else if (dev->pci_dev->msix_enabled)
+ pci_disable_msix(dev->pci_dev);
+
+ if (dev->bar) {
+ iounmap(dev->bar);
+ dev->bar = NULL;
+ }
+
+ pci_release_regions(dev->pci_dev);
+ if (pci_is_enabled(dev->pci_dev))
+ pci_disable_device(dev->pci_dev);
+}
+
+static void nvme_dev_shutdown(struct nvme_dev *dev)
+{
+ int i;
+
+ for (i = dev->queue_count - 1; i >= 0; i--)
+ nvme_disable_queue(dev, i);
spin_lock(&dev_list_lock);
- list_del(&dev->node);
+ list_del_init(&dev->node);
spin_unlock(&dev_list_lock);
+ if (dev->bar)
+ nvme_shutdown_ctrl(dev);
+ nvme_dev_unmap(dev);
+}
+
+static void nvme_dev_remove(struct nvme_dev *dev)
+{
+ struct nvme_ns *ns, *next;
+
list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
list_del(&ns->list);
del_gendisk(ns->disk);
nvme_ns_free(ns);
}
-
- nvme_free_queues(dev);
-
- return 0;
}
static int nvme_setup_prp_pools(struct nvme_dev *dev)
@@ -1872,15 +2074,10 @@ static void nvme_free_dev(struct kref *kref)
{
struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
nvme_dev_remove(dev);
- if (dev->pci_dev->msi_enabled)
- pci_disable_msi(dev->pci_dev);
- else if (dev->pci_dev->msix_enabled)
- pci_disable_msix(dev->pci_dev);
- iounmap(dev->bar);
+ nvme_dev_shutdown(dev);
+ nvme_free_queues(dev);
nvme_release_instance(dev);
nvme_release_prp_pools(dev);
- pci_disable_device(dev->pci_dev);
- pci_release_regions(dev->pci_dev);
kfree(dev->queues);
kfree(dev->entry);
kfree(dev);
@@ -1921,9 +2118,40 @@ static const struct file_operations nvme_dev_fops = {
.compat_ioctl = nvme_dev_ioctl,
};
+static int nvme_dev_start(struct nvme_dev *dev)
+{
+ int result;
+
+ result = nvme_dev_map(dev);
+ if (result)
+ return result;
+
+ result = nvme_configure_admin_queue(dev);
+ if (result)
+ goto unmap;
+
+ spin_lock(&dev_list_lock);
+ list_add(&dev->node, &dev_list);
+ spin_unlock(&dev_list_lock);
+
+ result = nvme_setup_io_queues(dev);
+ if (result && result != -EBUSY)
+ goto disable;
+
+ return result;
+
+ disable:
+ spin_lock(&dev_list_lock);
+ list_del_init(&dev->node);
+ spin_unlock(&dev_list_lock);
+ unmap:
+ nvme_dev_unmap(dev);
+ return result;
+}
+
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- int bars, result = -ENOMEM;
+ int result = -ENOMEM;
struct nvme_dev *dev;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -1938,53 +2166,28 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!dev->queues)
goto free;
- if (pci_enable_device_mem(pdev))
- goto free;
- pci_set_master(pdev);
- bars = pci_select_bars(pdev, IORESOURCE_MEM);
- if (pci_request_selected_regions(pdev, bars, "nvme"))
- goto disable;
-
INIT_LIST_HEAD(&dev->namespaces);
dev->pci_dev = pdev;
- pci_set_drvdata(pdev, dev);
-
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- else
- goto disable;
-
result = nvme_set_instance(dev);
if (result)
- goto disable;
-
- dev->entry[0].vector = pdev->irq;
+ goto free;
result = nvme_setup_prp_pools(dev);
if (result)
- goto disable_msix;
+ goto release;
- dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
- if (!dev->bar) {
- result = -ENOMEM;
- goto disable_msix;
+ result = nvme_dev_start(dev);
+ if (result) {
+ if (result == -EBUSY)
+ goto create_cdev;
+ goto release_pools;
}
- result = nvme_configure_admin_queue(dev);
- if (result)
- goto unmap;
- dev->queue_count++;
-
- spin_lock(&dev_list_lock);
- list_add(&dev->node, &dev_list);
- spin_unlock(&dev_list_lock);
-
result = nvme_dev_add(dev);
if (result)
- goto delete;
+ goto shutdown;
+ create_cdev:
scnprintf(dev->name, sizeof(dev->name), "nvme%d", dev->instance);
dev->miscdev.minor = MISC_DYNAMIC_MINOR;
dev->miscdev.parent = &pdev->dev;
@@ -1999,24 +2202,13 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
remove:
nvme_dev_remove(dev);
- delete:
- spin_lock(&dev_list_lock);
- list_del(&dev->node);
- spin_unlock(&dev_list_lock);
-
+ shutdown:
+ nvme_dev_shutdown(dev);
+ release_pools:
nvme_free_queues(dev);
- unmap:
- iounmap(dev->bar);
- disable_msix:
- if (dev->pci_dev->msi_enabled)
- pci_disable_msi(dev->pci_dev);
- else if (dev->pci_dev->msix_enabled)
- pci_disable_msix(dev->pci_dev);
- nvme_release_instance(dev);
nvme_release_prp_pools(dev);
- disable:
- pci_disable_device(pdev);
- pci_release_regions(pdev);
+ release:
+ nvme_release_instance(dev);
free:
kfree(dev->queues);
kfree(dev->entry);
@@ -2037,8 +2229,30 @@ static void nvme_remove(struct pci_dev *pdev)
#define nvme_link_reset NULL
#define nvme_slot_reset NULL
#define nvme_error_resume NULL
-#define nvme_suspend NULL
-#define nvme_resume NULL
+
+static int nvme_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct nvme_dev *ndev = pci_get_drvdata(pdev);
+
+ nvme_dev_shutdown(ndev);
+ return 0;
+}
+
+static int nvme_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct nvme_dev *ndev = pci_get_drvdata(pdev);
+ int ret;
+
+ ret = nvme_dev_start(ndev);
+ /* XXX: should remove gendisks if resume fails */
+ if (ret)
+ nvme_free_queues(ndev);
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
static const struct pci_error_handlers nvme_err_handler = {
.error_detected = nvme_error_detected,
@@ -2062,8 +2276,9 @@ static struct pci_driver nvme_driver = {
.id_table = nvme_id_table,
.probe = nvme_probe,
.remove = nvme_remove,
- .suspend = nvme_suspend,
- .resume = nvme_resume,
+ .driver = {
+ .pm = &nvme_dev_pm_ops,
+ },
.err_handler = &nvme_err_handler,
};
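
The power-management hunks at the end move from the legacy pci_driver .suspend/.resume members to struct dev_pm_ops. SIMPLE_DEV_PM_OPS() expands to an ops table that reuses the same callback pair for suspend/resume, freeze/thaw, and poweroff/restore; reduced to its skeleton:

    /* Shape of the dev_pm_ops conversion; handlers now take a struct device. */
    static int nvme_suspend(struct device *dev);  /* tears down via nvme_dev_shutdown() */
    static int nvme_resume(struct device *dev);   /* re-runs nvme_dev_start() */

    static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);

    static struct pci_driver nvme_driver = {
            /* ... */
            .driver = {
                    .pm = &nvme_dev_pm_ops,
            },
    };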
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index 102de2f52b5c..4a4ff4eb8e23 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -933,13 +933,12 @@ static int nvme_trans_bdev_char_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
int res = SNTI_TRANSLATION_SUCCESS;
int xfer_len;
- inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
+ inq_response = kzalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
if (inq_response == NULL) {
res = -ENOMEM;
goto out_mem;
}
- memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
inq_response[1] = INQ_BDEV_CHARACTERISTICS_PAGE; /* Page Code */
inq_response[2] = 0x00; /* Page Length MSB */
inq_response[3] = 0x3C; /* Page Length LSB */
@@ -964,12 +963,11 @@ static int nvme_trans_log_supp_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
int xfer_len;
u8 *log_response;
- log_response = kmalloc(LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH, GFP_KERNEL);
+ log_response = kzalloc(LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH, GFP_KERNEL);
if (log_response == NULL) {
res = -ENOMEM;
goto out_mem;
}
- memset(log_response, 0, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH);
log_response[0] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
/* Subpage=0x00, Page Length MSB=0 */
@@ -1000,12 +998,11 @@ static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
u8 temp_c;
u16 temp_k;
- log_response = kmalloc(LOG_INFO_EXCP_PAGE_LENGTH, GFP_KERNEL);
+ log_response = kzalloc(LOG_INFO_EXCP_PAGE_LENGTH, GFP_KERNEL);
if (log_response == NULL) {
res = -ENOMEM;
goto out_mem;
}
- memset(log_response, 0, LOG_INFO_EXCP_PAGE_LENGTH);
mem = dma_alloc_coherent(&dev->pci_dev->dev,
sizeof(struct nvme_smart_log),
@@ -1069,12 +1066,11 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
u8 temp_c_cur, temp_c_thresh;
u16 temp_k;
- log_response = kmalloc(LOG_TEMP_PAGE_LENGTH, GFP_KERNEL);
+ log_response = kzalloc(LOG_TEMP_PAGE_LENGTH, GFP_KERNEL);
if (log_response == NULL) {
res = -ENOMEM;
goto out_mem;
}
- memset(log_response, 0, LOG_TEMP_PAGE_LENGTH);
mem = dma_alloc_coherent(&dev->pci_dev->dev,
sizeof(struct nvme_smart_log),
@@ -1380,12 +1376,11 @@ static int nvme_trans_mode_page_create(struct nvme_ns *ns,
blk_desc_offset = mph_size;
mode_pages_offset_1 = blk_desc_offset + blk_desc_len;
- response = kmalloc(resp_size, GFP_KERNEL);
+ response = kzalloc(resp_size, GFP_KERNEL);
if (response == NULL) {
res = -ENOMEM;
goto out_mem;
}
- memset(response, 0, resp_size);
res = nvme_trans_fill_mode_parm_hdr(&response[0], mph_size, cdb10,
llbaa, mode_data_length, blk_desc_len);
@@ -2480,12 +2475,11 @@ static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
}
id_ns = mem;
- response = kmalloc(resp_size, GFP_KERNEL);
+ response = kzalloc(resp_size, GFP_KERNEL);
if (response == NULL) {
res = -ENOMEM;
goto out_dma;
}
- memset(response, 0, resp_size);
nvme_trans_fill_read_cap(response, id_ns, cdb16);
xfer_len = min(alloc_len, resp_size);
@@ -2554,12 +2548,11 @@ static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
goto out_dma;
}
- response = kmalloc(resp_size, GFP_KERNEL);
+ response = kzalloc(resp_size, GFP_KERNEL);
if (response == NULL) {
res = -ENOMEM;
goto out_dma;
}
- memset(response, 0, resp_size);
/* The first LUN ID will always be 0 per the SAM spec */
for (lun_id = 0; lun_id < le32_to_cpu(id_ctrl->nn); lun_id++) {
@@ -2600,12 +2593,11 @@ static int nvme_trans_request_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
resp_size = ((desc_format) ? (DESC_FMT_SENSE_DATA_SIZE) :
(FIXED_FMT_SENSE_DATA_SIZE));
- response = kmalloc(resp_size, GFP_KERNEL);
+ response = kzalloc(resp_size, GFP_KERNEL);
if (response == NULL) {
res = -ENOMEM;
goto out;
}
- memset(response, 0, resp_size);
if (desc_format == DESCRIPTOR_FORMAT_SENSE_DATA_TYPE) {
/* Descriptor Format Sense Data */
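
Every nvme-scsi.c hunk above is the same mechanical transform: kzalloc() allocates zeroed memory, so a kmalloc() immediately followed by a full memset() collapses into one call:

    /* before */
    p = kmalloc(len, GFP_KERNEL);
    if (p == NULL)
            return -ENOMEM;
    memset(p, 0, len);

    /* after: identical semantics, one call */
    p = kzalloc(len, GFP_KERNEL);
    if (p == NULL)
            return -ENOMEM;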
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
index 1bbc681688e4..79aa179305b5 100644
--- a/drivers/block/osdblk.c
+++ b/drivers/block/osdblk.c
@@ -598,7 +598,7 @@ static ssize_t class_osdblk_remove(struct class *c,
unsigned long ul;
struct list_head *tmp;
- rc = strict_strtoul(buf, 10, &ul);
+ rc = kstrtoul(buf, 10, &ul);
if (rc)
return rc;
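
strict_strtoul() was a deprecated alias for exactly this behavior; kstrtoul() keeps the same contract, returning 0 on success or a negative errno, and rejecting trailing garbage (a lone trailing newline is tolerated):

    unsigned long val;
    int rc = kstrtoul(buf, 10, &val);  /* base 10; "123\n" parses, "123x" returns -EINVAL */
    if (rc)
            return rc;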
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index f5d0ea11d9fd..56188475cfd3 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -44,6 +44,8 @@
*
*************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/pktcdvd.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -69,23 +71,24 @@
#define DRIVER_NAME "pktcdvd"
-#if PACKET_DEBUG
-#define DPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
-#else
-#define DPRINTK(fmt, args...)
-#endif
-
-#if PACKET_DEBUG > 1
-#define VPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
-#else
-#define VPRINTK(fmt, args...)
-#endif
+#define pkt_err(pd, fmt, ...) \
+ pr_err("%s: " fmt, pd->name, ##__VA_ARGS__)
+#define pkt_notice(pd, fmt, ...) \
+ pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__)
+#define pkt_info(pd, fmt, ...) \
+ pr_info("%s: " fmt, pd->name, ##__VA_ARGS__)
+
+#define pkt_dbg(level, pd, fmt, ...) \
+do { \
+ if (level == 2 && PACKET_DEBUG >= 2) \
+ pr_notice("%s: %s():" fmt, \
+ pd->name, __func__, ##__VA_ARGS__); \
+ else if (level == 1 && PACKET_DEBUG >= 1) \
+ pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__); \
+} while (0)
#define MAX_SPEED 0xffff
-#define ZONE(sector, pd) (((sector) + (pd)->offset) & \
- ~(sector_t)((pd)->settings.size - 1))
-
static DEFINE_MUTEX(pktcdvd_mutex);
static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
static struct proc_dir_entry *pkt_proc;
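
Since PACKET_DEBUG is a compile-time constant, the level comparison inside pkt_dbg() is folded away by the compiler and disabled debug levels generate no code; the macro also stamps every message with the device name (plus the function name at level 2) instead of each call site spelling its own prefix. A typical call after the conversion:

    /* pkt_dbg() usage; with PACKET_DEBUG < 2 this compiles to nothing. */
    pkt_dbg(2, pd, "zone %llx cached\n", (unsigned long long)pkt->sector);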
@@ -103,7 +106,10 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
static int pkt_remove_dev(dev_t pkt_dev);
static int pkt_seq_show(struct seq_file *m, void *p);
-
+static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
+{
+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
+}
/*
* create and register a pktcdvd kernel object.
@@ -424,7 +430,7 @@ static int pkt_sysfs_init(void)
if (ret) {
kfree(class_pktcdvd);
class_pktcdvd = NULL;
- printk(DRIVER_NAME": failed to create class pktcdvd\n");
+ pr_err("failed to create class pktcdvd\n");
return ret;
}
return 0;
@@ -517,7 +523,7 @@ static void pkt_bio_finished(struct pktcdvd_device *pd)
{
BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
- VPRINTK(DRIVER_NAME": queue empty\n");
+ pkt_dbg(2, pd, "queue empty\n");
atomic_set(&pd->iosched.attention, 1);
wake_up(&pd->wqueue);
}
@@ -734,36 +740,33 @@ out:
return ret;
}
+static const char *sense_key_string(__u8 index)
+{
+ static const char * const info[] = {
+ "No sense", "Recovered error", "Not ready",
+ "Medium error", "Hardware error", "Illegal request",
+ "Unit attention", "Data protect", "Blank check",
+ };
+
+ return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
+}
+
/*
* A generic sense dump / resolve mechanism should be implemented across
* all ATAPI + SCSI devices.
*/
-static void pkt_dump_sense(struct packet_command *cgc)
+static void pkt_dump_sense(struct pktcdvd_device *pd,
+ struct packet_command *cgc)
{
- static char *info[9] = { "No sense", "Recovered error", "Not ready",
- "Medium error", "Hardware error", "Illegal request",
- "Unit attention", "Data protect", "Blank check" };
- int i;
struct request_sense *sense = cgc->sense;
- printk(DRIVER_NAME":");
- for (i = 0; i < CDROM_PACKET_SIZE; i++)
- printk(" %02x", cgc->cmd[i]);
- printk(" - ");
-
- if (sense == NULL) {
- printk("no sense\n");
- return;
- }
-
- printk("sense %02x.%02x.%02x", sense->sense_key, sense->asc, sense->ascq);
-
- if (sense->sense_key > 8) {
- printk(" (INVALID)\n");
- return;
- }
-
- printk(" (%s)\n", info[sense->sense_key]);
+ if (sense)
+ pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n",
+ CDROM_PACKET_SIZE, cgc->cmd,
+ sense->sense_key, sense->asc, sense->ascq,
+ sense_key_string(sense->sense_key));
+ else
+ pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
}
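
The rewritten dump leans on the kernel's %*ph printk extension, which takes a (length, pointer) pair and prints the buffer as space-separated hex, replacing the old per-byte printk loop:

    /* %*ph hex-dumps a small buffer (CDROM_PACKET_SIZE is 12 bytes here). */
    pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
    /* prints e.g. "pktcdvd0: 5a 00 03 00 00 00 00 00 10 00 00 00 - no sense" */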
/*
@@ -806,7 +809,7 @@ static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
cgc.cmd[5] = write_speed & 0xff;
if ((ret = pkt_generic_packet(pd, &cgc)))
- pkt_dump_sense(&cgc);
+ pkt_dump_sense(pd, &cgc);
return ret;
}
@@ -872,7 +875,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
need_write_seek = 0;
if (need_write_seek && reads_queued) {
if (atomic_read(&pd->cdrw.pending_bios) > 0) {
- VPRINTK(DRIVER_NAME": write, waiting\n");
+ pkt_dbg(2, pd, "write, waiting\n");
break;
}
pkt_flush_cache(pd);
@@ -881,7 +884,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
} else {
if (!reads_queued && writes_queued) {
if (atomic_read(&pd->cdrw.pending_bios) > 0) {
- VPRINTK(DRIVER_NAME": read, waiting\n");
+ pkt_dbg(2, pd, "read, waiting\n");
break;
}
pd->iosched.writing = 1;
@@ -943,7 +946,7 @@ static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_que
set_bit(PACKET_MERGE_SEGS, &pd->flags);
return 0;
} else {
- printk(DRIVER_NAME": cdrom max_phys_segments too small\n");
+ pkt_err(pd, "cdrom max_phys_segments too small\n");
return -EIO;
}
}
@@ -987,8 +990,9 @@ static void pkt_end_io_read(struct bio *bio, int err)
struct pktcdvd_device *pd = pkt->pd;
BUG_ON(!pd);
- VPRINTK("pkt_end_io_read: bio=%p sec0=%llx sec=%llx err=%d\n", bio,
- (unsigned long long)pkt->sector, (unsigned long long)bio->bi_sector, err);
+ pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
+ bio, (unsigned long long)pkt->sector,
+ (unsigned long long)bio->bi_sector, err);
if (err)
atomic_inc(&pkt->io_errors);
@@ -1005,7 +1009,7 @@ static void pkt_end_io_packet_write(struct bio *bio, int err)
struct pktcdvd_device *pd = pkt->pd;
BUG_ON(!pd);
- VPRINTK("pkt_end_io_packet_write: id=%d, err=%d\n", pkt->id, err);
+ pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, err);
pd->stats.pkt_ended++;
@@ -1047,7 +1051,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
spin_unlock(&pkt->lock);
if (pkt->cache_valid) {
- VPRINTK("pkt_gather_data: zone %llx cached\n",
+ pkt_dbg(2, pd, "zone %llx cached\n",
(unsigned long long)pkt->sector);
goto out_account;
}
@@ -1070,7 +1074,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
p = (f * CD_FRAMESIZE) / PAGE_SIZE;
offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
- VPRINTK("pkt_gather_data: Adding frame %d, page:%p offs:%d\n",
+ pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n",
f, pkt->pages[p], offset);
if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
BUG();
@@ -1082,7 +1086,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
}
out_account:
- VPRINTK("pkt_gather_data: need %d frames for zone %llx\n",
+ pkt_dbg(2, pd, "need %d frames for zone %llx\n",
frames_read, (unsigned long long)pkt->sector);
pd->stats.pkt_started++;
pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
@@ -1183,7 +1187,8 @@ static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state
"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
};
enum packet_data_state old_state = pkt->state;
- VPRINTK("pkt %2d : s=%6llx %s -> %s\n", pkt->id, (unsigned long long)pkt->sector,
+ pkt_dbg(2, pd, "pkt %2d : s=%6llx %s -> %s\n",
+ pkt->id, (unsigned long long)pkt->sector,
state_name[old_state], state_name[state]);
#endif
pkt->state = state;
@@ -1202,12 +1207,10 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
struct rb_node *n;
int wakeup;
- VPRINTK("handle_queue\n");
-
atomic_set(&pd->scan_queue, 0);
if (list_empty(&pd->cdrw.pkt_free_list)) {
- VPRINTK("handle_queue: no pkt\n");
+ pkt_dbg(2, pd, "no pkt\n");
return 0;
}
@@ -1224,7 +1227,7 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
node = first_node;
while (node) {
bio = node->bio;
- zone = ZONE(bio->bi_sector, pd);
+ zone = get_zone(bio->bi_sector, pd);
list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
if (p->sector == zone) {
bio = NULL;
@@ -1244,7 +1247,7 @@ try_next_bio:
}
spin_unlock(&pd->lock);
if (!bio) {
- VPRINTK("handle_queue: no bio\n");
+ pkt_dbg(2, pd, "no bio\n");
return 0;
}
@@ -1260,12 +1263,12 @@ try_next_bio:
* to this packet.
*/
spin_lock(&pd->lock);
- VPRINTK("pkt_handle_queue: looking for zone %llx\n", (unsigned long long)zone);
+ pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
bio = node->bio;
- VPRINTK("pkt_handle_queue: found zone=%llx\n",
- (unsigned long long)ZONE(bio->bi_sector, pd));
- if (ZONE(bio->bi_sector, pd) != zone)
+ pkt_dbg(2, pd, "found zone=%llx\n",
+ (unsigned long long)get_zone(bio->bi_sector, pd));
+ if (get_zone(bio->bi_sector, pd) != zone)
break;
pkt_rbtree_erase(pd, node);
spin_lock(&pkt->lock);
@@ -1316,7 +1319,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
BUG();
}
- VPRINTK(DRIVER_NAME": vcnt=%d\n", pkt->w_bio->bi_vcnt);
+ pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt);
/*
* Fill-in bvec with data from orig_bios.
@@ -1327,7 +1330,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
spin_unlock(&pkt->lock);
- VPRINTK("pkt_start_write: Writing %d frames for zone %llx\n",
+ pkt_dbg(2, pd, "Writing %d frames for zone %llx\n",
pkt->write_size, (unsigned long long)pkt->sector);
if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
@@ -1359,7 +1362,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
{
int uptodate;
- VPRINTK("run_state_machine: pkt %d\n", pkt->id);
+ pkt_dbg(2, pd, "pkt %d\n", pkt->id);
for (;;) {
switch (pkt->state) {
@@ -1398,7 +1401,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
if (pkt_start_recovery(pkt)) {
pkt_start_write(pd, pkt);
} else {
- VPRINTK("No recovery possible\n");
+ pkt_dbg(2, pd, "No recovery possible\n");
pkt_set_state(pkt, PACKET_FINISHED_STATE);
}
break;
@@ -1419,8 +1422,6 @@ static void pkt_handle_packets(struct pktcdvd_device *pd)
{
struct packet_data *pkt, *next;
- VPRINTK("pkt_handle_packets\n");
-
/*
* Run state machine for active packets
*/
@@ -1502,9 +1503,9 @@ static int kcdrwd(void *foobar)
if (PACKET_DEBUG > 1) {
int states[PACKET_NUM_STATES];
pkt_count_states(pd, states);
- VPRINTK("kcdrwd: i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
- states[0], states[1], states[2], states[3],
- states[4], states[5]);
+ pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
+ states[0], states[1], states[2],
+ states[3], states[4], states[5]);
}
min_sleep_time = MAX_SCHEDULE_TIMEOUT;
@@ -1513,9 +1514,9 @@ static int kcdrwd(void *foobar)
min_sleep_time = pkt->sleep_time;
}
- VPRINTK("kcdrwd: sleeping\n");
+ pkt_dbg(2, pd, "sleeping\n");
residue = schedule_timeout(min_sleep_time);
- VPRINTK("kcdrwd: wake up\n");
+ pkt_dbg(2, pd, "wake up\n");
/* make swsusp happy with our thread */
try_to_freeze();
@@ -1563,9 +1564,10 @@ work_to_do:
static void pkt_print_settings(struct pktcdvd_device *pd)
{
- printk(DRIVER_NAME": %s packets, ", pd->settings.fp ? "Fixed" : "Variable");
- printk("%u blocks, ", pd->settings.size >> 2);
- printk("Mode-%c disc\n", pd->settings.block_mode == 8 ? '1' : '2');
+ pkt_info(pd, "%s packets, %u blocks, Mode-%c disc\n",
+ pd->settings.fp ? "Fixed" : "Variable",
+ pd->settings.size >> 2,
+ pd->settings.block_mode == 8 ? '1' : '2');
}
static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
@@ -1699,7 +1701,7 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
cgc.sense = &sense;
if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
- pkt_dump_sense(&cgc);
+ pkt_dump_sense(pd, &cgc);
return ret;
}
@@ -1714,7 +1716,7 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
cgc.sense = &sense;
if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
- pkt_dump_sense(&cgc);
+ pkt_dump_sense(pd, &cgc);
return ret;
}
@@ -1749,14 +1751,14 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
/*
* paranoia
*/
- printk(DRIVER_NAME": write mode wrong %d\n", wp->data_block_type);
+ pkt_err(pd, "write mode wrong %d\n", wp->data_block_type);
return 1;
}
wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
cgc.buflen = cgc.cmd[8] = size;
if ((ret = pkt_mode_select(pd, &cgc))) {
- pkt_dump_sense(&cgc);
+ pkt_dump_sense(pd, &cgc);
return ret;
}
@@ -1793,7 +1795,7 @@ static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
if (ti->rt == 1 && ti->blank == 0)
return 1;
- printk(DRIVER_NAME": bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
+ pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
return 0;
}
@@ -1811,7 +1813,8 @@ static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
case 0x12: /* DVD-RAM */
return 1;
default:
- VPRINTK(DRIVER_NAME": Wrong disc profile (%x)\n", pd->mmc3_profile);
+ pkt_dbg(2, pd, "Wrong disc profile (%x)\n",
+ pd->mmc3_profile);
return 0;
}
@@ -1820,22 +1823,22 @@ static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
* but i'm not sure, should we leave this to user apps? probably.
*/
if (di->disc_type == 0xff) {
- printk(DRIVER_NAME": Unknown disc. No track?\n");
+ pkt_notice(pd, "unknown disc - no track?\n");
return 0;
}
if (di->disc_type != 0x20 && di->disc_type != 0) {
- printk(DRIVER_NAME": Wrong disc type (%x)\n", di->disc_type);
+ pkt_err(pd, "wrong disc type (%x)\n", di->disc_type);
return 0;
}
if (di->erasable == 0) {
- printk(DRIVER_NAME": Disc not erasable\n");
+ pkt_notice(pd, "disc not erasable\n");
return 0;
}
if (di->border_status == PACKET_SESSION_RESERVED) {
- printk(DRIVER_NAME": Can't write to last track (reserved)\n");
+ pkt_err(pd, "can't write to last track (reserved)\n");
return 0;
}
@@ -1860,7 +1863,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
memset(&ti, 0, sizeof(track_information));
if ((ret = pkt_get_disc_info(pd, &di))) {
- printk("failed get_disc\n");
+ pkt_err(pd, "failed get_disc\n");
return ret;
}
@@ -1871,12 +1874,12 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
if ((ret = pkt_get_track_info(pd, track, 1, &ti))) {
- printk(DRIVER_NAME": failed get_track\n");
+ pkt_err(pd, "failed get_track\n");
return ret;
}
if (!pkt_writable_track(pd, &ti)) {
- printk(DRIVER_NAME": can't write to this track\n");
+ pkt_err(pd, "can't write to this track\n");
return -EROFS;
}
@@ -1886,11 +1889,11 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
*/
pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
if (pd->settings.size == 0) {
- printk(DRIVER_NAME": detected zero packet size!\n");
+ pkt_notice(pd, "detected zero packet size!\n");
return -ENXIO;
}
if (pd->settings.size > PACKET_MAX_SECTORS) {
- printk(DRIVER_NAME": packet size is too big\n");
+ pkt_err(pd, "packet size is too big\n");
return -EROFS;
}
pd->settings.fp = ti.fp;
@@ -1932,7 +1935,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
pd->settings.block_mode = PACKET_BLOCK_MODE2;
break;
default:
- printk(DRIVER_NAME": unknown data mode\n");
+ pkt_err(pd, "unknown data mode\n");
return -EROFS;
}
return 0;
@@ -1966,10 +1969,10 @@ static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
ret = pkt_mode_select(pd, &cgc);
if (ret) {
- printk(DRIVER_NAME": write caching control failed\n");
- pkt_dump_sense(&cgc);
+ pkt_err(pd, "write caching control failed\n");
+ pkt_dump_sense(pd, &cgc);
} else if (!ret && set)
- printk(DRIVER_NAME": enabled write caching on %s\n", pd->name);
+ pkt_notice(pd, "enabled write caching\n");
return ret;
}
@@ -2005,7 +2008,7 @@ static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
sizeof(struct mode_page_header);
ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
if (ret) {
- pkt_dump_sense(&cgc);
+ pkt_dump_sense(pd, &cgc);
return ret;
}
}
@@ -2064,7 +2067,7 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
cgc.cmd[8] = 2;
ret = pkt_generic_packet(pd, &cgc);
if (ret) {
- pkt_dump_sense(&cgc);
+ pkt_dump_sense(pd, &cgc);
return ret;
}
size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
@@ -2079,16 +2082,16 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
cgc.cmd[8] = size;
ret = pkt_generic_packet(pd, &cgc);
if (ret) {
- pkt_dump_sense(&cgc);
+ pkt_dump_sense(pd, &cgc);
return ret;
}
if (!(buf[6] & 0x40)) {
- printk(DRIVER_NAME": Disc type is not CD-RW\n");
+ pkt_notice(pd, "disc type is not CD-RW\n");
return 1;
}
if (!(buf[6] & 0x4)) {
- printk(DRIVER_NAME": A1 values on media are not valid, maybe not CDRW?\n");
+ pkt_notice(pd, "A1 values on media are not valid, maybe not CDRW?\n");
return 1;
}
@@ -2108,14 +2111,14 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
*speed = us_clv_to_speed[sp];
break;
default:
- printk(DRIVER_NAME": Unknown disc sub-type %d\n",st);
+ pkt_notice(pd, "unknown disc sub-type %d\n", st);
return 1;
}
if (*speed) {
- printk(DRIVER_NAME": Max. media speed: %d\n",*speed);
+ pkt_info(pd, "maximum media speed: %d\n", *speed);
return 0;
} else {
- printk(DRIVER_NAME": Unknown speed %d for sub-type %d\n",sp,st);
+ pkt_notice(pd, "unknown speed %d for sub-type %d\n", sp, st);
return 1;
}
}
@@ -2126,7 +2129,7 @@ static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
struct request_sense sense;
int ret;
- VPRINTK(DRIVER_NAME": Performing OPC\n");
+ pkt_dbg(2, pd, "Performing OPC\n");
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
cgc.sense = &sense;
@@ -2134,7 +2137,7 @@ static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
cgc.cmd[0] = GPCMD_SEND_OPC;
cgc.cmd[1] = 1;
if ((ret = pkt_generic_packet(pd, &cgc)))
- pkt_dump_sense(&cgc);
+ pkt_dump_sense(pd, &cgc);
return ret;
}
@@ -2144,12 +2147,12 @@ static int pkt_open_write(struct pktcdvd_device *pd)
unsigned int write_speed, media_write_speed, read_speed;
if ((ret = pkt_probe_settings(pd))) {
- VPRINTK(DRIVER_NAME": %s failed probe\n", pd->name);
+ pkt_dbg(2, pd, "failed probe\n");
return ret;
}
if ((ret = pkt_set_write_settings(pd))) {
- DPRINTK(DRIVER_NAME": %s failed saving write settings\n", pd->name);
+ pkt_dbg(1, pd, "failed saving write settings\n");
return -EIO;
}
@@ -2161,26 +2164,26 @@ static int pkt_open_write(struct pktcdvd_device *pd)
case 0x13: /* DVD-RW */
case 0x1a: /* DVD+RW */
case 0x12: /* DVD-RAM */
- DPRINTK(DRIVER_NAME": write speed %ukB/s\n", write_speed);
+ pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed);
break;
default:
if ((ret = pkt_media_speed(pd, &media_write_speed)))
media_write_speed = 16;
write_speed = min(write_speed, media_write_speed * 177);
- DPRINTK(DRIVER_NAME": write speed %ux\n", write_speed / 176);
+ pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176);
break;
}
read_speed = write_speed;
if ((ret = pkt_set_speed(pd, write_speed, read_speed))) {
- DPRINTK(DRIVER_NAME": %s couldn't set write speed\n", pd->name);
+ pkt_dbg(1, pd, "couldn't set write speed\n");
return -EIO;
}
pd->write_speed = write_speed;
pd->read_speed = read_speed;
if ((ret = pkt_perform_opc(pd))) {
- DPRINTK(DRIVER_NAME": %s Optimum Power Calibration failed\n", pd->name);
+ pkt_dbg(1, pd, "Optimum Power Calibration failed\n");
}
return 0;
@@ -2205,7 +2208,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
goto out;
if ((ret = pkt_get_last_written(pd, &lba))) {
- printk(DRIVER_NAME": pkt_get_last_written failed\n");
+ pkt_err(pd, "pkt_get_last_written failed\n");
goto out_putdev;
}
@@ -2235,11 +2238,11 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
if (write) {
if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
- printk(DRIVER_NAME": not enough memory for buffers\n");
+ pkt_err(pd, "not enough memory for buffers\n");
ret = -ENOMEM;
goto out_putdev;
}
- printk(DRIVER_NAME": %lukB available on disc\n", lba << 1);
+ pkt_info(pd, "%lukB available on disc\n", lba << 1);
}
return 0;
@@ -2257,7 +2260,7 @@ out:
static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
{
if (flush && pkt_flush_cache(pd))
- DPRINTK(DRIVER_NAME": %s not flushing cache\n", pd->name);
+ pkt_dbg(1, pd, "not flushing cache\n");
pkt_lock_door(pd, 0);
@@ -2279,8 +2282,6 @@ static int pkt_open(struct block_device *bdev, fmode_t mode)
struct pktcdvd_device *pd = NULL;
int ret;
- VPRINTK(DRIVER_NAME": entering open\n");
-
mutex_lock(&pktcdvd_mutex);
mutex_lock(&ctl_mutex);
pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
@@ -2315,7 +2316,6 @@ static int pkt_open(struct block_device *bdev, fmode_t mode)
out_dec:
pd->refcnt--;
out:
- VPRINTK(DRIVER_NAME": failed open (%d)\n", ret);
mutex_unlock(&ctl_mutex);
mutex_unlock(&pktcdvd_mutex);
return ret;
@@ -2360,7 +2360,8 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
pd = q->queuedata;
if (!pd) {
- printk(DRIVER_NAME": %s incorrect request queue\n", bdevname(bio->bi_bdev, b));
+ pr_err("%s incorrect request queue\n",
+ bdevname(bio->bi_bdev, b));
goto end_io;
}
@@ -2382,20 +2383,20 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
}
if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
- printk(DRIVER_NAME": WRITE for ro device %s (%llu)\n",
- pd->name, (unsigned long long)bio->bi_sector);
+ pkt_notice(pd, "WRITE for ro device (%llu)\n",
+ (unsigned long long)bio->bi_sector);
goto end_io;
}
if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
- printk(DRIVER_NAME": wrong bio size\n");
+ pkt_err(pd, "wrong bio size\n");
goto end_io;
}
blk_queue_bounce(q, &bio);
- zone = ZONE(bio->bi_sector, pd);
- VPRINTK("pkt_make_request: start = %6llx stop = %6llx\n",
+ zone = get_zone(bio->bi_sector, pd);
+ pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
(unsigned long long)bio->bi_sector,
(unsigned long long)bio_end_sector(bio));
@@ -2405,7 +2406,7 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
sector_t last_zone;
int first_sectors;
- last_zone = ZONE(bio_end_sector(bio) - 1, pd);
+ last_zone = get_zone(bio_end_sector(bio) - 1, pd);
if (last_zone != zone) {
BUG_ON(last_zone != zone + pd->settings.size);
first_sectors = last_zone - bio->bi_sector;
@@ -2500,7 +2501,7 @@ static int pkt_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
struct bio_vec *bvec)
{
struct pktcdvd_device *pd = q->queuedata;
- sector_t zone = ZONE(bmd->bi_sector, pd);
+ sector_t zone = get_zone(bmd->bi_sector, pd);
int used = ((bmd->bi_sector - zone) << 9) + bmd->bi_size;
int remaining = (pd->settings.size << 9) - used;
int remaining2;
@@ -2609,7 +2610,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
struct block_device *bdev;
if (pd->pkt_dev == dev) {
- printk(DRIVER_NAME": Recursive setup not allowed\n");
+ pkt_err(pd, "recursive setup not allowed\n");
return -EBUSY;
}
for (i = 0; i < MAX_WRITERS; i++) {
@@ -2617,11 +2618,12 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
if (!pd2)
continue;
if (pd2->bdev->bd_dev == dev) {
- printk(DRIVER_NAME": %s already setup\n", bdevname(pd2->bdev, b));
+ pkt_err(pd, "%s already setup\n",
+ bdevname(pd2->bdev, b));
return -EBUSY;
}
if (pd2->pkt_dev == dev) {
- printk(DRIVER_NAME": Can't chain pktcdvd devices\n");
+ pkt_err(pd, "can't chain pktcdvd devices\n");
return -EBUSY;
}
}
@@ -2644,13 +2646,13 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
atomic_set(&pd->cdrw.pending_bios, 0);
pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
if (IS_ERR(pd->cdrw.thread)) {
- printk(DRIVER_NAME": can't start kernel thread\n");
+ pkt_err(pd, "can't start kernel thread\n");
ret = -ENOMEM;
goto out_mem;
}
proc_create_data(pd->name, 0, pkt_proc, &pkt_proc_fops, pd);
- DPRINTK(DRIVER_NAME": writer %s mapped to %s\n", pd->name, bdevname(bdev, b));
+ pkt_dbg(1, pd, "writer mapped to %s\n", bdevname(bdev, b));
return 0;
out_mem:
@@ -2665,8 +2667,8 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
struct pktcdvd_device *pd = bdev->bd_disk->private_data;
int ret;
- VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd,
- MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
+ pkt_dbg(2, pd, "cmd %x, dev %d:%d\n",
+ cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
mutex_lock(&pktcdvd_mutex);
switch (cmd) {
@@ -2690,7 +2692,7 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
break;
default:
- VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd);
+ pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd);
ret = -ENOTTY;
}
mutex_unlock(&pktcdvd_mutex);
@@ -2743,7 +2745,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
if (!pkt_devs[idx])
break;
if (idx == MAX_WRITERS) {
- printk(DRIVER_NAME": max %d writers supported\n", MAX_WRITERS);
+ pr_err("max %d writers supported\n", MAX_WRITERS);
ret = -EBUSY;
goto out_mutex;
}
@@ -2818,7 +2820,7 @@ out_mem:
kfree(pd);
out_mutex:
mutex_unlock(&ctl_mutex);
- printk(DRIVER_NAME": setup of pktcdvd device failed\n");
+ pr_err("setup of pktcdvd device failed\n");
return ret;
}
@@ -2839,7 +2841,7 @@ static int pkt_remove_dev(dev_t pkt_dev)
break;
}
if (idx == MAX_WRITERS) {
- DPRINTK(DRIVER_NAME": dev not setup\n");
+ pr_debug("dev not setup\n");
ret = -ENXIO;
goto out;
}
@@ -2859,7 +2861,7 @@ static int pkt_remove_dev(dev_t pkt_dev)
blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);
remove_proc_entry(pd->name, pkt_proc);
- DPRINTK(DRIVER_NAME": writer %s unmapped\n", pd->name);
+ pkt_dbg(1, pd, "writer unmapped\n");
del_gendisk(pd->disk);
blk_cleanup_queue(pd->disk->queue);
@@ -2969,7 +2971,7 @@ static int __init pkt_init(void)
ret = register_blkdev(pktdev_major, DRIVER_NAME);
if (ret < 0) {
- printk(DRIVER_NAME": Unable to register block device\n");
+ pr_err("unable to register block device\n");
goto out2;
}
if (!pktdev_major)
@@ -2983,7 +2985,7 @@ static int __init pkt_init(void)
ret = misc_register(&pkt_misc);
if (ret) {
- printk(DRIVER_NAME": Unable to register misc device\n");
+ pr_err("unable to register misc device\n");
goto out_misc;
}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 4ad2ad9a5bb0..b22a7d0fe5b7 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -397,15 +397,19 @@ static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);
-static struct bus_attribute rbd_bus_attrs[] = {
- __ATTR(add, S_IWUSR, NULL, rbd_add),
- __ATTR(remove, S_IWUSR, NULL, rbd_remove),
- __ATTR_NULL
+static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
+static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
+
+static struct attribute *rbd_bus_attrs[] = {
+ &bus_attr_add.attr,
+ &bus_attr_remove.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(rbd_bus);
static struct bus_type rbd_bus_type = {
.name = "rbd",
- .bus_attrs = rbd_bus_attrs,
+ .bus_groups = rbd_bus_groups,
};
static void rbd_root_dev_release(struct device *dev)
@@ -1557,11 +1561,12 @@ rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
obj_request, obj_request->img_request, obj_request->result,
xferred, length);
/*
- * ENOENT means a hole in the image. We zero-fill the
- * entire length of the request. A short read also implies
- * zero-fill to the end of the request. Either way we
- * update the xferred count to indicate the whole request
- * was satisfied.
+ * ENOENT means a hole in the image. We zero-fill the entire
+ * length of the request. A short read also implies zero-fill
+ * to the end of the request. An error requires the whole
+ * length of the request to be reported finished with an error
+ * to the block layer. In each case we update the xferred
+ * count to indicate the whole request was satisfied.
*/
rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
if (obj_request->result == -ENOENT) {
@@ -1570,14 +1575,13 @@ rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
else
zero_pages(obj_request->pages, 0, length);
obj_request->result = 0;
- obj_request->xferred = length;
} else if (xferred < length && !obj_request->result) {
if (obj_request->type == OBJ_REQUEST_BIO)
zero_bio_chain(obj_request->bio_list, xferred);
else
zero_pages(obj_request->pages, xferred, length);
- obj_request->xferred = length;
}
+ obj_request->xferred = length;
obj_request_done_set(obj_request);
}
@@ -2163,9 +2167,9 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
struct rbd_obj_request *obj_request = NULL;
struct rbd_obj_request *next_obj_request;
bool write_request = img_request_write_test(img_request);
- struct bio *bio_list = 0;
+ struct bio *bio_list = NULL;
unsigned int bio_offset = 0;
- struct page **pages = 0;
+ struct page **pages = NULL;
u64 img_offset;
u64 resid;
u16 opcode;
@@ -2203,6 +2207,11 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
rbd_segment_name_free(object_name);
if (!obj_request)
goto out_unwind;
+ /*
+ * set obj_request->img_request before creating the
+ * osd_request so that it gets the right snapc
+ */
+ rbd_img_obj_request_add(img_request, obj_request);
if (type == OBJ_REQUEST_BIO) {
unsigned int clone_size;
@@ -2244,11 +2253,6 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
obj_request->pages, length,
offset & ~PAGE_MASK, false, false);
- /*
- * set obj_request->img_request before formatting
- * the osd_request so that it gets the right snapc
- */
- rbd_img_obj_request_add(img_request, obj_request);
if (write_request)
rbd_osd_req_format_write(obj_request);
else
@@ -3702,12 +3706,14 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
if (ret < sizeof (size_buf))
return -ERANGE;
- if (order)
+ if (order) {
*order = size_buf.order;
+ dout(" order %u", (unsigned int)*order);
+ }
*snap_size = le64_to_cpu(size_buf.size);
- dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n",
- (unsigned long long)snap_id, (unsigned int)*order,
+ dout(" snap_id 0x%016llx snap_size = %llu\n",
+ (unsigned long long)snap_id,
(unsigned long long)*snap_size);
return 0;
@@ -5126,7 +5132,7 @@ static ssize_t rbd_remove(struct bus_type *bus,
bool already = false;
int ret;
- ret = strict_strtoul(buf, 10, &ul);
+ ret = kstrtoul(buf, 10, &ul);
if (ret)
return ret;
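For context on the rbd_bus_type change above: ATTRIBUTE_GROUPS(rbd_bus) is a <linux/sysfs.h> helper that builds the rbd_bus_groups array referenced by .bus_groups from the rbd_bus_attrs list. Its expansion is roughly:

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
};

static const struct attribute_group *rbd_bus_groups[] = {
	&rbd_bus_group,
	NULL,
};

This is the device-core shift from raw bus_attrs arrays to attribute groups; the BUS_ATTR() definitions supply the individual struct bus_attribute entries whose .attr members populate rbd_bus_attrs.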
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 8ed6ccb748cf..b02d53a399f3 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -924,7 +924,6 @@ static int swim_probe(struct platform_device *dev)
return 0;
out_kfree:
- platform_set_drvdata(dev, NULL);
kfree(swd);
out_iounmap:
iounmap(swim_base);
@@ -962,7 +961,6 @@ static int swim_remove(struct platform_device *dev)
if (res)
release_mem_region(res->start, resource_size(res));
- platform_set_drvdata(dev, NULL);
kfree(swd);
return 0;
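The two platform_set_drvdata(dev, NULL) deletions in swim.c are pure cleanup: the driver core now clears a device's drvdata itself whenever the device is unbound or probe fails, so resetting it by hand on the error and remove paths is redundant.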
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index fe5c3cd10c34..c2014a0aa206 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -620,7 +620,7 @@ static void backend_changed(struct xenbus_watch *watch,
}
/* Front end dir is a number, which is used as the handle. */
- err = strict_strtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
+ err = kstrtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
if (err)
return;
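Finally, the strict_strtoul to kstrtoul switches here and in rbd_remove() above are drop-in replacements: strict_strtoul had been a deprecated alias for kstrtoul for several releases before its removal. The replacement's contract, sketched against the xenbus call site:

/* int kstrtoul(const char *s, unsigned int base, unsigned long *res);
 * base 0 auto-detects decimal, octal (0) and hex (0x) prefixes;
 * returns 0 on success, -EINVAL or -ERANGE on malformed input. */
unsigned long handle;
int err = kstrtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
if (err)
	return;	/* malformed frontend directory name */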