Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/DAC960.c           4
-rw-r--r--  drivers/block/aoe/aoecmd.c      41
-rw-r--r--  drivers/block/drbd/drbd_main.c   2
-rw-r--r--  drivers/block/rbd.c             50
-rw-r--r--  drivers/block/virtio_blk.c      10
-rw-r--r--  drivers/block/zram/zcomp.c      76
-rw-r--r--  drivers/block/zram/zcomp.h       5
-rw-r--r--  drivers/block/zram/zram_drv.c   20
8 files changed, 87 insertions, 121 deletions
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 811e11c82f32..0809cda93cc0 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -2954,7 +2954,7 @@ DAC960_DetectController(struct pci_dev *PCI_Device,
case DAC960_PD_Controller:
if (!request_region(Controller->IO_Address, 0x80,
Controller->FullModelName)) {
- DAC960_Error("IO port 0x%d busy for Controller at\n",
+ DAC960_Error("IO port 0x%lx busy for Controller at\n",
Controller, Controller->IO_Address);
goto Failure;
}
@@ -2990,7 +2990,7 @@ DAC960_DetectController(struct pci_dev *PCI_Device,
case DAC960_P_Controller:
if (!request_region(Controller->IO_Address, 0x80,
Controller->FullModelName)){
- DAC960_Error("IO port 0x%d busy for Controller at\n",
+ DAC960_Error("IO port 0x%lx busy for Controller at\n",
Controller, Controller->IO_Address);
goto Failure;
}
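
The two hunks above fix a printk-style format mismatch: Controller->IO_Address
is an unsigned long, while %d consumes an int, so the old message printed
garbage. %lx matches the type and shows the port in hex, the usual convention
for I/O addresses. A minimal userspace sketch of the same rule (not DAC960
code):

#include <stdio.h>

int main(void)
{
    unsigned long io_address = 0x3f0;

    /* Wrong: %d consumes an int; passing an unsigned long is
     * undefined behavior and typically prints garbage. */
    /* printf("IO port 0x%d busy\n", io_address); */

    /* Right: %lx matches unsigned long and prints hex. */
    printf("IO port 0x%lx busy\n", io_address);
    return 0;
}
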
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index ab19adb07a12..3c606c09fd5a 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -853,45 +853,6 @@ rqbiocnt(struct request *r)
return n;
}
-/* This can be removed if we are certain that no users of the block
- * layer will ever use zero-count pages in bios. Otherwise we have to
- * protect against the put_page sometimes done by the network layer.
- *
- * See http://oss.sgi.com/archives/xfs/2007-01/msg00594.html for
- * discussion.
- *
- * We cannot use get_page in the workaround, because it insists on a
- * positive page count as a precondition. So we use _refcount directly.
- */
-static void
-bio_pageinc(struct bio *bio)
-{
- struct bio_vec bv;
- struct page *page;
- struct bvec_iter iter;
-
- bio_for_each_segment(bv, bio, iter) {
- /* Non-zero page count for non-head members of
- * compound pages is no longer allowed by the kernel.
- */
- page = compound_head(bv.bv_page);
- page_ref_inc(page);
- }
-}
-
-static void
-bio_pagedec(struct bio *bio)
-{
- struct page *page;
- struct bio_vec bv;
- struct bvec_iter iter;
-
- bio_for_each_segment(bv, bio, iter) {
- page = compound_head(bv.bv_page);
- page_ref_dec(page);
- }
-}
-
static void
bufinit(struct buf *buf, struct request *rq, struct bio *bio)
{
@@ -899,7 +860,6 @@ bufinit(struct buf *buf, struct request *rq, struct bio *bio)
buf->rq = rq;
buf->bio = bio;
buf->iter = bio->bi_iter;
- bio_pageinc(bio);
}
static struct buf *
@@ -1127,7 +1087,6 @@ aoe_end_buf(struct aoedev *d, struct buf *buf)
if (buf == d->ip.buf)
d->ip.buf = NULL;
rq = buf->rq;
- bio_pagedec(buf->bio);
mempool_free(buf, d->bufpool);
n = (unsigned long) rq->special;
rq->special = (void *) --n;
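
The deleted bio_pageinc()/bio_pagedec() implemented the workaround their
comment describes: walk every segment of a bio and adjust the head page's
refcount by hand, bypassing get_page()'s positive-count precondition. For
reference, a sketch of the same bio walk (kernel context assumed; this
version only counts segments):

/* Sketch only: bio_for_each_segment() copies each segment descriptor
 * into bv while advancing iter through bio->bi_iter. */
static unsigned int count_bio_segments(struct bio *bio)
{
    struct bio_vec bv;
    struct bvec_iter iter;
    unsigned int n = 0;

    bio_for_each_segment(bv, bio, iter)
        n++;
    return n;
}
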
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 100be556e613..83482721bc01 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1871,7 +1871,7 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock,
drbd_update_congested(connection);
}
do {
- rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
+ rv = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
if (rv == -EAGAIN) {
if (we_should_drop_the_connection(connection, sock))
break;
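
The one-liner matters because drbd_send() loops on partial sends: after a
short send the iov's base and length are advanced, so the bytes still to be
written are iov.iov_len, while size keeps the original total. Passing size
again would ask the socket layer for more data than the iov now describes. A
userspace analogue of the corrected loop (send_all() is a hypothetical
helper):

#include <unistd.h>
#include <sys/types.h>

/* After a short write, pointer and remaining length advance together;
 * the original total is never passed to write() again. */
static ssize_t send_all(int fd, const char *buf, size_t len)
{
    while (len > 0) {
        ssize_t rv = write(fd, buf, len); /* len, not the total size */
        if (rv < 0)
            return rv;
        buf += rv;
        len -= rv;
    }
    return 0;
}
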
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index abb71628ab61..7b274ff4632c 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -415,15 +415,15 @@ struct rbd_device {
};
/*
- * Flag bits for rbd_dev->flags. If atomicity is required,
- * rbd_dev->lock is used to protect access.
- *
- * Currently, only the "removing" flag (which is coupled with the
- * "open_count" field) requires atomic access.
+ * Flag bits for rbd_dev->flags:
+ * - REMOVING (which is coupled with rbd_dev->open_count) is protected
+ * by rbd_dev->lock
+ * - BLACKLISTED is protected by rbd_dev->lock_rwsem
*/
enum rbd_dev_flags {
RBD_DEV_FLAG_EXISTS, /* mapped snapshot has not been deleted */
RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */
+ RBD_DEV_FLAG_BLACKLISTED, /* our ceph_client is blacklisted */
};
static DEFINE_MUTEX(client_mutex); /* Serialize client creation */
@@ -3926,6 +3926,7 @@ static void rbd_reregister_watch(struct work_struct *work)
struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
struct rbd_device, watch_dwork);
bool was_lock_owner = false;
+ bool need_to_wake = false;
int ret;
dout("%s rbd_dev %p\n", __func__, rbd_dev);
@@ -3935,19 +3936,27 @@ static void rbd_reregister_watch(struct work_struct *work)
was_lock_owner = rbd_release_lock(rbd_dev);
mutex_lock(&rbd_dev->watch_mutex);
- if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR)
- goto fail_unlock;
+ if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
+ mutex_unlock(&rbd_dev->watch_mutex);
+ goto out;
+ }
ret = __rbd_register_watch(rbd_dev);
if (ret) {
rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
- if (ret != -EBLACKLISTED)
+ if (ret == -EBLACKLISTED || ret == -ENOENT) {
+ set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
+ need_to_wake = true;
+ } else {
queue_delayed_work(rbd_dev->task_wq,
&rbd_dev->watch_dwork,
RBD_RETRY_DELAY);
- goto fail_unlock;
+ }
+ mutex_unlock(&rbd_dev->watch_mutex);
+ goto out;
}
+ need_to_wake = true;
rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
mutex_unlock(&rbd_dev->watch_mutex);
@@ -3963,13 +3972,10 @@ static void rbd_reregister_watch(struct work_struct *work)
ret);
}
+out:
up_write(&rbd_dev->lock_rwsem);
- wake_requests(rbd_dev, true);
- return;
-
-fail_unlock:
- mutex_unlock(&rbd_dev->watch_mutex);
- up_write(&rbd_dev->lock_rwsem);
+ if (need_to_wake)
+ wake_requests(rbd_dev, true);
}
/*
@@ -4074,7 +4080,9 @@ static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
up_read(&rbd_dev->lock_rwsem);
schedule();
down_read(&rbd_dev->lock_rwsem);
- } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
+ } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
+ !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags));
+
finish_wait(&rbd_dev->lock_waitq, &wait);
}
@@ -4166,8 +4174,16 @@ static void rbd_queue_workfn(struct work_struct *work)
if (must_be_locked) {
down_read(&rbd_dev->lock_rwsem);
- if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
+ if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
+ !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags))
rbd_wait_state_locked(rbd_dev);
+
+ WARN_ON((rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) ^
+ !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags));
+ if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
+ result = -EBLACKLISTED;
+ goto err_unlock;
+ }
}
img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
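
After rbd_wait_state_locked() returns, exactly one of "lock held" and
"blacklisted" is supposed to be true; the WARN_ON((locked) ^ !(blacklisted))
added above fires when both or neither hold. A tiny truth-table check of that
expression:

#include <stdio.h>

int main(void)
{
    /* warn = locked ^ !blacklisted: non-zero exactly when the two
     * conditions are equal, i.e. both true or both false. */
    for (int locked = 0; locked <= 1; locked++)
        for (int blacklisted = 0; blacklisted <= 1; blacklisted++)
            printf("locked=%d blacklisted=%d warn=%d\n",
                   locked, blacklisted, locked ^ !blacklisted);
    return 0;
}
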
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2dc5c96c186a..5545a679abd8 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -376,7 +376,7 @@ static void virtblk_config_changed(struct virtio_device *vdev)
static int init_vq(struct virtio_blk *vblk)
{
- int err = 0;
+ int err;
int i;
vq_callback_t **callbacks;
const char **names;
@@ -390,13 +390,13 @@ static int init_vq(struct virtio_blk *vblk)
if (err)
num_vqs = 1;
- vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL);
+ vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
if (!vblk->vqs)
return -ENOMEM;
- names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL);
- callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL);
- vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL);
+ names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
+ callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
+ vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
if (!names || !callbacks || !vqs) {
err = -ENOMEM;
goto out;
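
kmalloc_array() differs from the open-coded kmalloc(sizeof(x) * n) in one
way: it rejects element counts whose product would overflow, returning NULL
instead of a too-small buffer that later writes would overrun. A userspace
sketch of the same check (alloc_array() is a hypothetical stand-in):

#include <stdlib.h>
#include <stdint.h>

/* Refuse allocations whose n * size product would wrap past SIZE_MAX,
 * mirroring what kmalloc_array() does in the kernel. */
static void *alloc_array(size_t n, size_t size)
{
    if (size != 0 && n > SIZE_MAX / size)
        return NULL; /* multiplication would have wrapped */
    return malloc(n * size);
}
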
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 4b5cd3a7b2b6..12046f4f00e4 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -160,82 +160,56 @@ int zcomp_decompress(struct zcomp_strm *zstrm,
dst, &dst_len);
}
-static int __zcomp_cpu_notifier(struct zcomp *comp,
- unsigned long action, unsigned long cpu)
+int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
{
+ struct zcomp *comp = hlist_entry(node, struct zcomp, node);
struct zcomp_strm *zstrm;
- switch (action) {
- case CPU_UP_PREPARE:
- if (WARN_ON(*per_cpu_ptr(comp->stream, cpu)))
- break;
- zstrm = zcomp_strm_alloc(comp);
- if (IS_ERR_OR_NULL(zstrm)) {
- pr_err("Can't allocate a compression stream\n");
- return NOTIFY_BAD;
- }
- *per_cpu_ptr(comp->stream, cpu) = zstrm;
- break;
- case CPU_DEAD:
- case CPU_UP_CANCELED:
- zstrm = *per_cpu_ptr(comp->stream, cpu);
- if (!IS_ERR_OR_NULL(zstrm))
- zcomp_strm_free(zstrm);
- *per_cpu_ptr(comp->stream, cpu) = NULL;
- break;
- default:
- break;
+ if (WARN_ON(*per_cpu_ptr(comp->stream, cpu)))
+ return 0;
+
+ zstrm = zcomp_strm_alloc(comp);
+ if (IS_ERR_OR_NULL(zstrm)) {
+ pr_err("Can't allocate a compression stream\n");
+ return -ENOMEM;
}
- return NOTIFY_OK;
+ *per_cpu_ptr(comp->stream, cpu) = zstrm;
+ return 0;
}
-static int zcomp_cpu_notifier(struct notifier_block *nb,
- unsigned long action, void *pcpu)
+int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
- unsigned long cpu = (unsigned long)pcpu;
- struct zcomp *comp = container_of(nb, typeof(*comp), notifier);
+ struct zcomp *comp = hlist_entry(node, struct zcomp, node);
+ struct zcomp_strm *zstrm;
- return __zcomp_cpu_notifier(comp, action, cpu);
+ zstrm = *per_cpu_ptr(comp->stream, cpu);
+ if (!IS_ERR_OR_NULL(zstrm))
+ zcomp_strm_free(zstrm);
+ *per_cpu_ptr(comp->stream, cpu) = NULL;
+ return 0;
}
static int zcomp_init(struct zcomp *comp)
{
- unsigned long cpu;
int ret;
- comp->notifier.notifier_call = zcomp_cpu_notifier;
-
comp->stream = alloc_percpu(struct zcomp_strm *);
if (!comp->stream)
return -ENOMEM;
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu) {
- ret = __zcomp_cpu_notifier(comp, CPU_UP_PREPARE, cpu);
- if (ret == NOTIFY_BAD)
- goto cleanup;
- }
- __register_cpu_notifier(&comp->notifier);
- cpu_notifier_register_done();
+ ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
+ if (ret < 0)
+ goto cleanup;
return 0;
cleanup:
- for_each_online_cpu(cpu)
- __zcomp_cpu_notifier(comp, CPU_UP_CANCELED, cpu);
- cpu_notifier_register_done();
- return -ENOMEM;
+ free_percpu(comp->stream);
+ return ret;
}
void zcomp_destroy(struct zcomp *comp)
{
- unsigned long cpu;
-
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu)
- __zcomp_cpu_notifier(comp, CPU_UP_CANCELED, cpu);
- __unregister_cpu_notifier(&comp->notifier);
- cpu_notifier_register_done();
-
+ cpuhp_state_remove_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
free_percpu(comp->stream);
kfree(comp);
}
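
Both new callbacks recover their zcomp from the hlist_node the hotplug core
hands them; hlist_entry() is container_of() under the hood, stepping back
from the embedded member to the enclosing structure. A self-contained sketch
of that recovery (struct comp and struct list_node are made-up stand-ins):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct list_node { struct list_node *next; };
struct comp { int id; struct list_node node; };

int main(void)
{
    struct comp c = { .id = 42 };
    struct list_node *n = &c.node;

    /* Step back from the embedded node to the containing comp,
     * as hlist_entry(node, struct zcomp, node) does above. */
    struct comp *back = container_of(n, struct comp, node);
    printf("%d\n", back->id); /* prints 42 */
    return 0;
}
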
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
index 478cac2ed465..41c1002a7d7d 100644
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -19,11 +19,12 @@ struct zcomp_strm {
/* dynamic per-device compression frontend */
struct zcomp {
struct zcomp_strm * __percpu *stream;
- struct notifier_block notifier;
-
const char *name;
+ struct hlist_node node;
};
+int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node);
+int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node);
ssize_t zcomp_available_show(const char *comp, char *buf);
bool zcomp_available_algorithm(const char *comp);
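
These prototypes plug zcomp into the multi-instance CPU hotplug API. The
pairing, as far as it is visible in this series (kernel context assumed), is:

/*
 * driver init:   cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE,
 *                        "block/zram:prepare",
 *                        zcomp_cpu_up_prepare, zcomp_cpu_dead);
 * per instance:  cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
 *                    - runs zcomp_cpu_up_prepare() on each online CPU
 * instance gone: cpuhp_state_remove_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
 *                    - runs zcomp_cpu_dead() on each online CPU
 * driver exit:   cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
 *
 * add_instance replaces the old for_each_online_cpu() bring-up loop,
 * which is why zcomp_init() shrinks so much.
 */
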
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 04365b17ee67..15f58ab44d0b 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -30,6 +30,7 @@
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>
+#include <linux/cpuhotplug.h>
#include "zram_drv.h"
@@ -1403,7 +1404,8 @@ static ssize_t hot_remove_store(struct class *class,
zram = idr_find(&zram_index_idr, dev_id);
if (zram) {
ret = zram_remove(zram);
- idr_remove(&zram_index_idr, dev_id);
+ if (!ret)
+ idr_remove(&zram_index_idr, dev_id);
} else {
ret = -ENODEV;
}
@@ -1412,8 +1414,14 @@ static ssize_t hot_remove_store(struct class *class,
return ret ? ret : count;
}
+/*
+ * NOTE: hot_add attribute is not the usual read-only sysfs attribute. In a
+ * sense that reading from this file does alter the state of your system -- it
+ * creates a new un-initialized zram device and returns back this device's
+ * device_id (or an error code if it fails to create a new device).
+ */
static struct class_attribute zram_control_class_attrs[] = {
- __ATTR_RO(hot_add),
+ __ATTR(hot_add, 0400, hot_add_show, NULL),
__ATTR_WO(hot_remove),
__ATTR_NULL,
};
@@ -1436,15 +1444,22 @@ static void destroy_devices(void)
idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
idr_destroy(&zram_index_idr);
unregister_blkdev(zram_major, "zram");
+ cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
}
static int __init zram_init(void)
{
int ret;
+ ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
+ zcomp_cpu_up_prepare, zcomp_cpu_dead);
+ if (ret < 0)
+ return ret;
+
ret = class_register(&zram_control_class);
if (ret) {
pr_err("Unable to register zram-control class\n");
+ cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
return ret;
}
@@ -1452,6 +1467,7 @@ static int __init zram_init(void)
if (zram_major <= 0) {
pr_err("Unable to get major number\n");
class_unregister(&zram_control_class);
+ cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
return -EBUSY;
}