author      Nicholas Bellinger <nab@linux-iscsi.org>    2011-08-13 13:11:38 +0400
committer   Nicholas Bellinger <nab@linux-iscsi.org>    2011-08-22 23:26:27 +0400
commit      525a48a21da259d00d6ebc5b60563b5bcf022c26 (patch)
tree        79843b0aacbef9b130eec8f5352fe92b6422ab18 /drivers/target
parent      01cde4d54327884a0b61ce8666092f5703557d4b (diff)
download    linux-525a48a21da259d00d6ebc5b60563b5bcf022c26.tar.xz
target: Fix task count > 1 handling breakage and use max_sector page alignment
This patch addresses recent breakage with multiple se_task (task_count > 1)
operation, which occurs when a command exceeds the backend
dev->se_sub_dev->se_dev_attrib.max_sectors limit, in the new
transport_allocate_data_tasks() code. The initial bug was a bogus
task->task_sg_nents assignment in transport_allocate_data_tasks() based on
the passed parameter; the code now uses DIV_ROUND_UP(task_size, PAGE_SIZE) to
determine the proper number of per-task SGL entries for the (task_count > 1)
case.
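
For illustration, a minimal userspace sketch of the per-task SGL entry math
(the DIV_ROUND_UP macro is reproduced from the kernel; the 4K PAGE_SIZE and
the sample task size are assumptions for the example):

    #include <stdio.h>

    /* Same round-up division as the kernel's DIV_ROUND_UP() macro */
    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
    #define PAGE_SIZE           4096    /* assumed 4K pages for this sketch */

    int main(void)
    {
        /* e.g. a task covering 248 sectors of 512 bytes = 126976 bytes */
        unsigned int task_size = 248 * 512;

        /* 126976 / 4096 = 31 pages, so 31 per-task SGL entries */
        printf("task_sg_nents = %u\n", DIV_ROUND_UP(task_size, PAGE_SIZE));
        return 0;
    }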
This also means we now need to enforce a PAGE_SIZE aligned max_sectors value
for this to work as expected, without bringing back the pre-v3.1
transport_map_mem_to_sg() logic that handled SGL offsets across multiple tasks.
So this patch adds se_dev_align_max_sectors() to round down max_sectors as
necessary to ensure this alignment, calls it from se_dev_set_default_attribs()
and se_dev_set_max_sectors(), and keeps things simple for (task_count > 1)
operation.
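
A minimal sketch of that rounding, mirroring the arithmetic of the new
se_dev_align_max_sectors() shown in the diff below (standalone userspace
code; the helper name align_max_sectors and the 4K PAGE_SIZE are assumptions
for the example):

    #include <stdio.h>

    #define PAGE_SIZE  4096  /* assumed 4K pages */

    /* Mirrors se_dev_align_max_sectors(): round the byte length down to a
     * PAGE_SIZE multiple, then convert back to a sector count. */
    static unsigned int align_max_sectors(unsigned int max_sectors,
                                          unsigned int block_size)
    {
        unsigned int bytes = max_sectors * block_size;
        unsigned int aligned = (bytes / PAGE_SIZE) * PAGE_SIZE; /* rounddown() */
        return aligned / block_size;
    }

    int main(void)
    {
        /* 255 * 512 = 130560 bytes -> rounded down to 126976 -> 248 sectors */
        printf("%u\n", align_max_sectors(255, 512));   /* prints 248 */
        /* an already page-aligned value passes through unchanged */
        printf("%u\n", align_max_sectors(1024, 512));  /* prints 1024 */
        return 0;
    }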
So far this bugfix has been tested with (task_count > 1) operation
using iscsi-target and iblock backends.
Reported-by: Chris Boot <bootc@bootc.net>
Cc: Kiran Patil <kiran.patil@intel.com>
Cc: Andy Grover <agrover@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Diffstat (limited to 'drivers/target')
-rw-r--r--   drivers/target/target_core_device.c      28
-rw-r--r--   drivers/target/target_core_transport.c    7
2 files changed, 33 insertions(+), 2 deletions(-)
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index ec3fbcda3e3c..4b5237f500a5 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -839,6 +839,24 @@ int se_dev_check_shutdown(struct se_device *dev)
 	return ret;
 }
 
+u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
+{
+	u32 tmp, aligned_max_sectors;
+	/*
+	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
+	 * transport_allocate_data_tasks() operation.
+	 */
+	tmp = rounddown((max_sectors * block_size), PAGE_SIZE);
+	aligned_max_sectors = (tmp / block_size);
+	if (max_sectors != aligned_max_sectors) {
+		printk(KERN_INFO "Rounding down aligned max_sectors from %u"
+			" to %u\n", max_sectors, aligned_max_sectors);
+		return aligned_max_sectors;
+	}
+
+	return max_sectors;
+}
+
 void se_dev_set_default_attribs(
 	struct se_device *dev,
 	struct se_dev_limits *dev_limits)
@@ -878,6 +896,11 @@ void se_dev_set_default_attribs(
 	 * max_sectors is based on subsystem plugin dependent requirements.
 	 */
 	dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
+	/*
+	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
+	 */
+	limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors,
+						limits->logical_block_size);
 	dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
 	/*
 	 * Set optimal_sectors from max_sectors, which can be lowered via
@@ -1242,6 +1265,11 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
 			return -EINVAL;
 		}
 	}
+	/*
+	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
+	 */
+	max_sectors = se_dev_align_max_sectors(max_sectors,
+				dev->se_sub_dev->se_dev_attrib.block_size);
 	dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
 
 	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index ab61c5550852..efac6a9a7438 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -4126,7 +4126,11 @@ static int transport_allocate_data_tasks(
 
 		/* Update new cdb with updated lba/sectors */
 		cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb);
-
+		/*
+		 * This now assumes that passed sg_ents are in PAGE_SIZE chunks
+		 * in order to calculate the number per task SGL entries
+		 */
+		task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE);
 		/*
 		 * Check if the fabric module driver is requesting that all
 		 * struct se_task->task_sg[] be chained together..  If so,
@@ -4136,7 +4140,6 @@ static int transport_allocate_data_tasks(
 		 * It's so much easier and only a waste when task_count > 1.
 		 * That is extremely rare.
 		 */
-		task->task_sg_nents = sgl_nents;
 		if (cmd->se_tfo->task_sg_chaining) {
 			task->task_sg_nents++;
 			task->task_padded_sg = 1;
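
To see why the two changes belong together, here is a standalone sketch
(invented values, 4K pages assumed) of splitting a command at a page-aligned
max_sectors: every task except possibly the last then covers a whole number
of pages, so DIV_ROUND_UP(task_size, PAGE_SIZE) is exact and no cross-task
SGL offset handling is needed:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
    #define PAGE_SIZE           4096

    int main(void)
    {
        unsigned int block_size = 512;
        unsigned int max_sectors = 248;   /* page-aligned: 248 * 512 = 31 pages */
        unsigned int remaining = 1000;    /* total command length in sectors */
        unsigned int task_no = 0;

        while (remaining) {
            unsigned int task_sectors = remaining > max_sectors ? max_sectors
                                                                : remaining;
            unsigned int task_size = task_sectors * block_size;

            /* 4 tasks of 31 SGL entries each, then one 8-sector (1 page) tail */
            printf("task %u: %u sectors, %u sg entries\n", task_no++,
                   task_sectors, DIV_ROUND_UP(task_size, PAGE_SIZE));
            remaining -= task_sectors;
        }
        return 0;
    }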