author     Brice Goglin <Brice.Goglin@inria.fr>    2013-08-02 23:18:03 +0400
committer  Dan Williams <djbw@fb.com>              2013-08-23 09:57:39 +0400
commit     c4dcf0e2dd7e06db0c5c3f396b2e2b9ce1f6d19f (patch)
tree       8a3d52af6e05bfe2703188a71c8c01b9bc4ba95f /drivers/dma/ioat/dma_v3.c
parent     e03bc654f85604bcd5304debb597f398d1d03778 (diff)
download   linux-c4dcf0e2dd7e06db0c5c3f396b2e2b9ce1f6d19f.tar.xz
ioatdma: disable RAID on non-Atom platforms and reenable unaligned copies
Disable RAID on non-Atom platforms and remove related fixups such as the 64-byte alignment restriction on legacy DMA operations (introduced in commit f26df1a1 as a workaround for silicon errata).

Signed-off-by: Brice Goglin <Brice.Goglin@inria.fr>
Acked-by: Dave Jiang <dave.jiang@intel.com>
Acked-by: Jon Mason <jon.mason@intel.com>
Signed-off-by: Dan Williams <djbw@fb.com>
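For reference, the functional core of the change, condensed from the hunks below using the driver's existing identifiers, is that the RAID capability bits are now cleared for the 32-bit Xeon CB devices as well as the Broadwell-DE no-RAID case, rather than applying per-operation alignment fixups:

	device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);

	/* RAID engines are disabled outright on non-Atom parts; the old
	 * 64-byte copy/xor/pq alignment workarounds are no longer needed.
	 */
	if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
		device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);

With XOR/PQ/RAID16SS masked off for these devices, the later per-platform alignment assignments and the DMA_XOR_VAL/DMA_PQ_VAL clearing for is_xeon_cb32() no longer have any effect, which is why they are deleted below.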
Diffstat (limited to 'drivers/dma/ioat/dma_v3.c')
-rw-r--r--  drivers/dma/ioat/dma_v3.c  24
1 file changed, 1 insertion(+), 23 deletions(-)
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index b642e035579b..c94e0d210667 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -1775,15 +1775,12 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
dma->device_free_chan_resources = ioat2_free_chan_resources;
- if (is_xeon_cb32(pdev))
- dma->copy_align = 6;
-
dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
- if (is_bwd_noraid(pdev))
+ if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
/* dca is incompatible with raid operations */
@@ -1793,7 +1790,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (device->cap & IOAT_CAP_XOR) {
is_raid_device = true;
dma->max_xor = 8;
- dma->xor_align = 6;
dma_cap_set(DMA_XOR, dma->cap_mask);
dma->device_prep_dma_xor = ioat3_prep_xor;
@@ -1812,13 +1808,8 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (device->cap & IOAT_CAP_RAID16SS) {
dma_set_maxpq(dma, 16, 0);
- dma->pq_align = 0;
} else {
dma_set_maxpq(dma, 8, 0);
- if (is_xeon_cb32(pdev))
- dma->pq_align = 6;
- else
- dma->pq_align = 0;
}
if (!(device->cap & IOAT_CAP_XOR)) {
@@ -1829,13 +1820,8 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (device->cap & IOAT_CAP_RAID16SS) {
dma->max_xor = 16;
- dma->xor_align = 0;
} else {
dma->max_xor = 8;
- if (is_xeon_cb32(pdev))
- dma->xor_align = 6;
- else
- dma->xor_align = 0;
}
}
}
@@ -1844,14 +1830,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
device->cleanup_fn = ioat3_cleanup_event;
device->timer_fn = ioat3_timer_event;
- if (is_xeon_cb32(pdev)) {
- dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
- dma->device_prep_dma_xor_val = NULL;
-
- dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
- dma->device_prep_dma_pq_val = NULL;
- }
-
/* starting with CB3.3 super extended descriptors are supported */
if (device->cap & IOAT_CAP_RAID16SS) {
char pool_name[14];