Diffstat (limited to 'drivers/scsi/sg.c')
-rw-r--r--	drivers/scsi/sg.c	31
1 file changed, 8 insertions(+), 23 deletions(-)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index cd2fdac000c9..8a254bb46a9b 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1103,15 +1103,6 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
 	case SCSI_IOCTL_SEND_COMMAND:
 		if (atomic_read(&sdp->detaching))
 			return -ENODEV;
-		if (read_only) {
-			unsigned char opcode = WRITE_6;
-			Scsi_Ioctl_Command __user *siocp = p;
-
-			if (copy_from_user(&opcode, siocp->data, 1))
-				return -EFAULT;
-			if (sg_allow_access(filp, &opcode))
-				return -EPERM;
-		}
 		return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p);
 	case SG_SET_DEBUG:
 		result = get_user(val, ip);
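
Note: the read_only filter deleted above duplicated a check that sg_scsi_ioctl() already performs. filp->f_mode is passed through, and inside sg_scsi_ioctl() the copied-in CDB is vetted roughly like this (a sketch of the in-kernel check, for orientation only):

	err = blk_verify_command(req->cmd, mode);	/* -EPERM for opcodes disallowed in this open mode */
	if (err)
		goto error;
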
@@ -1741,15 +1732,11 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
 	 *
 	 * With scsi-mq enabled, there are a fixed number of preallocated
 	 * requests equal in number to shost->can_queue. If all of the
-	 * preallocated requests are already in use, then using GFP_ATOMIC with
-	 * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL
-	 * will cause blk_get_request() to sleep until an active command
-	 * completes, freeing up a request. Neither option is ideal, but
-	 * GFP_KERNEL is the better choice to prevent userspace from getting an
-	 * unexpected EWOULDBLOCK.
-	 *
-	 * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
-	 * does not sleep except under memory pressure.
+	 * preallocated requests are already in use, then blk_get_request()
+	 * will sleep until an active command completes, freeing up a request.
+	 * Although waiting in an asynchronous interface is less than ideal, we
+	 * do not want to use BLK_MQ_REQ_NOWAIT here because userspace might
+	 * not expect an EWOULDBLOCK from this condition.
 	 */
 	rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
 			REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
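
For contrast with the comment above: the rejected non-blocking alternative would have looked roughly like this (an illustrative sketch, not part of the patch):

	rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
			REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
			BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq))
		return PTR_ERR(rq);	/* -EWOULDBLOCK when no preallocated request is free */

Passing 0 for the flags instead, as the patch does, lets blk_get_request() sleep until a request frees up, so the asynchronous write()/SG_IO path never surprises userspace with EWOULDBLOCK.
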
@@ -1888,7 +1875,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
 	int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
 	int sg_tablesize = sfp->parentdp->sg_tablesize;
 	int blk_size = buff_size, order;
-	gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
+	gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN | __GFP_ZERO;
 	struct sg_device *sdp = sfp->parentdp;
 
 	if (blk_size < 0)
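
Folding __GFP_ZERO into gfp_mask unconditionally means every scatter-gather page starts out zeroed. These buffers can reach userspace via read() or mmap() on /dev/sg*, and a transfer shorter than the buffer would otherwise hand back stale kernel heap contents, so zeroing for privileged and unprivileged callers alike (the conditional removed in the next hunk) is the safer choice.
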
@@ -1918,9 +1905,6 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
 	if (sdp->device->host->unchecked_isa_dma)
 		gfp_mask |= GFP_DMA;
 
-	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
-		gfp_mask |= __GFP_ZERO;
-
 	order = get_order(num);
 retry:
 	ret_sz = 1 << (PAGE_SHIFT + order);
@@ -1931,7 +1915,7 @@ retry:
 		num = (rem_sz > scatter_elem_sz_prev) ?
 			scatter_elem_sz_prev : rem_sz;
 
-		schp->pages[k] = alloc_pages(gfp_mask | __GFP_ZERO, order);
+		schp->pages[k] = alloc_pages(gfp_mask, order);
 		if (!schp->pages[k])
 			goto out;
 
@@ -2185,6 +2169,7 @@ sg_add_sfp(Sg_device * sdp)
 	write_lock_irqsave(&sdp->sfd_lock, iflags);
 	if (atomic_read(&sdp->detaching)) {
 		write_unlock_irqrestore(&sdp->sfd_lock, iflags);
+		kfree(sfp);
 		return ERR_PTR(-ENODEV);
 	}
 	list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
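
Without the added kfree(), the freshly allocated sfp would leak every time the device detached between allocation and the sfd_lock critical section, since this early return bypasses the normal teardown. The fixed error path has roughly this shape (the kzalloc() from earlier in sg_add_sfp() is shown for context):

	sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
	if (!sfp)
		return ERR_PTR(-ENOMEM);
	/* ... initialize sfp, then publish it under sfd_lock ... */
	write_lock_irqsave(&sdp->sfd_lock, iflags);
	if (atomic_read(&sdp->detaching)) {
		write_unlock_irqrestore(&sdp->sfd_lock, iflags);
		kfree(sfp);	/* undo the allocation before bailing out */
		return ERR_PTR(-ENODEV);
	}
	list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
	write_unlock_irqrestore(&sdp->sfd_lock, iflags);
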