Diffstat (limited to 'drivers/s390')
36 files changed, 760 insertions, 472 deletions
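A note on the central allocator change in the dasd.c hunk below (a sketch, not part of the patch): dasd_kmalloc_request()/dasd_kfree_request() are removed and dasd_smalloc_request() gains a fifth parameter so callers can hand in preallocated cqr storage, typically the blk-mq per-request PDU, which this diff resizes to sizeof(struct dasd_ccw_req). The two calling styles are shown here under the assumption that the dasd_int.h/dasd_eckd.h declarations from this diff are in scope; the example_* wrappers are illustrative names only.

/*
 * Sketch only -- assumes the in-tree dasd headers as changed by this diff.
 * The example_* functions are illustrative, not part of the patch.
 */
#include <linux/blk-mq.h>
#include <linux/err.h>
#include "dasd_int.h"
#include "dasd_eckd.h"	/* DASD_ECKD_MAGIC */

/* Internal request: no blk-mq PDU, so the cqr is carved out of the
 * per-device ccw chunk together with the CCW/data area. */
static struct dasd_ccw_req *example_internal_cqr(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
	if (IS_ERR(cqr))
		return cqr;
	/* ... set up cqr->cpaddr / cqr->data ... */
	return cqr;
}

/* Block-layer request: reuse the PDU that blk-mq allocated per request
 * (tag_set.cmd_size == sizeof(struct dasd_ccw_req) after this diff). */
static struct dasd_ccw_req *example_blk_cqr(struct dasd_device *startdev,
					    struct request *req)
{
	return dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, startdev,
				    blk_mq_rq_to_pdu(req));
}

In both styles the CCW/data area still comes from the per-device ccw chunk pool and is released through dasd_sfree_request(), which after this diff frees cqr->mem_chunk rather than the cqr pointer itself.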
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 73cce3ecb97f..a9f60d0ee02e 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -41,6 +41,15 @@ #define DASD_DIAG_MOD "dasd_diag_mod" +static unsigned int queue_depth = 32; +static unsigned int nr_hw_queues = 4; + +module_param(queue_depth, uint, 0444); +MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices"); + +module_param(nr_hw_queues, uint, 0444); +MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices"); + /* * SECTION: exported variables of dasd.c */ @@ -1222,80 +1231,37 @@ static void dasd_hosts_init(struct dentry *base_dentry, device->hosts_dentry = pde; } -/* - * Allocate memory for a channel program with 'cplength' channel - * command words and 'datasize' additional space. There are two - * variantes: 1) dasd_kmalloc_request uses kmalloc to get the needed - * memory and 2) dasd_smalloc_request uses the static ccw memory - * that gets allocated for each device. - */ -struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength, - int datasize, - struct dasd_device *device) -{ - struct dasd_ccw_req *cqr; - - /* Sanity checks */ - BUG_ON(datasize > PAGE_SIZE || - (cplength*sizeof(struct ccw1)) > PAGE_SIZE); - - cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC); - if (cqr == NULL) - return ERR_PTR(-ENOMEM); - cqr->cpaddr = NULL; - if (cplength > 0) { - cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1), - GFP_ATOMIC | GFP_DMA); - if (cqr->cpaddr == NULL) { - kfree(cqr); - return ERR_PTR(-ENOMEM); - } - } - cqr->data = NULL; - if (datasize > 0) { - cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA); - if (cqr->data == NULL) { - kfree(cqr->cpaddr); - kfree(cqr); - return ERR_PTR(-ENOMEM); - } - } - cqr->magic = magic; - set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); - dasd_get_device(device); - return cqr; -} -EXPORT_SYMBOL(dasd_kmalloc_request); - -struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, - int datasize, - struct dasd_device *device) +struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize, + struct dasd_device *device, + struct dasd_ccw_req *cqr) { unsigned long flags; - struct dasd_ccw_req *cqr; - char *data; - int size; + char *data, *chunk; + int size = 0; - size = (sizeof(struct dasd_ccw_req) + 7L) & -8L; if (cplength > 0) size += cplength * sizeof(struct ccw1); if (datasize > 0) size += datasize; + if (!cqr) + size += (sizeof(*cqr) + 7L) & -8L; + spin_lock_irqsave(&device->mem_lock, flags); - cqr = (struct dasd_ccw_req *) - dasd_alloc_chunk(&device->ccw_chunks, size); + data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size); spin_unlock_irqrestore(&device->mem_lock, flags); - if (cqr == NULL) + if (!chunk) return ERR_PTR(-ENOMEM); - memset(cqr, 0, sizeof(struct dasd_ccw_req)); - data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L); - cqr->cpaddr = NULL; + if (!cqr) { + cqr = (void *) data; + data += (sizeof(*cqr) + 7L) & -8L; + } + memset(cqr, 0, sizeof(*cqr)); + cqr->mem_chunk = chunk; if (cplength > 0) { - cqr->cpaddr = (struct ccw1 *) data; - data += cplength*sizeof(struct ccw1); - memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1)); + cqr->cpaddr = data; + data += cplength * sizeof(struct ccw1); + memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1)); } - cqr->data = NULL; if (datasize > 0) { cqr->data = data; memset(cqr->data, 0, datasize); @@ -1307,33 +1273,12 @@ struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, } 
EXPORT_SYMBOL(dasd_smalloc_request); -/* - * Free memory of a channel program. This function needs to free all the - * idal lists that might have been created by dasd_set_cda and the - * struct dasd_ccw_req itself. - */ -void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) -{ - struct ccw1 *ccw; - - /* Clear any idals used for the request. */ - ccw = cqr->cpaddr; - do { - clear_normalized_cda(ccw); - } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC)); - kfree(cqr->cpaddr); - kfree(cqr->data); - kfree(cqr); - dasd_put_device(device); -} -EXPORT_SYMBOL(dasd_kfree_request); - void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) { unsigned long flags; spin_lock_irqsave(&device->mem_lock, flags); - dasd_free_chunk(&device->ccw_chunks, cqr); + dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk); spin_unlock_irqrestore(&device->mem_lock, flags); dasd_put_device(device); } @@ -1885,6 +1830,33 @@ static void __dasd_device_process_ccw_queue(struct dasd_device *device, } } +static void __dasd_process_cqr(struct dasd_device *device, + struct dasd_ccw_req *cqr) +{ + char errorstring[ERRORLENGTH]; + + switch (cqr->status) { + case DASD_CQR_SUCCESS: + cqr->status = DASD_CQR_DONE; + break; + case DASD_CQR_ERROR: + cqr->status = DASD_CQR_NEED_ERP; + break; + case DASD_CQR_CLEARED: + cqr->status = DASD_CQR_TERMINATED; + break; + default: + /* internal error 12 - wrong cqr status*/ + snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status); + dev_err(&device->cdev->dev, + "An error occurred in the DASD device driver, " + "reason=%s\n", errorstring); + BUG(); + } + if (cqr->callback) + cqr->callback(cqr, cqr->callback_data); +} + /* * the cqrs from the final queue are returned to the upper layer * by setting a dasd_block state and calling the callback function @@ -1895,40 +1867,18 @@ static void __dasd_device_process_final_queue(struct dasd_device *device, struct list_head *l, *n; struct dasd_ccw_req *cqr; struct dasd_block *block; - void (*callback)(struct dasd_ccw_req *, void *data); - void *callback_data; - char errorstring[ERRORLENGTH]; list_for_each_safe(l, n, final_queue) { cqr = list_entry(l, struct dasd_ccw_req, devlist); list_del_init(&cqr->devlist); block = cqr->block; - callback = cqr->callback; - callback_data = cqr->callback_data; - if (block) + if (!block) { + __dasd_process_cqr(device, cqr); + } else { spin_lock_bh(&block->queue_lock); - switch (cqr->status) { - case DASD_CQR_SUCCESS: - cqr->status = DASD_CQR_DONE; - break; - case DASD_CQR_ERROR: - cqr->status = DASD_CQR_NEED_ERP; - break; - case DASD_CQR_CLEARED: - cqr->status = DASD_CQR_TERMINATED; - break; - default: - /* internal error 12 - wrong cqr status*/ - snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status); - dev_err(&device->cdev->dev, - "An error occurred in the DASD device driver, " - "reason=%s\n", errorstring); - BUG(); - } - if (cqr->callback != NULL) - (callback)(cqr, callback_data); - if (block) + __dasd_process_cqr(device, cqr); spin_unlock_bh(&block->queue_lock); + } } } @@ -3041,7 +2991,6 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx, cqr->callback_data = req; cqr->status = DASD_CQR_FILLED; cqr->dq = dq; - *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)) = cqr; blk_mq_start_request(req); spin_lock(&block->queue_lock); @@ -3072,7 +3021,7 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved) unsigned long flags; int rc = 0; - cqr = *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)); + cqr = 
blk_mq_rq_to_pdu(req); if (!cqr) return BLK_EH_DONE; @@ -3174,9 +3123,9 @@ static int dasd_alloc_queue(struct dasd_block *block) int rc; block->tag_set.ops = &dasd_mq_ops; - block->tag_set.cmd_size = sizeof(struct dasd_ccw_req *); - block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES; - block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV; + block->tag_set.cmd_size = sizeof(struct dasd_ccw_req); + block->tag_set.nr_hw_queues = nr_hw_queues; + block->tag_set.queue_depth = queue_depth; block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; rc = blk_mq_alloc_tag_set(&block->tag_set); @@ -4038,7 +3987,8 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, struct ccw1 *ccw; unsigned long *idaw; - cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device); + cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device, + NULL); if (IS_ERR(cqr)) { /* internal error 13 - Allocating the RDC request failed*/ diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c index 5e963fe0e38d..e36a114354fc 100644 --- a/drivers/s390/block/dasd_alias.c +++ b/drivers/s390/block/dasd_alias.c @@ -407,9 +407,9 @@ static int read_unit_address_configuration(struct dasd_device *device, int rc; unsigned long flags; - cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, + cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, (sizeof(struct dasd_psf_prssd_data)), - device); + device, NULL); if (IS_ERR(cqr)) return PTR_ERR(cqr); cqr->startdev = device; @@ -457,7 +457,7 @@ static int read_unit_address_configuration(struct dasd_device *device, lcu->flags |= NEED_UAC_UPDATE; spin_unlock_irqrestore(&lcu->lock, flags); } - dasd_kfree_request(cqr, cqr->memdev); + dasd_sfree_request(cqr, cqr->memdev); return rc; } diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index 131f1989f6f3..e1fe02477ea8 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c @@ -536,7 +536,8 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev, /* Build the request */ datasize = sizeof(struct dasd_diag_req) + count*sizeof(struct dasd_diag_bio); - cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev); + cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev, + blk_mq_rq_to_pdu(req)); if (IS_ERR(cqr)) return cqr; diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index be208e7adcb4..bbf95b78ef5d 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -886,7 +886,7 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device, } cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */, 0, /* use rcd_buf as data ara */ - device); + device, NULL); if (IS_ERR(cqr)) { DBF_DEV_EVENT(DBF_WARNING, device, "%s", "Could not allocate RCD request"); @@ -1442,7 +1442,7 @@ static int dasd_eckd_read_features(struct dasd_device *device) cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, (sizeof(struct dasd_psf_prssd_data) + sizeof(struct dasd_rssd_features)), - device); + device, NULL); if (IS_ERR(cqr)) { DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not " "allocate initialization request"); @@ -1504,7 +1504,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device, cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ , sizeof(struct dasd_psf_ssc_data), - device); + device, NULL); if (IS_ERR(cqr)) { DBF_DEV_EVENT(DBF_WARNING, device, "%s", @@ -1815,7 
+1815,8 @@ dasd_eckd_analysis_ccw(struct dasd_device *device) cplength = 8; datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data); - cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device); + cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device, + NULL); if (IS_ERR(cqr)) return cqr; ccw = cqr->cpaddr; @@ -2092,7 +2093,8 @@ dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata, */ itcw_size = itcw_calc_size(0, count, 0); - cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev); + cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev, + NULL); if (IS_ERR(cqr)) return cqr; @@ -2186,7 +2188,7 @@ dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata, cplength += count; cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, - startdev); + startdev, NULL); if (IS_ERR(cqr)) return cqr; @@ -2332,7 +2334,7 @@ dasd_eckd_build_format(struct dasd_device *base, } /* Allocate the format ccw request. */ fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, - datasize, startdev); + datasize, startdev, NULL); if (IS_ERR(fcp)) return fcp; @@ -3103,7 +3105,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( } /* Allocate the ccw request. */ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, - startdev); + startdev, blk_mq_rq_to_pdu(req)); if (IS_ERR(cqr)) return cqr; ccw = cqr->cpaddr; @@ -3262,7 +3264,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( /* Allocate the ccw request. */ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, - startdev); + startdev, blk_mq_rq_to_pdu(req)); if (IS_ERR(cqr)) return cqr; ccw = cqr->cpaddr; @@ -3595,7 +3597,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( /* Allocate the ccw request. */ itcw_size = itcw_calc_size(0, ctidaw, 0); - cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev); + cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev, + blk_mq_rq_to_pdu(req)); if (IS_ERR(cqr)) return cqr; @@ -3862,7 +3865,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev, /* Allocate the ccw request. 
*/ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, - datasize, startdev); + datasize, startdev, blk_mq_rq_to_pdu(req)); if (IS_ERR(cqr)) return cqr; @@ -4102,7 +4105,7 @@ dasd_eckd_release(struct dasd_device *device) return -EACCES; useglobal = 0; - cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device); + cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL); if (IS_ERR(cqr)) { mutex_lock(&dasd_reserve_mutex); useglobal = 1; @@ -4157,7 +4160,7 @@ dasd_eckd_reserve(struct dasd_device *device) return -EACCES; useglobal = 0; - cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device); + cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL); if (IS_ERR(cqr)) { mutex_lock(&dasd_reserve_mutex); useglobal = 1; @@ -4211,7 +4214,7 @@ dasd_eckd_steal_lock(struct dasd_device *device) return -EACCES; useglobal = 0; - cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device); + cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL); if (IS_ERR(cqr)) { mutex_lock(&dasd_reserve_mutex); useglobal = 1; @@ -4271,7 +4274,8 @@ static int dasd_eckd_snid(struct dasd_device *device, useglobal = 0; cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, - sizeof(struct dasd_snid_data), device); + sizeof(struct dasd_snid_data), device, + NULL); if (IS_ERR(cqr)) { mutex_lock(&dasd_reserve_mutex); useglobal = 1; @@ -4331,7 +4335,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp) cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, (sizeof(struct dasd_psf_prssd_data) + sizeof(struct dasd_rssd_perf_stats_t)), - device); + device, NULL); if (IS_ERR(cqr)) { DBF_DEV_EVENT(DBF_WARNING, device, "%s", "Could not allocate initialization request"); @@ -4477,7 +4481,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp) psf1 = psf_data[1]; /* setup CCWs for PSF + RSSD */ - cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 , 0, device); + cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL); if (IS_ERR(cqr)) { DBF_DEV_EVENT(DBF_WARNING, device, "%s", "Could not allocate initialization request"); @@ -5037,7 +5041,7 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device, cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, (sizeof(struct dasd_psf_prssd_data) + sizeof(struct dasd_rssd_messages)), - device); + device, NULL); if (IS_ERR(cqr)) { DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not allocate read message buffer request"); @@ -5126,7 +5130,7 @@ static int dasd_eckd_query_host_access(struct dasd_device *device, cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, sizeof(struct dasd_psf_prssd_data) + 1, - device); + device, NULL); if (IS_ERR(cqr)) { DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not allocate read message buffer request"); @@ -5284,8 +5288,8 @@ dasd_eckd_psf_cuir_response(struct dasd_device *device, int response, int rc; cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ , - sizeof(struct dasd_psf_cuir_response), - device); + sizeof(struct dasd_psf_cuir_response), + device, NULL); if (IS_ERR(cqr)) { DBF_DEV_EVENT(DBF_WARNING, device, "%s", diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index fb2c3599d95c..6ef8714dc693 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c @@ -447,7 +447,7 @@ static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data) * is a new ccw in device->eer_cqr. Free the "old" * snss request now. 
*/ - dasd_kfree_request(cqr, device); + dasd_sfree_request(cqr, device); } /* @@ -472,8 +472,8 @@ int dasd_eer_enable(struct dasd_device *device) if (rc) goto out; - cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */, - SNSS_DATA_SIZE, device); + cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */, + SNSS_DATA_SIZE, device, NULL); if (IS_ERR(cqr)) { rc = -ENOMEM; cqr = NULL; @@ -505,7 +505,7 @@ out: spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); if (cqr) - dasd_kfree_request(cqr, device); + dasd_sfree_request(cqr, device); return rc; } @@ -528,7 +528,7 @@ void dasd_eer_disable(struct dasd_device *device) in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags); spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); if (cqr && !in_use) - dasd_kfree_request(cqr, device); + dasd_sfree_request(cqr, device); } /* @@ -561,8 +561,8 @@ static int dasd_eer_open(struct inode *inp, struct file *filp) return -EINVAL; } eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE; - eerb->buffer = kmalloc(eerb->buffer_page_count * sizeof(char *), - GFP_KERNEL); + eerb->buffer = kmalloc_array(eerb->buffer_page_count, sizeof(char *), + GFP_KERNEL); if (!eerb->buffer) { kfree(eerb); return -ENOMEM; diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index a6b132f7e869..56007a3e7f11 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -356,7 +356,8 @@ static struct dasd_ccw_req *dasd_fba_build_cp_discard( datasize = sizeof(struct DE_fba_data) + nr_ccws * (sizeof(struct LO_fba_data) + sizeof(struct ccw1)); - cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev); + cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev, + blk_mq_rq_to_pdu(req)); if (IS_ERR(cqr)) return cqr; @@ -490,7 +491,8 @@ static struct dasd_ccw_req *dasd_fba_build_cp_regular( datasize += (count - 1)*sizeof(struct LO_fba_data); } /* Allocate the ccw request. */ - cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev); + cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev, + blk_mq_rq_to_pdu(req)); if (IS_ERR(cqr)) return cqr; ccw = cqr->cpaddr; diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 96709b1a7bf8..de6b96036aa4 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -158,40 +158,33 @@ do { \ struct dasd_ccw_req { unsigned int magic; /* Eye catcher */ + int intrc; /* internal error, e.g. from start_IO */ struct list_head devlist; /* for dasd_device request queue */ struct list_head blocklist; /* for dasd_block request queue */ - - /* Where to execute what... */ struct dasd_block *block; /* the originating block device */ struct dasd_device *memdev; /* the device used to allocate this */ struct dasd_device *startdev; /* device the request is started on */ struct dasd_device *basedev; /* base device if no block->base */ void *cpaddr; /* address of ccw or tcw */ + short retries; /* A retry counter */ unsigned char cpmode; /* 0 = cmd mode, 1 = itcw */ char status; /* status of this request */ - short retries; /* A retry counter */ + char lpm; /* logical path mask */ unsigned long flags; /* flags of this request */ struct dasd_queue *dq; - - /* ... 
and how */ unsigned long starttime; /* jiffies time of request start */ unsigned long expires; /* expiration period in jiffies */ - char lpm; /* logical path mask */ void *data; /* pointer to data area */ - - /* these are important for recovering erroneous requests */ - int intrc; /* internal error, e.g. from start_IO */ struct irb irb; /* device status in case of an error */ struct dasd_ccw_req *refers; /* ERP-chain queueing. */ void *function; /* originating ERP action */ + void *mem_chunk; - /* these are for statistics only */ unsigned long buildclk; /* TOD-clock of request generation */ unsigned long startclk; /* TOD-clock of request start */ unsigned long stopclk; /* TOD-clock of request interrupt */ unsigned long endclk; /* TOD-clock of request termination */ - /* Callback that is called after reaching final status. */ void (*callback)(struct dasd_ccw_req *, void *data); void *callback_data; }; @@ -235,14 +228,6 @@ struct dasd_ccw_req { #define DASD_CQR_SUPPRESS_IL 6 /* Suppress 'Incorrect Length' error */ #define DASD_CQR_SUPPRESS_CR 7 /* Suppress 'Command Reject' error */ -/* - * There is no reliable way to determine the number of available CPUs on - * LPAR but there is no big performance difference between 1 and the - * maximum CPU number. - * 64 is a good trade off performance wise. - */ -#define DASD_NR_HW_QUEUES 64 -#define DASD_MAX_LCU_DEV 256 #define DASD_REQ_PER_DEV 4 /* Signature for error recovery functions. */ @@ -714,19 +699,10 @@ extern const struct block_device_operations dasd_device_operations; extern struct kmem_cache *dasd_page_cache; struct dasd_ccw_req * -dasd_kmalloc_request(int , int, int, struct dasd_device *); -struct dasd_ccw_req * -dasd_smalloc_request(int , int, int, struct dasd_device *); -void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *); +dasd_smalloc_request(int, int, int, struct dasd_device *, struct dasd_ccw_req *); void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *); void dasd_wakeup_cb(struct dasd_ccw_req *, void *); -static inline int -dasd_kmalloc_set_cda(struct ccw1 *ccw, void *cda, struct dasd_device *device) -{ - return set_normalized_cda(ccw, cda); -} - struct dasd_device *dasd_alloc_device(void); void dasd_free_device(struct dasd_device *); diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 0a312e450207..ed607288e696 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -51,9 +51,16 @@ static size_t dcssblk_dax_copy_from_iter(struct dax_device *dax_dev, return copy_from_iter(addr, bytes, i); } +static size_t dcssblk_dax_copy_to_iter(struct dax_device *dax_dev, + pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i) +{ + return copy_to_iter(addr, bytes, i); +} + static const struct dax_operations dcssblk_dax_ops = { .direct_access = dcssblk_dax_direct_access, .copy_from_iter = dcssblk_dax_copy_from_iter, + .copy_to_iter = dcssblk_dax_copy_to_iter, }; struct dcssblk_dev_info { @@ -231,9 +238,9 @@ dcssblk_is_continuous(struct dcssblk_dev_info *dev_info) if (dev_info->num_of_segments <= 1) return 0; - sort_list = kzalloc( - sizeof(struct segment_info) * dev_info->num_of_segments, - GFP_KERNEL); + sort_list = kcalloc(dev_info->num_of_segments, + sizeof(struct segment_info), + GFP_KERNEL); if (sort_list == NULL) return -ENOMEM; i = 0; diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c index db1fbf9b00b5..79eb60958015 100644 --- a/drivers/s390/char/keyboard.c +++ b/drivers/s390/char/keyboard.c @@ -78,7 +78,7 @@ kbd_alloc(void) { } } 
kbd->fn_handler = - kzalloc(sizeof(fn_handler_fn *) * NR_FN_HANDLER, GFP_KERNEL); + kcalloc(NR_FN_HANDLER, sizeof(fn_handler_fn *), GFP_KERNEL); if (!kbd->fn_handler) goto out_func; kbd->accent_table = kmemdup(ebc_accent_table, diff --git a/drivers/s390/char/sclp_sd.c b/drivers/s390/char/sclp_sd.c index 99f41db5123b..1e244f78f192 100644 --- a/drivers/s390/char/sclp_sd.c +++ b/drivers/s390/char/sclp_sd.c @@ -300,7 +300,7 @@ static int sclp_sd_store_data(struct sclp_sd_data *result, u8 di) goto out_result; /* Allocate memory */ - data = vzalloc((size_t) dsize * PAGE_SIZE); + data = vzalloc(array_size((size_t)dsize, PAGE_SIZE)); if (!data) { rc = -ENOMEM; goto out; diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index 1c98023cffd4..5b8af2782282 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c @@ -719,7 +719,8 @@ tty3270_alloc_view(void) if (!tp) goto out_err; tp->freemem_pages = - kmalloc(sizeof(void *) * TTY3270_STRING_PAGES, GFP_KERNEL); + kmalloc_array(TTY3270_STRING_PAGES, sizeof(void *), + GFP_KERNEL); if (!tp->freemem_pages) goto out_tp; INIT_LIST_HEAD(&tp->freemem); diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c index 52aa89424318..cbde65ab2170 100644 --- a/drivers/s390/char/vmur.c +++ b/drivers/s390/char/vmur.c @@ -242,7 +242,7 @@ static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count, * That means we allocate room for CCWs to cover count/reclen * records plus a NOP. */ - cpa = kzalloc((rec_count + 1) * sizeof(struct ccw1), + cpa = kcalloc(rec_count + 1, sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); if (!cpa) return ERR_PTR(-ENOMEM); diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 4369662cfff5..76d3c50bf078 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -152,7 +152,7 @@ static int zcore_memmap_open(struct inode *inode, struct file *filp) char *buf; int i = 0; - buf = kzalloc(memblock.memory.cnt * CHUNK_INFO_SIZE, GFP_KERNEL); + buf = kcalloc(memblock.memory.cnt, CHUNK_INFO_SIZE, GFP_KERNEL); if (!buf) { return -ENOMEM; } diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile index a070ef0efe65..f230516abb96 100644 --- a/drivers/s390/cio/Makefile +++ b/drivers/s390/cio/Makefile @@ -5,6 +5,7 @@ # The following is required for define_trace.h to find ./trace.h CFLAGS_trace.o := -I$(src) +CFLAGS_vfio_ccw_fsm.o := -I$(src) obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \ fcx.o itcw.o crw.o ccwreq.o trace.o ioasm.o diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index 4c14ce428e92..78f1be41b05e 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -536,7 +536,7 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, int qdio_enable_async_operation(struct qdio_output_q *outq) { - outq->aobs = kzalloc(sizeof(struct qaob *) * QDIO_MAX_BUFFERS_PER_Q, + outq->aobs = kcalloc(QDIO_MAX_BUFFERS_PER_Q, sizeof(struct qaob *), GFP_ATOMIC); if (!outq->aobs) { outq->use_cq = 0; diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index 0787b587e4b8..07dea602205b 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c @@ -241,8 +241,9 @@ out: /* allocate non-shared indicators and shared indicator */ int __init tiqdio_allocate_memory(void) { - q_indicators = kzalloc(sizeof(struct indicator_t) * TIQDIO_NR_INDICATORS, - GFP_KERNEL); + q_indicators = kcalloc(TIQDIO_NR_INDICATORS, + sizeof(struct indicator_t), + GFP_KERNEL); if 
(!q_indicators) return -ENOMEM; return 0; diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c index dce92b2a895d..dbe7c7ac9ac8 100644 --- a/drivers/s390/cio/vfio_ccw_cp.c +++ b/drivers/s390/cio/vfio_ccw_cp.c @@ -23,9 +23,13 @@ #define CCWCHAIN_LEN_MAX 256 struct pfn_array { + /* Starting guest physical I/O address. */ unsigned long pa_iova; + /* Array that stores PFNs of the pages need to pin. */ unsigned long *pa_iova_pfn; + /* Array that receives PFNs of the pages pinned. */ unsigned long *pa_pfn; + /* Number of pages pinned from @pa_iova. */ int pa_nr; }; @@ -46,70 +50,33 @@ struct ccwchain { }; /* - * pfn_array_pin() - pin user pages in memory + * pfn_array_alloc_pin() - alloc memory for PFNs, then pin user pages in memory * @pa: pfn_array on which to perform the operation * @mdev: the mediated device to perform pin/unpin operations + * @iova: target guest physical address + * @len: number of bytes that should be pinned from @iova * - * Attempt to pin user pages in memory. + * Attempt to allocate memory for PFNs, and pin user pages in memory. * * Usage of pfn_array: - * @pa->pa_iova starting guest physical I/O address. Assigned by caller. - * @pa->pa_iova_pfn array that stores PFNs of the pages need to pin. Allocated - * by caller. - * @pa->pa_pfn array that receives PFNs of the pages pinned. Allocated by - * caller. - * @pa->pa_nr number of pages from @pa->pa_iova to pin. Assigned by - * caller. - * number of pages pinned. Assigned by callee. + * We expect (pa_nr == 0) and (pa_iova_pfn == NULL), any field in + * this structure will be filled in by this function. * * Returns: * Number of pages pinned on success. - * If @pa->pa_nr is 0 or negative, returns 0. + * If @pa->pa_nr is not 0, or @pa->pa_iova_pfn is not NULL initially, + * returns -EINVAL. * If no pages were pinned, returns -errno. */ -static int pfn_array_pin(struct pfn_array *pa, struct device *mdev) -{ - int i, ret; - - if (pa->pa_nr <= 0) { - pa->pa_nr = 0; - return 0; - } - - pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT; - for (i = 1; i < pa->pa_nr; i++) - pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1; - - ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr, - IOMMU_READ | IOMMU_WRITE, pa->pa_pfn); - - if (ret > 0 && ret != pa->pa_nr) { - vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret); - pa->pa_nr = 0; - return 0; - } - - return ret; -} - -/* Unpin the pages before releasing the memory. */ -static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev) -{ - vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr); - pa->pa_nr = 0; - kfree(pa->pa_iova_pfn); -} - -/* Alloc memory for PFNs, then pin pages with them. 
*/ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev, u64 iova, unsigned int len) { - int ret = 0; + int i, ret = 0; if (!len) return 0; - if (pa->pa_nr) + if (pa->pa_nr || pa->pa_iova_pfn) return -EINVAL; pa->pa_iova = iova; @@ -126,18 +93,39 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev, return -ENOMEM; pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr; - ret = pfn_array_pin(pa, mdev); + pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT; + for (i = 1; i < pa->pa_nr; i++) + pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1; - if (ret > 0) - return ret; - else if (!ret) + ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr, + IOMMU_READ | IOMMU_WRITE, pa->pa_pfn); + + if (ret < 0) { + goto err_out; + } else if (ret > 0 && ret != pa->pa_nr) { + vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret); ret = -EINVAL; + goto err_out; + } + return ret; + +err_out: + pa->pa_nr = 0; kfree(pa->pa_iova_pfn); + pa->pa_iova_pfn = NULL; return ret; } +/* Unpin the pages before releasing the memory. */ +static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev) +{ + vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr); + pa->pa_nr = 0; + kfree(pa->pa_iova_pfn); +} + static int pfn_array_table_init(struct pfn_array_table *pat, int nr) { pat->pat_pa = kcalloc(nr, sizeof(*pat->pat_pa), GFP_KERNEL); @@ -365,6 +353,9 @@ static void cp_unpin_free(struct channel_program *cp) * This is the chain length not considering any TICs. * You need to do a new round for each TIC target. * + * The program is also validated for absence of not yet supported + * indirect data addressing scenarios. + * * Returns: the length of the ccw chain or -errno. */ static int ccwchain_calc_length(u64 iova, struct channel_program *cp) @@ -391,6 +382,14 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp) do { cnt++; + /* + * As we don't want to fail direct addressing even if the + * orb specified one of the unsupported formats, we defer + * checking for IDAWs in unsupported formats to here. + */ + if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw)) + return -EOPNOTSUPP; + if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw))) break; @@ -503,7 +502,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain, struct ccw1 *ccw; struct pfn_array_table *pat; unsigned long *idaws; - int idaw_nr; + int ret; ccw = chain->ch_ccw + idx; @@ -523,18 +522,19 @@ static int ccwchain_fetch_direct(struct ccwchain *chain, * needed when translating a direct ccw to a idal ccw. */ pat = chain->ch_pat + idx; - if (pfn_array_table_init(pat, 1)) - return -ENOMEM; - idaw_nr = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, - ccw->cda, ccw->count); - if (idaw_nr < 0) - return idaw_nr; + ret = pfn_array_table_init(pat, 1); + if (ret) + goto out_init; + + ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count); + if (ret < 0) + goto out_init; /* Translate this direct ccw to a idal ccw. 
*/ - idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL); + idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL); if (!idaws) { - pfn_array_table_unpin_free(pat, cp->mdev); - return -ENOMEM; + ret = -ENOMEM; + goto out_unpin; } ccw->cda = (__u32) virt_to_phys(idaws); ccw->flags |= CCW_FLAG_IDA; @@ -542,6 +542,12 @@ static int ccwchain_fetch_direct(struct ccwchain *chain, pfn_array_table_idal_create_words(pat, idaws); return 0; + +out_unpin: + pfn_array_table_unpin_free(pat, cp->mdev); +out_init: + ccw->cda = 0; + return ret; } static int ccwchain_fetch_idal(struct ccwchain *chain, @@ -571,7 +577,7 @@ static int ccwchain_fetch_idal(struct ccwchain *chain, pat = chain->ch_pat + idx; ret = pfn_array_table_init(pat, idaw_nr); if (ret) - return ret; + goto out_init; /* Translate idal ccw to use new allocated idaws. */ idaws = kzalloc(idaw_len, GFP_DMA | GFP_KERNEL); @@ -603,6 +609,8 @@ out_free_idaws: kfree(idaws); out_unpin: pfn_array_table_unpin_free(pat, cp->mdev); +out_init: + ccw->cda = 0; return ret; } @@ -656,10 +664,8 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb) /* * XXX: * Only support prefetch enable mode now. - * Only support 64bit addressing idal. - * Only support 4k IDAW. */ - if (!orb->cmd.pfch || !orb->cmd.c64 || orb->cmd.i2k) + if (!orb->cmd.pfch) return -EOPNOTSUPP; INIT_LIST_HEAD(&cp->ccwchain_list); @@ -688,6 +694,10 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb) ret = ccwchain_loop_tic(chain, cp); if (ret) cp_unpin_free(cp); + /* It is safe to force: if not set but idals used + * ccwchain_calc_length returns an error. + */ + cp->orb.cmd.c64 = 1; return ret; } diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c index ea6a2d0b2894..770fa9cfc310 100644 --- a/drivers/s390/cio/vfio_ccw_drv.c +++ b/drivers/s390/cio/vfio_ccw_drv.c @@ -177,6 +177,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process) { struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev); unsigned long flags; + int rc = -EAGAIN; spin_lock_irqsave(sch->lock, flags); if (!device_is_registered(&sch->dev)) @@ -187,6 +188,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process) if (cio_update_schib(sch)) { vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER); + rc = 0; goto out_unlock; } @@ -195,11 +197,12 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process) private->state = private->mdev ? VFIO_CCW_STATE_IDLE : VFIO_CCW_STATE_STANDBY; } + rc = 0; out_unlock: spin_unlock_irqrestore(sch->lock, flags); - return 0; + return rc; } static struct css_device_id vfio_ccw_sch_ids[] = { diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c index 3c800642134e..797a82731159 100644 --- a/drivers/s390/cio/vfio_ccw_fsm.c +++ b/drivers/s390/cio/vfio_ccw_fsm.c @@ -13,6 +13,9 @@ #include "ioasm.h" #include "vfio_ccw_private.h" +#define CREATE_TRACE_POINTS +#include "vfio_ccw_trace.h" + static int fsm_io_helper(struct vfio_ccw_private *private) { struct subchannel *sch; @@ -110,6 +113,10 @@ static void fsm_disabled_irq(struct vfio_ccw_private *private, */ cio_disable_subchannel(sch); } +inline struct subchannel_id get_schid(struct vfio_ccw_private *p) +{ + return p->sch->schid; +} /* * Deal with the ccw command request from the userspace. 
@@ -121,6 +128,7 @@ static void fsm_io_request(struct vfio_ccw_private *private, union scsw *scsw = &private->scsw; struct ccw_io_region *io_region = &private->io_region; struct mdev_device *mdev = private->mdev; + char *errstr = "request"; private->state = VFIO_CCW_STATE_BOXED; @@ -132,15 +140,19 @@ static void fsm_io_request(struct vfio_ccw_private *private, /* Don't try to build a cp if transport mode is specified. */ if (orb->tm.b) { io_region->ret_code = -EOPNOTSUPP; + errstr = "transport mode"; goto err_out; } io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev), orb); - if (io_region->ret_code) + if (io_region->ret_code) { + errstr = "cp init"; goto err_out; + } io_region->ret_code = cp_prefetch(&private->cp); if (io_region->ret_code) { + errstr = "cp prefetch"; cp_free(&private->cp); goto err_out; } @@ -148,6 +160,7 @@ static void fsm_io_request(struct vfio_ccw_private *private, /* Start channel program and wait for I/O interrupt. */ io_region->ret_code = fsm_io_helper(private); if (io_region->ret_code) { + errstr = "cp fsm_io_helper"; cp_free(&private->cp); goto err_out; } @@ -164,6 +177,8 @@ static void fsm_io_request(struct vfio_ccw_private *private, err_out: private->state = VFIO_CCW_STATE_IDLE; + trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private), + io_region->ret_code, errstr); } /* diff --git a/drivers/s390/cio/vfio_ccw_trace.h b/drivers/s390/cio/vfio_ccw_trace.h new file mode 100644 index 000000000000..b1da53ddec1f --- /dev/null +++ b/drivers/s390/cio/vfio_ccw_trace.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Tracepoints for vfio_ccw driver + * + * Copyright IBM Corp. 2018 + * + * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com> + * Halil Pasic <pasic@linux.vnet.ibm.com> + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM vfio_ccw + +#if !defined(_VFIO_CCW_TRACE_) || defined(TRACE_HEADER_MULTI_READ) +#define _VFIO_CCW_TRACE_ + +#include <linux/tracepoint.h> + +TRACE_EVENT(vfio_ccw_io_fctl, + TP_PROTO(int fctl, struct subchannel_id schid, int errno, char *errstr), + TP_ARGS(fctl, schid, errno, errstr), + + TP_STRUCT__entry( + __field(int, fctl) + __field_struct(struct subchannel_id, schid) + __field(int, errno) + __field(char*, errstr) + ), + + TP_fast_assign( + __entry->fctl = fctl; + __entry->schid = schid; + __entry->errno = errno; + __entry->errstr = errstr; + ), + + TP_printk("schid=%x.%x.%04x fctl=%x errno=%d info=%s", + __entry->schid.cssid, + __entry->schid.ssid, + __entry->schid.sch_no, + __entry->fctl, + __entry->errno, + __entry->errstr) +); + +#endif /* _VFIO_CCW_TRACE_ */ + +/* This part must be outside protection */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
+#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE vfio_ccw_trace + +#include <trace/define_trace.h> diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index ed80d00cdb6f..3929c8be8098 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -121,7 +121,7 @@ static int alloc_and_prep_cprbmem(size_t paramblen, * allocate consecutive memory for request CPRB, request param * block, reply CPRB and reply param block */ - cprbmem = kzalloc(2 * cprbplusparamblen, GFP_KERNEL); + cprbmem = kcalloc(2, cprbplusparamblen, GFP_KERNEL); if (!cprbmem) return -ENOMEM; @@ -899,9 +899,9 @@ int pkey_findcard(const struct pkey_seckey *seckey, return -EINVAL; /* fetch status of all crypto cards */ - device_status = kmalloc(MAX_ZDEV_ENTRIES_EXT - * sizeof(struct zcrypt_device_status_ext), - GFP_KERNEL); + device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT, + sizeof(struct zcrypt_device_status_ext), + GFP_KERNEL); if (!device_status) return -ENOMEM; zcrypt_device_status_mask_ext(device_status); diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index 7ce98b70cad3..7617d21cb296 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c @@ -1379,7 +1379,7 @@ static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type, } else ccw_num = 8; - ch->ccw = kzalloc(ccw_num * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); + ch->ccw = kcalloc(ccw_num, sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); if (ch->ccw == NULL) goto nomem_return; diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 2a5fec55bf60..a246a618f9a4 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -829,6 +829,17 @@ struct qeth_trap_id { /*some helper functions*/ #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "") +static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf, + unsigned int elements) +{ + unsigned int i; + + for (i = 0; i < elements; i++) + memset(&buf->element[i], 0, sizeof(struct qdio_buffer_element)); + buf->element[14].sflags = 0; + buf->element[15].sflags = 0; +} + /** * qeth_get_elements_for_range() - find number of SBALEs to cover range. * @start: Start of the address range. 
@@ -1029,7 +1040,7 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *, __u16, __u16, enum qeth_prot_versions); int qeth_set_features(struct net_device *, netdev_features_t); -void qeth_recover_features(struct net_device *dev); +void qeth_enable_hw_features(struct net_device *dev); netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t); netdev_features_t qeth_features_check(struct sk_buff *skb, struct net_device *dev, diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 9f28b6f2efc4..d01ac29fd986 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -73,9 +73,6 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf, enum iucv_tx_notify notification); static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); -static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, - struct qeth_qdio_out_buffer *buf, - enum qeth_qdio_buffer_states newbufstate); static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); struct workqueue_struct *qeth_wq; @@ -374,9 +371,10 @@ static int qeth_alloc_cq(struct qeth_card *card) } card->qdio.no_in_queues = 2; card->qdio.out_bufstates = - kzalloc(card->qdio.no_out_queues * - QDIO_MAX_BUFFERS_PER_Q * - sizeof(struct qdio_outbuf_state), GFP_KERNEL); + kcalloc(card->qdio.no_out_queues * + QDIO_MAX_BUFFERS_PER_Q, + sizeof(struct qdio_outbuf_state), + GFP_KERNEL); outbuf_states = card->qdio.out_bufstates; if (outbuf_states == NULL) { rc = -1; @@ -488,6 +486,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card, struct qaob *aob; struct qeth_qdio_out_buffer *buffer; enum iucv_tx_notify notification; + unsigned int i; aob = (struct qaob *) phys_to_virt(phys_aob_addr); QETH_CARD_TEXT(card, 5, "haob"); @@ -512,10 +511,18 @@ static void qeth_qdio_handle_aob(struct qeth_card *card, qeth_notify_skbs(buffer->q, buffer, notification); buffer->aob = NULL; - qeth_clear_output_buffer(buffer->q, buffer, - QETH_QDIO_BUF_HANDLED_DELAYED); + /* Free dangling allocations. The attached skbs are handled by + * qeth_cleanup_handled_pending(). 
+ */ + for (i = 0; + i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card); + i++) { + if (aob->sba[i] && buffer->is_header[i]) + kmem_cache_free(qeth_core_header_cache, + (void *) aob->sba[i]); + } + atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED); - /* from here on: do not touch buffer anymore */ qdio_release_aob(aob); } @@ -2538,8 +2545,9 @@ static int qeth_alloc_qdio_buffers(struct qeth_card *card) /* outbound */ card->qdio.out_qs = - kzalloc(card->qdio.no_out_queues * - sizeof(struct qeth_qdio_out_q *), GFP_KERNEL); + kcalloc(card->qdio.no_out_queues, + sizeof(struct qeth_qdio_out_q *), + GFP_KERNEL); if (!card->qdio.out_qs) goto out_freepool; for (i = 0; i < card->qdio.no_out_queues; ++i) { @@ -3757,6 +3765,10 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev, QETH_CARD_TEXT(queue->card, 5, "aob"); QETH_CARD_TEXT_(queue->card, 5, "%lx", virt_to_phys(buffer->aob)); + + /* prepare the queue slot for re-use: */ + qeth_scrub_qdio_buffer(buffer->buffer, + QETH_MAX_BUFFER_ELEMENTS(card)); if (qeth_init_qdio_out_buf(queue, bidx)) { QETH_CARD_TEXT(card, 2, "outofbuf"); qeth_schedule_recovery(card); @@ -4832,7 +4844,7 @@ int qeth_vm_request_mac(struct qeth_card *card) goto out; } - ccw_device_get_id(CARD_RDEV(card), &id); + ccw_device_get_id(CARD_DDEV(card), &id); request->resp_buf_len = sizeof(*response); request->resp_version = DIAG26C_VERSION2; request->op_code = DIAG26C_GET_MAC; @@ -4963,8 +4975,8 @@ static int qeth_qdio_establish(struct qeth_card *card) QETH_DBF_TEXT(SETUP, 2, "qdioest"); - qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char), - GFP_KERNEL); + qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q, + GFP_KERNEL); if (!qib_param_field) { rc = -ENOMEM; goto out_free_nothing; @@ -4973,8 +4985,8 @@ static int qeth_qdio_establish(struct qeth_card *card) qeth_create_qib_param_field(card, qib_param_field); qeth_create_qib_param_field_blkt(card, qib_param_field); - in_sbal_ptrs = kzalloc(card->qdio.no_in_queues * - QDIO_MAX_BUFFERS_PER_Q * sizeof(void *), + in_sbal_ptrs = kcalloc(card->qdio.no_in_queues * QDIO_MAX_BUFFERS_PER_Q, + sizeof(void *), GFP_KERNEL); if (!in_sbal_ptrs) { rc = -ENOMEM; @@ -4985,7 +4997,7 @@ static int qeth_qdio_establish(struct qeth_card *card) virt_to_phys(card->qdio.in_q->bufs[i].buffer); } - queue_start_poll = kzalloc(sizeof(void *) * card->qdio.no_in_queues, + queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *), GFP_KERNEL); if (!queue_start_poll) { rc = -ENOMEM; @@ -4997,8 +5009,9 @@ static int qeth_qdio_establish(struct qeth_card *card) qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll); out_sbal_ptrs = - kzalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q * - sizeof(void *), GFP_KERNEL); + kcalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q, + sizeof(void *), + GFP_KERNEL); if (!out_sbal_ptrs) { rc = -ENOMEM; goto out_free_queue_start_poll; @@ -6456,28 +6469,27 @@ static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on) #define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \ NETIF_F_IPV6_CSUM) /** - * qeth_recover_features() - Restore device features after recovery - * @dev: the recovering net_device - * - * Caller must hold rtnl lock. 
+ * qeth_enable_hw_features() - (Re-)Enable HW functions for device features + * @dev: a net_device */ -void qeth_recover_features(struct net_device *dev) +void qeth_enable_hw_features(struct net_device *dev) { - netdev_features_t features = dev->features; struct qeth_card *card = dev->ml_priv; + netdev_features_t features; + rtnl_lock(); + features = dev->features; /* force-off any feature that needs an IPA sequence. * netdev_update_features() will restart them. */ dev->features &= ~QETH_HW_FEATURES; netdev_update_features(dev); - - if (features == dev->features) - return; - dev_warn(&card->gdev->dev, - "Device recovery failed to restore all offload features\n"); + if (features != dev->features) + dev_warn(&card->gdev->dev, + "Device recovery failed to restore all offload features\n"); + rtnl_unlock(); } -EXPORT_SYMBOL_GPL(qeth_recover_features); +EXPORT_SYMBOL_GPL(qeth_enable_hw_features); int qeth_set_features(struct net_device *dev, netdev_features_t features) { diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index a7cb37da6a21..2487f0aeb165 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -140,7 +140,7 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac) { - enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ? + enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ? IPA_CMD_SETGMAC : IPA_CMD_SETVMAC; int rc; @@ -157,7 +157,7 @@ static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac) static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac) { - enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ? + enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ? IPA_CMD_DELGMAC : IPA_CMD_DELVMAC; int rc; @@ -501,27 +501,34 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) return -ERESTARTSYS; } + /* avoid racing against concurrent state change: */ + if (!mutex_trylock(&card->conf_mutex)) + return -EAGAIN; + if (!qeth_card_hw_is_reachable(card)) { ether_addr_copy(dev->dev_addr, addr->sa_data); - return 0; + goto out_unlock; } /* don't register the same address twice */ if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) && (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) - return 0; + goto out_unlock; /* add the new address, switch over, drop the old */ rc = qeth_l2_send_setmac(card, addr->sa_data); if (rc) - return rc; + goto out_unlock; ether_addr_copy(old_addr, dev->dev_addr); ether_addr_copy(dev->dev_addr, addr->sa_data); if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED) qeth_l2_remove_mac(card, old_addr); card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; - return 0; + +out_unlock: + mutex_unlock(&card->conf_mutex); + return rc; } static void qeth_promisc_to_bridge(struct qeth_card *card) @@ -1112,6 +1119,8 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) netif_carrier_off(card->dev); qeth_set_allowed_threads(card, 0xffffffff, 0); + + qeth_enable_hw_features(card->dev); if (recover_flag == CARD_STATE_RECOVER) { if (recovery_mode && card->info.type != QETH_CARD_TYPE_OSN) { @@ -1123,9 +1132,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) } /* this also sets saved unicast addresses */ qeth_l2_set_rx_mode(card->dev); - rtnl_lock(); - qeth_recover_features(card->dev); - rtnl_unlock(); } /* let user_space know that device is online */ kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); diff --git 
a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index e7fa479adf47..5905dc63e256 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2662,6 +2662,8 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) netif_carrier_on(card->dev); else netif_carrier_off(card->dev); + + qeth_enable_hw_features(card->dev); if (recover_flag == CARD_STATE_RECOVER) { rtnl_lock(); if (recovery_mode) @@ -2669,7 +2671,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) else dev_open(card->dev); qeth_l3_set_rx_mode(card->dev); - qeth_recover_features(card->dev); rtnl_unlock(); } qeth_trace_features(card); diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 18c4f933e8b9..3b368fcf13f4 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -285,6 +285,8 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter, struct list_head *entry; unsigned long flags; + lockdep_assert_held(&adapter->erp_lock); + if (unlikely(!debug_level_enabled(dbf->rec, level))) return; @@ -599,16 +601,18 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf) } /** - * zfcp_dbf_scsi - trace event for scsi commands - * @tag: identifier for event - * @sc: pointer to struct scsi_cmnd - * @fsf: pointer to struct zfcp_fsf_req + * zfcp_dbf_scsi_common() - Common trace event helper for scsi. + * @tag: Identifier for event. + * @level: trace level of event. + * @sdev: Pointer to SCSI device as context for this event. + * @sc: Pointer to SCSI command, or NULL with task management function (TMF). + * @fsf: Pointer to FSF request, or NULL. */ -void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc, - struct zfcp_fsf_req *fsf) +void zfcp_dbf_scsi_common(char *tag, int level, struct scsi_device *sdev, + struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf) { struct zfcp_adapter *adapter = - (struct zfcp_adapter *) sc->device->host->hostdata[0]; + (struct zfcp_adapter *) sdev->host->hostdata[0]; struct zfcp_dbf *dbf = adapter->dbf; struct zfcp_dbf_scsi *rec = &dbf->scsi_buf; struct fcp_resp_with_ext *fcp_rsp; @@ -620,16 +624,28 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc, memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); rec->id = ZFCP_DBF_SCSI_CMND; - rec->scsi_result = sc->result; - rec->scsi_retries = sc->retries; - rec->scsi_allowed = sc->allowed; - rec->scsi_id = sc->device->id; - rec->scsi_lun = (u32)sc->device->lun; - rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32); - rec->host_scribble = (unsigned long)sc->host_scribble; - - memcpy(rec->scsi_opcode, sc->cmnd, - min((int)sc->cmd_len, ZFCP_DBF_SCSI_OPCODE)); + if (sc) { + rec->scsi_result = sc->result; + rec->scsi_retries = sc->retries; + rec->scsi_allowed = sc->allowed; + rec->scsi_id = sc->device->id; + rec->scsi_lun = (u32)sc->device->lun; + rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32); + rec->host_scribble = (unsigned long)sc->host_scribble; + + memcpy(rec->scsi_opcode, sc->cmnd, + min_t(int, sc->cmd_len, ZFCP_DBF_SCSI_OPCODE)); + } else { + rec->scsi_result = ~0; + rec->scsi_retries = ~0; + rec->scsi_allowed = ~0; + rec->scsi_id = sdev->id; + rec->scsi_lun = (u32)sdev->lun; + rec->scsi_lun_64_hi = (u32)(sdev->lun >> 32); + rec->host_scribble = ~0; + + memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE); + } if (fsf) { rec->fsf_req_id = fsf->req_id; @@ -664,6 +680,46 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc, spin_unlock_irqrestore(&dbf->scsi_lock, flags); } +/** + * 
zfcp_dbf_scsi_eh() - Trace event for special cases of scsi_eh callbacks. + * @tag: Identifier for event. + * @adapter: Pointer to zfcp adapter as context for this event. + * @scsi_id: SCSI ID/target to indicate scope of task management function (TMF). + * @ret: Return value of calling function. + * + * This SCSI trace variant does not depend on any of: + * scsi_cmnd, zfcp_fsf_req, scsi_device. + */ +void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter, + unsigned int scsi_id, int ret) +{ + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_dbf_scsi *rec = &dbf->scsi_buf; + unsigned long flags; + static int const level = 1; + + if (unlikely(!debug_level_enabled(adapter->dbf->scsi, level))) + return; + + spin_lock_irqsave(&dbf->scsi_lock, flags); + memset(rec, 0, sizeof(*rec)); + + memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); + rec->id = ZFCP_DBF_SCSI_CMND; + rec->scsi_result = ret; /* re-use field, int is 4 bytes and fits */ + rec->scsi_retries = ~0; + rec->scsi_allowed = ~0; + rec->fcp_rsp_info = ~0; + rec->scsi_id = scsi_id; + rec->scsi_lun = (u32)ZFCP_DBF_INVALID_LUN; + rec->scsi_lun_64_hi = (u32)(ZFCP_DBF_INVALID_LUN >> 32); + rec->host_scribble = ~0; + memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE); + + debug_event(dbf->scsi, level, rec, sizeof(*rec)); + spin_unlock_irqrestore(&dbf->scsi_lock, flags); +} + static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size) { struct debug_info *d; diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h index e2a973cd2573..d116c07ed77a 100644 --- a/drivers/s390/scsi/zfcp_dbf.h +++ b/drivers/s390/scsi/zfcp_dbf.h @@ -359,7 +359,7 @@ void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd, scmd->device->host->hostdata[0]; if (debug_level_enabled(adapter->dbf->scsi, level)) - zfcp_dbf_scsi(tag, level, scmd, req); + zfcp_dbf_scsi_common(tag, level, scmd->device, scmd, req); } /** @@ -402,16 +402,23 @@ void zfcp_dbf_scsi_abort(char *tag, struct scsi_cmnd *scmd, } /** - * zfcp_dbf_scsi_devreset - trace event for Logical Unit or Target Reset - * @tag: tag indicating success or failure of reset operation - * @scmnd: SCSI command which caused this error recovery - * @flag: indicates type of reset (Target Reset, Logical Unit Reset) + * zfcp_dbf_scsi_devreset() - Trace event for Logical Unit or Target Reset. + * @tag: Tag indicating success or failure of reset operation. + * @sdev: Pointer to SCSI device as context for this event. + * @flag: Indicates type of reset (Target Reset, Logical Unit Reset). + * @fsf_req: Pointer to FSF request representing the TMF, or NULL. 
*/ static inline -void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag, +void zfcp_dbf_scsi_devreset(char *tag, struct scsi_device *sdev, u8 flag, struct zfcp_fsf_req *fsf_req) { + struct zfcp_adapter *adapter = (struct zfcp_adapter *) + sdev->host->hostdata[0]; char tmp_tag[ZFCP_DBF_TAG_LEN]; + static int const level = 1; + + if (unlikely(!debug_level_enabled(adapter->dbf->scsi, level))) + return; if (flag == FCP_TMF_TGT_RESET) memcpy(tmp_tag, "tr_", 3); @@ -419,7 +426,7 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag, memcpy(tmp_tag, "lr_", 3); memcpy(&tmp_tag[3], tag, 4); - _zfcp_dbf_scsi(tmp_tag, 1, scmnd, fsf_req); + zfcp_dbf_scsi_common(tmp_tag, level, sdev, NULL, fsf_req); } /** diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 1d91a32db08e..e7e6b63905e2 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -19,7 +19,6 @@ enum zfcp_erp_act_flags { ZFCP_STATUS_ERP_TIMEDOUT = 0x10000000, ZFCP_STATUS_ERP_CLOSE_ONLY = 0x01000000, - ZFCP_STATUS_ERP_DISMISSING = 0x00100000, ZFCP_STATUS_ERP_DISMISSED = 0x00200000, ZFCP_STATUS_ERP_LOWMEM = 0x00400000, ZFCP_STATUS_ERP_NO_REF = 0x00800000, @@ -27,7 +26,6 @@ enum zfcp_erp_act_flags { enum zfcp_erp_steps { ZFCP_ERP_STEP_UNINITIALIZED = 0x0000, - ZFCP_ERP_STEP_FSF_XCONFIG = 0x0001, ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010, ZFCP_ERP_STEP_PORT_CLOSING = 0x0100, ZFCP_ERP_STEP_PORT_OPENING = 0x0800, @@ -35,16 +33,28 @@ enum zfcp_erp_steps { ZFCP_ERP_STEP_LUN_OPENING = 0x2000, }; +/** + * enum zfcp_erp_act_type - Type of ERP action object. + * @ZFCP_ERP_ACTION_REOPEN_LUN: LUN recovery. + * @ZFCP_ERP_ACTION_REOPEN_PORT: Port recovery. + * @ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: Forced port recovery. + * @ZFCP_ERP_ACTION_REOPEN_ADAPTER: Adapter recovery. + * @ZFCP_ERP_ACTION_NONE: Eyecatcher pseudo flag to bitwise or-combine with + * either of the first four enum values. + * Used to indicate that an ERP action could not be + * set up despite a detected need for some recovery. + * @ZFCP_ERP_ACTION_FAILED: Eyecatcher pseudo flag to bitwise or-combine with + * either of the first four enum values. + * Used to indicate that ERP not needed because + * the object has ZFCP_STATUS_COMMON_ERP_FAILED. 
+ */ enum zfcp_erp_act_type { ZFCP_ERP_ACTION_REOPEN_LUN = 1, ZFCP_ERP_ACTION_REOPEN_PORT = 2, ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3, ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4, -}; - -enum zfcp_erp_act_state { - ZFCP_ERP_ACTION_RUNNING = 1, - ZFCP_ERP_ACTION_READY = 2, + ZFCP_ERP_ACTION_NONE = 0xc0, + ZFCP_ERP_ACTION_FAILED = 0xe0, }; enum zfcp_erp_act_result { @@ -62,14 +72,14 @@ static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int mask) ZFCP_STATUS_COMMON_UNBLOCKED | mask); } -static int zfcp_erp_action_exists(struct zfcp_erp_action *act) +static bool zfcp_erp_action_is_running(struct zfcp_erp_action *act) { struct zfcp_erp_action *curr_act; list_for_each_entry(curr_act, &act->adapter->erp_running_head, list) if (act == curr_act) - return ZFCP_ERP_ACTION_RUNNING; - return 0; + return true; + return false; } static void zfcp_erp_action_ready(struct zfcp_erp_action *act) @@ -85,7 +95,7 @@ static void zfcp_erp_action_ready(struct zfcp_erp_action *act) static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act) { act->status |= ZFCP_STATUS_ERP_DISMISSED; - if (zfcp_erp_action_exists(act) == ZFCP_ERP_ACTION_RUNNING) + if (zfcp_erp_action_is_running(act)) zfcp_erp_action_ready(act); } @@ -126,6 +136,49 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) } } +static int zfcp_erp_handle_failed(int want, struct zfcp_adapter *adapter, + struct zfcp_port *port, + struct scsi_device *sdev) +{ + int need = want; + struct zfcp_scsi_dev *zsdev; + + switch (want) { + case ZFCP_ERP_ACTION_REOPEN_LUN: + zsdev = sdev_to_zfcp(sdev); + if (atomic_read(&zsdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED) + need = 0; + break; + case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: + if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) + need = 0; + break; + case ZFCP_ERP_ACTION_REOPEN_PORT: + if (atomic_read(&port->status) & + ZFCP_STATUS_COMMON_ERP_FAILED) { + need = 0; + /* ensure propagation of failed status to new devices */ + zfcp_erp_set_port_status( + port, ZFCP_STATUS_COMMON_ERP_FAILED); + } + break; + case ZFCP_ERP_ACTION_REOPEN_ADAPTER: + if (atomic_read(&adapter->status) & + ZFCP_STATUS_COMMON_ERP_FAILED) { + need = 0; + /* ensure propagation of failed status to new devices */ + zfcp_erp_set_adapter_status( + adapter, ZFCP_STATUS_COMMON_ERP_FAILED); + } + break; + default: + need = 0; + break; + } + + return need; +} + static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter, struct zfcp_port *port, struct scsi_device *sdev) @@ -241,48 +294,70 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, return erp_action; } -static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, - struct zfcp_port *port, - struct scsi_device *sdev, - char *id, u32 act_status) +static void zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, + struct zfcp_port *port, + struct scsi_device *sdev, + char *id, u32 act_status) { - int retval = 1, need; + int need; struct zfcp_erp_action *act; - if (!adapter->erp_thread) - return -EIO; + need = zfcp_erp_handle_failed(want, adapter, port, sdev); + if (!need) { + need = ZFCP_ERP_ACTION_FAILED; /* marker for trace */ + goto out; + } + + if (!adapter->erp_thread) { + need = ZFCP_ERP_ACTION_NONE; /* marker for trace */ + goto out; + } need = zfcp_erp_required_act(want, adapter, port, sdev); if (!need) goto out; act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev); - if (!act) + if (!act) { + need |= ZFCP_ERP_ACTION_NONE; /* marker for trace */ goto out; + } 
atomic_or(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status); ++adapter->erp_total_count; list_add_tail(&act->list, &adapter->erp_ready_head); wake_up(&adapter->erp_ready_wq); - retval = 0; out: zfcp_dbf_rec_trig(id, adapter, port, sdev, want, need); - return retval; } -static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, +void zfcp_erp_port_forced_no_port_dbf(char *id, struct zfcp_adapter *adapter, + u64 port_name, u32 port_id) +{ + unsigned long flags; + static /* don't waste stack */ struct zfcp_port tmpport; + + write_lock_irqsave(&adapter->erp_lock, flags); + /* Stand-in zfcp port with fields just good enough for + * zfcp_dbf_rec_trig() and zfcp_dbf_set_common(). + * Under lock because tmpport is static. + */ + atomic_set(&tmpport.status, -1); /* unknown */ + tmpport.wwpn = port_name; + tmpport.d_id = port_id; + zfcp_dbf_rec_trig(id, adapter, &tmpport, NULL, + ZFCP_ERP_ACTION_REOPEN_PORT_FORCED, + ZFCP_ERP_ACTION_NONE); + write_unlock_irqrestore(&adapter->erp_lock, flags); +} + +static void _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear_mask, char *id) { zfcp_erp_adapter_block(adapter, clear_mask); zfcp_scsi_schedule_rports_block(adapter); - /* ensure propagation of failed status to new devices */ - if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { - zfcp_erp_set_adapter_status(adapter, - ZFCP_STATUS_COMMON_ERP_FAILED); - return -EIO; - } - return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, - adapter, NULL, NULL, id, 0); + zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, + adapter, NULL, NULL, id, 0); } /** @@ -299,12 +374,8 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id) zfcp_scsi_schedule_rports_block(adapter); write_lock_irqsave(&adapter->erp_lock, flags); - if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) - zfcp_erp_set_adapter_status(adapter, - ZFCP_STATUS_COMMON_ERP_FAILED); - else - zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter, - NULL, NULL, id, 0); + zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter, + NULL, NULL, id, 0); write_unlock_irqrestore(&adapter->erp_lock, flags); } @@ -345,9 +416,6 @@ static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, zfcp_erp_port_block(port, clear); zfcp_scsi_schedule_rport_block(port); - if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) - return; - zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED, port->adapter, port, NULL, id, 0); } @@ -368,19 +436,13 @@ void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id) write_unlock_irqrestore(&adapter->erp_lock, flags); } -static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id) +static void _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id) { zfcp_erp_port_block(port, clear); zfcp_scsi_schedule_rport_block(port); - if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { - /* ensure propagation of failed status to new devices */ - zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED); - return -EIO; - } - - return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT, - port->adapter, port, NULL, id, 0); + zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT, + port->adapter, port, NULL, id, 0); } /** @@ -388,20 +450,15 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id) * @port: port to recover * @clear_mask: flags in port status to be cleared * @id: Id for debug trace event. 
- * - * Returns 0 if recovery has been triggered, < 0 if not. */ -int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id) +void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id) { - int retval; unsigned long flags; struct zfcp_adapter *adapter = port->adapter; write_lock_irqsave(&adapter->erp_lock, flags); - retval = _zfcp_erp_port_reopen(port, clear, id); + _zfcp_erp_port_reopen(port, clear, id); write_unlock_irqrestore(&adapter->erp_lock, flags); - - return retval; } static void zfcp_erp_lun_block(struct scsi_device *sdev, int clear_mask) @@ -418,9 +475,6 @@ static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id, zfcp_erp_lun_block(sdev, clear); - if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED) - return; - zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter, zfcp_sdev->port, sdev, id, act_status); } @@ -482,21 +536,23 @@ void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id) zfcp_erp_wait(adapter); } -static int status_change_set(unsigned long mask, atomic_t *status) +static int zfcp_erp_status_change_set(unsigned long mask, atomic_t *status) { return (atomic_read(status) ^ mask) & mask; } static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) { - if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status)) + if (zfcp_erp_status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, + &adapter->status)) zfcp_dbf_rec_run("eraubl1", &adapter->erp_action); atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status); } static void zfcp_erp_port_unblock(struct zfcp_port *port) { - if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status)) + if (zfcp_erp_status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, + &port->status)) zfcp_dbf_rec_run("erpubl1", &port->erp_action); atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status); } @@ -505,7 +561,8 @@ static void zfcp_erp_lun_unblock(struct scsi_device *sdev) { struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); - if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status)) + if (zfcp_erp_status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, + &zfcp_sdev->status)) zfcp_dbf_rec_run("erlubl1", &sdev_to_zfcp(sdev)->erp_action); atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status); } @@ -553,7 +610,7 @@ void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask) unsigned long flags; write_lock_irqsave(&adapter->erp_lock, flags); - if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) { + if (zfcp_erp_action_is_running(erp_action)) { erp_action->status |= set_mask; zfcp_erp_action_ready(erp_action); } @@ -1634,3 +1691,14 @@ void zfcp_erp_clear_lun_status(struct scsi_device *sdev, u32 mask) atomic_set(&zfcp_sdev->erp_counter, 0); } +/** + * zfcp_erp_adapter_reset_sync() - Really reopen adapter and wait. + * @adapter: Pointer to zfcp_adapter to reopen. + * @id: Trace tag string of length %ZFCP_DBF_TAG_LEN. 
+ */ +void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter, char *id) +{ + zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING); + zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, id); + zfcp_erp_wait(adapter); +} diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index e5eed8aac0ce..bd0c5a9f04cb 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -50,17 +50,23 @@ extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *); extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32); extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *); extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *); -extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *, - struct zfcp_fsf_req *); +extern void zfcp_dbf_scsi_common(char *tag, int level, struct scsi_device *sdev, + struct scsi_cmnd *sc, + struct zfcp_fsf_req *fsf); +extern void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter, + unsigned int scsi_id, int ret); /* zfcp_erp.c */ extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32); extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32); +extern void zfcp_erp_port_forced_no_port_dbf(char *id, + struct zfcp_adapter *adapter, + u64 port_name, u32 port_id); extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *); extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *); extern void zfcp_erp_set_port_status(struct zfcp_port *, u32); extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32); -extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *); +extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id); extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *); extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *); extern void zfcp_erp_set_lun_status(struct scsi_device *, u32); @@ -73,6 +79,7 @@ extern void zfcp_erp_thread_kill(struct zfcp_adapter *); extern void zfcp_erp_wait(struct zfcp_adapter *); extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long); extern void zfcp_erp_timeout_handler(struct timer_list *t); +extern void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter, char *id); /* zfcp_fc.c */ extern struct kmem_cache *zfcp_fc_req_cache; @@ -120,7 +127,8 @@ extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32, struct zfcp_fsf_ct_els *, unsigned int); extern int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *); extern void zfcp_fsf_req_free(struct zfcp_fsf_req *); -extern struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *, u8); +extern struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev, + u8 tm_flags); extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *); extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 6162cf57a20a..f6c415d6ef48 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -111,11 +111,10 @@ void zfcp_fc_post_event(struct work_struct *work) list_for_each_entry_safe(event, tmp, &tmp_lh, list) { fc_host_post_event(adapter->scsi_host, fc_get_event_number(), - event->code, event->data); + event->code, event->data); list_del(&event->list); kfree(event); } - } /** @@ -126,7 +125,7 @@ void zfcp_fc_post_event(struct work_struct *work) * @event_data: The event data (e.g. 
n_port page in case of els) */ void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter, - enum fc_host_event_code event_code, u32 event_data) + enum fc_host_event_code event_code, u32 event_data) { struct zfcp_fc_event *event; @@ -425,6 +424,7 @@ void zfcp_fc_port_did_lookup(struct work_struct *work) struct zfcp_port *port = container_of(work, struct zfcp_port, gid_pn_work); + set_worker_desc("zgidpn%16llx", port->wwpn); /* < WORKER_DESC_LEN=24 */ ret = zfcp_fc_ns_gid_pn(port); if (ret) { /* could not issue gid_pn for some reason */ @@ -559,6 +559,7 @@ void zfcp_fc_link_test_work(struct work_struct *work) container_of(work, struct zfcp_port, test_link_work); int retval; + set_worker_desc("zadisc%16llx", port->wwpn); /* < WORKER_DESC_LEN=24 */ get_device(&port->dev); port->rport_task = RPORT_DEL; zfcp_scsi_rport_work(&port->rport_work); @@ -596,7 +597,7 @@ void zfcp_fc_test_link(struct zfcp_port *port) put_device(&port->dev); } -static struct zfcp_fc_req *zfcp_alloc_sg_env(int buf_num) +static struct zfcp_fc_req *zfcp_fc_alloc_sg_env(int buf_num) { struct zfcp_fc_req *fc_req; @@ -748,7 +749,7 @@ void zfcp_fc_scan_ports(struct work_struct *work) if (zfcp_fc_wka_port_get(&adapter->gs->ds)) return; - fc_req = zfcp_alloc_sg_env(buf_num); + fc_req = zfcp_fc_alloc_sg_env(buf_num); if (!fc_req) goto out; diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h index 6a397ddaadf0..3cd74729cfb9 100644 --- a/drivers/s390/scsi/zfcp_fc.h +++ b/drivers/s390/scsi/zfcp_fc.h @@ -207,21 +207,14 @@ struct zfcp_fc_wka_ports { * zfcp_fc_scsi_to_fcp - setup FCP command with data from scsi_cmnd * @fcp: fcp_cmnd to setup * @scsi: scsi_cmnd where to get LUN, task attributes/flags and CDB - * @tm: task management flags to setup task management command */ static inline -void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi, - u8 tm_flags) +void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi) { u32 datalen; int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun); - if (unlikely(tm_flags)) { - fcp->fc_tm_flags = tm_flags; - return; - } - fcp->fc_pri_ta = FCP_PTA_SIMPLE; if (scsi->sc_data_direction == DMA_FROM_DEVICE) @@ -241,6 +234,19 @@ void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi, } /** + * zfcp_fc_fcp_tm() - Setup FCP command as task management command. + * @fcp: Pointer to FCP_CMND IU to set up. + * @dev: Pointer to SCSI_device where to send the task management command. + * @tm_flags: Task management flags to setup tm command. + */ +static inline +void zfcp_fc_fcp_tm(struct fcp_cmnd *fcp, struct scsi_device *dev, u8 tm_flags) +{ + int_to_scsilun(dev->lun, (struct scsi_lun *) &fcp->fc_lun); + fcp->fc_tm_flags = tm_flags; +} + +/** * zfcp_fc_evap_fcp_rsp - evaluate FCP RSP IU and update scsi_cmnd accordingly * @fcp_rsp: FCP RSP IU to evaluate * @scsi: SCSI command where to update status and sense buffer diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index b12cb81ad8a2..3c86e27f094d 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -4,7 +4,7 @@ * * Implementation of FSF commands. * - * Copyright IBM Corp. 2002, 2017 + * Copyright IBM Corp. 
2002, 2018 */ #define KMSG_COMPONENT "zfcp" @@ -437,6 +437,9 @@ void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) #define ZFCP_FSF_PORTSPEED_10GBIT (1 << 3) #define ZFCP_FSF_PORTSPEED_8GBIT (1 << 4) #define ZFCP_FSF_PORTSPEED_16GBIT (1 << 5) +#define ZFCP_FSF_PORTSPEED_32GBIT (1 << 6) +#define ZFCP_FSF_PORTSPEED_64GBIT (1 << 7) +#define ZFCP_FSF_PORTSPEED_128GBIT (1 << 8) #define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15) static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed) @@ -454,6 +457,12 @@ static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed) fdmi_speed |= FC_PORTSPEED_8GBIT; if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT) fdmi_speed |= FC_PORTSPEED_16GBIT; + if (fsf_speed & ZFCP_FSF_PORTSPEED_32GBIT) + fdmi_speed |= FC_PORTSPEED_32GBIT; + if (fsf_speed & ZFCP_FSF_PORTSPEED_64GBIT) + fdmi_speed |= FC_PORTSPEED_64GBIT; + if (fsf_speed & ZFCP_FSF_PORTSPEED_128GBIT) + fdmi_speed |= FC_PORTSPEED_128GBIT; if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED) fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED; return fdmi_speed; @@ -662,7 +671,7 @@ static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool) return req; } -static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool) +static struct fsf_qtcb *zfcp_fsf_qtcb_alloc(mempool_t *pool) { struct fsf_qtcb *qtcb; @@ -701,9 +710,10 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio, if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) { if (likely(pool)) - req->qtcb = zfcp_qtcb_alloc(adapter->pool.qtcb_pool); + req->qtcb = zfcp_fsf_qtcb_alloc( + adapter->pool.qtcb_pool); else - req->qtcb = zfcp_qtcb_alloc(NULL); + req->qtcb = zfcp_fsf_qtcb_alloc(NULL); if (unlikely(!req->qtcb)) { zfcp_fsf_req_free(req); @@ -2036,10 +2046,14 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi) sizeof(blktrc)); } -static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req) +/** + * zfcp_fsf_fcp_handler_common() - FCP response handler common to I/O and TMF. + * @req: Pointer to FSF request. + * @sdev: Pointer to SCSI device as request context. 
+ */ +static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req, + struct scsi_device *sdev) { - struct scsi_cmnd *scmnd = req->data; - struct scsi_device *sdev = scmnd->device; struct zfcp_scsi_dev *zfcp_sdev; struct fsf_qtcb_header *header = &req->qtcb->header; @@ -2051,7 +2065,7 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req) switch (header->fsf_status) { case FSF_HANDLE_MISMATCH: case FSF_PORT_HANDLE_NOT_VALID: - zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1"); + zfcp_erp_adapter_reopen(req->adapter, 0, "fssfch1"); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_FCPLUN_NOT_VALID: @@ -2069,8 +2083,7 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req) req->qtcb->bottom.io.data_direction, (unsigned long long)zfcp_scsi_dev_lun(sdev), (unsigned long long)zfcp_sdev->port->wwpn); - zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0, - "fssfch3"); + zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch3"); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_CMND_LENGTH_NOT_VALID: @@ -2080,8 +2093,7 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req) req->qtcb->bottom.io.fcp_cmnd_length, (unsigned long long)zfcp_scsi_dev_lun(sdev), (unsigned long long)zfcp_sdev->port->wwpn); - zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0, - "fssfch4"); + zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch4"); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_PORT_BOXED: @@ -2120,7 +2132,7 @@ static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req) return; } - zfcp_fsf_fcp_handler_common(req); + zfcp_fsf_fcp_handler_common(req, scpnt->device); if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) { set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED); @@ -2258,7 +2270,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd) BUILD_BUG_ON(sizeof(struct fcp_cmnd) > FSF_FCP_CMND_SIZE); fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu; - zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0); + zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd); if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) && scsi_prot_sg_count(scsi_cmnd)) { @@ -2297,10 +2309,11 @@ out: static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req) { + struct scsi_device *sdev = req->data; struct fcp_resp_with_ext *fcp_rsp; struct fcp_resp_rsp_info *rsp_info; - zfcp_fsf_fcp_handler_common(req); + zfcp_fsf_fcp_handler_common(req, sdev); fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu; rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1]; @@ -2311,17 +2324,18 @@ static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req) } /** - * zfcp_fsf_fcp_task_mgmt - send SCSI task management command - * @scmnd: SCSI command to send the task management command for - * @tm_flags: unsigned byte for task management flags - * Returns: on success pointer to struct fsf_req, NULL otherwise + * zfcp_fsf_fcp_task_mgmt() - Send SCSI task management command (TMF). + * @sdev: Pointer to SCSI device to send the task management command to. + * @tm_flags: Unsigned byte for task management flags. + * + * Return: On success pointer to struct zfcp_fsf_req, %NULL otherwise. 
*/ -struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd, +struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev, u8 tm_flags) { struct zfcp_fsf_req *req = NULL; struct fcp_cmnd *fcp_cmnd; - struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device); + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio; if (unlikely(!(atomic_read(&zfcp_sdev->status) & @@ -2341,7 +2355,8 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd, goto out; } - req->data = scmnd; + req->data = sdev; + req->handler = zfcp_fsf_fcp_task_mgmt_handler; req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; req->qtcb->header.port_handle = zfcp_sdev->port->handle; @@ -2352,7 +2367,7 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd, zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu; - zfcp_fc_scsi_to_fcp(fcp_cmnd, scmnd, tm_flags); + zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags); zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); if (!zfcp_fsf_req_send(req)) diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h index 4baca67aba6d..535628b92f0a 100644 --- a/drivers/s390/scsi/zfcp_fsf.h +++ b/drivers/s390/scsi/zfcp_fsf.h @@ -4,7 +4,7 @@ * * Interface to the FSF support functions. * - * Copyright IBM Corp. 2002, 2017 + * Copyright IBM Corp. 2002, 2018 */ #ifndef FSF_H @@ -356,7 +356,7 @@ struct fsf_qtcb_bottom_config { u32 adapter_features; u32 connection_features; u32 fc_topology; - u32 fc_link_speed; + u32 fc_link_speed; /* one of ZFCP_FSF_PORTSPEED_* */ u32 adapter_type; u8 res0; u8 peer_d_id[3]; @@ -382,7 +382,7 @@ struct fsf_qtcb_bottom_port { u32 class_of_service; /* should be 0x00000006 for class 2 and 3 */ u8 supported_fc4_types[32]; /* should be 0x00000100 for scsi fcp */ u8 active_fc4_types[32]; - u32 supported_speed; /* 0x0001 for 1 GBit/s or 0x0002 for 2 GBit/s */ + u32 supported_speed; /* any combination of ZFCP_FSF_PORTSPEED_* */ u32 maximum_frame_size; /* fixed value of 2112 */ u64 seconds_since_last_reset; u64 tx_frames; diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 22f9562f415c..a8efcb330bc1 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -181,6 +181,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) if (abrt_req) break; + zfcp_dbf_scsi_abort("abrt_wt", scpnt, NULL); zfcp_erp_wait(adapter); ret = fc_block_scsi_eh(scpnt); if (ret) { @@ -264,44 +265,52 @@ static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags) write_unlock_irqrestore(&adapter->abort_lock, flags); } -static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) +/** + * zfcp_scsi_task_mgmt_function() - Send a task management function (sync). + * @sdev: Pointer to SCSI device to send the task management command to. + * @tm_flags: Task management flags, + * here we only handle %FCP_TMF_TGT_RESET or %FCP_TMF_LUN_RESET. 
+ */ +static int zfcp_scsi_task_mgmt_function(struct scsi_device *sdev, u8 tm_flags) { - struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device); + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; + struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); struct zfcp_fsf_req *fsf_req = NULL; int retval = SUCCESS, ret; int retry = 3; while (retry--) { - fsf_req = zfcp_fsf_fcp_task_mgmt(scpnt, tm_flags); + fsf_req = zfcp_fsf_fcp_task_mgmt(sdev, tm_flags); if (fsf_req) break; + zfcp_dbf_scsi_devreset("wait", sdev, tm_flags, NULL); zfcp_erp_wait(adapter); - ret = fc_block_scsi_eh(scpnt); + ret = fc_block_rport(rport); if (ret) { - zfcp_dbf_scsi_devreset("fiof", scpnt, tm_flags, NULL); + zfcp_dbf_scsi_devreset("fiof", sdev, tm_flags, NULL); return ret; } if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_RUNNING)) { - zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags, NULL); + zfcp_dbf_scsi_devreset("nres", sdev, tm_flags, NULL); return SUCCESS; } } if (!fsf_req) { - zfcp_dbf_scsi_devreset("reqf", scpnt, tm_flags, NULL); + zfcp_dbf_scsi_devreset("reqf", sdev, tm_flags, NULL); return FAILED; } wait_for_completion(&fsf_req->completion); if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) { - zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags, fsf_req); + zfcp_dbf_scsi_devreset("fail", sdev, tm_flags, fsf_req); retval = FAILED; } else { - zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags, fsf_req); + zfcp_dbf_scsi_devreset("okay", sdev, tm_flags, fsf_req); zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags); } @@ -311,27 +320,81 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt) { - return zfcp_task_mgmt_function(scpnt, FCP_TMF_LUN_RESET); + struct scsi_device *sdev = scpnt->device; + + return zfcp_scsi_task_mgmt_function(sdev, FCP_TMF_LUN_RESET); } static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt) { - return zfcp_task_mgmt_function(scpnt, FCP_TMF_TGT_RESET); + struct scsi_target *starget = scsi_target(scpnt->device); + struct fc_rport *rport = starget_to_rport(starget); + struct Scsi_Host *shost = rport_to_shost(rport); + struct scsi_device *sdev = NULL, *tmp_sdev; + struct zfcp_adapter *adapter = + (struct zfcp_adapter *)shost->hostdata[0]; + int ret; + + shost_for_each_device(tmp_sdev, shost) { + if (tmp_sdev->id == starget->id) { + sdev = tmp_sdev; + break; + } + } + if (!sdev) { + ret = FAILED; + zfcp_dbf_scsi_eh("tr_nosd", adapter, starget->id, ret); + return ret; + } + + ret = zfcp_scsi_task_mgmt_function(sdev, FCP_TMF_TGT_RESET); + + /* release reference from above shost_for_each_device */ + if (sdev) + scsi_device_put(tmp_sdev); + + return ret; } static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) { struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device); struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; - int ret; + int ret = SUCCESS, fc_ret; zfcp_erp_adapter_reopen(adapter, 0, "schrh_1"); zfcp_erp_wait(adapter); - ret = fc_block_scsi_eh(scpnt); - if (ret) + fc_ret = fc_block_scsi_eh(scpnt); + if (fc_ret) + ret = fc_ret; + + zfcp_dbf_scsi_eh("schrh_r", adapter, ~0, ret); + return ret; +} + +/** + * zfcp_scsi_sysfs_host_reset() - Support scsi_host sysfs attribute host_reset. + * @shost: Pointer to Scsi_Host to perform action on. + * @reset_type: We support %SCSI_ADAPTER_RESET but not %SCSI_FIRMWARE_RESET. + * + * Return: 0 on %SCSI_ADAPTER_RESET, -%EOPNOTSUPP otherwise. 
+ * + * This is similar to zfcp_sysfs_adapter_failed_store(). + */ +static int zfcp_scsi_sysfs_host_reset(struct Scsi_Host *shost, int reset_type) +{ + struct zfcp_adapter *adapter = + (struct zfcp_adapter *)shost->hostdata[0]; + int ret = 0; + + if (reset_type != SCSI_ADAPTER_RESET) { + ret = -EOPNOTSUPP; + zfcp_dbf_scsi_eh("scshr_n", adapter, ~0, ret); return ret; + } - return SUCCESS; + zfcp_erp_adapter_reset_sync(adapter, "scshr_y"); + return ret; } struct scsi_transport_template *zfcp_scsi_transport_template; @@ -349,6 +412,7 @@ static struct scsi_host_template zfcp_scsi_host_template = { .slave_configure = zfcp_scsi_slave_configure, .slave_destroy = zfcp_scsi_slave_destroy, .change_queue_depth = scsi_change_queue_depth, + .host_reset = zfcp_scsi_sysfs_host_reset, .proc_name = "zfcp", .can_queue = 4096, .this_id = -1, @@ -363,6 +427,7 @@ static struct scsi_host_template zfcp_scsi_host_template = { .shost_attrs = zfcp_sysfs_shost_attrs, .sdev_attrs = zfcp_sysfs_sdev_attrs, .track_queue_depth = 1, + .supported_mode = MODE_INITIATOR, }; /** @@ -430,7 +495,7 @@ void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter) } static struct fc_host_statistics* -zfcp_init_fc_host_stats(struct zfcp_adapter *adapter) +zfcp_scsi_init_fc_host_stats(struct zfcp_adapter *adapter) { struct fc_host_statistics *fc_stats; @@ -444,9 +509,9 @@ zfcp_init_fc_host_stats(struct zfcp_adapter *adapter) return adapter->fc_stats; } -static void zfcp_adjust_fc_host_stats(struct fc_host_statistics *fc_stats, - struct fsf_qtcb_bottom_port *data, - struct fsf_qtcb_bottom_port *old) +static void zfcp_scsi_adjust_fc_host_stats(struct fc_host_statistics *fc_stats, + struct fsf_qtcb_bottom_port *data, + struct fsf_qtcb_bottom_port *old) { fc_stats->seconds_since_last_reset = data->seconds_since_last_reset - old->seconds_since_last_reset; @@ -477,8 +542,8 @@ static void zfcp_adjust_fc_host_stats(struct fc_host_statistics *fc_stats, fc_stats->fcp_output_megabytes = data->output_mb - old->output_mb; } -static void zfcp_set_fc_host_stats(struct fc_host_statistics *fc_stats, - struct fsf_qtcb_bottom_port *data) +static void zfcp_scsi_set_fc_host_stats(struct fc_host_statistics *fc_stats, + struct fsf_qtcb_bottom_port *data) { fc_stats->seconds_since_last_reset = data->seconds_since_last_reset; fc_stats->tx_frames = data->tx_frames; @@ -502,7 +567,8 @@ static void zfcp_set_fc_host_stats(struct fc_host_statistics *fc_stats, fc_stats->fcp_output_megabytes = data->output_mb; } -static struct fc_host_statistics *zfcp_get_fc_host_stats(struct Scsi_Host *host) +static struct fc_host_statistics * +zfcp_scsi_get_fc_host_stats(struct Scsi_Host *host) { struct zfcp_adapter *adapter; struct fc_host_statistics *fc_stats; @@ -510,7 +576,7 @@ static struct fc_host_statistics *zfcp_get_fc_host_stats(struct Scsi_Host *host) int ret; adapter = (struct zfcp_adapter *)host->hostdata[0]; - fc_stats = zfcp_init_fc_host_stats(adapter); + fc_stats = zfcp_scsi_init_fc_host_stats(adapter); if (!fc_stats) return NULL; @@ -527,16 +593,16 @@ static struct fc_host_statistics *zfcp_get_fc_host_stats(struct Scsi_Host *host) if (adapter->stats_reset && ((jiffies/HZ - adapter->stats_reset) < data->seconds_since_last_reset)) - zfcp_adjust_fc_host_stats(fc_stats, data, - adapter->stats_reset_data); + zfcp_scsi_adjust_fc_host_stats(fc_stats, data, + adapter->stats_reset_data); else - zfcp_set_fc_host_stats(fc_stats, data); + zfcp_scsi_set_fc_host_stats(fc_stats, data); kfree(data); return fc_stats; } -static void zfcp_reset_fc_host_stats(struct Scsi_Host 
*shost) +static void zfcp_scsi_reset_fc_host_stats(struct Scsi_Host *shost) { struct zfcp_adapter *adapter; struct fsf_qtcb_bottom_port *data; @@ -558,7 +624,7 @@ static void zfcp_reset_fc_host_stats(struct Scsi_Host *shost) } } -static void zfcp_get_host_port_state(struct Scsi_Host *shost) +static void zfcp_scsi_get_host_port_state(struct Scsi_Host *shost) { struct zfcp_adapter *adapter = (struct zfcp_adapter *)shost->hostdata[0]; @@ -575,7 +641,8 @@ static void zfcp_get_host_port_state(struct Scsi_Host *shost) fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; } -static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout) +static void zfcp_scsi_set_rport_dev_loss_tmo(struct fc_rport *rport, + u32 timeout) { rport->dev_loss_tmo = timeout; } @@ -602,6 +669,11 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport) if (port) { zfcp_erp_port_forced_reopen(port, 0, "sctrpi1"); put_device(&port->dev); + } else { + zfcp_erp_port_forced_no_port_dbf( + "sctrpin", adapter, + rport->port_name /* zfcp_scsi_rport_register */, + rport->port_id /* zfcp_scsi_rport_register */); } } @@ -687,6 +759,9 @@ void zfcp_scsi_rport_work(struct work_struct *work) struct zfcp_port *port = container_of(work, struct zfcp_port, rport_work); + set_worker_desc("zrp%c-%16llx", + (port->rport_task == RPORT_ADD) ? 'a' : 'd', + port->wwpn); /* < WORKER_DESC_LEN=24 */ while (port->rport_task) { if (port->rport_task == RPORT_ADD) { port->rport_task = RPORT_NONE; @@ -761,10 +836,10 @@ struct fc_function_template zfcp_transport_functions = { .show_host_supported_speeds = 1, .show_host_maxframe_size = 1, .show_host_serial_number = 1, - .get_fc_host_stats = zfcp_get_fc_host_stats, - .reset_fc_host_stats = zfcp_reset_fc_host_stats, - .set_rport_dev_loss_tmo = zfcp_set_rport_dev_loss_tmo, - .get_host_port_state = zfcp_get_host_port_state, + .get_fc_host_stats = zfcp_scsi_get_fc_host_stats, + .reset_fc_host_stats = zfcp_scsi_reset_fc_host_stats, + .set_rport_dev_loss_tmo = zfcp_scsi_set_rport_dev_loss_tmo, + .get_host_port_state = zfcp_scsi_get_host_port_state, .terminate_rport_io = zfcp_scsi_terminate_rport_io, .show_host_port_state = 1, .show_host_active_fc4s = 1, diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index 3ac823f2540f..b277be6f7611 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -200,10 +200,7 @@ static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev, goto out; } - zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING); - zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, - "syafai2"); - zfcp_erp_wait(adapter); + zfcp_erp_adapter_reset_sync(adapter, "syafai2"); out: zfcp_ccw_adapter_put(adapter); return retval ? retval : (ssize_t) count; |
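
The reworked zfcp_erp_action_enqueue() in the zfcp_erp.c hunk above no longer returns a value; instead it or-combines the ZFCP_ERP_ACTION_NONE / ZFCP_ERP_ACTION_FAILED eyecatcher flags into the "need" value it hands to zfcp_dbf_rec_trig(). A minimal userspace sketch of how a consumer of those trace records could decode such a value follows; the constants mirror the enum added by the patch, while the decode helper and its messages are purely illustrative and not part of the patch.

/*
 * Illustrative only: decode a "need" value as recorded by zfcp_dbf_rec_trig()
 * after this patch. Constants taken from enum zfcp_erp_act_type above.
 */
#include <stdio.h>

#define ZFCP_ERP_ACTION_REOPEN_LUN		1
#define ZFCP_ERP_ACTION_REOPEN_PORT		2
#define ZFCP_ERP_ACTION_REOPEN_PORT_FORCED	3
#define ZFCP_ERP_ACTION_REOPEN_ADAPTER		4
#define ZFCP_ERP_ACTION_NONE			0xc0
#define ZFCP_ERP_ACTION_FAILED			0xe0

static void decode_trig_need(int need)
{
	int base = need & 0x1f;	/* basic action type; 0 if none was derived */

	if ((need & ZFCP_ERP_ACTION_FAILED) == ZFCP_ERP_ACTION_FAILED)
		printf("no ERP: object already ERP_FAILED (base %d)\n", base);
	else if ((need & ZFCP_ERP_ACTION_NONE) == ZFCP_ERP_ACTION_NONE)
		printf("ERP wanted but no action set up (base %d)\n", base);
	else if (need)
		printf("ERP action enqueued, type %d\n", need);
	else
		printf("no recovery necessary\n");
}

int main(void)
{
	decode_trig_need(ZFCP_ERP_ACTION_REOPEN_PORT);
	decode_trig_need(ZFCP_ERP_ACTION_REOPEN_LUN | ZFCP_ERP_ACTION_NONE);
	decode_trig_need(ZFCP_ERP_ACTION_FAILED);
	return 0;
}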
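
The set_worker_desc() calls added in zfcp_fc.c and zfcp_scsi.c each carry a comment asserting that the formatted description fits within WORKER_DESC_LEN (24). A small standalone check of that arithmetic, assuming a made-up sample WWPN:

/*
 * Illustrative only: the "zgidpn%16llx" and "zrp%c-%16llx" formats from the
 * patch expand to 22 and 21 characters respectively, below WORKER_DESC_LEN.
 */
#include <stdio.h>

#define WORKER_DESC_LEN 24

int main(void)
{
	unsigned long long wwpn = 0x500507630300c562ULL;	/* sample value */
	char buf[WORKER_DESC_LEN];
	int n;

	n = snprintf(buf, sizeof(buf), "zgidpn%16llx", wwpn);
	printf("%s -> %d chars (limit %d)\n", buf, n, WORKER_DESC_LEN);

	n = snprintf(buf, sizeof(buf), "zrp%c-%16llx", 'a', wwpn);
	printf("%s -> %d chars (limit %d)\n", buf, n, WORKER_DESC_LEN);

	return 0;
}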
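
With the new .host_reset callback wired into zfcp_scsi_host_template, adapter recovery can also be triggered from user space through the SCSI midlayer's host_reset sysfs attribute: writing "adapter" maps to SCSI_ADAPTER_RESET and ends up in zfcp_erp_adapter_reset_sync(), while "firmware" is rejected by zfcp_scsi_sysfs_host_reset() with -EOPNOTSUPP. A minimal sketch of that write, assuming host0 stands in for the actual zfcp SCSI host number:

/*
 * Illustrative only: equivalent to
 *   echo adapter > /sys/class/scsi_host/host0/host_reset
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/class/scsi_host/host0/host_reset"; /* placeholder host */
	const char *cmd = "adapter";	/* SCSI_ADAPTER_RESET; "firmware" returns -EOPNOTSUPP */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);
	return 0;
}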