Diffstat (limited to 'drivers'): 26 files changed, 1195 insertions, 494 deletions
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c index 50b8b7d54416..530d0e49f2ed 100644 --- a/drivers/pci/hotplug/s390_pci_hpc.c +++ b/drivers/pci/hotplug/s390_pci_hpc.c @@ -5,12 +5,13 @@ * * Author(s): * Jan Glauber <jang@linux.vnet.ibm.com> + * + * License: GPL */ #define KMSG_COMPONENT "zpci" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt -#include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/pci.h> @@ -21,10 +22,6 @@ #define SLOT_NAME_SIZE 10 static LIST_HEAD(s390_hotplug_slot_list); -MODULE_AUTHOR("Jan Glauber <jang@linux.vnet.ibm.com"); -MODULE_DESCRIPTION("Hot Plug PCI Controller for System z"); -MODULE_LICENSE("GPL"); - static int zpci_fn_configured(enum zpci_state state) { return state == ZPCI_FN_STATE_CONFIGURED || diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 1de089019268..0e3fdfdbd098 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -69,6 +69,7 @@ static void dasd_block_tasklet(struct dasd_block *); static void do_kick_device(struct work_struct *); static void do_restore_device(struct work_struct *); static void do_reload_device(struct work_struct *); +static void do_requeue_requests(struct work_struct *); static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *); static void dasd_device_timeout(unsigned long); static void dasd_block_timeout(unsigned long); @@ -125,6 +126,7 @@ struct dasd_device *dasd_alloc_device(void) INIT_WORK(&device->kick_work, do_kick_device); INIT_WORK(&device->restore_device, do_restore_device); INIT_WORK(&device->reload_device, do_reload_device); + INIT_WORK(&device->requeue_requests, do_requeue_requests); device->state = DASD_STATE_NEW; device->target = DASD_STATE_NEW; mutex_init(&device->state_mutex); @@ -1448,9 +1450,9 @@ int dasd_start_IO(struct dasd_ccw_req *cqr) cqr->starttime = jiffies; cqr->retries--; if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { - cqr->lpm &= device->path_data.opm; + cqr->lpm &= dasd_path_get_opm(device); if (!cqr->lpm) - cqr->lpm = device->path_data.opm; + cqr->lpm = dasd_path_get_opm(device); } if (cqr->cpmode == 1) { rc = ccw_device_tm_start(device->cdev, cqr->cpaddr, @@ -1483,8 +1485,8 @@ int dasd_start_IO(struct dasd_ccw_req *cqr) DBF_DEV_EVENT(DBF_WARNING, device, "start_IO: selected paths gone (%x)", cqr->lpm); - } else if (cqr->lpm != device->path_data.opm) { - cqr->lpm = device->path_data.opm; + } else if (cqr->lpm != dasd_path_get_opm(device)) { + cqr->lpm = dasd_path_get_opm(device); DBF_DEV_EVENT(DBF_DEBUG, device, "%s", "start_IO: selected paths gone," " retry on all paths"); @@ -1493,11 +1495,10 @@ int dasd_start_IO(struct dasd_ccw_req *cqr) "start_IO: all paths in opm gone," " do path verification"); dasd_generic_last_path_gone(device); - device->path_data.opm = 0; - device->path_data.ppm = 0; - device->path_data.npm = 0; - device->path_data.tbvpm = - ccw_device_get_path_mask(device->cdev); + dasd_path_no_path(device); + dasd_path_set_tbvpm(device, + ccw_device_get_path_mask( + device->cdev)); } break; case -ENODEV: @@ -1623,6 +1624,13 @@ void dasd_generic_handle_state_change(struct dasd_device *device) } EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change); +static int dasd_check_hpf_error(struct irb *irb) +{ + return (scsw_tm_is_valid_schxs(&irb->scsw) && + (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX || + irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX)); +} + /* * Interrupt handler for "normal" ssch-io based dasd devices. 
*/ @@ -1642,7 +1650,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, switch (PTR_ERR(irb)) { case -EIO: if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) { - device = (struct dasd_device *) cqr->startdev; + device = cqr->startdev; cqr->status = DASD_CQR_CLEARED; dasd_device_clear_timer(device); wake_up(&dasd_flush_wq); @@ -1749,19 +1757,26 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, struct dasd_ccw_req, devlist); } } else { /* error */ + /* check for HPF error + * call discipline function to requeue all requests + * and disable HPF accordingly + */ + if (cqr->cpmode && dasd_check_hpf_error(irb) && + device->discipline->handle_hpf_error) + device->discipline->handle_hpf_error(device, irb); /* * If we don't want complex ERP for this request, then just * reset this and retry it in the fastpath */ if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) && cqr->retries > 0) { - if (cqr->lpm == device->path_data.opm) + if (cqr->lpm == dasd_path_get_opm(device)) DBF_DEV_EVENT(DBF_DEBUG, device, "default ERP in fastpath " "(%i retries left)", cqr->retries); if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) - cqr->lpm = device->path_data.opm; + cqr->lpm = dasd_path_get_opm(device); cqr->status = DASD_CQR_QUEUED; next = cqr; } else @@ -2002,17 +2017,18 @@ static void __dasd_device_check_path_events(struct dasd_device *device) { int rc; - if (device->path_data.tbvpm) { - if (device->stopped & ~(DASD_STOPPED_DC_WAIT | - DASD_UNRESUMED_PM)) - return; - rc = device->discipline->verify_path( - device, device->path_data.tbvpm); - if (rc) - dasd_device_set_timer(device, 50); - else - device->path_data.tbvpm = 0; - } + if (!dasd_path_get_tbvpm(device)) + return; + + if (device->stopped & + ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM)) + return; + rc = device->discipline->verify_path(device, + dasd_path_get_tbvpm(device)); + if (rc) + dasd_device_set_timer(device, 50); + else + dasd_path_clear_all_verify(device); }; /* @@ -2924,10 +2940,10 @@ static int _dasd_requeue_request(struct dasd_ccw_req *cqr) if (!block) return -EINVAL; - spin_lock_irqsave(&block->queue_lock, flags); + spin_lock_irqsave(&block->request_queue_lock, flags); req = (struct request *) cqr->callback_data; blk_requeue_request(block->request_queue, req); - spin_unlock_irqrestore(&block->queue_lock, flags); + spin_unlock_irqrestore(&block->request_queue_lock, flags); return 0; } @@ -3121,6 +3137,7 @@ static int dasd_alloc_queue(struct dasd_block *block) */ static void dasd_setup_queue(struct dasd_block *block) { + struct request_queue *q = block->request_queue; int max; if (block->base->features & DASD_FEATURE_USERAW) { @@ -3135,17 +3152,16 @@ static void dasd_setup_queue(struct dasd_block *block) } else { max = block->base->discipline->max_blocks << block->s2b_shift; } - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue); - block->request_queue->limits.max_dev_sectors = max; - blk_queue_logical_block_size(block->request_queue, - block->bp_block); - blk_queue_max_hw_sectors(block->request_queue, max); - blk_queue_max_segments(block->request_queue, -1L); + queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); + q->limits.max_dev_sectors = max; + blk_queue_logical_block_size(q, block->bp_block); + blk_queue_max_hw_sectors(q, max); + blk_queue_max_segments(q, USHRT_MAX); /* with page sized segments we can translate each segement into * one idaw/tidaw */ - blk_queue_max_segment_size(block->request_queue, PAGE_SIZE); - blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1); + 
blk_queue_max_segment_size(q, PAGE_SIZE); + blk_queue_segment_boundary(q, PAGE_SIZE - 1); } /* @@ -3517,11 +3533,15 @@ int dasd_generic_set_offline(struct ccw_device *cdev) struct dasd_device *device; struct dasd_block *block; int max_count, open_count, rc; + unsigned long flags; rc = 0; - device = dasd_device_from_cdev(cdev); - if (IS_ERR(device)) + spin_lock_irqsave(get_ccwdev_lock(cdev), flags); + device = dasd_device_from_cdev_locked(cdev); + if (IS_ERR(device)) { + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); return PTR_ERR(device); + } /* * We must make sure that this device is currently not in use. @@ -3540,8 +3560,7 @@ int dasd_generic_set_offline(struct ccw_device *cdev) pr_warn("%s: The DASD cannot be set offline while it is in use\n", dev_name(&cdev->dev)); clear_bit(DASD_FLAG_OFFLINE, &device->flags); - dasd_put_device(device); - return -EBUSY; + goto out_busy; } } @@ -3551,19 +3570,19 @@ int dasd_generic_set_offline(struct ccw_device *cdev) * could only be called by normal offline so safe_offline flag * needs to be removed to run normal offline and kill all I/O */ - if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) { + if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) /* Already doing normal offline processing */ - dasd_put_device(device); - return -EBUSY; - } else + goto out_busy; + else clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags); - - } else - if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { + } else { + if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) /* Already doing offline processing */ - dasd_put_device(device); - return -EBUSY; - } + goto out_busy; + } + + set_bit(DASD_FLAG_OFFLINE, &device->flags); + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); /* * if safe_offline called set safe_offline_running flag and @@ -3591,7 +3610,6 @@ int dasd_generic_set_offline(struct ccw_device *cdev) goto interrupted; } - set_bit(DASD_FLAG_OFFLINE, &device->flags); dasd_set_target_state(device, DASD_STATE_NEW); /* dasd_delete_device destroys the device reference. 
*/ block = device->block; @@ -3610,7 +3628,14 @@ interrupted: clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); clear_bit(DASD_FLAG_OFFLINE, &device->flags); dasd_put_device(device); + return rc; + +out_busy: + dasd_put_device(device); + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); + + return -EBUSY; } EXPORT_SYMBOL_GPL(dasd_generic_set_offline); @@ -3675,14 +3700,12 @@ int dasd_generic_notify(struct ccw_device *cdev, int event) case CIO_GONE: case CIO_BOXED: case CIO_NO_PATH: - device->path_data.opm = 0; - device->path_data.ppm = 0; - device->path_data.npm = 0; + dasd_path_no_path(device); ret = dasd_generic_last_path_gone(device); break; case CIO_OPER: ret = 1; - if (device->path_data.opm) + if (dasd_path_get_opm(device)) ret = dasd_generic_path_operational(device); break; } @@ -3693,48 +3716,32 @@ EXPORT_SYMBOL_GPL(dasd_generic_notify); void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) { - int chp; - __u8 oldopm, eventlpm; struct dasd_device *device; + int chp, oldopm, hpfpm, ifccpm; device = dasd_device_from_cdev_locked(cdev); if (IS_ERR(device)) return; + + oldopm = dasd_path_get_opm(device); for (chp = 0; chp < 8; chp++) { - eventlpm = 0x80 >> chp; if (path_event[chp] & PE_PATH_GONE) { - oldopm = device->path_data.opm; - device->path_data.opm &= ~eventlpm; - device->path_data.ppm &= ~eventlpm; - device->path_data.npm &= ~eventlpm; - if (oldopm && !device->path_data.opm) { - dev_warn(&device->cdev->dev, - "No verified channel paths remain " - "for the device\n"); - DBF_DEV_EVENT(DBF_WARNING, device, - "%s", "last verified path gone"); - dasd_eer_write(device, NULL, DASD_EER_NOPATH); - dasd_device_set_stop_bits(device, - DASD_STOPPED_DC_WAIT); - } + dasd_path_notoper(device, chp); } if (path_event[chp] & PE_PATH_AVAILABLE) { - device->path_data.opm &= ~eventlpm; - device->path_data.ppm &= ~eventlpm; - device->path_data.npm &= ~eventlpm; - device->path_data.tbvpm |= eventlpm; + dasd_path_available(device, chp); dasd_schedule_device_bh(device); } if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) { - if (!(device->path_data.opm & eventlpm) && - !(device->path_data.tbvpm & eventlpm)) { + if (!dasd_path_is_operational(device, chp) && + !dasd_path_need_verify(device, chp)) { /* * we can not establish a pathgroup on an * unavailable path, so trigger a path * verification first */ - device->path_data.tbvpm |= eventlpm; - dasd_schedule_device_bh(device); + dasd_path_available(device, chp); + dasd_schedule_device_bh(device); } DBF_DEV_EVENT(DBF_WARNING, device, "%s", "Pathgroup re-established\n"); @@ -3742,45 +3749,65 @@ void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) device->discipline->kick_validate(device); } } + hpfpm = dasd_path_get_hpfpm(device); + ifccpm = dasd_path_get_ifccpm(device); + if (!dasd_path_get_opm(device) && hpfpm) { + /* + * device has no operational paths but at least one path is + * disabled due to HPF errors + * disable HPF at all and use the path(s) again + */ + if (device->discipline->disable_hpf) + device->discipline->disable_hpf(device); + dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC); + dasd_path_set_tbvpm(device, hpfpm); + dasd_schedule_device_bh(device); + dasd_schedule_requeue(device); + } else if (!dasd_path_get_opm(device) && ifccpm) { + /* + * device has no operational paths but at least one path is + * disabled due to IFCC errors + * trigger path verification on paths with IFCC errors + */ + dasd_path_set_tbvpm(device, ifccpm); + dasd_schedule_device_bh(device); + } + if (oldopm && 
!dasd_path_get_opm(device) && !hpfpm && !ifccpm) { + dev_warn(&device->cdev->dev, + "No verified channel paths remain for the device\n"); + DBF_DEV_EVENT(DBF_WARNING, device, + "%s", "last verified path gone"); + dasd_eer_write(device, NULL, DASD_EER_NOPATH); + dasd_device_set_stop_bits(device, + DASD_STOPPED_DC_WAIT); + } dasd_put_device(device); } EXPORT_SYMBOL_GPL(dasd_generic_path_event); int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm) { - if (!device->path_data.opm && lpm) { - device->path_data.opm = lpm; + if (!dasd_path_get_opm(device) && lpm) { + dasd_path_set_opm(device, lpm); dasd_generic_path_operational(device); } else - device->path_data.opm |= lpm; + dasd_path_add_opm(device, lpm); return 0; } EXPORT_SYMBOL_GPL(dasd_generic_verify_path); - -int dasd_generic_pm_freeze(struct ccw_device *cdev) +/* + * clear active requests and requeue them to block layer if possible + */ +static int dasd_generic_requeue_all_requests(struct dasd_device *device) { - struct dasd_device *device = dasd_device_from_cdev(cdev); - struct list_head freeze_queue; + struct list_head requeue_queue; struct dasd_ccw_req *cqr, *n; struct dasd_ccw_req *refers; int rc; - if (IS_ERR(device)) - return PTR_ERR(device); - - /* mark device as suspended */ - set_bit(DASD_FLAG_SUSPENDED, &device->flags); - - if (device->discipline->freeze) - rc = device->discipline->freeze(device); - - /* disallow new I/O */ - dasd_device_set_stop_bits(device, DASD_STOPPED_PM); - - /* clear active requests and requeue them to block layer if possible */ - INIT_LIST_HEAD(&freeze_queue); - spin_lock_irq(get_ccwdev_lock(cdev)); + INIT_LIST_HEAD(&requeue_queue); + spin_lock_irq(get_ccwdev_lock(device->cdev)); rc = 0; list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { /* Check status and move request to flush_queue */ @@ -3791,25 +3818,22 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev) dev_err(&device->cdev->dev, "Unable to terminate request %p " "on suspend\n", cqr); - spin_unlock_irq(get_ccwdev_lock(cdev)); + spin_unlock_irq(get_ccwdev_lock(device->cdev)); dasd_put_device(device); return rc; } } - list_move_tail(&cqr->devlist, &freeze_queue); + list_move_tail(&cqr->devlist, &requeue_queue); } - spin_unlock_irq(get_ccwdev_lock(cdev)); + spin_unlock_irq(get_ccwdev_lock(device->cdev)); - list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) { + list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) { wait_event(dasd_flush_wq, (cqr->status != DASD_CQR_CLEAR_PENDING)); - if (cqr->status == DASD_CQR_CLEARED) - cqr->status = DASD_CQR_QUEUED; - /* requeue requests to blocklayer will only work for - block device requests */ - if (_dasd_requeue_request(cqr)) - continue; + /* mark sleepon requests as ended */ + if (cqr->callback_data == DASD_SLEEPON_START_TAG) + cqr->callback_data = DASD_SLEEPON_END_TAG; /* remove requests from device and block queue */ list_del_init(&cqr->devlist); @@ -3821,6 +3845,14 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev) dasd_free_erp_request(cqr, cqr->memdev); cqr = refers; } + + /* + * requeue requests to blocklayer will only work + * for block device requests + */ + if (_dasd_requeue_request(cqr)) + continue; + if (cqr->block) list_del_init(&cqr->blocklist); cqr->block->base->discipline->free_cp( @@ -3831,15 +3863,56 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev) * if requests remain then they are internal request * and go back to the device queue */ - if (!list_empty(&freeze_queue)) { + if (!list_empty(&requeue_queue)) { /* move freeze_queue to start of the 
ccw_queue */ - spin_lock_irq(get_ccwdev_lock(cdev)); - list_splice_tail(&freeze_queue, &device->ccw_queue); - spin_unlock_irq(get_ccwdev_lock(cdev)); + spin_lock_irq(get_ccwdev_lock(device->cdev)); + list_splice_tail(&requeue_queue, &device->ccw_queue); + spin_unlock_irq(get_ccwdev_lock(device->cdev)); } - dasd_put_device(device); + /* wake up generic waitqueue for eventually ended sleepon requests */ + wake_up(&generic_waitq); return rc; } + +static void do_requeue_requests(struct work_struct *work) +{ + struct dasd_device *device = container_of(work, struct dasd_device, + requeue_requests); + dasd_generic_requeue_all_requests(device); + dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC); + if (device->block) + dasd_schedule_block_bh(device->block); + dasd_put_device(device); +} + +void dasd_schedule_requeue(struct dasd_device *device) +{ + dasd_get_device(device); + /* queue call to dasd_reload_device to the kernel event daemon. */ + if (!schedule_work(&device->requeue_requests)) + dasd_put_device(device); +} +EXPORT_SYMBOL(dasd_schedule_requeue); + +int dasd_generic_pm_freeze(struct ccw_device *cdev) +{ + struct dasd_device *device = dasd_device_from_cdev(cdev); + int rc; + + if (IS_ERR(device)) + return PTR_ERR(device); + + /* mark device as suspended */ + set_bit(DASD_FLAG_SUSPENDED, &device->flags); + + if (device->discipline->freeze) + rc = device->discipline->freeze(device); + + /* disallow new I/O */ + dasd_device_set_stop_bits(device, DASD_STOPPED_PM); + + return dasd_generic_requeue_all_requests(device); +} EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze); int dasd_generic_restore_device(struct ccw_device *cdev) diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index 8305ab688d57..95f7645e3c37 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c @@ -152,7 +152,7 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp) opm = ccw_device_get_path_mask(device->cdev); spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); if (erp->lpm == 0) - erp->lpm = device->path_data.opm & + erp->lpm = dasd_path_get_opm(device) & ~(erp->irb.esw.esw0.sublog.lpum); else erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum); @@ -273,7 +273,7 @@ static struct dasd_ccw_req *dasd_3990_erp_action_1(struct dasd_ccw_req *erp) !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) { erp->status = DASD_CQR_FILLED; erp->retries = 10; - erp->lpm = erp->startdev->path_data.opm; + erp->lpm = dasd_path_get_opm(erp->startdev); erp->function = dasd_3990_erp_action_1_sec; } return erp; @@ -1926,7 +1926,7 @@ dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense) !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) { /* reset the lpm and the status to be able to * try further actions. 
*/ - erp->lpm = erp->startdev->path_data.opm; + erp->lpm = dasd_path_get_opm(erp->startdev); erp->status = DASD_CQR_NEED_ERP; } } @@ -2208,6 +2208,51 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense) } /* end dasd_3990_erp_inspect_32 */ +static void dasd_3990_erp_disable_path(struct dasd_device *device, __u8 lpum) +{ + int pos = pathmask_to_pos(lpum); + + /* no remaining path, cannot disable */ + if (!(dasd_path_get_opm(device) & ~lpum)) + return; + + dev_err(&device->cdev->dev, + "Path %x.%02x (pathmask %02x) is disabled - IFCC threshold exceeded\n", + device->path[pos].cssid, device->path[pos].chpid, lpum); + dasd_path_remove_opm(device, lpum); + dasd_path_add_ifccpm(device, lpum); + device->path[pos].errorclk = 0; + atomic_set(&device->path[pos].error_count, 0); +} + +static void dasd_3990_erp_account_error(struct dasd_ccw_req *erp) +{ + struct dasd_device *device = erp->startdev; + __u8 lpum = erp->refers->irb.esw.esw1.lpum; + int pos = pathmask_to_pos(lpum); + unsigned long long clk; + + if (!device->path_thrhld) + return; + + clk = get_tod_clock(); + /* + * check if the last error is longer ago than the timeout, + * if so reset error state + */ + if ((tod_to_ns(clk - device->path[pos].errorclk) / NSEC_PER_SEC) + >= device->path_interval) { + atomic_set(&device->path[pos].error_count, 0); + device->path[pos].errorclk = 0; + } + atomic_inc(&device->path[pos].error_count); + device->path[pos].errorclk = clk; + /* threshold exceeded disable path if possible */ + if (atomic_read(&device->path[pos].error_count) >= + device->path_thrhld) + dasd_3990_erp_disable_path(device, lpum); +} + /* ***************************************************************************** * main ERP control functions (24 and 32 byte sense) @@ -2237,6 +2282,7 @@ dasd_3990_erp_control_check(struct dasd_ccw_req *erp) | SCHN_STAT_CHN_CTRL_CHK)) { DBF_DEV_EVENT(DBF_WARNING, device, "%s", "channel or interface control check"); + dasd_3990_erp_account_error(erp); erp = dasd_3990_erp_action_4(erp, NULL); } return erp; diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 15a1a70cace9..84ca314c87e3 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -725,27 +725,15 @@ static ssize_t dasd_ff_show(struct device *dev, struct device_attribute *attr, static ssize_t dasd_ff_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct dasd_devmap *devmap; - int val; - char *endp; - - devmap = dasd_devmap_from_cdev(to_ccwdev(dev)); - if (IS_ERR(devmap)) - return PTR_ERR(devmap); + unsigned int val; + int rc; - val = simple_strtoul(buf, &endp, 0); - if (((endp + 1) < (buf + count)) || (val > 1)) + if (kstrtouint(buf, 0, &val) || val > 1) return -EINVAL; - spin_lock(&dasd_devmap_lock); - if (val) - devmap->features |= DASD_FEATURE_FAILFAST; - else - devmap->features &= ~DASD_FEATURE_FAILFAST; - if (devmap->device) - devmap->device->features = devmap->features; - spin_unlock(&dasd_devmap_lock); - return count; + rc = dasd_set_feature(to_ccwdev(dev), DASD_FEATURE_FAILFAST, val); + + return rc ? 
: count; } static DEVICE_ATTR(failfast, 0644, dasd_ff_show, dasd_ff_store); @@ -771,32 +759,41 @@ static ssize_t dasd_ro_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct dasd_devmap *devmap; + struct ccw_device *cdev = to_ccwdev(dev); struct dasd_device *device; - int val; - char *endp; - - devmap = dasd_devmap_from_cdev(to_ccwdev(dev)); - if (IS_ERR(devmap)) - return PTR_ERR(devmap); + unsigned long flags; + unsigned int val; + int rc; - val = simple_strtoul(buf, &endp, 0); - if (((endp + 1) < (buf + count)) || (val > 1)) + if (kstrtouint(buf, 0, &val) || val > 1) return -EINVAL; - spin_lock(&dasd_devmap_lock); - if (val) - devmap->features |= DASD_FEATURE_READONLY; - else - devmap->features &= ~DASD_FEATURE_READONLY; - device = devmap->device; - if (device) { - device->features = devmap->features; - val = val || test_bit(DASD_FLAG_DEVICE_RO, &device->flags); + rc = dasd_set_feature(cdev, DASD_FEATURE_READONLY, val); + if (rc) + return rc; + + device = dasd_device_from_cdev(cdev); + if (IS_ERR(device)) + return PTR_ERR(device); + + spin_lock_irqsave(get_ccwdev_lock(cdev), flags); + val = val || test_bit(DASD_FLAG_DEVICE_RO, &device->flags); + + if (!device->block || !device->block->gdp || + test_bit(DASD_FLAG_OFFLINE, &device->flags)) { + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); + goto out; } - spin_unlock(&dasd_devmap_lock); - if (device && device->block && device->block->gdp) - set_disk_ro(device->block->gdp, val); + /* Increase open_count to avoid losing the block device */ + atomic_inc(&device->block->open_count); + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); + + set_disk_ro(device->block->gdp, val); + atomic_dec(&device->block->open_count); + +out: + dasd_put_device(device); + return count; } @@ -823,27 +820,15 @@ static ssize_t dasd_erplog_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct dasd_devmap *devmap; - int val; - char *endp; - - devmap = dasd_devmap_from_cdev(to_ccwdev(dev)); - if (IS_ERR(devmap)) - return PTR_ERR(devmap); + unsigned int val; + int rc; - val = simple_strtoul(buf, &endp, 0); - if (((endp + 1) < (buf + count)) || (val > 1)) + if (kstrtouint(buf, 0, &val) || val > 1) return -EINVAL; - spin_lock(&dasd_devmap_lock); - if (val) - devmap->features |= DASD_FEATURE_ERPLOG; - else - devmap->features &= ~DASD_FEATURE_ERPLOG; - if (devmap->device) - devmap->device->features = devmap->features; - spin_unlock(&dasd_devmap_lock); - return count; + rc = dasd_set_feature(to_ccwdev(dev), DASD_FEATURE_ERPLOG, val); + + return rc ? 
: count; } static DEVICE_ATTR(erplog, 0644, dasd_erplog_show, dasd_erplog_store); @@ -871,16 +856,14 @@ dasd_use_diag_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct dasd_devmap *devmap; + unsigned int val; ssize_t rc; - int val; - char *endp; devmap = dasd_devmap_from_cdev(to_ccwdev(dev)); if (IS_ERR(devmap)) return PTR_ERR(devmap); - val = simple_strtoul(buf, &endp, 0); - if (((endp + 1) < (buf + count)) || (val > 1)) + if (kstrtouint(buf, 0, &val) || val > 1) return -EINVAL; spin_lock(&dasd_devmap_lock); @@ -994,10 +977,12 @@ dasd_access_show(struct device *dev, struct device_attribute *attr, if (IS_ERR(device)) return PTR_ERR(device); - if (device->discipline->host_access_count) - count = device->discipline->host_access_count(device); - else + if (!device->discipline) + count = -ENODEV; + else if (!device->discipline->host_access_count) count = -EOPNOTSUPP; + else + count = device->discipline->host_access_count(device); dasd_put_device(device); if (count < 0) @@ -1197,27 +1182,25 @@ static ssize_t dasd_eer_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct dasd_devmap *devmap; - int val, rc; - char *endp; + struct dasd_device *device; + unsigned int val; + int rc = 0; - devmap = dasd_devmap_from_cdev(to_ccwdev(dev)); - if (IS_ERR(devmap)) - return PTR_ERR(devmap); - if (!devmap->device) - return -ENODEV; + device = dasd_device_from_cdev(to_ccwdev(dev)); + if (IS_ERR(device)) + return PTR_ERR(device); - val = simple_strtoul(buf, &endp, 0); - if (((endp + 1) < (buf + count)) || (val > 1)) + if (kstrtouint(buf, 0, &val) || val > 1) return -EINVAL; - if (val) { - rc = dasd_eer_enable(devmap->device); - if (rc) - return rc; - } else - dasd_eer_disable(devmap->device); - return count; + if (val) + rc = dasd_eer_enable(device); + else + dasd_eer_disable(device); + + dasd_put_device(device); + + return rc ? 
: count; } static DEVICE_ATTR(eer_enabled, 0644, dasd_eer_show, dasd_eer_store); @@ -1360,6 +1343,50 @@ dasd_timeout_store(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR(timeout, 0644, dasd_timeout_show, dasd_timeout_store); + +static ssize_t +dasd_path_reset_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct dasd_device *device; + unsigned int val; + + device = dasd_device_from_cdev(to_ccwdev(dev)); + if (IS_ERR(device)) + return -ENODEV; + + if ((kstrtouint(buf, 16, &val) != 0) || val > 0xff) + val = 0; + + if (device->discipline && device->discipline->reset_path) + device->discipline->reset_path(device, (__u8) val); + + dasd_put_device(device); + return count; +} + +static DEVICE_ATTR(path_reset, 0200, NULL, dasd_path_reset_store); + +static ssize_t dasd_hpf_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct dasd_device *device; + int hpf; + + device = dasd_device_from_cdev(to_ccwdev(dev)); + if (IS_ERR(device)) + return -ENODEV; + if (!device->discipline || !device->discipline->hpf_enabled) { + dasd_put_device(device); + return snprintf(buf, PAGE_SIZE, "%d\n", dasd_nofcx); + } + hpf = device->discipline->hpf_enabled(device); + dasd_put_device(device); + return snprintf(buf, PAGE_SIZE, "%d\n", hpf); +} + +static DEVICE_ATTR(hpf, 0444, dasd_hpf_show, NULL); + static ssize_t dasd_reservation_policy_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -1385,27 +1412,17 @@ static ssize_t dasd_reservation_policy_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct dasd_devmap *devmap; + struct ccw_device *cdev = to_ccwdev(dev); int rc; - devmap = dasd_devmap_from_cdev(to_ccwdev(dev)); - if (IS_ERR(devmap)) - return PTR_ERR(devmap); - rc = 0; - spin_lock(&dasd_devmap_lock); if (sysfs_streq("ignore", buf)) - devmap->features &= ~DASD_FEATURE_FAILONSLCK; + rc = dasd_set_feature(cdev, DASD_FEATURE_FAILONSLCK, 0); else if (sysfs_streq("fail", buf)) - devmap->features |= DASD_FEATURE_FAILONSLCK; + rc = dasd_set_feature(cdev, DASD_FEATURE_FAILONSLCK, 1); else rc = -EINVAL; - if (devmap->device) - devmap->device->features = devmap->features; - spin_unlock(&dasd_devmap_lock); - if (rc) - return rc; - else - return count; + + return rc ? 
: count; } static DEVICE_ATTR(reservation_policy, 0644, @@ -1461,25 +1478,120 @@ static ssize_t dasd_pm_show(struct device *dev, struct device_attribute *attr, char *buf) { struct dasd_device *device; - u8 opm, nppm, cablepm, cuirpm, hpfpm; + u8 opm, nppm, cablepm, cuirpm, hpfpm, ifccpm; device = dasd_device_from_cdev(to_ccwdev(dev)); if (IS_ERR(device)) return sprintf(buf, "0\n"); - opm = device->path_data.opm; - nppm = device->path_data.npm; - cablepm = device->path_data.cablepm; - cuirpm = device->path_data.cuirpm; - hpfpm = device->path_data.hpfpm; + opm = dasd_path_get_opm(device); + nppm = dasd_path_get_nppm(device); + cablepm = dasd_path_get_cablepm(device); + cuirpm = dasd_path_get_cuirpm(device); + hpfpm = dasd_path_get_hpfpm(device); + ifccpm = dasd_path_get_ifccpm(device); dasd_put_device(device); - return sprintf(buf, "%02x %02x %02x %02x %02x\n", opm, nppm, - cablepm, cuirpm, hpfpm); + return sprintf(buf, "%02x %02x %02x %02x %02x %02x\n", opm, nppm, + cablepm, cuirpm, hpfpm, ifccpm); } static DEVICE_ATTR(path_masks, 0444, dasd_pm_show, NULL); +/* + * threshold value for IFCC/CCC errors + */ +static ssize_t +dasd_path_threshold_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dasd_device *device; + int len; + + device = dasd_device_from_cdev(to_ccwdev(dev)); + if (IS_ERR(device)) + return -ENODEV; + len = snprintf(buf, PAGE_SIZE, "%lu\n", device->path_thrhld); + dasd_put_device(device); + return len; +} + +static ssize_t +dasd_path_threshold_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct dasd_device *device; + unsigned long flags; + unsigned long val; + + device = dasd_device_from_cdev(to_ccwdev(dev)); + if (IS_ERR(device)) + return -ENODEV; + + if ((kstrtoul(buf, 10, &val) != 0) || + (val > DASD_THRHLD_MAX) || val == 0) { + dasd_put_device(device); + return -EINVAL; + } + spin_lock_irqsave(get_ccwdev_lock(to_ccwdev(dev)), flags); + if (val) + device->path_thrhld = val; + spin_unlock_irqrestore(get_ccwdev_lock(to_ccwdev(dev)), flags); + dasd_put_device(device); + return count; +} + +static DEVICE_ATTR(path_threshold, 0644, dasd_path_threshold_show, + dasd_path_threshold_store); +/* + * interval for IFCC/CCC checks + * meaning time with no IFCC/CCC error before the error counter + * gets reset + */ +static ssize_t +dasd_path_interval_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dasd_device *device; + int len; + + device = dasd_device_from_cdev(to_ccwdev(dev)); + if (IS_ERR(device)) + return -ENODEV; + len = snprintf(buf, PAGE_SIZE, "%lu\n", device->path_interval); + dasd_put_device(device); + return len; +} + +static ssize_t +dasd_path_interval_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct dasd_device *device; + unsigned long flags; + unsigned long val; + + device = dasd_device_from_cdev(to_ccwdev(dev)); + if (IS_ERR(device)) + return -ENODEV; + + if ((kstrtoul(buf, 10, &val) != 0) || + (val > DASD_INTERVAL_MAX) || val == 0) { + dasd_put_device(device); + return -EINVAL; + } + spin_lock_irqsave(get_ccwdev_lock(to_ccwdev(dev)), flags); + if (val) + device->path_interval = val; + spin_unlock_irqrestore(get_ccwdev_lock(to_ccwdev(dev)), flags); + dasd_put_device(device); + return count; +} + +static DEVICE_ATTR(path_interval, 0644, dasd_path_interval_show, + dasd_path_interval_store); + + static struct attribute * dasd_attrs[] = { &dev_attr_readonly.attr, &dev_attr_discipline.attr, @@ -1500,6 +1612,10 @@ 
static struct attribute * dasd_attrs[] = { &dev_attr_safe_offline.attr, &dev_attr_host_access_count.attr, &dev_attr_path_masks.attr, + &dev_attr_path_threshold.attr, + &dev_attr_path_interval.attr, + &dev_attr_path_reset.attr, + &dev_attr_hpf.attr, NULL, }; @@ -1531,7 +1647,7 @@ dasd_set_feature(struct ccw_device *cdev, int feature, int flag) { struct dasd_devmap *devmap; - devmap = dasd_find_busid(dev_name(&cdev->dev)); + devmap = dasd_devmap_from_cdev(cdev); if (IS_ERR(devmap)) return PTR_ERR(devmap); diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index a7a88476e215..67bf50c9946f 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -1042,8 +1042,11 @@ static void dasd_eckd_clear_conf_data(struct dasd_device *device) private->conf_data = NULL; private->conf_len = 0; for (i = 0; i < 8; i++) { - kfree(private->path_conf_data[i]); - private->path_conf_data[i] = NULL; + kfree(device->path[i].conf_data); + device->path[i].conf_data = NULL; + device->path[i].cssid = 0; + device->path[i].ssid = 0; + device->path[i].chpid = 0; } } @@ -1055,13 +1058,14 @@ static int dasd_eckd_read_conf(struct dasd_device *device) int rc, path_err, pos; __u8 lpm, opm; struct dasd_eckd_private *private, path_private; - struct dasd_path *path_data; struct dasd_uid *uid; char print_path_uid[60], print_device_uid[60]; + struct channel_path_desc *chp_desc; + struct subchannel_id sch_id; private = device->private; - path_data = &device->path_data; opm = ccw_device_get_path_mask(device->cdev); + ccw_device_get_schid(device->cdev, &sch_id); conf_data_saved = 0; path_err = 0; /* get configuration data per operational path */ @@ -1081,7 +1085,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device) "No configuration data " "retrieved"); /* no further analysis possible */ - path_data->opm |= lpm; + dasd_path_add_opm(device, opm); continue; /* no error */ } /* save first valid configuration data */ @@ -1098,8 +1102,13 @@ static int dasd_eckd_read_conf(struct dasd_device *device) } pos = pathmask_to_pos(lpm); /* store per path conf_data */ - private->path_conf_data[pos] = - (struct dasd_conf_data *) conf_data; + device->path[pos].conf_data = conf_data; + device->path[pos].cssid = sch_id.cssid; + device->path[pos].ssid = sch_id.ssid; + chp_desc = ccw_device_get_chp_desc(device->cdev, pos); + if (chp_desc) + device->path[pos].chpid = chp_desc->chpid; + kfree(chp_desc); /* * build device UID that other path data * can be compared to it @@ -1154,42 +1163,66 @@ static int dasd_eckd_read_conf(struct dasd_device *device) "device %s instead of %s\n", lpm, print_path_uid, print_device_uid); path_err = -EINVAL; - path_data->cablepm |= lpm; + dasd_path_add_cablepm(device, lpm); continue; } pos = pathmask_to_pos(lpm); /* store per path conf_data */ - private->path_conf_data[pos] = - (struct dasd_conf_data *) conf_data; + device->path[pos].conf_data = conf_data; + device->path[pos].cssid = sch_id.cssid; + device->path[pos].ssid = sch_id.ssid; + chp_desc = ccw_device_get_chp_desc(device->cdev, pos); + if (chp_desc) + device->path[pos].chpid = chp_desc->chpid; + kfree(chp_desc); path_private.conf_data = NULL; path_private.conf_len = 0; } switch (dasd_eckd_path_access(conf_data, conf_len)) { case 0x02: - path_data->npm |= lpm; + dasd_path_add_nppm(device, lpm); break; case 0x03: - path_data->ppm |= lpm; + dasd_path_add_ppm(device, lpm); break; } - if (!path_data->opm) { - path_data->opm = lpm; + if (!dasd_path_get_opm(device)) { + dasd_path_set_opm(device, lpm); 
dasd_generic_path_operational(device); } else { - path_data->opm |= lpm; + dasd_path_add_opm(device, lpm); } - /* - * if the path is used - * it should not be in one of the negative lists - */ - path_data->cablepm &= ~lpm; - path_data->hpfpm &= ~lpm; - path_data->cuirpm &= ~lpm; } return path_err; } +static u32 get_fcx_max_data(struct dasd_device *device) +{ + struct dasd_eckd_private *private = device->private; + int fcx_in_css, fcx_in_gneq, fcx_in_features; + int tpm, mdc; + + if (dasd_nofcx) + return 0; + /* is transport mode supported? */ + fcx_in_css = css_general_characteristics.fcx; + fcx_in_gneq = private->gneq->reserved2[7] & 0x04; + fcx_in_features = private->features.feature[40] & 0x80; + tpm = fcx_in_css && fcx_in_gneq && fcx_in_features; + + if (!tpm) + return 0; + + mdc = ccw_device_get_mdc(device->cdev, 0); + if (mdc < 0) { + dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n"); + return 0; + } else { + return (u32)mdc * FCX_MAX_DATA_FACTOR; + } +} + static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm) { struct dasd_eckd_private *private = device->private; @@ -1222,8 +1255,7 @@ static int rebuild_device_uid(struct dasd_device *device, struct path_verification_work_data *data) { struct dasd_eckd_private *private = device->private; - struct dasd_path *path_data = &device->path_data; - __u8 lpm, opm = path_data->opm; + __u8 lpm, opm = dasd_path_get_opm(device); int rc = -ENODEV; for (lpm = 0x80; lpm; lpm >>= 1) { @@ -1356,7 +1388,7 @@ static void do_path_verification_work(struct work_struct *work) * in other case the device UID may have changed and * the first working path UID will be used as device UID */ - if (device->path_data.opm && + if (dasd_path_get_opm(device) && dasd_eckd_compare_path_uid(device, &path_private)) { /* * the comparison was not successful @@ -1406,23 +1438,17 @@ static void do_path_verification_work(struct work_struct *work) * situation in dasd_start_IO. */ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); - if (!device->path_data.opm && opm) { - device->path_data.opm = opm; - device->path_data.cablepm &= ~opm; - device->path_data.cuirpm &= ~opm; - device->path_data.hpfpm &= ~opm; + if (!dasd_path_get_opm(device) && opm) { + dasd_path_set_opm(device, opm); dasd_generic_path_operational(device); } else { - device->path_data.opm |= opm; - device->path_data.cablepm &= ~opm; - device->path_data.cuirpm &= ~opm; - device->path_data.hpfpm &= ~opm; + dasd_path_add_opm(device, opm); } - device->path_data.npm |= npm; - device->path_data.ppm |= ppm; - device->path_data.tbvpm |= epm; - device->path_data.cablepm |= cablepm; - device->path_data.hpfpm |= hpfpm; + dasd_path_add_nppm(device, npm); + dasd_path_add_ppm(device, ppm); + dasd_path_add_tbvpm(device, epm); + dasd_path_add_cablepm(device, cablepm); + dasd_path_add_nohpfpm(device, hpfpm); spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); } clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags); @@ -1456,6 +1482,19 @@ static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm) return 0; } +static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm) +{ + struct dasd_eckd_private *private = device->private; + unsigned long flags; + + if (!private->fcx_max_data) + private->fcx_max_data = get_fcx_max_data(device); + spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); + dasd_path_set_tbvpm(device, pm ? 
: dasd_path_get_notoperpm(device)); + dasd_schedule_device_bh(device); + spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); +} + static int dasd_eckd_read_features(struct dasd_device *device) { struct dasd_eckd_private *private = device->private; @@ -1652,32 +1691,6 @@ static void dasd_eckd_kick_validate_server(struct dasd_device *device) dasd_put_device(device); } -static u32 get_fcx_max_data(struct dasd_device *device) -{ - struct dasd_eckd_private *private = device->private; - int fcx_in_css, fcx_in_gneq, fcx_in_features; - int tpm, mdc; - - if (dasd_nofcx) - return 0; - /* is transport mode supported? */ - fcx_in_css = css_general_characteristics.fcx; - fcx_in_gneq = private->gneq->reserved2[7] & 0x04; - fcx_in_features = private->features.feature[40] & 0x80; - tpm = fcx_in_css && fcx_in_gneq && fcx_in_features; - - if (!tpm) - return 0; - - mdc = ccw_device_get_mdc(device->cdev, 0); - if (mdc < 0) { - dev_warn(&device->cdev->dev, "Detecting the maximum supported" - " data size for zHPF requests failed\n"); - return 0; - } else - return (u32)mdc * FCX_MAX_DATA_FACTOR; -} - /* * Check device characteristics. * If the device is accessible using ECKD discipline, the device is enabled. @@ -1729,10 +1742,11 @@ dasd_eckd_check_characteristics(struct dasd_device *device) if (rc) goto out_err1; - /* set default timeout */ + /* set some default values */ device->default_expires = DASD_EXPIRES; - /* set default retry count */ device->default_retries = DASD_RETRIES; + device->path_thrhld = DASD_ECKD_PATH_THRHLD; + device->path_interval = DASD_ECKD_PATH_INTERVAL; if (private->gneq) { value = 1; @@ -1839,13 +1853,16 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device) private->gneq = NULL; private->conf_len = 0; for (i = 0; i < 8; i++) { - kfree(private->path_conf_data[i]); - if ((__u8 *)private->path_conf_data[i] == + kfree(device->path[i].conf_data); + if ((__u8 *)device->path[i].conf_data == private->conf_data) { private->conf_data = NULL; private->conf_len = 0; } - private->path_conf_data[i] = NULL; + device->path[i].conf_data = NULL; + device->path[i].cssid = 0; + device->path[i].ssid = 0; + device->path[i].chpid = 0; } kfree(private->conf_data); private->conf_data = NULL; @@ -2966,7 +2983,7 @@ static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr) if (cqr->block && (cqr->startdev != cqr->block->base)) { dasd_eckd_reset_ccw_to_base_io(cqr); cqr->startdev = cqr->block->base; - cqr->lpm = cqr->block->base->path_data.opm; + cqr->lpm = dasd_path_get_opm(cqr->block->base); } }; @@ -3251,7 +3268,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( cqr->memdev = startdev; cqr->block = block; cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ - cqr->lpm = startdev->path_data.ppm; + cqr->lpm = dasd_path_get_ppm(startdev); cqr->retries = startdev->default_retries; cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; @@ -3426,7 +3443,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( cqr->memdev = startdev; cqr->block = block; cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ - cqr->lpm = startdev->path_data.ppm; + cqr->lpm = dasd_path_get_ppm(startdev); cqr->retries = startdev->default_retries; cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; @@ -3735,7 +3752,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( cqr->memdev = startdev; cqr->block = block; cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ - cqr->lpm = startdev->path_data.ppm; 
+ cqr->lpm = dasd_path_get_ppm(startdev); cqr->retries = startdev->default_retries; cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; @@ -3962,7 +3979,7 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev, cqr->memdev = startdev; cqr->block = block; cqr->expires = startdev->default_expires * HZ; - cqr->lpm = startdev->path_data.ppm; + cqr->lpm = dasd_path_get_ppm(startdev); cqr->retries = startdev->default_retries; cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; @@ -4783,7 +4800,8 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device, req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw), scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw), scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw), - irb->scsw.tm.fcxs, irb->scsw.tm.schxs, + irb->scsw.tm.fcxs, + (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq, req ? req->intrc : 0); len += sprintf(page + len, PRINTK_HEADER " device %s: Failing TCW: %p\n", @@ -5306,11 +5324,10 @@ static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m) */ static int dasd_eckd_psf_cuir_response(struct dasd_device *device, int response, - __u32 message_id, - struct channel_path_desc *desc, - struct subchannel_id sch_id) + __u32 message_id, __u8 lpum) { struct dasd_psf_cuir_response *psf_cuir; + int pos = pathmask_to_pos(lpum); struct dasd_ccw_req *cqr; struct ccw1 *ccw; int rc; @@ -5328,11 +5345,10 @@ dasd_eckd_psf_cuir_response(struct dasd_device *device, int response, psf_cuir = (struct dasd_psf_cuir_response *)cqr->data; psf_cuir->order = PSF_ORDER_CUIR_RESPONSE; psf_cuir->cc = response; - if (desc) - psf_cuir->chpid = desc->chpid; + psf_cuir->chpid = device->path[pos].chpid; psf_cuir->message_id = message_id; - psf_cuir->cssid = sch_id.cssid; - psf_cuir->ssid = sch_id.ssid; + psf_cuir->cssid = device->path[pos].cssid; + psf_cuir->ssid = device->path[pos].ssid; ccw = cqr->cpaddr; ccw->cmd_code = DASD_ECKD_CCW_PSF; ccw->cda = (__u32)(addr_t)psf_cuir; @@ -5363,20 +5379,19 @@ static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device, __u8 lpum, struct dasd_cuir_message *cuir) { - struct dasd_eckd_private *private = device->private; struct dasd_conf_data *conf_data; int path, pos; if (cuir->record_selector == 0) goto out; for (path = 0x80, pos = 0; path; path >>= 1, pos++) { - conf_data = private->path_conf_data[pos]; + conf_data = device->path[pos].conf_data; if (conf_data->gneq.record_selector == cuir->record_selector) return conf_data; } out: - return private->path_conf_data[pathmask_to_pos(lpum)]; + return device->path[pathmask_to_pos(lpum)].conf_data; } /* @@ -5391,7 +5406,6 @@ out: static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum, struct dasd_cuir_message *cuir) { - struct dasd_eckd_private *private = device->private; struct dasd_conf_data *ref_conf_data; unsigned long bitmask = 0, mask = 0; struct dasd_conf_data *conf_data; @@ -5417,11 +5431,10 @@ static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum, mask |= cuir->neq_map[1] << 8; mask |= cuir->neq_map[0] << 16; - for (path = 0x80; path; path >>= 1) { + for (path = 0; path < 8; path++) { /* initialise data per path */ bitmask = mask; - pos = pathmask_to_pos(path); - conf_data = private->path_conf_data[pos]; + conf_data = device->path[path].conf_data; pos = 8 - ffs(cuir->ned_map); ned = (char *) &conf_data->neds[pos]; /* compare reference ned and per path ned */ @@ -5442,33 +5455,29 @@ static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum, continue; /* device and path 
match the reference values add path to CUIR scope */ - tbcpm |= path; + tbcpm |= 0x80 >> path; } return tbcpm; } static void dasd_eckd_cuir_notify_user(struct dasd_device *device, - unsigned long paths, - struct subchannel_id sch_id, int action) + unsigned long paths, int action) { - struct channel_path_desc *desc; int pos; while (paths) { /* get position of bit in mask */ - pos = ffs(paths) - 1; + pos = 8 - ffs(paths); /* get channel path descriptor from this position */ - desc = ccw_device_get_chp_desc(device->cdev, 7 - pos); if (action == CUIR_QUIESCE) - pr_warn("Service on the storage server caused path " - "%x.%02x to go offline", sch_id.cssid, - desc ? desc->chpid : 0); + pr_warn("Service on the storage server caused path %x.%02x to go offline", + device->path[pos].cssid, + device->path[pos].chpid); else if (action == CUIR_RESUME) - pr_info("Path %x.%02x is back online after service " - "on the storage server", sch_id.cssid, - desc ? desc->chpid : 0); - kfree(desc); - clear_bit(pos, &paths); + pr_info("Path %x.%02x is back online after service on the storage server", + device->path[pos].cssid, + device->path[pos].chpid); + clear_bit(7 - pos, &paths); } } @@ -5479,16 +5488,16 @@ static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum, tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir); /* nothing to do if path is not in use */ - if (!(device->path_data.opm & tbcpm)) + if (!(dasd_path_get_opm(device) & tbcpm)) return 0; - if (!(device->path_data.opm & ~tbcpm)) { + if (!(dasd_path_get_opm(device) & ~tbcpm)) { /* no path would be left if the CUIR action is taken return error */ return -EINVAL; } /* remove device from operational path mask */ - device->path_data.opm &= ~tbcpm; - device->path_data.cuirpm |= tbcpm; + dasd_path_remove_opm(device, tbcpm); + dasd_path_add_cuirpm(device, tbcpm); return tbcpm; } @@ -5501,7 +5510,6 @@ static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum, * notify the already set offline devices again */ static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum, - struct subchannel_id sch_id, struct dasd_cuir_message *cuir) { struct dasd_eckd_private *private = device->private; @@ -5556,14 +5564,13 @@ static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum, } } /* notify user about all paths affected by CUIR action */ - dasd_eckd_cuir_notify_user(device, paths, sch_id, CUIR_QUIESCE); + dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE); return 0; out_err: return tbcpm; } static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum, - struct subchannel_id sch_id, struct dasd_cuir_message *cuir) { struct dasd_eckd_private *private = device->private; @@ -5581,8 +5588,8 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum, alias_list) { tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir); paths |= tbcpm; - if (!(dev->path_data.opm & tbcpm)) { - dev->path_data.tbvpm |= tbcpm; + if (!(dasd_path_get_opm(dev) & tbcpm)) { + dasd_path_add_tbvpm(dev, tbcpm); dasd_schedule_device_bh(dev); } } @@ -5591,8 +5598,8 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum, alias_list) { tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir); paths |= tbcpm; - if (!(dev->path_data.opm & tbcpm)) { - dev->path_data.tbvpm |= tbcpm; + if (!(dasd_path_get_opm(dev) & tbcpm)) { + dasd_path_add_tbvpm(dev, tbcpm); dasd_schedule_device_bh(dev); } } @@ -5605,8 +5612,8 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum, alias_list) { tbcpm = 
dasd_eckd_cuir_scope(dev, lpum, cuir); paths |= tbcpm; - if (!(dev->path_data.opm & tbcpm)) { - dev->path_data.tbvpm |= tbcpm; + if (!(dasd_path_get_opm(dev) & tbcpm)) { + dasd_path_add_tbvpm(dev, tbcpm); dasd_schedule_device_bh(dev); } } @@ -5615,14 +5622,14 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum, alias_list) { tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir); paths |= tbcpm; - if (!(dev->path_data.opm & tbcpm)) { - dev->path_data.tbvpm |= tbcpm; + if (!(dasd_path_get_opm(dev) & tbcpm)) { + dasd_path_add_tbvpm(dev, tbcpm); dasd_schedule_device_bh(dev); } } } /* notify user about all paths affected by CUIR action */ - dasd_eckd_cuir_notify_user(device, paths, sch_id, CUIR_RESUME); + dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME); return 0; } @@ -5630,38 +5637,31 @@ static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages, __u8 lpum) { struct dasd_cuir_message *cuir = messages; - struct channel_path_desc *desc; - struct subchannel_id sch_id; - int pos, response; + int response; DBF_DEV_EVENT(DBF_WARNING, device, "CUIR request: %016llx %016llx %016llx %08x", ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2], ((u32 *)cuir)[3]); - ccw_device_get_schid(device->cdev, &sch_id); - pos = pathmask_to_pos(lpum); - desc = ccw_device_get_chp_desc(device->cdev, pos); if (cuir->code == CUIR_QUIESCE) { /* quiesce */ - if (dasd_eckd_cuir_quiesce(device, lpum, sch_id, cuir)) + if (dasd_eckd_cuir_quiesce(device, lpum, cuir)) response = PSF_CUIR_LAST_PATH; else response = PSF_CUIR_COMPLETED; } else if (cuir->code == CUIR_RESUME) { /* resume */ - dasd_eckd_cuir_resume(device, lpum, sch_id, cuir); + dasd_eckd_cuir_resume(device, lpum, cuir); response = PSF_CUIR_COMPLETED; } else response = PSF_CUIR_NOT_SUPPORTED; dasd_eckd_psf_cuir_response(device, response, - cuir->message_id, desc, sch_id); + cuir->message_id, lpum); DBF_DEV_EVENT(DBF_WARNING, device, "CUIR response: %d on message ID %08x", response, cuir->message_id); - /* free descriptor copy */ - kfree(desc); /* to make sure there is no attention left schedule work again */ device->discipline->check_attention(device, lpum); } @@ -5708,6 +5708,63 @@ static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum) return 0; } +static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum) +{ + if (~lpum & dasd_path_get_opm(device)) { + dasd_path_add_nohpfpm(device, lpum); + dasd_path_remove_opm(device, lpum); + dev_err(&device->cdev->dev, + "Channel path %02X lost HPF functionality and is disabled\n", + lpum); + return 1; + } + return 0; +} + +static void dasd_eckd_disable_hpf_device(struct dasd_device *device) +{ + struct dasd_eckd_private *private = device->private; + + dev_err(&device->cdev->dev, + "High Performance FICON disabled\n"); + private->fcx_max_data = 0; +} + +static int dasd_eckd_hpf_enabled(struct dasd_device *device) +{ + struct dasd_eckd_private *private = device->private; + + return private->fcx_max_data ? 
1 : 0; +} + +static void dasd_eckd_handle_hpf_error(struct dasd_device *device, + struct irb *irb) +{ + struct dasd_eckd_private *private = device->private; + + if (!private->fcx_max_data) { + /* sanity check for no HPF, the error makes no sense */ + DBF_DEV_EVENT(DBF_WARNING, device, "%s", + "Trying to disable HPF for a non HPF device"); + return; + } + if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) { + dasd_eckd_disable_hpf_device(device); + } else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) { + if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum)) + return; + dasd_eckd_disable_hpf_device(device); + dasd_path_set_tbvpm(device, + dasd_path_get_hpfpm(device)); + } + /* + * prevent that any new I/O ist started on the device and schedule a + * requeue of existing requests + */ + dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC); + dasd_schedule_requeue(device); +} + static struct ccw_driver dasd_eckd_driver = { .driver = { .name = "dasd-eckd", @@ -5776,6 +5833,10 @@ static struct dasd_discipline dasd_eckd_discipline = { .check_attention = dasd_eckd_check_attention, .host_access_count = dasd_eckd_host_access_count, .hosts_print = dasd_hosts_print, + .handle_hpf_error = dasd_eckd_handle_hpf_error, + .disable_hpf = dasd_eckd_disable_hpf_device, + .hpf_enabled = dasd_eckd_hpf_enabled, + .reset_path = dasd_eckd_reset_path, }; static int __init diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h index 59803626ea36..e2a710c250a5 100644 --- a/drivers/s390/block/dasd_eckd.h +++ b/drivers/s390/block/dasd_eckd.h @@ -94,6 +94,8 @@ #define FCX_MAX_DATA_FACTOR 65536 #define DASD_ECKD_RCD_DATA_SIZE 256 +#define DASD_ECKD_PATH_THRHLD 256 +#define DASD_ECKD_PATH_INTERVAL 300 /***************************************************************************** * SECTION: Type Definitions @@ -535,8 +537,7 @@ struct dasd_eckd_private { struct dasd_eckd_characteristics rdc_data; u8 *conf_data; int conf_len; - /* per path configuration data */ - struct dasd_conf_data *path_conf_data[8]; + /* pointers to specific parts in the conf_data */ struct dasd_ned *ned; struct dasd_sneq *sneq; diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index 21ef63cf0960..6c5d671304b4 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c @@ -454,20 +454,30 @@ static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data) */ int dasd_eer_enable(struct dasd_device *device) { - struct dasd_ccw_req *cqr; + struct dasd_ccw_req *cqr = NULL; unsigned long flags; struct ccw1 *ccw; + int rc = 0; + spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); if (device->eer_cqr) - return 0; + goto out; + else if (!device->discipline || + strcmp(device->discipline->name, "ECKD")) + rc = -EMEDIUMTYPE; + else if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) + rc = -EBUSY; - if (!device->discipline || strcmp(device->discipline->name, "ECKD")) - return -EPERM; /* FIXME: -EMEDIUMTYPE ? 
*/ + if (rc) + goto out; cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */, SNSS_DATA_SIZE, device); - if (IS_ERR(cqr)) - return -ENOMEM; + if (IS_ERR(cqr)) { + rc = -ENOMEM; + cqr = NULL; + goto out; + } cqr->startdev = device; cqr->retries = 255; @@ -485,15 +495,18 @@ int dasd_eer_enable(struct dasd_device *device) cqr->status = DASD_CQR_FILLED; cqr->callback = dasd_eer_snss_cb; - spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); if (!device->eer_cqr) { device->eer_cqr = cqr; cqr = NULL; } + +out: spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); + if (cqr) dasd_kfree_request(cqr, device); - return 0; + + return rc; } /* diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c index d138d0116734..113c1c1fa1af 100644 --- a/drivers/s390/block/dasd_erp.c +++ b/drivers/s390/block/dasd_erp.c @@ -96,7 +96,7 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr) "default ERP called (%i retries left)", cqr->retries); if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) - cqr->lpm = device->path_data.opm; + cqr->lpm = dasd_path_get_opm(device); cqr->status = DASD_CQR_FILLED; } else { pr_err("%s: default ERP has run out of retries and failed\n", diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index d7b5b550364b..462cab5d4302 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -168,7 +168,7 @@ dasd_fba_check_characteristics(struct dasd_device *device) device->default_expires = DASD_EXPIRES; device->default_retries = FBA_DEFAULT_RETRIES; - device->path_data.opm = LPM_ANYPATH; + dasd_path_set_opm(device, LPM_ANYPATH); readonly = dasd_device_is_ro(device); if (readonly) diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 87ff6cef872f..24be210c10e5 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -55,6 +55,7 @@ #include <asm/debug.h> #include <asm/dasd.h> #include <asm/idals.h> +#include <linux/bitops.h> /* DASD discipline magic */ #define DASD_ECKD_MAGIC 0xC5C3D2C4 @@ -377,6 +378,10 @@ struct dasd_discipline { int (*check_attention)(struct dasd_device *, __u8); int (*host_access_count)(struct dasd_device *); int (*hosts_print)(struct dasd_device *, struct seq_file *); + void (*handle_hpf_error)(struct dasd_device *, struct irb *); + void (*disable_hpf)(struct dasd_device *); + int (*hpf_enabled)(struct dasd_device *); + void (*reset_path)(struct dasd_device *, __u8); }; extern struct dasd_discipline *dasd_diag_discipline_pointer; @@ -397,17 +402,31 @@ extern struct dasd_discipline *dasd_diag_discipline_pointer; #define DASD_EER_STATECHANGE 3 #define DASD_EER_PPRCSUSPEND 4 +/* DASD path handling */ + +#define DASD_PATH_OPERATIONAL 1 +#define DASD_PATH_TBV 2 +#define DASD_PATH_PP 3 +#define DASD_PATH_NPP 4 +#define DASD_PATH_MISCABLED 5 +#define DASD_PATH_NOHPF 6 +#define DASD_PATH_CUIR 7 +#define DASD_PATH_IFCC 8 + +#define DASD_THRHLD_MAX 4294967295U +#define DASD_INTERVAL_MAX 4294967295U + struct dasd_path { - __u8 opm; - __u8 tbvpm; - __u8 ppm; - __u8 npm; - /* paths that are not used because of a special condition */ - __u8 cablepm; /* miss-cabled */ - __u8 hpfpm; /* the HPF requirements of the other paths are not met */ - __u8 cuirpm; /* CUIR varied offline */ + unsigned long flags; + u8 cssid; + u8 ssid; + u8 chpid; + struct dasd_conf_data *conf_data; + atomic_t error_count; + unsigned long long errorclk; }; + struct dasd_profile_info { /* legacy part of profile data, as in dasd_profile_info_t */ unsigned int dasd_io_reqs; /* number of 
requests processed */ @@ -458,7 +477,8 @@ struct dasd_device { struct dasd_discipline *discipline; struct dasd_discipline *base_discipline; void *private; - struct dasd_path path_data; + struct dasd_path path[8]; + __u8 opm; /* Device state and target state. */ int state, target; @@ -483,6 +503,7 @@ struct dasd_device { struct work_struct reload_device; struct work_struct kick_validate; struct work_struct suc_work; + struct work_struct requeue_requests; struct timer_list timer; debug_info_t *debug_area; @@ -498,6 +519,9 @@ struct dasd_device { unsigned long blk_timeout; + unsigned long path_thrhld; + unsigned long path_interval; + struct dentry *debugfs_dentry; struct dentry *hosts_dentry; struct dasd_profile profile; @@ -707,6 +731,7 @@ void dasd_set_target_state(struct dasd_device *, int); void dasd_kick_device(struct dasd_device *); void dasd_restore_device(struct dasd_device *); void dasd_reload_device(struct dasd_device *); +void dasd_schedule_requeue(struct dasd_device *); void dasd_add_request_head(struct dasd_ccw_req *); void dasd_add_request_tail(struct dasd_ccw_req *); @@ -835,4 +860,410 @@ static inline int dasd_eer_enabled(struct dasd_device *device) #define dasd_eer_enabled(d) (0) #endif /* CONFIG_DASD_ERR */ + +/* DASD path handling functions */ + +/* + * helper functions to modify bit masks for a given channel path for a device + */ +static inline int dasd_path_is_operational(struct dasd_device *device, int chp) +{ + return test_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags); +} + +static inline int dasd_path_need_verify(struct dasd_device *device, int chp) +{ + return test_bit(DASD_PATH_TBV, &device->path[chp].flags); +} + +static inline void dasd_path_verify(struct dasd_device *device, int chp) +{ + __set_bit(DASD_PATH_TBV, &device->path[chp].flags); +} + +static inline void dasd_path_clear_verify(struct dasd_device *device, int chp) +{ + __clear_bit(DASD_PATH_TBV, &device->path[chp].flags); +} + +static inline void dasd_path_clear_all_verify(struct dasd_device *device) +{ + int chp; + + for (chp = 0; chp < 8; chp++) + dasd_path_clear_verify(device, chp); +} + +static inline void dasd_path_operational(struct dasd_device *device, int chp) +{ + __set_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags); + device->opm |= (0x80 >> chp); +} + +static inline void dasd_path_nonpreferred(struct dasd_device *device, int chp) +{ + __set_bit(DASD_PATH_NPP, &device->path[chp].flags); +} + +static inline int dasd_path_is_nonpreferred(struct dasd_device *device, int chp) +{ + return test_bit(DASD_PATH_NPP, &device->path[chp].flags); +} + +static inline void dasd_path_clear_nonpreferred(struct dasd_device *device, + int chp) +{ + __clear_bit(DASD_PATH_NPP, &device->path[chp].flags); +} + +static inline void dasd_path_preferred(struct dasd_device *device, int chp) +{ + __set_bit(DASD_PATH_PP, &device->path[chp].flags); +} + +static inline int dasd_path_is_preferred(struct dasd_device *device, int chp) +{ + return test_bit(DASD_PATH_PP, &device->path[chp].flags); +} + +static inline void dasd_path_clear_preferred(struct dasd_device *device, + int chp) +{ + __clear_bit(DASD_PATH_PP, &device->path[chp].flags); +} + +static inline void dasd_path_clear_oper(struct dasd_device *device, int chp) +{ + __clear_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags); + device->opm &= ~(0x80 >> chp); +} + +static inline void dasd_path_clear_cable(struct dasd_device *device, int chp) +{ + __clear_bit(DASD_PATH_MISCABLED, &device->path[chp].flags); +} + +static inline void dasd_path_cuir(struct 
dasd_device *device, int chp) +{ + __set_bit(DASD_PATH_CUIR, &device->path[chp].flags); +} + +static inline int dasd_path_is_cuir(struct dasd_device *device, int chp) +{ + return test_bit(DASD_PATH_CUIR, &device->path[chp].flags); +} + +static inline void dasd_path_clear_cuir(struct dasd_device *device, int chp) +{ + __clear_bit(DASD_PATH_CUIR, &device->path[chp].flags); +} + +static inline void dasd_path_ifcc(struct dasd_device *device, int chp) +{ + set_bit(DASD_PATH_IFCC, &device->path[chp].flags); +} + +static inline int dasd_path_is_ifcc(struct dasd_device *device, int chp) +{ + return test_bit(DASD_PATH_IFCC, &device->path[chp].flags); +} + +static inline void dasd_path_clear_ifcc(struct dasd_device *device, int chp) +{ + clear_bit(DASD_PATH_IFCC, &device->path[chp].flags); +} + +static inline void dasd_path_clear_nohpf(struct dasd_device *device, int chp) +{ + __clear_bit(DASD_PATH_NOHPF, &device->path[chp].flags); +} + +static inline void dasd_path_miscabled(struct dasd_device *device, int chp) +{ + __set_bit(DASD_PATH_MISCABLED, &device->path[chp].flags); +} + +static inline int dasd_path_is_miscabled(struct dasd_device *device, int chp) +{ + return test_bit(DASD_PATH_MISCABLED, &device->path[chp].flags); +} + +static inline void dasd_path_nohpf(struct dasd_device *device, int chp) +{ + __set_bit(DASD_PATH_NOHPF, &device->path[chp].flags); +} + +static inline int dasd_path_is_nohpf(struct dasd_device *device, int chp) +{ + return test_bit(DASD_PATH_NOHPF, &device->path[chp].flags); +} + +/* + * get functions for path masks + * will return a path masks for the given device + */ + +static inline __u8 dasd_path_get_opm(struct dasd_device *device) +{ + return device->opm; +} + +static inline __u8 dasd_path_get_tbvpm(struct dasd_device *device) +{ + int chp; + __u8 tbvpm = 0x00; + + for (chp = 0; chp < 8; chp++) + if (dasd_path_need_verify(device, chp)) + tbvpm |= 0x80 >> chp; + return tbvpm; +} + +static inline __u8 dasd_path_get_nppm(struct dasd_device *device) +{ + int chp; + __u8 npm = 0x00; + + for (chp = 0; chp < 8; chp++) { + if (dasd_path_is_nonpreferred(device, chp)) + npm |= 0x80 >> chp; + } + return npm; +} + +static inline __u8 dasd_path_get_ppm(struct dasd_device *device) +{ + int chp; + __u8 ppm = 0x00; + + for (chp = 0; chp < 8; chp++) + if (dasd_path_is_preferred(device, chp)) + ppm |= 0x80 >> chp; + return ppm; +} + +static inline __u8 dasd_path_get_cablepm(struct dasd_device *device) +{ + int chp; + __u8 cablepm = 0x00; + + for (chp = 0; chp < 8; chp++) + if (dasd_path_is_miscabled(device, chp)) + cablepm |= 0x80 >> chp; + return cablepm; +} + +static inline __u8 dasd_path_get_cuirpm(struct dasd_device *device) +{ + int chp; + __u8 cuirpm = 0x00; + + for (chp = 0; chp < 8; chp++) + if (dasd_path_is_cuir(device, chp)) + cuirpm |= 0x80 >> chp; + return cuirpm; +} + +static inline __u8 dasd_path_get_ifccpm(struct dasd_device *device) +{ + int chp; + __u8 ifccpm = 0x00; + + for (chp = 0; chp < 8; chp++) + if (dasd_path_is_ifcc(device, chp)) + ifccpm |= 0x80 >> chp; + return ifccpm; +} + +static inline __u8 dasd_path_get_hpfpm(struct dasd_device *device) +{ + int chp; + __u8 hpfpm = 0x00; + + for (chp = 0; chp < 8; chp++) + if (dasd_path_is_nohpf(device, chp)) + hpfpm |= 0x80 >> chp; + return hpfpm; +} + +/* + * add functions for path masks + * the existing path mask will be extended by the given path mask + */ +static inline void dasd_path_add_tbvpm(struct dasd_device *device, __u8 pm) +{ + int chp; + + for (chp = 0; chp < 8; chp++) + if (pm & (0x80 >> chp)) + 
dasd_path_verify(device, chp); +} + +static inline __u8 dasd_path_get_notoperpm(struct dasd_device *device) +{ + int chp; + __u8 nopm = 0x00; + + for (chp = 0; chp < 8; chp++) + if (dasd_path_is_nohpf(device, chp) || + dasd_path_is_ifcc(device, chp) || + dasd_path_is_cuir(device, chp) || + dasd_path_is_miscabled(device, chp)) + nopm |= 0x80 >> chp; + return nopm; +} + +static inline void dasd_path_add_opm(struct dasd_device *device, __u8 pm) +{ + int chp; + + for (chp = 0; chp < 8; chp++) + if (pm & (0x80 >> chp)) { + dasd_path_operational(device, chp); + /* + * if the path is used + * it should not be in one of the negative lists + */ + dasd_path_clear_nohpf(device, chp); + dasd_path_clear_cuir(device, chp); + dasd_path_clear_cable(device, chp); + dasd_path_clear_ifcc(device, chp); + } +} + +static inline void dasd_path_add_cablepm(struct dasd_device *device, __u8 pm) +{ + int chp; + + for (chp = 0; chp < 8; chp++) + if (pm & (0x80 >> chp)) + dasd_path_miscabled(device, chp); +} + +static inline void dasd_path_add_cuirpm(struct dasd_device *device, __u8 pm) +{ + int chp; + + for (chp = 0; chp < 8; chp++) + if (pm & (0x80 >> chp)) + dasd_path_cuir(device, chp); +} + +static inline void dasd_path_add_ifccpm(struct dasd_device *device, __u8 pm) +{ + int chp; + + for (chp = 0; chp < 8; chp++) + if (pm & (0x80 >> chp)) + dasd_path_ifcc(device, chp); +} + +static inline void dasd_path_add_nppm(struct dasd_device *device, __u8 pm) +{ + int chp; + + for (chp = 0; chp < 8; chp++) + if (pm & (0x80 >> chp)) + dasd_path_nonpreferred(device, chp); +} + +static inline void dasd_path_add_nohpfpm(struct dasd_device *device, __u8 pm) +{ + int chp; + + for (chp = 0; chp < 8; chp++) + if (pm & (0x80 >> chp)) + dasd_path_nohpf(device, chp); +} + +static inline void dasd_path_add_ppm(struct dasd_device *device, __u8 pm) +{ + int chp; + + for (chp = 0; chp < 8; chp++) + if (pm & (0x80 >> chp)) + dasd_path_preferred(device, chp); +} + +/* + * set functions for path masks + * the existing path mask will be replaced by the given path mask + */ +static inline void dasd_path_set_tbvpm(struct dasd_device *device, __u8 pm) +{ + int chp; + + for (chp = 0; chp < 8; chp++) + if (pm & (0x80 >> chp)) + dasd_path_verify(device, chp); + else + dasd_path_clear_verify(device, chp); +} + +static inline void dasd_path_set_opm(struct dasd_device *device, __u8 pm) +{ + int chp; + + for (chp = 0; chp < 8; chp++) { + dasd_path_clear_oper(device, chp); + if (pm & (0x80 >> chp)) { + dasd_path_operational(device, chp); + /* + * if the path is used + * it should not be in one of the negative lists + */ + dasd_path_clear_nohpf(device, chp); + dasd_path_clear_cuir(device, chp); + dasd_path_clear_cable(device, chp); + dasd_path_clear_ifcc(device, chp); + } + } +} + +/* + * remove functions for path masks + * the existing path mask will be cleared with the given path mask + */ +static inline void dasd_path_remove_opm(struct dasd_device *device, __u8 pm) +{ + int chp; + + for (chp = 0; chp < 8; chp++) { + if (pm & (0x80 >> chp)) + dasd_path_clear_oper(device, chp); + } +} + +/* + * add the newly available path to the to be verified pm and remove it from + * normal operation until it is verified + */ +static inline void dasd_path_available(struct dasd_device *device, int chp) +{ + dasd_path_clear_oper(device, chp); + dasd_path_verify(device, chp); +} + +static inline void dasd_path_notoper(struct dasd_device *device, int chp) +{ + dasd_path_clear_oper(device, chp); + dasd_path_clear_preferred(device, chp); + 
dasd_path_clear_nonpreferred(device, chp); +} + +/* + * remove all paths from normal operation + */ +static inline void dasd_path_no_path(struct dasd_device *device) +{ + int chp; + + for (chp = 0; chp < 8; chp++) + dasd_path_notoper(device, chp); + + dasd_path_clear_all_verify(device); +} + +/* end - path handling */ + #endif /* DASD_H */ diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index 931d10e86837..1b8d825623bd 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c @@ -9,7 +9,6 @@ * Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu> */ -#include <linux/module.h> #include <linux/types.h> #include <linux/kdev_t.h> #include <linux/tty.h> @@ -1215,13 +1214,4 @@ static int __init tty3215_init(void) tty3215_driver = driver; return 0; } - -static void __exit tty3215_exit(void) -{ - tty_unregister_driver(tty3215_driver); - put_tty_driver(tty3215_driver); - ccw_driver_unregister(&raw3215_ccw_driver); -} - -module_init(tty3215_init); -module_exit(tty3215_exit); +device_initcall(tty3215_init); diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index 7a10c56334bb..e1fc7eb043d6 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h @@ -59,6 +59,7 @@ typedef unsigned int sclp_cmdw_t; +#define SCLP_CMDW_READ_CPU_INFO 0x00010001 #define SCLP_CMDW_READ_EVENT_DATA 0x00770005 #define SCLP_CMDW_WRITE_EVENT_DATA 0x00760005 #define SCLP_CMDW_WRITE_EVENT_MASK 0x00780005 @@ -102,6 +103,28 @@ struct init_sccb { sccb_mask_t sclp_send_mask; } __attribute__((packed)); +struct read_cpu_info_sccb { + struct sccb_header header; + u16 nr_configured; + u16 offset_configured; + u16 nr_standby; + u16 offset_standby; + u8 reserved[4096 - 16]; +} __attribute__((packed, aligned(PAGE_SIZE))); + +static inline void sclp_fill_core_info(struct sclp_core_info *info, + struct read_cpu_info_sccb *sccb) +{ + char *page = (char *) sccb; + + memset(info, 0, sizeof(*info)); + info->configured = sccb->nr_configured; + info->standby = sccb->nr_standby; + info->combined = sccb->nr_configured + sccb->nr_standby; + memcpy(&info->core, page + sccb->offset_configured, + info->combined * sizeof(struct sclp_core_entry)); +} + #define SCLP_HAS_CHP_INFO (sclp.facilities & 0x8000000000000000ULL) #define SCLP_HAS_CHP_RECONFIG (sclp.facilities & 0x2000000000000000ULL) #define SCLP_HAS_CPU_INFO (sclp.facilities & 0x0800000000000000ULL) diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index e3fc7539116b..b9c5522b8a68 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c @@ -80,33 +80,10 @@ out: * CPU configuration related functions. 
*/ -#define SCLP_CMDW_READ_CPU_INFO 0x00010001 #define SCLP_CMDW_CONFIGURE_CPU 0x00110001 #define SCLP_CMDW_DECONFIGURE_CPU 0x00100001 -struct read_cpu_info_sccb { - struct sccb_header header; - u16 nr_configured; - u16 offset_configured; - u16 nr_standby; - u16 offset_standby; - u8 reserved[4096 - 16]; -} __attribute__((packed, aligned(PAGE_SIZE))); - -static void sclp_fill_core_info(struct sclp_core_info *info, - struct read_cpu_info_sccb *sccb) -{ - char *page = (char *) sccb; - - memset(info, 0, sizeof(*info)); - info->configured = sccb->nr_configured; - info->standby = sccb->nr_standby; - info->combined = sccb->nr_configured + sccb->nr_standby; - memcpy(&info->core, page + sccb->offset_configured, - info->combined * sizeof(struct sclp_core_entry)); -} - -int sclp_get_core_info(struct sclp_core_info *info) +int _sclp_get_core_info(struct sclp_core_info *info) { int rc; struct read_cpu_info_sccb *sccb; diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c index c71df0c7dedc..f8e46c22e641 100644 --- a/drivers/s390/char/sclp_early.c +++ b/drivers/s390/char/sclp_early.c @@ -221,6 +221,36 @@ static int __init sclp_set_event_mask(struct init_sccb *sccb, return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb); } +static struct sclp_core_info sclp_core_info_early __initdata; +static int sclp_core_info_early_valid __initdata; + +static void __init sclp_init_core_info_early(struct read_cpu_info_sccb *sccb) +{ + int rc; + + if (!SCLP_HAS_CPU_INFO) + return; + memset(sccb, 0, sizeof(*sccb)); + sccb->header.length = sizeof(*sccb); + do { + rc = sclp_cmd_sync_early(SCLP_CMDW_READ_CPU_INFO, sccb); + } while (rc == -EBUSY); + if (rc) + return; + if (sccb->header.response_code != 0x0010) + return; + sclp_fill_core_info(&sclp_core_info_early, sccb); + sclp_core_info_early_valid = 1; +} + +int __init _sclp_get_core_info_early(struct sclp_core_info *info) +{ + if (!sclp_core_info_early_valid) + return -EIO; + *info = sclp_core_info_early; + return 0; +} + static long __init sclp_hsa_size_init(struct sdias_sccb *sccb) { sccb_init_eq_size(sccb); @@ -293,6 +323,7 @@ void __init sclp_early_detect(void) void *sccb = &sccb_early; sclp_facilities_detect(sccb); + sclp_init_core_info_early(sccb); sclp_hsa_size_detect(sccb); /* Turn off SCLP event notifications. 
Also save remote masks in the diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c index 475e470d9768..e4958511168a 100644 --- a/drivers/s390/char/sclp_quiesce.c +++ b/drivers/s390/char/sclp_quiesce.c @@ -6,7 +6,6 @@ * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> */ -#include <linux/module.h> #include <linux/types.h> #include <linux/cpumask.h> #include <linux/smp.h> @@ -80,5 +79,4 @@ static int __init sclp_quiesce_init(void) { return sclp_register(&sclp_quiesce_event); } - -module_init(sclp_quiesce_init); +device_initcall(sclp_quiesce_init); diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c index 3c6e174e19b6..9259017a1295 100644 --- a/drivers/s390/char/sclp_tty.c +++ b/drivers/s390/char/sclp_tty.c @@ -7,7 +7,6 @@ * Martin Schwidefsky <schwidefsky@de.ibm.com> */ -#include <linux/module.h> #include <linux/kmod.h> #include <linux/tty.h> #include <linux/tty_driver.h> @@ -573,4 +572,4 @@ sclp_tty_init(void) sclp_tty_driver = driver; return 0; } -module_init(sclp_tty_init); +device_initcall(sclp_tty_init); diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index e883063c7258..3167e8581994 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c @@ -870,7 +870,7 @@ static int __init vmlogrdr_init(void) goto cleanup; for (i=0; i < MAXMINOR; ++i ) { - sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL); + sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!sys_ser[i].buffer) { rc = -ENOMEM; break; diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 16992e2a40ad..f771e5e9e26b 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -7,6 +7,7 @@ * * Copyright IBM Corp. 2003, 2008 * Author(s): Michael Holzheu + * License: GPL */ #define KMSG_COMPONENT "zdump" @@ -16,7 +17,6 @@ #include <linux/slab.h> #include <linux/miscdevice.h> #include <linux/debugfs.h> -#include <linux/module.h> #include <linux/memblock.h> #include <asm/asm-offsets.h> @@ -320,7 +320,7 @@ static int __init zcore_init(void) goto fail; } - pr_alert("DETECTED 'S390X (64 bit) OS'\n"); + pr_alert("The dump process started for a 64-bit operating system\n"); rc = init_cpu_info(); if (rc) goto fail; @@ -364,22 +364,4 @@ fail: diag308(DIAG308_REL_HSA, NULL); return rc; } - -static void __exit zcore_exit(void) -{ - debug_unregister(zcore_dbf); - sclp_sdias_exit(); - free_page((unsigned long) ipl_block); - debugfs_remove(zcore_hsa_file); - debugfs_remove(zcore_reipl_file); - debugfs_remove(zcore_memmap_file); - debugfs_remove(zcore_dir); - diag308(DIAG308_REL_HSA, NULL); -} - -MODULE_AUTHOR("Copyright IBM Corp. 2003,2008"); -MODULE_DESCRIPTION("zcore module for zfcpdump support"); -MODULE_LICENSE("GPL"); - subsys_initcall(zcore_init); -module_exit(zcore_exit); diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index 268aa23afa01..6b6386e9a500 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c @@ -30,7 +30,7 @@ #include <linux/device.h> #include <linux/init.h> #include <linux/list.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/moduleparam.h> #include <linux/slab.h> #include <linux/timex.h> /* get_tod_clock() */ @@ -1389,13 +1389,7 @@ static int __init init_cmf(void) "%s (mode %s)\n", format_string, detect_string); return 0; } -module_init(init_cmf); - - -MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>"); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("channel measurement facility base driver\n" - "Copyright IBM Corp. 
2003\n"); +device_initcall(init_cmf); EXPORT_SYMBOL_GPL(enable_cmf); EXPORT_SYMBOL_GPL(disable_cmf); diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 3d2b20ee613f..bc099b61394d 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -5,12 +5,14 @@ * * Author(s): Arnd Bergmann (arndb@de.ibm.com) * Cornelia Huck (cornelia.huck@de.ibm.com) + * + * License: GPL */ #define KMSG_COMPONENT "cio" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt -#include <linux/module.h> +#include <linux/export.h> #include <linux/init.h> #include <linux/device.h> #include <linux/slab.h> @@ -1285,5 +1287,3 @@ void css_driver_unregister(struct css_driver *cdrv) driver_unregister(&cdrv->drv); } EXPORT_SYMBOL_GPL(css_driver_unregister); - -MODULE_LICENSE("GPL"); diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 6a58bc8f46e2..79823ee9c100 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -5,12 +5,14 @@ * Author(s): Arnd Bergmann (arndb@de.ibm.com) * Cornelia Huck (cornelia.huck@de.ibm.com) * Martin Schwidefsky (schwidefsky@de.ibm.com) + * + * License: GPL */ #define KMSG_COMPONENT "cio" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt -#include <linux/module.h> +#include <linux/export.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/errno.h> @@ -145,7 +147,6 @@ static struct css_device_id io_subchannel_ids[] = { { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, }, { /* end of list */ }, }; -MODULE_DEVICE_TABLE(css, io_subchannel_ids); static int io_subchannel_prepare(struct subchannel *sch) { @@ -2150,7 +2151,6 @@ int ccw_device_siosl(struct ccw_device *cdev) } EXPORT_SYMBOL_GPL(ccw_device_siosl); -MODULE_LICENSE("GPL"); EXPORT_SYMBOL(ccw_device_set_online); EXPORT_SYMBOL(ccw_device_set_offline); EXPORT_SYMBOL(ccw_driver_register); diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index 065b1be98e2c..ec497af99dd8 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h @@ -13,7 +13,6 @@ */ enum dev_state { DEV_STATE_NOT_OPER, - DEV_STATE_SENSE_PGID, DEV_STATE_SENSE_ID, DEV_STATE_OFFLINE, DEV_STATE_VERIFY, diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 8327d47e08b6..9afb5ce13007 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -1058,12 +1058,6 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { [DEV_EVENT_TIMEOUT] = ccw_device_nop, [DEV_EVENT_VERIFY] = ccw_device_nop, }, - [DEV_STATE_SENSE_PGID] = { - [DEV_EVENT_NOTOPER] = ccw_device_request_event, - [DEV_EVENT_INTERRUPT] = ccw_device_request_event, - [DEV_EVENT_TIMEOUT] = ccw_device_request_event, - [DEV_EVENT_VERIFY] = ccw_device_nop, - }, [DEV_STATE_SENSE_ID] = { [DEV_EVENT_NOTOPER] = ccw_device_request_event, [DEV_EVENT_INTERRUPT] = ccw_device_request_event, diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index 877d9f601e63..cf8c4ac6323a 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -3,8 +3,10 @@ * * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) * Cornelia Huck (cornelia.huck@de.ibm.com) + * + * License: GPL */ -#include <linux/module.h> +#include <linux/export.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/slab.h> @@ -676,7 +678,6 @@ void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid) } EXPORT_SYMBOL_GPL(ccw_device_get_schid); -MODULE_LICENSE("GPL"); EXPORT_SYMBOL(ccw_device_set_options_mask); EXPORT_SYMBOL(ccw_device_set_options); 
EXPORT_SYMBOL(ccw_device_clear_options); diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index ed92fb09fc8e..f407b4f9d0ba 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -1273,7 +1273,7 @@ static int ap_uevent (struct device *dev, struct kobj_uevent_env *env) return retval; } -static int ap_dev_suspend(struct device *dev, pm_message_t state) +static int ap_dev_suspend(struct device *dev) { struct ap_device *ap_dev = to_ap_dev(dev); @@ -1287,11 +1287,6 @@ static int ap_dev_suspend(struct device *dev, pm_message_t state) return 0; } -static int ap_dev_resume(struct device *dev) -{ - return 0; -} - static void ap_bus_suspend(void) { ap_suspend_flag = 1; @@ -1356,12 +1351,13 @@ static struct notifier_block ap_power_notifier = { .notifier_call = ap_power_event, }; +static SIMPLE_DEV_PM_OPS(ap_bus_pm_ops, ap_dev_suspend, NULL); + static struct bus_type ap_bus_type = { .name = "ap", .match = &ap_bus_match, .uevent = &ap_uevent, - .suspend = ap_dev_suspend, - .resume = ap_dev_resume, + .pm = &ap_bus_pm_ops, }; void ap_device_init_reply(struct ap_device *ap_dev, diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 8688ad4c825f..639ed4e6afd1 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -24,7 +24,7 @@ #include <linux/wait.h> #include <linux/list.h> #include <linux/bitops.h> -#include <linux/module.h> +#include <linux/moduleparam.h> #include <linux/io.h> #include <linux/kvm_para.h> #include <linux/notifier.h> @@ -235,16 +235,6 @@ static struct airq_info *new_airq_info(void) return info; } -static void destroy_airq_info(struct airq_info *info) -{ - if (!info) - return; - - unregister_adapter_interrupt(&info->airq); - airq_iv_release(info->aiv); - kfree(info); -} - static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs, u64 *first, void **airq_info) { @@ -1294,7 +1284,6 @@ static struct ccw_device_id virtio_ids[] = { { CCW_DEVICE(0x3832, 0) }, {}, }; -MODULE_DEVICE_TABLE(ccw, virtio_ids); static struct ccw_driver virtio_ccw_driver = { .driver = { @@ -1406,14 +1395,4 @@ static int __init virtio_ccw_init(void) no_auto_parse(); return ccw_driver_register(&virtio_ccw_driver); } -module_init(virtio_ccw_init); - -static void __exit virtio_ccw_exit(void) -{ - int i; - - ccw_driver_unregister(&virtio_ccw_driver); - for (i = 0; i < MAX_AIRQ_AREAS; i++) - destroy_airq_info(airq_areas[i]); -} -module_exit(virtio_ccw_exit); +device_initcall(virtio_ccw_init); |
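
Note on the dasd_int.h changes above: the patch replaces the old aggregate byte masks in struct dasd_path with a per-path flags word (one struct dasd_path per channel path, bit 0x80 >> chp of a __u8 mask standing for path chp), and the new inline helpers rebuild masks such as tbvpm or cablepm from those flags on demand. The standalone userspace sketch below only models that convention for illustration; it is not part of the patch. The names demo_path, demo_device, demo_path_operational, demo_path_available and demo_get_tbvpm are invented for this example, and plain C bit operations stand in for the kernel's test_bit()/__set_bit() helpers.

/*
 * Illustrative sketch (not part of the patch): per-path flag words with
 * 8-bit path masks derived on demand, bit (0x80 >> chp) = channel path chp.
 */
#include <stdio.h>

#define DEMO_PATH_OPERATIONAL  (1u << 1)   /* models DASD_PATH_OPERATIONAL */
#define DEMO_PATH_TBV          (1u << 2)   /* models DASD_PATH_TBV ("to be verified") */

struct demo_path {
	unsigned int flags;                /* per-path state bits */
};

struct demo_device {
	struct demo_path path[8];
	unsigned char opm;                 /* cached mask of operational paths */
};

/* Mark a path operational and reflect it in the cached opm mask. */
static void demo_path_operational(struct demo_device *dev, int chp)
{
	dev->path[chp].flags |= DEMO_PATH_OPERATIONAL;
	dev->opm |= 0x80 >> chp;
}

/* Take a path out of normal operation and schedule it for verification. */
static void demo_path_available(struct demo_device *dev, int chp)
{
	dev->path[chp].flags &= ~DEMO_PATH_OPERATIONAL;
	dev->opm &= ~(0x80 >> chp);
	dev->path[chp].flags |= DEMO_PATH_TBV;
}

/* Rebuild the "to be verified" path mask from the per-path flags. */
static unsigned char demo_get_tbvpm(struct demo_device *dev)
{
	unsigned char tbvpm = 0;
	int chp;

	for (chp = 0; chp < 8; chp++)
		if (dev->path[chp].flags & DEMO_PATH_TBV)
			tbvpm |= 0x80 >> chp;
	return tbvpm;
}

int main(void)
{
	struct demo_device dev = { 0 };

	demo_path_operational(&dev, 0);    /* path 0 usable */
	demo_path_operational(&dev, 3);    /* path 3 usable */
	demo_path_available(&dev, 3);      /* path 3 reappears, verify it first */

	printf("opm   = 0x%02x\n", dev.opm);               /* 0x80: only path 0 operational */
	printf("tbvpm = 0x%02x\n", demo_get_tbvpm(&dev));  /* 0x10: path 3 pending verification */
	return 0;
}

The design point the sketch tries to capture is the one visible in the diff: keeping state per path (operational, to-be-verified, CUIR, IFCC, miscabled, no-HPF) and deriving whatever summary mask a caller needs, instead of maintaining several aggregate byte masks that can drift out of sync.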