author     Linus Torvalds <torvalds@linux-foundation.org>   2015-07-01 07:44:14 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-07-01 07:44:14 +0300
commit     0161b6e0d88e04f54ada7112bb2dad1f3ae472af
tree       54f3bdfdb74dc55a8165d0e46bff0ae65ebea123 /drivers/s390
parent     94521ca3dfa6efd4ceb4f6746bf8ead58afb1490
parent     1af135a1e70ee6e31b5c265c769448984d221cf2
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull more s390 updates from Martin Schwidefsky:
"There is one larger patch for the AP bus code to make it work with the
longer reset periods of the latest crypto cards.
A new default configuration, a naming cleanup for SMP and a few fixes"
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
s390/kdump: fix compile for !SMP
s390/kdump: fix nosmt kernel parameter
s390: new default configuration
s390/smp: cleanup core vs. cpu in the SCLP interface
s390/smp: fix sigp cpu detection loop
s390/zcrypt: Fixed reset and interrupt handling of AP queues
s390/kdump: fix REGSET_VX_LOW vector register ELF notes
s390/bpf: Fix backward jumps
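
The pull message above singles out the zcrypt change: instead of busy-waiting on an AP queue reset inside a bounded retry loop, the reworked ap_bus.c submits the reset once (ap_init_queue() marks the device AP_RESET_IN_PROGRESS) and lets the regular poll path (ap_poll_queue()) check completion later, which is what makes the much longer reset periods of newer crypto cards tolerable. The following is a minimal, self-contained sketch of that asynchronous pattern only; the struct, the state names other than the AP_RESET_* idea, and the simulated test_queue() are illustrative stand-ins, not the kernel's AP queue interface.

/* Hypothetical standalone model of the asynchronous reset handling:
 * submit the reset once, remember the state, and let a periodic poll
 * step advance it.  Names below are illustrative, not kernel API. */
#include <stdio.h>

enum reset_state { RESET_IGNORE, RESET_IN_PROGRESS };
enum queue_response { RESP_NORMAL, RESP_RESET_IN_PROGRESS, RESP_BUSY };

struct queue {
    enum reset_state reset;
    int busy_polls_left;        /* simulates how long the card needs */
};

/* Stand-in for ap_reset_queue()/ap_test_queue(): the real functions
 * issue PQAP instructions; here we just count down. */
static enum queue_response test_queue(struct queue *q)
{
    if (q->busy_polls_left > 0) {
        q->busy_polls_left--;
        return RESP_RESET_IN_PROGRESS;
    }
    return RESP_NORMAL;
}

static void submit_reset(struct queue *q)
{
    q->reset = RESET_IN_PROGRESS;       /* like AP_RESET_IN_PROGRESS */
}

/* One poll pass: returns 1 if the caller should poll again later. */
static int poll_queue(struct queue *q)
{
    if (q->reset != RESET_IN_PROGRESS)
        return 0;
    switch (test_queue(q)) {
    case RESP_NORMAL:
        q->reset = RESET_IGNORE;        /* reset finished */
        return 0;
    case RESP_RESET_IN_PROGRESS:
    case RESP_BUSY:
    default:
        return 1;                       /* try again after a timeout */
    }
}

int main(void)
{
    struct queue q = { RESET_IGNORE, 3 };
    int pass = 0;

    submit_reset(&q);
    while (poll_queue(&q))
        printf("pass %d: reset still in progress\n", ++pass);
    printf("pass %d: reset done\n", ++pass);
    return 0;
}

The point of the design is visible in the sketch: no code path sleeps while the card resets; each pass either observes completion or asks to be called again, so a reset that takes minutes simply costs more poll passes.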
Diffstat (limited to 'drivers/s390')
-rw-r--r--   drivers/s390/char/sclp_cmd.c        23
-rw-r--r--   drivers/s390/char/sclp_early.c      10
-rw-r--r--   drivers/s390/char/zcore.c            2
-rw-r--r--   drivers/s390/crypto/ap_bus.c       424
-rw-r--r--   drivers/s390/crypto/ap_bus.h        13
-rw-r--r--   drivers/s390/crypto/zcrypt_cex4.c    2
6 files changed, 256 insertions(+), 218 deletions(-)
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index f74c040d5c10..e9485fbbb373 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -92,8 +92,8 @@ struct read_cpu_info_sccb {
    u8 reserved[4096 - 16];
} __attribute__((packed, aligned(PAGE_SIZE)));

-static void sclp_fill_cpu_info(struct sclp_cpu_info *info,
-                  struct read_cpu_info_sccb *sccb)
+static void sclp_fill_core_info(struct sclp_core_info *info,
+                  struct read_cpu_info_sccb *sccb)
{
    char *page = (char *) sccb;

@@ -101,12 +101,11 @@ static void sclp_fill_cpu_info(struct sclp_cpu_info *info,
    info->configured = sccb->nr_configured;
    info->standby = sccb->nr_standby;
    info->combined = sccb->nr_configured + sccb->nr_standby;
-   info->has_cpu_type = sclp.has_cpu_type;
-   memcpy(&info->cpu, page + sccb->offset_configured,
-          info->combined * sizeof(struct sclp_cpu_entry));
+   memcpy(&info->core, page + sccb->offset_configured,
+          info->combined * sizeof(struct sclp_core_entry));
}

-int sclp_get_cpu_info(struct sclp_cpu_info *info)
+int sclp_get_core_info(struct sclp_core_info *info)
{
    int rc;
    struct read_cpu_info_sccb *sccb;
@@ -127,7 +126,7 @@ int sclp_get_cpu_info(struct sclp_cpu_info *info)
        rc = -EIO;
        goto out;
    }
-   sclp_fill_cpu_info(info, sccb);
+   sclp_fill_core_info(info, sccb);
out:
    free_page((unsigned long) sccb);
    return rc;
@@ -137,7 +136,7 @@ struct cpu_configure_sccb {
    struct sccb_header header;
} __attribute__((packed, aligned(8)));

-static int do_cpu_configure(sclp_cmdw_t cmd)
+static int do_core_configure(sclp_cmdw_t cmd)
{
    struct cpu_configure_sccb *sccb;
    int rc;
@@ -171,14 +170,14 @@ out:
    return rc;
}

-int sclp_cpu_configure(u8 cpu)
+int sclp_core_configure(u8 core)
{
-   return do_cpu_configure(SCLP_CMDW_CONFIGURE_CPU | cpu << 8);
+   return do_core_configure(SCLP_CMDW_CONFIGURE_CPU | core << 8);
}

-int sclp_cpu_deconfigure(u8 cpu)
+int sclp_core_deconfigure(u8 core)
{
-   return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8);
+   return do_core_configure(SCLP_CMDW_DECONFIGURE_CPU | core << 8);
}

#ifdef CONFIG_MEMORY_HOTPLUG
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index d7f696d95597..aeed7969fd79 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -98,7 +98,7 @@ static int __init sclp_read_info_early(struct read_info_sccb *sccb)

static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
{
-   struct sclp_cpu_entry *cpue;
+   struct sclp_core_entry *cpue;
    u16 boot_cpu_address, cpu;

    if (sclp_read_info_early(sccb))
@@ -106,7 +106,7 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb)

    sclp.facilities = sccb->facilities;
    sclp.has_sprp = !!(sccb->fac84 & 0x02);
-   sclp.has_cpu_type = !!(sccb->fac84 & 0x01);
+   sclp.has_core_type = !!(sccb->fac84 & 0x01);
    if (sccb->fac85 & 0x02)
        S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
    sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
@@ -116,11 +116,11 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb)

    if (!sccb->hcpua) {
        if (MACHINE_IS_VM)
-           sclp.max_cpu = 64;
+           sclp.max_cores = 64;
        else
-           sclp.max_cpu = sccb->ncpurl;
+           sclp.max_cores = sccb->ncpurl;
    } else {
-       sclp.max_cpu = sccb->hcpua + 1;
+       sclp.max_cores = sccb->hcpua + 1;
    }

    boot_cpu_address = stap();
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 9a3dd95029cc..823f41fc4bbd 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -154,7 +154,7 @@ static int __init init_cpu_info(enum arch_id arch)

    /* get info for boot cpu from lowcore, stored in the HSA */
-   sa_ext = dump_save_area_create(0);
+   sa_ext = dump_save_areas.areas[0];
    if (!sa_ext)
        return -ENOMEM;
    if (memcpy_hsa_kernel(&sa_ext->sa, sys_info.sa_base,
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 3ba611419759..559a9dcdb15d 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -60,7 +60,7 @@ static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags);
static int ap_device_remove(struct device *dev);
static int ap_device_probe(struct device *dev);
static void ap_interrupt_handler(struct airq_struct *airq);
-static void ap_reset(struct ap_device *ap_dev);
+static void ap_reset(struct ap_device *ap_dev, unsigned long *flags);
static void ap_config_timeout(unsigned long ptr);
static int ap_select_domain(void);
static void ap_query_configuration(void);
@@ -310,35 +310,26 @@ static inline int __ap_query_configuration(struct ap_config_info *config)
static int ap_query_functions(ap_qid_t qid, unsigned int *functions)
{
    struct ap_queue_status status;
-   int i;
+
    status = __ap_query_functions(qid, functions);
-   for (i = 0; i < AP_MAX_RESET; i++) {
-       if (ap_queue_status_invalid_test(&status))
-           return -ENODEV;
+   if (ap_queue_status_invalid_test(&status))
+       return -ENODEV;

-       switch (status.response_code) {
-       case AP_RESPONSE_NORMAL:
-           return 0;
-       case AP_RESPONSE_RESET_IN_PROGRESS:
-       case AP_RESPONSE_BUSY:
-           break;
-       case AP_RESPONSE_Q_NOT_AVAIL:
-       case AP_RESPONSE_DECONFIGURED:
-       case AP_RESPONSE_CHECKSTOPPED:
-       case AP_RESPONSE_INVALID_ADDRESS:
-           return -ENODEV;
-       case AP_RESPONSE_OTHERWISE_CHANGED:
-           break;
-       default:
-           break;
-       }
-       if (i < AP_MAX_RESET - 1) {
-           udelay(5);
-           status = __ap_query_functions(qid, functions);
-       }
+   switch (status.response_code) {
+   case AP_RESPONSE_NORMAL:
+       return 0;
+   case AP_RESPONSE_Q_NOT_AVAIL:
+   case AP_RESPONSE_DECONFIGURED:
+   case AP_RESPONSE_CHECKSTOPPED:
+   case AP_RESPONSE_INVALID_ADDRESS:
+       return -ENODEV;
+   case AP_RESPONSE_RESET_IN_PROGRESS:
+   case AP_RESPONSE_BUSY:
+   case AP_RESPONSE_OTHERWISE_CHANGED:
+   default:
+       return -EBUSY;
    }
-   return -EBUSY;
}

/**
@@ -350,47 +341,25 @@ static int ap_query_functions(ap_qid_t qid, unsigned int *functions)
 * on the return value it waits a while and tests the AP queue if interrupts
 * have been switched on using ap_test_queue().
 */
-static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
+static int ap_queue_enable_interruption(struct ap_device *ap_dev, void *ind)
{
    struct ap_queue_status status;
-   int t_depth, t_device_type, rc, i;
-   rc = -EBUSY;
-   status = ap_queue_interruption_control(qid, ind);
-
-   for (i = 0; i < AP_MAX_RESET; i++) {
-       switch (status.response_code) {
-       case AP_RESPONSE_NORMAL:
-           if (status.int_enabled)
-               return 0;
-           break;
-       case AP_RESPONSE_RESET_IN_PROGRESS:
-       case AP_RESPONSE_BUSY:
-           if (i < AP_MAX_RESET - 1) {
-               udelay(5);
-               status = ap_queue_interruption_control(qid,
-                                   ind);
-               continue;
-           }
-           break;
-       case AP_RESPONSE_Q_NOT_AVAIL:
-       case AP_RESPONSE_DECONFIGURED:
-       case AP_RESPONSE_CHECKSTOPPED:
-       case AP_RESPONSE_INVALID_ADDRESS:
-           return -ENODEV;
-       case AP_RESPONSE_OTHERWISE_CHANGED:
-           if (status.int_enabled)
-               return 0;
-           break;
-       default:
-           break;
-       }
-       if (i < AP_MAX_RESET - 1) {
-           udelay(5);
-           status = ap_test_queue(qid, &t_depth, &t_device_type);
-       }
+   status = ap_queue_interruption_control(ap_dev->qid, ind);
+   switch (status.response_code) {
+   case AP_RESPONSE_NORMAL:
+   case AP_RESPONSE_OTHERWISE_CHANGED:
+       return 0;
+   case AP_RESPONSE_Q_NOT_AVAIL:
+   case AP_RESPONSE_DECONFIGURED:
+   case AP_RESPONSE_CHECKSTOPPED:
+   case AP_RESPONSE_INVALID_ADDRESS:
+       return -ENODEV;
+   case AP_RESPONSE_RESET_IN_PROGRESS:
+   case AP_RESPONSE_BUSY:
+   default:
+       return -EBUSY;
    }
-   return rc;
}

/**
@@ -511,109 +480,94 @@ int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
EXPORT_SYMBOL(ap_recv);

/**
+ * __ap_schedule_poll_timer(): Schedule poll timer.
+ *
+ * Set up the timer to run the poll tasklet
+ */
+static inline void __ap_schedule_poll_timer(void)
+{
+   ktime_t hr_time;
+
+   spin_lock_bh(&ap_poll_timer_lock);
+   if (!hrtimer_is_queued(&ap_poll_timer) && !ap_suspend_flag) {
+       hr_time = ktime_set(0, poll_timeout);
+       hrtimer_forward_now(&ap_poll_timer, hr_time);
+       hrtimer_restart(&ap_poll_timer);
+   }
+   spin_unlock_bh(&ap_poll_timer_lock);
+}
+
+/**
+ * ap_schedule_poll_timer(): Schedule poll timer.
+ *
+ * Set up the timer to run the poll tasklet
+ */
+static inline void ap_schedule_poll_timer(void)
+{
+   if (ap_using_interrupts())
+       return;
+   __ap_schedule_poll_timer();
+}
+
+
+/**
 * ap_query_queue(): Check if an AP queue is available.
 * @qid: The AP queue number
 * @queue_depth: Pointer to queue depth value
 * @device_type: Pointer to device type value
- *
- * The test is repeated for AP_MAX_RESET times.
 */
static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
{
    struct ap_queue_status status;
-   int t_depth, t_device_type, rc, i;
+   int t_depth, t_device_type;

-   rc = -EBUSY;
-   for (i = 0; i < AP_MAX_RESET; i++) {
-       status = ap_test_queue(qid, &t_depth, &t_device_type);
-       switch (status.response_code) {
-       case AP_RESPONSE_NORMAL:
-           *queue_depth = t_depth + 1;
-           *device_type = t_device_type;
-           rc = 0;
-           break;
-       case AP_RESPONSE_Q_NOT_AVAIL:
-           rc = -ENODEV;
-           break;
-       case AP_RESPONSE_RESET_IN_PROGRESS:
-           break;
-       case AP_RESPONSE_DECONFIGURED:
-           rc = -ENODEV;
-           break;
-       case AP_RESPONSE_CHECKSTOPPED:
-           rc = -ENODEV;
-           break;
-       case AP_RESPONSE_INVALID_ADDRESS:
-           rc = -ENODEV;
-           break;
-       case AP_RESPONSE_OTHERWISE_CHANGED:
-           break;
-       case AP_RESPONSE_BUSY:
-           break;
-       default:
-           BUG();
-       }
-       if (rc != -EBUSY)
-           break;
-       if (i < AP_MAX_RESET - 1)
-           udelay(5);
+   status = ap_test_queue(qid, &t_depth, &t_device_type);
+   switch (status.response_code) {
+   case AP_RESPONSE_NORMAL:
+       *queue_depth = t_depth + 1;
+       *device_type = t_device_type;
+       return 0;
+   case AP_RESPONSE_Q_NOT_AVAIL:
+   case AP_RESPONSE_DECONFIGURED:
+   case AP_RESPONSE_CHECKSTOPPED:
+   case AP_RESPONSE_INVALID_ADDRESS:
+       return -ENODEV;
+   case AP_RESPONSE_RESET_IN_PROGRESS:
+   case AP_RESPONSE_OTHERWISE_CHANGED:
+   case AP_RESPONSE_BUSY:
+       return -EBUSY;
+   default:
+       BUG();
    }
-   return rc;
}

/**
 * ap_init_queue(): Reset an AP queue.
 * @qid: The AP queue number
 *
- * Reset an AP queue and wait for it to become available again.
+ * Submit the Reset command to an AP queue.
+ * Since the reset is asynchron set the state to 'RESET_IN_PROGRESS'
+ * and check later via ap_poll_queue() if the reset is done.
 */
-static int ap_init_queue(ap_qid_t qid)
+static int ap_init_queue(struct ap_device *ap_dev)
{
    struct ap_queue_status status;
-   int rc, dummy, i;

-   rc = -ENODEV;
-   status = ap_reset_queue(qid);
-   for (i = 0; i < AP_MAX_RESET; i++) {
-       switch (status.response_code) {
-       case AP_RESPONSE_NORMAL:
-           if (status.queue_empty)
-               rc = 0;
-           break;
-       case AP_RESPONSE_Q_NOT_AVAIL:
-       case AP_RESPONSE_DECONFIGURED:
-       case AP_RESPONSE_CHECKSTOPPED:
-           i = AP_MAX_RESET;   /* return with -ENODEV */
-           break;
-       case AP_RESPONSE_RESET_IN_PROGRESS:
-           rc = -EBUSY;
-       case AP_RESPONSE_BUSY:
-       default:
-           break;
-       }
-       if (rc != -ENODEV && rc != -EBUSY)
-           break;
-       if (i < AP_MAX_RESET - 1) {
-           /* Time we are waiting until we give up (0.7sec * 90).
-            * Since the actual request (in progress) will not
-            * interrupted immediately for the reset command,
-            * we have to be patient. In worst case we have to
-            * wait 60sec + reset time (some msec).
-            */
-           schedule_timeout(AP_RESET_TIMEOUT);
-           status = ap_test_queue(qid, &dummy, &dummy);
-       }
-   }
-   if (rc == 0 && ap_using_interrupts()) {
-       rc = ap_queue_enable_interruption(qid, ap_airq.lsi_ptr);
-       /* If interruption mode is supported by the machine,
-        * but an AP can not be enabled for interruption then
-        * the AP will be discarded.
-        */
-       if (rc)
-           pr_err("Registering adapter interrupts for "
-                  "AP %d failed\n", AP_QID_DEVICE(qid));
+   status = ap_reset_queue(ap_dev->qid);
+   switch (status.response_code) {
+   case AP_RESPONSE_NORMAL:
+       ap_dev->interrupt = AP_INTR_DISABLED;
+       ap_dev->reset = AP_RESET_IN_PROGRESS;
+       return 0;
+   case AP_RESPONSE_RESET_IN_PROGRESS:
+   case AP_RESPONSE_BUSY:
+       return -EBUSY;
+   case AP_RESPONSE_Q_NOT_AVAIL:
+   case AP_RESPONSE_DECONFIGURED:
+   case AP_RESPONSE_CHECKSTOPPED:
+   default:
+       return -ENODEV;
    }
-   return rc;
}

/**
@@ -729,10 +683,63 @@ static ssize_t ap_pendingq_count_show(struct device *dev,

static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);

+static ssize_t ap_reset_show(struct device *dev,
+                struct device_attribute *attr, char *buf)
+{
+   struct ap_device *ap_dev = to_ap_dev(dev);
+   int rc = 0;
+
+   spin_lock_bh(&ap_dev->lock);
+   switch (ap_dev->reset) {
+   case AP_RESET_IGNORE:
+       rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
+       break;
+   case AP_RESET_ARMED:
+       rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
+       break;
+   case AP_RESET_DO:
+       rc = snprintf(buf, PAGE_SIZE, "Reset Timer expired.\n");
+       break;
+   case AP_RESET_IN_PROGRESS:
+       rc = snprintf(buf, PAGE_SIZE, "Reset in progress.\n");
+       break;
+   default:
+       break;
+   }
+   spin_unlock_bh(&ap_dev->lock);
+   return rc;
+}
+
+static DEVICE_ATTR(reset, 0444, ap_reset_show, NULL);
+
+static ssize_t ap_interrupt_show(struct device *dev,
+                struct device_attribute *attr, char *buf)
+{
+   struct ap_device *ap_dev = to_ap_dev(dev);
+   int rc = 0;
+
+   spin_lock_bh(&ap_dev->lock);
+   switch (ap_dev->interrupt) {
+   case AP_INTR_DISABLED:
+       rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
+       break;
+   case AP_INTR_ENABLED:
+       rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
+       break;
+   case AP_INTR_IN_PROGRESS:
+       rc = snprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
+       break;
+   }
+   spin_unlock_bh(&ap_dev->lock);
+   return rc;
+}
+
+static DEVICE_ATTR(interrupt, 0444, ap_interrupt_show, NULL);
+
static ssize_t ap_modalias_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
-   return sprintf(buf, "ap:t%02X", to_ap_dev(dev)->device_type);
+   return sprintf(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type);
}

static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
@@ -753,6 +760,8 @@ static struct attribute *ap_dev_attrs[] = {
    &dev_attr_request_count.attr,
    &dev_attr_requestq_count.attr,
    &dev_attr_pendingq_count.attr,
+   &dev_attr_reset.attr,
+   &dev_attr_interrupt.attr,
    &dev_attr_modalias.attr,
    &dev_attr_ap_functions.attr,
    NULL
@@ -926,6 +935,10 @@ static int ap_device_probe(struct device *dev)
        spin_lock_bh(&ap_device_list_lock);
        list_del_init(&ap_dev->list);
        spin_unlock_bh(&ap_device_list_lock);
+   } else {
+       if (ap_dev->reset == AP_RESET_IN_PROGRESS ||
+           ap_dev->interrupt == AP_INTR_IN_PROGRESS)
+           __ap_schedule_poll_timer();
    }
    return rc;
}
@@ -1411,7 +1424,7 @@ static void ap_scan_bus(struct work_struct *unused)
    struct ap_device *ap_dev;
    struct device *dev;
    ap_qid_t qid;
-   int queue_depth, device_type;
+   int queue_depth = 0, device_type = 0;
    unsigned int device_functions;
    int rc, i;
@@ -1429,15 +1442,9 @@ static void ap_scan_bus(struct work_struct *unused)
        else
            rc = -ENODEV;
        if (dev) {
-           if (rc == -EBUSY) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(AP_RESET_TIMEOUT);
-               rc = ap_query_queue(qid, &queue_depth,
-                           &device_type);
-           }
            ap_dev = to_ap_dev(dev);
            spin_lock_bh(&ap_dev->lock);
-           if (rc || ap_dev->unregistered) {
+           if (rc == -ENODEV || ap_dev->unregistered) {
                spin_unlock_bh(&ap_dev->lock);
                if (ap_dev->unregistered)
                    i--;
@@ -1451,13 +1458,15 @@ static void ap_scan_bus(struct work_struct *unused)
        }
        if (rc)
            continue;
-       rc = ap_init_queue(qid);
-       if (rc)
-           continue;
        ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL);
        if (!ap_dev)
            break;
        ap_dev->qid = qid;
+       rc = ap_init_queue(ap_dev);
+       if ((rc != 0) && (rc != -EBUSY)) {
+           kfree(ap_dev);
+           continue;
+       }
        ap_dev->queue_depth = queue_depth;
        ap_dev->unregistered = 1;
        spin_lock_init(&ap_dev->lock);
@@ -1520,36 +1529,6 @@ ap_config_timeout(unsigned long ptr)
}

/**
- * __ap_schedule_poll_timer(): Schedule poll timer.
- *
- * Set up the timer to run the poll tasklet
- */
-static inline void __ap_schedule_poll_timer(void)
-{
-   ktime_t hr_time;
-
-   spin_lock_bh(&ap_poll_timer_lock);
-   if (!hrtimer_is_queued(&ap_poll_timer) && !ap_suspend_flag) {
-       hr_time = ktime_set(0, poll_timeout);
-       hrtimer_forward_now(&ap_poll_timer, hr_time);
-       hrtimer_restart(&ap_poll_timer);
-   }
-   spin_unlock_bh(&ap_poll_timer_lock);
-}
-
-/**
- * ap_schedule_poll_timer(): Schedule poll timer.
- *
- * Set up the timer to run the poll tasklet
- */
-static inline void ap_schedule_poll_timer(void)
-{
-   if (ap_using_interrupts())
-       return;
-   __ap_schedule_poll_timer();
-}
-
-/**
 * ap_poll_read(): Receive pending reply messages from an AP device.
 * @ap_dev: pointer to the AP device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
@@ -1568,6 +1547,7 @@ static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
               ap_dev->reply->message, ap_dev->reply->length);
    switch (status.response_code) {
    case AP_RESPONSE_NORMAL:
+       ap_dev->interrupt = status.int_enabled;
        atomic_dec(&ap_poll_requests);
        ap_decrease_queue_count(ap_dev);
        list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
@@ -1582,6 +1562,7 @@ static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
        *flags |= 1;
        break;
    case AP_RESPONSE_NO_PENDING_REPLY:
+       ap_dev->interrupt = status.int_enabled;
        if (status.queue_empty) {
            /* The card shouldn't forget requests but who knows. */
            atomic_sub(ap_dev->queue_count, &ap_poll_requests);
@@ -1612,7 +1593,8 @@ static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
    struct ap_message *ap_msg;

    if (ap_dev->requestq_count <= 0 ||
-       ap_dev->queue_count >= ap_dev->queue_depth)
+       (ap_dev->queue_count >= ap_dev->queue_depth) ||
+       (ap_dev->reset == AP_RESET_IN_PROGRESS))
        return 0;
    /* Start the next request on the queue. */
    ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
@@ -1646,6 +1628,8 @@ static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)

/**
 * ap_poll_queue(): Poll AP device for pending replies and send new messages.
+ * Check if the queue has a pending reset. In case it's done re-enable
+ * interrupts, otherwise reschedule the poll_timer for another attempt.
 * @ap_dev: pointer to the bus device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *     required, bit 2^1 is set if the poll timer needs to get armed
@@ -1656,7 +1640,51 @@ static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
 */
static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
{
-   int rc;
+   int rc, depth, type;
+   struct ap_queue_status status;
+
+
+   if (ap_dev->reset == AP_RESET_IN_PROGRESS) {
+       status = ap_test_queue(ap_dev->qid, &depth, &type);
+       switch (status.response_code) {
+       case AP_RESPONSE_NORMAL:
+           ap_dev->reset = AP_RESET_IGNORE;
+           if (ap_using_interrupts()) {
+               rc = ap_queue_enable_interruption(
+                   ap_dev, ap_airq.lsi_ptr);
+               if (!rc)
+                   ap_dev->interrupt = AP_INTR_IN_PROGRESS;
+               else if (rc == -ENODEV) {
+                   pr_err("Registering adapter interrupts for "
+                          "AP %d failed\n", AP_QID_DEVICE(ap_dev->qid));
+                   return rc;
+               }
+           }
+           /* fall through */
+       case AP_RESPONSE_BUSY:
+       case AP_RESPONSE_RESET_IN_PROGRESS:
+           *flags |= AP_POLL_AFTER_TIMEOUT;
+           break;
+       case AP_RESPONSE_Q_NOT_AVAIL:
+       case AP_RESPONSE_DECONFIGURED:
+       case AP_RESPONSE_CHECKSTOPPED:
+           return -ENODEV;
+       default:
+           break;
+       }
+   }
+
+   if ((ap_dev->reset != AP_RESET_IN_PROGRESS) &&
+       (ap_dev->interrupt == AP_INTR_IN_PROGRESS)) {
+       status = ap_test_queue(ap_dev->qid, &depth, &type);
+       if (ap_using_interrupts()) {
+           if (status.int_enabled == 1)
+               ap_dev->interrupt = AP_INTR_ENABLED;
+           else
+               *flags |= AP_POLL_AFTER_TIMEOUT;
+       } else
+           ap_dev->interrupt = AP_INTR_DISABLED;
+   }

    rc = ap_poll_read(ap_dev, flags);
    if (rc)
@@ -1676,7 +1704,8 @@ static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_ms
    struct ap_queue_status status;

    if (list_empty(&ap_dev->requestq) &&
-       ap_dev->queue_count < ap_dev->queue_depth) {
+       (ap_dev->queue_count < ap_dev->queue_depth) &&
+       (ap_dev->reset != AP_RESET_IN_PROGRESS)) {
        status = __ap_send(ap_dev->qid, ap_msg->psmid,
                   ap_msg->message, ap_msg->length,
                   ap_msg->special);
@@ -1789,21 +1818,20 @@ static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
 * Reset a not responding AP device and move all requests from the
 * pending queue to the request queue.
 */
-static void ap_reset(struct ap_device *ap_dev)
+static void ap_reset(struct ap_device *ap_dev, unsigned long *flags)
{
    int rc;

-   ap_dev->reset = AP_RESET_IGNORE;
    atomic_sub(ap_dev->queue_count, &ap_poll_requests);
    ap_dev->queue_count = 0;
    list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
    ap_dev->requestq_count += ap_dev->pendingq_count;
    ap_dev->pendingq_count = 0;
-   rc = ap_init_queue(ap_dev->qid);
+   rc = ap_init_queue(ap_dev);
    if (rc == -ENODEV)
        ap_dev->unregistered = 1;
    else
-       __ap_schedule_poll_timer();
+       *flags |= AP_POLL_AFTER_TIMEOUT;
}

static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
@@ -1812,7 +1840,7 @@ static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
{
        if (ap_poll_queue(ap_dev, flags))
            ap_dev->unregistered = 1;
        if (ap_dev->reset == AP_RESET_DO)
-           ap_reset(ap_dev);
+           ap_reset(ap_dev, flags);
    }
    return 0;
}
@@ -1845,9 +1873,9 @@ static void ap_poll_all(unsigned long dummy)
            spin_unlock(&ap_dev->lock);
        }
        spin_unlock(&ap_device_list_lock);
-   } while (flags & 1);
-   if (flags & 2)
-       ap_schedule_poll_timer();
+   } while (flags & AP_POLL_IMMEDIATELY);
+   if (flags & AP_POLL_AFTER_TIMEOUT)
+       __ap_schedule_poll_timer();
}

/**
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 2737d261a324..00468c8d0781 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -32,11 +32,13 @@
#define AP_DEVICES 64           /* Number of AP devices. */
#define AP_DOMAINS 256          /* Number of AP domains. */
-#define AP_MAX_RESET 90         /* Maximum number of resets. */
#define AP_RESET_TIMEOUT (HZ*0.7)   /* Time in ticks for reset timeouts. */
#define AP_CONFIG_TIME 30       /* Time in seconds between AP bus rescans. */
#define AP_POLL_TIME 1          /* Time in ticks between receive polls. */

+#define AP_POLL_IMMEDIATELY 1       /* continue running poll tasklet */
+#define AP_POLL_AFTER_TIMEOUT 2     /* run poll tasklet again after timout */
+
extern int ap_domain_index;

/**
@@ -135,6 +137,14 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
#define AP_RESET_IGNORE 0       /* request timeout will be ignored */
#define AP_RESET_ARMED 1        /* request timeout timer is active */
#define AP_RESET_DO 2           /* AP reset required */
+#define AP_RESET_IN_PROGRESS 3      /* AP reset in progress */
+
+/*
+ * AP interrupt states
+ */
+#define AP_INTR_DISABLED 0      /* AP interrupt disabled */
+#define AP_INTR_ENABLED 1       /* AP interrupt enabled */
+#define AP_INTR_IN_PROGRESS 3       /* AP interrupt in progress */

struct ap_device;
struct ap_message;
@@ -168,6 +178,7 @@ struct ap_device {
    struct timer_list timeout;      /* Timer for request timeouts. */
    int reset;              /* Reset required after req. timeout. */
+   int interrupt;          /* indicate if interrupts are enabled */
    int queue_count;        /* # messages currently on AP queue. */
    struct list_head pendingq;  /* List of message sent to AP queue. */
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index 71e698b85772..bb3908818505 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -39,7 +39,7 @@
 * But the maximum time limit managed by the stomper code is set to 60sec.
 * Hence we have to wait at least that time period.
 */
-#define CEX4_CLEANUP_TIME   (61*HZ)
+#define CEX4_CLEANUP_TIME   (900*HZ)

static struct ap_device_id zcrypt_cex4_ids[] = {
    { AP_DEVICE(AP_DEVICE_TYPE_CEX4)  },
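
For reference, the handshake between the per-device poll functions and ap_poll_all() in the ap_bus.c changes above works roughly as follows: each poll pass ORs AP_POLL_IMMEDIATELY or AP_POLL_AFTER_TIMEOUT into a shared flags word, and the caller either loops again at once or re-arms the high-resolution poll timer. Below is a hedged, self-contained sketch of that flag protocol; apart from the two flag values quoted from the diff, the device structure and behaviour are simulated, not the driver's code.

/* Simplified model of the poll-flag protocol used by ap_poll_all():
 * bit 0 asks for another immediate pass, bit 1 asks for a delayed
 * retry via the poll timer.  Device behaviour is faked. */
#include <stdio.h>

#define AP_POLL_IMMEDIATELY   1 /* continue running poll tasklet */
#define AP_POLL_AFTER_TIMEOUT 2 /* run poll tasklet again after timeout */

struct fake_device {
    int pending_replies;    /* replies ready to be read now */
    int reset_in_progress;  /* reset not finished yet */
};

/* Poll one device: set flag bits describing the follow-up work. */
static void poll_device(struct fake_device *dev, unsigned long *flags)
{
    if (dev->pending_replies > 0) {
        dev->pending_replies--;
        *flags |= AP_POLL_IMMEDIATELY;   /* more may be waiting */
    }
    if (dev->reset_in_progress > 0) {
        dev->reset_in_progress--;
        *flags |= AP_POLL_AFTER_TIMEOUT; /* check again later */
    }
}

static void schedule_poll_timer(void)
{
    printf("  -> poll timer re-armed\n");
}

int main(void)
{
    struct fake_device devs[2] = { { 2, 0 }, { 0, 3 } };
    unsigned long flags;

    do {
        flags = 0;
        for (int i = 0; i < 2; i++)
            poll_device(&devs[i], &flags);
        printf("pass done, flags=%lx\n", flags);
    } while (flags & AP_POLL_IMMEDIATELY);

    if (flags & AP_POLL_AFTER_TIMEOUT)
        schedule_poll_timer();
    return 0;
}

In this toy run the first device drains its replies over immediate passes, while the second device's slow reset keeps setting the delayed-retry bit, so the loop ends with the timer re-armed rather than spinning, which mirrors why ap_reset() and ap_poll_queue() now report AP_POLL_AFTER_TIMEOUT instead of scheduling the timer themselves.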