Diffstat (limited to 'net/iucv/iucv.c')
-rw-r--r--  net/iucv/iucv.c | 256
1 file changed, 154 insertions(+), 102 deletions(-)
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 1b10d576f222..fb3faf72e850 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -32,7 +32,6 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
-
 #include <linux/spinlock.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
@@ -69,7 +68,7 @@
 #define IUCV_IPNORPY	0x10
 #define IUCV_IPALL	0x80
 
-static int iucv_bus_match (struct device *dev, struct device_driver *drv)
+static int iucv_bus_match(struct device *dev, struct device_driver *drv)
 {
 	return 0;
 }
@@ -78,8 +77,11 @@ struct bus_type iucv_bus = {
 	.name = "iucv",
 	.match = iucv_bus_match,
 };
+EXPORT_SYMBOL(iucv_bus);
 
 struct device *iucv_root;
+EXPORT_SYMBOL(iucv_root);
+
 static int iucv_available;
 
 /* General IUCV interrupt structure */
@@ -90,20 +92,43 @@ struct iucv_irq_data {
 	u32 res2[8];
 };
 
-struct iucv_work {
+struct iucv_irq_list {
 	struct list_head list;
 	struct iucv_irq_data data;
 };
 
-static LIST_HEAD(iucv_work_queue);
-static DEFINE_SPINLOCK(iucv_work_lock);
-
 static struct iucv_irq_data *iucv_irq_data;
 static cpumask_t iucv_buffer_cpumask = CPU_MASK_NONE;
 static cpumask_t iucv_irq_cpumask = CPU_MASK_NONE;
 
-static void iucv_tasklet_handler(unsigned long);
-static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_handler,0);
+/*
+ * Queue of interrupt buffers lock for delivery via the tasklet
+ * (fast but can't call smp_call_function).
+ */
+static LIST_HEAD(iucv_task_queue);
+
+/*
+ * The tasklet for fast delivery of iucv interrupts.
+ */
+static void iucv_tasklet_fn(unsigned long);
+static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_fn,0);
+
+/*
+ * Queue of interrupt buffers for delivery via a work queue
+ * (slower but can call smp_call_function).
+ */
+static LIST_HEAD(iucv_work_queue);
+
+/*
+ * The work element to deliver path pending interrupts.
+ */
+static void iucv_work_fn(struct work_struct *work);
+static DECLARE_WORK(iucv_work, iucv_work_fn);
+
+/*
+ * Spinlock protecting task and work queue.
+ */
+static DEFINE_SPINLOCK(iucv_queue_lock);
 
 enum iucv_command_codes {
 	IUCV_QUERY = 0,
@@ -147,10 +172,10 @@ static unsigned long iucv_max_pathid;
 static DEFINE_SPINLOCK(iucv_table_lock);
 
 /*
- * iucv_tasklet_cpu: contains the number of the cpu executing the tasklet.
- * Needed for iucv_path_sever called from tasklet.
+ * iucv_active_cpu: contains the number of the cpu executing the tasklet
+ * or the work handler. Needed for iucv_path_sever called from tasklet.
  */
-static int iucv_tasklet_cpu = -1;
+static int iucv_active_cpu = -1;
 
 /*
  * Mutex and wait queue for iucv_register/iucv_unregister.
@@ -382,7 +407,7 @@ static void iucv_declare_cpu(void *data)
 	rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm);
 	if (rc) {
 		char *err = "Unknown";
-		switch(rc) {
+		switch (rc) {
 		case 0x03:
 			err = "Directory error";
 			break;
@@ -449,17 +474,19 @@ static void iucv_setmask_mp(void)
 {
 	int cpu;
 
+	preempt_disable();
 	for_each_online_cpu(cpu)
 		/* Enable all cpus with a declared buffer. */
 		if (cpu_isset(cpu, iucv_buffer_cpumask) &&
 		    !cpu_isset(cpu, iucv_irq_cpumask))
 			smp_call_function_on(iucv_allow_cpu, NULL, 0, 1, cpu);
+	preempt_enable();
 }
 
 /**
  * iucv_setmask_up
  *
- * Allow iucv interrupts on a single cpus.
+ * Allow iucv interrupts on a single cpu.
  */
 static void iucv_setmask_up(void)
 {
@@ -493,8 +520,10 @@ static int iucv_enable(void)
 		goto out;
 	/* Declare per cpu buffers. */
 	rc = -EIO;
+	preempt_disable();
 	for_each_online_cpu(cpu)
 		smp_call_function_on(iucv_declare_cpu, NULL, 0, 1, cpu);
+	preempt_enable();
 	if (cpus_empty(iucv_buffer_cpumask))
 		/* No cpu could declare an iucv buffer. */
 		goto out_path;
@@ -519,7 +548,6 @@ static void iucv_disable(void)
 	kfree(iucv_path_table);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 				     unsigned long action, void *hcpu)
 {
@@ -562,10 +590,9 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block iucv_cpu_notifier = {
+static struct notifier_block __cpuinitdata iucv_cpu_notifier = {
 	.notifier_call = iucv_cpu_notify,
 };
-#endif
 
 /**
  * iucv_sever_pathid
@@ -586,48 +613,49 @@ static int iucv_sever_pathid(u16 pathid, u8 userdata[16])
 	return iucv_call_b2f0(IUCV_SEVER, parm);
 }
 
+#ifdef CONFIG_SMP
 /**
- * __iucv_cleanup_pathid
+ * __iucv_cleanup_queue
  * @dummy: unused dummy argument
  *
  * Nop function called via smp_call_function to force work items from
  * pending external iucv interrupts to the work queue.
  */
-static void __iucv_cleanup_pathid(void *dummy)
+static void __iucv_cleanup_queue(void *dummy)
 {
 }
+#endif
 
 /**
- * iucv_cleanup_pathid
- * @pathid: 16 bit pathid
+ * iucv_cleanup_queue
  *
  * Function called after a path has been severed to find all remaining
  * work items for the now stale pathid. The caller needs to hold the
  * iucv_table_lock.
  */
-static void iucv_cleanup_pathid(u16 pathid)
+static void iucv_cleanup_queue(void)
 {
-	struct iucv_work *p, *n;
+	struct iucv_irq_list *p, *n;
 
 	/*
-	 * Path is severed, the pathid can be reused immediatly on
-	 * a iucv connect or a connection pending interrupt.
-	 * iucv_path_connect and connection pending interrupt will
-	 * wait until the iucv_table_lock is released before the
-	 * recycled pathid enters the system.
-	 * Force remaining interrupts to the work queue, then
-	 * scan the work queue for items of this path.
+	 * When a path is severed, the pathid can be reused immediatly
+	 * on a iucv connect or a connection pending interrupt. Remove
+	 * all entries from the task queue that refer to a stale pathid
+	 * (iucv_path_table[ix] == NULL). Only then do the iucv connect
+	 * or deliver the connection pending interrupt. To get all the
+	 * pending interrupts force them to the work queue by calling
+	 * an empty function on all cpus.
	 */
-	smp_call_function(__iucv_cleanup_pathid, NULL, 0, 1);
-	spin_lock_irq(&iucv_work_lock);
-	list_for_each_entry_safe(p, n, &iucv_work_queue, list) {
-		/* Remove work items for pathid except connection pending */
-		if (p->data.ippathid == pathid && p->data.iptype != 0x01) {
+	smp_call_function(__iucv_cleanup_queue, NULL, 0, 1);
+	spin_lock_irq(&iucv_queue_lock);
+	list_for_each_entry_safe(p, n, &iucv_task_queue, list) {
+		/* Remove stale work items from the task queue. */
+		if (iucv_path_table[p->data.ippathid] == NULL) {
 			list_del(&p->list);
 			kfree(p);
 		}
 	}
-	spin_unlock_irq(&iucv_work_lock);
+	spin_unlock_irq(&iucv_queue_lock);
 }
 
 /**
@@ -665,6 +693,7 @@ out_mutex:
 	mutex_unlock(&iucv_register_mutex);
 	return rc;
 }
+EXPORT_SYMBOL(iucv_register);
 
 /**
  * iucv_unregister
@@ -686,7 +715,6 @@ void iucv_unregister(struct iucv_handler *handler, int smp)
 		iucv_sever_pathid(p->pathid, NULL);
 		iucv_path_table[p->pathid] = NULL;
 		list_del(&p->list);
-		iucv_cleanup_pathid(p->pathid);
 		iucv_path_free(p);
 	}
 	spin_unlock_bh(&iucv_table_lock);
@@ -698,6 +726,7 @@ void iucv_unregister(struct iucv_handler *handler, int smp)
 		iucv_setmask_mp();
 	mutex_unlock(&iucv_register_mutex);
 }
+EXPORT_SYMBOL(iucv_unregister);
 
 /**
  * iucv_path_accept
@@ -736,6 +765,7 @@ int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
 	local_bh_enable();
 	return rc;
 }
+EXPORT_SYMBOL(iucv_path_accept);
 
 /**
  * iucv_path_connect
@@ -759,9 +789,9 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
 	union iucv_param *parm;
 	int rc;
 
-	preempt_disable();
-	if (iucv_tasklet_cpu != smp_processor_id())
-		spin_lock_bh(&iucv_table_lock);
+	BUG_ON(in_atomic());
+	spin_lock_bh(&iucv_table_lock);
+	iucv_cleanup_queue();
 	parm = percpu_ptr(iucv_param, smp_processor_id());
 	memset(parm, 0, sizeof(union iucv_param));
 	parm->ctrl.ipmsglim = path->msglim;
@@ -796,11 +826,10 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
 			rc = -EIO;
 		}
 	}
-	if (iucv_tasklet_cpu != smp_processor_id())
-		spin_unlock_bh(&iucv_table_lock);
-	preempt_enable();
+	spin_unlock_bh(&iucv_table_lock);
 	return rc;
 }
+EXPORT_SYMBOL(iucv_path_connect);
 
 /**
  * iucv_path_quiesce:
@@ -827,6 +856,7 @@ int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16])
 	local_bh_enable();
 	return rc;
 }
+EXPORT_SYMBOL(iucv_path_quiesce);
 
 /**
  * iucv_path_resume:
@@ -867,21 +897,20 @@ int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
 {
 	int rc;
 
-	preempt_disable();
-	if (iucv_tasklet_cpu != smp_processor_id())
+	if (iucv_active_cpu != smp_processor_id())
 		spin_lock_bh(&iucv_table_lock);
 	rc = iucv_sever_pathid(path->pathid, userdata);
 	if (!rc) {
 		iucv_path_table[path->pathid] = NULL;
 		list_del_init(&path->list);
-		iucv_cleanup_pathid(path->pathid);
 	}
-	if (iucv_tasklet_cpu != smp_processor_id())
+	if (iucv_active_cpu != smp_processor_id())
 		spin_unlock_bh(&iucv_table_lock);
 	preempt_enable();
 	return rc;
 }
+EXPORT_SYMBOL(iucv_path_sever);
 
 /**
  * iucv_message_purge
@@ -914,6 +943,7 @@ int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
 	local_bh_enable();
 	return rc;
 }
+EXPORT_SYMBOL(iucv_message_purge);
 
 /**
  * iucv_message_receive
@@ -984,6 +1014,7 @@ int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
 	local_bh_enable();
 	return rc;
 }
+EXPORT_SYMBOL(iucv_message_receive);
 
 /**
  * iucv_message_reject
@@ -1012,6 +1043,7 @@ int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg)
 	local_bh_enable();
 	return rc;
 }
+EXPORT_SYMBOL(iucv_message_reject);
 
 /**
  * iucv_message_reply
@@ -1055,6 +1087,7 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
 	local_bh_enable();
 	return rc;
 }
+EXPORT_SYMBOL(iucv_message_reply);
 
 /**
  * iucv_message_send
@@ -1103,6 +1136,7 @@ int iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
 	local_bh_enable();
 	return rc;
 }
+EXPORT_SYMBOL(iucv_message_send);
 
 /**
  * iucv_message_send2way
@@ -1159,6 +1193,7 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
 	local_bh_enable();
 	return rc;
 }
+EXPORT_SYMBOL(iucv_message_send2way);
 
 /**
  * iucv_path_pending
@@ -1246,8 +1281,7 @@ static void iucv_path_complete(struct iucv_irq_data *data)
 	struct iucv_path_complete *ipc = (void *) data;
 	struct iucv_path *path = iucv_path_table[ipc->ippathid];
 
-	BUG_ON(!path || !path->handler);
-	if (path->handler->path_complete)
+	if (path && path->handler && path->handler->path_complete)
 		path->handler->path_complete(path, ipc->ipuser);
 }
 
@@ -1275,14 +1309,14 @@ static void iucv_path_severed(struct iucv_irq_data *data)
 	struct iucv_path_severed *ips = (void *) data;
 	struct iucv_path *path = iucv_path_table[ips->ippathid];
 
-	BUG_ON(!path || !path->handler);
+	if (!path || !path->handler)	/* Already severed */
+		return;
 	if (path->handler->path_severed)
 		path->handler->path_severed(path, ips->ipuser);
 	else {
 		iucv_sever_pathid(path->pathid, NULL);
 		iucv_path_table[path->pathid] = NULL;
 		list_del_init(&path->list);
-		iucv_cleanup_pathid(path->pathid);
 		iucv_path_free(path);
 	}
 }
@@ -1311,8 +1345,7 @@ static void iucv_path_quiesced(struct iucv_irq_data *data)
 	struct iucv_path_quiesced *ipq = (void *) data;
 	struct iucv_path *path = iucv_path_table[ipq->ippathid];
 
-	BUG_ON(!path || !path->handler);
-	if (path->handler->path_quiesced)
+	if (path && path->handler && path->handler->path_quiesced)
 		path->handler->path_quiesced(path, ipq->ipuser);
 }
 
@@ -1340,8 +1373,7 @@ static void iucv_path_resumed(struct iucv_irq_data *data)
 	struct iucv_path_resumed *ipr = (void *) data;
 	struct iucv_path *path = iucv_path_table[ipr->ippathid];
 
-	BUG_ON(!path || !path->handler);
-	if (path->handler->path_resumed)
+	if (path && path->handler && path->handler->path_resumed)
 		path->handler->path_resumed(path, ipr->ipuser);
 }
 
@@ -1373,8 +1405,7 @@ static void iucv_message_complete(struct iucv_irq_data *data)
 	struct iucv_path *path = iucv_path_table[imc->ippathid];
 	struct iucv_message msg;
 
-	BUG_ON(!path || !path->handler);
-	if (path->handler->message_complete) {
+	if (path && path->handler && path->handler->message_complete) {
 		msg.flags = imc->ipflags1;
 		msg.id = imc->ipmsgid;
 		msg.audit = imc->ipaudit;
@@ -1419,8 +1450,7 @@ static void iucv_message_pending(struct iucv_irq_data *data)
 	struct iucv_path *path = iucv_path_table[imp->ippathid];
 	struct iucv_message msg;
 
-	BUG_ON(!path || !path->handler);
-	if (path->handler->message_pending) {
+	if (path && path->handler && path->handler->message_pending) {
 		msg.flags = imp->ipflags1;
 		msg.id = imp->ipmsgid;
 		msg.class = imp->iptrgcls;
@@ -1435,17 +1465,16 @@ static void iucv_message_pending(struct iucv_irq_data *data)
 }
 
 /**
- * iucv_tasklet_handler:
+ * iucv_tasklet_fn:
  *
  * This tasklet loops over the queue of irq buffers created by
  * iucv_external_interrupt, calls the appropriate action handler
  * and then frees the buffer.
  */
-static void iucv_tasklet_handler(unsigned long ignored)
+static void iucv_tasklet_fn(unsigned long ignored)
 {
 	typedef void iucv_irq_fn(struct iucv_irq_data *);
 	static iucv_irq_fn *irq_fn[] = {
-		[0x01] = iucv_path_pending,
 		[0x02] = iucv_path_complete,
 		[0x03] = iucv_path_severed,
 		[0x04] = iucv_path_quiesced,
@@ -1455,38 +1484,70 @@ static void iucv_tasklet_handler(unsigned long ignored)
 		[0x08] = iucv_message_pending,
 		[0x09] = iucv_message_pending,
 	};
-	struct iucv_work *p;
+	struct list_head task_queue = LIST_HEAD_INIT(task_queue);
+	struct iucv_irq_list *p, *n;
 
 	/* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
 	spin_lock(&iucv_table_lock);
-	iucv_tasklet_cpu = smp_processor_id();
-	spin_lock_irq(&iucv_work_lock);
-	while (!list_empty(&iucv_work_queue)) {
-		p = list_entry(iucv_work_queue.next, struct iucv_work, list);
+	iucv_active_cpu = smp_processor_id();
+	spin_lock_irq(&iucv_queue_lock);
+	list_splice_init(&iucv_task_queue, &task_queue);
+	spin_unlock_irq(&iucv_queue_lock);
+
+	list_for_each_entry_safe(p, n, &task_queue, list) {
 		list_del_init(&p->list);
-		spin_unlock_irq(&iucv_work_lock);
 		irq_fn[p->data.iptype](&p->data);
 		kfree(p);
-		spin_lock_irq(&iucv_work_lock);
 	}
-	spin_unlock_irq(&iucv_work_lock);
-	iucv_tasklet_cpu = -1;
+	iucv_active_cpu = -1;
 	spin_unlock(&iucv_table_lock);
 }
 
 /**
+ * iucv_work_fn:
+ *
+ * This work function loops over the queue of path pending irq blocks
+ * created by iucv_external_interrupt, calls the appropriate action
+ * handler and then frees the buffer.
+ */
+static void iucv_work_fn(struct work_struct *work)
+{
+	typedef void iucv_irq_fn(struct iucv_irq_data *);
+	struct list_head work_queue = LIST_HEAD_INIT(work_queue);
+	struct iucv_irq_list *p, *n;
+
+	/* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
+	spin_lock_bh(&iucv_table_lock);
+	iucv_active_cpu = smp_processor_id();
+
+	spin_lock_irq(&iucv_queue_lock);
+	list_splice_init(&iucv_work_queue, &work_queue);
+	spin_unlock_irq(&iucv_queue_lock);
+
+	iucv_cleanup_queue();
+	list_for_each_entry_safe(p, n, &work_queue, list) {
+		list_del_init(&p->list);
+		iucv_path_pending(&p->data);
+		kfree(p);
+	}
+
+	iucv_active_cpu = -1;
+	spin_unlock_bh(&iucv_table_lock);
+}
+
+/**
  * iucv_external_interrupt
  * @code: irq code
  *
  * Handles external interrupts coming in from CP.
- * Places the interrupt buffer on a queue and schedules iucv_tasklet_handler().
+ * Places the interrupt buffer on a queue and schedules iucv_tasklet_fn().
 */
 static void iucv_external_interrupt(u16 code)
 {
 	struct iucv_irq_data *p;
-	struct iucv_work *work;
+	struct iucv_irq_list *work;
 
 	p = percpu_ptr(iucv_irq_data, smp_processor_id());
 	if (p->ippathid >= iucv_max_pathid) {
@@ -1500,16 +1561,23 @@ static void iucv_external_interrupt(u16 code)
 		printk(KERN_ERR "iucv_do_int: unknown iucv interrupt\n");
 		return;
 	}
-	work = kmalloc(sizeof(struct iucv_work), GFP_ATOMIC);
+	work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC);
 	if (!work) {
 		printk(KERN_WARNING "iucv_external_interrupt: out of memory\n");
 		return;
 	}
 	memcpy(&work->data, p, sizeof(work->data));
-	spin_lock(&iucv_work_lock);
-	list_add_tail(&work->list, &iucv_work_queue);
-	spin_unlock(&iucv_work_lock);
-	tasklet_schedule(&iucv_tasklet);
+	spin_lock(&iucv_queue_lock);
+	if (p->iptype == 0x01) {
+		/* Path pending interrupt. */
+		list_add_tail(&work->list, &iucv_work_queue);
+		schedule_work(&iucv_work);
+	} else {
+		/* The other interrupts. */
+		list_add_tail(&work->list, &iucv_task_queue);
+		tasklet_schedule(&iucv_tasklet);
+	}
+	spin_unlock(&iucv_queue_lock);
 }
 
 /**
@@ -1517,7 +1585,7 @@ static void iucv_external_interrupt(u16 code)
 *
 * Allocates and initializes various data structures.
 */
-static int iucv_init(void)
+static int __init iucv_init(void)
 {
 	int rc;
 
@@ -1528,7 +1596,7 @@ static int iucv_init(void)
 	rc = iucv_query_maxconn();
 	if (rc)
 		goto out;
-	rc = register_external_interrupt (0x4000, iucv_external_interrupt);
+	rc = register_external_interrupt(0x4000, iucv_external_interrupt);
 	if (rc)
 		goto out;
 	rc = bus_register(&iucv_bus);
@@ -1539,7 +1607,7 @@ static int iucv_init(void)
 		rc = PTR_ERR(iucv_root);
 		goto out_bus;
 	}
-	/* Note: GFP_DMA used used to get memory below 2G */
+	/* Note: GFP_DMA used to get memory below 2G */
 	iucv_irq_data = percpu_alloc(sizeof(struct iucv_irq_data),
 				     GFP_KERNEL|GFP_DMA);
 	if (!iucv_irq_data) {
@@ -1577,14 +1645,16 @@ out:
 *
 * Frees everything allocated from iucv_init.
 */
-static void iucv_exit(void)
+static void __exit iucv_exit(void)
 {
-	struct iucv_work *p, *n;
+	struct iucv_irq_list *p, *n;
 
-	spin_lock_irq(&iucv_work_lock);
+	spin_lock_irq(&iucv_queue_lock);
+	list_for_each_entry_safe(p, n, &iucv_task_queue, list)
+		kfree(p);
 	list_for_each_entry_safe(p, n, &iucv_work_queue, list)
 		kfree(p);
-	spin_unlock_irq(&iucv_work_lock);
+	spin_unlock_irq(&iucv_queue_lock);
 	unregister_hotcpu_notifier(&iucv_cpu_notifier);
 	percpu_free(iucv_param);
 	percpu_free(iucv_irq_data);
@@ -1596,24 +1666,6 @@ static void iucv_exit(void)
 subsys_initcall(iucv_init);
 module_exit(iucv_exit);
 
-/**
- * Export all public stuff
- */
-EXPORT_SYMBOL (iucv_bus);
-EXPORT_SYMBOL (iucv_root);
-EXPORT_SYMBOL (iucv_register);
-EXPORT_SYMBOL (iucv_unregister);
-EXPORT_SYMBOL (iucv_path_accept);
-EXPORT_SYMBOL (iucv_path_connect);
-EXPORT_SYMBOL (iucv_path_quiesce);
-EXPORT_SYMBOL (iucv_path_sever);
-EXPORT_SYMBOL (iucv_message_purge);
-EXPORT_SYMBOL (iucv_message_receive);
-EXPORT_SYMBOL (iucv_message_reject);
-EXPORT_SYMBOL (iucv_message_reply);
-EXPORT_SYMBOL (iucv_message_send);
-EXPORT_SYMBOL (iucv_message_send2way);
-
 MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
 MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver");
 MODULE_LICENSE("GPL");
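
The core of the patch is visible in iucv_external_interrupt(): the single interrupt queue is split in two. Most interrupt types stay on iucv_task_queue and are drained by a tasklet in softirq context, which is fast but must never call smp_call_function(); path pending interrupts (iptype 0x01) go to iucv_work_queue and are drained by a work item in process context, where smp_call_function() is legal. Below is a minimal, self-contained sketch of that dispatch pattern; all demo_* names are hypothetical stand-ins, written against the 2.6-era APIs the patch itself uses (two-argument DECLARE_WORK(), DECLARE_TASKLET()), not the driver code itself.

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_item {
	struct list_head list;
	int type;		/* 0x01 means "path pending" here */
	int pathid;		/* index into a path table */
};

static LIST_HEAD(demo_task_queue);	/* drained by the tasklet */
static LIST_HEAD(demo_work_queue);	/* drained by the work item */
static DEFINE_SPINLOCK(demo_queue_lock);

static void demo_tasklet_fn(unsigned long ignored)
{
	struct list_head queue = LIST_HEAD_INIT(queue);
	struct demo_item *p, *n;

	/* Splice the whole queue out under the lock, process without it. */
	spin_lock_irq(&demo_queue_lock);
	list_splice_init(&demo_task_queue, &queue);
	spin_unlock_irq(&demo_queue_lock);

	list_for_each_entry_safe(p, n, &queue, list) {
		list_del_init(&p->list);
		/* fast, non-sleeping handling goes here */
		kfree(p);
	}
}
static DECLARE_TASKLET(demo_tasklet, demo_tasklet_fn, 0);

static void demo_work_fn(struct work_struct *work)
{
	struct list_head queue = LIST_HEAD_INIT(queue);
	struct demo_item *p, *n;

	spin_lock_irq(&demo_queue_lock);
	list_splice_init(&demo_work_queue, &queue);
	spin_unlock_irq(&demo_queue_lock);

	list_for_each_entry_safe(p, n, &queue, list) {
		list_del_init(&p->list);
		/* process context: smp_call_function() is allowed here */
		kfree(p);
	}
}
static DECLARE_WORK(demo_work, demo_work_fn);

/* Called from the external interrupt handler, interrupts disabled. */
static void demo_deliver(int type, int pathid)
{
	struct demo_item *item = kmalloc(sizeof(*item), GFP_ATOMIC);

	if (!item)
		return;
	item->type = type;
	item->pathid = pathid;
	spin_lock(&demo_queue_lock);
	if (type == 0x01) {
		list_add_tail(&item->list, &demo_work_queue);
		schedule_work(&demo_work);
	} else {
		list_add_tail(&item->list, &demo_task_queue);
		tasklet_schedule(&demo_tasklet);
	}
	spin_unlock(&demo_queue_lock);
}

Note the splice-then-process shape in both consumers: the queue lock is held only for the list_splice_init(), never across a handler call, which is also how iucv_tasklet_fn() and iucv_work_fn() avoid holding iucv_queue_lock while user callbacks run.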
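iucv_cleanup_queue() supplies the second half of the scheme. A severed pathid can be reused by CP immediately, so before a new connect the driver must drop any queued interrupt that still names the stale pathid. To make sure no such interrupt is still in flight on another cpu, it first calls an empty function everywhere via smp_call_function(); once that returns, every pending event has reached a queue and can be inspected under the queue lock. The sketch below continues the hypothetical demo_* names from above; the four-argument smp_call_function(func, info, nonatomic, wait) is the 2.6-era signature the patch uses.

/* NULL entry == stale pathid; the real driver keeps struct iucv_path *. */
#define DEMO_MAX_PATHID 64
static void *demo_path_table[DEMO_MAX_PATHID];

/* Does nothing; the cross-cpu round trip itself is the barrier. */
static void demo_nop(void *dummy)
{
}

/* Caller must already hold the lock that serializes new connects. */
static void demo_cleanup_queue(void)
{
	struct demo_item *p, *n;

	/* Flush in-flight interrupt delivery on all other cpus.
	 * 2.6-era signature: (func, info, nonatomic, wait). */
	smp_call_function(demo_nop, NULL, 0, 1);

	spin_lock_irq(&demo_queue_lock);
	list_for_each_entry_safe(p, n, &demo_task_queue, list) {
		if (demo_path_table[p->pathid] == NULL) {
			list_del(&p->list);
			kfree(p);
		}
	}
	spin_unlock_irq(&demo_queue_lock);
}

This is why the dispatch split matters: after the change, smp_call_function() is only ever issued from process context (iucv_path_connect() and iucv_work_fn()), never from the tasklet, whose queue comment explicitly says it "can't call smp_call_function".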
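A smaller but easy-to-miss change: the for_each_online_cpu() walks in iucv_setmask_mp() and iucv_enable() are now bracketed by preempt_disable()/preempt_enable(), which pins the calling cpu and, on kernels of this generation, also holds off cpu hotplug while the cross-calls are issued. A sketch of the same shape, again with hypothetical demo_* names; smp_call_function_on() is the s390-specific per-cpu cross-call of that era, with the signature (func, info, nonatomic, wait, cpu).

#include <linux/cpumask.h>
#include <linux/preempt.h>
#include <linux/smp.h>

static cpumask_t demo_buffer_cpumask = CPU_MASK_NONE;

static void demo_allow_cpu(void *info)
{
	/* per-cpu enable action, runs on the target cpu */
}

static void demo_setmask_mp(void)
{
	int cpu;

	preempt_disable();	/* pin this cpu, stabilize the online mask */
	for_each_online_cpu(cpu)
		if (cpu_isset(cpu, demo_buffer_cpumask))
			/* s390 primitive of that era, hedged above */
			smp_call_function_on(demo_allow_cpu, NULL, 0, 1, cpu);
	preempt_enable();
}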