author | Ingo Molnar <mingo@kernel.org> | 2014-05-07 15:15:46 +0400
committer | Ingo Molnar <mingo@kernel.org> | 2014-05-07 15:15:46 +0400
commit | 2fe5de9ce7d57498abc14b375cad2fcf8c3ee6cc (patch)
tree | 9478e8cf470c1d5bdb2d89b57a7e35919ab95e72 /drivers/char
parent | 08f8aeb55d7727d644dbbbbfb798fe937d47751d (diff)
parent | 2b4cfe64dee0d84506b951d81bf55d9891744d25 (diff)
download | linux-2fe5de9ce7d57498abc14b375cad2fcf8c3ee6cc.tar.xz
Merge branch 'sched/urgent' into sched/core, to avoid conflicts
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'drivers/char')
-rw-r--r-- | drivers/char/Kconfig | 4
-rw-r--r-- | drivers/char/hw_random/Kconfig | 6
-rw-r--r-- | drivers/char/hw_random/atmel-rng.c | 23
-rw-r--r-- | drivers/char/hw_random/bcm2835-rng.c | 10
-rw-r--r-- | drivers/char/hw_random/core.c | 17
-rw-r--r-- | drivers/char/hw_random/nomadik-rng.c | 13
-rw-r--r-- | drivers/char/hw_random/omap3-rom-rng.c | 3
-rw-r--r-- | drivers/char/hw_random/picoxcell-rng.c | 27
-rw-r--r-- | drivers/char/hw_random/timeriomem-rng.c | 40
-rw-r--r-- | drivers/char/hw_random/virtio-rng.c | 3
-rw-r--r-- | drivers/char/ipmi/Kconfig | 12
-rw-r--r-- | drivers/char/ipmi/ipmi_bt_sm.c | 2
-rw-r--r-- | drivers/char/ipmi/ipmi_kcs_sm.c | 5
-rw-r--r-- | drivers/char/ipmi/ipmi_msghandler.c | 239
-rw-r--r-- | drivers/char/ipmi/ipmi_si_intf.c | 145
-rw-r--r-- | drivers/char/pcmcia/Kconfig | 2
-rw-r--r-- | drivers/char/random.c | 244
-rw-r--r-- | drivers/char/tpm/Kconfig | 2
-rw-r--r-- | drivers/char/ttyprintk.c | 15
-rw-r--r-- | drivers/char/virtio_console.c | 4
20 files changed, 450 insertions(+), 366 deletions(-)
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 1386749b48ff..6e9f74a5c095 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -40,7 +40,7 @@ config SGI_MBCS
 source "drivers/tty/serial/Kconfig"
 
 config TTY_PRINTK
-	bool "TTY driver to output user messages via printk"
+	tristate "TTY driver to output user messages via printk"
 	depends on EXPERT && TTY
 	default n
 	---help---
@@ -408,7 +408,7 @@ config APPLICOM
 
 config SONYPI
 	tristate "Sony Vaio Programmable I/O Control Device support"
-	depends on X86 && PCI && INPUT && !64BIT
+	depends on X86_32 && PCI && INPUT
 	---help---
 	  This driver enables access to the Sony Programmable I/O Control
 	  Device which can be found in many (all ?) Sony Vaio laptops.
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 2f2b08457c67..244759bbd7b7 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -342,11 +342,11 @@ config HW_RANDOM_TPM
 	  If unsure, say Y.
 
 config HW_RANDOM_MSM
-	tristate "Qualcomm MSM Random Number Generator support"
-	depends on HW_RANDOM && ARCH_MSM
+	tristate "Qualcomm SoCs Random Number Generator support"
+	depends on HW_RANDOM && ARCH_QCOM
 	---help---
 	  This driver provides kernel-side support for the Random Number
-	  Generator hardware found on Qualcomm MSM SoCs.
+	  Generator hardware found on Qualcomm SoCs.
 
 	  To compile this driver as a module, choose M here. the
 	  module will be called msm-rng.
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index bf9fc6b79328..851bc7e20ad2 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -54,29 +54,22 @@ static int atmel_trng_probe(struct platform_device *pdev)
 	struct resource *res;
 	int ret;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -EINVAL;
-
 	trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
 	if (!trng)
 		return -ENOMEM;
 
-	if (!devm_request_mem_region(&pdev->dev, res->start,
-				     resource_size(res), pdev->name))
-		return -EBUSY;
-
-	trng->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
-	if (!trng->base)
-		return -EBUSY;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	trng->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(trng->base))
+		return PTR_ERR(trng->base);
 
-	trng->clk = clk_get(&pdev->dev, NULL);
+	trng->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(trng->clk))
 		return PTR_ERR(trng->clk);
 
 	ret = clk_enable(trng->clk);
 	if (ret)
-		goto err_enable;
+		return ret;
 
 	writel(TRNG_KEY | 1, trng->base + TRNG_CR);
 	trng->rng.name = pdev->name;
@@ -92,9 +85,6 @@ static int atmel_trng_probe(struct platform_device *pdev)
 
 err_register:
 	clk_disable(trng->clk);
-err_enable:
-	clk_put(trng->clk);
-
 	return ret;
 }
 
@@ -106,7 +96,6 @@ static int atmel_trng_remove(struct platform_device *pdev)
 
 	writel(TRNG_KEY, trng->base + TRNG_CR);
 	clk_disable(trng->clk);
-	clk_put(trng->clk);
 
 	return 0;
 }
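Editor's note: the atmel-rng conversion above is a textbook move to managed (devm_*) resources: memory mappings and clocks obtained through the devm API are released automatically when probe fails or the device goes away, which is why the err_enable label and the clk_put() calls disappear. A minimal sketch of the resulting probe shape, using a hypothetical "foo" driver (names are illustrative, not from this patch):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>

struct foo_priv {
	void __iomem *base;
	struct clk *clk;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;
	struct resource *res;

	/* Freed automatically on probe failure or device removal. */
	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Checks the resource, requests the region, and maps it in one call. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	/* The matching clk_put() is issued by the devm core. */
	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	platform_set_drvdata(pdev, priv);
	return clk_prepare_enable(priv->clk);
}

Note that devm only undoes the get: a clock that was enabled still needs an explicit clk_disable() on the error and remove paths, which is exactly why the atmel and picoxcell diffs keep those calls.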
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index 8c3b255e629a..e900961cdd2e 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -61,18 +61,18 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
 	}
 	bcm2835_rng_ops.priv = (unsigned long)rng_base;
 
+	/* set warm-up count & enable */
+	__raw_writel(RNG_WARMUP_COUNT, rng_base + RNG_STATUS);
+	__raw_writel(RNG_RBGEN, rng_base + RNG_CTRL);
+
 	/* register driver */
 	err = hwrng_register(&bcm2835_rng_ops);
 	if (err) {
 		dev_err(dev, "hwrng registration failed\n");
 		iounmap(rng_base);
-	} else {
+	} else
 		dev_info(dev, "hwrng registered\n");
-		/* set warm-up count & enable */
-		__raw_writel(RNG_WARMUP_COUNT, rng_base + RNG_STATUS);
-		__raw_writel(RNG_RBGEN, rng_base + RNG_CTRL);
-	}
 
 	return err;
 }
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index b9495a8c05c6..334601cc81cf 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -40,6 +40,7 @@
 #include <linux/miscdevice.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
+#include <linux/random.h>
 #include <asm/uaccess.h>
 
 
@@ -301,9 +302,10 @@ err_misc_dereg:
 
 int hwrng_register(struct hwrng *rng)
 {
-	int must_register_misc;
 	int err = -EINVAL;
 	struct hwrng *old_rng, *tmp;
+	unsigned char bytes[16];
+	int bytes_read;
 
 	if (rng->name == NULL ||
 	    (rng->data_read == NULL && rng->read == NULL))
@@ -326,7 +328,6 @@ int hwrng_register(struct hwrng *rng)
 		goto out_unlock;
 	}
 
-	must_register_misc = (current_rng == NULL);
 	old_rng = current_rng;
 	if (!old_rng) {
 		err = hwrng_init(rng);
@@ -335,18 +336,20 @@ int hwrng_register(struct hwrng *rng)
 		current_rng = rng;
 	}
 	err = 0;
-	if (must_register_misc) {
+	if (!old_rng) {
 		err = register_miscdev();
 		if (err) {
-			if (!old_rng) {
-				hwrng_cleanup(rng);
-				current_rng = NULL;
-			}
+			hwrng_cleanup(rng);
+			current_rng = NULL;
 			goto out_unlock;
 		}
 	}
 	INIT_LIST_HEAD(&rng->list);
 	list_add_tail(&rng->list, &rng_list);
+
+	bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
+	if (bytes_read > 0)
+		add_device_randomness(bytes, bytes_read);
 out_unlock:
 	mutex_unlock(&rng_mutex);
 out:
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index 00e9d2d46634..9c8581577246 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -43,7 +43,7 @@ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
 	void __iomem *base;
 	int ret;
 
-	rng_clk = clk_get(&dev->dev, NULL);
+	rng_clk = devm_clk_get(&dev->dev, NULL);
 	if (IS_ERR(rng_clk)) {
 		dev_err(&dev->dev, "could not get rng clock\n");
 		ret = PTR_ERR(rng_clk);
@@ -56,33 +56,28 @@ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
 	if (ret)
 		goto out_clk;
 	ret = -ENOMEM;
-	base = ioremap(dev->res.start, resource_size(&dev->res));
+	base = devm_ioremap(&dev->dev, dev->res.start,
+			    resource_size(&dev->res));
 	if (!base)
 		goto out_release;
 	nmk_rng.priv = (unsigned long)base;
 	ret = hwrng_register(&nmk_rng);
 	if (ret)
-		goto out_unmap;
+		goto out_release;
 	return 0;
 
-out_unmap:
-	iounmap(base);
 out_release:
 	amba_release_regions(dev);
 out_clk:
 	clk_disable(rng_clk);
-	clk_put(rng_clk);
 	return ret;
 }
 
 static int nmk_rng_remove(struct amba_device *dev)
 {
-	void __iomem *base = (void __iomem *)nmk_rng.priv;
 	hwrng_unregister(&nmk_rng);
-	iounmap(base);
 	amba_release_regions(dev);
 	clk_disable(rng_clk);
-	clk_put(rng_clk);
 	return 0;
 }
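Editor's note: the hwrng_register() change above pulls a first sample from every newly registered RNG and feeds it to add_device_randomness(), so the system entropy pool benefits even if nothing ever reads /dev/hwrng. add_device_randomness() mixes bytes in without crediting any entropy, which is the conservative choice for an unverified hardware source. A condensed sketch of the idea (not a drop-in copy of the core.c code):

static void seed_from_new_rng(struct hwrng *rng)
{
	unsigned char bytes[16];
	int bytes_read;

	/* 16 bytes is plenty for an initial stir; wait=1 lets a slow
	 * hardware source take its time. */
	bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
	if (bytes_read > 0)
		/* Mixed into the pool with zero entropy credit. */
		add_device_randomness(bytes, bytes_read);
}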
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
index c853e9e68573..6f2eaffed623 100644
--- a/drivers/char/hw_random/omap3-rom-rng.c
+++ b/drivers/char/hw_random/omap3-rom-rng.c
@@ -103,7 +103,7 @@ static int omap3_rom_rng_probe(struct platform_device *pdev)
 	}
 
 	setup_timer(&idle_timer, omap3_rom_rng_idle, 0);
-	rng_clk = clk_get(&pdev->dev, "ick");
+	rng_clk = devm_clk_get(&pdev->dev, "ick");
 	if (IS_ERR(rng_clk)) {
 		pr_err("unable to get RNG clock\n");
 		return PTR_ERR(rng_clk);
@@ -120,7 +120,6 @@ static int omap3_rom_rng_remove(struct platform_device *pdev)
 {
 	hwrng_unregister(&omap3_rom_rng_ops);
 	clk_disable_unprepare(rng_clk);
-	clk_put(rng_clk);
 	return 0;
 }
diff --git a/drivers/char/hw_random/picoxcell-rng.c b/drivers/char/hw_random/picoxcell-rng.c
index 3d4c2293c6f5..eab5448ad56f 100644
--- a/drivers/char/hw_random/picoxcell-rng.c
+++ b/drivers/char/hw_random/picoxcell-rng.c
@@ -104,24 +104,11 @@ static int picoxcell_trng_probe(struct platform_device *pdev)
 	int ret;
 	struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
-	if (!mem) {
-		dev_warn(&pdev->dev, "no memory resource\n");
-		return -ENOMEM;
-	}
-
-	if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
-				     "picoxcell_trng")) {
-		dev_warn(&pdev->dev, "unable to request io mem\n");
-		return -EBUSY;
-	}
+	rng_base = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(rng_base))
+		return PTR_ERR(rng_base);
 
-	rng_base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
-	if (!rng_base) {
-		dev_warn(&pdev->dev, "unable to remap io mem\n");
-		return -ENOMEM;
-	}
-
-	rng_clk = clk_get(&pdev->dev, NULL);
+	rng_clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(rng_clk)) {
 		dev_warn(&pdev->dev, "no clk\n");
 		return PTR_ERR(rng_clk);
@@ -130,7 +117,7 @@ static int picoxcell_trng_probe(struct platform_device *pdev)
 	ret = clk_enable(rng_clk);
 	if (ret) {
 		dev_warn(&pdev->dev, "unable to enable clk\n");
-		goto err_enable;
+		return ret;
 	}
 
 	picoxcell_trng_start();
@@ -145,9 +132,6 @@ static int picoxcell_trng_probe(struct platform_device *pdev)
 
 err_register:
 	clk_disable(rng_clk);
-err_enable:
-	clk_put(rng_clk);
-
 	return ret;
 }
 
@@ -155,7 +139,6 @@ static int picoxcell_trng_remove(struct platform_device *pdev)
 {
 	hwrng_unregister(&picoxcell_trng);
 	clk_disable(rng_clk);
-	clk_put(rng_clk);
 
 	return 0;
 }
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index 73ce739f8e19..439ff8b28c43 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -118,7 +118,8 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
 	}
 
 	/* Allocate memory for the device structure (and zero it) */
-	priv = kzalloc(sizeof(struct timeriomem_rng_private_data), GFP_KERNEL);
+	priv = devm_kzalloc(&pdev->dev,
+			sizeof(struct timeriomem_rng_private_data), GFP_KERNEL);
 	if (!priv) {
 		dev_err(&pdev->dev, "failed to allocate device structure.\n");
 		return -ENOMEM;
@@ -134,17 +135,16 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
 			period = i;
 		else {
 			dev_err(&pdev->dev, "missing period\n");
-			err = -EINVAL;
-			goto out_free;
+			return -EINVAL;
 		}
-	} else
+	} else {
 		period = pdata->period;
+	}
 
 	priv->period = usecs_to_jiffies(period);
 	if (priv->period < 1) {
 		dev_err(&pdev->dev, "period is less than one jiffy\n");
-		err = -EINVAL;
-		goto out_free;
+		return -EINVAL;
 	}
 
 	priv->expires = jiffies;
@@ -160,24 +160,16 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
 	priv->timeriomem_rng_ops.data_read = timeriomem_rng_data_read;
 	priv->timeriomem_rng_ops.priv = (unsigned long)priv;
 
-	if (!request_mem_region(res->start, resource_size(res),
-				dev_name(&pdev->dev))) {
-		dev_err(&pdev->dev, "request_mem_region failed\n");
-		err = -EBUSY;
+	priv->io_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->io_base)) {
+		err = PTR_ERR(priv->io_base);
 		goto out_timer;
 	}
 
-	priv->io_base = ioremap(res->start, resource_size(res));
-	if (priv->io_base == NULL) {
-		dev_err(&pdev->dev, "ioremap failed\n");
-		err = -EIO;
-		goto out_release_io;
-	}
-
 	err = hwrng_register(&priv->timeriomem_rng_ops);
 	if (err) {
 		dev_err(&pdev->dev, "problem registering\n");
-		goto out;
+		goto out_timer;
 	}
 
 	dev_info(&pdev->dev, "32bits from 0x%p @ %dus\n",
@@ -185,30 +177,18 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
 
 	return 0;
 
-out:
-	iounmap(priv->io_base);
-out_release_io:
-	release_mem_region(res->start, resource_size(res));
 out_timer:
 	del_timer_sync(&priv->timer);
-out_free:
-	kfree(priv);
 	return err;
 }
 
 static int timeriomem_rng_remove(struct platform_device *pdev)
 {
 	struct timeriomem_rng_private_data *priv = platform_get_drvdata(pdev);
-	struct resource *res;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
 	hwrng_unregister(&priv->timeriomem_rng_ops);
 
 	del_timer_sync(&priv->timer);
-	iounmap(priv->io_base);
-	release_mem_region(res->start, resource_size(res));
-	kfree(priv);
 
 	return 0;
 }
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index c12398d1517c..2ce0e225e58c 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -47,8 +47,7 @@ static void register_buffer(u8 *buf, size_t size)
 	sg_init_one(&sg, buf, size);
 
 	/* There should always be room for one buffer. */
-	if (virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL) < 0)
-		BUG();
+	virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL);
 
 	virtqueue_kick(vq);
 }
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index 0baa8fab4ea7..db1c9b7adaa6 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -50,6 +50,18 @@ config IPMI_SI
 	  Currently, only KCS and SMIC are supported.  If
 	  you are using IPMI, you should probably say "y" here.
 
+config IPMI_SI_PROBE_DEFAULTS
+	bool 'Probe for all possible IPMI system interfaces by default'
+	default n
+	depends on IPMI_SI
+	help
+	  Modern systems will usually expose IPMI interfaces via a discoverable
+	  firmware mechanism such as ACPI or DMI. Older systems do not, and so
+	  the driver is forced to probe hardware manually. This may cause boot
+	  delays. Say "n" here to disable this manual probing. IPMI will then
+	  only be available on older systems if the "ipmi_si_intf.trydefaults=1"
+	  boot argument is passed.
+
 config IPMI_WATCHDOG
 	tristate 'IPMI Watchdog Timer'
 	help
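Editor's note: the new IPMI_SI_PROBE_DEFAULTS option pairs with a module-parameter default that appears later in the ipmi_si_intf.c hunk (si_trydefaults = IS_ENABLED(...)). The general wiring — Kconfig supplies the compile-time default, a bool module parameter allows a boot-time override — looks roughly like this (a sketch; the real driver exposes the parameter under its own name via module_param_named()):

#include <linux/module.h>

/* Compile-time default from Kconfig; IS_ENABLED() is 1 when
 * CONFIG_IPMI_SI_PROBE_DEFAULTS=y, otherwise 0. */
static bool trydefaults = IS_ENABLED(CONFIG_IPMI_SI_PROBE_DEFAULTS);

/* Booting with ipmi_si_intf.trydefaults=1 restores legacy port probing
 * on old systems whose firmware does not describe an IPMI interface. */
module_param(trydefaults, bool, 0);
MODULE_PARM_DESC(trydefaults,
		 "Probe the default (legacy) I/O ports if firmware describes no interface");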
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index f5e4cd7617f6..61e71616689b 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -352,7 +352,7 @@ static inline void write_all_bytes(struct si_sm_data *bt)
 
 static inline int read_all_bytes(struct si_sm_data *bt)
 {
-	unsigned char i;
+	unsigned int i;
 
 	/*
 	 * length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index 6a4bdc18955a..8c25f596808a 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -251,8 +251,9 @@ static inline int check_obf(struct si_sm_data *kcs, unsigned char status,
 	if (!GET_STATUS_OBF(status)) {
 		kcs->obf_timeout -= time;
 		if (kcs->obf_timeout < 0) {
-			start_error_recovery(kcs, "OBF not ready in time");
-			return 1;
+			kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+			start_error_recovery(kcs, "OBF not ready in time");
+			return 1;
 		}
 		return 0;
 	}
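Editor's note: the ipmi_kcs_sm.c fix is easy to miss. obf_timeout is a countdown budget; it went negative just as error recovery started, so every subsequent retry saw an already-expired budget and timed out instantly. Re-arming the budget before entering recovery makes the retries meaningful again. An illustrative reduction of the pattern (struct and helper names here are hypothetical):

/* Hypothetical names; only the re-arm-before-recovery shape matters. */
static int wait_for_obf(struct kcs_state *kcs, long elapsed)
{
	if (!obf_set(kcs)) {
		kcs->obf_timeout -= elapsed;
		if (kcs->obf_timeout < 0) {
			/* Reset the budget first, or the next attempt
			 * starts out already timed out. */
			kcs->obf_timeout = OBF_RETRY_TIMEOUT;
			start_error_recovery(kcs, "OBF not ready in time");
			return 1;
		}
		return 0;
	}
	return 1;
}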
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index ec4e10fcf1a5..e6db9381b2c7 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -55,6 +55,7 @@ static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
 static int ipmi_init_msghandler(void);
 static void smi_recv_tasklet(unsigned long);
 static void handle_new_recv_msgs(ipmi_smi_t intf);
+static void need_waiter(ipmi_smi_t intf);
 
 static int initialized;
 
@@ -73,14 +74,28 @@ static struct proc_dir_entry *proc_ipmi_root;
  */
 #define MAX_MSG_TIMEOUT		60000
 
+/* Call every ~1000 ms. */
+#define IPMI_TIMEOUT_TIME	1000
+
+/* How many jiffies does it take to get to the timeout time. */
+#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)
+
+/*
+ * Request events from the queue every second (this is the number of
+ * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
+ * future, IPMI will add a way to know immediately if an event is in
+ * the queue and this silliness can go away.
+ */
+#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))
+
 /*
  * The main "user" data structure.
  */
 struct ipmi_user {
 	struct list_head link;
 
-	/* Set to "0" when the user is destroyed. */
-	int valid;
+	/* Set to false when the user is destroyed. */
+	bool valid;
 
 	struct kref refcount;
 
@@ -92,7 +107,7 @@ struct ipmi_user {
 	ipmi_smi_t intf;
 
 	/* Does this interface receive IPMI events? */
-	int gets_events;
+	bool gets_events;
 };
 
 struct cmd_rcvr {
@@ -383,6 +398,9 @@ struct ipmi_smi {
 	unsigned int waiting_events_count; /* How many events in queue? */
 	char delivering_events;
 	char event_msg_printed;
+	atomic_t event_waiters;
+	unsigned int ticks_to_req_ev;
+	int last_needs_timer;
 
 	/*
 	 * The event receiver for my BMC, only really used at panic
@@ -395,7 +413,7 @@
 
 	/* For handling of maintenance mode. */
 	int maintenance_mode;
-	int maintenance_mode_enable;
+	bool maintenance_mode_enable;
 	int auto_maintenance_timeout;
 	spinlock_t maintenance_mode_lock; /* Used in a timer... */
 
@@ -451,7 +469,6 @@ static DEFINE_MUTEX(ipmi_interfaces_mutex);
 static LIST_HEAD(smi_watchers);
 static DEFINE_MUTEX(smi_watchers_mutex);
 
-
 #define ipmi_inc_stat(intf, stat) \
 	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
 #define ipmi_get_stat(intf, stat) \
@@ -772,6 +789,7 @@ static int intf_next_seq(ipmi_smi_t intf,
 		*seq = i;
 		*seqid = intf->seq_table[i].seqid;
 		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
+		need_waiter(intf);
 	} else {
 		rv = -EAGAIN;
 	}
@@ -941,7 +959,7 @@ int ipmi_create_user(unsigned int if_num,
 	new_user->handler = handler;
 	new_user->handler_data = handler_data;
 	new_user->intf = intf;
-	new_user->gets_events = 0;
+	new_user->gets_events = false;
 
 	if (!try_module_get(intf->handlers->owner)) {
 		rv = -ENODEV;
@@ -962,10 +980,15 @@ int ipmi_create_user(unsigned int if_num,
 	 */
 	mutex_unlock(&ipmi_interfaces_mutex);
 
-	new_user->valid = 1;
+	new_user->valid = true;
 	spin_lock_irqsave(&intf->seq_lock, flags);
 	list_add_rcu(&new_user->link, &intf->users);
 	spin_unlock_irqrestore(&intf->seq_lock, flags);
+	if (handler->ipmi_watchdog_pretimeout) {
+		/* User wants pretimeouts, so make sure to watch for them. */
+		if (atomic_inc_return(&intf->event_waiters) == 1)
+			need_waiter(intf);
+	}
 	*user = new_user;
 	return 0;
 
@@ -1019,7 +1042,13 @@ int ipmi_destroy_user(ipmi_user_t user)
 	struct cmd_rcvr *rcvr;
 	struct cmd_rcvr *rcvrs = NULL;
 
-	user->valid = 0;
+	user->valid = false;
+
+	if (user->handler->ipmi_watchdog_pretimeout)
+		atomic_dec(&intf->event_waiters);
+
+	if (user->gets_events)
+		atomic_dec(&intf->event_waiters);
 
 	/* Remove the user from the interface's sequence table. */
 	spin_lock_irqsave(&intf->seq_lock, flags);
@@ -1155,25 +1184,23 @@ int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
 	if (intf->maintenance_mode != mode) {
 		switch (mode) {
 		case IPMI_MAINTENANCE_MODE_AUTO:
-			intf->maintenance_mode = mode;
 			intf->maintenance_mode_enable
 				= (intf->auto_maintenance_timeout > 0);
 			break;
 
 		case IPMI_MAINTENANCE_MODE_OFF:
-			intf->maintenance_mode = mode;
-			intf->maintenance_mode_enable = 0;
+			intf->maintenance_mode_enable = false;
 			break;
 
 		case IPMI_MAINTENANCE_MODE_ON:
-			intf->maintenance_mode = mode;
-			intf->maintenance_mode_enable = 1;
+			intf->maintenance_mode_enable = true;
 			break;
 
 		default:
 			rv = -EINVAL;
 			goto out_unlock;
 		}
+		intf->maintenance_mode = mode;
 
 		maintenance_mode_update(intf);
 	}
@@ -1184,7 +1211,7 @@ int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
 }
 EXPORT_SYMBOL(ipmi_set_maintenance_mode);
 
-int ipmi_set_gets_events(ipmi_user_t user, int val)
+int ipmi_set_gets_events(ipmi_user_t user, bool val)
 {
 	unsigned long flags;
 	ipmi_smi_t intf = user->intf;
@@ -1194,8 +1221,18 @@ int ipmi_set_gets_events(ipmi_user_t user, int val)
 	INIT_LIST_HEAD(&msgs);
 
 	spin_lock_irqsave(&intf->events_lock, flags);
+	if (user->gets_events == val)
+		goto out;
+
 	user->gets_events = val;
 
+	if (val) {
+		if (atomic_inc_return(&intf->event_waiters) == 1)
+			need_waiter(intf);
+	} else {
+		atomic_dec(&intf->event_waiters);
+	}
+
 	if (intf->delivering_events)
 		/*
 		 * Another thread is delivering events for this, so
@@ -1289,6 +1326,9 @@ int ipmi_register_for_cmd(ipmi_user_t user,
 		goto out_unlock;
 	}
 
+	if (atomic_inc_return(&intf->event_waiters) == 1)
+		need_waiter(intf);
+
 	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
 
  out_unlock:
@@ -1330,6 +1370,7 @@ int ipmi_unregister_for_cmd(ipmi_user_t user,
 	mutex_unlock(&intf->cmd_rcvrs_mutex);
 	synchronize_rcu();
 	while (rcvrs) {
+		atomic_dec(&intf->event_waiters);
 		rcvr = rcvrs;
 		rcvrs = rcvr->next;
 		kfree(rcvr);
@@ -1535,7 +1576,7 @@ static int i_ipmi_request(ipmi_user_t user,
 					= IPMI_MAINTENANCE_MODE_TIMEOUT;
 				if (!intf->maintenance_mode
 				    && !intf->maintenance_mode_enable) {
-					intf->maintenance_mode_enable = 1;
+					intf->maintenance_mode_enable = true;
 					maintenance_mode_update(intf);
 				}
 				spin_unlock_irqrestore(&intf->maintenance_mode_lock,
@@ -2876,6 +2917,8 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
 		     (unsigned long) intf);
 	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
 	spin_lock_init(&intf->events_lock);
+	atomic_set(&intf->event_waiters, 0);
+	intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
 	INIT_LIST_HEAD(&intf->waiting_events);
 	intf->waiting_events_count = 0;
 	mutex_init(&intf->cmd_rcvrs_mutex);
@@ -3965,7 +4008,8 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
 
 static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
 			      struct list_head *timeouts, long timeout_period,
-			      int slot, unsigned long *flags)
+			      int slot, unsigned long *flags,
+			      unsigned int *waiting_msgs)
 {
 	struct ipmi_recv_msg *msg;
 	struct ipmi_smi_handlers *handlers;
@@ -3977,8 +4021,10 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
 		return;
 
 	ent->timeout -= timeout_period;
-	if (ent->timeout > 0)
+	if (ent->timeout > 0) {
+		(*waiting_msgs)++;
 		return;
+	}
 
 	if (ent->retries_left == 0) {
 		/* The message has used all its retries. */
@@ -3995,6 +4041,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
 		struct ipmi_smi_msg *smi_msg;
 		/* More retries, send again. */
 
+		(*waiting_msgs)++;
+
 		/*
 		 * Start with the max timer, set to normal timer after
 		 * the message is sent.
@@ -4040,117 +4088,118 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
 	}
 }
 
-static void ipmi_timeout_handler(long timeout_period)
+static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period)
 {
-	ipmi_smi_t intf;
 	struct list_head timeouts;
 	struct ipmi_recv_msg *msg, *msg2;
 	unsigned long flags;
 	int i;
+	unsigned int waiting_msgs = 0;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
-		tasklet_schedule(&intf->recv_tasklet);
-
-		/*
-		 * Go through the seq table and find any messages that
-		 * have timed out, putting them in the timeouts
-		 * list.
-		 */
-		INIT_LIST_HEAD(&timeouts);
-		spin_lock_irqsave(&intf->seq_lock, flags);
-		for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
-			check_msg_timeout(intf, &(intf->seq_table[i]),
-					  &timeouts, timeout_period, i,
-					  &flags);
-		spin_unlock_irqrestore(&intf->seq_lock, flags);
+	/*
+	 * Go through the seq table and find any messages that
+	 * have timed out, putting them in the timeouts
+	 * list.
+	 */
+	INIT_LIST_HEAD(&timeouts);
+	spin_lock_irqsave(&intf->seq_lock, flags);
+	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
+		check_msg_timeout(intf, &(intf->seq_table[i]),
+				  &timeouts, timeout_period, i,
+				  &flags, &waiting_msgs);
+	spin_unlock_irqrestore(&intf->seq_lock, flags);
 
-		list_for_each_entry_safe(msg, msg2, &timeouts, link)
-			deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
+	list_for_each_entry_safe(msg, msg2, &timeouts, link)
+		deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
 
-		/*
-		 * Maintenance mode handling.  Check the timeout
-		 * optimistically before we claim the lock.  It may
-		 * mean a timeout gets missed occasionally, but that
-		 * only means the timeout gets extended by one period
-		 * in that case.  No big deal, and it avoids the lock
-		 * most of the time.
-		 */
+	/*
+	 * Maintenance mode handling.  Check the timeout
+	 * optimistically before we claim the lock.  It may
+	 * mean a timeout gets missed occasionally, but that
+	 * only means the timeout gets extended by one period
+	 * in that case.  No big deal, and it avoids the lock
+	 * most of the time.
+	 */
+	if (intf->auto_maintenance_timeout > 0) {
+		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
 		if (intf->auto_maintenance_timeout > 0) {
-			spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
-			if (intf->auto_maintenance_timeout > 0) {
-				intf->auto_maintenance_timeout
-					-= timeout_period;
-				if (!intf->maintenance_mode
-				    && (intf->auto_maintenance_timeout <= 0)) {
-					intf->maintenance_mode_enable = 0;
-					maintenance_mode_update(intf);
-				}
+			intf->auto_maintenance_timeout
+				-= timeout_period;
+			if (!intf->maintenance_mode
+			    && (intf->auto_maintenance_timeout <= 0)) {
+				intf->maintenance_mode_enable = false;
+				maintenance_mode_update(intf);
 			}
-			spin_unlock_irqrestore(&intf->maintenance_mode_lock,
-					       flags);
 		}
+		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
				       flags);
 	}
-	rcu_read_unlock();
+
+	tasklet_schedule(&intf->recv_tasklet);
+
+	return waiting_msgs;
 }
 
-static void ipmi_request_event(void)
+static void ipmi_request_event(ipmi_smi_t intf)
 {
-	ipmi_smi_t intf;
 	struct ipmi_smi_handlers *handlers;
 
-	rcu_read_lock();
-	/*
-	 * Called from the timer, no need to check if handlers is
-	 * valid.
-	 */
-	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
-		/* No event requests when in maintenance mode. */
-		if (intf->maintenance_mode_enable)
-			continue;
+	/* No event requests when in maintenance mode. */
+	if (intf->maintenance_mode_enable)
+		return;
 
-		handlers = intf->handlers;
-		if (handlers)
-			handlers->request_events(intf->send_info);
-	}
-	rcu_read_unlock();
+	handlers = intf->handlers;
+	if (handlers)
+		handlers->request_events(intf->send_info);
 }
 
 static struct timer_list ipmi_timer;
 
-/* Call every ~1000 ms. */
-#define IPMI_TIMEOUT_TIME	1000
-
-/* How many jiffies does it take to get to the timeout time. */
-#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)
-
-/*
- * Request events from the queue every second (this is the number of
- * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
- * future, IPMI will add a way to know immediately if an event is in
- * the queue and this silliness can go away.
- */
-#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))
-
 static atomic_t stop_operation;
-static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
 
 static void ipmi_timeout(unsigned long data)
 {
+	ipmi_smi_t intf;
+	int nt = 0;
+
 	if (atomic_read(&stop_operation))
 		return;
 
-	ticks_to_req_ev--;
-	if (ticks_to_req_ev == 0) {
-		ipmi_request_event();
-		ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
-	}
+	rcu_read_lock();
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+		int lnt = 0;
+
+		if (atomic_read(&intf->event_waiters)) {
+			intf->ticks_to_req_ev--;
+			if (intf->ticks_to_req_ev == 0) {
+				ipmi_request_event(intf);
+				intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
+			}
+			lnt++;
+		}
 
-	ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
+		lnt += ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
 
-	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+		lnt = !!lnt;
+		if (lnt != intf->last_needs_timer &&
+		    intf->handlers->set_need_watch)
+			intf->handlers->set_need_watch(intf->send_info, lnt);
+		intf->last_needs_timer = lnt;
+
+		nt += lnt;
+	}
+	rcu_read_unlock();
+
+	if (nt)
+		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
 }
 
+static void need_waiter(ipmi_smi_t intf)
+{
+	/* Racy, but worst case we start the timer twice. */
+	if (!timer_pending(&ipmi_timer))
+		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+}
 
 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
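Editor's note: the net effect of the msghandler changes above is that the per-second timer no longer free-runs. Anything that needs periodic service — an event listener, a watchdog pretimeout consumer, a registered command handler, a pending sequenced message — holds a count in event_waiters, and only the 0 -> 1 transition (atomic_inc_return() == 1) arms the timer via need_waiter(); the handler declines to re-arm once nothing is waiting. A self-contained sketch of that counting idiom, with hypothetical names:

#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/timer.h>

static atomic_t waiters = ATOMIC_INIT(0);
static struct timer_list poll_timer;

static void get_waiter(void)
{
	/* Only the 0 -> 1 transition needs to (re)start the timer. */
	if (atomic_inc_return(&waiters) == 1)
		/* Racy, but the worst case is arming an armed timer. */
		if (!timer_pending(&poll_timer))
			mod_timer(&poll_timer, jiffies + HZ);
}

static void put_waiter(void)
{
	/* Once this reaches zero the handler stops re-arming itself. */
	atomic_dec(&waiters);
}

static void poll_timeout(unsigned long data)
{
	do_periodic_work();	/* hypothetical payload */
	if (atomic_read(&waiters))
		mod_timer(&poll_timer, jiffies + HZ);
}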
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 0f20d3646a46..5d665680ae33 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -217,7 +217,7 @@ struct smi_info {
 	unsigned char msg_flags;
 
 	/* Does the BMC have an event buffer? */
-	char has_event_buffer;
+	bool has_event_buffer;
 
 	/*
	 * If set to true, this will request events the next time the
@@ -230,7 +230,7 @@ struct smi_info {
	 * call.  Generally used after a panic to make sure stuff goes
	 * out.
	 */
-	int run_to_completion;
+	bool run_to_completion;
 
 	/* The I/O port of an SI interface. */
 	int port;
@@ -248,19 +248,25 @@ struct smi_info {
 	/* The timer for this si. */
 	struct timer_list si_timer;
 
+	/* This flag is set, if the timer is running (timer_pending() isn't enough) */
+	bool timer_running;
+
 	/* The time (in jiffies) the last timeout occurred at. */
 	unsigned long last_timeout_jiffies;
 
 	/* Used to gracefully stop the timer without race conditions. */
 	atomic_t stop_operation;
 
+	/* Are we waiting for the events, pretimeouts, received msgs? */
+	atomic_t need_watch;
+
 	/*
	 * The driver will disable interrupts when it gets into a
	 * situation where it cannot handle messages due to lack of
	 * memory.  Once that situation clears up, it will re-enable
	 * interrupts.
	 */
-	int interrupt_disabled;
+	bool interrupt_disabled;
 
 	/* From the get device id response... */
 	struct ipmi_device_id device_id;
@@ -273,7 +279,7 @@ struct smi_info {
	 * True if we allocated the device, false if it came from
	 * someplace else (like PCI).
	 */
-	int dev_registered;
+	bool dev_registered;
 
 	/* Slave address, could be reported from DMI. */
 	unsigned char slave_addr;
@@ -297,19 +303,19 @@ static int force_kipmid[SI_MAX_PARMS];
 static int num_force_kipmid;
 #ifdef CONFIG_PCI
-static int pci_registered;
+static bool pci_registered;
 #endif
 #ifdef CONFIG_ACPI
-static int pnp_registered;
+static bool pnp_registered;
 #endif
 #ifdef CONFIG_PARISC
-static int parisc_registered;
+static bool parisc_registered;
 #endif
 
 static unsigned int kipmid_max_busy_us[SI_MAX_PARMS];
 static int num_max_busy_us;
 
-static int unload_when_empty = 1;
+static bool unload_when_empty = true;
 
 static int add_smi(struct smi_info *smi);
 static int try_smi_init(struct smi_info *smi);
@@ -434,6 +440,13 @@ static void start_clear_flags(struct smi_info *smi_info)
 	smi_info->si_state = SI_CLEARING_FLAGS;
 }
 
+static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
+{
+	smi_info->last_timeout_jiffies = jiffies;
+	mod_timer(&smi_info->si_timer, new_val);
+	smi_info->timer_running = true;
+}
+
 /*
 * When we have a situtaion where we run out of memory and cannot
 * allocate messages, we just leave them in the BMC and run the system
@@ -444,10 +457,9 @@ static inline void disable_si_irq(struct smi_info *smi_info)
 {
 	if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
 		start_disable_irq(smi_info);
-		smi_info->interrupt_disabled = 1;
+		smi_info->interrupt_disabled = true;
 		if (!atomic_read(&smi_info->stop_operation))
-			mod_timer(&smi_info->si_timer,
-				  jiffies + SI_TIMEOUT_JIFFIES);
+			smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
 	}
 }
 
@@ -455,7 +467,7 @@ static inline void enable_si_irq(struct smi_info *smi_info)
 {
 	if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
 		start_enable_irq(smi_info);
-		smi_info->interrupt_disabled = 0;
+		smi_info->interrupt_disabled = false;
 	}
 }
 
@@ -700,7 +712,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
 			dev_warn(smi_info->dev,
				 "Maybe ok, but ipmi might run very slowly.\n");
 		} else
-			smi_info->interrupt_disabled = 0;
+			smi_info->interrupt_disabled = false;
 		smi_info->si_state = SI_NORMAL;
 		break;
 	}
@@ -853,6 +865,19 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
 	return si_sm_result;
 }
 
+static void check_start_timer_thread(struct smi_info *smi_info)
+{
+	if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
+		smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
+		if (smi_info->thread)
+			wake_up_process(smi_info->thread);
+
+		start_next_msg(smi_info);
+		smi_event_handler(smi_info, 0);
+	}
+}
+
 static void sender(void *send_info,
		   struct ipmi_smi_msg *msg,
		   int priority)
@@ -906,27 +931,11 @@ static void sender(void *send_info,
 	else
 		list_add_tail(&msg->link, &smi_info->xmit_msgs);
 
-	if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
-		/*
-		 * last_timeout_jiffies is updated here to avoid
-		 * smi_timeout() handler passing very large time_diff
-		 * value to smi_event_handler() that causes
-		 * the send command to abort.
-		 */
-		smi_info->last_timeout_jiffies = jiffies;
-
-		mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
-
-		if (smi_info->thread)
-			wake_up_process(smi_info->thread);
-
-		start_next_msg(smi_info);
-		smi_event_handler(smi_info, 0);
-	}
+	check_start_timer_thread(smi_info);
 	spin_unlock_irqrestore(&smi_info->si_lock, flags);
 }
 
-static void set_run_to_completion(void *send_info, int i_run_to_completion)
+static void set_run_to_completion(void *send_info, bool i_run_to_completion)
 {
 	struct smi_info *smi_info = send_info;
 	enum si_sm_result result;
@@ -1004,6 +1013,17 @@ static int ipmi_thread(void *data)
 
 		spin_lock_irqsave(&(smi_info->si_lock), flags);
 		smi_result = smi_event_handler(smi_info, 0);
+
+		/*
+		 * If the driver is doing something, there is a possible
+		 * race with the timer.  If the timer handler see idle,
+		 * and the thread here sees something else, the timer
+		 * handler won't restart the timer even though it is
+		 * required.  So start it here if necessary.
+		 */
+		if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
+			smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
 		spin_unlock_irqrestore(&(smi_info->si_lock), flags);
 		busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
						  &busy_until);
@@ -1011,9 +1031,15 @@ static int ipmi_thread(void *data)
 			; /* do nothing */
 		else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
 			schedule();
-		else if (smi_result == SI_SM_IDLE)
-			schedule_timeout_interruptible(100);
-		else
+		else if (smi_result == SI_SM_IDLE) {
+			if (atomic_read(&smi_info->need_watch)) {
+				schedule_timeout_interruptible(100);
+			} else {
+				/* Wait to be woken up when we are needed. */
+				__set_current_state(TASK_INTERRUPTIBLE);
+				schedule();
+			}
+		} else
 			schedule_timeout_interruptible(1);
 	}
 	return 0;
@@ -1024,7 +1050,7 @@ static void poll(void *send_info)
 {
 	struct smi_info *smi_info = send_info;
 	unsigned long flags = 0;
-	int run_to_completion = smi_info->run_to_completion;
+	bool run_to_completion = smi_info->run_to_completion;
 
 	/*
	 * Make sure there is some delay in the poll loop so we can
@@ -1049,6 +1075,17 @@ static void request_events(void *send_info)
 	atomic_set(&smi_info->req_events, 1);
 }
 
+static void set_need_watch(void *send_info, bool enable)
+{
+	struct smi_info *smi_info = send_info;
+	unsigned long flags;
+
+	atomic_set(&smi_info->need_watch, enable);
+	spin_lock_irqsave(&smi_info->si_lock, flags);
+	check_start_timer_thread(smi_info);
+	spin_unlock_irqrestore(&smi_info->si_lock, flags);
+}
+
 static int initialized;
 
 static void smi_timeout(unsigned long data)
@@ -1073,10 +1110,6 @@ static void smi_timeout(unsigned long data)
		     * SI_USEC_PER_JIFFY);
 	smi_result = smi_event_handler(smi_info, time_diff);
 
-	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
-
-	smi_info->last_timeout_jiffies = jiffies_now;
-
 	if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
 		/* Running with interrupts, only do long timeouts. */
 		timeout = jiffies + SI_TIMEOUT_JIFFIES;
@@ -1098,7 +1131,10 @@ static void smi_timeout(unsigned long data)
 
  do_mod_timer:
 	if (smi_result != SI_SM_IDLE)
-		mod_timer(&(smi_info->si_timer), timeout);
+		smi_mod_timer(smi_info, timeout);
+	else
+		smi_info->timer_running = false;
+	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
 }
 
 static irqreturn_t si_irq_handler(int irq, void *data)
@@ -1146,8 +1182,7 @@ static int smi_start_processing(void *send_info,
 
 	/* Set up the timer that drives the interface. */
 	setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
-	new_smi->last_timeout_jiffies = jiffies;
-	mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
+	smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
 
 	/*
	 * Check if the user forcefully enabled the daemon.
@@ -1188,7 +1223,7 @@ static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
 	return 0;
 }
 
-static void set_maintenance_mode(void *send_info, int enable)
+static void set_maintenance_mode(void *send_info, bool enable)
 {
 	struct smi_info *smi_info = send_info;
 
@@ -1202,6 +1237,7 @@ static struct ipmi_smi_handlers handlers = {
	.get_smi_info		= get_smi_info,
	.sender			= sender,
	.request_events		= request_events,
+	.set_need_watch		= set_need_watch,
	.set_maintenance_mode	= set_maintenance_mode,
	.set_run_to_completion	= set_run_to_completion,
	.poll			= poll,
@@ -1229,7 +1265,7 @@ static bool si_tryplatform = 1;
 #ifdef CONFIG_PCI
 static bool si_trypci = 1;
 #endif
-static bool si_trydefaults = 1;
+static bool si_trydefaults = IS_ENABLED(CONFIG_IPMI_SI_PROBE_DEFAULTS);
 static char *si_type[SI_MAX_PARMS];
 #define MAX_SI_TYPE_STR 30
 static char si_type_str[MAX_SI_TYPE_STR];
@@ -1328,7 +1364,7 @@ module_param_array(force_kipmid, int, &num_force_kipmid, 0);
 MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
		 " disabled(0).  Normally the IPMI driver auto-detects"
		 " this, but the value may be overridden by this parm.");
-module_param(unload_when_empty, int, 0);
+module_param(unload_when_empty, bool, 0);
 MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
		 " specified or found, default is 1.  Setting to 0"
		 " is useful for hot add of devices using hotmod.");
@@ -3336,18 +3372,19 @@ static int try_smi_init(struct smi_info *new_smi)
 	INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
 	new_smi->curr_msg = NULL;
 	atomic_set(&new_smi->req_events, 0);
-	new_smi->run_to_completion = 0;
+	new_smi->run_to_completion = false;
 	for (i = 0; i < SI_NUM_STATS; i++)
 		atomic_set(&new_smi->stats[i], 0);
 
-	new_smi->interrupt_disabled = 1;
+	new_smi->interrupt_disabled = true;
 	atomic_set(&new_smi->stop_operation, 0);
+	atomic_set(&new_smi->need_watch, 0);
 	new_smi->intf_num = smi_num;
 	smi_num++;
 
 	rv = try_enable_event_buffer(new_smi);
 	if (rv == 0)
-		new_smi->has_event_buffer = 1;
+		new_smi->has_event_buffer = true;
 
 	/*
	 * Start clearing the flags before we enable interrupts or the
@@ -3381,7 +3418,7 @@ static int try_smi_init(struct smi_info *new_smi)
			       rv);
 			goto out_err;
 		}
-		new_smi->dev_registered = 1;
+		new_smi->dev_registered = true;
 	}
 
 	rv = ipmi_register_smi(&handlers,
@@ -3430,7 +3467,7 @@ static int try_smi_init(struct smi_info *new_smi)
 	wait_for_timer_and_thread(new_smi);
 
  out_err:
-	new_smi->interrupt_disabled = 1;
+	new_smi->interrupt_disabled = true;
 
 	if (new_smi->intf) {
 		ipmi_unregister_smi(new_smi->intf);
@@ -3466,7 +3503,7 @@ static int try_smi_init(struct smi_info *new_smi)
 
 	if (new_smi->dev_registered) {
 		platform_device_unregister(new_smi->pdev);
-		new_smi->dev_registered = 0;
+		new_smi->dev_registered = false;
 	}
 
 	return rv;
@@ -3521,14 +3558,14 @@ static int init_ipmi_si(void)
			printk(KERN_ERR PFX "Unable to register "
			       "PCI driver: %d\n", rv);
 		else
-			pci_registered = 1;
+			pci_registered = true;
 	}
 #endif
 
 #ifdef CONFIG_ACPI
 	if (si_tryacpi) {
 		pnp_register_driver(&ipmi_pnp_driver);
-		pnp_registered = 1;
+		pnp_registered = true;
 	}
 #endif
 
@@ -3544,7 +3581,7 @@ static int init_ipmi_si(void)
 
 #ifdef CONFIG_PARISC
 	register_parisc_driver(&ipmi_parisc_driver);
-	parisc_registered = 1;
+	parisc_registered = true;
 	/* poking PC IO addresses will crash machine, don't do it */
 	si_trydefaults = 0;
 #endif
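Editor's note: the ipmi_si_intf.c side has to cope with a consequence of the on-demand timer: smi_timeout() now lets the timer die when the state machine is idle, so the kthread can observe new work just as the timer handler decided not to re-arm. The driver's answer is a timer_running flag kept accurate under si_lock, with smi_mod_timer() as the single arming point. A condensed sketch of that handshake (struct and field names shortened from the diff):

#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

/* Condensed stand-in for struct smi_info. */
struct smi_sketch {
	spinlock_t lock;
	struct timer_list si_timer;
	bool timer_running;	/* timer_pending() alone is not enough */
};

static void smi_arm_timer(struct smi_sketch *smi, unsigned long expires)
{
	/* Called under smi->lock so flag and timer stay in sync. */
	mod_timer(&smi->si_timer, expires);
	smi->timer_running = true;
}

/* Thread side: if the state machine is busy but the timer handler just
 * saw it idle and let the timer die, restart the timer here. */
static void thread_saw_work(struct smi_sketch *smi)
{
	unsigned long flags;

	spin_lock_irqsave(&smi->lock, flags);
	if (!smi->timer_running)
		smi_arm_timer(smi, jiffies + msecs_to_jiffies(10));
	spin_unlock_irqrestore(&smi->lock, flags);
}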
diff --git a/drivers/char/pcmcia/Kconfig b/drivers/char/pcmcia/Kconfig
index b27f5342fe76..8d3dfb0c8a26 100644
--- a/drivers/char/pcmcia/Kconfig
+++ b/drivers/char/pcmcia/Kconfig
@@ -15,7 +15,7 @@ config SYNCLINK_CS
 	  This driver may be built as a module ( = code which can be
 	  inserted in and removed from the running kernel whenever you want).
-	  The module will be called synclinkmp.  If you want to do that, say M
+	  The module will be called synclink_cs.  If you want to do that, say M
 	  here.
 
 config CARDMAN_4000
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 429b75bb60e8..6b75713d953a 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -295,17 +295,17 @@
 * The minimum number of bits of entropy before we wake up a read on
 * /dev/random.  Should be enough to do a significant reseed.
 */
-static int random_read_wakeup_thresh = 64;
+static int random_read_wakeup_bits = 64;
 
 /*
 * If the entropy count falls under this number of bits, then we
 * should wake up processes which are selecting or polling on write
 * access to /dev/random.
 */
-static int random_write_wakeup_thresh = 28 * OUTPUT_POOL_WORDS;
+static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
 
 /*
- * The minimum number of seconds between urandom pool resending.  We
+ * The minimum number of seconds between urandom pool reseeding.  We
 * do this to limit the amount of entropy that can be drained from the
 * input pool even if there are heavy demands on /dev/urandom.
 */
@@ -322,7 +322,7 @@ static int random_min_urandom_seed = 60;
 * Register.  (See M. Matsumoto & Y. Kurita, 1992.  Twisted GFSR
 * generators.  ACM Transactions on Modeling and Computer Simulation
 * 2(3):179-194.  Also see M. Matsumoto & Y. Kurita, 1994.  Twisted
- * GFSR generators II.  ACM Transactions on Mdeling and Computer
+ * GFSR generators II.  ACM Transactions on Modeling and Computer
 * Simulation 4:254-266)
 *
 * Thanks to Colin Plumb for suggesting this.
@@ -666,10 +666,10 @@ retry:
		  r->entropy_total, _RET_IP_);
 
	if (r == &input_pool) {
-		int entropy_bytes = entropy_count >> ENTROPY_SHIFT;
+		int entropy_bits = entropy_count >> ENTROPY_SHIFT;
 
		/* should we wake readers? */
-		if (entropy_bytes >= random_read_wakeup_thresh) {
+		if (entropy_bits >= random_read_wakeup_bits) {
			wake_up_interruptible(&random_read_wait);
			kill_fasync(&fasync, SIGIO, POLL_IN);
		}
@@ -678,9 +678,9 @@ retry:
		 * forth between them, until the output pools are 75%
		 * full.
		 */
-		if (entropy_bytes > random_write_wakeup_thresh &&
+		if (entropy_bits > random_write_wakeup_bits &&
		    r->initialized &&
-		    r->entropy_total >= 2*random_read_wakeup_thresh) {
+		    r->entropy_total >= 2*random_read_wakeup_bits) {
			static struct entropy_store *last = &blocking_pool;
			struct entropy_store *other = &blocking_pool;
 
@@ -844,6 +844,8 @@ void add_interrupt_randomness(int irq, int irq_flags)
	cycles_t cycles = random_get_entropy();
	__u32 input[4], c_high, j_high;
	__u64 ip;
+	unsigned long seed;
+	int credit;
 
	c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
	j_high = (sizeof(now) > 4) ? now >> 32 : 0;
@@ -862,20 +864,33 @@ void add_interrupt_randomness(int irq, int irq_flags)
 
	r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
	__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
+
	/*
	 * If we don't have a valid cycle counter, and we see
	 * back-to-back timer interrupts, then skip giving credit for
-	 * any entropy.
+	 * any entropy, otherwise credit 1 bit.
	 */
+	credit = 1;
	if (cycles == 0) {
		if (irq_flags & __IRQF_TIMER) {
			if (fast_pool->last_timer_intr)
-				return;
+				credit = 0;
			fast_pool->last_timer_intr = 1;
		} else
			fast_pool->last_timer_intr = 0;
	}
-	credit_entropy_bits(r, 1);
+
+	/*
+	 * If we have architectural seed generator, produce a seed and
+	 * add it to the pool.  For the sake of paranoia count it as
+	 * 50% entropic.
+	 */
+	if (arch_get_random_seed_long(&seed)) {
+		__mix_pool_bytes(r, &seed, sizeof(seed), NULL);
+		credit += sizeof(seed) * 4;
+	}
+
+	credit_entropy_bits(r, credit);
 }
 
 #ifdef CONFIG_BLOCK
@@ -924,19 +939,19 @@ static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
 {
	__u32 tmp[OUTPUT_POOL_WORDS];
 
-	/* For /dev/random's pool, always leave two wakeup worth's BITS */
-	int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4;
+	/* For /dev/random's pool, always leave two wakeups' worth */
+	int rsvd_bytes = r->limit ? 0 : random_read_wakeup_bits / 4;
	int bytes = nbytes;
 
-	/* pull at least as many as BYTES as wakeup BITS */
-	bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
+	/* pull at least as much as a wakeup */
+	bytes = max_t(int, bytes, random_read_wakeup_bits / 8);
	/* but never more than the buffer size */
	bytes = min_t(int, bytes, sizeof(tmp));
 
	trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
				  ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
	bytes = extract_entropy(r->pull, tmp, bytes,
-				random_read_wakeup_thresh / 8, rsvd);
+				random_read_wakeup_bits / 8, rsvd_bytes);
	mix_pool_bytes(r, tmp, bytes, NULL);
	credit_entropy_bits(r, bytes*8);
 }
@@ -952,35 +967,22 @@ static void push_to_pool(struct work_struct *work)
	struct entropy_store *r = container_of(work, struct entropy_store,
					      push_work);
	BUG_ON(!r);
-	_xfer_secondary_pool(r, random_read_wakeup_thresh/8);
+	_xfer_secondary_pool(r, random_read_wakeup_bits/8);
	trace_push_to_pool(r->name, r->entropy_count >> ENTROPY_SHIFT,
			   r->pull->entropy_count >> ENTROPY_SHIFT);
 }
 
 /*
- * These functions extracts randomness from the "entropy pool", and
- * returns it in a buffer.
- *
- * The min parameter specifies the minimum amount we can pull before
- * failing to avoid races that defeat catastrophic reseeding while the
- * reserved parameter indicates how much entropy we must leave in the
- * pool after each pull to avoid starving other readers.
- *
- * Note: extract_entropy() assumes that .poolwords is a multiple of 16 words.
+ * This function decides how many bytes to actually take from the
+ * given pool, and also debits the entropy count accordingly.
 */
-
 static size_t account(struct entropy_store *r, size_t nbytes, int min,
		      int reserved)
 {
-	unsigned long flags;
-	int wakeup_write = 0;
	int have_bytes;
	int entropy_count, orig;
	size_t ibytes;
 
-	/* Hold lock while accounting */
-	spin_lock_irqsave(&r->lock, flags);
-
	BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
 
	/* Can we pull enough? */
@@ -988,29 +990,19 @@ retry:
	entropy_count = orig = ACCESS_ONCE(r->entropy_count);
	have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
	ibytes = nbytes;
-	if (have_bytes < min + reserved) {
+	/* If limited, never pull more than available */
+	if (r->limit)
+		ibytes = min_t(size_t, ibytes, have_bytes - reserved);
+	if (ibytes < min)
		ibytes = 0;
-	} else {
-		/* If limited, never pull more than available */
-		if (r->limit && ibytes + reserved >= have_bytes)
-			ibytes = have_bytes - reserved;
-
-		if (have_bytes >= ibytes + reserved)
-			entropy_count -= ibytes << (ENTROPY_SHIFT + 3);
-		else
-			entropy_count = reserved << (ENTROPY_SHIFT + 3);
-
-		if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
-			goto retry;
-
-		if ((r->entropy_count >> ENTROPY_SHIFT)
-		    < random_write_wakeup_thresh)
-			wakeup_write = 1;
-	}
-	spin_unlock_irqrestore(&r->lock, flags);
+	entropy_count = max_t(int, 0,
			      entropy_count - (ibytes << (ENTROPY_SHIFT + 3)));
+	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+		goto retry;
 
	trace_debit_entropy(r->name, 8 * ibytes);
-	if (wakeup_write) {
+	if (ibytes &&
+	    (r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) {
		wake_up_interruptible(&random_write_wait);
		kill_fasync(&fasync, SIGIO, POLL_OUT);
	}
@@ -1018,6 +1010,12 @@ retry:
 
	return ibytes;
 }
 
+/*
+ * This function does the actual extraction for extract_entropy and
+ * extract_entropy_user.
+ *
+ * Note: we assume that .poolwords is a multiple of 16 words.
+ */
 static void extract_buf(struct entropy_store *r, __u8 *out)
 {
	int i;
@@ -1029,23 +1027,23 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
	__u8 extract[64];
	unsigned long flags;
 
-	/* Generate a hash across the pool, 16 words (512 bits) at a time */
-	sha_init(hash.w);
-	spin_lock_irqsave(&r->lock, flags);
-	for (i = 0; i < r->poolinfo->poolwords; i += 16)
-		sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
-
	/*
-	 * If we have a architectural hardware random number
-	 * generator, mix that in, too.
+	 * If we have an architectural hardware random number
+	 * generator, use it for SHA's initial vector
	 */
+	sha_init(hash.w);
	for (i = 0; i < LONGS(20); i++) {
		unsigned long v;
		if (!arch_get_random_long(&v))
			break;
-		hash.l[i] ^= v;
+		hash.l[i] = v;
	}
 
+	/* Generate a hash across the pool, 16 words (512 bits) at a time */
+	spin_lock_irqsave(&r->lock, flags);
+	for (i = 0; i < r->poolinfo->poolwords; i += 16)
+		sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
+
	/*
	 * We mix the hash back into the pool to prevent backtracking
	 * attacks (where the attacker knows the state of the pool
@@ -1079,6 +1077,15 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
	memset(&hash, 0, sizeof(hash));
 }
 
+/*
+ * This function extracts randomness from the "entropy pool", and
+ * returns it in a buffer.
+ *
+ * The min parameter specifies the minimum amount we can pull before
+ * failing to avoid races that defeat catastrophic reseeding while the
+ * reserved parameter indicates how much entropy we must leave in the
+ * pool after each pull to avoid starving other readers.
+ */
 static ssize_t extract_entropy(struct entropy_store *r, void *buf,
			       size_t nbytes, int min, int reserved)
 {
@@ -1129,6 +1136,10 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
	return ret;
 }
 
+/*
+ * This function extracts randomness from the "entropy pool", and
+ * returns it in a userspace buffer.
+ */
 static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
				    size_t nbytes)
 {
@@ -1170,8 +1181,9 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
 /*
 * This function is the exported kernel interface.  It returns some
 * number of good random numbers, suitable for key generation, seeding
- * TCP sequence numbers, etc.  It does not use the hw random number
- * generator, if available; use get_random_bytes_arch() for that.
+ * TCP sequence numbers, etc.  It does not rely on the hardware random
+ * number generator.  For random bytes direct from the hardware RNG
+ * (when available), use get_random_bytes_arch().
 */
 void get_random_bytes(void *buf, int nbytes)
 {
@@ -1238,7 +1250,8 @@ static void init_std_data(struct entropy_store *r)
	r->last_pulled = jiffies;
	mix_pool_bytes(r, &now, sizeof(now), NULL);
	for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
-		if (!arch_get_random_long(&rv))
+		if (!arch_get_random_seed_long(&rv) &&
+		    !arch_get_random_long(&rv))
			rv = random_get_entropy();
		mix_pool_bytes(r, &rv, sizeof(rv), NULL);
	}
@@ -1281,56 +1294,71 @@ void rand_initialize_disk(struct gendisk *disk)
 }
 #endif
 
-static ssize_t
-random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+/*
+ * Attempt an emergency refill using arch_get_random_seed_long().
+ *
+ * As with add_interrupt_randomness() be paranoid and only
+ * credit the output as 50% entropic.
+ */
+static int arch_random_refill(void)
 {
-	ssize_t n, retval = 0, count = 0;
+	const unsigned int nlongs = 64;	/* Arbitrary number */
+	unsigned int n = 0;
+	unsigned int i;
+	unsigned long buf[nlongs];
 
-	if (nbytes == 0)
+	if (!arch_has_random_seed())
		return 0;
 
-	while (nbytes > 0) {
-		n = nbytes;
-		if (n > SEC_XFER_SIZE)
-			n = SEC_XFER_SIZE;
+	for (i = 0; i < nlongs; i++) {
+		if (arch_get_random_seed_long(&buf[n]))
+			n++;
+	}
 
-		n = extract_entropy_user(&blocking_pool, buf, n);
+	if (n) {
+		unsigned int rand_bytes = n * sizeof(unsigned long);
 
-		if (n < 0) {
-			retval = n;
-			break;
-		}
+		mix_pool_bytes(&input_pool, buf, rand_bytes, NULL);
+		credit_entropy_bits(&input_pool, rand_bytes*4);
+	}
 
+	return n;
+}
+
+static ssize_t
+random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+{
+	ssize_t n;
+
+	if (nbytes == 0)
+		return 0;
+
+	nbytes = min_t(size_t, nbytes, SEC_XFER_SIZE);
+	while (1) {
+		n = extract_entropy_user(&blocking_pool, buf, nbytes);
+		if (n < 0)
+			return n;
		trace_random_read(n*8, (nbytes-n)*8,
				  ENTROPY_BITS(&blocking_pool),
				  ENTROPY_BITS(&input_pool));
+		if (n > 0)
+			return n;
 
-		if (n == 0) {
-			if (file->f_flags & O_NONBLOCK) {
-				retval = -EAGAIN;
-				break;
-			}
-
-			wait_event_interruptible(random_read_wait,
-				ENTROPY_BITS(&input_pool) >=
-				random_read_wakeup_thresh);
-
-			if (signal_pending(current)) {
-				retval = -ERESTARTSYS;
-				break;
-			}
+		/* Pool is (near) empty.  Maybe wait and retry. */
 
+		/* First try an emergency refill */
+		if (arch_random_refill())
			continue;
-		}
 
-		count += n;
-		buf += n;
-		nbytes -= n;
-		break;		/* This break makes the device work */
-				/* like a named pipe */
-	}
+		if (file->f_flags & O_NONBLOCK)
+			return -EAGAIN;
-	return (count ? count : retval);
+		wait_event_interruptible(random_read_wait,
			ENTROPY_BITS(&input_pool) >=
			random_read_wakeup_bits);
+		if (signal_pending(current))
+			return -ERESTARTSYS;
+	}
 }
 
 static ssize_t
@@ -1358,9 +1386,9 @@ random_poll(struct file *file, poll_table * wait)
	poll_wait(file, &random_read_wait, wait);
	poll_wait(file, &random_write_wait, wait);
	mask = 0;
-	if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_thresh)
+	if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits)
		mask |= POLLIN | POLLRDNORM;
-	if (ENTROPY_BITS(&input_pool) < random_write_wakeup_thresh)
+	if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
		mask |= POLLOUT | POLLWRNORM;
	return mask;
 }
@@ -1507,18 +1535,18 @@ EXPORT_SYMBOL(generate_random_uuid);
 #include <linux/sysctl.h>
 
 static int min_read_thresh = 8, min_write_thresh;
-static int max_read_thresh = INPUT_POOL_WORDS * 32;
+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
 static int max_write_thresh = INPUT_POOL_WORDS * 32;
 static char sysctl_bootid[16];
 
 /*
- * These functions is used to return both the bootid UUID, and random
+ * This function is used to return both the bootid UUID, and random
 * UUID.  The difference is in whether table->data is NULL; if it is,
 * then a new UUID is generated and returned to the user.
 *
- * If the user accesses this via the proc interface, it will be returned
- * as an ASCII string in the standard UUID format. If accesses via the
- * sysctl system call, it is returned as 16 bytes of binary data.
+ * If the user accesses this via the proc interface, the UUID will be
+ * returned as an ASCII string in the standard UUID format; if via the
+ * sysctl system call, as 16 bytes of binary data.
 */
 static int proc_do_uuid(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -1583,7 +1611,7 @@ struct ctl_table random_table[] = {
	},
	{
		.procname	= "read_wakeup_threshold",
-		.data		= &random_read_wakeup_thresh,
+		.data		= &random_read_wakeup_bits,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
@@ -1592,7 +1620,7 @@ struct ctl_table random_table[] = {
	},
	{
		.procname	= "write_wakeup_threshold",
-		.data		= &random_write_wakeup_thresh,
+		.data		= &random_write_wakeup_bits,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
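Editor's note: the account() rewrite in the random.c diff above replaces lock-based accounting with an optimistic loop: read entropy_count, compute the debit, cmpxchg() it back, and retry if another CPU raced. A simplified sketch of that lock-free debit (counts in bits; the real function also honors the min/reserved arguments and the pool's limit flag):

#include <linux/atomic.h>
#include <linux/kernel.h>

/* Returns the number of bytes actually granted from the pool. */
static size_t debit_entropy(atomic_t *pool_bits, size_t nbytes)
{
	int orig, updated;
	size_t ibytes;

	do {
		orig = atomic_read(pool_bits);
		/* Never grant more than the pool currently holds. */
		ibytes = min_t(size_t, nbytes, orig >> 3);
		updated = max_t(int, 0, orig - (int)(ibytes << 3));
		/* Another CPU may credit or debit concurrently; retry. */
	} while (atomic_cmpxchg(pool_bits, orig, updated) != orig);

	return ibytes;
}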
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 1a65838888cd..c54cac3f8bc8 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -74,7 +74,7 @@ config TCG_NSC
 
 config TCG_ATMEL
	tristate "Atmel TPM Interface"
-	depends on PPC64 || HAS_IOPORT
+	depends on PPC64 || HAS_IOPORT_MAP
	---help---
	  If you have a TPM security chip from Atmel say Yes and it
	  will be accessible from within Linux.  To compile this driver
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
index daea84c41743..a15ce4ef39cd 100644
--- a/drivers/char/ttyprintk.c
+++ b/drivers/char/ttyprintk.c
@@ -17,7 +17,7 @@
 #include <linux/device.h>
 #include <linux/serial.h>
 #include <linux/tty.h>
-#include <linux/export.h>
+#include <linux/module.h>
 
 struct ttyprintk_port {
	struct tty_port port;
@@ -210,10 +210,19 @@ static int __init ttyprintk_init(void)
	return 0;
 
 error:
-	tty_unregister_driver(ttyprintk_driver);
	put_tty_driver(ttyprintk_driver);
	tty_port_destroy(&tpk_port.port);
-	ttyprintk_driver = NULL;
	return ret;
 }
 
+static void __exit ttyprintk_exit(void)
+{
+	tty_unregister_driver(ttyprintk_driver);
+	put_tty_driver(ttyprintk_driver);
+	tty_port_destroy(&tpk_port.port);
+}
+
 device_initcall(ttyprintk_init);
+module_exit(ttyprintk_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 6928d094451d..60aafb8a1f2e 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -901,9 +901,9 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
		if (len + offset > PAGE_SIZE)
			len = PAGE_SIZE - offset;
 
-		src = buf->ops->map(pipe, buf, 1);
+		src = kmap_atomic(buf->page);
		memcpy(page_address(page) + offset, src + buf->offset, len);
-		buf->ops->unmap(pipe, buf, src);
+		kunmap_atomic(src);
 
		sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
 }
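Editor's note: the virtio_console change replaces the pipe buffer's map/unmap operations with a direct kmap_atomic() of the underlying page — the pipe buf_ops mapping hooks were being removed from the pipe API around this time. A minimal usage sketch of a short-lived atomic kernel mapping (helper name is illustrative):

#include <linux/highmem.h>
#include <linux/string.h>

/* Copy 'len' bytes out of 'page' at 'offset' via a temporary mapping. */
static void copy_from_page(struct page *page, size_t offset,
			   char *dest, size_t len)
{
	char *src = kmap_atomic(page);	/* disables preemption; keep it brief */

	memcpy(dest, src + offset, len);
	kunmap_atomic(src);		/* unmap before sleeping or returning */
}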