Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/ds1620.c              |   8
-rw-r--r--  drivers/char/hw_random/omap-rng.c  | 121
-rw-r--r--  drivers/char/mbcs.c                |   4
-rw-r--r--  drivers/char/mem.c                 |   2
-rw-r--r--  drivers/char/mspec.c               |   2
-rw-r--r--  drivers/char/nwflash.c             |   4
-rw-r--r--  drivers/char/raw.c                 |   2
-rw-r--r--  drivers/char/virtio_console.c      | 198
8 files changed, 259 insertions, 82 deletions
diff --git a/drivers/char/ds1620.c b/drivers/char/ds1620.c
index aab9605f0b43..24ffd8cec51e 100644
--- a/drivers/char/ds1620.c
+++ b/drivers/char/ds1620.c
@@ -74,21 +74,21 @@ static inline void netwinder_ds1620_reset(void)
static inline void netwinder_lock(unsigned long *flags)
{
- spin_lock_irqsave(&nw_gpio_lock, *flags);
+ raw_spin_lock_irqsave(&nw_gpio_lock, *flags);
}
static inline void netwinder_unlock(unsigned long *flags)
{
- spin_unlock_irqrestore(&nw_gpio_lock, *flags);
+ raw_spin_unlock_irqrestore(&nw_gpio_lock, *flags);
}
static inline void netwinder_set_fan(int i)
{
unsigned long flags;
- spin_lock_irqsave(&nw_gpio_lock, flags);
+ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
nw_gpio_modify_op(GPIO_FAN, i ? GPIO_FAN : 0);
- spin_unlock_irqrestore(&nw_gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
}
static inline int netwinder_get_fan(void)
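Note: the ds1620 hunk above (and the nwflash hunk further down) only switch callers to the raw_* lock API, following the arch-side conversion of the shared nw_gpio_lock to a raw_spinlock_t. The sketch below is illustrative only, not code from this patch, and all names in it are invented:

/*
 * Illustrative sketch: the general raw-spinlock pattern these hunks
 * follow.  On PREEMPT_RT a plain spinlock_t can become a sleeping
 * lock, so a lock shared with genuinely atomic/IRQ paths is declared
 * raw_spinlock_t and taken with the raw_spin_* API.
 */
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_hw_lock);	/* stays a spinning lock on -rt */

static void example_poke_gpio(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_hw_lock, flags);
	/* touch the shared GPIO/CPLD register under the raw lock */
	raw_spin_unlock_irqrestore(&example_hw_lock, flags);
}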
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 4fbdceb6f773..a5effd813abd 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -18,11 +18,12 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/random.h>
-#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/hw_random.h>
#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <asm/io.h>
@@ -46,26 +47,36 @@
#define RNG_SYSSTATUS 0x44 /* System status
[0] = RESETDONE */
-static void __iomem *rng_base;
-static struct clk *rng_ick;
-static struct platform_device *rng_dev;
+/**
+ * struct omap_rng_private_data - RNG IP block-specific data
+ * @base: virtual address of the beginning of the RNG IP block registers
+ * @mem_res: struct resource * for the IP block registers physical memory
+ */
+struct omap_rng_private_data {
+ void __iomem *base;
+ struct resource *mem_res;
+};
-static inline u32 omap_rng_read_reg(int reg)
+static inline u32 omap_rng_read_reg(struct omap_rng_private_data *priv, int reg)
{
- return __raw_readl(rng_base + reg);
+ return __raw_readl(priv->base + reg);
}
-static inline void omap_rng_write_reg(int reg, u32 val)
+static inline void omap_rng_write_reg(struct omap_rng_private_data *priv,
+ int reg, u32 val)
{
- __raw_writel(val, rng_base + reg);
+ __raw_writel(val, priv->base + reg);
}
static int omap_rng_data_present(struct hwrng *rng, int wait)
{
+ struct omap_rng_private_data *priv;
int data, i;
+ priv = (struct omap_rng_private_data *)rng->priv;
+
for (i = 0; i < 20; i++) {
- data = omap_rng_read_reg(RNG_STAT_REG) ? 0 : 1;
+ data = omap_rng_read_reg(priv, RNG_STAT_REG) ? 0 : 1;
if (data || !wait)
break;
/* RNG produces data fast enough (2+ MBit/sec, even
@@ -80,9 +91,13 @@ static int omap_rng_data_present(struct hwrng *rng, int wait)
static int omap_rng_data_read(struct hwrng *rng, u32 *data)
{
- *data = omap_rng_read_reg(RNG_OUT_REG);
+ struct omap_rng_private_data *priv;
+
+ priv = (struct omap_rng_private_data *)rng->priv;
+
+ *data = omap_rng_read_reg(priv, RNG_OUT_REG);
- return 4;
+ return sizeof(u32);
}
static struct hwrng omap_rng_ops = {
@@ -93,69 +108,68 @@ static struct hwrng omap_rng_ops = {
static int __devinit omap_rng_probe(struct platform_device *pdev)
{
- struct resource *res;
+ struct omap_rng_private_data *priv;
int ret;
- /*
- * A bit ugly, and it will never actually happen but there can
- * be only one RNG and this catches any bork
- */
- if (rng_dev)
- return -EBUSY;
-
- if (cpu_is_omap24xx()) {
- rng_ick = clk_get(&pdev->dev, "ick");
- if (IS_ERR(rng_ick)) {
- dev_err(&pdev->dev, "Could not get rng_ick\n");
- ret = PTR_ERR(rng_ick);
- return ret;
- } else
- clk_enable(rng_ick);
- }
+ priv = kzalloc(sizeof(struct omap_rng_private_data), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&pdev->dev, "could not allocate memory\n");
+ return -ENOMEM;
+ };
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ omap_rng_ops.priv = (unsigned long)priv;
+ dev_set_drvdata(&pdev->dev, priv);
- rng_base = devm_request_and_ioremap(&pdev->dev, res);
- if (!rng_base) {
+ priv->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!priv->mem_res) {
+ ret = -ENOENT;
+ goto err_ioremap;
+ }
+
+ priv->base = devm_request_and_ioremap(&pdev->dev, priv->mem_res);
+ if (!priv->base) {
ret = -ENOMEM;
goto err_ioremap;
}
- dev_set_drvdata(&pdev->dev, res);
+ dev_set_drvdata(&pdev->dev, priv);
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
ret = hwrng_register(&omap_rng_ops);
if (ret)
goto err_register;
dev_info(&pdev->dev, "OMAP Random Number Generator ver. %02x\n",
- omap_rng_read_reg(RNG_REV_REG));
- omap_rng_write_reg(RNG_MASK_REG, 0x1);
+ omap_rng_read_reg(priv, RNG_REV_REG));
- rng_dev = pdev;
+ omap_rng_write_reg(priv, RNG_MASK_REG, 0x1);
return 0;
err_register:
- rng_base = NULL;
+ priv->base = NULL;
+ pm_runtime_disable(&pdev->dev);
err_ioremap:
- if (cpu_is_omap24xx()) {
- clk_disable(rng_ick);
- clk_put(rng_ick);
- }
+ kfree(priv);
+
return ret;
}
static int __exit omap_rng_remove(struct platform_device *pdev)
{
+ struct omap_rng_private_data *priv = dev_get_drvdata(&pdev->dev);
+
hwrng_unregister(&omap_rng_ops);
- omap_rng_write_reg(RNG_MASK_REG, 0x0);
+ omap_rng_write_reg(priv, RNG_MASK_REG, 0x0);
- if (cpu_is_omap24xx()) {
- clk_disable(rng_ick);
- clk_put(rng_ick);
- }
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ release_mem_region(priv->mem_res->start, resource_size(priv->mem_res));
- rng_base = NULL;
+ kfree(priv);
return 0;
}
@@ -164,13 +178,21 @@ static int __exit omap_rng_remove(struct platform_device *pdev)
static int omap_rng_suspend(struct device *dev)
{
- omap_rng_write_reg(RNG_MASK_REG, 0x0);
+ struct omap_rng_private_data *priv = dev_get_drvdata(dev);
+
+ omap_rng_write_reg(priv, RNG_MASK_REG, 0x0);
+ pm_runtime_put_sync(dev);
+
return 0;
}
static int omap_rng_resume(struct device *dev)
{
- omap_rng_write_reg(RNG_MASK_REG, 0x1);
+ struct omap_rng_private_data *priv = dev_get_drvdata(dev);
+
+ pm_runtime_get_sync(dev);
+ omap_rng_write_reg(priv, RNG_MASK_REG, 0x1);
+
return 0;
}
@@ -198,9 +220,6 @@ static struct platform_driver omap_rng_driver = {
static int __init omap_rng_init(void)
{
- if (!cpu_is_omap16xx() && !cpu_is_omap24xx())
- return -ENODEV;
-
return platform_driver_register(&omap_rng_driver);
}
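Note: a condensed sketch of the probe pattern the omap-rng conversion above adopts: per-device state replaces the file-scope globals, the registers come from a devm-managed mapping, and runtime PM takes over from the old explicit clk_get()/clk_enable() handling on OMAP2. This is an illustration with made-up names, not the driver's actual code:

#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

struct example_rng_priv {		/* stand-in for the driver's private data */
	void __iomem *base;
};

static int example_rng_probe(struct platform_device *pdev)
{
	struct example_rng_priv *priv;
	struct resource *res;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_request_and_ioremap(&pdev->dev, res);
	if (!priv->base) {
		kfree(priv);
		return -ENOMEM;
	}
	dev_set_drvdata(&pdev->dev, priv);

	/* Runtime PM replaces the hand-rolled "ick" clock management. */
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	return 0;
}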
diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
index 47ff7e470d87..f74e892711dd 100644
--- a/drivers/char/mbcs.c
+++ b/drivers/char/mbcs.c
@@ -507,7 +507,7 @@ static int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
+ /* Remap-pfn-range will mark the range VM_IO */
if (remap_pfn_range(vma,
vma->vm_start,
__pa(soft->gscr_addr) >> PAGE_SHIFT,
@@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
return 0;
}
-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
{
.part_num = MBCS_PART_NUM,
.mfg_num = MBCS_MFG_NUM,
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index e5eedfa24c91..0537903c985b 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -322,7 +322,7 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
vma->vm_ops = &mmap_mem_ops;
- /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
+ /* Remap-pfn-range will mark the range VM_IO */
if (remap_pfn_range(vma,
vma->vm_start,
vma->vm_pgoff,
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index 845f97fd1832..e1f60f968fdd 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -286,7 +286,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
atomic_set(&vdata->refcnt, 1);
vma->vm_private_data = vdata;
- vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND);
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &mspec_vm_ops;
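Note: the mem.c, mbcs.c and mspec.c hunks are all fallout from the removal of VM_RESERVED in this kernel series: mappings that must not be expanded or dumped now set VM_DONTEXPAND | VM_DONTDUMP explicitly, and remap_pfn_range() marks the same bits itself. A hedged sketch of a driver mmap handler using the replacement flags; the names are invented for illustration:

#include <linux/fs.h>
#include <linux/mm.h>

static const struct vm_operations_struct example_vm_ops;	/* assumed fault/close hooks */

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* VM_RESERVED is gone; these bits carry the "don't expand, don't dump" intent. */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &example_vm_ops;
	return 0;
}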
diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c
index a0e2f7d70355..e371480d3639 100644
--- a/drivers/char/nwflash.c
+++ b/drivers/char/nwflash.c
@@ -583,9 +583,9 @@ static void kick_open(void)
* we want to write a bit pattern XXX1 to Xilinx to enable
* the write gate, which will be open for about the next 2ms.
*/
- spin_lock_irqsave(&nw_gpio_lock, flags);
+ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE);
- spin_unlock_irqrestore(&nw_gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
/*
* let the ISA bus to catch on...
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 54a3a6d09819..0bb207eaef2f 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -285,7 +285,7 @@ static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd,
static const struct file_operations raw_fops = {
.read = do_sync_read,
- .aio_read = generic_file_aio_read,
+ .aio_read = blkdev_aio_read,
.write = do_sync_write,
.aio_write = blkdev_aio_write,
.fsync = blkdev_fsync,
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 060a672ebb7b..8ab9c3d4bf13 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -24,6 +24,8 @@
#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/fs.h>
+#include <linux/splice.h>
+#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/poll.h>
@@ -474,26 +476,53 @@ static ssize_t send_control_msg(struct port *port, unsigned int event,
return 0;
}
+struct buffer_token {
+ union {
+ void *buf;
+ struct scatterlist *sg;
+ } u;
+ /* If sgpages == 0 then buf is used, else sg is used */
+ unsigned int sgpages;
+};
+
+static void reclaim_sg_pages(struct scatterlist *sg, unsigned int nrpages)
+{
+ int i;
+ struct page *page;
+
+ for (i = 0; i < nrpages; i++) {
+ page = sg_page(&sg[i]);
+ if (!page)
+ break;
+ put_page(page);
+ }
+ kfree(sg);
+}
+
/* Callers must take the port->outvq_lock */
static void reclaim_consumed_buffers(struct port *port)
{
- void *buf;
+ struct buffer_token *tok;
unsigned int len;
if (!port->portdev) {
/* Device has been unplugged. vqs are already gone. */
return;
}
- while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
- kfree(buf);
+ while ((tok = virtqueue_get_buf(port->out_vq, &len))) {
+ if (tok->sgpages)
+ reclaim_sg_pages(tok->u.sg, tok->sgpages);
+ else
+ kfree(tok->u.buf);
+ kfree(tok);
port->outvq_full = false;
}
}
-static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
- bool nonblock)
+static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
+ int nents, size_t in_count,
+ struct buffer_token *tok, bool nonblock)
{
- struct scatterlist sg[1];
struct virtqueue *out_vq;
ssize_t ret;
unsigned long flags;
@@ -505,8 +534,7 @@ static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
reclaim_consumed_buffers(port);
- sg_init_one(sg, in_buf, in_count);
- ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf, GFP_ATOMIC);
+ ret = virtqueue_add_buf(out_vq, sg, nents, 0, tok, GFP_ATOMIC);
/* Tell Host to go! */
virtqueue_kick(out_vq);
@@ -544,6 +572,37 @@ done:
return in_count;
}
+static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
+ bool nonblock)
+{
+ struct scatterlist sg[1];
+ struct buffer_token *tok;
+
+ tok = kmalloc(sizeof(*tok), GFP_ATOMIC);
+ if (!tok)
+ return -ENOMEM;
+ tok->sgpages = 0;
+ tok->u.buf = in_buf;
+
+ sg_init_one(sg, in_buf, in_count);
+
+ return __send_to_port(port, sg, 1, in_count, tok, nonblock);
+}
+
+static ssize_t send_pages(struct port *port, struct scatterlist *sg, int nents,
+ size_t in_count, bool nonblock)
+{
+ struct buffer_token *tok;
+
+ tok = kmalloc(sizeof(*tok), GFP_ATOMIC);
+ if (!tok)
+ return -ENOMEM;
+ tok->sgpages = nents;
+ tok->u.sg = sg;
+
+ return __send_to_port(port, sg, nents, in_count, tok, nonblock);
+}
+
/*
* Give out the data that's requested from the buffer that we have
* queued up.
@@ -665,6 +724,26 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
return fill_readbuf(port, ubuf, count, true);
}
+static int wait_port_writable(struct port *port, bool nonblock)
+{
+ int ret;
+
+ if (will_write_block(port)) {
+ if (nonblock)
+ return -EAGAIN;
+
+ ret = wait_event_freezable(port->waitqueue,
+ !will_write_block(port));
+ if (ret < 0)
+ return ret;
+ }
+ /* Port got hot-unplugged. */
+ if (!port->guest_connected)
+ return -ENODEV;
+
+ return 0;
+}
+
static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
size_t count, loff_t *offp)
{
@@ -681,18 +760,9 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
nonblock = filp->f_flags & O_NONBLOCK;
- if (will_write_block(port)) {
- if (nonblock)
- return -EAGAIN;
-
- ret = wait_event_freezable(port->waitqueue,
- !will_write_block(port));
- if (ret < 0)
- return ret;
- }
- /* Port got hot-unplugged. */
- if (!port->guest_connected)
- return -ENODEV;
+ ret = wait_port_writable(port, nonblock);
+ if (ret < 0)
+ return ret;
count = min((size_t)(32 * 1024), count);
@@ -725,6 +795,93 @@ out:
return ret;
}
+struct sg_list {
+ unsigned int n;
+ unsigned int size;
+ size_t len;
+ struct scatterlist *sg;
+};
+
+static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
+ struct splice_desc *sd)
+{
+ struct sg_list *sgl = sd->u.data;
+ unsigned int offset, len;
+
+ if (sgl->n == sgl->size)
+ return 0;
+
+ /* Try lock this page */
+ if (buf->ops->steal(pipe, buf) == 0) {
+ /* Get reference and unlock page for moving */
+ get_page(buf->page);
+ unlock_page(buf->page);
+
+ len = min(buf->len, sd->len);
+ sg_set_page(&(sgl->sg[sgl->n]), buf->page, len, buf->offset);
+ } else {
+ /* Failback to copying a page */
+ struct page *page = alloc_page(GFP_KERNEL);
+ char *src = buf->ops->map(pipe, buf, 1);
+ char *dst;
+
+ if (!page)
+ return -ENOMEM;
+ dst = kmap(page);
+
+ offset = sd->pos & ~PAGE_MASK;
+
+ len = sd->len;
+ if (len + offset > PAGE_SIZE)
+ len = PAGE_SIZE - offset;
+
+ memcpy(dst + offset, src + buf->offset, len);
+
+ kunmap(page);
+ buf->ops->unmap(pipe, buf, src);
+
+ sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
+ }
+ sgl->n++;
+ sgl->len += len;
+
+ return len;
+}
+
+/* Faster zero-copy write by splicing */
+static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
+ struct file *filp, loff_t *ppos,
+ size_t len, unsigned int flags)
+{
+ struct port *port = filp->private_data;
+ struct sg_list sgl;
+ ssize_t ret;
+ struct splice_desc sd = {
+ .total_len = len,
+ .flags = flags,
+ .pos = *ppos,
+ .u.data = &sgl,
+ };
+
+ ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
+ if (ret < 0)
+ return ret;
+
+ sgl.n = 0;
+ sgl.len = 0;
+ sgl.size = pipe->nrbufs;
+ sgl.sg = kmalloc(sizeof(struct scatterlist) * sgl.size, GFP_KERNEL);
+ if (unlikely(!sgl.sg))
+ return -ENOMEM;
+
+ sg_init_table(sgl.sg, sgl.size);
+ ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
+ if (likely(ret > 0))
+ ret = send_pages(port, sgl.sg, sgl.n, sgl.len, true);
+
+ return ret;
+}
+
static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
{
struct port *port;
@@ -856,6 +1013,7 @@ static const struct file_operations port_fops = {
.open = port_fops_open,
.read = port_fops_read,
.write = port_fops_write,
+ .splice_write = port_fops_splice_write,
.poll = port_fops_poll,
.release = port_fops_release,
.fasync = port_fops_fasync,
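Note: the virtio_console changes add a splice_write path so pipe pages can be handed to the host without an intermediate copy (stolen pages go straight into the scatterlist; otherwise pipe_to_sg() falls back to copying into a freshly allocated page). Below is a hypothetical userspace sketch of how that path gets exercised; the port node name and input file name are assumptions, not taken from the patch:

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int pfd[2];
	int in = open("data.bin", O_RDONLY);		/* assumed source file */
	int port = open("/dev/vport0p1", O_WRONLY);	/* assumed virtio-serial port node */
	ssize_t n;

	if (in < 0 || port < 0 || pipe(pfd) < 0)
		return 1;

	/* file -> pipe -> port; the second splice() lands in port_fops_splice_write() */
	while ((n = splice(in, NULL, pfd[1], NULL, 32 * 1024, SPLICE_F_MOVE)) > 0)
		if (splice(pfd[0], NULL, port, NULL, n, SPLICE_F_MOVE) < 0)
			return 1;

	close(in);
	close(port);
	return 0;
}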