Diffstat (limited to 'drivers')
52 files changed, 2631 insertions, 461 deletions
diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 9f1000d2a40c..9f2059d24ae2 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -660,6 +660,7 @@ struct binder_transaction { #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__) static void _binder_proc_lock(struct binder_proc *proc, int line) + __acquires(&proc->outer_lock) { binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line); @@ -675,6 +676,7 @@ _binder_proc_lock(struct binder_proc *proc, int line) #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__) static void _binder_proc_unlock(struct binder_proc *proc, int line) + __releases(&proc->outer_lock) { binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line); @@ -690,6 +692,7 @@ _binder_proc_unlock(struct binder_proc *proc, int line) #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__) static void _binder_inner_proc_lock(struct binder_proc *proc, int line) + __acquires(&proc->inner_lock) { binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line); @@ -705,6 +708,7 @@ _binder_inner_proc_lock(struct binder_proc *proc, int line) #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__) static void _binder_inner_proc_unlock(struct binder_proc *proc, int line) + __releases(&proc->inner_lock) { binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line); @@ -720,6 +724,7 @@ _binder_inner_proc_unlock(struct binder_proc *proc, int line) #define binder_node_lock(node) _binder_node_lock(node, __LINE__) static void _binder_node_lock(struct binder_node *node, int line) + __acquires(&node->lock) { binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line); @@ -735,6 +740,7 @@ _binder_node_lock(struct binder_node *node, int line) #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__) static void _binder_node_unlock(struct binder_node *node, int line) + __releases(&node->lock) { binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line); @@ -751,12 +757,16 @@ _binder_node_unlock(struct binder_node *node, int line) #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__) static void _binder_node_inner_lock(struct binder_node *node, int line) + __acquires(&node->lock) __acquires(&node->proc->inner_lock) { binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line); spin_lock(&node->lock); if (node->proc) binder_inner_proc_lock(node->proc); + else + /* annotation for sparse */ + __acquire(&node->proc->inner_lock); } /** @@ -768,6 +778,7 @@ _binder_node_inner_lock(struct binder_node *node, int line) #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__) static void _binder_node_inner_unlock(struct binder_node *node, int line) + __releases(&node->lock) __releases(&node->proc->inner_lock) { struct binder_proc *proc = node->proc; @@ -775,6 +786,9 @@ _binder_node_inner_unlock(struct binder_node *node, int line) "%s: line=%d\n", __func__, line); if (proc) binder_inner_proc_unlock(proc); + else + /* annotation for sparse */ + __release(&node->proc->inner_lock); spin_unlock(&node->lock); } @@ -1384,10 +1398,14 @@ static void binder_dec_node_tmpref(struct binder_node *node) binder_node_inner_lock(node); if (!node->proc) spin_lock(&binder_dead_nodes_lock); + else + __acquire(&binder_dead_nodes_lock); node->tmp_refs--; BUG_ON(node->tmp_refs < 0); if (!node->proc) spin_unlock(&binder_dead_nodes_lock); + else + __release(&binder_dead_nodes_lock); /* * Call 
binder_dec_node() to check if all refcounts are 0 * and cleanup is needed. Calling with strong=0 and internal=1 @@ -1890,18 +1908,22 @@ static struct binder_thread *binder_get_txn_from( */ static struct binder_thread *binder_get_txn_from_and_acq_inner( struct binder_transaction *t) + __acquires(&t->from->proc->inner_lock) { struct binder_thread *from; from = binder_get_txn_from(t); - if (!from) + if (!from) { + __acquire(&from->proc->inner_lock); return NULL; + } binder_inner_proc_lock(from->proc); if (t->from) { BUG_ON(from != t->from); return from; } binder_inner_proc_unlock(from->proc); + __acquire(&from->proc->inner_lock); binder_thread_dec_tmpref(from); return NULL; } @@ -1973,6 +1995,8 @@ static void binder_send_failed_reply(struct binder_transaction *t, binder_thread_dec_tmpref(target_thread); binder_free_transaction(t); return; + } else { + __release(&target_thread->proc->inner_lock); } next = t->from_parent; @@ -2394,11 +2418,15 @@ static int binder_translate_handle(struct flat_binder_object *fp, fp->cookie = node->cookie; if (node->proc) binder_inner_proc_lock(node->proc); + else + __acquire(&node->proc->inner_lock); binder_inc_node_nilocked(node, fp->hdr.type == BINDER_TYPE_BINDER, 0, NULL); if (node->proc) binder_inner_proc_unlock(node->proc); + else + __release(&node->proc->inner_lock); trace_binder_transaction_ref_to_node(t, node, &src_rdata); binder_debug(BINDER_DEBUG_TRANSACTION, " ref %d desc %d -> node %d u%016llx\n", @@ -2762,6 +2790,8 @@ static void binder_transaction(struct binder_proc *proc, binder_set_nice(in_reply_to->saved_priority); target_thread = binder_get_txn_from_and_acq_inner(in_reply_to); if (target_thread == NULL) { + /* annotation for sparse */ + __release(&target_thread->proc->inner_lock); return_error = BR_DEAD_REPLY; return_error_line = __LINE__; goto err_dead_binder; @@ -4164,6 +4194,11 @@ retry: if (cmd == BR_DEAD_BINDER) goto done; /* DEAD_BINDER notifications can cause transactions */ } break; + default: + binder_inner_proc_unlock(proc); + pr_err("%d:%d: bad work type %d\n", + proc->pid, thread->pid, w->type); + break; } if (!t) @@ -4467,6 +4502,8 @@ static int binder_thread_release(struct binder_proc *proc, spin_lock(&t->lock); if (t->to_thread == thread) send_reply = t; + } else { + __acquire(&t->lock); } thread->is_dead = true; @@ -4495,7 +4532,11 @@ static int binder_thread_release(struct binder_proc *proc, spin_unlock(&last_t->lock); if (t) spin_lock(&t->lock); + else + __acquire(&t->lock); } + /* annotation for sparse, lock not acquired in last iteration above */ + __release(&t->lock); /* * If this thread used poll, make sure we remove the waitqueue diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 030c98f35cca..022cd80e80cc 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -939,6 +939,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item, struct list_lru_one *lru, spinlock_t *lock, void *cb_arg) + __must_hold(lock) { struct mm_struct *mm = NULL; struct binder_lru_page *page = container_of(item, diff --git a/drivers/char/lp.c b/drivers/char/lp.c index 8c4dd1a3bb6a..e0a92d764eee 100644 --- a/drivers/char/lp.c +++ b/drivers/char/lp.c @@ -46,8 +46,8 @@ * lp=auto (assign lp devices to all ports that * have printers attached, as determined * by the IEEE-1284 autoprobe) - * - * lp=reset (reset the printer during + * + * lp=reset (reset the printer during * initialisation) * * lp=off (disable the printer driver entirely) @@ -166,7 +166,7 @@ static struct parport 
*console_registered; static void lp_claim_parport_or_block(struct lp_struct *this_lp) { if (!test_and_set_bit(LP_PARPORT_CLAIMED, &this_lp->bits)) { - parport_claim_or_block (this_lp->dev); + parport_claim_or_block(this_lp->dev); } } @@ -174,7 +174,7 @@ static void lp_claim_parport_or_block(struct lp_struct *this_lp) static void lp_release_parport(struct lp_struct *this_lp) { if (test_and_clear_bit(LP_PARPORT_CLAIMED, &this_lp->bits)) { - parport_release (this_lp->dev); + parport_release(this_lp->dev); } } @@ -184,37 +184,37 @@ static int lp_preempt(void *handle) { struct lp_struct *this_lp = (struct lp_struct *)handle; set_bit(LP_PREEMPT_REQUEST, &this_lp->bits); - return (1); + return 1; } -/* +/* * Try to negotiate to a new mode; if unsuccessful negotiate to * compatibility mode. Return the mode we ended up in. */ -static int lp_negotiate(struct parport * port, int mode) +static int lp_negotiate(struct parport *port, int mode) { - if (parport_negotiate (port, mode) != 0) { + if (parport_negotiate(port, mode) != 0) { mode = IEEE1284_MODE_COMPAT; - parport_negotiate (port, mode); + parport_negotiate(port, mode); } - return (mode); + return mode; } static int lp_reset(int minor) { int retval; - lp_claim_parport_or_block (&lp_table[minor]); + lp_claim_parport_or_block(&lp_table[minor]); w_ctr(minor, LP_PSELECP); - udelay (LP_DELAY); + udelay(LP_DELAY); w_ctr(minor, LP_PSELECP | LP_PINITP); retval = r_str(minor); - lp_release_parport (&lp_table[minor]); + lp_release_parport(&lp_table[minor]); return retval; } -static void lp_error (int minor) +static void lp_error(int minor) { DEFINE_WAIT(wait); int polling; @@ -223,12 +223,15 @@ static void lp_error (int minor) return; polling = lp_table[minor].dev->port->irq == PARPORT_IRQ_NONE; - if (polling) lp_release_parport (&lp_table[minor]); + if (polling) + lp_release_parport(&lp_table[minor]); prepare_to_wait(&lp_table[minor].waitq, &wait, TASK_INTERRUPTIBLE); schedule_timeout(LP_TIMEOUT_POLLED); finish_wait(&lp_table[minor].waitq, &wait); - if (polling) lp_claim_parport_or_block (&lp_table[minor]); - else parport_yield_blocking (lp_table[minor].dev); + if (polling) + lp_claim_parport_or_block(&lp_table[minor]); + else + parport_yield_blocking(lp_table[minor].dev); } static int lp_check_status(int minor) @@ -259,7 +262,7 @@ static int lp_check_status(int minor) error = -EIO; } else { last = 0; /* Come here if LP_CAREFUL is set and no - errors are reported. */ + errors are reported. */ } lp_table[minor].last_error = last; @@ -276,14 +279,14 @@ static int lp_wait_ready(int minor, int nonblock) /* If we're not in compatibility mode, we're ready now! 
*/ if (lp_table[minor].current_mode != IEEE1284_MODE_COMPAT) { - return (0); + return 0; } do { - error = lp_check_status (minor); + error = lp_check_status(minor); if (error && (nonblock || (LP_F(minor) & LP_ABORT))) break; - if (signal_pending (current)) { + if (signal_pending(current)) { error = -EINTR; break; } @@ -291,8 +294,8 @@ static int lp_wait_ready(int minor, int nonblock) return error; } -static ssize_t lp_write(struct file * file, const char __user * buf, - size_t count, loff_t *ppos) +static ssize_t lp_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) { unsigned int minor = iminor(file_inode(file)); struct parport *port = lp_table[minor].dev->port; @@ -317,26 +320,26 @@ static ssize_t lp_write(struct file * file, const char __user * buf, if (mutex_lock_interruptible(&lp_table[minor].port_mutex)) return -EINTR; - if (copy_from_user (kbuf, buf, copy_size)) { + if (copy_from_user(kbuf, buf, copy_size)) { retv = -EFAULT; goto out_unlock; } - /* Claim Parport or sleep until it becomes available - */ - lp_claim_parport_or_block (&lp_table[minor]); + /* Claim Parport or sleep until it becomes available + */ + lp_claim_parport_or_block(&lp_table[minor]); /* Go to the proper mode. */ - lp_table[minor].current_mode = lp_negotiate (port, - lp_table[minor].best_mode); + lp_table[minor].current_mode = lp_negotiate(port, + lp_table[minor].best_mode); - parport_set_timeout (lp_table[minor].dev, - (nonblock ? PARPORT_INACTIVITY_O_NONBLOCK - : lp_table[minor].timeout)); + parport_set_timeout(lp_table[minor].dev, + (nonblock ? PARPORT_INACTIVITY_O_NONBLOCK + : lp_table[minor].timeout)); - if ((retv = lp_wait_ready (minor, nonblock)) == 0) + if ((retv = lp_wait_ready(minor, nonblock)) == 0) do { /* Write the data. */ - written = parport_write (port, kbuf, copy_size); + written = parport_write(port, kbuf, copy_size); if (written > 0) { copy_size -= written; count -= written; @@ -344,7 +347,7 @@ static ssize_t lp_write(struct file * file, const char __user * buf, retv += written; } - if (signal_pending (current)) { + if (signal_pending(current)) { if (retv == 0) retv = -EINTR; @@ -355,11 +358,11 @@ static ssize_t lp_write(struct file * file, const char __user * buf, /* incomplete write -> check error ! 
*/ int error; - parport_negotiate (lp_table[minor].dev->port, - IEEE1284_MODE_COMPAT); + parport_negotiate(lp_table[minor].dev->port, + IEEE1284_MODE_COMPAT); lp_table[minor].current_mode = IEEE1284_MODE_COMPAT; - error = lp_wait_ready (minor, nonblock); + error = lp_wait_ready(minor, nonblock); if (error) { if (retv == 0) @@ -371,13 +374,13 @@ static ssize_t lp_write(struct file * file, const char __user * buf, break; } - parport_yield_blocking (lp_table[minor].dev); - lp_table[minor].current_mode - = lp_negotiate (port, - lp_table[minor].best_mode); + parport_yield_blocking(lp_table[minor].dev); + lp_table[minor].current_mode + = lp_negotiate(port, + lp_table[minor].best_mode); } else if (need_resched()) - schedule (); + schedule(); if (count) { copy_size = count; @@ -389,27 +392,27 @@ static ssize_t lp_write(struct file * file, const char __user * buf, retv = -EFAULT; break; } - } + } } while (count > 0); - if (test_and_clear_bit(LP_PREEMPT_REQUEST, + if (test_and_clear_bit(LP_PREEMPT_REQUEST, &lp_table[minor].bits)) { printk(KERN_INFO "lp%d releasing parport\n", minor); - parport_negotiate (lp_table[minor].dev->port, - IEEE1284_MODE_COMPAT); + parport_negotiate(lp_table[minor].dev->port, + IEEE1284_MODE_COMPAT); lp_table[minor].current_mode = IEEE1284_MODE_COMPAT; - lp_release_parport (&lp_table[minor]); + lp_release_parport(&lp_table[minor]); } out_unlock: mutex_unlock(&lp_table[minor].port_mutex); - return retv; + return retv; } #ifdef CONFIG_PARPORT_1284 /* Status readback conforming to ieee1284 */ -static ssize_t lp_read(struct file * file, char __user * buf, +static ssize_t lp_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { DEFINE_WAIT(wait); @@ -426,21 +429,21 @@ static ssize_t lp_read(struct file * file, char __user * buf, if (mutex_lock_interruptible(&lp_table[minor].port_mutex)) return -EINTR; - lp_claim_parport_or_block (&lp_table[minor]); + lp_claim_parport_or_block(&lp_table[minor]); - parport_set_timeout (lp_table[minor].dev, - (nonblock ? PARPORT_INACTIVITY_O_NONBLOCK - : lp_table[minor].timeout)); + parport_set_timeout(lp_table[minor].dev, + (nonblock ? PARPORT_INACTIVITY_O_NONBLOCK + : lp_table[minor].timeout)); - parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); - if (parport_negotiate (lp_table[minor].dev->port, - IEEE1284_MODE_NIBBLE)) { + parport_negotiate(lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); + if (parport_negotiate(lp_table[minor].dev->port, + IEEE1284_MODE_NIBBLE)) { retval = -EIO; goto out; } while (retval == 0) { - retval = parport_read (port, kbuf, count); + retval = parport_read(port, kbuf, count); if (retval > 0) break; @@ -453,11 +456,11 @@ static ssize_t lp_read(struct file * file, char __user * buf, /* Wait for data. 
*/ if (lp_table[minor].dev->port->irq == PARPORT_IRQ_NONE) { - parport_negotiate (lp_table[minor].dev->port, - IEEE1284_MODE_COMPAT); - lp_error (minor); - if (parport_negotiate (lp_table[minor].dev->port, - IEEE1284_MODE_NIBBLE)) { + parport_negotiate(lp_table[minor].dev->port, + IEEE1284_MODE_COMPAT); + lp_error(minor); + if (parport_negotiate(lp_table[minor].dev->port, + IEEE1284_MODE_NIBBLE)) { retval = -EIO; goto out; } @@ -467,18 +470,18 @@ static ssize_t lp_read(struct file * file, char __user * buf, finish_wait(&lp_table[minor].waitq, &wait); } - if (signal_pending (current)) { + if (signal_pending(current)) { retval = -ERESTARTSYS; break; } - cond_resched (); + cond_resched(); } - parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); + parport_negotiate(lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); out: - lp_release_parport (&lp_table[minor]); + lp_release_parport(&lp_table[minor]); - if (retval > 0 && copy_to_user (buf, kbuf, retval)) + if (retval > 0 && copy_to_user(buf, kbuf, retval)) retval = -EFAULT; mutex_unlock(&lp_table[minor].port_mutex); @@ -488,7 +491,7 @@ static ssize_t lp_read(struct file * file, char __user * buf, #endif /* IEEE 1284 support */ -static int lp_open(struct inode * inode, struct file * file) +static int lp_open(struct inode *inode, struct file *file) { unsigned int minor = iminor(inode); int ret = 0; @@ -513,9 +516,9 @@ static int lp_open(struct inode * inode, struct file * file) should most likely only ever be used by the tunelp application. */ if ((LP_F(minor) & LP_ABORTOPEN) && !(file->f_flags & O_NONBLOCK)) { int status; - lp_claim_parport_or_block (&lp_table[minor]); + lp_claim_parport_or_block(&lp_table[minor]); status = r_str(minor); - lp_release_parport (&lp_table[minor]); + lp_release_parport(&lp_table[minor]); if (status & LP_POUTPA) { printk(KERN_INFO "lp%d out of paper\n", minor); LP_F(minor) &= ~LP_BUSY; @@ -540,32 +543,32 @@ static int lp_open(struct inode * inode, struct file * file) goto out; } /* Determine if the peripheral supports ECP mode */ - lp_claim_parport_or_block (&lp_table[minor]); + lp_claim_parport_or_block(&lp_table[minor]); if ( (lp_table[minor].dev->port->modes & PARPORT_MODE_ECP) && - !parport_negotiate (lp_table[minor].dev->port, - IEEE1284_MODE_ECP)) { - printk (KERN_INFO "lp%d: ECP mode\n", minor); + !parport_negotiate(lp_table[minor].dev->port, + IEEE1284_MODE_ECP)) { + printk(KERN_INFO "lp%d: ECP mode\n", minor); lp_table[minor].best_mode = IEEE1284_MODE_ECP; } else { lp_table[minor].best_mode = IEEE1284_MODE_COMPAT; } /* Leave peripheral in compatibility mode */ - parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); - lp_release_parport (&lp_table[minor]); + parport_negotiate(lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); + lp_release_parport(&lp_table[minor]); lp_table[minor].current_mode = IEEE1284_MODE_COMPAT; out: mutex_unlock(&lp_mutex); return ret; } -static int lp_release(struct inode * inode, struct file * file) +static int lp_release(struct inode *inode, struct file *file) { unsigned int minor = iminor(inode); - lp_claim_parport_or_block (&lp_table[minor]); - parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); + lp_claim_parport_or_block(&lp_table[minor]); + parport_negotiate(lp_table[minor].dev->port, IEEE1284_MODE_COMPAT); lp_table[minor].current_mode = IEEE1284_MODE_COMPAT; - lp_release_parport (&lp_table[minor]); + lp_release_parport(&lp_table[minor]); kfree(lp_table[minor].lp_buffer); lp_table[minor].lp_buffer = NULL; LP_F(minor) &= ~LP_BUSY; @@ 
-615,7 +618,7 @@ static int lp_do_ioctl(unsigned int minor, unsigned int cmd, case LPWAIT: LP_WAIT(minor) = arg; break; - case LPSETIRQ: + case LPSETIRQ: return -EINVAL; break; case LPGETIRQ: @@ -626,9 +629,9 @@ static int lp_do_ioctl(unsigned int minor, unsigned int cmd, case LPGETSTATUS: if (mutex_lock_interruptible(&lp_table[minor].port_mutex)) return -EINTR; - lp_claim_parport_or_block (&lp_table[minor]); + lp_claim_parport_or_block(&lp_table[minor]); status = r_str(minor); - lp_release_parport (&lp_table[minor]); + lp_release_parport(&lp_table[minor]); mutex_unlock(&lp_table[minor].port_mutex); if (copy_to_user(argp, &status, sizeof(int))) @@ -647,8 +650,8 @@ static int lp_do_ioctl(unsigned int minor, unsigned int cmd, sizeof(struct lp_stats)); break; #endif - case LPGETFLAGS: - status = LP_F(minor); + case LPGETFLAGS: + status = LP_F(minor); if (copy_to_user(argp, &status, sizeof(int))) return -EFAULT; break; @@ -801,31 +804,31 @@ static const struct file_operations lp_fops = { /* The console must be locked when we get here. */ -static void lp_console_write (struct console *co, const char *s, - unsigned count) +static void lp_console_write(struct console *co, const char *s, + unsigned count) { struct pardevice *dev = lp_table[CONSOLE_LP].dev; struct parport *port = dev->port; ssize_t written; - if (parport_claim (dev)) + if (parport_claim(dev)) /* Nothing we can do. */ return; - parport_set_timeout (dev, 0); + parport_set_timeout(dev, 0); /* Go to compatibility mode. */ - parport_negotiate (port, IEEE1284_MODE_COMPAT); + parport_negotiate(port, IEEE1284_MODE_COMPAT); do { /* Write the data, converting LF->CRLF as we go. */ ssize_t canwrite = count; - char *lf = memchr (s, '\n', count); + char *lf = memchr(s, '\n', count); if (lf) canwrite = lf - s; if (canwrite > 0) { - written = parport_write (port, s, canwrite); + written = parport_write(port, s, canwrite); if (written <= 0) continue; @@ -843,14 +846,14 @@ static void lp_console_write (struct console *co, const char *s, s++; count--; do { - written = parport_write (port, crlf, i); + written = parport_write(port, crlf, i); if (written > 0) i -= written, crlf += written; } while (i > 0 && (CONSOLE_LP_STRICT || written > 0)); } } while (count > 0 && (CONSOLE_LP_STRICT || written > 0)); - parport_release (dev); + parport_release(dev); } static struct console lpcons = { @@ -871,7 +874,7 @@ module_param_array(parport, charp, NULL, 0); module_param(reset, bool, 0); #ifndef MODULE -static int __init lp_setup (char *str) +static int __init lp_setup(char *str) { static int parport_ptr; int x; @@ -908,7 +911,7 @@ static int __init lp_setup (char *str) static int lp_register(int nr, struct parport *port) { - lp_table[nr].dev = parport_register_device(port, "lp", + lp_table[nr].dev = parport_register_device(port, "lp", lp_preempt, NULL, NULL, 0, (void *) &lp_table[nr]); if (lp_table[nr].dev == NULL) @@ -921,7 +924,7 @@ static int lp_register(int nr, struct parport *port) device_create(lp_class, port->dev, MKDEV(LP_MAJOR, nr), NULL, "lp%d", nr); - printk(KERN_INFO "lp%d: using %s (%s).\n", nr, port->name, + printk(KERN_INFO "lp%d: using %s (%s).\n", nr, port->name, (port->irq == PARPORT_IRQ_NONE)?"polling":"interrupt-driven"); #ifdef CONFIG_LP_CONSOLE @@ -929,17 +932,17 @@ static int lp_register(int nr, struct parport *port) if (port->modes & PARPORT_MODE_SAFEININT) { register_console(&lpcons); console_registered = port; - printk (KERN_INFO "lp%d: console ready\n", CONSOLE_LP); + printk(KERN_INFO "lp%d: console ready\n", CONSOLE_LP); } else - 
printk (KERN_ERR "lp%d: cannot run console on %s\n", - CONSOLE_LP, port->name); + printk(KERN_ERR "lp%d: cannot run console on %s\n", + CONSOLE_LP, port->name); } #endif return 0; } -static void lp_attach (struct parport *port) +static void lp_attach(struct parport *port) { unsigned int i; @@ -969,7 +972,7 @@ static void lp_attach (struct parport *port) } } -static void lp_detach (struct parport *port) +static void lp_detach(struct parport *port) { /* Write this some day. */ #ifdef CONFIG_LP_CONSOLE @@ -986,7 +989,7 @@ static struct parport_driver lp_driver = { .detach = lp_detach, }; -static int __init lp_init (void) +static int __init lp_init(void) { int i, err = 0; @@ -1003,17 +1006,17 @@ static int __init lp_init (void) #ifdef LP_STATS lp_table[i].lastcall = 0; lp_table[i].runchars = 0; - memset (&lp_table[i].stats, 0, sizeof (struct lp_stats)); + memset(&lp_table[i].stats, 0, sizeof(struct lp_stats)); #endif lp_table[i].last_error = 0; - init_waitqueue_head (&lp_table[i].waitq); - init_waitqueue_head (&lp_table[i].dataq); + init_waitqueue_head(&lp_table[i].waitq); + init_waitqueue_head(&lp_table[i].dataq); mutex_init(&lp_table[i].port_mutex); lp_table[i].timeout = 10 * HZ; } - if (register_chrdev (LP_MAJOR, "lp", &lp_fops)) { - printk (KERN_ERR "lp: unable to get major %d\n", LP_MAJOR); + if (register_chrdev(LP_MAJOR, "lp", &lp_fops)) { + printk(KERN_ERR "lp: unable to get major %d\n", LP_MAJOR); return -EIO; } @@ -1023,17 +1026,17 @@ static int __init lp_init (void) goto out_reg; } - if (parport_register_driver (&lp_driver)) { - printk (KERN_ERR "lp: unable to register with parport\n"); + if (parport_register_driver(&lp_driver)) { + printk(KERN_ERR "lp: unable to register with parport\n"); err = -EIO; goto out_class; } if (!lp_count) { - printk (KERN_INFO "lp: driver loaded but no devices found\n"); + printk(KERN_INFO "lp: driver loaded but no devices found\n"); #ifndef CONFIG_PARPORT_1284 if (parport_nr[0] == LP_PARPORT_AUTO) - printk (KERN_INFO "lp: (is IEEE 1284 support enabled?)\n"); + printk(KERN_INFO "lp: (is IEEE 1284 support enabled?)\n"); #endif } @@ -1046,7 +1049,7 @@ out_reg: return err; } -static int __init lp_init_module (void) +static int __init lp_init_module(void) { if (parport[0]) { /* The user gave some parameters. Let's see what they were. 
*/ @@ -1060,7 +1063,7 @@ static int __init lp_init_module (void) else { char *ep; unsigned long r = simple_strtoul(parport[n], &ep, 0); - if (ep != parport[n]) + if (ep != parport[n]) parport_nr[n] = r; else { printk(KERN_ERR "lp: bad port specifier `%s'\n", parport[n]); @@ -1074,14 +1077,14 @@ static int __init lp_init_module (void) return lp_init(); } -static void lp_cleanup_module (void) +static void lp_cleanup_module(void) { unsigned int offset; - parport_unregister_driver (&lp_driver); + parport_unregister_driver(&lp_driver); #ifdef CONFIG_LP_CONSOLE - unregister_console (&lpcons); + unregister_console(&lpcons); #endif unregister_chrdev(LP_MAJOR, "lp"); diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c index 8eeb4190207d..6d81bb3bb503 100644 --- a/drivers/char/tlclk.c +++ b/drivers/char/tlclk.c @@ -506,28 +506,28 @@ static ssize_t store_select_amcb2_transmit_clock(struct device *d, val = (unsigned char)tmp; spin_lock_irqsave(&event_lock, flags); - if ((val == CLK_8kHz) || (val == CLK_16_384MHz)) { - SET_PORT_BITS(TLCLK_REG3, 0xc7, 0x28); - SET_PORT_BITS(TLCLK_REG1, 0xfb, ~val); - } else if (val >= CLK_8_592MHz) { - SET_PORT_BITS(TLCLK_REG3, 0xc7, 0x38); - switch (val) { - case CLK_8_592MHz: - SET_PORT_BITS(TLCLK_REG0, 0xfc, 2); - break; - case CLK_11_184MHz: - SET_PORT_BITS(TLCLK_REG0, 0xfc, 0); - break; - case CLK_34_368MHz: - SET_PORT_BITS(TLCLK_REG0, 0xfc, 3); - break; - case CLK_44_736MHz: - SET_PORT_BITS(TLCLK_REG0, 0xfc, 1); - break; - } - } else - SET_PORT_BITS(TLCLK_REG3, 0xc7, val << 3); - + if ((val == CLK_8kHz) || (val == CLK_16_384MHz)) { + SET_PORT_BITS(TLCLK_REG3, 0xc7, 0x28); + SET_PORT_BITS(TLCLK_REG1, 0xfb, ~val); + } else if (val >= CLK_8_592MHz) { + SET_PORT_BITS(TLCLK_REG3, 0xc7, 0x38); + switch (val) { + case CLK_8_592MHz: + SET_PORT_BITS(TLCLK_REG0, 0xfc, 2); + break; + case CLK_11_184MHz: + SET_PORT_BITS(TLCLK_REG0, 0xfc, 0); + break; + case CLK_34_368MHz: + SET_PORT_BITS(TLCLK_REG0, 0xfc, 3); + break; + case CLK_44_736MHz: + SET_PORT_BITS(TLCLK_REG0, 0xfc, 1); + break; + } + } else { + SET_PORT_BITS(TLCLK_REG3, 0xc7, val << 3); + } spin_unlock_irqrestore(&event_lock, flags); return strnlen(buf, count); @@ -548,27 +548,28 @@ static ssize_t store_select_amcb1_transmit_clock(struct device *d, val = (unsigned char)tmp; spin_lock_irqsave(&event_lock, flags); - if ((val == CLK_8kHz) || (val == CLK_16_384MHz)) { - SET_PORT_BITS(TLCLK_REG3, 0xf8, 0x5); - SET_PORT_BITS(TLCLK_REG1, 0xfb, ~val); - } else if (val >= CLK_8_592MHz) { - SET_PORT_BITS(TLCLK_REG3, 0xf8, 0x7); - switch (val) { - case CLK_8_592MHz: - SET_PORT_BITS(TLCLK_REG0, 0xfc, 2); - break; - case CLK_11_184MHz: - SET_PORT_BITS(TLCLK_REG0, 0xfc, 0); - break; - case CLK_34_368MHz: - SET_PORT_BITS(TLCLK_REG0, 0xfc, 3); - break; - case CLK_44_736MHz: - SET_PORT_BITS(TLCLK_REG0, 0xfc, 1); - break; - } - } else - SET_PORT_BITS(TLCLK_REG3, 0xf8, val); + if ((val == CLK_8kHz) || (val == CLK_16_384MHz)) { + SET_PORT_BITS(TLCLK_REG3, 0xf8, 0x5); + SET_PORT_BITS(TLCLK_REG1, 0xfb, ~val); + } else if (val >= CLK_8_592MHz) { + SET_PORT_BITS(TLCLK_REG3, 0xf8, 0x7); + switch (val) { + case CLK_8_592MHz: + SET_PORT_BITS(TLCLK_REG0, 0xfc, 2); + break; + case CLK_11_184MHz: + SET_PORT_BITS(TLCLK_REG0, 0xfc, 0); + break; + case CLK_34_368MHz: + SET_PORT_BITS(TLCLK_REG0, 0xfc, 3); + break; + case CLK_44_736MHz: + SET_PORT_BITS(TLCLK_REG0, 0xfc, 1); + break; + } + } else { + SET_PORT_BITS(TLCLK_REG3, 0xf8, val); + } spin_unlock_irqrestore(&event_lock, flags); return strnlen(buf, count); diff --git 
a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 7273e5082b41..f754578414f0 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -216,6 +216,18 @@ config FW_CFG_SYSFS_CMDLINE WARNING: Using incorrect parameters (base address in particular) may crash your system. +config INTEL_STRATIX10_SERVICE + tristate "Intel Stratix10 Service Layer" + depends on HAVE_ARM_SMCCC + default n + help + Intel Stratix10 service layer runs at privileged exception level, + interfaces with the service providers (FPGA manager is one of them) + and manages secure monitor call to communicate with secure monitor + software at secure monitor exception level. + + Say Y here if you want Stratix10 service layer support. + config QCOM_SCM bool depends on ARM || ARM64 diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile index 3158dffd9914..80feb635120f 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_DMI_SYSFS) += dmi-sysfs.o obj-$(CONFIG_EDD) += edd.o obj-$(CONFIG_EFI_PCDP) += pcdp.o obj-$(CONFIG_DMIID) += dmi-id.o +obj-$(CONFIG_INTEL_STRATIX10_SERVICE) += stratix10-svc.o obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c new file mode 100644 index 000000000000..81f3182e290d --- /dev/null +++ b/drivers/firmware/stratix10-svc.c @@ -0,0 +1,1044 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2017-2018, Intel Corporation + */ + +#include <linux/completion.h> +#include <linux/delay.h> +#include <linux/genalloc.h> +#include <linux/io.h> +#include <linux/kfifo.h> +#include <linux/kthread.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/firmware/intel/stratix10-smc.h> +#include <linux/firmware/intel/stratix10-svc-client.h> +#include <linux/types.h> + +/** + * SVC_NUM_DATA_IN_FIFO - number of struct stratix10_svc_data in the FIFO + * + * SVC_NUM_CHANNEL - number of channel supported by service layer driver + * + * FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS - claim back the submitted buffer(s) + * from the secure world for FPGA manager to reuse, or to free the buffer(s) + * when all bit-stream data had be send. + * + * FPGA_CONFIG_STATUS_TIMEOUT_SEC - poll the FPGA configuration status, + * service layer will return error to FPGA manager when timeout occurs, + * timeout is set to 30 seconds (30 * 1000) at Intel Stratix10 SoC. + */ +#define SVC_NUM_DATA_IN_FIFO 32 +#define SVC_NUM_CHANNEL 2 +#define FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS 200 +#define FPGA_CONFIG_STATUS_TIMEOUT_SEC 30 + +typedef void (svc_invoke_fn)(unsigned long, unsigned long, unsigned long, + unsigned long, unsigned long, unsigned long, + unsigned long, unsigned long, + struct arm_smccc_res *); +struct stratix10_svc_chan; + +/** + * struct stratix10_svc_sh_memory - service shared memory structure + * @sync_complete: state for a completion + * @addr: physical address of shared memory block + * @size: size of shared memory block + * @invoke_fn: function to issue secure monitor or hypervisor call + * + * This struct is used to save physical address and size of shared memory + * block. The shared memory blocked is allocated by secure monitor software + * at secure world. 
+ * + * Service layer driver uses the physical address and size to create a memory + * pool, then allocates data buffer from that memory pool for service client. + */ +struct stratix10_svc_sh_memory { + struct completion sync_complete; + unsigned long addr; + unsigned long size; + svc_invoke_fn *invoke_fn; +}; + +/** + * struct stratix10_svc_data_mem - service memory structure + * @vaddr: virtual address + * @paddr: physical address + * @size: size of memory + * @node: link list head node + * + * This struct is used in a list that keeps track of buffers which have + * been allocated or freed from the memory pool. Service layer driver also + * uses this struct to transfer physical address to virtual address. + */ +struct stratix10_svc_data_mem { + void *vaddr; + phys_addr_t paddr; + size_t size; + struct list_head node; +}; + +/** + * struct stratix10_svc_data - service data structure + * @chan: service channel + * @paddr: playload physical address + * @size: playload size + * @command: service command requested by client + * @flag: configuration type (full or partial) + * @arg: args to be passed via registers and not physically mapped buffers + * + * This struct is used in service FIFO for inter-process communication. + */ +struct stratix10_svc_data { + struct stratix10_svc_chan *chan; + phys_addr_t paddr; + size_t size; + u32 command; + u32 flag; + u64 arg[3]; +}; + +/** + * struct stratix10_svc_controller - service controller + * @dev: device + * @chans: array of service channels + * @num_chans: number of channels in 'chans' array + * @num_active_client: number of active service client + * @node: list management + * @genpool: memory pool pointing to the memory region + * @task: pointer to the thread task which handles SMC or HVC call + * @svc_fifo: a queue for storing service message data + * @complete_status: state for completion + * @svc_fifo_lock: protect access to service message data queue + * @invoke_fn: function to issue secure monitor call or hypervisor call + * + * This struct is used to create communication channels for service clients, to + * handle secure monitor or hypervisor call. + */ +struct stratix10_svc_controller { + struct device *dev; + struct stratix10_svc_chan *chans; + int num_chans; + int num_active_client; + struct list_head node; + struct gen_pool *genpool; + struct task_struct *task; + struct kfifo svc_fifo; + struct completion complete_status; + spinlock_t svc_fifo_lock; + svc_invoke_fn *invoke_fn; +}; + +/** + * struct stratix10_svc_chan - service communication channel + * @ctrl: pointer to service controller which is the provider of this channel + * @scl: pointer to service client which owns the channel + * @name: service client name associated with the channel + * @lock: protect access to the channel + * + * This struct is used by service client to communicate with service layer, each + * service client has its own channel created by service controller. + */ +struct stratix10_svc_chan { + struct stratix10_svc_controller *ctrl; + struct stratix10_svc_client *scl; + char *name; + spinlock_t lock; +}; + +static LIST_HEAD(svc_ctrl); +static LIST_HEAD(svc_data_mem); + +/** + * svc_pa_to_va() - translate physical address to virtual address + * @addr: to be translated physical address + * + * Return: valid virtual address or NULL if the provided physical + * address doesn't exist. 
+ */ +static void *svc_pa_to_va(unsigned long addr) +{ + struct stratix10_svc_data_mem *pmem; + + pr_debug("claim back P-addr=0x%016x\n", (unsigned int)addr); + list_for_each_entry(pmem, &svc_data_mem, node) + if (pmem->paddr == addr) + return pmem->vaddr; + + /* physical address is not found */ + return NULL; +} + +/** + * svc_thread_cmd_data_claim() - claim back buffer from the secure world + * @ctrl: pointer to service layer controller + * @p_data: pointer to service data structure + * @cb_data: pointer to callback data structure to service client + * + * Claim back the submitted buffers from the secure world and pass buffer + * back to service client (FPGA manager, etc) for reuse. + */ +static void svc_thread_cmd_data_claim(struct stratix10_svc_controller *ctrl, + struct stratix10_svc_data *p_data, + struct stratix10_svc_cb_data *cb_data) +{ + struct arm_smccc_res res; + unsigned long timeout; + + reinit_completion(&ctrl->complete_status); + timeout = msecs_to_jiffies(FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS); + + pr_debug("%s: claim back the submitted buffer\n", __func__); + do { + ctrl->invoke_fn(INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE, + 0, 0, 0, 0, 0, 0, 0, &res); + + if (res.a0 == INTEL_SIP_SMC_STATUS_OK) { + if (!res.a1) { + complete(&ctrl->complete_status); + break; + } + cb_data->status = BIT(SVC_STATUS_RECONFIG_BUFFER_DONE); + cb_data->kaddr1 = svc_pa_to_va(res.a1); + cb_data->kaddr2 = (res.a2) ? + svc_pa_to_va(res.a2) : NULL; + cb_data->kaddr3 = (res.a3) ? + svc_pa_to_va(res.a3) : NULL; + p_data->chan->scl->receive_cb(p_data->chan->scl, + cb_data); + } else { + pr_debug("%s: secure world busy, polling again\n", + __func__); + } + } while (res.a0 == INTEL_SIP_SMC_STATUS_OK || + res.a0 == INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY || + wait_for_completion_timeout(&ctrl->complete_status, timeout)); +} + +/** + * svc_thread_cmd_config_status() - check configuration status + * @ctrl: pointer to service layer controller + * @p_data: pointer to service data structure + * @cb_data: pointer to callback data structure to service client + * + * Check whether the secure firmware at secure world has finished the FPGA + * configuration, and then inform FPGA manager the configuration status. + */ +static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl, + struct stratix10_svc_data *p_data, + struct stratix10_svc_cb_data *cb_data) +{ + struct arm_smccc_res res; + int count_in_sec; + + cb_data->kaddr1 = NULL; + cb_data->kaddr2 = NULL; + cb_data->kaddr3 = NULL; + cb_data->status = BIT(SVC_STATUS_RECONFIG_ERROR); + + pr_debug("%s: polling config status\n", __func__); + + count_in_sec = FPGA_CONFIG_STATUS_TIMEOUT_SEC; + while (count_in_sec) { + ctrl->invoke_fn(INTEL_SIP_SMC_FPGA_CONFIG_ISDONE, + 0, 0, 0, 0, 0, 0, 0, &res); + if ((res.a0 == INTEL_SIP_SMC_STATUS_OK) || + (res.a0 == INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR)) + break; + + /* + * configuration is still in progress, wait one second then + * poll again + */ + msleep(1000); + count_in_sec--; + }; + + if (res.a0 == INTEL_SIP_SMC_STATUS_OK && count_in_sec) + cb_data->status = BIT(SVC_STATUS_RECONFIG_COMPLETED); + + p_data->chan->scl->receive_cb(p_data->chan->scl, cb_data); +} + +/** + * svc_thread_recv_status_ok() - handle the successful status + * @p_data: pointer to service data structure + * @cb_data: pointer to callback data structure to service client + * @res: result from SMC or HVC call + * + * Send back the correspond status to the service clients. 
+ */ +static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data, + struct stratix10_svc_cb_data *cb_data, + struct arm_smccc_res res) +{ + cb_data->kaddr1 = NULL; + cb_data->kaddr2 = NULL; + cb_data->kaddr3 = NULL; + + switch (p_data->command) { + case COMMAND_RECONFIG: + cb_data->status = BIT(SVC_STATUS_RECONFIG_REQUEST_OK); + break; + case COMMAND_RECONFIG_DATA_SUBMIT: + cb_data->status = BIT(SVC_STATUS_RECONFIG_BUFFER_SUBMITTED); + break; + case COMMAND_NOOP: + cb_data->status = BIT(SVC_STATUS_RECONFIG_BUFFER_SUBMITTED); + cb_data->kaddr1 = svc_pa_to_va(res.a1); + break; + case COMMAND_RECONFIG_STATUS: + cb_data->status = BIT(SVC_STATUS_RECONFIG_COMPLETED); + break; + case COMMAND_RSU_UPDATE: + cb_data->status = BIT(SVC_STATUS_RSU_OK); + break; + default: + pr_warn("it shouldn't happen\n"); + break; + } + + pr_debug("%s: call receive_cb\n", __func__); + p_data->chan->scl->receive_cb(p_data->chan->scl, cb_data); +} + +/** + * svc_normal_to_secure_thread() - the function to run in the kthread + * @data: data pointer for kthread function + * + * Service layer driver creates stratix10_svc_smc_hvc_call kthread on CPU + * node 0, its function stratix10_svc_secure_call_thread is used to handle + * SMC or HVC calls between kernel driver and secure monitor software. + * + * Return: 0 for success or -ENOMEM on error. + */ +static int svc_normal_to_secure_thread(void *data) +{ + struct stratix10_svc_controller + *ctrl = (struct stratix10_svc_controller *)data; + struct stratix10_svc_data *pdata; + struct stratix10_svc_cb_data *cbdata; + struct arm_smccc_res res; + unsigned long a0, a1, a2; + int ret_fifo = 0; + + pdata = kmalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + cbdata = kmalloc(sizeof(*cbdata), GFP_KERNEL); + if (!cbdata) { + kfree(pdata); + return -ENOMEM; + } + + /* default set, to remove build warning */ + a0 = INTEL_SIP_SMC_FPGA_CONFIG_LOOPBACK; + a1 = 0; + a2 = 0; + + pr_debug("smc_hvc_shm_thread is running\n"); + + while (!kthread_should_stop()) { + ret_fifo = kfifo_out_spinlocked(&ctrl->svc_fifo, + pdata, sizeof(*pdata), + &ctrl->svc_fifo_lock); + + if (!ret_fifo) + continue; + + pr_debug("get from FIFO pa=0x%016x, command=%u, size=%u\n", + (unsigned int)pdata->paddr, pdata->command, + (unsigned int)pdata->size); + + switch (pdata->command) { + case COMMAND_RECONFIG_DATA_CLAIM: + svc_thread_cmd_data_claim(ctrl, pdata, cbdata); + continue; + case COMMAND_RECONFIG: + a0 = INTEL_SIP_SMC_FPGA_CONFIG_START; + pr_debug("conf_type=%u\n", (unsigned int)pdata->flag); + a1 = pdata->flag; + a2 = 0; + break; + case COMMAND_RECONFIG_DATA_SUBMIT: + a0 = INTEL_SIP_SMC_FPGA_CONFIG_WRITE; + a1 = (unsigned long)pdata->paddr; + a2 = (unsigned long)pdata->size; + break; + case COMMAND_RECONFIG_STATUS: + a0 = INTEL_SIP_SMC_FPGA_CONFIG_ISDONE; + a1 = 0; + a2 = 0; + break; + case COMMAND_RSU_STATUS: + a0 = INTEL_SIP_SMC_RSU_STATUS; + a1 = 0; + a2 = 0; + break; + case COMMAND_RSU_UPDATE: + a0 = INTEL_SIP_SMC_RSU_UPDATE; + a1 = pdata->arg[0]; + a2 = 0; + break; + default: + pr_warn("it shouldn't happen\n"); + break; + } + pr_debug("%s: before SMC call -- a0=0x%016x a1=0x%016x", + __func__, (unsigned int)a0, (unsigned int)a1); + pr_debug(" a2=0x%016x\n", (unsigned int)a2); + + ctrl->invoke_fn(a0, a1, a2, 0, 0, 0, 0, 0, &res); + + pr_debug("%s: after SMC call -- res.a0=0x%016x", + __func__, (unsigned int)res.a0); + pr_debug(" res.a1=0x%016x, res.a2=0x%016x", + (unsigned int)res.a1, (unsigned int)res.a2); + pr_debug(" res.a3=0x%016x\n", (unsigned int)res.a3); + + if 
(pdata->command == COMMAND_RSU_STATUS) { + if (res.a0 == INTEL_SIP_SMC_RSU_ERROR) + cbdata->status = BIT(SVC_STATUS_RSU_ERROR); + else + cbdata->status = BIT(SVC_STATUS_RSU_OK); + + cbdata->kaddr1 = &res; + cbdata->kaddr2 = NULL; + cbdata->kaddr3 = NULL; + pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata); + continue; + } + + switch (res.a0) { + case INTEL_SIP_SMC_STATUS_OK: + svc_thread_recv_status_ok(pdata, cbdata, res); + break; + case INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY: + switch (pdata->command) { + case COMMAND_RECONFIG_DATA_SUBMIT: + svc_thread_cmd_data_claim(ctrl, + pdata, cbdata); + break; + case COMMAND_RECONFIG_STATUS: + svc_thread_cmd_config_status(ctrl, + pdata, cbdata); + break; + default: + pr_warn("it shouldn't happen\n"); + break; + } + break; + case INTEL_SIP_SMC_FPGA_CONFIG_STATUS_REJECTED: + pr_debug("%s: STATUS_REJECTED\n", __func__); + break; + case INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR: + pr_err("%s: STATUS_ERROR\n", __func__); + cbdata->status = BIT(SVC_STATUS_RECONFIG_ERROR); + cbdata->kaddr1 = NULL; + cbdata->kaddr2 = NULL; + cbdata->kaddr3 = NULL; + pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata); + break; + default: + pr_warn("it shouldn't happen\n"); + break; + } + }; + + kfree(cbdata); + kfree(pdata); + + return 0; +} + +/** + * svc_normal_to_secure_shm_thread() - the function to run in the kthread + * @data: data pointer for kthread function + * + * Service layer driver creates stratix10_svc_smc_hvc_shm kthread on CPU + * node 0, its function stratix10_svc_secure_shm_thread is used to query the + * physical address of memory block reserved by secure monitor software at + * secure world. + * + * svc_normal_to_secure_shm_thread() calls do_exit() directly since it is a + * standlone thread for which no one will call kthread_stop() or return when + * 'kthread_should_stop()' is true. + */ +static int svc_normal_to_secure_shm_thread(void *data) +{ + struct stratix10_svc_sh_memory + *sh_mem = (struct stratix10_svc_sh_memory *)data; + struct arm_smccc_res res; + + /* SMC or HVC call to get shared memory info from secure world */ + sh_mem->invoke_fn(INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM, + 0, 0, 0, 0, 0, 0, 0, &res); + if (res.a0 == INTEL_SIP_SMC_STATUS_OK) { + sh_mem->addr = res.a1; + sh_mem->size = res.a2; + } else { + pr_err("%s: after SMC call -- res.a0=0x%016x", __func__, + (unsigned int)res.a0); + sh_mem->addr = 0; + sh_mem->size = 0; + } + + complete(&sh_mem->sync_complete); + do_exit(0); +} + +/** + * svc_get_sh_memory() - get memory block reserved by secure monitor SW + * @pdev: pointer to service layer device + * @sh_memory: pointer to service shared memory structure + * + * Return: zero for successfully getting the physical address of memory block + * reserved by secure monitor software, or negative value on error. 
+ */ +static int svc_get_sh_memory(struct platform_device *pdev, + struct stratix10_svc_sh_memory *sh_memory) +{ + struct device *dev = &pdev->dev; + struct task_struct *sh_memory_task; + unsigned int cpu = 0; + + init_completion(&sh_memory->sync_complete); + + /* smc or hvc call happens on cpu 0 bound kthread */ + sh_memory_task = kthread_create_on_node(svc_normal_to_secure_shm_thread, + (void *)sh_memory, + cpu_to_node(cpu), + "svc_smc_hvc_shm_thread"); + if (IS_ERR(sh_memory_task)) { + dev_err(dev, "fail to create stratix10_svc_smc_shm_thread\n"); + return -EINVAL; + } + + wake_up_process(sh_memory_task); + + if (!wait_for_completion_timeout(&sh_memory->sync_complete, 10 * HZ)) { + dev_err(dev, + "timeout to get sh-memory paras from secure world\n"); + return -ETIMEDOUT; + } + + if (!sh_memory->addr || !sh_memory->size) { + dev_err(dev, + "fails to get shared memory info from secure world\n"); + return -ENOMEM; + } + + dev_dbg(dev, "SM software provides paddr: 0x%016x, size: 0x%08x\n", + (unsigned int)sh_memory->addr, + (unsigned int)sh_memory->size); + + return 0; +} + +/** + * svc_create_memory_pool() - create a memory pool from reserved memory block + * @pdev: pointer to service layer device + * @sh_memory: pointer to service shared memory structure + * + * Return: pool allocated from reserved memory block or ERR_PTR() on error. + */ +static struct gen_pool * +svc_create_memory_pool(struct platform_device *pdev, + struct stratix10_svc_sh_memory *sh_memory) +{ + struct device *dev = &pdev->dev; + struct gen_pool *genpool; + unsigned long vaddr; + phys_addr_t paddr; + size_t size; + phys_addr_t begin; + phys_addr_t end; + void *va; + size_t page_mask = PAGE_SIZE - 1; + int min_alloc_order = 3; + int ret; + + begin = roundup(sh_memory->addr, PAGE_SIZE); + end = rounddown(sh_memory->addr + sh_memory->size, PAGE_SIZE); + paddr = begin; + size = end - begin; + va = memremap(paddr, size, MEMREMAP_WC); + if (!va) { + dev_err(dev, "fail to remap shared memory\n"); + return ERR_PTR(-EINVAL); + } + vaddr = (unsigned long)va; + dev_dbg(dev, + "reserved memory vaddr: %p, paddr: 0x%16x size: 0x%8x\n", + va, (unsigned int)paddr, (unsigned int)size); + if ((vaddr & page_mask) || (paddr & page_mask) || + (size & page_mask)) { + dev_err(dev, "page is not aligned\n"); + return ERR_PTR(-EINVAL); + } + genpool = gen_pool_create(min_alloc_order, -1); + if (!genpool) { + dev_err(dev, "fail to create genpool\n"); + return ERR_PTR(-ENOMEM); + } + gen_pool_set_algo(genpool, gen_pool_best_fit, NULL); + ret = gen_pool_add_virt(genpool, vaddr, paddr, size, -1); + if (ret) { + dev_err(dev, "fail to add memory chunk to the pool\n"); + gen_pool_destroy(genpool); + return ERR_PTR(ret); + } + + return genpool; +} + +/** + * svc_smccc_smc() - secure monitor call between normal and secure world + * @a0: argument passed in registers 0 + * @a1: argument passed in registers 1 + * @a2: argument passed in registers 2 + * @a3: argument passed in registers 3 + * @a4: argument passed in registers 4 + * @a5: argument passed in registers 5 + * @a6: argument passed in registers 6 + * @a7: argument passed in registers 7 + * @res: result values from register 0 to 3 + */ +static void svc_smccc_smc(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, + unsigned long a4, unsigned long a5, + unsigned long a6, unsigned long a7, + struct arm_smccc_res *res) +{ + arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res); +} + +/** + * svc_smccc_hvc() - hypervisor call between normal and secure world + * @a0: argument passed 
in registers 0 + * @a1: argument passed in registers 1 + * @a2: argument passed in registers 2 + * @a3: argument passed in registers 3 + * @a4: argument passed in registers 4 + * @a5: argument passed in registers 5 + * @a6: argument passed in registers 6 + * @a7: argument passed in registers 7 + * @res: result values from register 0 to 3 + */ +static void svc_smccc_hvc(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, + unsigned long a4, unsigned long a5, + unsigned long a6, unsigned long a7, + struct arm_smccc_res *res) +{ + arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res); +} + +/** + * get_invoke_func() - invoke SMC or HVC call + * @dev: pointer to device + * + * Return: function pointer to svc_smccc_smc or svc_smccc_hvc. + */ +static svc_invoke_fn *get_invoke_func(struct device *dev) +{ + const char *method; + + if (of_property_read_string(dev->of_node, "method", &method)) { + dev_warn(dev, "missing \"method\" property\n"); + return ERR_PTR(-ENXIO); + } + + if (!strcmp(method, "smc")) + return svc_smccc_smc; + if (!strcmp(method, "hvc")) + return svc_smccc_hvc; + + dev_warn(dev, "invalid \"method\" property: %s\n", method); + + return ERR_PTR(-EINVAL); +} + +/** + * stratix10_svc_request_channel_byname() - request a service channel + * @client: pointer to service client + * @name: service client name + * + * This function is used by service client to request a service channel. + * + * Return: a pointer to channel assigned to the client on success, + * or ERR_PTR() on error. + */ +struct stratix10_svc_chan *stratix10_svc_request_channel_byname( + struct stratix10_svc_client *client, const char *name) +{ + struct device *dev = client->dev; + struct stratix10_svc_controller *controller; + struct stratix10_svc_chan *chan = NULL; + unsigned long flag; + int i; + + /* if probe was called after client's, or error on probe */ + if (list_empty(&svc_ctrl)) + return ERR_PTR(-EPROBE_DEFER); + + controller = list_first_entry(&svc_ctrl, + struct stratix10_svc_controller, node); + for (i = 0; i < SVC_NUM_CHANNEL; i++) { + if (!strcmp(controller->chans[i].name, name)) { + chan = &controller->chans[i]; + break; + } + } + + /* if there was no channel match */ + if (i == SVC_NUM_CHANNEL) { + dev_err(dev, "%s: channel not allocated\n", __func__); + return ERR_PTR(-EINVAL); + } + + if (chan->scl || !try_module_get(controller->dev->driver->owner)) { + dev_dbg(dev, "%s: svc not free\n", __func__); + return ERR_PTR(-EBUSY); + } + + spin_lock_irqsave(&chan->lock, flag); + chan->scl = client; + chan->ctrl->num_active_client++; + spin_unlock_irqrestore(&chan->lock, flag); + + return chan; +} +EXPORT_SYMBOL_GPL(stratix10_svc_request_channel_byname); + +/** + * stratix10_svc_free_channel() - free service channel + * @chan: service channel to be freed + * + * This function is used by service client to free a service channel. 
+ */ +void stratix10_svc_free_channel(struct stratix10_svc_chan *chan) +{ + unsigned long flag; + + spin_lock_irqsave(&chan->lock, flag); + chan->scl = NULL; + chan->ctrl->num_active_client--; + module_put(chan->ctrl->dev->driver->owner); + spin_unlock_irqrestore(&chan->lock, flag); +} +EXPORT_SYMBOL_GPL(stratix10_svc_free_channel); + +/** + * stratix10_svc_send() - send a message data to the remote + * @chan: service channel assigned to the client + * @msg: message data to be sent, in the format of + * "struct stratix10_svc_client_msg" + * + * This function is used by service client to add a message to the service + * layer driver's queue for being sent to the secure world. + * + * Return: 0 for success, -ENOMEM or -ENOBUFS on error. + */ +int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg) +{ + struct stratix10_svc_client_msg + *p_msg = (struct stratix10_svc_client_msg *)msg; + struct stratix10_svc_data_mem *p_mem; + struct stratix10_svc_data *p_data; + int ret = 0; + unsigned int cpu = 0; + + p_data = kzalloc(sizeof(*p_data), GFP_KERNEL); + if (!p_data) + return -ENOMEM; + + /* first client will create kernel thread */ + if (!chan->ctrl->task) { + chan->ctrl->task = + kthread_create_on_node(svc_normal_to_secure_thread, + (void *)chan->ctrl, + cpu_to_node(cpu), + "svc_smc_hvc_thread"); + if (IS_ERR(chan->ctrl->task)) { + dev_err(chan->ctrl->dev, + "fails to create svc_smc_hvc_thread\n"); + kfree(p_data); + return -EINVAL; + } + kthread_bind(chan->ctrl->task, cpu); + wake_up_process(chan->ctrl->task); + } + + pr_debug("%s: sent P-va=%p, P-com=%x, P-size=%u\n", __func__, + p_msg->payload, p_msg->command, + (unsigned int)p_msg->payload_length); + + if (list_empty(&svc_data_mem)) { + if (p_msg->command == COMMAND_RECONFIG) { + struct stratix10_svc_command_config_type *ct = + (struct stratix10_svc_command_config_type *) + p_msg->payload; + p_data->flag = ct->flags; + } + } else { + list_for_each_entry(p_mem, &svc_data_mem, node) + if (p_mem->vaddr == p_msg->payload) { + p_data->paddr = p_mem->paddr; + break; + } + } + + p_data->command = p_msg->command; + p_data->arg[0] = p_msg->arg[0]; + p_data->arg[1] = p_msg->arg[1]; + p_data->arg[2] = p_msg->arg[2]; + p_data->size = p_msg->payload_length; + p_data->chan = chan; + pr_debug("%s: put to FIFO pa=0x%016x, cmd=%x, size=%u\n", __func__, + (unsigned int)p_data->paddr, p_data->command, + (unsigned int)p_data->size); + ret = kfifo_in_spinlocked(&chan->ctrl->svc_fifo, p_data, + sizeof(*p_data), + &chan->ctrl->svc_fifo_lock); + + kfree(p_data); + + if (!ret) + return -ENOBUFS; + + return 0; +} +EXPORT_SYMBOL_GPL(stratix10_svc_send); + +/** + * stratix10_svc_done() - complete service request transactions + * @chan: service channel assigned to the client + * + * This function should be called when client has finished its request + * or there is an error in the request process. It allows the service layer + * to stop the running thread to have maximize savings in kernel resources. 
+ */ +void stratix10_svc_done(struct stratix10_svc_chan *chan) +{ + /* stop thread when thread is running AND only one active client */ + if (chan->ctrl->task && chan->ctrl->num_active_client <= 1) { + pr_debug("svc_smc_hvc_shm_thread is stopped\n"); + kthread_stop(chan->ctrl->task); + chan->ctrl->task = NULL; + } +} +EXPORT_SYMBOL_GPL(stratix10_svc_done); + +/** + * stratix10_svc_allocate_memory() - allocate memory + * @chan: service channel assigned to the client + * @size: memory size requested by a specific service client + * + * Service layer allocates the requested number of bytes buffer from the + * memory pool, service client uses this function to get allocated buffers. + * + * Return: address of allocated memory on success, or ERR_PTR() on error. + */ +void *stratix10_svc_allocate_memory(struct stratix10_svc_chan *chan, + size_t size) +{ + struct stratix10_svc_data_mem *pmem; + unsigned long va; + phys_addr_t pa; + struct gen_pool *genpool = chan->ctrl->genpool; + size_t s = roundup(size, 1 << genpool->min_alloc_order); + + pmem = devm_kzalloc(chan->ctrl->dev, sizeof(*pmem), GFP_KERNEL); + if (!pmem) + return ERR_PTR(-ENOMEM); + + va = gen_pool_alloc(genpool, s); + if (!va) + return ERR_PTR(-ENOMEM); + + memset((void *)va, 0, s); + pa = gen_pool_virt_to_phys(genpool, va); + + pmem->vaddr = (void *)va; + pmem->paddr = pa; + pmem->size = s; + list_add_tail(&pmem->node, &svc_data_mem); + pr_debug("%s: va=%p, pa=0x%016x\n", __func__, + pmem->vaddr, (unsigned int)pmem->paddr); + + return (void *)va; +} +EXPORT_SYMBOL_GPL(stratix10_svc_allocate_memory); + +/** + * stratix10_svc_free_memory() - free allocated memory + * @chan: service channel assigned to the client + * @kaddr: memory to be freed + * + * This function is used by service client to free allocated buffers. 
+ */ +void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr) +{ + struct stratix10_svc_data_mem *pmem; + size_t size = 0; + + list_for_each_entry(pmem, &svc_data_mem, node) + if (pmem->vaddr == kaddr) { + size = pmem->size; + break; + } + + gen_pool_free(chan->ctrl->genpool, (unsigned long)kaddr, size); + pmem->vaddr = NULL; + list_del(&pmem->node); +} +EXPORT_SYMBOL_GPL(stratix10_svc_free_memory); + +static const struct of_device_id stratix10_svc_drv_match[] = { + {.compatible = "intel,stratix10-svc"}, + {}, +}; + +static int stratix10_svc_drv_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct stratix10_svc_controller *controller; + struct stratix10_svc_chan *chans; + struct gen_pool *genpool; + struct stratix10_svc_sh_memory *sh_memory; + svc_invoke_fn *invoke_fn; + size_t fifo_size; + int ret; + + /* get SMC or HVC function */ + invoke_fn = get_invoke_func(dev); + if (IS_ERR(invoke_fn)) + return -EINVAL; + + sh_memory = devm_kzalloc(dev, sizeof(*sh_memory), GFP_KERNEL); + if (!sh_memory) + return -ENOMEM; + + sh_memory->invoke_fn = invoke_fn; + ret = svc_get_sh_memory(pdev, sh_memory); + if (ret) + return ret; + + genpool = svc_create_memory_pool(pdev, sh_memory); + if (!genpool) + return -ENOMEM; + + /* allocate service controller and supporting channel */ + controller = devm_kzalloc(dev, sizeof(*controller), GFP_KERNEL); + if (!controller) + return -ENOMEM; + + chans = devm_kmalloc_array(dev, SVC_NUM_CHANNEL, + sizeof(*chans), GFP_KERNEL | __GFP_ZERO); + if (!chans) + return -ENOMEM; + + controller->dev = dev; + controller->num_chans = SVC_NUM_CHANNEL; + controller->num_active_client = 0; + controller->chans = chans; + controller->genpool = genpool; + controller->task = NULL; + controller->invoke_fn = invoke_fn; + init_completion(&controller->complete_status); + + fifo_size = sizeof(struct stratix10_svc_data) * SVC_NUM_DATA_IN_FIFO; + ret = kfifo_alloc(&controller->svc_fifo, fifo_size, GFP_KERNEL); + if (ret) { + dev_err(dev, "fails to allocate FIFO\n"); + return ret; + } + spin_lock_init(&controller->svc_fifo_lock); + + chans[0].scl = NULL; + chans[0].ctrl = controller; + chans[0].name = SVC_CLIENT_FPGA; + spin_lock_init(&chans[0].lock); + + chans[1].scl = NULL; + chans[1].ctrl = controller; + chans[1].name = SVC_CLIENT_RSU; + spin_lock_init(&chans[1].lock); + + list_add_tail(&controller->node, &svc_ctrl); + platform_set_drvdata(pdev, controller); + + pr_info("Intel Service Layer Driver Initialized\n"); + + return ret; +} + +static int stratix10_svc_drv_remove(struct platform_device *pdev) +{ + struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev); + + kfifo_free(&ctrl->svc_fifo); + if (ctrl->task) { + kthread_stop(ctrl->task); + ctrl->task = NULL; + } + if (ctrl->genpool) + gen_pool_destroy(ctrl->genpool); + list_del(&ctrl->node); + + return 0; +} + +static struct platform_driver stratix10_svc_driver = { + .probe = stratix10_svc_drv_probe, + .remove = stratix10_svc_drv_remove, + .driver = { + .name = "stratix10-svc", + .of_match_table = stratix10_svc_drv_match, + }, +}; + +static int __init stratix10_svc_init(void) +{ + struct device_node *fw_np; + struct device_node *np; + int ret; + + fw_np = of_find_node_by_name(NULL, "firmware"); + if (!fw_np) + return -ENODEV; + + np = of_find_matching_node(fw_np, stratix10_svc_drv_match); + if (!np) { + of_node_put(fw_np); + return -ENODEV; + } + + of_node_put(np); + ret = of_platform_populate(fw_np, stratix10_svc_drv_match, NULL, NULL); + of_node_put(fw_np); + if (ret) + 
return ret; + + return platform_driver_register(&stratix10_svc_driver); +} + +static void __exit stratix10_svc_exit(void) +{ + return platform_driver_unregister(&stratix10_svc_driver); +} + +subsys_initcall(stratix10_svc_init); +module_exit(stratix10_svc_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Intel Stratix10 Service Layer Driver"); +MODULE_AUTHOR("Richard Gong <richard.gong@intel.com>"); +MODULE_ALIAS("platform:stratix10-svc"); diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig index 1ebcef4bab5b..0bb7b5cd6cdc 100644 --- a/drivers/fpga/Kconfig +++ b/drivers/fpga/Kconfig @@ -56,6 +56,12 @@ config FPGA_MGR_ZYNQ_FPGA help FPGA manager driver support for Xilinx Zynq FPGAs. +config FPGA_MGR_STRATIX10_SOC + tristate "Intel Stratix10 SoC FPGA Manager" + depends on (ARCH_STRATIX10 && INTEL_STRATIX10_SERVICE) + help + FPGA manager driver support for the Intel Stratix10 SoC. + config FPGA_MGR_XILINX_SPI tristate "Xilinx Configuration over Slave Serial (SPI)" depends on SPI diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile index 7a2d73ba7122..c0dd4c82fbdb 100644 --- a/drivers/fpga/Makefile +++ b/drivers/fpga/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_FPGA_MGR_ICE40_SPI) += ice40-spi.o obj-$(CONFIG_FPGA_MGR_MACHXO2_SPI) += machxo2-spi.o obj-$(CONFIG_FPGA_MGR_SOCFPGA) += socfpga.o obj-$(CONFIG_FPGA_MGR_SOCFPGA_A10) += socfpga-a10.o +obj-$(CONFIG_FPGA_MGR_STRATIX10_SOC) += stratix10-soc.o obj-$(CONFIG_FPGA_MGR_TS73XX) += ts73xx-fpga.o obj-$(CONFIG_FPGA_MGR_XILINX_SPI) += xilinx-spi.o obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA) += zynq-fpga.o diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c index 610a1558e0ed..35c3aa5792e2 100644 --- a/drivers/fpga/altera-cvp.c +++ b/drivers/fpga/altera-cvp.c @@ -403,6 +403,7 @@ static int altera_cvp_probe(struct pci_dev *pdev, struct altera_cvp_conf *conf; struct fpga_manager *mgr; u16 cmd, val; + u32 regval; int ret; /* @@ -416,6 +417,14 @@ static int altera_cvp_probe(struct pci_dev *pdev, return -ENODEV; } + pci_read_config_dword(pdev, VSE_CVP_STATUS, ®val); + if (!(regval & VSE_CVP_STATUS_CVP_EN)) { + dev_err(&pdev->dev, + "CVP is disabled for this device: CVP_STATUS Reg 0x%x\n", + regval); + return -ENODEV; + } + conf = devm_kzalloc(&pdev->dev, sizeof(*conf), GFP_KERNEL); if (!conf) return -ENOMEM; @@ -466,18 +475,11 @@ static int altera_cvp_probe(struct pci_dev *pdev, if (ret) goto err_unmap; - ret = driver_create_file(&altera_cvp_driver.driver, - &driver_attr_chkcfg); - if (ret) { - dev_err(&pdev->dev, "Can't create sysfs chkcfg file\n"); - fpga_mgr_unregister(mgr); - goto err_unmap; - } - return 0; err_unmap: - pci_iounmap(pdev, conf->map); + if (conf->map) + pci_iounmap(pdev, conf->map); pci_release_region(pdev, CVP_BAR); err_disable: cmd &= ~PCI_COMMAND_MEMORY; @@ -491,16 +493,39 @@ static void altera_cvp_remove(struct pci_dev *pdev) struct altera_cvp_conf *conf = mgr->priv; u16 cmd; - driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg); fpga_mgr_unregister(mgr); - pci_iounmap(pdev, conf->map); + if (conf->map) + pci_iounmap(pdev, conf->map); pci_release_region(pdev, CVP_BAR); pci_read_config_word(pdev, PCI_COMMAND, &cmd); cmd &= ~PCI_COMMAND_MEMORY; pci_write_config_word(pdev, PCI_COMMAND, cmd); } -module_pci_driver(altera_cvp_driver); +static int __init altera_cvp_init(void) +{ + int ret; + + ret = pci_register_driver(&altera_cvp_driver); + if (ret) + return ret; + + ret = driver_create_file(&altera_cvp_driver.driver, + &driver_attr_chkcfg); + if (ret) + pr_warn("Can't create sysfs chkcfg file\n"); + + 
return 0; +} + +static void __exit altera_cvp_exit(void) +{ + driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg); + pci_unregister_driver(&altera_cvp_driver); +} + +module_init(altera_cvp_init); +module_exit(altera_cvp_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>"); diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c index 33aafda50af5..8c18beec6b57 100644 --- a/drivers/fpga/altera-ps-spi.c +++ b/drivers/fpga/altera-ps-spi.c @@ -75,6 +75,12 @@ static struct altera_ps_data a10_data = { .t_st2ck_us = 10, /* min(t_ST2CK) */ }; +/* Array index is enum altera_ps_devtype */ +static const struct altera_ps_data *altera_ps_data_map[] = { + &c5_data, + &a10_data, +}; + static const struct of_device_id of_ef_match[] = { { .compatible = "altr,fpga-passive-serial", .data = &c5_data }, { .compatible = "altr,fpga-arria10-passive-serial", .data = &a10_data }, @@ -234,6 +240,22 @@ static const struct fpga_manager_ops altera_ps_ops = { .write_complete = altera_ps_write_complete, }; +static const struct altera_ps_data *id_to_data(const struct spi_device_id *id) +{ + kernel_ulong_t devtype = id->driver_data; + const struct altera_ps_data *data; + + /* someone added a altera_ps_devtype without adding to the map array */ + if (devtype >= ARRAY_SIZE(altera_ps_data_map)) + return NULL; + + data = altera_ps_data_map[devtype]; + if (!data || data->devtype != devtype) + return NULL; + + return data; +} + static int altera_ps_probe(struct spi_device *spi) { struct altera_ps_conf *conf; @@ -244,11 +266,17 @@ static int altera_ps_probe(struct spi_device *spi) if (!conf) return -ENOMEM; - of_id = of_match_device(of_ef_match, &spi->dev); - if (!of_id) - return -ENODEV; + if (spi->dev.of_node) { + of_id = of_match_device(of_ef_match, &spi->dev); + if (!of_id) + return -ENODEV; + conf->data = of_id->data; + } else { + conf->data = id_to_data(spi_get_device_id(spi)); + if (!conf->data) + return -ENODEV; + } - conf->data = of_id->data; conf->spi = spi; conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_LOW); if (IS_ERR(conf->config)) { @@ -294,7 +322,9 @@ static int altera_ps_remove(struct spi_device *spi) } static const struct spi_device_id altera_ps_spi_ids[] = { - {"cyclone-ps-spi", 0}, + { "cyclone-ps-spi", CYCLONE5 }, + { "fpga-passive-serial", CYCLONE5 }, + { "fpga-arria10-passive-serial", ARRIA10 }, {} }; MODULE_DEVICE_TABLE(spi, altera_ps_spi_ids); diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c index 0b840531ef33..fe5a5578fbf7 100644 --- a/drivers/fpga/dfl-fme-pr.c +++ b/drivers/fpga/dfl-fme-pr.c @@ -444,10 +444,8 @@ static void pr_mgmt_uinit(struct platform_device *pdev, struct dfl_feature *feature) { struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); - struct dfl_fme *priv; mutex_lock(&pdata->lock); - priv = dfl_fpga_pdata_get_private(pdata); dfl_fme_destroy_regions(pdata); dfl_fme_destroy_bridges(pdata); diff --git a/drivers/fpga/dfl-fme-region.c b/drivers/fpga/dfl-fme-region.c index ec134ec93f08..1eeb42af1012 100644 --- a/drivers/fpga/dfl-fme-region.c +++ b/drivers/fpga/dfl-fme-region.c @@ -64,7 +64,7 @@ eprobe_mgr_put: static int fme_region_remove(struct platform_device *pdev) { - struct fpga_region *region = dev_get_drvdata(&pdev->dev); + struct fpga_region *region = platform_get_drvdata(pdev); struct fpga_manager *mgr = region->mgr; fpga_region_unregister(region); diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c index 122286fd255a..75f64abf9c81 100644 --- 
a/drivers/fpga/of-fpga-region.c +++ b/drivers/fpga/of-fpga-region.c @@ -421,7 +421,7 @@ static int of_fpga_region_probe(struct platform_device *pdev) goto eprobe_mgr_put; of_platform_populate(np, fpga_region_of_match, NULL, ®ion->dev); - dev_set_drvdata(dev, region); + platform_set_drvdata(pdev, region); dev_info(dev, "FPGA Region probed\n"); diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c new file mode 100644 index 000000000000..a1a09e04fab8 --- /dev/null +++ b/drivers/fpga/stratix10-soc.c @@ -0,0 +1,535 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * FPGA Manager Driver for Intel Stratix10 SoC + * + * Copyright (C) 2018 Intel Corporation + */ +#include <linux/completion.h> +#include <linux/fpga/fpga-mgr.h> +#include <linux/firmware/intel/stratix10-svc-client.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> + +/* + * FPGA programming requires a higher level of privilege (EL3), per the SoC + * design. + */ +#define NUM_SVC_BUFS 4 +#define SVC_BUF_SIZE SZ_512K + +/* Indicates buffer is in use if set */ +#define SVC_BUF_LOCK 0 + +#define S10_BUFFER_TIMEOUT (msecs_to_jiffies(SVC_RECONFIG_BUFFER_TIMEOUT_MS)) +#define S10_RECONFIG_TIMEOUT (msecs_to_jiffies(SVC_RECONFIG_REQUEST_TIMEOUT_MS)) + +/* + * struct s10_svc_buf + * buf: virtual address of buf provided by service layer + * lock: locked if buffer is in use + */ +struct s10_svc_buf { + char *buf; + unsigned long lock; +}; + +struct s10_priv { + struct stratix10_svc_chan *chan; + struct stratix10_svc_client client; + struct completion status_return_completion; + struct s10_svc_buf svc_bufs[NUM_SVC_BUFS]; + unsigned long status; +}; + +static int s10_svc_send_msg(struct s10_priv *priv, + enum stratix10_svc_command_code command, + void *payload, u32 payload_length) +{ + struct stratix10_svc_chan *chan = priv->chan; + struct device *dev = priv->client.dev; + struct stratix10_svc_client_msg msg; + int ret; + + dev_dbg(dev, "%s cmd=%d payload=%p length=%d\n", + __func__, command, payload, payload_length); + + msg.command = command; + msg.payload = payload; + msg.payload_length = payload_length; + + ret = stratix10_svc_send(chan, &msg); + dev_dbg(dev, "stratix10_svc_send returned status %d\n", ret); + + return ret; +} + +/* + * Free buffers allocated from the service layer's pool that are not in use. + * Return true when all buffers are freed. + */ +static bool s10_free_buffers(struct fpga_manager *mgr) +{ + struct s10_priv *priv = mgr->priv; + uint num_free = 0; + uint i; + + for (i = 0; i < NUM_SVC_BUFS; i++) { + if (!priv->svc_bufs[i].buf) { + num_free++; + continue; + } + + if (!test_and_set_bit_lock(SVC_BUF_LOCK, + &priv->svc_bufs[i].lock)) { + stratix10_svc_free_memory(priv->chan, + priv->svc_bufs[i].buf); + priv->svc_bufs[i].buf = NULL; + num_free++; + } + } + + return num_free == NUM_SVC_BUFS; +} + +/* + * Returns count of how many buffers are not in use. + */ +static uint s10_free_buffer_count(struct fpga_manager *mgr) +{ + struct s10_priv *priv = mgr->priv; + uint num_free = 0; + uint i; + + for (i = 0; i < NUM_SVC_BUFS; i++) + if (!priv->svc_bufs[i].buf) + num_free++; + + return num_free; +} + +/* + * s10_unlock_bufs + * Given the returned buffer address, match that address to our buffer struct + * and unlock that buffer. This marks it as available to be refilled and sent + * (or freed). 
+ * priv: private data + * kaddr: kernel address of buffer that was returned from service layer + */ +static void s10_unlock_bufs(struct s10_priv *priv, void *kaddr) +{ + uint i; + + if (!kaddr) + return; + + for (i = 0; i < NUM_SVC_BUFS; i++) + if (priv->svc_bufs[i].buf == kaddr) { + clear_bit_unlock(SVC_BUF_LOCK, + &priv->svc_bufs[i].lock); + return; + } + + WARN(1, "Unknown buffer returned from service layer %p\n", kaddr); +} + +/* + * s10_receive_callback - callback used by the service layer to provide the + * client (this driver) with messages received through the mailbox. + * client: service layer client struct + * data: message from service layer + */ +static void s10_receive_callback(struct stratix10_svc_client *client, + struct stratix10_svc_cb_data *data) +{ + struct s10_priv *priv = client->priv; + u32 status; + int i; + + WARN_ONCE(!data, "%s: stratix10_svc_rc_data = NULL", __func__); + + status = data->status; + + /* + * Here we set status bits as we receive them. Elsewhere, we always use + * test_and_clear_bit() to check status in priv->status. + */ + for (i = 0; i <= SVC_STATUS_RECONFIG_ERROR; i++) + if (status & (1 << i)) + set_bit(i, &priv->status); + + if (status & BIT(SVC_STATUS_RECONFIG_BUFFER_DONE)) { + s10_unlock_bufs(priv, data->kaddr1); + s10_unlock_bufs(priv, data->kaddr2); + s10_unlock_bufs(priv, data->kaddr3); + } + + complete(&priv->status_return_completion); +} + +/* + * s10_ops_write_init - prepare for FPGA reconfiguration by requesting + * partial reconfig and allocating buffers from the service layer. + */ +static int s10_ops_write_init(struct fpga_manager *mgr, + struct fpga_image_info *info, + const char *buf, size_t count) +{ + struct s10_priv *priv = mgr->priv; + struct device *dev = priv->client.dev; + struct stratix10_svc_command_config_type ctype; + char *kbuf; + uint i; + int ret; + + ctype.flags = 0; + if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) { + dev_dbg(dev, "Requesting partial reconfiguration.\n"); + ctype.flags |= BIT(COMMAND_RECONFIG_FLAG_PARTIAL); + } else { + dev_dbg(dev, "Requesting full reconfiguration.\n"); + } + + reinit_completion(&priv->status_return_completion); + ret = s10_svc_send_msg(priv, COMMAND_RECONFIG, + &ctype, sizeof(ctype)); + if (ret < 0) + goto init_done; + + ret = wait_for_completion_interruptible_timeout( + &priv->status_return_completion, S10_RECONFIG_TIMEOUT); + if (!ret) { + dev_err(dev, "timeout waiting for RECONFIG_REQUEST\n"); + ret = -ETIMEDOUT; + goto init_done; + } + if (ret < 0) { + dev_err(dev, "error (%d) waiting for RECONFIG_REQUEST\n", ret); + goto init_done; + } + + ret = 0; + if (!test_and_clear_bit(SVC_STATUS_RECONFIG_REQUEST_OK, + &priv->status)) { + ret = -ETIMEDOUT; + goto init_done; + } + + /* Allocate buffers from the service layer's pool. */ + for (i = 0; i < NUM_SVC_BUFS; i++) { + kbuf = stratix10_svc_allocate_memory(priv->chan, SVC_BUF_SIZE); + if (!kbuf) { + s10_free_buffers(mgr); + ret = -ENOMEM; + goto init_done; + } + + priv->svc_bufs[i].buf = kbuf; + priv->svc_bufs[i].lock = 0; + } + +init_done: + stratix10_svc_done(priv->chan); + return ret; +} + +/* + * s10_send_buf - send a buffer to the service layer queue + * mgr: fpga manager struct + * buf: fpga image buffer + * count: size of buf in bytes + * Returns # of bytes transferred or -ENOBUFS if all the buffers are in use + * or if the service queue is full. Never returns 0.
+ */ +static int s10_send_buf(struct fpga_manager *mgr, const char *buf, size_t count) +{ + struct s10_priv *priv = mgr->priv; + struct device *dev = priv->client.dev; + void *svc_buf; + size_t xfer_sz; + int ret; + uint i; + + /* get/lock a buffer that's not being used */ + for (i = 0; i < NUM_SVC_BUFS; i++) + if (!test_and_set_bit_lock(SVC_BUF_LOCK, + &priv->svc_bufs[i].lock)) + break; + + if (i == NUM_SVC_BUFS) + return -ENOBUFS; + + xfer_sz = count < SVC_BUF_SIZE ? count : SVC_BUF_SIZE; + + svc_buf = priv->svc_bufs[i].buf; + memcpy(svc_buf, buf, xfer_sz); + ret = s10_svc_send_msg(priv, COMMAND_RECONFIG_DATA_SUBMIT, + svc_buf, xfer_sz); + if (ret < 0) { + dev_err(dev, + "Error while sending data to service layer (%d)", ret); + clear_bit_unlock(SVC_BUF_LOCK, &priv->svc_bufs[i].lock); + return ret; + } + + return xfer_sz; +} + +/* + * Send an FPGA image to privileged layers to write to the FPGA. When done + * sending, free all service layer buffers we allocated in write_init. + */ +static int s10_ops_write(struct fpga_manager *mgr, const char *buf, + size_t count) +{ + struct s10_priv *priv = mgr->priv; + struct device *dev = priv->client.dev; + long wait_status; + int sent = 0; + int ret = 0; + + /* + * Loop waiting for buffers to be returned. When a buffer is returned, + * reuse it to send more data or free it if all data has been sent. + */ + while (count > 0 || s10_free_buffer_count(mgr) != NUM_SVC_BUFS) { + reinit_completion(&priv->status_return_completion); + + if (count > 0) { + sent = s10_send_buf(mgr, buf, count); + if (sent < 0) + continue; + + count -= sent; + buf += sent; + } else { + if (s10_free_buffers(mgr)) + return 0; + + ret = s10_svc_send_msg( + priv, COMMAND_RECONFIG_DATA_CLAIM, + NULL, 0); + if (ret < 0) + break; + } + + /* + * If callback hasn't already happened, wait for buffers to be + * returned from service layer + */ + wait_status = 1; /* not timed out */ + if (!priv->status) + wait_status = wait_for_completion_interruptible_timeout( + &priv->status_return_completion, + S10_BUFFER_TIMEOUT); + + if (test_and_clear_bit(SVC_STATUS_RECONFIG_BUFFER_DONE, + &priv->status) || + test_and_clear_bit(SVC_STATUS_RECONFIG_BUFFER_SUBMITTED, + &priv->status)) { + ret = 0; + continue; + } + + if (test_and_clear_bit(SVC_STATUS_RECONFIG_ERROR, + &priv->status)) { + dev_err(dev, "ERROR - giving up - SVC_STATUS_RECONFIG_ERROR\n"); + ret = -EFAULT; + break; + } + + if (!wait_status) { + dev_err(dev, "timeout waiting for svc layer buffers\n"); + ret = -ETIMEDOUT; + break; + } + if (wait_status < 0) { + ret = wait_status; + dev_err(dev, + "error (%d) waiting for svc layer buffers\n", + ret); + break; + } + } + + if (!s10_free_buffers(mgr)) + dev_err(dev, "%s not all buffers were freed\n", __func__); + + return ret; +} + +static int s10_ops_write_complete(struct fpga_manager *mgr, + struct fpga_image_info *info) +{ + struct s10_priv *priv = mgr->priv; + struct device *dev = priv->client.dev; + unsigned long timeout; + int ret; + + timeout = usecs_to_jiffies(info->config_complete_timeout_us); + + do { + reinit_completion(&priv->status_return_completion); + + ret = s10_svc_send_msg(priv, COMMAND_RECONFIG_STATUS, NULL, 0); + if (ret < 0) + break; + + ret = wait_for_completion_interruptible_timeout( + &priv->status_return_completion, timeout); + if (!ret) { + dev_err(dev, + "timeout waiting for RECONFIG_COMPLETED\n"); + ret = -ETIMEDOUT; + break; + } + if (ret < 0) { + dev_err(dev, + "error (%d) waiting for RECONFIG_COMPLETED\n", + ret); + break; + } + /* Not error or timeout, so ret
is # of jiffies until timeout */ + timeout = ret; + ret = 0; + + if (test_and_clear_bit(SVC_STATUS_RECONFIG_COMPLETED, + &priv->status)) + break; + + if (test_and_clear_bit(SVC_STATUS_RECONFIG_ERROR, + &priv->status)) { + dev_err(dev, "ERROR - giving up - SVC_STATUS_RECONFIG_ERROR\n"); + ret = -EFAULT; + break; + } + } while (1); + + stratix10_svc_done(priv->chan); + + return ret; +} + +static enum fpga_mgr_states s10_ops_state(struct fpga_manager *mgr) +{ + return FPGA_MGR_STATE_UNKNOWN; +} + +static const struct fpga_manager_ops s10_ops = { + .state = s10_ops_state, + .write_init = s10_ops_write_init, + .write = s10_ops_write, + .write_complete = s10_ops_write_complete, +}; + +static int s10_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct s10_priv *priv; + struct fpga_manager *mgr; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->client.dev = dev; + priv->client.receive_cb = s10_receive_callback; + priv->client.priv = priv; + + priv->chan = stratix10_svc_request_channel_byname(&priv->client, + SVC_CLIENT_FPGA); + if (IS_ERR(priv->chan)) { + dev_err(dev, "couldn't get service channel (%s)\n", + SVC_CLIENT_FPGA); + return PTR_ERR(priv->chan); + } + + init_completion(&priv->status_return_completion); + + mgr = fpga_mgr_create(dev, "Stratix10 SOC FPGA Manager", + &s10_ops, priv); + if (!mgr) { + dev_err(dev, "unable to create FPGA manager\n"); + ret = -ENOMEM; + goto probe_err; + } + + ret = fpga_mgr_register(mgr); + if (ret) { + dev_err(dev, "unable to register FPGA manager\n"); + fpga_mgr_free(mgr); + goto probe_err; + } + + platform_set_drvdata(pdev, mgr); + return ret; + +probe_err: + stratix10_svc_free_channel(priv->chan); + return ret; +} + +static int s10_remove(struct platform_device *pdev) +{ + struct fpga_manager *mgr = platform_get_drvdata(pdev); + struct s10_priv *priv = mgr->priv; + + fpga_mgr_unregister(mgr); + stratix10_svc_free_channel(priv->chan); + + return 0; +} + +static const struct of_device_id s10_of_match[] = { + { .compatible = "intel,stratix10-soc-fpga-mgr", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, s10_of_match); + +static struct platform_driver s10_driver = { + .probe = s10_probe, + .remove = s10_remove, + .driver = { + .name = "Stratix10 SoC FPGA manager", + .of_match_table = of_match_ptr(s10_of_match), + }, +}; + +static int __init s10_init(void) +{ + struct device_node *fw_np; + struct device_node *np; + int ret; + + fw_np = of_find_node_by_name(NULL, "svc"); + if (!fw_np) + return -ENODEV; + + np = of_find_matching_node(fw_np, s10_of_match); + if (!np) { + of_node_put(fw_np); + return -ENODEV; + } + + of_node_put(np); + ret = of_platform_populate(fw_np, s10_of_match, NULL, NULL); + of_node_put(fw_np); + if (ret) + return ret; + + return platform_driver_register(&s10_driver); +} + +static void __exit s10_exit(void) +{ + return platform_driver_unregister(&s10_driver); +} + +module_init(s10_init); +module_exit(s10_exit); + +MODULE_AUTHOR("Alan Tull <atull@kernel.org>"); +MODULE_DESCRIPTION("Intel Stratix 10 SOC FPGA Manager"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c index bb82efeebb9d..57b0e6775958 100644 --- a/drivers/fpga/zynq-fpga.c +++ b/drivers/fpga/zynq-fpga.c @@ -501,6 +501,10 @@ static int zynq_fpga_ops_write_complete(struct fpga_manager *mgr, if (err) return err; + /* Release 'PR' control back to the ICAP */ + zynq_fpga_write(priv, CTRL_OFFSET, + zynq_fpga_read(priv, CTRL_OFFSET) & ~CTRL_PCAP_PR_MASK); + err = 
zynq_fpga_poll_timeout(priv, INT_STS_OFFSET, intr_status, intr_status & IXR_PCFG_DONE_MASK, INIT_POLL_DELAY, diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index fe00b12e4417..ce0ba2062723 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -711,7 +711,6 @@ int vmbus_disconnect_ring(struct vmbus_channel *channel) /* Snapshot the list of subchannels */ spin_lock_irqsave(&channel->lock, flags); list_splice_init(&channel->sc_list, &list); - channel->num_sc = 0; spin_unlock_irqrestore(&channel->lock, flags); list_for_each_entry_safe(cur_channel, tmp, &list, sc_list) { diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 6277597d3d58..82e673671087 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -405,7 +405,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel) primary_channel = channel->primary_channel; spin_lock_irqsave(&primary_channel->lock, flags); list_del(&channel->sc_list); - primary_channel->num_sc--; spin_unlock_irqrestore(&primary_channel->lock, flags); } @@ -483,7 +482,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) newchannel->primary_channel = channel; spin_lock_irqsave(&channel->lock, flags); list_add_tail(&newchannel->sc_list, &channel->sc_list); - channel->num_sc++; spin_unlock_irqrestore(&channel->lock, flags); } else { goto err_free_chan; @@ -1239,49 +1237,6 @@ cleanup: return ret; } -/* - * Retrieve the (sub) channel on which to send an outgoing request. - * When a primary channel has multiple sub-channels, we try to - * distribute the load equally amongst all available channels. - */ -struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary) -{ - struct list_head *cur, *tmp; - int cur_cpu; - struct vmbus_channel *cur_channel; - struct vmbus_channel *outgoing_channel = primary; - int next_channel; - int i = 1; - - if (list_empty(&primary->sc_list)) - return outgoing_channel; - - next_channel = primary->next_oc++; - - if (next_channel > (primary->num_sc)) { - primary->next_oc = 0; - return outgoing_channel; - } - - cur_cpu = hv_cpu_number_to_vp_number(smp_processor_id()); - list_for_each_safe(cur, tmp, &primary->sc_list) { - cur_channel = list_entry(cur, struct vmbus_channel, sc_list); - if (cur_channel->state != CHANNEL_OPENED_STATE) - continue; - - if (cur_channel->target_vp == cur_cpu) - return cur_channel; - - if (i == next_channel) - return cur_channel; - - i++; - } - - return outgoing_channel; -} -EXPORT_SYMBOL_GPL(vmbus_get_outgoing_channel); - static void invoke_sc_cb(struct vmbus_channel *primary_channel) { struct list_head *cur, *tmp; diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c index 332d7c34be5c..166c2501de17 100644 --- a/drivers/hv/hv.c +++ b/drivers/hv/hv.c @@ -33,9 +33,7 @@ #include "hyperv_vmbus.h" /* The one and only */ -struct hv_context hv_context = { - .synic_initialized = false, -}; +struct hv_context hv_context; /* * If false, we're using the old mechanism for stimer0 interrupts @@ -326,8 +324,6 @@ int hv_synic_init(unsigned int cpu) hv_set_synic_state(sctrl.as_uint64); - hv_context.synic_initialized = true; - /* * Register the per-cpu clockevent source. 
*/ @@ -373,7 +369,8 @@ int hv_synic_cleanup(unsigned int cpu) bool channel_found = false; unsigned long flags; - if (!hv_context.synic_initialized) + hv_get_synic_state(sctrl.as_uint64); + if (sctrl.enable != 1) return -EFAULT; /* @@ -435,7 +432,6 @@ int hv_synic_cleanup(unsigned int cpu) hv_set_siefp(siefp.as_uint64); /* Disable the global synic bit */ - hv_get_synic_state(sctrl.as_uint64); sctrl.enable = 0; hv_set_synic_state(sctrl.as_uint64); diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c index d6106e1a0d4a..5054d1105236 100644 --- a/drivers/hv/hv_kvp.c +++ b/drivers/hv/hv_kvp.c @@ -437,7 +437,7 @@ kvp_send_key(struct work_struct *dummy) val32 = in_msg->body.kvp_set.data.value_u32; message->body.kvp_set.data.value_size = sprintf(message->body.kvp_set.data.value, - "%d", val32) + 1; + "%u", val32) + 1; break; case REG_U64: diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c index 423205077bf6..f10eeb120c8b 100644 --- a/drivers/hv/hv_util.c +++ b/drivers/hv/hv_util.c @@ -483,7 +483,7 @@ MODULE_DEVICE_TABLE(vmbus, id_table); /* The one and only one */ static struct hv_driver util_drv = { - .name = "hv_util", + .name = "hv_utils", .id_table = id_table, .probe = util_probe, .remove = util_remove, diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index 72eaba3d50fc..f17c06a5e74b 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h @@ -230,8 +230,6 @@ struct hv_context { void *tsc_page; - bool synic_initialized; - struct hv_per_cpu_context __percpu *cpu_context; /* diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 3726eacdf65d..f417b06e11c5 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -513,6 +513,14 @@ config MISC_RTSX tristate default MISC_RTSX_PCI || MISC_RTSX_USB +config PVPANIC + tristate "pvpanic device support" + depends on HAS_IOMEM && (ACPI || OF) + help + This driver provides support for the pvpanic device. pvpanic is + a paravirtualized device provided by QEMU; it lets a virtual machine + (guest) communicate panic events to the host. 
+ source "drivers/misc/c2port/Kconfig" source "drivers/misc/eeprom/Kconfig" source "drivers/misc/cb710/Kconfig" diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index af22bbc3d00c..b76d611c87de 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -58,3 +58,4 @@ obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o obj-$(CONFIG_OCXL) += ocxl/ obj-$(CONFIG_MISC_RTSX) += cardreader/ +obj-$(CONFIG_PVPANIC) += pvpanic.o diff --git a/drivers/misc/altera-stapl/altera.c b/drivers/misc/altera-stapl/altera.c index ef83a9078646..d2ed3b9728b7 100644 --- a/drivers/misc/altera-stapl/altera.c +++ b/drivers/misc/altera-stapl/altera.c @@ -2176,8 +2176,7 @@ static int altera_get_note(u8 *p, s32 program_size, key_ptr = &p[note_strings + get_unaligned_be32( &p[note_table + (8 * i)])]; - if ((strncasecmp(key, key_ptr, strlen(key_ptr)) == 0) && - (key != NULL)) { + if (key && !strncasecmp(key, key_ptr, strlen(key_ptr))) { status = 0; value_ptr = &p[note_strings + diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index b66d832d3233..c79ba1c699ad 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -1718,7 +1718,6 @@ int cxl_slot_is_switched(struct pci_dev *dev) { struct device_node *np; int depth = 0; - const __be32 *prop; if (!(np = pci_device_to_OF_node(dev))) { pr_err("cxl: np = NULL\n"); @@ -1727,8 +1726,7 @@ int cxl_slot_is_switched(struct pci_dev *dev) of_node_get(np); while (np) { np = of_get_next_parent(np); - prop = of_get_property(np, "device_type", NULL); - if (!prop || strcmp((char *)prop, "pciex")) + if (!of_node_is_type(np, "pciex")) break; depth++; } diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile index cd6825afa8e1..d9215fc4e499 100644 --- a/drivers/misc/mei/Makefile +++ b/drivers/misc/mei/Makefile @@ -9,6 +9,7 @@ mei-objs += hbm.o mei-objs += interrupt.o mei-objs += client.o mei-objs += main.o +mei-objs += dma-ring.o mei-objs += bus.o mei-objs += bus-fixup.o mei-$(CONFIG_DEBUG_FS) += debugfs.o diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index ebdcf0b450e2..1fc8ea0f519b 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -318,23 +318,6 @@ void mei_me_cl_rm_all(struct mei_device *dev) } /** - * mei_cl_cmp_id - tells if the clients are the same - * - * @cl1: host client 1 - * @cl2: host client 2 - * - * Return: true - if the clients has same host and me ids - * false - otherwise - */ -static inline bool mei_cl_cmp_id(const struct mei_cl *cl1, - const struct mei_cl *cl2) -{ - return cl1 && cl2 && - (cl1->host_client_id == cl2->host_client_id) && - (mei_cl_me_id(cl1) == mei_cl_me_id(cl2)); -} - -/** * mei_io_cb_free - free mei_cb_private related memory * * @cb: mei callback struct @@ -418,7 +401,7 @@ static void mei_io_list_flush_cl(struct list_head *head, struct mei_cl_cb *cb, *next; list_for_each_entry_safe(cb, next, head, list) { - if (mei_cl_cmp_id(cl, cb->cl)) + if (cl == cb->cl) list_del_init(&cb->list); } } @@ -435,7 +418,7 @@ static void mei_io_tx_list_free_cl(struct list_head *head, struct mei_cl_cb *cb, *next; list_for_each_entry_safe(cb, next, head, list) { - if (mei_cl_cmp_id(cl, cb->cl)) + if (cl == cb->cl) mei_tx_cb_dequeue(cb); } } @@ -478,7 +461,7 @@ struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length, if (length == 0) return cb; - cb->buf.data = kmalloc(length, GFP_KERNEL); + cb->buf.data = kmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL); if (!cb->buf.data) { mei_io_cb_free(cb); return NULL; @@ 
-1374,7 +1357,9 @@ int mei_cl_notify_request(struct mei_cl *cl, mutex_unlock(&dev->device_lock); wait_event_timeout(cl->wait, - cl->notify_en == request || !mei_cl_is_connected(cl), + cl->notify_en == request || + cl->status || + !mei_cl_is_connected(cl), mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); mutex_lock(&dev->device_lock); @@ -1573,10 +1558,13 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, struct mei_msg_hdr mei_hdr; size_t hdr_len = sizeof(mei_hdr); size_t len; - size_t hbuf_len; + size_t hbuf_len, dr_len; int hbuf_slots; + u32 dr_slots; + u32 dma_len; int rets; bool first_chunk; + const void *data; if (WARN_ON(!cl || !cl->dev)) return -ENODEV; @@ -1597,6 +1585,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, } len = buf->size - cb->buf_idx; + data = buf->data + cb->buf_idx; hbuf_slots = mei_hbuf_empty_slots(dev); if (hbuf_slots < 0) { rets = -EOVERFLOW; @@ -1604,6 +1593,8 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, } hbuf_len = mei_slots2data(hbuf_slots); + dr_slots = mei_dma_ring_empty_slots(dev); + dr_len = mei_slots2data(dr_slots); mei_msg_hdr_init(&mei_hdr, cb); @@ -1614,23 +1605,33 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, if (len + hdr_len <= hbuf_len) { mei_hdr.length = len; mei_hdr.msg_complete = 1; + } else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) { + mei_hdr.dma_ring = 1; + if (len > dr_len) + len = dr_len; + else + mei_hdr.msg_complete = 1; + + mei_hdr.length = sizeof(dma_len); + dma_len = len; + data = &dma_len; } else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) { - mei_hdr.length = hbuf_len - hdr_len; + len = hbuf_len - hdr_len; + mei_hdr.length = len; } else { return 0; } - cl_dbg(dev, cl, "buf: size = %zu idx = %zu\n", - cb->buf.size, cb->buf_idx); + if (mei_hdr.dma_ring) + mei_dma_ring_write(dev, buf->data + cb->buf_idx, len); - rets = mei_write_message(dev, &mei_hdr, hdr_len, - buf->data + cb->buf_idx, mei_hdr.length); + rets = mei_write_message(dev, &mei_hdr, hdr_len, data, mei_hdr.length); if (rets) goto err; cl->status = 0; cl->writing_state = MEI_WRITING; - cb->buf_idx += mei_hdr.length; + cb->buf_idx += len; if (first_chunk) { if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) { @@ -1665,11 +1666,13 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) struct mei_msg_data *buf; struct mei_msg_hdr mei_hdr; size_t hdr_len = sizeof(mei_hdr); - size_t len; - size_t hbuf_len; + size_t len, hbuf_len, dr_len; int hbuf_slots; + u32 dr_slots; + u32 dma_len; ssize_t rets; bool blocking; + const void *data; if (WARN_ON(!cl || !cl->dev)) return -ENODEV; @@ -1681,10 +1684,12 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) buf = &cb->buf; len = buf->size; - blocking = cb->blocking; cl_dbg(dev, cl, "len=%zd\n", len); + blocking = cb->blocking; + data = buf->data; + rets = pm_runtime_get(dev->dev); if (rets < 0 && rets != -EINPROGRESS) { pm_runtime_put_noidle(dev->dev); @@ -1721,16 +1726,32 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) } hbuf_len = mei_slots2data(hbuf_slots); + dr_slots = mei_dma_ring_empty_slots(dev); + dr_len = mei_slots2data(dr_slots); if (len + hdr_len <= hbuf_len) { mei_hdr.length = len; mei_hdr.msg_complete = 1; + } else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) { + mei_hdr.dma_ring = 1; + if (len > dr_len) + len = dr_len; + else + mei_hdr.msg_complete = 1; + + mei_hdr.length = sizeof(dma_len); + dma_len = len; + data = &dma_len; } else { - mei_hdr.length = hbuf_len - hdr_len; + len = hbuf_len - hdr_len; + 
mei_hdr.length = len; } + if (mei_hdr.dma_ring) + mei_dma_ring_write(dev, buf->data, len); + rets = mei_write_message(dev, &mei_hdr, hdr_len, - buf->data, mei_hdr.length); + data, mei_hdr.length); if (rets) goto err; @@ -1739,7 +1760,9 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) goto err; cl->writing_state = MEI_WRITING; - cb->buf_idx = mei_hdr.length; + cb->buf_idx = len; + /* restore return value */ + len = buf->size; out: if (mei_hdr.msg_complete) diff --git a/drivers/misc/mei/dma-ring.c b/drivers/misc/mei/dma-ring.c new file mode 100644 index 000000000000..795641b82181 --- /dev/null +++ b/drivers/misc/mei/dma-ring.c @@ -0,0 +1,269 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. + */ +#include <linux/dma-mapping.h> +#include <linux/mei.h> + +#include "mei_dev.h" + +/** + * mei_dmam_dscr_alloc() - allocate a managed coherent buffer + * for the dma descriptor + * @dev: mei_device + * @dscr: dma descriptor + * + * Return: + * * 0 - on success or zero allocation request + * * -EINVAL - if size is not power of 2 + * * -ENOMEM - of allocation has failed + */ +static int mei_dmam_dscr_alloc(struct mei_device *dev, + struct mei_dma_dscr *dscr) +{ + if (!dscr->size) + return 0; + + if (WARN_ON(!is_power_of_2(dscr->size))) + return -EINVAL; + + if (dscr->vaddr) + return 0; + + dscr->vaddr = dmam_alloc_coherent(dev->dev, dscr->size, &dscr->daddr, + GFP_KERNEL); + if (!dscr->vaddr) + return -ENOMEM; + + return 0; +} + +/** + * mei_dmam_dscr_free() - free a managed coherent buffer + * from the dma descriptor + * @dev: mei_device + * @dscr: dma descriptor + */ +static void mei_dmam_dscr_free(struct mei_device *dev, + struct mei_dma_dscr *dscr) +{ + if (!dscr->vaddr) + return; + + dmam_free_coherent(dev->dev, dscr->size, dscr->vaddr, dscr->daddr); + dscr->vaddr = NULL; +} + +/** + * mei_dmam_ring_free() - free dma ring buffers + * @dev: mei device + */ +void mei_dmam_ring_free(struct mei_device *dev) +{ + int i; + + for (i = 0; i < DMA_DSCR_NUM; i++) + mei_dmam_dscr_free(dev, &dev->dr_dscr[i]); +} + +/** + * mei_dmam_ring_alloc() - allocate dma ring buffers + * @dev: mei device + * + * Return: -ENOMEM on allocation failure 0 otherwise + */ +int mei_dmam_ring_alloc(struct mei_device *dev) +{ + int i; + + for (i = 0; i < DMA_DSCR_NUM; i++) + if (mei_dmam_dscr_alloc(dev, &dev->dr_dscr[i])) + goto err; + + return 0; + +err: + mei_dmam_ring_free(dev); + return -ENOMEM; +} + +/** + * mei_dma_ring_is_allocated() - check if dma ring is allocated + * @dev: mei device + * + * Return: true if dma ring is allocated + */ +bool mei_dma_ring_is_allocated(struct mei_device *dev) +{ + return !!dev->dr_dscr[DMA_DSCR_HOST].vaddr; +} + +static inline +struct hbm_dma_ring_ctrl *mei_dma_ring_ctrl(struct mei_device *dev) +{ + return (struct hbm_dma_ring_ctrl *)dev->dr_dscr[DMA_DSCR_CTRL].vaddr; +} + +/** + * mei_dma_ring_reset() - reset the dma control block + * @dev: mei device + */ +void mei_dma_ring_reset(struct mei_device *dev) +{ + struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev); + + if (!ctrl) + return; + + memset(ctrl, 0, sizeof(*ctrl)); +} + +/** + * mei_dma_copy_from() - copy from dma ring into buffer + * @dev: mei device + * @buf: data buffer + * @offset: offset in slots. + * @n: number of slots to copy. 
+ */ +static size_t mei_dma_copy_from(struct mei_device *dev, unsigned char *buf, + u32 offset, u32 n) +{ + unsigned char *dbuf = dev->dr_dscr[DMA_DSCR_DEVICE].vaddr; + + size_t b_offset = offset << 2; + size_t b_n = n << 2; + + memcpy(buf, dbuf + b_offset, b_n); + + return b_n; +} + +/** + * mei_dma_copy_to() - copy a buffer to the dma ring + * @dev: mei device + * @buf: data buffer + * @offset: offset in slots. + * @n: number of slots to copy. + */ +static size_t mei_dma_copy_to(struct mei_device *dev, unsigned char *buf, + u32 offset, u32 n) +{ + unsigned char *hbuf = dev->dr_dscr[DMA_DSCR_HOST].vaddr; + + size_t b_offset = offset << 2; + size_t b_n = n << 2; + + memcpy(hbuf + b_offset, buf, b_n); + + return b_n; +} + +/** + * mei_dma_ring_read() - read data from the ring + * @dev: mei device + * @buf: buffer to read into; may be NULL in case of dropping the data. + * @len: length to read. + */ +void mei_dma_ring_read(struct mei_device *dev, unsigned char *buf, u32 len) +{ + struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev); + u32 dbuf_depth; + u32 rd_idx, rem, slots; + + if (WARN_ON(!ctrl)) + return; + + dev_dbg(dev->dev, "reading from dma %u bytes\n", len); + + if (!len) + return; + + dbuf_depth = dev->dr_dscr[DMA_DSCR_DEVICE].size >> 2; + rd_idx = READ_ONCE(ctrl->dbuf_rd_idx) & (dbuf_depth - 1); + slots = mei_data2slots(len); + + /* if buf is NULL we drop the packet by advancing the pointer. */ + if (!buf) + goto out; + + if (rd_idx + slots > dbuf_depth) { + buf += mei_dma_copy_from(dev, buf, rd_idx, dbuf_depth - rd_idx); + rem = slots - (dbuf_depth - rd_idx); + rd_idx = 0; + } else { + rem = slots; + } + + mei_dma_copy_from(dev, buf, rd_idx, rem); +out: + WRITE_ONCE(ctrl->dbuf_rd_idx, ctrl->dbuf_rd_idx + slots); +} + +static inline u32 mei_dma_ring_hbuf_depth(struct mei_device *dev) +{ + return dev->dr_dscr[DMA_DSCR_HOST].size >> 2; +} + +/** + * mei_dma_ring_empty_slots() - calculate number of empty slots in dma ring + * @dev: mei_device + * + * Return: number of empty slots + */ +u32 mei_dma_ring_empty_slots(struct mei_device *dev) +{ + struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev); + u32 wr_idx, rd_idx, hbuf_depth, empty; + + if (!mei_dma_ring_is_allocated(dev)) + return 0; + + if (WARN_ON(!ctrl)) + return 0; + + /* easier to work in slots */ + hbuf_depth = mei_dma_ring_hbuf_depth(dev); + rd_idx = READ_ONCE(ctrl->hbuf_rd_idx); + wr_idx = READ_ONCE(ctrl->hbuf_wr_idx); + + if (rd_idx > wr_idx) + empty = rd_idx - wr_idx; + else + empty = hbuf_depth - (wr_idx - rd_idx); + + return empty; +} + +/** + * mei_dma_ring_write - write data to dma ring host buffer + * + * @dev: mei_device + * @buf: data to be written + * @len: data length + */ +void mei_dma_ring_write(struct mei_device *dev, unsigned char *buf, u32 len) +{ + struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev); + u32 hbuf_depth; + u32 wr_idx, rem, slots; + + if (WARN_ON(!ctrl)) + return; + + dev_dbg(dev->dev, "writing to dma %u bytes\n", len); + hbuf_depth = mei_dma_ring_hbuf_depth(dev); + wr_idx = READ_ONCE(ctrl->hbuf_wr_idx) & (hbuf_depth - 1); + slots = mei_data2slots(len); + + if (wr_idx + slots > hbuf_depth) { + buf += mei_dma_copy_to(dev, buf, wr_idx, hbuf_depth - wr_idx); + rem = slots - (hbuf_depth - wr_idx); + wr_idx = 0; + } else { + rem = slots; + } + + mei_dma_copy_to(dev, buf, wr_idx, rem); + + WRITE_ONCE(ctrl->hbuf_wr_idx, ctrl->hbuf_wr_idx + slots); +} diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index e56f3e72d57a..78c26cebf5d4 100644 --- a/drivers/misc/mei/hbm.c +++
b/drivers/misc/mei/hbm.c @@ -65,6 +65,7 @@ const char *mei_hbm_state_str(enum mei_hbm_state state) MEI_HBM_STATE(IDLE); MEI_HBM_STATE(STARTING); MEI_HBM_STATE(STARTED); + MEI_HBM_STATE(DR_SETUP); MEI_HBM_STATE(ENUM_CLIENTS); MEI_HBM_STATE(CLIENT_PROPERTIES); MEI_HBM_STATE(STOPPED); @@ -296,6 +297,48 @@ int mei_hbm_start_req(struct mei_device *dev) } /** + * mei_hbm_dma_setup_req() - setup DMA request + * @dev: the device structure + * + * Return: 0 on success and < 0 on failure + */ +static int mei_hbm_dma_setup_req(struct mei_device *dev) +{ + struct mei_msg_hdr mei_hdr; + struct hbm_dma_setup_request req; + const size_t len = sizeof(struct hbm_dma_setup_request); + unsigned int i; + int ret; + + mei_hbm_hdr(&mei_hdr, len); + + memset(&req, 0, len); + req.hbm_cmd = MEI_HBM_DMA_SETUP_REQ_CMD; + for (i = 0; i < DMA_DSCR_NUM; i++) { + phys_addr_t paddr; + + paddr = dev->dr_dscr[i].daddr; + req.dma_dscr[i].addr_hi = upper_32_bits(paddr); + req.dma_dscr[i].addr_lo = lower_32_bits(paddr); + req.dma_dscr[i].size = dev->dr_dscr[i].size; + } + + mei_dma_ring_reset(dev); + + ret = mei_hbm_write_message(dev, &mei_hdr, &req); + if (ret) { + dev_err(dev->dev, "dma setup request write failed: ret = %d.\n", + ret); + return ret; + } + + dev->hbm_state = MEI_HBM_DR_SETUP; + dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; + mei_schedule_stall_timer(dev); + return 0; +} + +/** * mei_hbm_enum_clients_req - sends enumeration client request message. * * @dev: the device structure @@ -1044,6 +1087,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) struct hbm_host_version_response *version_res; struct hbm_props_response *props_res; struct hbm_host_enum_response *enum_res; + struct hbm_dma_setup_response *dma_setup_res; struct hbm_add_client_request *add_cl_req; int ret; @@ -1108,14 +1152,52 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) return -EPROTO; } - if (mei_hbm_enum_clients_req(dev)) { - dev_err(dev->dev, "hbm: start: failed to send enumeration request\n"); - return -EIO; + if (dev->hbm_f_dr_supported) { + if (mei_dmam_ring_alloc(dev)) + dev_info(dev->dev, "running w/o dma ring\n"); + if (mei_dma_ring_is_allocated(dev)) { + if (mei_hbm_dma_setup_req(dev)) + return -EIO; + + wake_up(&dev->wait_hbm_start); + break; + } } + dev->hbm_f_dr_supported = 0; + mei_dmam_ring_free(dev); + + if (mei_hbm_enum_clients_req(dev)) + return -EIO; + wake_up(&dev->wait_hbm_start); break; + case MEI_HBM_DMA_SETUP_RES_CMD: + dev_dbg(dev->dev, "hbm: dma setup response: message received.\n"); + + dev->init_clients_timer = 0; + + if (dev->hbm_state != MEI_HBM_DR_SETUP) { + dev_err(dev->dev, "hbm: dma setup response: state mismatch, [%d, %d]\n", + dev->dev_state, dev->hbm_state); + return -EPROTO; + } + + dma_setup_res = (struct hbm_dma_setup_response *)mei_msg; + + if (dma_setup_res->status) { + dev_info(dev->dev, "hbm: dma setup response: failure = %d %s\n", + dma_setup_res->status, + mei_hbm_status_str(dma_setup_res->status)); + dev->hbm_f_dr_supported = 0; + mei_dmam_ring_free(dev); + } + + if (mei_hbm_enum_clients_req(dev)) + return -EIO; + break; + case CLIENT_CONNECT_RES_CMD: dev_dbg(dev->dev, "hbm: client connect response: message received.\n"); mei_hbm_cl_res(dev, cl_cmd, MEI_FOP_CONNECT); @@ -1271,8 +1353,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) break; default: - BUG(); - break; + WARN(1, "hbm: wrong command %d\n", mei_msg->hbm_cmd); + return -EPROTO; } return 0; diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h index 
a2025a5083a3..0171a7e79bab 100644 --- a/drivers/misc/mei/hbm.h +++ b/drivers/misc/mei/hbm.h @@ -26,6 +26,7 @@ struct mei_cl; * * @MEI_HBM_IDLE : protocol not started * @MEI_HBM_STARTING : start request message was sent + * @MEI_HBM_DR_SETUP : dma ring setup request message was sent * @MEI_HBM_ENUM_CLIENTS : enumeration request was sent * @MEI_HBM_CLIENT_PROPERTIES : acquiring clients properties * @MEI_HBM_STARTED : enumeration was completed @@ -34,6 +35,7 @@ struct mei_cl; enum mei_hbm_state { MEI_HBM_IDLE = 0, MEI_HBM_STARTING, + MEI_HBM_DR_SETUP, MEI_HBM_ENUM_CLIENTS, MEI_HBM_CLIENT_PROPERTIES, MEI_HBM_STARTED, diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index 0759c3a668de..3fbbadfa2ae1 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -1471,15 +1471,21 @@ struct mei_device *mei_me_dev_init(struct pci_dev *pdev, { struct mei_device *dev; struct mei_me_hw *hw; + int i; dev = devm_kzalloc(&pdev->dev, sizeof(struct mei_device) + sizeof(struct mei_me_hw), GFP_KERNEL); if (!dev) return NULL; + hw = to_me_hw(dev); + for (i = 0; i < DMA_DSCR_NUM; i++) + dev->dr_dscr[i].size = cfg->dma_size[i]; + mei_device_init(dev, &pdev->dev, &mei_me_hw_ops); hw->cfg = cfg; + return dev; } diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h index 65655925791a..2b7f7677f8cc 100644 --- a/drivers/misc/mei/hw.h +++ b/drivers/misc/mei/hw.h @@ -35,7 +35,7 @@ /* * MEI Version */ -#define HBM_MINOR_VERSION 0 +#define HBM_MINOR_VERSION 1 #define HBM_MAJOR_VERSION 2 /* @@ -206,6 +206,7 @@ enum mei_cl_disconnect_status { * @dma_ring: message is on dma ring * @internal: message is internal * @msg_complete: last packet of the message + * @extension: extension of the header */ struct mei_msg_hdr { u32 me_addr:8; @@ -215,8 +216,11 @@ struct mei_msg_hdr { u32 dma_ring:1; u32 internal:1; u32 msg_complete:1; + u32 extension[0]; } __packed; +#define MEI_MSG_HDR_MAX 2 + struct mei_bus_message { u8 hbm_cmd; u8 data[0]; @@ -512,4 +516,27 @@ struct hbm_dma_setup_response { u8 reserved[2]; } __packed; +/** + * struct mei_dma_ring_ctrl - dma ring control block + * + * @hbuf_wr_idx: host circular buffer write index in slots + * @reserved1: reserved for alignment + * @hbuf_rd_idx: host circular buffer read index in slots + * @reserved2: reserved for alignment + * @dbuf_wr_idx: device circular buffer write index in slots + * @reserved3: reserved for alignment + * @dbuf_rd_idx: device circular buffer read index in slots + * @reserved4: reserved for alignment + */ +struct hbm_dma_ring_ctrl { + u32 hbuf_wr_idx; + u32 reserved1; + u32 hbuf_rd_idx; + u32 reserved2; + u32 dbuf_wr_idx; + u32 reserved3; + u32 dbuf_rd_idx; + u32 reserved4; +} __packed; + #endif diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index 4888ebc076b7..eb026e2a0537 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c @@ -151,7 +151,7 @@ int mei_reset(struct mei_device *dev) mei_hbm_reset(dev); - dev->rd_msg_hdr = 0; + memset(dev->rd_msg_hdr, 0, sizeof(dev->rd_msg_hdr)); if (ret) { dev_err(dev->dev, "hw_reset failed ret = %d\n", ret); diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index 5a661cbdf2ae..055c2d89b310 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c @@ -75,6 +75,8 @@ static inline int mei_cl_hbm_equal(struct mei_cl *cl, */ static void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr) { + if (hdr->dma_ring) + mei_dma_ring_read(dev, NULL, hdr->extension[0]); /* * no need to check for size as it is guarantied * 
that length fits into rd_msg_buf @@ -100,6 +102,7 @@ static int mei_cl_irq_read_msg(struct mei_cl *cl, struct mei_device *dev = cl->dev; struct mei_cl_cb *cb; size_t buf_sz; + u32 length; cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list); if (!cb) { @@ -119,25 +122,31 @@ static int mei_cl_irq_read_msg(struct mei_cl *cl, goto discard; } - buf_sz = mei_hdr->length + cb->buf_idx; + length = mei_hdr->dma_ring ? mei_hdr->extension[0] : mei_hdr->length; + + buf_sz = length + cb->buf_idx; /* catch for integer overflow */ if (buf_sz < cb->buf_idx) { cl_err(dev, cl, "message is too big len %d idx %zu\n", - mei_hdr->length, cb->buf_idx); + length, cb->buf_idx); cb->status = -EMSGSIZE; goto discard; } if (cb->buf.size < buf_sz) { cl_dbg(dev, cl, "message overflow. size %zu len %d idx %zu\n", - cb->buf.size, mei_hdr->length, cb->buf_idx); + cb->buf.size, length, cb->buf_idx); cb->status = -EMSGSIZE; goto discard; } + if (mei_hdr->dma_ring) + mei_dma_ring_read(dev, cb->buf.data + cb->buf_idx, length); + + /* for DMA read 0 length to generate an interrupt to the device */ mei_read_slots(dev, cb->buf.data + cb->buf_idx, mei_hdr->length); - cb->buf_idx += mei_hdr->length; + cb->buf_idx += length; if (mei_hdr->msg_complete) { cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx); @@ -247,6 +256,9 @@ static inline int hdr_is_valid(u32 msg_hdr) if (!msg_hdr || mei_hdr->reserved) return -EBADMSG; + if (mei_hdr->dma_ring && mei_hdr->length != MEI_SLOT_SIZE) + return -EBADMSG; + return 0; } @@ -267,20 +279,20 @@ int mei_irq_read_handler(struct mei_device *dev, struct mei_cl *cl; int ret; - if (!dev->rd_msg_hdr) { - dev->rd_msg_hdr = mei_read_hdr(dev); + if (!dev->rd_msg_hdr[0]) { + dev->rd_msg_hdr[0] = mei_read_hdr(dev); (*slots)--; dev_dbg(dev->dev, "slots =%08x.\n", *slots); - ret = hdr_is_valid(dev->rd_msg_hdr); + ret = hdr_is_valid(dev->rd_msg_hdr[0]); if (ret) { dev_err(dev->dev, "corrupted message header 0x%08X\n", - dev->rd_msg_hdr); + dev->rd_msg_hdr[0]); goto end; } } - mei_hdr = (struct mei_msg_hdr *)&dev->rd_msg_hdr; + mei_hdr = (struct mei_msg_hdr *)dev->rd_msg_hdr; dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr)); if (mei_slots2data(*slots) < mei_hdr->length) { @@ -291,6 +303,12 @@ int mei_irq_read_handler(struct mei_device *dev, goto end; } + if (mei_hdr->dma_ring) { + dev->rd_msg_hdr[1] = mei_read_hdr(dev); + (*slots)--; + mei_hdr->length = 0; + } + /* HBM message */ if (hdr_is_hbm(mei_hdr)) { ret = mei_hbm_dispatch(dev, mei_hdr); @@ -324,7 +342,7 @@ int mei_irq_read_handler(struct mei_device *dev, goto reset_slots; } dev_err(dev->dev, "no destination client found 0x%08X\n", - dev->rd_msg_hdr); + dev->rd_msg_hdr[0]); ret = -EBADMSG; goto end; } @@ -334,9 +352,8 @@ int mei_irq_read_handler(struct mei_device *dev, reset_slots: /* reset the number of slots and header */ + memset(dev->rd_msg_hdr, 0, sizeof(dev->rd_msg_hdr)); *slots = mei_count_full_read_slots(dev); - dev->rd_msg_hdr = 0; - if (*slots == -EOVERFLOW) { /* overflow - reset */ dev_err(dev->dev, "resetting due to slots overflow.\n"); diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index 377397e1b5a5..685b78ce30a5 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h @@ -122,6 +122,19 @@ struct mei_msg_data { unsigned char *data; }; +/** + * struct mei_dma_dscr - dma address descriptor + * + * @vaddr: dma buffer virtual address + * @daddr: dma buffer physical address + * @size : dma buffer size + */ +struct mei_dma_dscr { + void *vaddr; + dma_addr_t daddr; + size_t 
size; +}; + /* Maximum number of processed FW status registers */ #define MEI_FW_STATUS_MAX 6 /* Minimal buffer for FW status string (8 bytes in dw + space or '\0') */ @@ -409,6 +422,7 @@ struct mei_fw_version { * @rd_msg_hdr : read message header storage * * @hbuf_is_ready : query if the host host/write buffer is ready + * @dr_dscr: DMA ring descriptors: TX, RX, and CTRL * * @version : HBM protocol version in use * @hbm_f_pg_supported : hbm feature pgi protocol @@ -483,11 +497,13 @@ struct mei_device { #endif /* CONFIG_PM */ unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE]; - u32 rd_msg_hdr; + u32 rd_msg_hdr[MEI_MSG_HDR_MAX]; /* write buffer */ bool hbuf_is_ready; + struct mei_dma_dscr dr_dscr[DMA_DSCR_NUM]; + struct hbm_version version; unsigned int hbm_f_pg_supported:1; unsigned int hbm_f_dc_supported:1; @@ -578,6 +594,14 @@ int mei_restart(struct mei_device *dev); void mei_stop(struct mei_device *dev); void mei_cancel_work(struct mei_device *dev); +int mei_dmam_ring_alloc(struct mei_device *dev); +void mei_dmam_ring_free(struct mei_device *dev); +bool mei_dma_ring_is_allocated(struct mei_device *dev); +void mei_dma_ring_reset(struct mei_device *dev); +void mei_dma_ring_read(struct mei_device *dev, unsigned char *buf, u32 len); +void mei_dma_ring_write(struct mei_device *dev, unsigned char *buf, u32 len); +u32 mei_dma_ring_empty_slots(struct mei_device *dev); + /* * MEI interrupt functions prototype */ diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index ea4e152270a3..73ace2d59dea 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -98,9 +98,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH12_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_4, MEI_ME_PCH8_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)}, /* required last entry */ diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c new file mode 100644 index 000000000000..595ac065b401 --- /dev/null +++ b/drivers/misc/pvpanic.c @@ -0,0 +1,192 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Pvpanic Device Support + * + * Copyright (C) 2013 Fujitsu. + * Copyright (C) 2018 ZTE. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/acpi.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/types.h> + +static void __iomem *base; + +#define PVPANIC_PANICKED (1 << 0) + +MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>"); +MODULE_DESCRIPTION("pvpanic device driver"); +MODULE_LICENSE("GPL"); + +static void +pvpanic_send_event(unsigned int event) +{ + iowrite8(event, base); +} + +static int +pvpanic_panic_notify(struct notifier_block *nb, unsigned long code, + void *unused) +{ + pvpanic_send_event(PVPANIC_PANICKED); + return NOTIFY_DONE; +} + +static struct notifier_block pvpanic_panic_nb = { + .notifier_call = pvpanic_panic_notify, + .priority = 1, /* let this be called before broken drm_fb_helper */ +}; + +#ifdef CONFIG_ACPI +static int pvpanic_add(struct acpi_device *device); +static int pvpanic_remove(struct acpi_device *device); + +static const struct acpi_device_id pvpanic_device_ids[] = { + { "QEMU0001", 0 }, + { "", 0 } +}; +MODULE_DEVICE_TABLE(acpi, pvpanic_device_ids); + +static struct acpi_driver pvpanic_driver = { + .name = "pvpanic", + .class = "QEMU", + .ids = pvpanic_device_ids, + .ops = { + .add = pvpanic_add, + .remove = pvpanic_remove, + }, + .owner = THIS_MODULE, +}; + +static acpi_status +pvpanic_walk_resources(struct acpi_resource *res, void *context) +{ + struct resource r; + + if (acpi_dev_resource_io(res, &r)) { + base = ioport_map(r.start, resource_size(&r)); + return AE_OK; + } else if (acpi_dev_resource_memory(res, &r)) { + base = ioremap(r.start, resource_size(&r)); + return AE_OK; + } + + return AE_ERROR; +} + +static int pvpanic_add(struct acpi_device *device) +{ + int ret; + + ret = acpi_bus_get_status(device); + if (ret < 0) + return ret; + + if (!device->status.enabled || !device->status.functional) + return -ENODEV; + + acpi_walk_resources(device->handle, METHOD_NAME__CRS, + pvpanic_walk_resources, NULL); + + if (!base) + return -ENODEV; + + atomic_notifier_chain_register(&panic_notifier_list, + &pvpanic_panic_nb); + + return 0; +} + +static int pvpanic_remove(struct acpi_device *device) +{ + + atomic_notifier_chain_unregister(&panic_notifier_list, + &pvpanic_panic_nb); + iounmap(base); + + return 0; +} + +static int pvpanic_register_acpi_driver(void) +{ + return acpi_bus_register_driver(&pvpanic_driver); +} + +static void pvpanic_unregister_acpi_driver(void) +{ + acpi_bus_unregister_driver(&pvpanic_driver); +} +#else +static int pvpanic_register_acpi_driver(void) +{ + return -ENODEV; +} + +static void pvpanic_unregister_acpi_driver(void) {} +#endif + +static int pvpanic_mmio_probe(struct platform_device *pdev) +{ + struct resource *mem; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) + return -EINVAL; + + base = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(base)) + return PTR_ERR(base); + + atomic_notifier_chain_register(&panic_notifier_list, + &pvpanic_panic_nb); + + return 0; +} + +static int pvpanic_mmio_remove(struct platform_device *pdev) +{ + + atomic_notifier_chain_unregister(&panic_notifier_list, + &pvpanic_panic_nb); + + return 0; +} + +static const struct of_device_id pvpanic_mmio_match[] = { + { .compatible = "qemu,pvpanic-mmio", }, + {} +}; + +static struct platform_driver pvpanic_mmio_driver = { + .driver = { + .name = "pvpanic-mmio", + .of_match_table = pvpanic_mmio_match, + }, + .probe = pvpanic_mmio_probe, + .remove = pvpanic_mmio_remove, +}; + +static int __init
pvpanic_mmio_init(void) +{ + if (acpi_disabled) + return platform_driver_register(&pvpanic_mmio_driver); + else + return pvpanic_register_acpi_driver(); +} + +static void __exit pvpanic_mmio_exit(void) +{ + if (acpi_disabled) + platform_driver_unregister(&pvpanic_mmio_driver); + else + pvpanic_unregister_acpi_driver(); +} + +module_init(pvpanic_mmio_init); +module_exit(pvpanic_mmio_exit); diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index 380916bff9e0..9c8249f74479 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c @@ -1667,7 +1667,7 @@ static int parport_ECP_supported(struct parport *pb) default: printk(KERN_WARNING "0x%lx: Unknown implementation ID\n", pb->base); - /* Assume 1 */ + /* Fall through - Assume 1 */ case 1: pword = 1; } diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 54f6a40c75c6..5d5ee4fa1e6e 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -1168,14 +1168,6 @@ config INTEL_SMARTCONNECT This driver checks to determine whether the device has Intel Smart Connect enabled, and if so disables it. -config PVPANIC - tristate "pvpanic device support" - depends on ACPI - ---help--- - This driver provides support for the pvpanic device. pvpanic is - a paravirtualized device provided by QEMU; it lets a virtual machine - (guest) communicate panic events to the host. - config INTEL_PMC_IPC tristate "Intel PMC IPC Driver" depends on ACPI diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index 39ae94135406..d537d1753147 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -78,7 +78,6 @@ obj-$(CONFIG_APPLE_GMUX) += apple-gmux.o obj-$(CONFIG_INTEL_RST) += intel-rst.o obj-$(CONFIG_INTEL_SMARTCONNECT) += intel-smartconnect.o -obj-$(CONFIG_PVPANIC) += pvpanic.o obj-$(CONFIG_ALIENWARE_WMI) += alienware-wmi.o obj-$(CONFIG_INTEL_PMC_IPC) += intel_pmc_ipc.o obj-$(CONFIG_TOUCHSCREEN_DMI) += touchscreen_dmi.o diff --git a/drivers/platform/x86/pvpanic.c b/drivers/platform/x86/pvpanic.c deleted file mode 100644 index fd86daba7ffd..000000000000 --- a/drivers/platform/x86/pvpanic.c +++ /dev/null @@ -1,124 +0,0 @@ -/* - * pvpanic.c - pvpanic Device Support - * - * Copyright (C) 2013 Fujitsu. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/types.h> -#include <linux/acpi.h> - -MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>"); -MODULE_DESCRIPTION("pvpanic device driver"); -MODULE_LICENSE("GPL"); - -static int pvpanic_add(struct acpi_device *device); -static int pvpanic_remove(struct acpi_device *device); - -static const struct acpi_device_id pvpanic_device_ids[] = { - { "QEMU0001", 0 }, - { "", 0 }, -}; -MODULE_DEVICE_TABLE(acpi, pvpanic_device_ids); - -#define PVPANIC_PANICKED (1 << 0) - -static u16 port; - -static struct acpi_driver pvpanic_driver = { - .name = "pvpanic", - .class = "QEMU", - .ids = pvpanic_device_ids, - .ops = { - .add = pvpanic_add, - .remove = pvpanic_remove, - }, - .owner = THIS_MODULE, -}; - -static void -pvpanic_send_event(unsigned int event) -{ - outb(event, port); -} - -static int -pvpanic_panic_notify(struct notifier_block *nb, unsigned long code, - void *unused) -{ - pvpanic_send_event(PVPANIC_PANICKED); - return NOTIFY_DONE; -} - -static struct notifier_block pvpanic_panic_nb = { - .notifier_call = pvpanic_panic_notify, - .priority = 1, /* let this called before broken drm_fb_helper */ -}; - - -static acpi_status -pvpanic_walk_resources(struct acpi_resource *res, void *context) -{ - switch (res->type) { - case ACPI_RESOURCE_TYPE_END_TAG: - return AE_OK; - - case ACPI_RESOURCE_TYPE_IO: - port = res->data.io.minimum; - return AE_OK; - - default: - return AE_ERROR; - } -} - -static int pvpanic_add(struct acpi_device *device) -{ - int ret; - - ret = acpi_bus_get_status(device); - if (ret < 0) - return ret; - - if (!device->status.enabled || !device->status.functional) - return -ENODEV; - - acpi_walk_resources(device->handle, METHOD_NAME__CRS, - pvpanic_walk_resources, NULL); - - if (!port) - return -ENODEV; - - atomic_notifier_chain_register(&panic_notifier_list, - &pvpanic_panic_nb); - - return 0; -} - -static int pvpanic_remove(struct acpi_device *device) -{ - - atomic_notifier_chain_unregister(&panic_notifier_list, - &pvpanic_panic_nb); - return 0; -} - -module_acpi_driver(pvpanic_driver); diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c index 333ad7d5b45b..dd5d1103e02b 100644 --- a/drivers/pps/clients/pps-gpio.c +++ b/drivers/pps/clients/pps-gpio.c @@ -158,10 +158,10 @@ static int pps_gpio_probe(struct platform_device *pdev) if (data->capture_clear) pps_default_params |= PPS_CAPTURECLEAR | PPS_OFFSETCLEAR; data->pps = pps_register_source(&data->info, pps_default_params); - if (data->pps == NULL) { + if (IS_ERR(data->pps)) { dev_err(&pdev->dev, "failed to register IRQ %d as PPS source\n", data->irq); - return -EINVAL; + return PTR_ERR(data->pps); } /* register IRQ interrupt handler */ diff --git a/drivers/pps/clients/pps-ktimer.c b/drivers/pps/clients/pps-ktimer.c index 04735649052a..728818b87af3 100644 --- a/drivers/pps/clients/pps-ktimer.c +++ b/drivers/pps/clients/pps-ktimer.c @@ -80,9 +80,9 @@ static int __init pps_ktimer_init(void) { pps = pps_register_source(&pps_ktimer_info, PPS_CAPTUREASSERT | PPS_OFFSETASSERT); - if (pps == NULL) { + if (IS_ERR(pps)) { pr_err("cannot register PPS source\n"); - return -ENOMEM; + return PTR_ERR(pps); } timer_setup(&ktimer, pps_ktimer_event, 0); diff --git 
a/drivers/pps/clients/pps-ldisc.c b/drivers/pps/clients/pps-ldisc.c index 73bd3bb4d93b..00f6c460e493 100644 --- a/drivers/pps/clients/pps-ldisc.c +++ b/drivers/pps/clients/pps-ldisc.c @@ -72,9 +72,9 @@ static int pps_tty_open(struct tty_struct *tty) pps = pps_register_source(&info, PPS_CAPTUREBOTH | \ PPS_OFFSETASSERT | PPS_OFFSETCLEAR); - if (pps == NULL) { + if (IS_ERR(pps)) { pr_err("cannot register PPS source \"%s\"\n", info.path); - return -ENOMEM; + return PTR_ERR(pps); } pps->lookup_cookie = tty; diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c index 4db824f88d00..7226e39aae83 100644 --- a/drivers/pps/clients/pps_parport.c +++ b/drivers/pps/clients/pps_parport.c @@ -179,7 +179,7 @@ static void parport_attach(struct parport *port) device->pps = pps_register_source(&info, PPS_CAPTUREBOTH | PPS_OFFSETASSERT | PPS_OFFSETCLEAR); - if (device->pps == NULL) { + if (IS_ERR(device->pps)) { pr_err("couldn't register PPS source\n"); goto err_release_dev; } diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c index 805c749ac1ad..a1c3cd38754f 100644 --- a/drivers/pps/kapi.c +++ b/drivers/pps/kapi.c @@ -72,7 +72,8 @@ static void pps_echo_client_default(struct pps_device *pps, int event, * source is described by info's fields and it will have, as default PPS * parameters, the ones specified into default_params. * - * The function returns, in case of success, the PPS device. Otherwise NULL. + * The function returns, in case of success, the PPS device. Otherwise + * ERR_PTR(errno). */ struct pps_device *pps_register_source(struct pps_source_info *info, @@ -135,7 +136,7 @@ kfree_pps: pps_register_source_exit: pr_err("%s: unable to register source\n", info->name); - return NULL; + return ERR_PTR(err); } EXPORT_SYMBOL(pps_register_source); diff --git a/drivers/slimbus/Kconfig b/drivers/slimbus/Kconfig index 9d73ad806698..8cd595148d17 100644 --- a/drivers/slimbus/Kconfig +++ b/drivers/slimbus/Kconfig @@ -22,8 +22,9 @@ config SLIM_QCOM_CTRL config SLIM_QCOM_NGD_CTRL tristate "Qualcomm SLIMbus Satellite Non-Generic Device Component" - depends on QCOM_QMI_HELPERS - depends on HAS_IOMEM && DMA_ENGINE + depends on HAS_IOMEM && DMA_ENGINE && NET + depends on ARCH_QCOM || COMPILE_TEST + select QCOM_QMI_HELPERS help Select driver if Qualcomm's SLIMbus Satellite Non-Generic Device Component is programmed using Linux kernel. 
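[Editor's note on the pps hunks above: pps_register_source() now reports failure as ERR_PTR(errno) instead of NULL, and the in-tree clients (pps-gpio, pps-ktimer, pps-ldisc, pps_parport) were converted accordingly. The sketch below is not part of the patch; it shows the caller-side pattern an out-of-tree client would follow. The "foo" source, foo_register() and foo_pps names are illustrative only.]

#include <linux/err.h>
#include <linux/module.h>
#include <linux/pps_kernel.h>

static struct pps_device *foo_pps;

static int foo_register(void)
{
	/* hypothetical PPS source descriptor, values are placeholders */
	static struct pps_source_info info = {
		.name = "foo",
		.path = "",
		.mode = PPS_CAPTUREASSERT | PPS_OFFSETASSERT |
			PPS_CANWAIT | PPS_TSFMT_TSPEC,
		.owner = THIS_MODULE,
	};

	foo_pps = pps_register_source(&info,
			PPS_CAPTUREASSERT | PPS_OFFSETASSERT);
	if (IS_ERR(foo_pps))			/* was: if (foo_pps == NULL) */
		return PTR_ERR(foo_pps);	/* was: return -ENOMEM or -EINVAL */

	return 0;
}

[With ERR_PTR the caller propagates the real reason registration failed, rather than mapping every failure to a single guessed errno as the old NULL-returning API forced the clients above to do.]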
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index 1382a8df6c75..ccf33217236c 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -1327,11 +1327,12 @@ static int of_qcom_slim_ngd_register(struct device *parent,
 {
 	const struct ngd_reg_offset_data *data;
 	struct qcom_slim_ngd *ngd;
+	const struct of_device_id *match;
 	struct device_node *node;
 	u32 id;
 
-	data = of_match_node(qcom_slim_ngd_dt_match, parent->of_node)->data;
-
+	match = of_match_node(qcom_slim_ngd_dt_match, parent->of_node);
+	data = match->data;
 	for_each_available_child_of_node(parent->of_node, node) {
 		if (of_property_read_u32(node, "reg", &id))
 			continue;
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 0a357db4b31b..131342280b46 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -569,20 +569,20 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
 	ssize_t retval = 0;
 	s32 event_count;
 
-	mutex_lock(&idev->info_lock);
-	if (!idev->info || !idev->info->irq)
-		retval = -EIO;
-	mutex_unlock(&idev->info_lock);
-
-	if (retval)
-		return retval;
-
 	if (count != sizeof(s32))
 		return -EINVAL;
 
 	add_wait_queue(&idev->wait, &wait);
 
 	do {
+		mutex_lock(&idev->info_lock);
+		if (!idev->info || !idev->info->irq) {
+			retval = -EIO;
+			mutex_unlock(&idev->info_lock);
+			break;
+		}
+		mutex_unlock(&idev->info_lock);
+
 		set_current_state(TASK_INTERRUPTIBLE);
 
 		event_count = atomic_read(&idev->event);
@@ -1017,6 +1017,9 @@ void uio_unregister_device(struct uio_info *info)
 	idev->info = NULL;
 	mutex_unlock(&idev->info_lock);
 
+	wake_up_interruptible(&idev->wait);
+	kill_fasync(&idev->async_queue, SIGIO, POLL_HUP);
+
 	device_unregister(&idev->dev);
 
 	return;
diff --git a/drivers/uio/uio_fsl_elbc_gpcm.c b/drivers/uio/uio_fsl_elbc_gpcm.c
index 9cc37fe07d35..0ee3cd3c25ee 100644
--- a/drivers/uio/uio_fsl_elbc_gpcm.c
+++ b/drivers/uio/uio_fsl_elbc_gpcm.c
@@ -74,8 +74,7 @@ DEVICE_ATTR(reg_or, S_IRUGO|S_IWUSR|S_IWGRP, reg_show, reg_store);
 static ssize_t reg_show(struct device *dev, struct device_attribute *attr,
 			char *buf)
 {
-	struct platform_device *pdev = to_platform_device(dev);
-	struct uio_info *info = platform_get_drvdata(pdev);
+	struct uio_info *info = dev_get_drvdata(dev);
 	struct fsl_elbc_gpcm *priv = info->priv;
 	struct fsl_lbc_bank *bank = &priv->lbc->bank[priv->bank];
 
@@ -94,8 +93,7 @@ static ssize_t reg_show(struct device *dev, struct device_attribute *attr,
 static ssize_t reg_store(struct device *dev, struct device_attribute *attr,
 			 const char *buf, size_t count)
 {
-	struct platform_device *pdev = to_platform_device(dev);
-	struct uio_info *info = platform_get_drvdata(pdev);
+	struct uio_info *info = dev_get_drvdata(dev);
 	struct fsl_elbc_gpcm *priv = info->priv;
 	struct fsl_lbc_bank *bank = &priv->lbc->bank[priv->bank];
 	unsigned long val;
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index 3093655c7b92..1475ed5ffcde 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -1312,7 +1312,7 @@ static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
 		return -EINVAL;
 	}
 
-	if (f32bit)
+	if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
 		ret = vbg_hgcm_call32(gdev, client_id, call->function,
 				      call->timeout_ms, VBG_IOCTL_HGCM_CALL_PARMS32(call), |