author    | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2015-09-30 02:28:52 +0300
committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2015-09-30 02:28:52 +0300
commit    | 8f697e574012cc73b6b0dcbf30d88a3a0f43b78f (patch)
tree      | a6b16bcf33ad08e03837b61eed2fffb31118f1f3 /ipc
parent    | 221bcb24c6530be17468fdcdbf91299aba32a693 (diff)
parent    | 9ffecb10283508260936b96022d4ee43a7798b4c (diff)
download  | linux-8f697e574012cc73b6b0dcbf30d88a3a0f43b78f.tar.xz
Merge tag 'v4.3-rc3' into next
Merge with Linux 4.3-rc3 to bring in MFD DA9062 changes to merge DA9062
OnKey driver.
Diffstat (limited to 'ipc')
-rw-r--r-- | ipc/mqueue.c  |  5
-rw-r--r-- | ipc/msgutil.c |  2
-rw-r--r-- | ipc/sem.c     | 47
-rw-r--r-- | ipc/shm.c     |  6
4 files changed, 39 insertions, 21 deletions
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index a24ba9fe5bb8..161a1807e6ef 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -142,7 +142,6 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
 		if (!leaf)
 			return -ENOMEM;
 		INIT_LIST_HEAD(&leaf->msg_list);
-		info->qsize += sizeof(*leaf);
 	}
 	leaf->priority = msg->m_type;
 	rb_link_node(&leaf->rb_node, parent, p);
@@ -187,7 +186,6 @@ try_again:
 			     "lazy leaf delete!\n");
 		rb_erase(&leaf->rb_node, &info->msg_tree);
 		if (info->node_cache) {
-			info->qsize -= sizeof(*leaf);
 			kfree(leaf);
 		} else {
 			info->node_cache = leaf;
@@ -200,7 +198,6 @@ try_again:
 		if (list_empty(&leaf->msg_list)) {
 			rb_erase(&leaf->rb_node, &info->msg_tree);
 			if (info->node_cache) {
-				info->qsize -= sizeof(*leaf);
 				kfree(leaf);
 			} else {
 				info->node_cache = leaf;
@@ -1034,7 +1031,6 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
 		/* Save our speculative allocation into the cache */
 		INIT_LIST_HEAD(&new_leaf->msg_list);
 		info->node_cache = new_leaf;
-		info->qsize += sizeof(*new_leaf);
 		new_leaf = NULL;
 	} else {
 		kfree(new_leaf);
@@ -1142,7 +1138,6 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
 		/* Save our speculative allocation into the cache */
 		INIT_LIST_HEAD(&new_leaf->msg_list);
 		info->node_cache = new_leaf;
-		info->qsize += sizeof(*new_leaf);
 	} else {
 		kfree(new_leaf);
 	}
diff --git a/ipc/msgutil.c b/ipc/msgutil.c
index 2b491590ebab..71f448e5e927 100644
--- a/ipc/msgutil.c
+++ b/ipc/msgutil.c
@@ -123,7 +123,7 @@ struct msg_msg *copy_msg(struct msg_msg *src, struct msg_msg *dst)
 	size_t len = src->m_ts;
 	size_t alen;
 
-	BUG_ON(dst == NULL);
+	WARN_ON(dst == NULL);
 	if (src->m_ts > dst->m_ts)
 		return ERR_PTR(-EINVAL);
 
diff --git a/ipc/sem.c b/ipc/sem.c
index bc3d530cb23e..b471e5a3863d 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -253,6 +253,16 @@ static void sem_rcu_free(struct rcu_head *head)
 }
 
 /*
+ * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
+ * are only control barriers.
+ * The code must pair with spin_unlock(&sem->lock) or
+ * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
+ *
+ * smp_rmb() is sufficient, as writes cannot pass the control barrier.
+ */
+#define ipc_smp_acquire__after_spin_is_unlocked()	smp_rmb()
+
+/*
  * Wait until all currently ongoing simple ops have completed.
  * Caller must own sem_perm.lock.
  * New simple ops cannot start, because simple ops first check
@@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma)
 		sem = sma->sem_base + i;
 		spin_unlock_wait(&sem->lock);
 	}
+	ipc_smp_acquire__after_spin_is_unlocked();
 }
 
 /*
@@ -327,13 +338,12 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
 		/* Then check that the global lock is free */
 		if (!spin_is_locked(&sma->sem_perm.lock)) {
 			/*
-			 * The ipc object lock check must be visible on all
-			 * cores before rechecking the complex count.  Otherwise
-			 * we can race with another thread that does:
+			 * We need a memory barrier with acquire semantics,
+			 * otherwise we can race with another thread that does:
 			 *	complex_count++;
 			 *	spin_unlock(sem_perm.lock);
 			 */
-			smp_rmb();
+			ipc_smp_acquire__after_spin_is_unlocked();
 
 			/*
 			 * Now repeat the test of complex_count:
@@ -2074,17 +2084,28 @@ void exit_sem(struct task_struct *tsk)
 		rcu_read_lock();
 		un = list_entry_rcu(ulp->list_proc.next,
 				    struct sem_undo, list_proc);
-		if (&un->list_proc == &ulp->list_proc)
-			semid = -1;
-		 else
-			semid = un->semid;
+		if (&un->list_proc == &ulp->list_proc) {
+			/*
+			 * We must wait for freeary() before freeing this ulp,
+			 * in case we raced with last sem_undo. There is a small
+			 * possibility where we exit while freeary() didn't
+			 * finish unlocking sem_undo_list.
+			 */
+			spin_unlock_wait(&ulp->lock);
+			rcu_read_unlock();
+			break;
+		}
+		spin_lock(&ulp->lock);
+		semid = un->semid;
+		spin_unlock(&ulp->lock);
 
+		/* exit_sem raced with IPC_RMID, nothing to do */
 		if (semid == -1) {
 			rcu_read_unlock();
-			break;
+			continue;
 		}
 
-		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
+		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
 		/* exit_sem raced with IPC_RMID, nothing to do */
 		if (IS_ERR(sma)) {
 			rcu_read_unlock();
@@ -2112,9 +2133,11 @@ void exit_sem(struct task_struct *tsk)
 		ipc_assert_locked_object(&sma->sem_perm);
 		list_del(&un->list_id);
 
-		spin_lock(&ulp->lock);
+		/* we are the last process using this ulp, acquiring ulp->lock
+		 * isn't required. Besides that, we are also protected against
+		 * IPC_RMID as we hold sma->sem_perm lock now
+		 */
 		list_del_rcu(&un->list_proc);
-		spin_unlock(&ulp->lock);
 
 		/* perform adjustments registered in un */
 		for (i = 0; i < sma->sem_nsems; i++) {
diff --git a/ipc/shm.c b/ipc/shm.c
index 06e5cf2fe019..222131e8e38f 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -159,7 +159,7 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
 	 * We raced in the idr lookup or with shm_destroy(). Either way, the
 	 * ID is busted.
 	 */
-	BUG_ON(IS_ERR(ipcp));
+	WARN_ON(IS_ERR(ipcp));
 
 	return container_of(ipcp, struct shmid_kernel, shm_perm);
 }
@@ -393,7 +393,7 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
 		return ret;
 	sfd->vm_ops = vma->vm_ops;
 #ifdef CONFIG_MMU
-	BUG_ON(!sfd->vm_ops->fault);
+	WARN_ON(!sfd->vm_ops->fault);
 #endif
 	vma->vm_ops = &shm_vm_ops;
 	shm_open(vma);
@@ -545,7 +545,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
 		if ((shmflg & SHM_NORESERVE) &&
 		    sysctl_overcommit_memory != OVERCOMMIT_NEVER)
 			acctflag = VM_NORESERVE;
-		file = shmem_file_setup(name, size, acctflag);
+		file = shmem_kernel_file_setup(name, size, acctflag);
 	}
 	error = PTR_ERR(file);
 	if (IS_ERR(file))
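
The comment added to ipc/sem.c above explains that observing !spin_is_locked() is only a control barrier, so ipc_smp_acquire__after_spin_is_unlocked() (an smp_rmb()) must run before complex_count is re-read. Below is a minimal userspace sketch of that barrier pairing, written with C11 atomics and pthreads rather than the kernel primitives; the names global_locked, complex_count, do_complex_op and do_simple_op are invented for illustration and do not appear in the kernel, and the sketch only models the barrier pairing, not the full sem_lock() protocol.

/* build with: cc -std=c11 -pthread sketch.c */
#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

static atomic_int global_locked;   /* stands in for sem_perm.lock being held */
static atomic_int complex_count;   /* stands in for sma->complex_count */

/* writer side: take the "global lock", publish a complex op, release */
static void *do_complex_op(void *arg)
{
	(void)arg;
	atomic_store_explicit(&global_locked, 1, memory_order_relaxed);
	atomic_fetch_add_explicit(&complex_count, 1, memory_order_relaxed);
	/* spin_unlock() provides release semantics, modeled here explicitly */
	atomic_store_explicit(&global_locked, 0, memory_order_release);
	return NULL;
}

/* reader side: the fast-path check done under the per-semaphore lock */
static void *do_simple_op(void *arg)
{
	(void)arg;
	/* seeing the lock "free" is only a control dependency... */
	if (atomic_load_explicit(&global_locked, memory_order_relaxed) == 0) {
		/* ...so an acquire fence plays the role of smp_rmb() here,
		 * ordering the re-read of complex_count after the check */
		atomic_thread_fence(memory_order_acquire);
		if (atomic_load_explicit(&complex_count, memory_order_relaxed) == 0)
			puts("fast path: no complex ops pending");
		else
			puts("fall back to the global lock");
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, do_complex_op, NULL);
	pthread_create(&b, NULL, do_simple_op, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}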