Diffstat (limited to 'ipc')
-rw-r--r--	ipc/mqueue.c |  5 -----
-rw-r--r--	ipc/sem.c    | 47 +++++++++++++++++++++++++++++++++++++++++++++++++++------------
-rw-r--r--	ipc/shm.c    |  2 +-
3 files changed, 36 insertions(+), 18 deletions(-)
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index a24ba9fe5bb8..161a1807e6ef 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -142,7 +142,6 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
if (!leaf)
return -ENOMEM;
INIT_LIST_HEAD(&leaf->msg_list);
- info->qsize += sizeof(*leaf);
}
leaf->priority = msg->m_type;
rb_link_node(&leaf->rb_node, parent, p);
@@ -187,7 +186,6 @@ try_again:
"lazy leaf delete!\n");
rb_erase(&leaf->rb_node, &info->msg_tree);
if (info->node_cache) {
- info->qsize -= sizeof(*leaf);
kfree(leaf);
} else {
info->node_cache = leaf;
@@ -200,7 +198,6 @@ try_again:
if (list_empty(&leaf->msg_list)) {
rb_erase(&leaf->rb_node, &info->msg_tree);
if (info->node_cache) {
- info->qsize -= sizeof(*leaf);
kfree(leaf);
} else {
info->node_cache = leaf;
@@ -1034,7 +1031,6 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
/* Save our speculative allocation into the cache */
INIT_LIST_HEAD(&new_leaf->msg_list);
info->node_cache = new_leaf;
- info->qsize += sizeof(*new_leaf);
new_leaf = NULL;
} else {
kfree(new_leaf);
@@ -1142,7 +1138,6 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
/* Save our speculative allocation into the cache */
INIT_LIST_HEAD(&new_leaf->msg_list);
info->node_cache = new_leaf;
- info->qsize += sizeof(*new_leaf);
} else {
kfree(new_leaf);
}
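
All five mqueue.c removals above undo the same accounting mistake: info->qsize is user-visible (it is the QSIZE field printed when reading /dev/mqueue/<name>), so it must count queued message bytes only. Also charging the rb-tree leaf nodes (sizeof(*leaf)) made QSIZE disagree with the actual payload, e.g. stay non-zero for an empty queue while a node is cached. A minimal userspace check of the intended behaviour (illustrative, not part of the patch; assumes the mqueue filesystem is mounted at /dev/mqueue, link with -lrt on older glibc):

#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	mqd_t q = mq_open("/qsize-demo", O_CREAT | O_RDWR, 0600, NULL);
	char buf[256];
	ssize_t n;
	int fd;

	if (q == (mqd_t)-1)
		return 1;
	mq_send(q, "hello", 5, 0);	/* queue now holds 5 bytes of user data */

	fd = open("/dev/mqueue/qsize-demo", O_RDONLY);
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("%s\n", buf);	/* expect QSIZE:5, not 5 + sizeof(leaf) */
	}
	close(fd);
	mq_close(q);
	mq_unlink("/qsize-demo");
	return 0;
}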
diff --git a/ipc/sem.c b/ipc/sem.c
index bc3d530cb23e..b471e5a3863d 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -253,6 +253,16 @@ static void sem_rcu_free(struct rcu_head *head)
}
+/*
+ * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
+ * are only control barriers.
+ * The code must pair with spin_unlock(&sem->lock) or
+ * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
+ *
+ * smp_rmb() is sufficient, as writes cannot pass the control barrier.
+ */
+#define ipc_smp_acquire__after_spin_is_unlocked() smp_rmb()
+
+/*
* Wait until all currently ongoing simple ops have completed.
* Caller must own sem_perm.lock.
* New simple ops cannot start, because simple ops first check
@@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma)
sem = sma->sem_base + i;
spin_unlock_wait(&sem->lock);
}
+ ipc_smp_acquire__after_spin_is_unlocked();
}
/*
@@ -327,13 +338,12 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
/* Then check that the global lock is free */
if (!spin_is_locked(&sma->sem_perm.lock)) {
/*
- * The ipc object lock check must be visible on all
- * cores before rechecking the complex count. Otherwise
- * we can race with another thread that does:
+ * We need a memory barrier with acquire semantics,
+ * otherwise we can race with another thread that does:
* complex_count++;
* spin_unlock(sem_perm.lock);
*/
- smp_rmb();
+ ipc_smp_acquire__after_spin_is_unlocked();
/*
* Now repeat the test of complex_count:
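
Both sem.c barrier hunks above fix the same pattern: spin_unlock_wait() and !spin_is_locked() give only a control dependency, which keeps later writes from being hoisted but does not order later reads, so the recheck of complex_count needs an explicit smp_rmb() to pair with the writer's spin_unlock(). An illustrative sketch of the pairing (field and lock names follow ipc/sem.c; the two functions are hypothetical):

/* Writer: takes the global lock to switch to complex mode. */
static void hypothetical_writer(struct sem_array *sma)
{
	spin_lock(&sma->sem_perm.lock);
	sma->complex_count++;
	spin_unlock(&sma->sem_perm.lock);	/* release semantics */
}

/* Reader: the simple-op fast path that avoids the global lock. */
static void hypothetical_reader(struct sem_array *sma)
{
	if (!spin_is_locked(&sma->sem_perm.lock)) {
		/*
		 * Control dependency only: without a barrier, the load of
		 * complex_count below may be satisfied from before the
		 * lock was observed free, missing the writer's increment.
		 */
		ipc_smp_acquire__after_spin_is_unlocked();
		if (sma->complex_count == 0) {
			/* safe to proceed with the per-semaphore lock */
		}
	}
}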
@@ -2074,17 +2084,28 @@ void exit_sem(struct task_struct *tsk)
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
struct sem_undo, list_proc);
- if (&un->list_proc == &ulp->list_proc)
- semid = -1;
- else
- semid = un->semid;
+ if (&un->list_proc == &ulp->list_proc) {
+ /*
+ * We must wait for freeary() before freeing this ulp,
+ * in case we raced with last sem_undo. There is a small
+ * possibility where we exit while freeary() didn't
+ * finish unlocking sem_undo_list.
+ */
+ spin_unlock_wait(&ulp->lock);
+ rcu_read_unlock();
+ break;
+ }
+ spin_lock(&ulp->lock);
+ semid = un->semid;
+ spin_unlock(&ulp->lock);
+ /* exit_sem raced with IPC_RMID, nothing to do */
if (semid == -1) {
rcu_read_unlock();
- break;
+ continue;
}
- sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
+ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
/* exit_sem raced with IPC_RMID, nothing to do */
if (IS_ERR(sma)) {
rcu_read_unlock();
@@ -2112,9 +2133,11 @@ void exit_sem(struct task_struct *tsk)
ipc_assert_locked_object(&sma->sem_perm);
list_del(&un->list_id);
- spin_lock(&ulp->lock);
+ /* we are the last process using this ulp, acquiring ulp->lock
+ * isn't required. Besides that, we are also protected against
+ * IPC_RMID as we hold sma->sem_perm lock now
+ */
list_del_rcu(&un->list_proc);
- spin_unlock(&ulp->lock);
/* perform adjustments registered in un */
for (i = 0; i < sma->sem_nsems; i++) {
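
The exit_sem() hunks above close a use-after-free against IPC_RMID: un->semid is now read under ulp->lock (freeary() can flip it to -1 concurrently), a removed set makes the loop continue rather than break so remaining undo entries are still processed, and on the final iteration spin_unlock_wait(&ulp->lock) ensures a racing freeary() is done with the undo list before the exiting task frees it. A hypothetical stress sketch of the racing pattern (illustrative only, not a guaranteed reproducer):

#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	for (;;) {
		int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
		struct sembuf op = {
			.sem_num = 0, .sem_op = 1, .sem_flg = SEM_UNDO,
		};

		if (id < 0)
			return 1;
		if (fork() == 0) {
			semop(id, &op, 1);	/* registers a sem_undo entry */
			_exit(0);		/* exit_sem() walks the undo list */
		}
		semctl(id, 0, IPC_RMID);	/* freeary() races with exit_sem() */
		wait(NULL);
	}
}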
diff --git a/ipc/shm.c b/ipc/shm.c
index 06e5cf2fe019..4aef24d91b63 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -545,7 +545,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
if ((shmflg & SHM_NORESERVE) &&
sysctl_overcommit_memory != OVERCOMMIT_NEVER)
acctflag = VM_NORESERVE;
- file = shmem_file_setup(name, size, acctflag);
+ file = shmem_kernel_file_setup(name, size, acctflag);
}
error = PTR_ERR(file);
if (IS_ERR(file))
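
The shm.c change makes newseg() back SysV segments with a kernel-internal shmem file: shmem_kernel_file_setup() marks the tmpfs inode S_PRIVATE, so security modules skip inode-based permission checks on it and access control stays with the checks on the IPC object itself. As far as I can tell, both helpers share one signature (declared in include/linux/shmem_fs.h):

struct file *shmem_file_setup(const char *name,
			      loff_t size, unsigned long flags);
struct file *shmem_kernel_file_setup(const char *name,
				     loff_t size, unsigned long flags);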