author      Linus Torvalds <torvalds@linux-foundation.org>  2021-07-02 21:08:10 +0200
committer   Linus Torvalds <torvalds@linux-foundation.org>  2021-07-02 21:08:10 +0200
commit      71bd9341011f626d692aabe024f099820f02c497 (patch)
tree        a1c27fd8f17daff36e380800c5b69769d0d9cc99 /ipc
parent      Merge branch 'for-5.14' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/c... (diff)
parent      ipc/util.c: use binary search for max_idx (diff)
download    linux-71bd9341011f626d692aabe024f099820f02c497.tar.xz
            linux-71bd9341011f626d692aabe024f099820f02c497.zip
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
 "190 patches.

  Subsystems affected by this patch series: mm (hugetlb, userfaultfd,
  vmscan, kconfig, proc, z3fold, zbud, ras, mempolicy, memblock,
  migration, thp, nommu, kconfig, madvise, memory-hotplug, zswap,
  zsmalloc, zram, cleanups, kfence, and hmm), procfs, sysctl, misc,
  core-kernel, lib, lz4, checkpatch, init, kprobes, nilfs2, hfs,
  signals, exec, kcov, selftests, compress/decompress, and ipc"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (190 commits)
  ipc/util.c: use binary search for max_idx
  ipc/sem.c: use READ_ONCE()/WRITE_ONCE() for use_global_lock
  ipc: use kmalloc for msg_queue and shmid_kernel
  ipc sem: use kvmalloc for sem_undo allocation
  lib/decompressors: remove set but not used variabled 'level'
  selftests/vm/pkeys: exercise x86 XSAVE init state
  selftests/vm/pkeys: refill shadow register after implicit kernel write
  selftests/vm/pkeys: handle negative sys_pkey_alloc() return code
  selftests/vm/pkeys: fix alloc_random_pkey() to make it really, really random
  kcov: add __no_sanitize_coverage to fix noinstr for all architectures
  exec: remove checks in __register_bimfmt()
  x86: signal: don't do sas_ss_reset() until we are certain that sigframe won't be abandoned
  hfsplus: report create_date to kstat.btime
  hfsplus: remove unnecessary oom message
  nilfs2: remove redundant continue statement in a while-loop
  kprobes: remove duplicated strong free_insn_page in x86 and s390
  init: print out unknown kernel parameters
  checkpatch: do not complain about positive return values starting with EPOLL
  checkpatch: improve the indented label test
  checkpatch: scripts/spdxcheck.py now requires python3
  ...
Diffstat (limited to 'ipc')
-rw-r--r--  ipc/msg.c   6
-rw-r--r--  ipc/sem.c   25
-rw-r--r--  ipc/shm.c   6
-rw-r--r--  ipc/util.c  44
-rw-r--r--  ipc/util.h  3
5 files changed, 63 insertions(+), 21 deletions(-)
diff --git a/ipc/msg.c b/ipc/msg.c
index 6e6c8e0c9380..6810276d6bb9 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -130,7 +130,7 @@ static void msg_rcu_free(struct rcu_head *head)
struct msg_queue *msq = container_of(p, struct msg_queue, q_perm);
security_msg_queue_free(&msq->q_perm);
- kvfree(msq);
+ kfree(msq);
}
/**
@@ -147,7 +147,7 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
key_t key = params->key;
int msgflg = params->flg;
- msq = kvmalloc(sizeof(*msq), GFP_KERNEL);
+ msq = kmalloc(sizeof(*msq), GFP_KERNEL);
if (unlikely(!msq))
return -ENOMEM;
@@ -157,7 +157,7 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
msq->q_perm.security = NULL;
retval = security_msg_queue_alloc(&msq->q_perm);
if (retval) {
- kvfree(msq);
+ kfree(msq);
return retval;
}
diff --git a/ipc/sem.c b/ipc/sem.c
index bf534c74293e..971e75d28364 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -217,6 +217,8 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
* this smp_load_acquire(), this is guaranteed because the smp_load_acquire()
* is inside a spin_lock() and after a write from 0 to non-zero a
* spin_lock()+spin_unlock() is done.
+ * To prevent the compiler/cpu temporarily writing 0 to use_global_lock,
+ * READ_ONCE()/WRITE_ONCE() is used.
*
* 2) queue.status: (SEM_BARRIER_2)
* Initialization is done while holding sem_lock(), so no further barrier is
@@ -342,10 +344,10 @@ static void complexmode_enter(struct sem_array *sma)
* Nothing to do, just reset the
* counter until we return to simple mode.
*/
- sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
+ WRITE_ONCE(sma->use_global_lock, USE_GLOBAL_LOCK_HYSTERESIS);
return;
}
- sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
+ WRITE_ONCE(sma->use_global_lock, USE_GLOBAL_LOCK_HYSTERESIS);
for (i = 0; i < sma->sem_nsems; i++) {
sem = &sma->sems[i];
@@ -371,7 +373,8 @@ static void complexmode_tryleave(struct sem_array *sma)
/* See SEM_BARRIER_1 for purpose/pairing */
smp_store_release(&sma->use_global_lock, 0);
} else {
- sma->use_global_lock--;
+ WRITE_ONCE(sma->use_global_lock,
+ sma->use_global_lock-1);
}
}
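The WRITE_ONCE() in complexmode_tryleave() pairs with the lockless READ_ONCE() check added to sem_lock() further down. A minimal sketch of the concern (illustrative only, not code from the patch): a plain decrement of a variable that is also read without a lock leaves the compiler free to tear the store or go through a transient intermediate value, while WRITE_ONCE() forces a single store, so a concurrent READ_ONCE() observes either the old or the new value:

	/* plain write: may be torn or pass through a transient value */
	sma->use_global_lock--;

	/* single non-torn store, safe against a concurrent lockless READ_ONCE() */
	WRITE_ONCE(sma->use_global_lock, sma->use_global_lock - 1);

The unlocked READ_ONCE() in sem_lock() remains "just an optimization", as the comment in the next hunk puts it; the real decision is still made under the lock.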
@@ -412,7 +415,7 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
* Initial check for use_global_lock. Just an optimization,
* no locking, no memory barrier.
*/
- if (!sma->use_global_lock) {
+ if (!READ_ONCE(sma->use_global_lock)) {
/*
* It appears that no complex operation is around.
* Acquire the per-semaphore lock.
@@ -1154,7 +1157,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
un->semid = -1;
list_del_rcu(&un->list_proc);
spin_unlock(&un->ulp->lock);
- kfree_rcu(un, rcu);
+ kvfree_rcu(un, rcu);
}
/* Wake up all pending processes and let them fail with EIDRM. */
@@ -1937,7 +1940,8 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
rcu_read_unlock();
/* step 2: allocate new undo structure */
- new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
+ new = kvzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems,
+ GFP_KERNEL);
if (!new) {
ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
return ERR_PTR(-ENOMEM);
@@ -1949,7 +1953,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
if (!ipc_valid_object(&sma->sem_perm)) {
sem_unlock(sma, -1);
rcu_read_unlock();
- kfree(new);
+ kvfree(new);
un = ERR_PTR(-EIDRM);
goto out;
}
@@ -1960,7 +1964,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
*/
un = lookup_undo(ulp, semid);
if (un) {
- kfree(new);
+ kvfree(new);
goto success;
}
/* step 5: initialize & link new undo structure */
@@ -2420,7 +2424,7 @@ void exit_sem(struct task_struct *tsk)
rcu_read_unlock();
wake_up_q(&wake_q);
- kfree_rcu(un, rcu);
+ kvfree_rcu(un, rcu);
}
kfree(ulp);
}
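The sem_undo changes above move from kzalloc()/kfree()/kfree_rcu() to kvzalloc()/kvfree()/kvfree_rcu(), presumably because the allocation size scales with the number of semaphores in the set. Rough arithmetic, assuming the default SEMMSL limit of 32000 semaphores per set (the figure is an assumption for illustration, not taken from the patch):

	sizeof(struct sem_undo) + 32000 * sizeof(short)	/* roughly 64 KB */

A high-order kmalloc() of that size can fail under memory fragmentation; kvzalloc() may fall back to vmalloc() instead, and memory obtained that way must be released with kvfree()/kvfree_rcu(), which is why the allocation and the free sites change together.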
@@ -2435,7 +2439,8 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
/*
* The proc interface isn't aware of sem_lock(), it calls
- * ipc_lock_object() directly (in sysvipc_find_ipc).
+ * ipc_lock_object(), i.e. spin_lock(&sma->sem_perm.lock).
+ * (in sysvipc_find_ipc)
* In order to stay compatible with sem_lock(), we must
* enter / leave complex_mode.
*/
diff --git a/ipc/shm.c b/ipc/shm.c
index 003234fbbd17..748933e376ca 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -222,7 +222,7 @@ static void shm_rcu_free(struct rcu_head *head)
struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
shm_perm);
security_shm_free(&shp->shm_perm);
- kvfree(shp);
+ kfree(shp);
}
static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
@@ -619,7 +619,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
ns->shm_tot + numpages > ns->shm_ctlall)
return -ENOSPC;
- shp = kvmalloc(sizeof(*shp), GFP_KERNEL);
+ shp = kmalloc(sizeof(*shp), GFP_KERNEL);
if (unlikely(!shp))
return -ENOMEM;
@@ -630,7 +630,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
shp->shm_perm.security = NULL;
error = security_shm_alloc(&shp->shm_perm);
if (error) {
- kvfree(shp);
+ kfree(shp);
return error;
}
diff --git a/ipc/util.c b/ipc/util.c
index cfa0045e748d..0027e47626b7 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -64,6 +64,7 @@
#include <linux/memory.h>
#include <linux/ipc_namespace.h>
#include <linux/rhashtable.h>
+#include <linux/log2.h>
#include <asm/unistd.h>
@@ -451,6 +452,41 @@ static void ipc_kht_remove(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
}
/**
+ * ipc_search_maxidx - search for the highest assigned index
+ * @ids: ipc identifier set
+ * @limit: known upper limit for highest assigned index
+ *
+ * The function determines the highest assigned index in @ids. It is intended
+ * to be called when ids->max_idx needs to be updated.
+ * Updating ids->max_idx is necessary when the current highest index ipc
+ * object is deleted.
+ * If no ipc object is allocated, then -1 is returned.
+ *
+ * ipc_ids.rwsem needs to be held by the caller.
+ */
+static int ipc_search_maxidx(struct ipc_ids *ids, int limit)
+{
+ int tmpidx;
+ int i;
+ int retval;
+
+ i = ilog2(limit+1);
+
+ retval = 0;
+ for (; i >= 0; i--) {
+ tmpidx = retval | (1<<i);
+ /*
+ * "0" is a possible index value, thus search using
+ * e.g. 15,7,3,1,0 instead of 16,8,4,2,1.
+ */
+ tmpidx = tmpidx-1;
+ if (idr_get_next(&ids->ipcs_idr, &tmpidx))
+ retval |= (1<<i);
+ }
+ return retval - 1;
+}
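A short worked trace of ipc_search_maxidx() may help: with the highest allocated index at 10 and limit = 12 (values chosen for illustration), ilog2(13) = 3, so the loop probes tmpidx = 7, 11, 9 and 10; the probes at 7, 9 and 10 each find an entry, retval grows 8 -> 10 -> 11, and the function returns 11 - 1 = 10. Below is a minimal user-space sketch of the same binary search, with a hypothetical exists_at_or_above() helper standing in for idr_get_next() and a plain bool array standing in for the idr (both are assumptions for illustration, not kernel code):

	#include <stdbool.h>

	/* stand-in for idr_get_next(): is any index >= start in use? */
	static bool exists_at_or_above(const bool *used, int size, int start)
	{
		for (int idx = start; idx < size; idx++)
			if (used[idx])
				return true;
		return false;
	}

	/* floor(log2(x)) for x >= 1, matching what ilog2() returns here */
	static int floor_log2(int x)
	{
		int r = -1;

		while (x) {
			x >>= 1;
			r++;
		}
		return r;
	}

	/* binary search for the highest used index, mirroring ipc_search_maxidx() */
	static int search_maxidx(const bool *used, int size, int limit)
	{
		int retval = 0;

		for (int i = floor_log2(limit + 1); i >= 0; i--)
			if (exists_at_or_above(used, size, (retval | (1 << i)) - 1))
				retval |= 1 << i;
		return retval - 1;	/* -1 when no index is in use */
	}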
+
+/**
* ipc_rmid - remove an ipc identifier
* @ids: ipc identifier set
* @ipcp: ipc perm structure containing the identifier to remove
@@ -468,11 +504,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
ipcp->deleted = true;
if (unlikely(idx == ids->max_idx)) {
- do {
- idx--;
- if (idx == -1)
- break;
- } while (!idr_find(&ids->ipcs_idr, idx));
+ idx = ids->max_idx-1;
+ if (idx >= 0)
+ idx = ipc_search_maxidx(ids, idx);
ids->max_idx = idx;
}
}
diff --git a/ipc/util.h b/ipc/util.h
index 5766c61aed0e..2dd7ce0416d8 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -145,6 +145,9 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flg);
* ipc_get_maxidx - get the highest assigned index
* @ids: ipc identifier set
*
+ * The function returns the highest assigned index for @ids. The function
+ * doesn't scan the idr tree, it uses a cached value.
+ *
* Called with ipc_ids.rwsem held for reading.
*/
static inline int ipc_get_maxidx(struct ipc_ids *ids)