| author | Sebastian Andrzej Siewior <bigeasy@linutronix.de> | 2019-05-03 16:12:21 +0200 |
|---|---|---|
| committer | John Johansen <john.johansen@canonical.com> | 2019-06-20 19:33:31 +0200 |
| commit | df323337e507a0009d3db1ea25948d4c7f320d62 (patch) | |
| tree | bc8c3f6707fcf31a22f2eaa19eca5838fb87517e /security/apparmor/lsm.c | |
| parent | apparmor: Force type-casting of current->real_cred (diff) | |
| download | linux-df323337e507a0009d3db1ea25948d4c7f320d62.tar.xz, linux-df323337e507a0009d3db1ea25948d4c7f320d62.zip | |
apparmor: Use a memory pool instead of per-CPU caches
The get_buffers() macro may provide one or two buffers to the caller.
Those buffers are pre-allocated on init for each CPU. By default it
allocates

    2 * 2 * MAX_PATH * POSSIBLE_CPU

which equals 64KiB on a system with 4 CPUs, or 1MiB with 64 CPUs, and so on.
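For a sense of scale (not part of the patch): assuming PATH_MAX = 4096 and the default aa_g_path_max of 2 * PATH_MAX = 8192 bytes, the old scheme reserves two such buffers per possible CPU. A quick back-of-the-envelope check:

```c
/* Illustrative userspace check of the numbers quoted above; the values
 * for PATH_MAX and the default aa_g_path_max are assumptions here. */
#include <stdio.h>

int main(void)
{
	const unsigned long path_max = 4096;          /* PATH_MAX */
	const unsigned long buf_size = 2 * path_max;  /* default aa_g_path_max */
	const unsigned long bufs_per_cpu = 2;         /* get_buffers() may hand out two */
	unsigned long cpus;

	/* prints 64 KiB for 4 CPUs and 1024 KiB (1 MiB) for 64 CPUs */
	for (cpus = 4; cpus <= 64; cpus *= 16)
		printf("%2lu CPUs -> %4lu KiB preallocated\n",
		       cpus, bufs_per_cpu * buf_size * cpus / 1024);
	return 0;
}
```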
Replace the per-CPU buffers with a common memory pool which is shared
across all CPUs. The pool grows on demand and never shrinks. The pool
starts with two (UP) or four (SMP) elements. By using this pool it is
possible to request a buffer while keeping preemption enabled, which avoids
the hack in profile_transition().
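The pattern itself is small enough to sketch in user space (a minimal, illustrative sketch only; the in-kernel version in the diff below uses a spinlock, a list_head and kmalloc(), but overlays the list linkage on the buffer storage in the same way):

```c
/* Minimal sketch of a grow-on-demand buffer pool. Free buffers reuse
 * their own storage as the list linkage, so an idle buffer costs no
 * extra memory. The pool grows when empty and never shrinks. */
#include <pthread.h>
#include <stdlib.h>

#define BUF_SIZE 8192

union pool_buf {
	union pool_buf *next;	/* valid while the buffer sits in the pool */
	char buffer[BUF_SIZE];	/* valid while the buffer is handed out */
};

static union pool_buf *free_list;
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

char *pool_get(void)
{
	union pool_buf *buf;

	pthread_mutex_lock(&pool_lock);
	buf = free_list;
	if (buf)
		free_list = buf->next;
	pthread_mutex_unlock(&pool_lock);

	if (!buf)
		buf = malloc(sizeof(*buf));	/* grow on demand */
	return buf ? buf->buffer : NULL;	/* caller must handle NULL */
}

void pool_put(char *p)
{
	/* buffer[] sits at offset 0, so the pointer can be cast back */
	union pool_buf *buf = (union pool_buf *)p;

	pthread_mutex_lock(&pool_lock);
	buf->next = free_list;
	free_list = buf;
	pthread_mutex_unlock(&pool_lock);
}
```

The kernel version differs mainly in that aa_get_buffer() allocates aa_g_path_max bytes rather than a fixed size and retries the allocation once, as described next.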
Tetsuo Handa has pointed out that GFP_KERNEL allocations for small
amounts of memory do not fail. To avoid an endless retry loop,
__GFP_RETRY_MAYFAIL is passed (so the allocation is not repeated until it
succeeds) and the request is retried once, in the hope that a buffer has
been returned to the pool in the meantime. Since NULL is now possible, all
allocation paths check the buffer pointer and return -ENOMEM on failure.
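On the caller side the new contract looks roughly like this (a simplified sketch, not taken from the patch; do_path_work() is a hypothetical stand-in for whatever the call site does with the buffer):

```c
/* Hypothetical converted call site: allocation may now fail, so the
 * buffer pointer is checked and -ENOMEM propagated to the caller. */
static int example_op(void)
{
	char *buffer;
	int error;

	buffer = aa_get_buffer();
	if (!buffer)
		return -ENOMEM;		/* pool empty and kmalloc() gave up */

	error = do_path_work(buffer);	/* hypothetical helper */

	aa_put_buffer(buffer);
	return error;
}
```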
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: John Johansen <john.johansen@canonical.com>
Diffstat (limited to 'security/apparmor/lsm.c')
-rw-r--r-- | security/apparmor/lsm.c | 111 |
1 file changed, 84 insertions, 27 deletions
```diff
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 8f7ffc51ad86..3e0cfdebee45 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -48,8 +48,13 @@
 
 /* Flag indicating whether initialization completed */
 int apparmor_initialized;
 
-DEFINE_PER_CPU(struct aa_buffers, aa_buffers);
+union aa_buffer {
+	struct list_head list;
+	char buffer[1];
+};
+static LIST_HEAD(aa_global_buffers);
+static DEFINE_SPINLOCK(aa_buffers_lock);
 
 /*
  * LSM hook functions
@@ -1422,6 +1427,7 @@ static int param_set_aauint(const char *val, const struct kernel_param *kp)
 		return -EPERM;
 
 	error = param_set_uint(val, kp);
+	aa_g_path_max = max_t(uint32_t, aa_g_path_max, sizeof(union aa_buffer));
 	pr_info("AppArmor: buffer size set to %d bytes\n", aa_g_path_max);
 
 	return error;
@@ -1565,6 +1571,48 @@ static int param_set_mode(const char *val, const struct kernel_param *kp)
 	return 0;
 }
 
+char *aa_get_buffer(void)
+{
+	union aa_buffer *aa_buf;
+	bool try_again = true;
+
+retry:
+	spin_lock(&aa_buffers_lock);
+	if (!list_empty(&aa_global_buffers)) {
+		aa_buf = list_first_entry(&aa_global_buffers, union aa_buffer,
+					  list);
+		list_del(&aa_buf->list);
+		spin_unlock(&aa_buffers_lock);
+		return &aa_buf->buffer[0];
+	}
+	spin_unlock(&aa_buffers_lock);
+
+	aa_buf = kmalloc(aa_g_path_max, GFP_KERNEL | __GFP_RETRY_MAYFAIL |
+			 __GFP_NOWARN);
+	if (!aa_buf) {
+		if (try_again) {
+			try_again = false;
+			goto retry;
+		}
+		pr_warn_once("AppArmor: Failed to allocate a memory buffer.\n");
+		return NULL;
+	}
+	return &aa_buf->buffer[0];
+}
+
+void aa_put_buffer(char *buf)
+{
+	union aa_buffer *aa_buf;
+
+	if (!buf)
+		return;
+	aa_buf = container_of(buf, union aa_buffer, buffer[0]);
+
+	spin_lock(&aa_buffers_lock);
+	list_add(&aa_buf->list, &aa_global_buffers);
+	spin_unlock(&aa_buffers_lock);
+}
+
 /*
  * AppArmor init functions
  */
@@ -1585,38 +1633,48 @@ static int __init set_init_ctx(void)
 
 static void destroy_buffers(void)
 {
-	u32 i, j;
+	union aa_buffer *aa_buf;
 
-	for_each_possible_cpu(i) {
-		for_each_cpu_buffer(j) {
-			kfree(per_cpu(aa_buffers, i).buf[j]);
-			per_cpu(aa_buffers, i).buf[j] = NULL;
-		}
+	spin_lock(&aa_buffers_lock);
+	while (!list_empty(&aa_global_buffers)) {
+		aa_buf = list_first_entry(&aa_global_buffers, union aa_buffer,
+					  list);
+		list_del(&aa_buf->list);
+		spin_unlock(&aa_buffers_lock);
+		kfree(aa_buf);
+		spin_lock(&aa_buffers_lock);
 	}
+	spin_unlock(&aa_buffers_lock);
 }
 
 static int __init alloc_buffers(void)
 {
-	u32 i, j;
-
-	for_each_possible_cpu(i) {
-		for_each_cpu_buffer(j) {
-			char *buffer;
-
-			if (cpu_to_node(i) > num_online_nodes())
-				/* fallback to kmalloc for offline nodes */
-				buffer = kmalloc(aa_g_path_max, GFP_KERNEL);
-			else
-				buffer = kmalloc_node(aa_g_path_max, GFP_KERNEL,
-						      cpu_to_node(i));
-			if (!buffer) {
-				destroy_buffers();
-				return -ENOMEM;
-			}
-			per_cpu(aa_buffers, i).buf[j] = buffer;
+	union aa_buffer *aa_buf;
+	int i, num;
+
+	/*
+	 * A function may require two buffers at once. Usually the buffers are
+	 * used for a short period of time and are shared. On UP kernel buffers
+	 * two should be enough, with more CPUs it is possible that more
+	 * buffers will be used simultaneously. The preallocated pool may grow.
+	 * This preallocation has also the side-effect that AppArmor will be
+	 * disabled early at boot if aa_g_path_max is extremly high.
+	 */
+	if (num_online_cpus() > 1)
+		num = 4;
+	else
+		num = 2;
+
+	for (i = 0; i < num; i++) {
+
+		aa_buf = kmalloc(aa_g_path_max, GFP_KERNEL |
+				 __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+		if (!aa_buf) {
+			destroy_buffers();
+			return -ENOMEM;
 		}
+		aa_put_buffer(&aa_buf->buffer[0]);
 	}
-
 	return 0;
 }
 
@@ -1781,7 +1839,7 @@ static int __init apparmor_init(void)
 	error = alloc_buffers();
 	if (error) {
 		AA_ERROR("Unable to allocate work buffers\n");
-		goto buffers_out;
+		goto alloc_out;
 	}
 
 	error = set_init_ctx();
@@ -1806,7 +1864,6 @@ static int __init apparmor_init(void)
 
 buffers_out:
 	destroy_buffers();
-
 alloc_out:
 	aa_destroy_aafs();
 	aa_teardown_dfa_engine();
```
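One detail worth calling out in the hunks above: while a buffer sits in the pool, its first bytes are reused as the struct list_head linkage (the union overlays the two), which is why param_set_aauint() now clamps aa_g_path_max to at least sizeof(union aa_buffer); a smaller administrator-supplied size would not leave room for the list node. A minimal illustration (the two-pointer list_head stand-in and the resulting size are assumptions for a typical 64-bit build):

```c
#include <stdio.h>

/* Stand-in for the kernel's struct list_head: two pointers. */
struct list_head { struct list_head *next, *prev; };

/* Same overlay trick as the patch: list linkage while pooled,
 * raw character storage while handed out. */
union aa_buffer {
	struct list_head list;
	char buffer[1];
};

int main(void)
{
	/* aa_g_path_max must be at least this large, hence the max_t()
	 * clamp added to param_set_aauint(). */
	printf("sizeof(union aa_buffer) = %zu\n", sizeof(union aa_buffer));
	return 0;
}
```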