author     Daniel Borkmann <daniel@iogearbox.net>    2017-02-08 01:19:43 +0100
committer  David S. Miller <davem@davemloft.net>     2017-02-08 20:40:03 +0100
commit     c502faf94153bd0fedc5389a936f728a659cc6ab (patch)
tree       603e482c1467bc7d156623c9b83df0cee514d6aa /kernel
parent     bridge: vlan tunnel id info range fill size calc cleanups (diff)
download   linux-c502faf94153bd0fedc5389a936f728a659cc6ab.tar.xz
           linux-c502faf94153bd0fedc5389a936f728a659cc6ab.zip
bpf, lpm: fix overflows in trie_alloc checks
Cap the maximum (total) value size and bail out if it is larger than
KMALLOC_MAX_SIZE, since otherwise it makes no sense to proceed: we are
guaranteed to fail the element allocation in lpm_trie_node_alloc()
anyway. The likelihood of failure is still high for large values,
though, similar to the non-prealloc htab case.
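
For context, a rough, runnable sketch of the arithmetic behind the cap. The
KMALLOC_MAX_SIZE_DEMO and NODE_HEADER_DEMO values below are illustrative
stand-ins, not the kernel's actual constants; the point is that each node is
a single kmalloc() of header + prefix data + value, so value_size must leave
room for the worst-case prefix data under the kmalloc limit.

#include <stdio.h>

/* Stand-ins for the kernel definitions; the real KMALLOC_MAX_SIZE and
 * sizeof(struct lpm_trie_node) depend on the kernel configuration. */
#define KMALLOC_MAX_SIZE_DEMO	(1UL << 22)	/* e.g. 4 MiB */
#define LPM_DATA_SIZE_MAX_DEMO	256UL
#define NODE_HEADER_DEMO	40UL

int main(void)
{
	/* Largest value_size that still keeps the worst-case node
	 * (header + max prefix data + value) within the kmalloc limit. */
	unsigned long val_size_max = KMALLOC_MAX_SIZE_DEMO -
				     LPM_DATA_SIZE_MAX_DEMO - NODE_HEADER_DEMO;

	printf("max value_size:      %lu bytes\n", val_size_max);
	printf("worst-case node size: %lu bytes\n",
	       NODE_HEADER_DEMO + LPM_DATA_SIZE_MAX_DEMO + val_size_max);
	return 0;
}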
Next, make sure the cost variables are really u64 instead of size_t, so that
we don't overflow on 32 bit and end up charging only a tiny map.pages amount
against memlock while allowing a huge max_entries; also cap the maximum cost
as we do for other map types.
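
To see why the u64 widening matters, here is a minimal userspace sketch of
the overflow; the entry count and per-node cost are hypothetical round
numbers, and PAGE_SIZE is assumed to be 4096.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical map: 16M entries, 256 bytes per node. */
	uint32_t max_entries   = 1U << 24;
	uint32_t cost_per_node = 256;

	/* Old behaviour: with a 32-bit size_t the product wraps to 0,
	 * so almost nothing is charged against memlock. */
	uint32_t wrapped = max_entries * cost_per_node;

	/* Fixed behaviour: widen to u64 first, then cap against U32_MAX. */
	uint64_t cost = (uint64_t)max_entries * cost_per_node;

	printf("32-bit cost: %u bytes\n", wrapped);
	printf("64-bit cost: %llu bytes\n", (unsigned long long)cost);
	if (cost >= UINT32_MAX - 4096)
		printf("would now be rejected with -E2BIG\n");
	return 0;
}

With the fix, the 64-bit cost is computed first and anything at or above
U32_MAX - PAGE_SIZE is rejected with -E2BIG before the memlock charge.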
Fixes: b95a5c4db09b ("bpf: add a longest prefix match trie map implementation")
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/bpf/lpm_trie.c	36
1 file changed, 27 insertions(+), 9 deletions(-)
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 144e9763102f..e0f6a0bd279b 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -394,10 +394,21 @@ static int trie_delete_elem(struct bpf_map *map, void *key)
 	return -ENOSYS;
 }
 
+#define LPM_DATA_SIZE_MAX	256
+#define LPM_DATA_SIZE_MIN	1
+
+#define LPM_VAL_SIZE_MAX	(KMALLOC_MAX_SIZE - LPM_DATA_SIZE_MAX - \
+				 sizeof(struct lpm_trie_node))
+#define LPM_VAL_SIZE_MIN	1
+
+#define LPM_KEY_SIZE(X)		(sizeof(struct bpf_lpm_trie_key) + (X))
+#define LPM_KEY_SIZE_MAX	LPM_KEY_SIZE(LPM_DATA_SIZE_MAX)
+#define LPM_KEY_SIZE_MIN	LPM_KEY_SIZE(LPM_DATA_SIZE_MIN)
+
 static struct bpf_map *trie_alloc(union bpf_attr *attr)
 {
-	size_t cost, cost_per_node;
 	struct lpm_trie *trie;
+	u64 cost = sizeof(*trie), cost_per_node;
 	int ret;
 
 	if (!capable(CAP_SYS_ADMIN))
@@ -406,9 +417,10 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
 	/* check sanity of attributes */
 	if (attr->max_entries == 0 ||
 	    attr->map_flags != BPF_F_NO_PREALLOC ||
-	    attr->key_size < sizeof(struct bpf_lpm_trie_key) + 1 ||
-	    attr->key_size > sizeof(struct bpf_lpm_trie_key) + 256 ||
-	    attr->value_size == 0)
+	    attr->key_size < LPM_KEY_SIZE_MIN ||
+	    attr->key_size > LPM_KEY_SIZE_MAX ||
+	    attr->value_size < LPM_VAL_SIZE_MIN ||
+	    attr->value_size > LPM_VAL_SIZE_MAX)
 		return ERR_PTR(-EINVAL);
 
 	trie = kzalloc(sizeof(*trie), GFP_USER | __GFP_NOWARN);
@@ -426,18 +438,24 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
 
 	cost_per_node = sizeof(struct lpm_trie_node) +
 			attr->value_size + trie->data_size;
-	cost = sizeof(*trie) + attr->max_entries * cost_per_node;
+	cost += (u64) attr->max_entries * cost_per_node;
+	if (cost >= U32_MAX - PAGE_SIZE) {
+		ret = -E2BIG;
+		goto out_err;
+	}
+
 	trie->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
 	ret = bpf_map_precharge_memlock(trie->map.pages);
-	if (ret) {
-		kfree(trie);
-		return ERR_PTR(ret);
-	}
+	if (ret)
+		goto out_err;
 
 	raw_spin_lock_init(&trie->lock);
 
 	return &trie->map;
+out_err:
+	kfree(trie);
+	return ERR_PTR(ret);
 }
 
 static void trie_free(struct bpf_map *map)
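
For illustration only (not part of the commit), a small userspace sketch that
exercises the new sanity checks through the raw bpf(2) syscall. It assumes a
uapi linux/bpf.h that already defines BPF_MAP_TYPE_LPM_TRIE and
struct bpf_lpm_trie_key, must run with CAP_SYS_ADMIN (trie_alloc() requires
it), and deliberately picks an absurd value_size so a fixed kernel rejects
the map with -EINVAL.

#include <linux/bpf.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <stdio.h>

int main(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_LPM_TRIE;
	attr.key_size    = sizeof(struct bpf_lpm_trie_key) + 4; /* IPv4 prefix */
	attr.value_size  = 1U << 30;	/* far beyond LPM_VAL_SIZE_MAX on purpose */
	attr.max_entries = 1;
	attr.map_flags   = BPF_F_NO_PREALLOC;

	int fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
	if (fd < 0)
		printf("map creation rejected: %s\n", strerror(errno));
	else
		printf("map created, fd=%d\n", fd);
	return 0;
}

On a kernel with this patch the oversized value_size fails the
attr->value_size > LPM_VAL_SIZE_MAX check and the syscall returns -EINVAL;
an over-large max_entries would instead trip the new cost cap and return
-E2BIG.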