author     Linus Torvalds <torvalds@linux-foundation.org>   2024-09-24 23:54:26 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>   2024-09-24 23:54:26 +0200
commit     fa8380a06bd0523e51f826520aac1beb8c585521
tree       6e1b8288a952b8e0177af953dec78b9108f897f9
parent     Merge tag 'kbuild-v6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/mas...
parent     bpf: convert bpf_token_create() to CLASS(fd, ...)
Merge tag 'bpf-next-6.12-struct-fd' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Pull bpf 'struct fd' updates from Alexei Starovoitov:
"This includes struct_fd BPF changes from Al and Andrii"
* tag 'bpf-next-6.12-struct-fd' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next:
bpf: convert bpf_token_create() to CLASS(fd, ...)
security,bpf: constify struct path in bpf_token_create() LSM hook
bpf: more trivial fdget() conversions
bpf: trivial conversions for fdget()
bpf: switch maps to CLASS(fd, ...)
bpf: factor out fetching bpf_map from FD and adding it to used_maps list
bpf: switch fdget_raw() uses to CLASS(fd_raw, ...)
bpf: convert __bpf_prog_get() to CLASS(fd, ...)
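
All of these conversions lean on the scope-based file-descriptor guard (CLASS(fd, ...) and its fd_raw variant): the struct fd is released automatically when it goes out of scope, so the manual fdget()/fdput() pairing and most of the goto-based unwinding can be dropped. A minimal before/after sketch of the pattern, where op_before(), op_after() and do_something() are made-up stand-ins for an arbitrary call site, not functions from this series:

	/* before: every successful exit path must remember to call fdput() */
	static int op_before(int ufd)
	{
		struct fd f = fdget(ufd);
		int err;

		if (!fd_file(f))
			return -EBADF;
		err = do_something(fd_file(f));
		fdput(f);
		return err;
	}

	/* after: CLASS(fd, f) drops the reference automatically at scope exit */
	static int op_after(int ufd)
	{
		CLASS(fd, f)(ufd);

		if (fd_empty(f))
			return -EBADF;
		return do_something(fd_file(f));
	}

The fdget_raw() call sites (e.g. the bpf_inode_storage.c helpers) are converted the same way, just with CLASS(fd_raw, f)(...).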
-rw-r--r--  include/linux/bpf.h             |  11
-rw-r--r--  include/linux/lsm_hook_defs.h   |   2
-rw-r--r--  include/linux/security.h        |   4
-rw-r--r--  kernel/bpf/bpf_inode_storage.c  |  24
-rw-r--r--  kernel/bpf/btf.c                |  11
-rw-r--r--  kernel/bpf/map_in_map.c         |  38
-rw-r--r--  kernel/bpf/syscall.c            | 181
-rw-r--r--  kernel/bpf/token.c              |  74
-rw-r--r--  kernel/bpf/verifier.c           | 110
-rw-r--r--  net/core/sock_map.c             |  23
-rw-r--r--  security/security.c             |   2
-rw-r--r--  security/selinux/hooks.c        |   2
12 files changed, 179 insertions, 303 deletions
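
The bulk of the kernel/bpf/syscall.c delta follows one shape: __bpf_map_get() becomes an inline in include/linux/bpf.h that no longer consumes the fd on failure, so converted call sites can return early on any error and let the CLASS(fd, ...) guard release the reference. A condensed sketch of the resulting call-site pattern (error paths and key/value handling trimmed; see the individual hunks below for the full logic):

	CLASS(fd, f)(attr->map_fd);
	struct bpf_map *map = __bpf_map_get(f);

	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ))
		return -EPERM;
	/* ... work with map; the fd reference is dropped when f leaves scope ... */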
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 0c3893c47171..19d8ca8ac960 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -2246,7 +2246,16 @@ void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu); struct bpf_map *bpf_map_get(u32 ufd); struct bpf_map *bpf_map_get_with_uref(u32 ufd); -struct bpf_map *__bpf_map_get(struct fd f); + +static inline struct bpf_map *__bpf_map_get(struct fd f) +{ + if (fd_empty(f)) + return ERR_PTR(-EBADF); + if (unlikely(fd_file(f)->f_op != &bpf_map_fops)) + return ERR_PTR(-EINVAL); + return fd_file(f)->private_data; +} + void bpf_map_inc(struct bpf_map *map); void bpf_map_inc_with_uref(struct bpf_map *map); struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref); diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 1d59513bf230..9eca013aa5e1 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -431,7 +431,7 @@ LSM_HOOK(int, 0, bpf_prog_load, struct bpf_prog *prog, union bpf_attr *attr, struct bpf_token *token) LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free, struct bpf_prog *prog) LSM_HOOK(int, 0, bpf_token_create, struct bpf_token *token, union bpf_attr *attr, - struct path *path) + const struct path *path) LSM_HOOK(void, LSM_RET_VOID, bpf_token_free, struct bpf_token *token) LSM_HOOK(int, 0, bpf_token_cmd, const struct bpf_token *token, enum bpf_cmd cmd) LSM_HOOK(int, 0, bpf_token_capable, const struct bpf_token *token, int cap) diff --git a/include/linux/security.h b/include/linux/security.h index c37c32ebbdcd..b86ec2afc691 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -2182,7 +2182,7 @@ extern int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr, struct bpf_token *token); extern void security_bpf_prog_free(struct bpf_prog *prog); extern int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr, - struct path *path); + const struct path *path); extern void security_bpf_token_free(struct bpf_token *token); extern int security_bpf_token_cmd(const struct bpf_token *token, enum bpf_cmd cmd); extern int security_bpf_token_capable(const struct bpf_token *token, int cap); @@ -2222,7 +2222,7 @@ static inline void security_bpf_prog_free(struct bpf_prog *prog) { } static inline int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr, - struct path *path) + const struct path *path) { return 0; } diff --git a/kernel/bpf/bpf_inode_storage.c b/kernel/bpf/bpf_inode_storage.c index 0a79aee6523d..29da6d3838f6 100644 --- a/kernel/bpf/bpf_inode_storage.c +++ b/kernel/bpf/bpf_inode_storage.c @@ -78,13 +78,12 @@ void bpf_inode_storage_free(struct inode *inode) static void *bpf_fd_inode_storage_lookup_elem(struct bpf_map *map, void *key) { struct bpf_local_storage_data *sdata; - struct fd f = fdget_raw(*(int *)key); + CLASS(fd_raw, f)(*(int *)key); - if (!fd_file(f)) + if (fd_empty(f)) return ERR_PTR(-EBADF); sdata = inode_storage_lookup(file_inode(fd_file(f)), map, true); - fdput(f); return sdata ? 
sdata->data : NULL; } @@ -92,19 +91,16 @@ static long bpf_fd_inode_storage_update_elem(struct bpf_map *map, void *key, void *value, u64 map_flags) { struct bpf_local_storage_data *sdata; - struct fd f = fdget_raw(*(int *)key); + CLASS(fd_raw, f)(*(int *)key); - if (!fd_file(f)) + if (fd_empty(f)) return -EBADF; - if (!inode_storage_ptr(file_inode(fd_file(f)))) { - fdput(f); + if (!inode_storage_ptr(file_inode(fd_file(f)))) return -EBADF; - } sdata = bpf_local_storage_update(file_inode(fd_file(f)), (struct bpf_local_storage_map *)map, value, map_flags, GFP_ATOMIC); - fdput(f); return PTR_ERR_OR_ZERO(sdata); } @@ -123,15 +119,11 @@ static int inode_storage_delete(struct inode *inode, struct bpf_map *map) static long bpf_fd_inode_storage_delete_elem(struct bpf_map *map, void *key) { - struct fd f = fdget_raw(*(int *)key); - int err; + CLASS(fd_raw, f)(*(int *)key); - if (!fd_file(f)) + if (fd_empty(f)) return -EBADF; - - err = inode_storage_delete(file_inode(fd_file(f)), map); - fdput(f); - return err; + return inode_storage_delete(file_inode(fd_file(f)), map); } /* *gfp_flags* is a hidden argument provided by the verifier */ diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 83bbf935c562..75e4fe83c509 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -7711,21 +7711,16 @@ int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size) struct btf *btf_get_by_fd(int fd) { struct btf *btf; - struct fd f; + CLASS(fd, f)(fd); - f = fdget(fd); - - if (!fd_file(f)) + if (fd_empty(f)) return ERR_PTR(-EBADF); - if (fd_file(f)->f_op != &btf_fops) { - fdput(f); + if (fd_file(f)->f_op != &btf_fops) return ERR_PTR(-EINVAL); - } btf = fd_file(f)->private_data; refcount_inc(&btf->refcnt); - fdput(f); return btf; } diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c index b4f18c85d7bc..645bd30bc9a9 100644 --- a/kernel/bpf/map_in_map.c +++ b/kernel/bpf/map_in_map.c @@ -11,24 +11,18 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) { struct bpf_map *inner_map, *inner_map_meta; u32 inner_map_meta_size; - struct fd f; - int ret; + CLASS(fd, f)(inner_map_ufd); - f = fdget(inner_map_ufd); inner_map = __bpf_map_get(f); if (IS_ERR(inner_map)) return inner_map; /* Does not support >1 level map-in-map */ - if (inner_map->inner_map_meta) { - ret = -EINVAL; - goto put; - } + if (inner_map->inner_map_meta) + return ERR_PTR(-EINVAL); - if (!inner_map->ops->map_meta_equal) { - ret = -ENOTSUPP; - goto put; - } + if (!inner_map->ops->map_meta_equal) + return ERR_PTR(-ENOTSUPP); inner_map_meta_size = sizeof(*inner_map_meta); /* In some cases verifier needs to access beyond just base map. */ @@ -36,10 +30,8 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) inner_map_meta_size = sizeof(struct bpf_array); inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER); - if (!inner_map_meta) { - ret = -ENOMEM; - goto put; - } + if (!inner_map_meta) + return ERR_PTR(-ENOMEM); inner_map_meta->map_type = inner_map->map_type; inner_map_meta->key_size = inner_map->key_size; @@ -53,8 +45,9 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) * invalid/empty/valid, but ERR_PTR in case of errors. During * equality NULL or IS_ERR is equivalent. 
*/ - ret = PTR_ERR(inner_map_meta->record); - goto free; + struct bpf_map *ret = ERR_CAST(inner_map_meta->record); + kfree(inner_map_meta); + return ret; } /* Note: We must use the same BTF, as we also used btf_record_dup above * which relies on BTF being same for both maps, as some members like @@ -77,14 +70,7 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) inner_array_meta->elem_size = inner_array->elem_size; inner_map_meta->bypass_spec_v1 = inner_map->bypass_spec_v1; } - - fdput(f); return inner_map_meta; -free: - kfree(inner_map_meta); -put: - fdput(f); - return ERR_PTR(ret); } void bpf_map_meta_free(struct bpf_map *map_meta) @@ -110,9 +96,8 @@ void *bpf_map_fd_get_ptr(struct bpf_map *map, int ufd) { struct bpf_map *inner_map, *inner_map_meta; - struct fd f; + CLASS(fd, f)(ufd); - f = fdget(ufd); inner_map = __bpf_map_get(f); if (IS_ERR(inner_map)) return inner_map; @@ -123,7 +108,6 @@ void *bpf_map_fd_get_ptr(struct bpf_map *map, else inner_map = ERR_PTR(-EINVAL); - fdput(f); return inner_map; } diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 8386f25bc532..a8f1808a1ca5 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1425,21 +1425,6 @@ put_token: return err; } -/* if error is returned, fd is released. - * On success caller should complete fd access with matching fdput() - */ -struct bpf_map *__bpf_map_get(struct fd f) -{ - if (!fd_file(f)) - return ERR_PTR(-EBADF); - if (fd_file(f)->f_op != &bpf_map_fops) { - fdput(f); - return ERR_PTR(-EINVAL); - } - - return fd_file(f)->private_data; -} - void bpf_map_inc(struct bpf_map *map) { atomic64_inc(&map->refcnt); @@ -1455,15 +1440,11 @@ EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref); struct bpf_map *bpf_map_get(u32 ufd) { - struct fd f = fdget(ufd); - struct bpf_map *map; + CLASS(fd, f)(ufd); + struct bpf_map *map = __bpf_map_get(f); - map = __bpf_map_get(f); - if (IS_ERR(map)) - return map; - - bpf_map_inc(map); - fdput(f); + if (!IS_ERR(map)) + bpf_map_inc(map); return map; } @@ -1471,15 +1452,11 @@ EXPORT_SYMBOL(bpf_map_get); struct bpf_map *bpf_map_get_with_uref(u32 ufd) { - struct fd f = fdget(ufd); - struct bpf_map *map; + CLASS(fd, f)(ufd); + struct bpf_map *map = __bpf_map_get(f); - map = __bpf_map_get(f); - if (IS_ERR(map)) - return map; - - bpf_map_inc_with_uref(map); - fdput(f); + if (!IS_ERR(map)) + bpf_map_inc_with_uref(map); return map; } @@ -1544,11 +1521,9 @@ static int map_lookup_elem(union bpf_attr *attr) { void __user *ukey = u64_to_user_ptr(attr->key); void __user *uvalue = u64_to_user_ptr(attr->value); - int ufd = attr->map_fd; struct bpf_map *map; void *key, *value; u32 value_size; - struct fd f; int err; if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM)) @@ -1557,26 +1532,20 @@ static int map_lookup_elem(union bpf_attr *attr) if (attr->flags & ~BPF_F_LOCK) return -EINVAL; - f = fdget(ufd); + CLASS(fd, f)(attr->map_fd); map = __bpf_map_get(f); if (IS_ERR(map)) return PTR_ERR(map); - if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { - err = -EPERM; - goto err_put; - } + if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) + return -EPERM; if ((attr->flags & BPF_F_LOCK) && - !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { - err = -EINVAL; - goto err_put; - } + !btf_record_has_field(map->record, BPF_SPIN_LOCK)) + return -EINVAL; key = __bpf_copy_key(ukey, map->key_size); - if (IS_ERR(key)) { - err = PTR_ERR(key); - goto err_put; - } + if (IS_ERR(key)) + return PTR_ERR(key); value_size = bpf_map_value_size(map); @@ -1607,8 +1576,6 @@ free_value: kvfree(value); free_key: kvfree(key); -err_put: - 
fdput(f); return err; } @@ -1619,17 +1586,15 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr) { bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel); bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel); - int ufd = attr->map_fd; struct bpf_map *map; void *key, *value; u32 value_size; - struct fd f; int err; if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM)) return -EINVAL; - f = fdget(ufd); + CLASS(fd, f)(attr->map_fd); map = __bpf_map_get(f); if (IS_ERR(map)) return PTR_ERR(map); @@ -1667,7 +1632,6 @@ free_key: kvfree(key); err_put: bpf_map_write_active_dec(map); - fdput(f); return err; } @@ -1676,16 +1640,14 @@ err_put: static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr) { bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel); - int ufd = attr->map_fd; struct bpf_map *map; - struct fd f; void *key; int err; if (CHECK_ATTR(BPF_MAP_DELETE_ELEM)) return -EINVAL; - f = fdget(ufd); + CLASS(fd, f)(attr->map_fd); map = __bpf_map_get(f); if (IS_ERR(map)) return PTR_ERR(map); @@ -1722,7 +1684,6 @@ out: kvfree(key); err_put: bpf_map_write_active_dec(map); - fdput(f); return err; } @@ -1733,30 +1694,24 @@ static int map_get_next_key(union bpf_attr *attr) { void __user *ukey = u64_to_user_ptr(attr->key); void __user *unext_key = u64_to_user_ptr(attr->next_key); - int ufd = attr->map_fd; struct bpf_map *map; void *key, *next_key; - struct fd f; int err; if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY)) return -EINVAL; - f = fdget(ufd); + CLASS(fd, f)(attr->map_fd); map = __bpf_map_get(f); if (IS_ERR(map)) return PTR_ERR(map); - if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { - err = -EPERM; - goto err_put; - } + if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) + return -EPERM; if (ukey) { key = __bpf_copy_key(ukey, map->key_size); - if (IS_ERR(key)) { - err = PTR_ERR(key); - goto err_put; - } + if (IS_ERR(key)) + return PTR_ERR(key); } else { key = NULL; } @@ -1788,8 +1743,6 @@ free_next_key: kvfree(next_key); free_key: kvfree(key); -err_put: - fdput(f); return err; } @@ -2018,11 +1971,9 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr) { void __user *ukey = u64_to_user_ptr(attr->key); void __user *uvalue = u64_to_user_ptr(attr->value); - int ufd = attr->map_fd; struct bpf_map *map; void *key, *value; u32 value_size; - struct fd f; int err; if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM)) @@ -2031,7 +1982,7 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr) if (attr->flags & ~BPF_F_LOCK) return -EINVAL; - f = fdget(ufd); + CLASS(fd, f)(attr->map_fd); map = __bpf_map_get(f); if (IS_ERR(map)) return PTR_ERR(map); @@ -2101,7 +2052,6 @@ free_key: kvfree(key); err_put: bpf_map_write_active_dec(map); - fdput(f); return err; } @@ -2109,27 +2059,22 @@ err_put: static int map_freeze(const union bpf_attr *attr) { - int err = 0, ufd = attr->map_fd; + int err = 0; struct bpf_map *map; - struct fd f; if (CHECK_ATTR(BPF_MAP_FREEZE)) return -EINVAL; - f = fdget(ufd); + CLASS(fd, f)(attr->map_fd); map = __bpf_map_get(f); if (IS_ERR(map)) return PTR_ERR(map); - if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record)) { - fdput(f); + if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record)) return -ENOTSUPP; - } - if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { - fdput(f); + if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) return -EPERM; - } mutex_lock(&map->freeze_mutex); if (bpf_map_write_active(map)) { @@ -2144,7 +2089,6 @@ static int map_freeze(const union bpf_attr *attr) WRITE_ONCE(map->frozen, true); err_put: 
mutex_unlock(&map->freeze_mutex); - fdput(f); return err; } @@ -2414,18 +2358,6 @@ int bpf_prog_new_fd(struct bpf_prog *prog) O_RDWR | O_CLOEXEC); } -static struct bpf_prog *____bpf_prog_get(struct fd f) -{ - if (!fd_file(f)) - return ERR_PTR(-EBADF); - if (fd_file(f)->f_op != &bpf_prog_fops) { - fdput(f); - return ERR_PTR(-EINVAL); - } - - return fd_file(f)->private_data; -} - void bpf_prog_add(struct bpf_prog *prog, int i) { atomic64_add(i, &prog->aux->refcnt); @@ -2481,20 +2413,19 @@ bool bpf_prog_get_ok(struct bpf_prog *prog, static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type, bool attach_drv) { - struct fd f = fdget(ufd); + CLASS(fd, f)(ufd); struct bpf_prog *prog; - prog = ____bpf_prog_get(f); - if (IS_ERR(prog)) - return prog; - if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) { - prog = ERR_PTR(-EINVAL); - goto out; - } + if (fd_empty(f)) + return ERR_PTR(-EBADF); + if (fd_file(f)->f_op != &bpf_prog_fops) + return ERR_PTR(-EINVAL); + + prog = fd_file(f)->private_data; + if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) + return ERR_PTR(-EINVAL); bpf_prog_inc(prog); -out: - fdput(f); return prog; } @@ -3263,20 +3194,16 @@ int bpf_link_new_fd(struct bpf_link *link) struct bpf_link *bpf_link_get_from_fd(u32 ufd) { - struct fd f = fdget(ufd); + CLASS(fd, f)(ufd); struct bpf_link *link; - if (!fd_file(f)) + if (fd_empty(f)) return ERR_PTR(-EBADF); - if (fd_file(f)->f_op != &bpf_link_fops && fd_file(f)->f_op != &bpf_link_fops_poll) { - fdput(f); + if (fd_file(f)->f_op != &bpf_link_fops && fd_file(f)->f_op != &bpf_link_fops_poll) return ERR_PTR(-EINVAL); - } link = fd_file(f)->private_data; bpf_link_inc(link); - fdput(f); - return link; } EXPORT_SYMBOL(bpf_link_get_from_fd); @@ -4981,33 +4908,25 @@ static int bpf_link_get_info_by_fd(struct file *file, static int bpf_obj_get_info_by_fd(const union bpf_attr *attr, union bpf_attr __user *uattr) { - int ufd = attr->info.bpf_fd; - struct fd f; - int err; - if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD)) return -EINVAL; - f = fdget(ufd); - if (!fd_file(f)) + CLASS(fd, f)(attr->info.bpf_fd); + if (fd_empty(f)) return -EBADFD; if (fd_file(f)->f_op == &bpf_prog_fops) - err = bpf_prog_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, + return bpf_prog_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, uattr); else if (fd_file(f)->f_op == &bpf_map_fops) - err = bpf_map_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, + return bpf_map_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, uattr); else if (fd_file(f)->f_op == &btf_fops) - err = bpf_btf_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, uattr); + return bpf_btf_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, uattr); else if (fd_file(f)->f_op == &bpf_link_fops || fd_file(f)->f_op == &bpf_link_fops_poll) - err = bpf_link_get_info_by_fd(fd_file(f), fd_file(f)->private_data, + return bpf_link_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, uattr); - else - err = -EINVAL; - - fdput(f); - return err; + return -EINVAL; } #define BPF_BTF_LOAD_LAST_FIELD btf_token_fd @@ -5195,14 +5114,13 @@ static int bpf_map_do_batch(const union bpf_attr *attr, cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH; bool has_write = cmd != BPF_MAP_LOOKUP_BATCH; struct bpf_map *map; - int err, ufd; - struct fd f; + int err; if (CHECK_ATTR(BPF_MAP_BATCH)) return -EINVAL; - ufd = attr->batch.map_fd; - f = fdget(ufd); + CLASS(fd, f)(attr->batch.map_fd); + map = __bpf_map_get(f); if (IS_ERR(map)) return PTR_ERR(map); @@ -5230,7 +5148,6 @@ 
err_put: maybe_wait_bpf_programs(map); bpf_map_write_active_dec(map); } - fdput(f); return err; } diff --git a/kernel/bpf/token.c b/kernel/bpf/token.c index 9a1d356e79ed..dcbec1a0dfb3 100644 --- a/kernel/bpf/token.c +++ b/kernel/bpf/token.c @@ -116,67 +116,52 @@ int bpf_token_create(union bpf_attr *attr) struct user_namespace *userns; struct inode *inode; struct file *file; + CLASS(fd, f)(attr->token_create.bpffs_fd); struct path path; - struct fd f; + struct super_block *sb; umode_t mode; int err, fd; - f = fdget(attr->token_create.bpffs_fd); - if (!fd_file(f)) + if (fd_empty(f)) return -EBADF; path = fd_file(f)->f_path; - path_get(&path); - fdput(f); + sb = path.dentry->d_sb; - if (path.dentry != path.mnt->mnt_sb->s_root) { - err = -EINVAL; - goto out_path; - } - if (path.mnt->mnt_sb->s_op != &bpf_super_ops) { - err = -EINVAL; - goto out_path; - } + if (path.dentry != sb->s_root) + return -EINVAL; + if (sb->s_op != &bpf_super_ops) + return -EINVAL; err = path_permission(&path, MAY_ACCESS); if (err) - goto out_path; + return err; - userns = path.dentry->d_sb->s_user_ns; + userns = sb->s_user_ns; /* * Enforce that creators of BPF tokens are in the same user * namespace as the BPF FS instance. This makes reasoning about * permissions a lot easier and we can always relax this later. */ - if (current_user_ns() != userns) { - err = -EPERM; - goto out_path; - } - if (!ns_capable(userns, CAP_BPF)) { - err = -EPERM; - goto out_path; - } + if (current_user_ns() != userns) + return -EPERM; + if (!ns_capable(userns, CAP_BPF)) + return -EPERM; /* Creating BPF token in init_user_ns doesn't make much sense. */ - if (current_user_ns() == &init_user_ns) { - err = -EOPNOTSUPP; - goto out_path; - } + if (current_user_ns() == &init_user_ns) + return -EOPNOTSUPP; - mnt_opts = path.dentry->d_sb->s_fs_info; + mnt_opts = sb->s_fs_info; if (mnt_opts->delegate_cmds == 0 && mnt_opts->delegate_maps == 0 && mnt_opts->delegate_progs == 0 && - mnt_opts->delegate_attachs == 0) { - err = -ENOENT; /* no BPF token delegation is set up */ - goto out_path; - } + mnt_opts->delegate_attachs == 0) + return -ENOENT; /* no BPF token delegation is set up */ mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask()); - inode = bpf_get_inode(path.mnt->mnt_sb, NULL, mode); - if (IS_ERR(inode)) { - err = PTR_ERR(inode); - goto out_path; - } + inode = bpf_get_inode(sb, NULL, mode); + if (IS_ERR(inode)) + return PTR_ERR(inode); inode->i_op = &bpf_token_iops; inode->i_fop = &bpf_token_fops; @@ -185,8 +170,7 @@ int bpf_token_create(union bpf_attr *attr) file = alloc_file_pseudo(inode, path.mnt, BPF_TOKEN_INODE_NAME, O_RDWR, &bpf_token_fops); if (IS_ERR(file)) { iput(inode); - err = PTR_ERR(file); - goto out_path; + return PTR_ERR(file); } token = kzalloc(sizeof(*token), GFP_USER); @@ -218,33 +202,27 @@ int bpf_token_create(union bpf_attr *attr) file->private_data = token; fd_install(fd, file); - path_put(&path); return fd; out_token: bpf_token_free(token); out_file: fput(file); -out_path: - path_put(&path); return err; } struct bpf_token *bpf_token_get_from_fd(u32 ufd) { - struct fd f = fdget(ufd); + CLASS(fd, f)(ufd); struct bpf_token *token; - if (!fd_file(f)) + if (fd_empty(f)) return ERR_PTR(-EBADF); - if (fd_file(f)->f_op != &bpf_token_fops) { - fdput(f); + if (fd_file(f)->f_op != &bpf_token_fops) return ERR_PTR(-EINVAL); - } token = fd_file(f)->private_data; bpf_token_inc(token); - fdput(f); return token; } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index dd86282ccaa4..9a7ed527e47e 100644 --- a/kernel/bpf/verifier.c 
+++ b/kernel/bpf/verifier.c @@ -18920,6 +18920,53 @@ static bool bpf_map_is_cgroup_storage(struct bpf_map *map) map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); } +/* Add map behind fd to used maps list, if it's not already there, and return + * its index. Also set *reused to true if this map was already in the list of + * used maps. + * Returns <0 on error, or >= 0 index, on success. + */ +static int add_used_map_from_fd(struct bpf_verifier_env *env, int fd, bool *reused) +{ + CLASS(fd, f)(fd); + struct bpf_map *map; + int i; + + map = __bpf_map_get(f); + if (IS_ERR(map)) { + verbose(env, "fd %d is not pointing to valid bpf_map\n", fd); + return PTR_ERR(map); + } + + /* check whether we recorded this map already */ + for (i = 0; i < env->used_map_cnt; i++) { + if (env->used_maps[i] == map) { + *reused = true; + return i; + } + } + + if (env->used_map_cnt >= MAX_USED_MAPS) { + verbose(env, "The total number of maps per program has reached the limit of %u\n", + MAX_USED_MAPS); + return -E2BIG; + } + + if (env->prog->sleepable) + atomic64_inc(&map->sleepable_refcnt); + + /* hold the map. If the program is rejected by verifier, + * the map will be released by release_maps() or it + * will be used by the valid program until it's unloaded + * and all maps are released in bpf_free_used_maps() + */ + bpf_map_inc(map); + + *reused = false; + env->used_maps[env->used_map_cnt++] = map; + + return env->used_map_cnt - 1; +} + /* find and rewrite pseudo imm in ld_imm64 instructions: * * 1. if it accesses map FD, replace it with actual map pointer. @@ -18931,7 +18978,7 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env) { struct bpf_insn *insn = env->prog->insnsi; int insn_cnt = env->prog->len; - int i, j, err; + int i, err; err = bpf_prog_calc_tag(env->prog); if (err) @@ -18948,9 +18995,10 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env) if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { struct bpf_insn_aux_data *aux; struct bpf_map *map; - struct fd f; + int map_idx; u64 addr; u32 fd; + bool reused; if (i == insn_cnt - 1 || insn[1].code != 0 || insn[1].dst_reg != 0 || insn[1].src_reg != 0 || @@ -19011,20 +19059,18 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env) break; } - f = fdget(fd); - map = __bpf_map_get(f); - if (IS_ERR(map)) { - verbose(env, "fd %d is not pointing to valid bpf_map\n", fd); - return PTR_ERR(map); - } + map_idx = add_used_map_from_fd(env, fd, &reused); + if (map_idx < 0) + return map_idx; + map = env->used_maps[map_idx]; + + aux = &env->insn_aux_data[i]; + aux->map_index = map_idx; err = check_map_prog_compatibility(env, map, env->prog); - if (err) { - fdput(f); + if (err) return err; - } - aux = &env->insn_aux_data[i]; if (insn[0].src_reg == BPF_PSEUDO_MAP_FD || insn[0].src_reg == BPF_PSEUDO_MAP_IDX) { addr = (unsigned long)map; @@ -19033,13 +19079,11 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env) if (off >= BPF_MAX_VAR_OFF) { verbose(env, "direct value offset of %u is not allowed\n", off); - fdput(f); return -EINVAL; } if (!map->ops->map_direct_value_addr) { verbose(env, "no direct value access support for this map type\n"); - fdput(f); return -EINVAL; } @@ -19047,7 +19091,6 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env) if (err) { verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n", map->value_size, off); - fdput(f); return err; } @@ -19058,70 +19101,39 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env) insn[0].imm = (u32)addr; insn[1].imm = 
addr >> 32; - /* check whether we recorded this map already */ - for (j = 0; j < env->used_map_cnt; j++) { - if (env->used_maps[j] == map) { - aux->map_index = j; - fdput(f); - goto next_insn; - } - } - - if (env->used_map_cnt >= MAX_USED_MAPS) { - verbose(env, "The total number of maps per program has reached the limit of %u\n", - MAX_USED_MAPS); - fdput(f); - return -E2BIG; - } - - if (env->prog->sleepable) - atomic64_inc(&map->sleepable_refcnt); - /* hold the map. If the program is rejected by verifier, - * the map will be released by release_maps() or it - * will be used by the valid program until it's unloaded - * and all maps are released in bpf_free_used_maps() - */ - bpf_map_inc(map); - - aux->map_index = env->used_map_cnt; - env->used_maps[env->used_map_cnt++] = map; + /* proceed with extra checks only if its newly added used map */ + if (reused) + goto next_insn; if (bpf_map_is_cgroup_storage(map) && bpf_cgroup_storage_assign(env->prog->aux, map)) { verbose(env, "only one cgroup storage of each type is allowed\n"); - fdput(f); return -EBUSY; } if (map->map_type == BPF_MAP_TYPE_ARENA) { if (env->prog->aux->arena) { verbose(env, "Only one arena per program\n"); - fdput(f); return -EBUSY; } if (!env->allow_ptr_leaks || !env->bpf_capable) { verbose(env, "CAP_BPF and CAP_PERFMON are required to use arena\n"); - fdput(f); return -EPERM; } if (!env->prog->jit_requested) { verbose(env, "JIT is required to use arena\n"); - fdput(f); return -EOPNOTSUPP; } if (!bpf_jit_supports_arena()) { verbose(env, "JIT doesn't support arena\n"); - fdput(f); return -EOPNOTSUPP; } env->prog->aux->arena = (void *)map; if (!bpf_arena_get_user_vm_start(env->prog->aux->arena)) { verbose(env, "arena's user address must be set via map_extra or mmap()\n"); - fdput(f); return -EINVAL; } } - fdput(f); next_insn: insn++; i++; diff --git a/net/core/sock_map.c b/net/core/sock_map.c index 724b6856fcc3..242c91a6e3d3 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -67,46 +67,39 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr) int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog) { - u32 ufd = attr->target_fd; struct bpf_map *map; - struct fd f; int ret; if (attr->attach_flags || attr->replace_bpf_fd) return -EINVAL; - f = fdget(ufd); + CLASS(fd, f)(attr->target_fd); map = __bpf_map_get(f); if (IS_ERR(map)) return PTR_ERR(map); mutex_lock(&sockmap_mutex); ret = sock_map_prog_update(map, prog, NULL, NULL, attr->attach_type); mutex_unlock(&sockmap_mutex); - fdput(f); return ret; } int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype) { - u32 ufd = attr->target_fd; struct bpf_prog *prog; struct bpf_map *map; - struct fd f; int ret; if (attr->attach_flags || attr->replace_bpf_fd) return -EINVAL; - f = fdget(ufd); + CLASS(fd, f)(attr->target_fd); map = __bpf_map_get(f); if (IS_ERR(map)) return PTR_ERR(map); prog = bpf_prog_get(attr->attach_bpf_fd); - if (IS_ERR(prog)) { - ret = PTR_ERR(prog); - goto put_map; - } + if (IS_ERR(prog)) + return PTR_ERR(prog); if (prog->type != ptype) { ret = -EINVAL; @@ -118,8 +111,6 @@ int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype) mutex_unlock(&sockmap_mutex); put_prog: bpf_prog_put(prog); -put_map: - fdput(f); return ret; } @@ -1551,18 +1542,17 @@ int sock_map_bpf_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr) { __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids); - u32 prog_cnt = 0, flags = 0, ufd = attr->target_fd; + u32 prog_cnt = 0, flags = 0; struct 
bpf_prog **pprog; struct bpf_prog *prog; struct bpf_map *map; - struct fd f; u32 id = 0; int ret; if (attr->query.query_flags) return -EINVAL; - f = fdget(ufd); + CLASS(fd, f)(attr->target_fd); map = __bpf_map_get(f); if (IS_ERR(map)) return PTR_ERR(map); @@ -1594,7 +1584,6 @@ end: copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt))) ret = -EFAULT; - fdput(f); return ret; } diff --git a/security/security.c b/security/security.c index 4564a0a1e4ef..6875eb4a59fc 100644 --- a/security/security.c +++ b/security/security.c @@ -5681,7 +5681,7 @@ int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr, * Return: Returns 0 on success, error on failure. */ int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr, - struct path *path) + const struct path *path) { return call_int_hook(bpf_token_create, token, attr, path); } diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 94c523140125..fc926d3cac6e 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -6933,7 +6933,7 @@ static void selinux_bpf_prog_free(struct bpf_prog *prog) } static int selinux_bpf_token_create(struct bpf_token *token, union bpf_attr *attr, - struct path *path) + const struct path *path) { struct bpf_security_struct *bpfsec; |
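
One hunk where the guard changes more than error unwinding is bpf_token_create(): since CLASS(fd, f) keeps the bpffs fd, and with it the path and superblock, pinned until the function returns, the explicit path_get()/path_put() pair and the out_path label become unnecessary. A condensed sketch of the converted entry checks from the kernel/bpf/token.c hunk above (the later permission, delegation and token/file setup steps are omitted):

	CLASS(fd, f)(attr->token_create.bpffs_fd);
	struct super_block *sb;
	struct path path;

	if (fd_empty(f))
		return -EBADF;

	path = fd_file(f)->f_path;	/* stays valid as long as f is in scope */
	sb = path.dentry->d_sb;

	if (path.dentry != sb->s_root)
		return -EINVAL;
	if (sb->s_op != &bpf_super_ops)
		return -EINVAL;
	/* ... path_permission(), userns and delegation checks, then token setup ... */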