author:    David S. Miller <davem@davemloft.net>  2017-06-14 20:56:26 +0200
committer: David S. Miller <davem@davemloft.net>  2017-06-14 20:56:26 +0200
commit:    2fae5d0e647c6470d206e72b5fc24972bb900f70 (patch)
tree:      bc04b4c45c52a732e1d92c7348e3a1354bfd47f6
parent:    macvlan: propagate the mac address change status for lowerdev (diff)
parent:    selftests/bpf: Add test cases to test narrower ctx field loads (diff)
Merge branch 'bpf-ctx-narrow'
Yonghong Song says:
====================
bpf: permit bpf program narrower loads for ctx fields
Today, if users try to access a ctx field through a narrower load, e.g.,
__be16 prot = __sk_buff->protocol, the verifier will fail.
This set contains the verifier change to permit such loads for
certain ctx fields as well as the new test cases in selftests/bpf.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
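
As an illustration of the newly permitted access pattern (this example is not
part of the patch set; it is modeled on the selftest added below and assumes
the selftests' bpf_helpers.h, a clang BPF target, and an illustrative section
name):

	#include <linux/bpf.h>
	#include <linux/pkt_cls.h>
	#include "bpf_helpers.h"

	/* Each load below is narrower than the underlying 4-byte __sk_buff
	 * field; the verifier previously rejected these with
	 * "invalid bpf_context access". With this series, such narrow loads
	 * at the field's low-order bytes (offset 0 on little-endian) are
	 * accepted and rewritten internally into a full-width load plus a
	 * mask. The volatile casts keep clang from widening the loads. */
	SEC("classifier")
	int narrow_ctx_loads(struct __sk_buff *skb)
	{
		__u16 prot = *(volatile __u16 *)&skb->protocol; /* 2-byte load */
		__u8 hash_lo = *(volatile __u8 *)&skb->hash;    /* 1-byte load */

		if (prot == 0 && hash_lo == 0)
			return TC_ACT_SHOT;
		return TC_ACT_OK;
	}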
-rw-r--r--  include/linux/bpf.h                               |   2
-rw-r--r--  include/linux/bpf_verifier.h                      |   1
-rw-r--r--  kernel/bpf/verifier.c                             |  71
-rw-r--r--  kernel/trace/bpf_trace.c                          |  21
-rw-r--r--  net/core/filter.c                                 |  56
-rw-r--r--  tools/testing/selftests/bpf/Makefile              |   3
-rw-r--r--  tools/testing/selftests/bpf/test_pkt_md_access.c  |  35
-rw-r--r--  tools/testing/selftests/bpf/test_progs.c          |  21
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c       | 200
9 files changed, 324 insertions, 86 deletions
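
Conceptually, the verifier change below rewrites an approved narrow read into
a load of the whole field followed by a BPF_AND with (1 << size * 8) - 1. The
following is a hypothetical user-space sketch of that masking step
(little-endian, offset-0 case; not kernel code):

	#include <assert.h>
	#include <stdint.h>

	/* Mirrors the mask convert_ctx_accesses() appends after widening a
	 * 'size'-byte ctx read to the field's full width. Sizes 1, 2 and 4
	 * are the narrow cases; a full 8-byte read needs no mask. */
	static uint64_t narrow_read_via_mask(uint64_t whole_field, int size)
	{
		if (size >= 8)
			return whole_field;
		return whole_field & ((1ULL << (size * 8)) - 1);
	}

	int main(void)
	{
		/* A 2-byte little-endian load at offset 0 of a 4-byte field
		 * holding 0x12345678 sees 0x5678, which is exactly what the
		 * widened load plus mask produces. */
		assert(narrow_read_via_mask(0x12345678, 2) == 0x5678);
		assert(narrow_read_via_mask(0x12345678, 1) == 0x78);
		return 0;
	}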
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index c32bace66d3d..1bcbf0a71f75 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -157,7 +157,7 @@ struct bpf_verifier_ops {
 	 * with 'type' (read or write) is allowed
 	 */
 	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
-				enum bpf_reg_type *reg_type);
+				enum bpf_reg_type *reg_type, int *ctx_field_size);
 	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
 			    const struct bpf_prog *prog);
 	u32 (*convert_ctx_access)(enum bpf_access_type type,
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index d5093b52b485..189741c0da85 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -73,6 +73,7 @@ struct bpf_insn_aux_data {
 		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
 		struct bpf_map *map_ptr;	/* pointer for call insn into lookup_elem */
 	};
+	int ctx_field_size;	/* the ctx field size for load/store insns, maybe 0 */
 };
 
 #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 519a6144d3d3..44b97d958fb7 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -758,15 +758,26 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
 }
 
 /* check access to 'struct bpf_context' fields */
-static int check_ctx_access(struct bpf_verifier_env *env, int off, int size,
+static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
 			    enum bpf_access_type t, enum bpf_reg_type *reg_type)
 {
+	int ctx_field_size = 0;
+
 	/* for analyzer ctx accesses are already validated and converted */
 	if (env->analyzer_ops)
 		return 0;
 
 	if (env->prog->aux->ops->is_valid_access &&
-	    env->prog->aux->ops->is_valid_access(off, size, t, reg_type)) {
+	    env->prog->aux->ops->is_valid_access(off, size, t, reg_type, &ctx_field_size)) {
+		/* a non-zero ctx_field_size indicates:
+		 * . For this field, the prog type specific ctx conversion algorithm
+		 *   only supports whole field access.
+		 * . This ctx access is a candidate for later verifier transformation
+		 *   to load the whole field and then apply a mask to get the correct result.
+		 */
+		if (ctx_field_size)
+			env->insn_aux_data[insn_idx].ctx_field_size = ctx_field_size;
+
 		/* remember the offset of last byte accessed in ctx */
 		if (env->prog->aux->max_ctx_offset < off + size)
 			env->prog->aux->max_ctx_offset = off + size;
@@ -868,7 +879,7 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
  * if t==write && value_regno==-1, some unknown value is stored into memory
  * if t==read && value_regno==-1, don't care what we read from memory
  */
-static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
+static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off,
 			    int bpf_size, enum bpf_access_type t,
 			    int value_regno)
 {
@@ -911,7 +922,7 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
 			verbose("R%d leaks addr into ctx\n", value_regno);
 			return -EACCES;
 		}
-		err = check_ctx_access(env, off, size, t, &reg_type);
+		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
 		if (!err && t == BPF_READ && value_regno >= 0) {
 			mark_reg_unknown_value_and_range(state->regs, value_regno);
@@ -972,7 +983,7 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
 	return err;
 }
 
-static int check_xadd(struct bpf_verifier_env *env, struct bpf_insn *insn)
+static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
 {
 	struct bpf_reg_state *regs = env->cur_state.regs;
 	int err;
@@ -994,13 +1005,13 @@ static int check_xadd(struct bpf_verifier_env *env, struct bpf_insn *insn)
 		return err;
 
 	/* check whether atomic_add can read the memory */
-	err = check_mem_access(env, insn->dst_reg, insn->off,
+	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
 			       BPF_SIZE(insn->code), BPF_READ, -1);
 	if (err)
 		return err;
 
 	/* check whether atomic_add can write into the same memory */
-	return check_mem_access(env, insn->dst_reg, insn->off,
+	return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
 				BPF_SIZE(insn->code), BPF_WRITE, -1);
 }
 
@@ -1416,7 +1427,7 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 	 * is inferred from register state.
 	 */
 	for (i = 0; i < meta.access_size; i++) {
-		err = check_mem_access(env, meta.regno, i, BPF_B, BPF_WRITE, -1);
+		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1);
 		if (err)
 			return err;
 	}
 
@@ -2993,18 +3004,12 @@ static int do_check(struct bpf_verifier_env *env)
 			/* check that memory (src_reg + off) is readable,
 			 * the state of dst_reg will be updated by this func
 			 */
-			err = check_mem_access(env, insn->src_reg, insn->off,
+			err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
 					       BPF_SIZE(insn->code), BPF_READ,
 					       insn->dst_reg);
 			if (err)
 				return err;
 
-			if (BPF_SIZE(insn->code) != BPF_W &&
-			    BPF_SIZE(insn->code) != BPF_DW) {
-				insn_idx++;
-				continue;
-			}
-
 			prev_src_type = &env->insn_aux_data[insn_idx].ptr_type;
 
 			if (*prev_src_type == NOT_INIT) {
@@ -3032,7 +3037,7 @@ static int do_check(struct bpf_verifier_env *env)
 			enum bpf_reg_type *prev_dst_type, dst_reg_type;
 
 			if (BPF_MODE(insn->code) == BPF_XADD) {
-				err = check_xadd(env, insn);
+				err = check_xadd(env, insn_idx, insn);
 				if (err)
 					return err;
 				insn_idx++;
@@ -3051,7 +3056,7 @@ static int do_check(struct bpf_verifier_env *env)
 			dst_reg_type = regs[insn->dst_reg].type;
 
 			/* check that memory (dst_reg + off) is writeable */
-			err = check_mem_access(env, insn->dst_reg, insn->off,
+			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
 					       BPF_SIZE(insn->code), BPF_WRITE,
 					       insn->src_reg);
 			if (err)
@@ -3080,7 +3085,7 @@ static int do_check(struct bpf_verifier_env *env)
 				return err;
 
 			/* check that memory (dst_reg + off) is writeable */
-			err = check_mem_access(env, insn->dst_reg, insn->off,
+			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
 					       BPF_SIZE(insn->code), BPF_WRITE,
 					       -1);
 			if (err)
@@ -3383,7 +3388,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 	struct bpf_insn insn_buf[16], *insn;
 	struct bpf_prog *new_prog;
 	enum bpf_access_type type;
-	int i, cnt, delta = 0;
+	int i, cnt, off, size, ctx_field_size, is_narrower_load, delta = 0;
 
 	if (ops->gen_prologue) {
 		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
@@ -3423,11 +3428,39 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 		if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
 			continue;
 
+		off = insn->off;
+		size = bpf_size_to_bytes(BPF_SIZE(insn->code));
+		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
+		is_narrower_load = (type == BPF_READ && size < ctx_field_size);
+
+		/* If the read access is a narrower load of the field,
+		 * convert it to a 4/8-byte load, to minimize the program type
+		 * specific convert_ctx_access changes. If the conversion succeeds,
+		 * we will apply a proper mask to the result.
+		 */
+		if (is_narrower_load) {
+			int size_code = BPF_H;
+
+			if (ctx_field_size == 4)
+				size_code = BPF_W;
+			else if (ctx_field_size == 8)
+				size_code = BPF_DW;
+			insn->off = off & ~(ctx_field_size - 1);
+			insn->code = BPF_LDX | BPF_MEM | size_code;
+		}
 		cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog);
 		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
 			verbose("bpf verifier is misconfigured\n");
 			return -EINVAL;
 		}
+		if (is_narrower_load) {
+			if (ctx_field_size <= 4)
+				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
+								(1 << size * 8) - 1);
+			else
+				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
+								(1 << size * 8) - 1);
+		}
 
 		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
 		if (!new_prog)
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 051d7fca0c09..9d3ec8253131 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -479,7 +479,7 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
 
 /* bpf+kprobe programs can access fields of 'struct pt_regs' */
 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
-					enum bpf_reg_type *reg_type)
+					enum bpf_reg_type *reg_type, int *ctx_field_size)
 {
 	if (off < 0 || off >= sizeof(struct pt_regs))
 		return false;
@@ -562,7 +562,7 @@ static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
 }
 
 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
-				    enum bpf_reg_type *reg_type)
+				    enum bpf_reg_type *reg_type, int *ctx_field_size)
 {
 	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
 		return false;
@@ -581,17 +581,26 @@ const struct bpf_verifier_ops tracepoint_prog_ops = {
 };
 
 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
-				    enum bpf_reg_type *reg_type)
+				    enum bpf_reg_type *reg_type, int *ctx_field_size)
 {
+	int sample_period_off;
+
 	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
 		return false;
 	if (type != BPF_READ)
 		return false;
 	if (off % size != 0)
 		return false;
-	if (off == offsetof(struct bpf_perf_event_data, sample_period)) {
-		if (size != sizeof(u64))
-			return false;
+
+	/* permit 1, 2, 4 byte narrower and 8-byte normal read access to sample_period */
+	sample_period_off = offsetof(struct bpf_perf_event_data, sample_period);
+	if (off >= sample_period_off && off < sample_period_off + sizeof(__u64)) {
+		*ctx_field_size = 8;
+#ifdef __LITTLE_ENDIAN
+		return (off & 0x7) == 0 && size <= 8 && (size & (size - 1)) == 0;
+#else
+		return ((off & 0x7) + size) == 8 && size <= 8 && (size & (size - 1)) == 0;
+#endif
 	} else {
 		if (size != sizeof(long))
 			return false;
diff --git a/net/core/filter.c b/net/core/filter.c
index a65a3b25e104..60ed6f343a63 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2856,7 +2856,8 @@ lwt_xmit_func_proto(enum bpf_func_id func_id)
 	}
 }
 
-static bool __is_valid_access(int off, int size)
+static bool __is_valid_access(int off, int size, enum bpf_access_type type,
+			      int *ctx_field_size)
 {
 	if (off < 0 || off >= sizeof(struct __sk_buff))
 		return false;
@@ -2872,9 +2873,27 @@ static bool __is_valid_access(int off, int size)
 		    offsetof(struct __sk_buff, cb[4]) + sizeof(__u32))
 			return false;
 		break;
-	default:
+	case offsetof(struct __sk_buff, data) ...
+	     offsetof(struct __sk_buff, data) + sizeof(__u32) - 1:
+	case offsetof(struct __sk_buff, data_end) ...
+	     offsetof(struct __sk_buff, data_end) + sizeof(__u32) - 1:
 		if (size != sizeof(__u32))
 			return false;
+		break;
+	default:
+		/* permit narrower load for fields other than cb/data/data_end */
+		*ctx_field_size = 4;
+		if (type == BPF_WRITE) {
+			if (size != sizeof(__u32))
+				return false;
+		} else {
+			if (size != sizeof(__u32))
+#ifdef __LITTLE_ENDIAN
+				return (off & 0x3) == 0 && (size == 1 || size == 2);
+#else
+				return (off & 0x3) + size == 4 && (size == 1 || size == 2);
+#endif
+		}
 	}
 
 	return true;
@@ -2882,12 +2901,16 @@ static bool __is_valid_access(int off, int size)
 
 static bool sk_filter_is_valid_access(int off, int size,
 				      enum bpf_access_type type,
-				      enum bpf_reg_type *reg_type)
+				      enum bpf_reg_type *reg_type,
+				      int *ctx_field_size)
 {
 	switch (off) {
-	case offsetof(struct __sk_buff, tc_classid):
-	case offsetof(struct __sk_buff, data):
-	case offsetof(struct __sk_buff, data_end):
+	case offsetof(struct __sk_buff, tc_classid) ...
+	     offsetof(struct __sk_buff, tc_classid) + sizeof(__u32) - 1:
+	case offsetof(struct __sk_buff, data) ...
+	     offsetof(struct __sk_buff, data) + sizeof(__u32) - 1:
+	case offsetof(struct __sk_buff, data_end) ...
+	     offsetof(struct __sk_buff, data_end) + sizeof(__u32) - 1:
 		return false;
 	}
 
@@ -2901,15 +2924,17 @@ static bool sk_filter_is_valid_access(int off, int size,
 		}
 	}
 
-	return __is_valid_access(off, size);
+	return __is_valid_access(off, size, type, ctx_field_size);
 }
 
 static bool lwt_is_valid_access(int off, int size,
 				enum bpf_access_type type,
-				enum bpf_reg_type *reg_type)
+				enum bpf_reg_type *reg_type,
+				int *ctx_field_size)
 {
 	switch (off) {
-	case offsetof(struct __sk_buff, tc_classid):
+	case offsetof(struct __sk_buff, tc_classid) ...
+	     offsetof(struct __sk_buff, tc_classid) + sizeof(__u32) - 1:
 		return false;
 	}
 
@@ -2934,12 +2959,13 @@ static bool lwt_is_valid_access(int off, int size,
 		break;
 	}
 
-	return __is_valid_access(off, size);
+	return __is_valid_access(off, size, type, ctx_field_size);
 }
 
 static bool sock_filter_is_valid_access(int off, int size,
 					enum bpf_access_type type,
-					enum bpf_reg_type *reg_type)
+					enum bpf_reg_type *reg_type,
+					int *ctx_field_size)
 {
 	if (type == BPF_WRITE) {
 		switch (off) {
@@ -3002,7 +3028,8 @@ static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
 
 static bool tc_cls_act_is_valid_access(int off, int size,
 				       enum bpf_access_type type,
-				       enum bpf_reg_type *reg_type)
+				       enum bpf_reg_type *reg_type,
+				       int *ctx_field_size)
 {
 	if (type == BPF_WRITE) {
 		switch (off) {
@@ -3027,7 +3054,7 @@ static bool tc_cls_act_is_valid_access(int off, int size,
 		break;
 	}
 
-	return __is_valid_access(off, size);
+	return __is_valid_access(off, size, type, ctx_field_size);
 }
 
 static bool __is_valid_xdp_access(int off, int size)
@@ -3044,7 +3071,8 @@ static bool __is_valid_xdp_access(int off, int size)
 
 static bool xdp_is_valid_access(int off, int size,
 				enum bpf_access_type type,
-				enum bpf_reg_type *reg_type)
+				enum bpf_reg_type *reg_type,
+				int *ctx_field_size)
 {
 	if (type == BPF_WRITE)
 		return false;
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 9f0e07ba5334..2ca51a8a588c 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -14,7 +14,8 @@ LDLIBS += -lcap -lelf
 TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
 	test_align
 
-TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o
+TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \
+	test_pkt_md_access.o
 
 TEST_PROGS := test_kmod.sh
diff --git a/tools/testing/selftests/bpf/test_pkt_md_access.c b/tools/testing/selftests/bpf/test_pkt_md_access.c
new file mode 100644
index 000000000000..71729d47eb85
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_pkt_md_access.c
@@ -0,0 +1,35 @@
+/* Copyright (c) 2017 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <stddef.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <linux/pkt_cls.h>
+#include "bpf_helpers.h"
+
+int _version SEC("version") = 1;
+
+#define TEST_FIELD(TYPE, FIELD, MASK)					\
+	{								\
+		TYPE tmp = *(volatile TYPE *)&skb->FIELD;		\
+		if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK))	\
+			return TC_ACT_SHOT;				\
+	}
+
+SEC("test1")
+int process(struct __sk_buff *skb)
+{
+	TEST_FIELD(__u8,  len, 0xFF);
+	TEST_FIELD(__u16, len, 0xFFFF);
+	TEST_FIELD(__u32, len, 0xFFFFFFFF);
+	TEST_FIELD(__u16, protocol, 0xFFFF);
+	TEST_FIELD(__u32, protocol, 0xFFFFFFFF);
+	TEST_FIELD(__u8,  hash, 0xFF);
+	TEST_FIELD(__u16, hash, 0xFFFF);
+	TEST_FIELD(__u32, hash, 0xFFFFFFFF);
+
+	return TC_ACT_OK;
+}
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index f10493d4c37c..5855cd3d3d45 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -484,6 +484,26 @@ done:
 		bpf_object__close(objs[i]);
 }
 
+static void test_pkt_md_access(void)
+{
+	const char *file = "./test_pkt_md_access.o";
+	struct bpf_object *obj;
+	__u32 duration, retval;
+	int err, prog_fd;
+
+	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+	if (err)
+		return;
+
+	err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
+				NULL, NULL, &retval, &duration);
+	CHECK(err || retval, "",
+	      "err %d errno %d retval %d duration %d\n",
+	      err, errno, retval, duration);
+
+	bpf_object__close(obj);
+}
+
 int main(void)
 {
 	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
@@ -495,6 +515,7 @@ int main(void)
 	test_l4lb();
 	test_tcp_estats();
 	test_bpf_obj_id();
+	test_pkt_md_access();
 
 	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
 	return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 4ee4708b0d60..c0af0195432f 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -1073,44 +1073,75 @@ static struct bpf_test tests[] = {
 		.result = ACCEPT,
 	},
 	{
-		"check cb access: byte, oob 1",
+		"__sk_buff->hash, offset 0, byte store not permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
-				    offsetof(struct __sk_buff, cb[4]) + 4),
+				    offsetof(struct __sk_buff, hash)),
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid bpf_context access",
 		.result = REJECT,
 	},
 	{
-		"check cb access: byte, oob 2",
+		"__sk_buff->tc_index, offset 3, byte store not permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
-				    offsetof(struct __sk_buff, cb[0]) - 1),
+				    offsetof(struct __sk_buff, tc_index) + 3),
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid bpf_context access",
 		.result = REJECT,
 	},
 	{
-		"check cb access: byte, oob 3",
+		"check skb->hash byte load permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
-				    offsetof(struct __sk_buff, cb[4]) + 4),
+				    offsetof(struct __sk_buff, hash)),
+#else
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, hash) + 3),
+#endif
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+	},
+	{
+		"check skb->hash byte load not permitted 1",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, hash) + 1),
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid bpf_context access",
 		.result = REJECT,
 	},
 	{
-		"check cb access: byte, oob 4",
+		"check skb->hash byte load not permitted 2",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
-				    offsetof(struct __sk_buff, cb[0]) - 1),
+				    offsetof(struct __sk_buff, hash) + 2),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "invalid bpf_context access",
+		.result = REJECT,
+	},
+	{
+		"check skb->hash byte load not permitted 3",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, hash) + 3),
+#else
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, hash)),
+#endif
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid bpf_context access",
@@ -1188,44 +1219,53 @@ static struct bpf_test tests[] = {
 		.result = REJECT,
 	},
 	{
-		"check cb access: half, oob 1",
+		"check __sk_buff->hash, offset 0, half store not permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
-				    offsetof(struct __sk_buff, cb[4]) + 4),
+				    offsetof(struct __sk_buff, hash)),
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid bpf_context access",
 		.result = REJECT,
 	},
 	{
-		"check cb access: half, oob 2",
+		"check __sk_buff->tc_index, offset 2, half store not permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
-				    offsetof(struct __sk_buff, cb[0]) - 2),
+				    offsetof(struct __sk_buff, tc_index) + 2),
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid bpf_context access",
 		.result = REJECT,
 	},
 	{
-		"check cb access: half, oob 3",
+		"check skb->hash half load permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
-				    offsetof(struct __sk_buff, cb[4]) + 4),
+				    offsetof(struct __sk_buff, hash)),
+#else
+			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, hash) + 2),
+#endif
 			BPF_EXIT_INSN(),
 		},
-		.errstr = "invalid bpf_context access",
-		.result = REJECT,
+		.result = ACCEPT,
 	},
 	{
-		"check cb access: half, oob 4",
+		"check skb->hash half load not permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, hash) + 2),
+#else
 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
-				    offsetof(struct __sk_buff, cb[0]) - 2),
+				    offsetof(struct __sk_buff, hash)),
+#endif
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid bpf_context access",
@@ -1368,28 +1408,6 @@ static struct bpf_test tests[] = {
 		"check cb access: double, oob 2",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
-				    offsetof(struct __sk_buff, cb[4]) + 8),
-			BPF_EXIT_INSN(),
-		},
-		.errstr = "invalid bpf_context access",
-		.result = REJECT,
-	},
-	{
-		"check cb access: double, oob 3",
-		.insns = {
-			BPF_MOV64_IMM(BPF_REG_0, 0),
-			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
-				    offsetof(struct __sk_buff, cb[0]) - 8),
-			BPF_EXIT_INSN(),
-		},
-		.errstr = "invalid bpf_context access",
-		.result = REJECT,
-	},
-	{
-		"check cb access: double, oob 4",
-		.insns = {
-			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct __sk_buff, cb[4])),
 			BPF_EXIT_INSN(),
@@ -1398,22 +1416,22 @@ static struct bpf_test tests[] = {
 		.result = REJECT,
 	},
 	{
-		"check cb access: double, oob 5",
+		"check __sk_buff->ifindex dw store not permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
-				    offsetof(struct __sk_buff, cb[4]) + 8),
+			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+				    offsetof(struct __sk_buff, ifindex)),
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid bpf_context access",
 		.result = REJECT,
 	},
 	{
-		"check cb access: double, oob 6",
+		"check __sk_buff->ifindex dw load not permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
-				    offsetof(struct __sk_buff, cb[0]) - 8),
+				    offsetof(struct __sk_buff, ifindex)),
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid bpf_context access",
@@ -5169,6 +5187,98 @@ static struct bpf_test tests[] = {
 		},
 		.result = ACCEPT,
 	},
+	{
+		"check bpf_perf_event_data->sample_period byte load permitted",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct bpf_perf_event_data, sample_period)),
+#else
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct bpf_perf_event_data, sample_period) + 7),
+#endif
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
+	},
+	{
+		"check bpf_perf_event_data->sample_period half load permitted",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct bpf_perf_event_data, sample_period)),
+#else
+			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct bpf_perf_event_data, sample_period) + 6),
+#endif
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
+	},
+	{
+		"check bpf_perf_event_data->sample_period word load permitted",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct bpf_perf_event_data, sample_period)),
+#else
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct bpf_perf_event_data, sample_period) + 4),
+#endif
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
+	},
+	{
+		"check bpf_perf_event_data->sample_period dword load permitted",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct bpf_perf_event_data, sample_period)),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
+	},
+	{
+		"check skb->data half load not permitted",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+#else
+			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, data) + 2),
+#endif
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "invalid bpf_context access",
+	},
+	{
+		"check skb->tc_classid half load not permitted for lwt prog",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, tc_classid)),
+#else
+			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, tc_classid) + 2),
+#endif
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "invalid bpf_context access",
+		.prog_type = BPF_PROG_TYPE_LWT_IN,
+	},
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)