Diffstat (limited to 'tools/lib')
-rw-r--r--  tools/lib/bpf/bpf.c            |  37
-rw-r--r--  tools/lib/bpf/bpf.h            |  19
-rw-r--r--  tools/lib/bpf/bpf_tracing.h    | 223
-rw-r--r--  tools/lib/bpf/btf.c            |  20
-rw-r--r--  tools/lib/bpf/btf_dump.c       |  10
-rw-r--r--  tools/lib/bpf/libbpf.c         | 324
-rw-r--r--  tools/lib/bpf/libbpf.h         |  31
-rw-r--r--  tools/lib/bpf/libbpf.map       |  19
-rw-r--r--  tools/lib/bpf/libbpf_probes.c  |   1
-rw-r--r--  tools/lib/bpf/netlink.c        |  34
-rw-r--r--  tools/lib/bpf/xsk.c            |  16
11 files changed, 659 insertions(+), 75 deletions(-)
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index c6dafe563176..5cc1b0785d18 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -235,7 +235,8 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
memset(&attr, 0, sizeof(attr));
attr.prog_type = load_attr->prog_type;
attr.expected_attach_type = load_attr->expected_attach_type;
- if (attr.prog_type == BPF_PROG_TYPE_STRUCT_OPS) {
+ if (attr.prog_type == BPF_PROG_TYPE_STRUCT_OPS ||
+ attr.prog_type == BPF_PROG_TYPE_LSM) {
attr.attach_btf_id = load_attr->attach_btf_id;
} else if (attr.prog_type == BPF_PROG_TYPE_TRACING ||
attr.prog_type == BPF_PROG_TYPE_EXT) {
@@ -584,6 +585,40 @@ int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}
+int bpf_link_create(int prog_fd, int target_fd,
+ enum bpf_attach_type attach_type,
+ const struct bpf_link_create_opts *opts)
+{
+ union bpf_attr attr;
+
+ if (!OPTS_VALID(opts, bpf_link_create_opts))
+ return -EINVAL;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.link_create.prog_fd = prog_fd;
+ attr.link_create.target_fd = target_fd;
+ attr.link_create.attach_type = attach_type;
+
+ return sys_bpf(BPF_LINK_CREATE, &attr, sizeof(attr));
+}
+
+int bpf_link_update(int link_fd, int new_prog_fd,
+ const struct bpf_link_update_opts *opts)
+{
+ union bpf_attr attr;
+
+ if (!OPTS_VALID(opts, bpf_link_update_opts))
+ return -EINVAL;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.link_update.link_fd = link_fd;
+ attr.link_update.new_prog_fd = new_prog_fd;
+ attr.link_update.flags = OPTS_GET(opts, flags, 0);
+ attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);
+
+ return sys_bpf(BPF_LINK_UPDATE, &attr, sizeof(attr));
+}
+
int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
__u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
{
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index b976e77316cc..46d47afdd887 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -168,6 +168,25 @@ LIBBPF_API int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
LIBBPF_API int bpf_prog_detach2(int prog_fd, int attachable_fd,
enum bpf_attach_type type);
+struct bpf_link_create_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+};
+#define bpf_link_create_opts__last_field sz
+
+LIBBPF_API int bpf_link_create(int prog_fd, int target_fd,
+ enum bpf_attach_type attach_type,
+ const struct bpf_link_create_opts *opts);
+
+struct bpf_link_update_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+ __u32 flags; /* extra flags */
+ __u32 old_prog_fd; /* expected old program FD */
+};
+#define bpf_link_update_opts__last_field old_prog_fd
+
+LIBBPF_API int bpf_link_update(int link_fd, int new_prog_fd,
+ const struct bpf_link_update_opts *opts);
+
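A hedged user-space sketch of how these two low-level wrappers can be combined: bpf_link_create() attaches an already-loaded cgroup program as an FD-based link, and bpf_link_update() later swaps in a new program atomically. The cgroup FD, the program FDs, and the attach type are illustrative assumptions, not part of this patch.

#include <errno.h>
#include <bpf/bpf.h>

/* prog FDs refer to two already-loaded BPF_PROG_TYPE_CGROUP_SKB programs */
static int attach_then_swap(int cgroup_fd, int prog_fd, int new_prog_fd)
{
        DECLARE_LIBBPF_OPTS(bpf_link_update_opts, upd_opts,
                .flags = BPF_F_REPLACE,      /* fail unless old prog is still attached */
                .old_prog_fd = prog_fd,
        );
        int link_fd, err;

        /* wraps BPF_LINK_CREATE; the attachment lives as long as link_fd */
        link_fd = bpf_link_create(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, NULL);
        if (link_fd < 0)
                return -errno;

        /* wraps BPF_LINK_UPDATE; swaps the program with no detach/attach gap */
        err = bpf_link_update(link_fd, new_prog_fd, &upd_opts);

        return err ? -errno : link_fd;
}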
struct bpf_prog_test_run_attr {
int prog_fd;
int repeat;
diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
index b0dafe8b4ebc..f3f3c3fb98cb 100644
--- a/tools/lib/bpf/bpf_tracing.h
+++ b/tools/lib/bpf/bpf_tracing.h
@@ -49,7 +49,8 @@
#if defined(bpf_target_x86)
-#ifdef __KERNEL__
+#if defined(__KERNEL__) || defined(__VMLINUX_H__)
+
#define PT_REGS_PARM1(x) ((x)->di)
#define PT_REGS_PARM2(x) ((x)->si)
#define PT_REGS_PARM3(x) ((x)->dx)
@@ -60,7 +61,20 @@
#define PT_REGS_RC(x) ((x)->ax)
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->ip)
+
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), di)
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), si)
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), dx)
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), cx)
+#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), r8)
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), sp)
+#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), bp)
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), ax)
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), sp)
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), ip)
+
#else
+
#ifdef __i386__
/* i386 kernel is built with -mregparm=3 */
#define PT_REGS_PARM1(x) ((x)->eax)
@@ -73,7 +87,20 @@
#define PT_REGS_RC(x) ((x)->eax)
#define PT_REGS_SP(x) ((x)->esp)
#define PT_REGS_IP(x) ((x)->eip)
+
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), eax)
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), edx)
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), ecx)
+#define PT_REGS_PARM4_CORE(x) 0
+#define PT_REGS_PARM5_CORE(x) 0
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), esp)
+#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), ebp)
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), eax)
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), esp)
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), eip)
+
#else
+
#define PT_REGS_PARM1(x) ((x)->rdi)
#define PT_REGS_PARM2(x) ((x)->rsi)
#define PT_REGS_PARM3(x) ((x)->rdx)
@@ -84,6 +111,18 @@
#define PT_REGS_RC(x) ((x)->rax)
#define PT_REGS_SP(x) ((x)->rsp)
#define PT_REGS_IP(x) ((x)->rip)
+
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), rdi)
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), rsi)
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), rdx)
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), rcx)
+#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), r8)
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), rsp)
+#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), rbp)
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), rax)
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), rsp)
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), rip)
+
#endif
#endif
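The new *_CORE() variants go through BPF_CORE_READ() (the program must include bpf_core_read.h), so register fields are located via BTF relocations at load time instead of being hard-coded at compile time. A hedged sketch of a kprobe using one of them, assuming an x86 build with -D__TARGET_ARCH_x86 and a bpftool-generated vmlinux.h; the probed function is only an example, and in a plain kprobe the non-CORE variant would also work.

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("kprobe/do_unlinkat")
int trace_unlinkat(struct pt_regs *ctx)
{
        /* second argument of do_unlinkat() is a struct filename *;
         * PT_REGS_PARM2_CORE() fetches regs->si via a CO-RE relocated read
         */
        struct filename *name = (struct filename *)PT_REGS_PARM2_CORE(ctx);
        const char *fname = BPF_CORE_READ(name, name);

        bpf_printk("do_unlinkat: %s", fname);
        return 0;
}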
@@ -104,6 +143,17 @@ struct pt_regs;
#define PT_REGS_SP(x) (((PT_REGS_S390 *)(x))->gprs[15])
#define PT_REGS_IP(x) (((PT_REGS_S390 *)(x))->psw.addr)
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[2])
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[3])
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[4])
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[5])
+#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[6])
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[14])
+#define PT_REGS_FP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[11])
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[2])
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[15])
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), psw.addr)
+
#elif defined(bpf_target_arm)
#define PT_REGS_PARM1(x) ((x)->uregs[0])
@@ -117,6 +167,17 @@ struct pt_regs;
#define PT_REGS_SP(x) ((x)->uregs[13])
#define PT_REGS_IP(x) ((x)->uregs[12])
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), uregs[0])
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), uregs[1])
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), uregs[2])
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), uregs[3])
+#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), uregs[4])
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), uregs[14])
+#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), uregs[11])
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), uregs[0])
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), uregs[13])
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), uregs[12])
+
#elif defined(bpf_target_arm64)
/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
@@ -134,6 +195,17 @@ struct pt_regs;
#define PT_REGS_SP(x) (((PT_REGS_ARM64 *)(x))->sp)
#define PT_REGS_IP(x) (((PT_REGS_ARM64 *)(x))->pc)
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[0])
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[1])
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[2])
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[3])
+#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[4])
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[30])
+#define PT_REGS_FP_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[29])
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[0])
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), sp)
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), pc)
+
#elif defined(bpf_target_mips)
#define PT_REGS_PARM1(x) ((x)->regs[4])
@@ -147,6 +219,17 @@ struct pt_regs;
#define PT_REGS_SP(x) ((x)->regs[29])
#define PT_REGS_IP(x) ((x)->cp0_epc)
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), regs[4])
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), regs[5])
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), regs[6])
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), regs[7])
+#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), regs[8])
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), regs[31])
+#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), regs[30])
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), regs[1])
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), regs[29])
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), cp0_epc)
+
#elif defined(bpf_target_powerpc)
#define PT_REGS_PARM1(x) ((x)->gpr[3])
@@ -158,6 +241,15 @@ struct pt_regs;
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->nip)
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), gpr[3])
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), gpr[4])
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), gpr[5])
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), gpr[6])
+#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), gpr[7])
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), gpr[3])
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), sp)
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), nip)
+
#elif defined(bpf_target_sparc)
#define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0])
@@ -169,11 +261,22 @@ struct pt_regs;
#define PT_REGS_RC(x) ((x)->u_regs[UREG_I0])
#define PT_REGS_SP(x) ((x)->u_regs[UREG_FP])
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I0])
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I1])
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I2])
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I3])
+#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I4])
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I7])
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I0])
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), u_regs[UREG_FP])
+
/* Should this also be a bpf_target check for the sparc case? */
#if defined(__arch64__)
#define PT_REGS_IP(x) ((x)->tpc)
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), tpc)
#else
#define PT_REGS_IP(x) ((x)->pc)
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), pc)
#endif
#endif
@@ -192,4 +295,122 @@ struct pt_regs;
(void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
#endif
+#define ___bpf_concat(a, b) a ## b
+#define ___bpf_apply(fn, n) ___bpf_concat(fn, n)
+#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N
+#define ___bpf_narg(...) \
+ ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+#define ___bpf_empty(...) \
+ ___bpf_nth(_, ##__VA_ARGS__, N, N, N, N, N, N, N, N, N, N, 0)
+
+#define ___bpf_ctx_cast0() ctx
+#define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), (void *)ctx[0]
+#define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), (void *)ctx[1]
+#define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), (void *)ctx[2]
+#define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), (void *)ctx[3]
+#define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), (void *)ctx[4]
+#define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), (void *)ctx[5]
+#define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), (void *)ctx[6]
+#define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), (void *)ctx[7]
+#define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), (void *)ctx[8]
+#define ___bpf_ctx_cast10(x, args...) ___bpf_ctx_cast9(args), (void *)ctx[9]
+#define ___bpf_ctx_cast11(x, args...) ___bpf_ctx_cast10(args), (void *)ctx[10]
+#define ___bpf_ctx_cast12(x, args...) ___bpf_ctx_cast11(args), (void *)ctx[11]
+#define ___bpf_ctx_cast(args...) \
+ ___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args)
+
+/*
+ * BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and
+ * similar kinds of BPF programs that accept input arguments as a single
+ * pointer to an untyped u64 array, where each u64 can actually be a typed
+ * pointer or an integer of a different size. Instead of requiring the user
+ * to write manual casts and to work with array elements by index, the
+ * BPF_PROG macro allows the user to declare a list of named and typed input
+ * arguments in the same syntax as for a normal C function. All the casting
+ * is hidden and performed transparently, while user code can simply work
+ * with function arguments of the specified type and name.
+ *
+ * The original raw context argument is still available under the name
+ * 'ctx'. This is useful when using BPF helpers that expect the original
+ * context as one of the parameters (e.g., for bpf_perf_event_output()).
+ */
+#define BPF_PROG(name, args...) \
+name(unsigned long long *ctx); \
+static __attribute__((always_inline)) typeof(name(0)) \
+____##name(unsigned long long *ctx, ##args); \
+typeof(name(0)) name(unsigned long long *ctx) \
+{ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ return ____##name(___bpf_ctx_cast(args)); \
+ _Pragma("GCC diagnostic pop") \
+} \
+static __attribute__((always_inline)) typeof(name(0)) \
+____##name(unsigned long long *ctx, ##args)
+
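A minimal sketch of what BPF_PROG looks like from the program author's side, for an assumed fentry target (do_unlinkat and its argument types are assumptions for illustration, not part of this patch):

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

/* Expands into int handle_unlinkat(unsigned long long *ctx); the named
 * arguments below are generated casts of ctx[0] and ctx[1].
 */
SEC("fentry/do_unlinkat")
int BPF_PROG(handle_unlinkat, int dfd, struct filename *name)
{
        bpf_printk("fentry: do_unlinkat dfd=%d", dfd);
        return 0;
}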
+struct pt_regs;
+
+#define ___bpf_kprobe_args0() ctx
+#define ___bpf_kprobe_args1(x) \
+ ___bpf_kprobe_args0(), (void *)PT_REGS_PARM1(ctx)
+#define ___bpf_kprobe_args2(x, args...) \
+ ___bpf_kprobe_args1(args), (void *)PT_REGS_PARM2(ctx)
+#define ___bpf_kprobe_args3(x, args...) \
+ ___bpf_kprobe_args2(args), (void *)PT_REGS_PARM3(ctx)
+#define ___bpf_kprobe_args4(x, args...) \
+ ___bpf_kprobe_args3(args), (void *)PT_REGS_PARM4(ctx)
+#define ___bpf_kprobe_args5(x, args...) \
+ ___bpf_kprobe_args4(args), (void *)PT_REGS_PARM5(ctx)
+#define ___bpf_kprobe_args(args...) \
+ ___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args)
+
+/*
+ * BPF_KPROBE serves the same purpose for kprobes as BPF_PROG for
+ * tp_btf/fentry/fexit BPF programs. It hides the underlying platform-specific
+ * low-level way of getting kprobe input arguments from struct pt_regs, and
+ * provides the familiar syntax and semantics of named, typed function
+ * arguments for accessing kprobe input parameters.
+ *
+ * The original struct pt_regs *ctx is preserved as the 'ctx' argument. This
+ * might be necessary when using BPF helpers like bpf_perf_event_output().
+ */
+#define BPF_KPROBE(name, args...) \
+name(struct pt_regs *ctx); \
+static __attribute__((always_inline)) typeof(name(0)) \
+____##name(struct pt_regs *ctx, ##args); \
+typeof(name(0)) name(struct pt_regs *ctx) \
+{ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ return ____##name(___bpf_kprobe_args(args)); \
+ _Pragma("GCC diagnostic pop") \
+} \
+static __attribute__((always_inline)) typeof(name(0)) \
+____##name(struct pt_regs *ctx, ##args)
+
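The same assumed target written as a kprobe with BPF_KPROBE: the named arguments are pulled from PT_REGS_PARM1()/PT_REGS_PARM2() behind the scenes, and the raw struct pt_regs *ctx stays in scope for helpers that need it.

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("kprobe/do_unlinkat")
int BPF_KPROBE(kprobe_unlinkat, int dfd, struct filename *name)
{
        pid_t pid = bpf_get_current_pid_tgid() >> 32;

        bpf_printk("kprobe: do_unlinkat pid=%d dfd=%d", pid, dfd);
        return 0;
}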
+#define ___bpf_kretprobe_args0() ctx
+#define ___bpf_kretprobe_args1(x) \
+ ___bpf_kretprobe_args0(), (void *)PT_REGS_RC(ctx)
+#define ___bpf_kretprobe_args(args...) \
+ ___bpf_apply(___bpf_kretprobe_args, ___bpf_narg(args))(args)
+
+/*
+ * BPF_KRETPROBE is similar to BPF_KPROBE, except that it only provides an
+ * optional return value (in addition to `struct pt_regs *ctx`), and no input
+ * arguments, because they will have been clobbered by the time the probed
+ * function returns.
+ */
+#define BPF_KRETPROBE(name, args...) \
+name(struct pt_regs *ctx); \
+static __attribute__((always_inline)) typeof(name(0)) \
+____##name(struct pt_regs *ctx, ##args); \
+typeof(name(0)) name(struct pt_regs *ctx) \
+{ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ return ____##name(___bpf_kretprobe_args(args)); \
+ _Pragma("GCC diagnostic pop") \
+} \
+static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
+
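And the matching kretprobe sketch: only the return value (PT_REGS_RC(ctx)) is available by the time the probed function returns.

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("kretprobe/do_unlinkat")
int BPF_KRETPROBE(kretprobe_unlinkat, long ret)
{
        bpf_printk("kretprobe: do_unlinkat ret=%ld", ret);
        return 0;
}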
#endif
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 3d1c25fc97ae..bfef3d606b54 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -657,22 +657,32 @@ int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
int btf__load(struct btf *btf)
{
- __u32 log_buf_size = BPF_LOG_BUF_SIZE;
+ __u32 log_buf_size = 0;
char *log_buf = NULL;
int err = 0;
if (btf->fd >= 0)
return -EEXIST;
- log_buf = malloc(log_buf_size);
- if (!log_buf)
- return -ENOMEM;
+retry_load:
+ if (log_buf_size) {
+ log_buf = malloc(log_buf_size);
+ if (!log_buf)
+ return -ENOMEM;
- *log_buf = 0;
+ *log_buf = 0;
+ }
btf->fd = bpf_load_btf(btf->data, btf->data_size,
log_buf, log_buf_size, false);
if (btf->fd < 0) {
+ if (!log_buf || errno == ENOSPC) {
+ log_buf_size = max((__u32)BPF_LOG_BUF_SIZE,
+ log_buf_size << 1);
+ free(log_buf);
+ goto retry_load;
+ }
+
err = -errno;
pr_warn("Error loading BTF: %s(%d)\n", strerror(errno), errno);
if (*log_buf)
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
index bd09ed1710f1..0c28ee82834b 100644
--- a/tools/lib/bpf/btf_dump.c
+++ b/tools/lib/bpf/btf_dump.c
@@ -916,13 +916,13 @@ static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id,
/* enumerators share namespace with typedef idents */
dup_cnt = btf_dump_name_dups(d, d->ident_names, name);
if (dup_cnt > 1) {
- btf_dump_printf(d, "\n%s%s___%zu = %d,",
+ btf_dump_printf(d, "\n%s%s___%zu = %u,",
pfx(lvl + 1), name, dup_cnt,
- (__s32)v->val);
+ (__u32)v->val);
} else {
- btf_dump_printf(d, "\n%s%s = %d,",
+ btf_dump_printf(d, "\n%s%s = %u,",
pfx(lvl + 1), name,
- (__s32)v->val);
+ (__u32)v->val);
}
}
btf_dump_printf(d, "\n%s}", pfx(lvl));
@@ -1030,7 +1030,7 @@ int btf_dump__emit_type_decl(struct btf_dump *d, __u32 id,
if (!OPTS_VALID(opts, btf_dump_emit_type_decl_opts))
return -EINVAL;
- fname = OPTS_GET(opts, field_name, NULL);
+ fname = OPTS_GET(opts, field_name, "");
lvl = OPTS_GET(opts, indent_level, 0);
btf_dump_emit_type_decl(d, id, fname, lvl);
return 0;
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 7469c7dcc15e..ff9174282a8c 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -1845,7 +1845,6 @@ resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
* type definition, while using only sizeof(void *) space in ELF data section.
*/
static bool get_map_field_int(const char *map_name, const struct btf *btf,
- const struct btf_type *def,
const struct btf_member *m, __u32 *res)
{
const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
@@ -1972,19 +1971,19 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
return -EINVAL;
}
if (strcmp(name, "type") == 0) {
- if (!get_map_field_int(map_name, obj->btf, def, m,
+ if (!get_map_field_int(map_name, obj->btf, m,
&map->def.type))
return -EINVAL;
pr_debug("map '%s': found type = %u.\n",
map_name, map->def.type);
} else if (strcmp(name, "max_entries") == 0) {
- if (!get_map_field_int(map_name, obj->btf, def, m,
+ if (!get_map_field_int(map_name, obj->btf, m,
&map->def.max_entries))
return -EINVAL;
pr_debug("map '%s': found max_entries = %u.\n",
map_name, map->def.max_entries);
} else if (strcmp(name, "map_flags") == 0) {
- if (!get_map_field_int(map_name, obj->btf, def, m,
+ if (!get_map_field_int(map_name, obj->btf, m,
&map->def.map_flags))
return -EINVAL;
pr_debug("map '%s': found map_flags = %u.\n",
@@ -1992,8 +1991,7 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
} else if (strcmp(name, "key_size") == 0) {
__u32 sz;
- if (!get_map_field_int(map_name, obj->btf, def, m,
- &sz))
+ if (!get_map_field_int(map_name, obj->btf, m, &sz))
return -EINVAL;
pr_debug("map '%s': found key_size = %u.\n",
map_name, sz);
@@ -2035,8 +2033,7 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
} else if (strcmp(name, "value_size") == 0) {
__u32 sz;
- if (!get_map_field_int(map_name, obj->btf, def, m,
- &sz))
+ if (!get_map_field_int(map_name, obj->btf, m, &sz))
return -EINVAL;
pr_debug("map '%s': found value_size = %u.\n",
map_name, sz);
@@ -2079,8 +2076,7 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
__u32 val;
int err;
- if (!get_map_field_int(map_name, obj->btf, def, m,
- &val))
+ if (!get_map_field_int(map_name, obj->btf, m, &val))
return -EINVAL;
pr_debug("map '%s': found pinning = %u.\n",
map_name, val);
@@ -2284,11 +2280,16 @@ static void bpf_object__sanitize_btf_ext(struct bpf_object *obj)
}
}
-static bool bpf_object__is_btf_mandatory(const struct bpf_object *obj)
+static bool libbpf_needs_btf(const struct bpf_object *obj)
{
return obj->efile.btf_maps_shndx >= 0 ||
- obj->efile.st_ops_shndx >= 0 ||
- obj->nr_extern > 0;
+ obj->efile.st_ops_shndx >= 0 ||
+ obj->nr_extern > 0;
+}
+
+static bool kernel_needs_btf(const struct bpf_object *obj)
+{
+ return obj->efile.st_ops_shndx >= 0;
}
static int bpf_object__init_btf(struct bpf_object *obj,
@@ -2324,7 +2325,7 @@ static int bpf_object__init_btf(struct bpf_object *obj,
}
}
out:
- if (err && bpf_object__is_btf_mandatory(obj)) {
+ if (err && libbpf_needs_btf(obj)) {
pr_warn("BTF is required, but is missing or corrupted.\n");
return err;
}
@@ -2348,7 +2349,7 @@ static int bpf_object__finalize_btf(struct bpf_object *obj)
btf_ext__free(obj->btf_ext);
obj->btf_ext = NULL;
- if (bpf_object__is_btf_mandatory(obj)) {
+ if (libbpf_needs_btf(obj)) {
pr_warn("BTF is required, but is missing or corrupted.\n");
return -ENOENT;
}
@@ -2357,7 +2358,8 @@ static int bpf_object__finalize_btf(struct bpf_object *obj)
static inline bool libbpf_prog_needs_vmlinux_btf(struct bpf_program *prog)
{
- if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
+ if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
+ prog->type == BPF_PROG_TYPE_LSM)
return true;
/* BPF_PROG_TYPE_TRACING programs which do not attach to other programs
@@ -2412,7 +2414,7 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
obj->btf_ext = NULL;
}
- if (bpf_object__is_btf_mandatory(obj))
+ if (kernel_needs_btf(obj))
return err;
}
return 0;
@@ -3868,6 +3870,10 @@ static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf,
if (str_is_empty(targ_name))
continue;
+ t = skip_mods_and_typedefs(targ_btf, i, NULL);
+ if (!btf_is_composite(t) && !btf_is_array(t))
+ continue;
+
targ_essent_len = bpf_core_essential_name_len(targ_name);
if (targ_essent_len != local_essent_len)
continue;
@@ -4846,8 +4852,8 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
{
struct bpf_load_program_attr load_attr;
char *cp, errmsg[STRERR_BUFSIZE];
- int log_buf_size = BPF_LOG_BUF_SIZE;
- char *log_buf;
+ size_t log_buf_size = 0;
+ char *log_buf = NULL;
int btf_fd, ret;
if (!insns || !insns_cnt)
@@ -4861,7 +4867,8 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
load_attr.insns = insns;
load_attr.insns_cnt = insns_cnt;
load_attr.license = license;
- if (prog->type == BPF_PROG_TYPE_STRUCT_OPS) {
+ if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
+ prog->type == BPF_PROG_TYPE_LSM) {
load_attr.attach_btf_id = prog->attach_btf_id;
} else if (prog->type == BPF_PROG_TYPE_TRACING ||
prog->type == BPF_PROG_TYPE_EXT) {
@@ -4887,22 +4894,28 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
load_attr.prog_flags = prog->prog_flags;
retry_load:
- log_buf = malloc(log_buf_size);
- if (!log_buf)
- pr_warn("Alloc log buffer for bpf loader error, continue without log\n");
+ if (log_buf_size) {
+ log_buf = malloc(log_buf_size);
+ if (!log_buf)
+ return -ENOMEM;
+
+ *log_buf = 0;
+ }
ret = bpf_load_program_xattr(&load_attr, log_buf, log_buf_size);
if (ret >= 0) {
- if (load_attr.log_level)
+ if (log_buf && load_attr.log_level)
pr_debug("verifier log:\n%s", log_buf);
*pfd = ret;
ret = 0;
goto out;
}
- if (errno == ENOSPC) {
- log_buf_size <<= 1;
+ if (!log_buf || errno == ENOSPC) {
+ log_buf_size = max((size_t)BPF_LOG_BUF_SIZE,
+ log_buf_size << 1);
+
free(log_buf);
goto retry_load;
}
@@ -4945,8 +4958,9 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
{
int err = 0, fd, i, btf_id;
- if (prog->type == BPF_PROG_TYPE_TRACING ||
- prog->type == BPF_PROG_TYPE_EXT) {
+ if ((prog->type == BPF_PROG_TYPE_TRACING ||
+ prog->type == BPF_PROG_TYPE_LSM ||
+ prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
btf_id = libbpf_find_attach_btf_id(prog);
if (btf_id <= 0)
return btf_id;
@@ -6185,6 +6199,7 @@ bool bpf_program__is_##NAME(const struct bpf_program *prog) \
} \
BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
+BPF_PROG_TYPE_FNS(lsm, BPF_PROG_TYPE_LSM);
BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
@@ -6251,6 +6266,8 @@ static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
struct bpf_program *prog);
static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
struct bpf_program *prog);
+static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
+ struct bpf_program *prog);
struct bpf_sec_def {
const char *sec;
@@ -6290,6 +6307,10 @@ static const struct bpf_sec_def section_defs[] = {
.expected_attach_type = BPF_TRACE_FENTRY,
.is_attach_btf = true,
.attach_fn = attach_trace),
+ SEC_DEF("fmod_ret/", TRACING,
+ .expected_attach_type = BPF_MODIFY_RETURN,
+ .is_attach_btf = true,
+ .attach_fn = attach_trace),
SEC_DEF("fexit/", TRACING,
.expected_attach_type = BPF_TRACE_FEXIT,
.is_attach_btf = true,
@@ -6297,6 +6318,10 @@ static const struct bpf_sec_def section_defs[] = {
SEC_DEF("freplace/", EXT,
.is_attach_btf = true,
.attach_fn = attach_trace),
+ SEC_DEF("lsm/", LSM,
+ .is_attach_btf = true,
+ .expected_attach_type = BPF_LSM_MAC,
+ .attach_fn = attach_lsm),
BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
@@ -6559,6 +6584,7 @@ invalid_prog:
}
#define BTF_TRACE_PREFIX "btf_trace_"
+#define BTF_LSM_PREFIX "bpf_lsm_"
#define BTF_MAX_NAME_SIZE 128
static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
@@ -6586,9 +6612,15 @@ static inline int __find_vmlinux_btf_id(struct btf *btf, const char *name,
if (attach_type == BPF_TRACE_RAW_TP)
err = find_btf_by_prefix_kind(btf, BTF_TRACE_PREFIX, name,
BTF_KIND_TYPEDEF);
+ else if (attach_type == BPF_LSM_MAC)
+ err = find_btf_by_prefix_kind(btf, BTF_LSM_PREFIX, name,
+ BTF_KIND_FUNC);
else
err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
+ if (err <= 0)
+ pr_warn("%s is not found in vmlinux BTF\n", name);
+
return err;
}
@@ -6661,8 +6693,6 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog)
err = __find_vmlinux_btf_id(prog->obj->btf_vmlinux,
name + section_defs[i].len,
attach_type);
- if (err <= 0)
- pr_warn("%s is not found in vmlinux BTF\n", name);
return err;
}
pr_warn("failed to identify btf_id based on ELF section name '%s'\n", name);
@@ -6742,6 +6772,17 @@ void *bpf_map__priv(const struct bpf_map *map)
return map ? map->priv : ERR_PTR(-EINVAL);
}
+int bpf_map__set_initial_value(struct bpf_map *map,
+ const void *data, size_t size)
+{
+ if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG ||
+ size != map->def.value_size || map->fd >= 0)
+ return -EINVAL;
+
+ memcpy(map->mmaped, data, size);
+ return 0;
+}
+
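bpf_map__set_initial_value() only works on mmap-able internal maps (.data/.rodata/.bss, but not .kconfig) and only between open and load, since it writes into the map's mmaped image and requires an exact value_size match. A hedged user-space sketch; the object path, map name, and layout struct are assumptions:

#include <bpf/libbpf.h>

struct rodata_layout {                  /* must match the object's .rodata layout */
        int debug_level;
        char tag[16];
};

static struct bpf_object *open_load_with_config(const char *obj_path)
{
        struct rodata_layout cfg = { .debug_level = 2, .tag = "prod" };
        struct bpf_object *obj;
        struct bpf_map *map;

        obj = bpf_object__open(obj_path);
        if (libbpf_get_error(obj))
                return NULL;

        /* hypothetical internal map name; libbpf derives it from the object name */
        map = bpf_object__find_map_by_name(obj, "example.rodata");
        if (!map || bpf_map__set_initial_value(map, &cfg, sizeof(cfg)) ||
            bpf_object__load(obj)) {
                bpf_object__close(obj);
                return NULL;
        }
        return obj;
}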
bool bpf_map__is_offload_neutral(const struct bpf_map *map)
{
return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
@@ -6932,9 +6973,17 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
struct bpf_link {
int (*detach)(struct bpf_link *link);
int (*destroy)(struct bpf_link *link);
+ char *pin_path; /* NULL, if not pinned */
+ int fd; /* hook FD, -1 if not applicable */
bool disconnected;
};
+/* Replace link's underlying BPF program with the new one */
+int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
+{
+ return bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL);
+}
+
/* Release "ownership" of underlying BPF resource (typically, BPF program
* attached to some BPF hook, e.g., tracepoint, kprobe, etc). Disconnected
* link, when destructed through bpf_link__destroy() call won't attempt to
@@ -6961,26 +7010,109 @@ int bpf_link__destroy(struct bpf_link *link)
err = link->detach(link);
if (link->destroy)
link->destroy(link);
+ if (link->pin_path)
+ free(link->pin_path);
free(link);
return err;
}
-struct bpf_link_fd {
- struct bpf_link link; /* has to be at the top of struct */
- int fd; /* hook FD */
-};
+int bpf_link__fd(const struct bpf_link *link)
+{
+ return link->fd;
+}
+
+const char *bpf_link__pin_path(const struct bpf_link *link)
+{
+ return link->pin_path;
+}
+
+static int bpf_link__detach_fd(struct bpf_link *link)
+{
+ return close(link->fd);
+}
+
+struct bpf_link *bpf_link__open(const char *path)
+{
+ struct bpf_link *link;
+ int fd;
+
+ fd = bpf_obj_get(path);
+ if (fd < 0) {
+ fd = -errno;
+ pr_warn("failed to open link at %s: %d\n", path, fd);
+ return ERR_PTR(fd);
+ }
+
+ link = calloc(1, sizeof(*link));
+ if (!link) {
+ close(fd);
+ return ERR_PTR(-ENOMEM);
+ }
+ link->detach = &bpf_link__detach_fd;
+ link->fd = fd;
+
+ link->pin_path = strdup(path);
+ if (!link->pin_path) {
+ bpf_link__destroy(link);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return link;
+}
+
+int bpf_link__pin(struct bpf_link *link, const char *path)
+{
+ int err;
+
+ if (link->pin_path)
+ return -EBUSY;
+ err = make_parent_dir(path);
+ if (err)
+ return err;
+ err = check_path(path);
+ if (err)
+ return err;
+
+ link->pin_path = strdup(path);
+ if (!link->pin_path)
+ return -ENOMEM;
+
+ if (bpf_obj_pin(link->fd, link->pin_path)) {
+ err = -errno;
+ zfree(&link->pin_path);
+ return err;
+ }
+
+ pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
+ return 0;
+}
+
+int bpf_link__unpin(struct bpf_link *link)
+{
+ int err;
+
+ if (!link->pin_path)
+ return -EINVAL;
+
+ err = unlink(link->pin_path);
+ if (err != 0)
+ return -errno;
+
+ pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
+ zfree(&link->pin_path);
+ return 0;
+}
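Since the link now carries its own FD and pin path, it can be pinned in bpffs by one process and reopened by another. A hedged sketch, assuming the attach type produces a kernel bpf_link object that supports BPF_OBJ_PIN (e.g. a tracing or raw tracepoint program on a kernel with pinnable links) and an illustrative bpffs path:

#include <bpf/libbpf.h>

/* process A: attach and pin, so the attachment outlives this process */
static int attach_and_pin(struct bpf_program *prog)
{
        struct bpf_link *link = bpf_program__attach(prog);

        if (libbpf_get_error(link))
                return -1;
        if (bpf_link__pin(link, "/sys/fs/bpf/my_link")) {   /* hypothetical path */
                bpf_link__destroy(link);
                return -1;
        }
        /* link struct intentionally kept until exit; the pin keeps the
         * kernel-side link alive afterwards
         */
        return 0;
}

/* process B: pick the pinned link back up and detach it for good */
static int detach_pinned(void)
{
        struct bpf_link *link = bpf_link__open("/sys/fs/bpf/my_link");

        if (libbpf_get_error(link))
                return -1;
        bpf_link__unpin(link);          /* remove the bpffs pin */
        return bpf_link__destroy(link); /* drop the last reference, detaching */
}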
static int bpf_link__detach_perf_event(struct bpf_link *link)
{
- struct bpf_link_fd *l = (void *)link;
int err;
- err = ioctl(l->fd, PERF_EVENT_IOC_DISABLE, 0);
+ err = ioctl(link->fd, PERF_EVENT_IOC_DISABLE, 0);
if (err)
err = -errno;
- close(l->fd);
+ close(link->fd);
return err;
}
@@ -6988,7 +7120,7 @@ struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
int pfd)
{
char errmsg[STRERR_BUFSIZE];
- struct bpf_link_fd *link;
+ struct bpf_link *link;
int prog_fd, err;
if (pfd < 0) {
@@ -7006,7 +7138,7 @@ struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
link = calloc(1, sizeof(*link));
if (!link)
return ERR_PTR(-ENOMEM);
- link->link.detach = &bpf_link__detach_perf_event;
+ link->detach = &bpf_link__detach_perf_event;
link->fd = pfd;
if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
@@ -7025,7 +7157,7 @@ struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
return ERR_PTR(err);
}
- return (struct bpf_link *)link;
+ return link;
}
/*
@@ -7313,18 +7445,11 @@ out:
return link;
}
-static int bpf_link__detach_fd(struct bpf_link *link)
-{
- struct bpf_link_fd *l = (void *)link;
-
- return close(l->fd);
-}
-
struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
const char *tp_name)
{
char errmsg[STRERR_BUFSIZE];
- struct bpf_link_fd *link;
+ struct bpf_link *link;
int prog_fd, pfd;
prog_fd = bpf_program__fd(prog);
@@ -7337,7 +7462,7 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
link = calloc(1, sizeof(*link));
if (!link)
return ERR_PTR(-ENOMEM);
- link->link.detach = &bpf_link__detach_fd;
+ link->detach = &bpf_link__detach_fd;
pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
if (pfd < 0) {
@@ -7349,7 +7474,7 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
return ERR_PTR(pfd);
}
link->fd = pfd;
- return (struct bpf_link *)link;
+ return link;
}
static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
@@ -7360,10 +7485,11 @@ static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
return bpf_program__attach_raw_tracepoint(prog, tp_name);
}
-struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
+/* Common logic for all BPF program types that attach to a btf_id */
+static struct bpf_link *bpf_program__attach_btf_id(struct bpf_program *prog)
{
char errmsg[STRERR_BUFSIZE];
- struct bpf_link_fd *link;
+ struct bpf_link *link;
int prog_fd, pfd;
prog_fd = bpf_program__fd(prog);
@@ -7376,13 +7502,13 @@ struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
link = calloc(1, sizeof(*link));
if (!link)
return ERR_PTR(-ENOMEM);
- link->link.detach = &bpf_link__detach_fd;
+ link->detach = &bpf_link__detach_fd;
pfd = bpf_raw_tracepoint_open(NULL, prog_fd);
if (pfd < 0) {
pfd = -errno;
free(link);
- pr_warn("program '%s': failed to attach to trace: %s\n",
+ pr_warn("program '%s': failed to attach: %s\n",
bpf_program__title(prog, false),
libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
return ERR_PTR(pfd);
@@ -7391,12 +7517,68 @@ struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
return (struct bpf_link *)link;
}
+struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
+{
+ return bpf_program__attach_btf_id(prog);
+}
+
+struct bpf_link *bpf_program__attach_lsm(struct bpf_program *prog)
+{
+ return bpf_program__attach_btf_id(prog);
+}
+
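On the BPF side, an LSM program uses the new "lsm/" section prefix; the part after the slash names the hook, which libbpf resolves to the bpf_lsm_<hook> BTF function. A hedged sketch modeled on the file_mprotect hook (the policy itself is purely illustrative):

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

/* attaches to bpf_lsm_file_mprotect; a negative return denies the operation */
SEC("lsm/file_mprotect")
int BPF_PROG(mprotect_audit, struct vm_area_struct *vma,
             unsigned long reqprot, unsigned long prot, int ret)
{
        if (ret)                /* respect an earlier hook's decision */
                return ret;

        /* hypothetical policy: refuse to make any mapping executable */
        if (prot & 0x4 /* PROT_EXEC */)
                return -1;
        return 0;
}

User space then either calls bpf_program__attach_lsm() directly or relies on bpf_program__attach() picking attach_lsm() via the section definition above.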
static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
struct bpf_program *prog)
{
return bpf_program__attach_trace(prog);
}
+static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
+ struct bpf_program *prog)
+{
+ return bpf_program__attach_lsm(prog);
+}
+
+struct bpf_link *
+bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd)
+{
+ const struct bpf_sec_def *sec_def;
+ enum bpf_attach_type attach_type;
+ char errmsg[STRERR_BUFSIZE];
+ struct bpf_link *link;
+ int prog_fd, link_fd;
+
+ prog_fd = bpf_program__fd(prog);
+ if (prog_fd < 0) {
+ pr_warn("program '%s': can't attach before loaded\n",
+ bpf_program__title(prog, false));
+ return ERR_PTR(-EINVAL);
+ }
+
+ link = calloc(1, sizeof(*link));
+ if (!link)
+ return ERR_PTR(-ENOMEM);
+ link->detach = &bpf_link__detach_fd;
+
+ attach_type = bpf_program__get_expected_attach_type(prog);
+ if (!attach_type) {
+ sec_def = find_sec_def(bpf_program__title(prog, false));
+ if (sec_def)
+ attach_type = sec_def->attach_type;
+ }
+ link_fd = bpf_link_create(prog_fd, cgroup_fd, attach_type, NULL);
+ if (link_fd < 0) {
+ link_fd = -errno;
+ free(link);
+ pr_warn("program '%s': failed to attach to cgroup: %s\n",
+ bpf_program__title(prog, false),
+ libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
+ return ERR_PTR(link_fd);
+ }
+ link->fd = link_fd;
+ return link;
+}
+
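A user-space sketch of the new cgroup attach path; the cgroup path and section title are assumptions. The returned link is FD-based, so it also works with the pin and update APIs above.

#include <fcntl.h>
#include <unistd.h>
#include <bpf/libbpf.h>

static struct bpf_link *attach_egress(struct bpf_object *obj)
{
        struct bpf_program *prog;
        struct bpf_link *link;
        int cg_fd;

        /* hypothetical program in section "cgroup_skb/egress" */
        prog = bpf_object__find_program_by_title(obj, "cgroup_skb/egress");
        if (!prog)
                return NULL;

        cg_fd = open("/sys/fs/cgroup/unified/my.slice", O_RDONLY);
        if (cg_fd < 0)
                return NULL;

        /* expected_attach_type falls back to the section definition if unset */
        link = bpf_program__attach_cgroup(prog, cg_fd);
        close(cg_fd);
        return libbpf_get_error(link) ? NULL : link;
}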
struct bpf_link *bpf_program__attach(struct bpf_program *prog)
{
const struct bpf_sec_def *sec_def;
@@ -7410,10 +7592,9 @@ struct bpf_link *bpf_program__attach(struct bpf_program *prog)
static int bpf_link__detach_struct_ops(struct bpf_link *link)
{
- struct bpf_link_fd *l = (void *)link;
__u32 zero = 0;
- if (bpf_map_delete_elem(l->fd, &zero))
+ if (bpf_map_delete_elem(link->fd, &zero))
return -errno;
return 0;
@@ -7422,7 +7603,7 @@ static int bpf_link__detach_struct_ops(struct bpf_link *link)
struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map)
{
struct bpf_struct_ops *st_ops;
- struct bpf_link_fd *link;
+ struct bpf_link *link;
__u32 i, zero = 0;
int err;
@@ -7454,10 +7635,10 @@ struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map)
return ERR_PTR(err);
}
- link->link.detach = bpf_link__detach_struct_ops;
+ link->detach = bpf_link__detach_struct_ops;
link->fd = map->fd;
- return (struct bpf_link *)link;
+ return link;
}
enum bpf_perf_event_ret
@@ -8138,6 +8319,31 @@ void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
}
}
+int bpf_program__set_attach_target(struct bpf_program *prog,
+ int attach_prog_fd,
+ const char *attach_func_name)
+{
+ int btf_id;
+
+ if (!prog || attach_prog_fd < 0 || !attach_func_name)
+ return -EINVAL;
+
+ if (attach_prog_fd)
+ btf_id = libbpf_find_prog_btf_id(attach_func_name,
+ attach_prog_fd);
+ else
+ btf_id = __find_vmlinux_btf_id(prog->obj->btf_vmlinux,
+ attach_func_name,
+ prog->expected_attach_type);
+
+ if (btf_id < 0)
+ return btf_id;
+
+ prog->attach_btf_id = btf_id;
+ prog->attach_prog_fd = attach_prog_fd;
+ return 0;
+}
+
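bpf_program__set_attach_target() lets the caller choose the BTF attach target programmatically between open and load; because bpf_program__load() above now skips BTF ID resolution when attach_btf_id is already set, the value chosen here wins. A hedged sketch for pointing an freplace program at a function inside another, already-loaded program (section title and function name are assumptions):

#include <bpf/libbpf.h>

static int load_replacement(struct bpf_object *obj, int target_prog_fd)
{
        struct bpf_program *prog;
        int err;

        /* hypothetical program declared as SEC("freplace/connect_v4_prog") */
        prog = bpf_object__find_program_by_title(obj, "freplace/connect_v4_prog");
        if (!prog)
                return -1;

        /* resolve "connect_v4_prog" in the target program's BTF and record
         * both the BTF ID and the target FD for the upcoming load
         */
        err = bpf_program__set_attach_target(prog, target_prog_fd,
                                             "connect_v4_prog");
        if (err)
                return err;

        return bpf_object__load(obj);
}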
int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
{
int err = 0, n, len, start, end = -1;
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 3fe12c9d1f92..44df1d3e7287 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -219,6 +219,13 @@ LIBBPF_API void bpf_program__unload(struct bpf_program *prog);
struct bpf_link;
+LIBBPF_API struct bpf_link *bpf_link__open(const char *path);
+LIBBPF_API int bpf_link__fd(const struct bpf_link *link);
+LIBBPF_API const char *bpf_link__pin_path(const struct bpf_link *link);
+LIBBPF_API int bpf_link__pin(struct bpf_link *link, const char *path);
+LIBBPF_API int bpf_link__unpin(struct bpf_link *link);
+LIBBPF_API int bpf_link__update_program(struct bpf_link *link,
+ struct bpf_program *prog);
LIBBPF_API void bpf_link__disconnect(struct bpf_link *link);
LIBBPF_API int bpf_link__destroy(struct bpf_link *link);
@@ -240,11 +247,17 @@ bpf_program__attach_tracepoint(struct bpf_program *prog,
LIBBPF_API struct bpf_link *
bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
const char *tp_name);
-
LIBBPF_API struct bpf_link *
bpf_program__attach_trace(struct bpf_program *prog);
+LIBBPF_API struct bpf_link *
+bpf_program__attach_lsm(struct bpf_program *prog);
+LIBBPF_API struct bpf_link *
+bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd);
+
struct bpf_map;
+
LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map);
+
struct bpf_insn;
/*
@@ -316,6 +329,7 @@ LIBBPF_API int bpf_program__set_socket_filter(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_tracepoint(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_raw_tracepoint(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_kprobe(struct bpf_program *prog);
+LIBBPF_API int bpf_program__set_lsm(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_sched_cls(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_sched_act(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_xdp(struct bpf_program *prog);
@@ -334,10 +348,15 @@ LIBBPF_API void
bpf_program__set_expected_attach_type(struct bpf_program *prog,
enum bpf_attach_type type);
+LIBBPF_API int
+bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd,
+ const char *attach_func_name);
+
LIBBPF_API bool bpf_program__is_socket_filter(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_tracepoint(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_raw_tracepoint(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_kprobe(const struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_lsm(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_sched_cls(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_sched_act(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_xdp(const struct bpf_program *prog);
@@ -398,6 +417,8 @@ typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *);
LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv,
bpf_map_clear_priv_t clear_priv);
LIBBPF_API void *bpf_map__priv(const struct bpf_map *map);
+LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map,
+ const void *data, size_t size);
LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries);
LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map);
@@ -435,7 +456,15 @@ struct xdp_link_info {
__u8 attach_mode;
};
+struct bpf_xdp_set_link_opts {
+ size_t sz;
+ __u32 old_fd;
+};
+#define bpf_xdp_set_link_opts__last_field old_fd
+
LIBBPF_API int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags);
+LIBBPF_API int bpf_set_link_xdp_fd_opts(int ifindex, int fd, __u32 flags,
+ const struct bpf_xdp_set_link_opts *opts);
LIBBPF_API int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags);
LIBBPF_API int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
size_t info_size, __u32 flags);
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index b035122142bb..bb8831605b25 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -235,3 +235,22 @@ LIBBPF_0.0.7 {
btf__align_of;
libbpf_find_kernel_btf;
} LIBBPF_0.0.6;
+
+LIBBPF_0.0.8 {
+ global:
+ bpf_link__fd;
+ bpf_link__open;
+ bpf_link__pin;
+ bpf_link__pin_path;
+ bpf_link__unpin;
+ bpf_link__update_program;
+ bpf_link_create;
+ bpf_link_update;
+ bpf_map__set_initial_value;
+ bpf_program__attach_cgroup;
+ bpf_program__attach_lsm;
+ bpf_program__is_lsm;
+ bpf_program__set_attach_target;
+ bpf_program__set_lsm;
+ bpf_set_link_xdp_fd_opts;
+} LIBBPF_0.0.7;
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
index b782ebef6ac9..2c92059c0c90 100644
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -108,6 +108,7 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
case BPF_PROG_TYPE_TRACING:
case BPF_PROG_TYPE_STRUCT_OPS:
case BPF_PROG_TYPE_EXT:
+ case BPF_PROG_TYPE_LSM:
default:
break;
}
diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
index 431bd25c6cdb..18b5319025e1 100644
--- a/tools/lib/bpf/netlink.c
+++ b/tools/lib/bpf/netlink.c
@@ -132,7 +132,8 @@ done:
return ret;
}
-int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags)
+static int __bpf_set_link_xdp_fd_replace(int ifindex, int fd, int old_fd,
+ __u32 flags)
{
int sock, seq = 0, ret;
struct nlattr *nla, *nla_xdp;
@@ -178,6 +179,14 @@ int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags)
nla->nla_len += nla_xdp->nla_len;
}
+ if (flags & XDP_FLAGS_REPLACE) {
+ nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
+ nla_xdp->nla_type = IFLA_XDP_EXPECTED_FD;
+ nla_xdp->nla_len = NLA_HDRLEN + sizeof(old_fd);
+ memcpy((char *)nla_xdp + NLA_HDRLEN, &old_fd, sizeof(old_fd));
+ nla->nla_len += nla_xdp->nla_len;
+ }
+
req.nh.nlmsg_len += NLA_ALIGN(nla->nla_len);
if (send(sock, &req, req.nh.nlmsg_len, 0) < 0) {
@@ -191,6 +200,29 @@ cleanup:
return ret;
}
+int bpf_set_link_xdp_fd_opts(int ifindex, int fd, __u32 flags,
+ const struct bpf_xdp_set_link_opts *opts)
+{
+ int old_fd = -1;
+
+ if (!OPTS_VALID(opts, bpf_xdp_set_link_opts))
+ return -EINVAL;
+
+ if (OPTS_HAS(opts, old_fd)) {
+ old_fd = OPTS_GET(opts, old_fd, -1);
+ flags |= XDP_FLAGS_REPLACE;
+ }
+
+ return __bpf_set_link_xdp_fd_replace(ifindex, fd,
+ old_fd,
+ flags);
+}
+
+int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags)
+{
+ return __bpf_set_link_xdp_fd_replace(ifindex, fd, 0, flags);
+}
+
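When old_fd is set in the opts, the request carries IFLA_XDP_EXPECTED_FD and XDP_FLAGS_REPLACE, so the kernel only installs the new program if the one currently attached still matches. A hedged sketch; ifindex and FDs are assumptions:

#include <bpf/libbpf.h>

static int replace_xdp(int ifindex, int old_prog_fd, int new_prog_fd)
{
        DECLARE_LIBBPF_OPTS(bpf_xdp_set_link_opts, opts,
                /* the helper adds XDP_FLAGS_REPLACE because old_fd is set */
                .old_fd = old_prog_fd,
        );

        return bpf_set_link_xdp_fd_opts(ifindex, new_prog_fd, 0, &opts);
}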
static int __dump_link_nlmsg(struct nlmsghdr *nlh,
libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie)
{
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
index 9807903f121e..f7f4efb70a4c 100644
--- a/tools/lib/bpf/xsk.c
+++ b/tools/lib/bpf/xsk.c
@@ -280,7 +280,11 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
fill->consumer = map + off.fr.consumer;
fill->flags = map + off.fr.flags;
fill->ring = map + off.fr.desc;
- fill->cached_cons = umem->config.fill_size;
+ fill->cached_prod = *fill->producer;
+ /* cached_cons is "size" bigger than the real consumer pointer
+ * See xsk_prod_nb_free
+ */
+ fill->cached_cons = *fill->consumer + umem->config.fill_size;
map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, umem->fd,
@@ -297,6 +301,8 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
comp->consumer = map + off.cr.consumer;
comp->flags = map + off.cr.flags;
comp->ring = map + off.cr.desc;
+ comp->cached_prod = *comp->producer;
+ comp->cached_cons = *comp->consumer;
*umem_ptr = umem;
return 0;
@@ -672,6 +678,8 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
rx->consumer = rx_map + off.rx.consumer;
rx->flags = rx_map + off.rx.flags;
rx->ring = rx_map + off.rx.desc;
+ rx->cached_prod = *rx->producer;
+ rx->cached_cons = *rx->consumer;
}
xsk->rx = rx;
@@ -691,7 +699,11 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
tx->consumer = tx_map + off.tx.consumer;
tx->flags = tx_map + off.tx.flags;
tx->ring = tx_map + off.tx.desc;
- tx->cached_cons = xsk->config.tx_size;
+ tx->cached_prod = *tx->producer;
+ /* cached_cons is r->size bigger than the real consumer pointer
+ * See xsk_prod_nb_free
+ */
+ tx->cached_cons = *tx->consumer + xsk->config.tx_size;
}
xsk->tx = tx;
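The reason cached_cons is initialized to the consumer index plus the ring size is the fast path in xsk_prod_nb_free() (tools/lib/bpf/xsk.h): with that bias, free entries are a single subtraction in the common case. A simplified paraphrase of that check, not the verbatim header code:

#include <bpf/xsk.h>

static inline __u32 prod_free_entries(struct xsk_ring_prod *r, __u32 nb)
{
        __u32 free_entries = r->cached_cons - r->cached_prod;

        if (free_entries >= nb)
                return free_entries;

        /* refresh from the shared consumer index, keeping the +size bias so
         * the subtraction above stays correct on the next call
         */
        r->cached_cons = *r->consumer + r->size;
        return r->cached_cons - r->cached_prod;
}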