Diffstat (limited to 'kernel/bpf')
-rw-r--r--  kernel/bpf/core.c      65
-rw-r--r--  kernel/bpf/syscall.c   28
-rw-r--r--  kernel/bpf/verifier.c  14
3 files changed, 99 insertions, 8 deletions
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 82a04143368e..bdcc9f4ba767 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -136,6 +136,71 @@ void __bpf_prog_free(struct bpf_prog *fp)
vfree(fp);
}
+#define SHA_BPF_RAW_SIZE \
+ round_up(MAX_BPF_SIZE + sizeof(__be64) + 1, SHA_MESSAGE_BYTES)
+
+/* Called under verifier mutex. */
+void bpf_prog_calc_digest(struct bpf_prog *fp)
+{
+ const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
+ static u32 ws[SHA_WORKSPACE_WORDS];
+ static u8 raw[SHA_BPF_RAW_SIZE];
+ struct bpf_insn *dst = (void *)raw;
+ u32 i, bsize, psize, blocks;
+ bool was_ld_map;
+ u8 *todo = raw;
+ __be32 *result;
+ __be64 *bits;
+
+ sha_init(fp->digest);
+ memset(ws, 0, sizeof(ws));
+
+	/* We need to zero out the map fds for the digest calculation
+	 * since their values are unstable from the user-space side.
+	 */
+ for (i = 0, was_ld_map = false; i < fp->len; i++) {
+ dst[i] = fp->insnsi[i];
+ if (!was_ld_map &&
+ dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
+ dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
+ was_ld_map = true;
+ dst[i].imm = 0;
+ } else if (was_ld_map &&
+ dst[i].code == 0 &&
+ dst[i].dst_reg == 0 &&
+ dst[i].src_reg == 0 &&
+ dst[i].off == 0) {
+ was_ld_map = false;
+ dst[i].imm = 0;
+ } else {
+ was_ld_map = false;
+ }
+ }
+
+ psize = fp->len * sizeof(struct bpf_insn);
+ memset(&raw[psize], 0, sizeof(raw) - psize);
+ raw[psize++] = 0x80;
+
+ bsize = round_up(psize, SHA_MESSAGE_BYTES);
+ blocks = bsize / SHA_MESSAGE_BYTES;
+ if (bsize - psize >= sizeof(__be64)) {
+ bits = (__be64 *)(todo + bsize - sizeof(__be64));
+ } else {
+ bits = (__be64 *)(todo + bsize + bits_offset);
+ blocks++;
+ }
+ *bits = cpu_to_be64((psize - 1) << 3);
+
+ while (blocks--) {
+ sha_transform(fp->digest, todo, ws);
+ todo += SHA_MESSAGE_BYTES;
+ }
+
+ result = (__force __be32 *)fp->digest;
+ for (i = 0; i < SHA_DIGEST_WORDS; i++)
+ result[i] = cpu_to_be32(fp->digest[i]);
+}
+
static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
{
return BPF_CLASS(insn->code) == BPF_JMP &&
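
The hashing above is plain SHA-1 over a normalized copy of the program: for each BPF_LD_IMM64 map load, the imm halves carrying the map fd are zeroed, because fd numbers are not stable across processes. A user-space loader that wants to predict the kernel's prog_digest must apply the same normalization before hashing. Below is a minimal sketch of that loop for a user-space build against the uapi headers; the helper name is hypothetical, and any SHA-1 implementation can then be run over the padded result:

#include <linux/bpf.h>
#include <stdbool.h>

/* Hypothetical helper mirroring the fd-neutralization loop in
 * bpf_prog_calc_digest(): copy the insns, zeroing the imm fields of
 * both halves of a BPF_LD_IMM64 that loads a map fd.
 */
static void bpf_insns_neutralize_map_fds(struct bpf_insn *dst,
					 const struct bpf_insn *src,
					 unsigned int len)
{
	bool was_ld_map = false;
	unsigned int i;

	for (i = 0; i < len; i++) {
		dst[i] = src[i];
		if (!was_ld_map &&
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
			was_ld_map = true;	/* first half: fd in imm */
			dst[i].imm = 0;
		} else if (was_ld_map &&
			   dst[i].code == 0 &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&
			   dst[i].off == 0) {
			was_ld_map = false;	/* second half of imm64 */
			dst[i].imm = 0;
		} else {
			was_ld_map = false;
		}
	}
}

The padding that follows in the kernel function is the standard SHA-1 scheme: a 0x80 terminator byte, zero fill, and the big-endian bit count in the last 8 bytes of the final 64-byte block. A 56-byte payload is the worst case: after the terminator only 7 bytes remain, so the length word spills into an extra all-zero block, which is what the blocks++ branch handles.
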
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 85af86c496cd..88f609f1c0c3 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -662,8 +662,30 @@ static int bpf_prog_release(struct inode *inode, struct file *filp)
return 0;
}
+#ifdef CONFIG_PROC_FS
+static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
+{
+ const struct bpf_prog *prog = filp->private_data;
+ char prog_digest[sizeof(prog->digest) * 2 + 1] = { };
+
+ bin2hex(prog_digest, prog->digest, sizeof(prog->digest));
+ seq_printf(m,
+ "prog_type:\t%u\n"
+ "prog_jited:\t%u\n"
+ "prog_digest:\t%s\n"
+ "memlock:\t%llu\n",
+ prog->type,
+ prog->jited,
+ prog_digest,
+ prog->pages * 1ULL << PAGE_SHIFT);
+}
+#endif
+
static const struct file_operations bpf_prog_fops = {
- .release = bpf_prog_release,
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = bpf_prog_show_fdinfo,
+#endif
+ .release = bpf_prog_release,
};
int bpf_prog_new_fd(struct bpf_prog *prog)
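
With CONFIG_PROC_FS enabled, these fields become visible under /proc/<pid>/fdinfo/<fd> for any BPF prog fd. A small user-space sketch for dumping them (hypothetical helper, error handling trimmed):

#include <stdio.h>

static void dump_bpf_prog_fdinfo(int prog_fd)
{
	char path[64], line[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", prog_fd);
	f = fopen(path, "r");
	if (!f)
		return;
	/* Prints every fdinfo line, including the prog_type, prog_jited,
	 * prog_digest (hex) and memlock entries added above.
	 */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}
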
@@ -764,8 +786,8 @@ static int bpf_prog_load(union bpf_attr *attr)
/* eBPF programs must be GPL compatible to use GPL-ed functions */
is_gpl = license_is_gpl_compatible(license);
- if (attr->insn_cnt >= BPF_MAXINSNS)
- return -EINVAL;
+ if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
+ return -E2BIG;
if (type == BPF_PROG_TYPE_KPROBE &&
attr->kern_version != LINUX_VERSION_CODE)
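
The reworked bound also changes the error contract: insn_cnt == 0 and insn_cnt > BPF_MAXINSNS now both fail with E2BIG, while a program of exactly BPF_MAXINSNS instructions, which the old >= check refused, is accepted. A hedged raw-syscall sketch showing which bpf_attr fields the check reads (wrapper name is hypothetical):

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Thin wrapper around bpf(BPF_PROG_LOAD, ...). An insn_cnt of 0, or
 * anything above BPF_MAXINSNS (4096), now returns -1 with errno E2BIG.
 */
static int bpf_prog_load_raw(enum bpf_prog_type type,
			     const struct bpf_insn *insns, __u32 insn_cnt,
			     const char *license)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = type;
	attr.insns     = (__u64)(unsigned long)insns;
	attr.insn_cnt  = insn_cnt;
	attr.license   = (__u64)(unsigned long)license;

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}
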
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 0e742210750e..da9fb2a9b7eb 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1481,14 +1481,19 @@ static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
struct bpf_reg_state *src_reg = &regs[insn->src_reg];
u8 opcode = BPF_OP(insn->code);
- /* dst_reg->type == CONST_IMM here, simulate execution of 'add' insn.
- * Don't care about overflow or negative values, just add them
+ /* dst_reg->type == CONST_IMM here, simulate execution of 'add'/'or'
+	 * insn. Don't care about overflow or negative values, just add/or them
*/
if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_K)
dst_reg->imm += insn->imm;
else if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_X &&
src_reg->type == CONST_IMM)
dst_reg->imm += src_reg->imm;
+ else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_K)
+ dst_reg->imm |= insn->imm;
+ else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_X &&
+ src_reg->type == CONST_IMM)
+ dst_reg->imm |= src_reg->imm;
else
mark_reg_unknown_value(regs, insn->dst_reg);
return 0;
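
Previously an OR on a CONST_IMM register fell through to mark_reg_unknown_value(); now the constant is folded the same way BPF_ADD already was. A short sequence illustrating what the verifier can now prove (insn macros are the ones from the kernel's linux/filter.h, mirrored in tools/include for user space):

#include <linux/filter.h>

/* r1 starts as the constant 4; after the OR the verifier still tracks
 * it as CONST_IMM with value 6 instead of marking it unknown.
 */
struct bpf_insn or_const_prog[] = {
	BPF_MOV64_IMM(BPF_REG_1, 4),
	BPF_ALU64_IMM(BPF_OR, BPF_REG_1, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
};
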
@@ -3128,9 +3133,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
struct bpf_verifier_env *env;
int ret = -EINVAL;
- if ((*prog)->len <= 0 || (*prog)->len > BPF_MAXINSNS)
- return -E2BIG;
-
/* 'struct bpf_verifier_env' can be global, but since it's not small,
* allocate/free it every time bpf_check() is called
*/
@@ -3171,6 +3173,8 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
log_level = 0;
}
+ bpf_prog_calc_digest(env->prog);
+
ret = replace_map_fd_with_map_ptr(env);
if (ret < 0)
goto skip_full_check;
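
Note the placement: the digest is computed before replace_map_fd_with_map_ptr() rewrites the map-fd immediates into kernel pointers, so it sees fds rather than pointers, and the fd-zeroing loop in bpf_prog_calc_digest() keeps the result stable anyway. Tying the sketches together, a hedged end-to-end example reusing the hypothetical bpf_prog_load_raw() and dump_bpf_prog_fdinfo() helpers from above:

int main(void)
{
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),	/* return 0 */
		BPF_EXIT_INSN(),
	};
	int fd = bpf_prog_load_raw(BPF_PROG_TYPE_SOCKET_FILTER, insns,
				   sizeof(insns) / sizeof(insns[0]), "GPL");

	if (fd < 0)
		return 1;
	dump_bpf_prog_fdinfo(fd);	/* includes the prog_digest line */
	return 0;
}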