Diffstat (limited to 'tools')
-rw-r--r--tools/Makefile14
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-cgroup.rst5
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-feature.rst85
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-map.rst29
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-net.rst1
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-perf.rst1
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-prog.rst9
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool.rst5
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool114
-rw-r--r--tools/bpf/bpftool/btf_dumper.c98
-rw-r--r--tools/bpf/bpftool/cfg.c9
-rw-r--r--tools/bpf/bpftool/common.c6
-rw-r--r--tools/bpf/bpftool/feature.c764
-rw-r--r--tools/bpf/bpftool/main.c3
-rw-r--r--tools/bpf/bpftool/main.h4
-rw-r--r--tools/bpf/bpftool/map.c265
-rw-r--r--tools/bpf/bpftool/prog.c28
-rw-r--r--tools/build/Makefile.feature10
-rw-r--r--tools/build/feature/test-all.c10
-rw-r--r--tools/build/feature/test-get_current_dir_name.c1
-rw-r--r--tools/build/feature/test-libpython.c1
-rw-r--r--tools/build/feature/test-reallocarray.c2
-rw-r--r--tools/build/feature/test-sched_getcpu.c2
-rw-r--r--tools/build/feature/test-setns.c1
-rw-r--r--tools/debugging/Makefile16
-rwxr-xr-xtools/debugging/kernel-chktaint202
-rw-r--r--tools/firmware/ihex2fw.c17
-rw-r--r--tools/iio/iio_event_monitor.c14
-rw-r--r--tools/iio/iio_generic_buffer.c2
-rw-r--r--tools/include/linux/filter.h20
-rw-r--r--tools/include/linux/numa.h16
-rw-r--r--tools/include/linux/rbtree.h52
-rw-r--r--tools/include/linux/rbtree_augmented.h60
-rw-r--r--tools/include/nolibc/nolibc.h (renamed from tools/testing/selftests/rcutorture/bin/nolibc.h)118
-rw-r--r--tools/include/uapi/asm/bitsperlong.h2
-rw-r--r--tools/include/uapi/linux/bpf.h116
-rw-r--r--tools/include/uapi/linux/ethtool.h51
-rw-r--r--tools/include/uapi/linux/if_link.h1
-rw-r--r--tools/include/uapi/linux/if_xdp.h78
-rw-r--r--tools/include/uapi/linux/in.h2
-rw-r--r--tools/include/uapi/linux/lirc.h12
-rw-r--r--tools/include/uapi/linux/perf_event.h55
-rw-r--r--tools/include/uapi/linux/prctl.h1
-rw-r--r--tools/include/uapi/linux/tc_act/tc_bpf.h2
-rw-r--r--tools/io_uring/Makefile18
-rw-r--r--tools/io_uring/README29
-rw-r--r--tools/io_uring/barrier.h16
-rw-r--r--tools/io_uring/io_uring-bench.c616
-rw-r--r--tools/io_uring/io_uring-cp.c251
-rw-r--r--tools/io_uring/liburing.h143
-rw-r--r--tools/io_uring/queue.c164
-rw-r--r--tools/io_uring/setup.c103
-rw-r--r--tools/io_uring/syscall.c40
-rw-r--r--tools/lib/bpf/Build2
-rw-r--r--tools/lib/bpf/Makefile29
-rw-r--r--tools/lib/bpf/README.rst15
-rw-r--r--tools/lib/bpf/bpf.c80
-rw-r--r--tools/lib/bpf/bpf.h3
-rw-r--r--tools/lib/bpf/btf.c2198
-rw-r--r--tools/lib/bpf/btf.h46
-rw-r--r--tools/lib/bpf/libbpf.c212
-rw-r--r--tools/lib/bpf/libbpf.h44
-rw-r--r--tools/lib/bpf/libbpf.map30
-rw-r--r--tools/lib/bpf/libbpf_probes.c242
-rw-r--r--tools/lib/bpf/libbpf_util.h30
-rw-r--r--tools/lib/bpf/netlink.c85
-rw-r--r--tools/lib/bpf/test_libbpf.cpp4
-rw-r--r--tools/lib/bpf/xsk.c723
-rw-r--r--tools/lib/bpf/xsk.h203
-rw-r--r--tools/lib/lockdep/include/liblockdep/common.h2
-rw-r--r--tools/lib/lockdep/include/liblockdep/mutex.h11
-rwxr-xr-xtools/lib/lockdep/run_tests.sh6
-rw-r--r--tools/lib/lockdep/tests/ABBA.c9
-rw-r--r--tools/lib/rbtree.c178
-rw-r--r--tools/lib/traceevent/event-parse.c2
-rw-r--r--tools/memory-model/.gitignore1
-rw-r--r--tools/memory-model/README2
-rw-r--r--tools/memory-model/linux-kernel.bell3
-rw-r--r--tools/memory-model/linux-kernel.cat4
-rw-r--r--tools/memory-model/linux-kernel.def1
-rw-r--r--tools/memory-model/scripts/README70
-rwxr-xr-xtools/memory-model/scripts/checkalllitmus.sh53
-rw-r--r--tools/memory-model/scripts/checkghlitmus.sh65
-rwxr-xr-xtools/memory-model/scripts/checklitmus.sh74
-rw-r--r--tools/memory-model/scripts/checklitmushist.sh60
-rw-r--r--tools/memory-model/scripts/cmplitmushist.sh87
-rw-r--r--tools/memory-model/scripts/initlitmushist.sh68
-rw-r--r--tools/memory-model/scripts/judgelitmus.sh78
-rw-r--r--tools/memory-model/scripts/newlitmushist.sh61
-rw-r--r--tools/memory-model/scripts/parseargs.sh136
-rw-r--r--tools/memory-model/scripts/runlitmushist.sh87
-rw-r--r--tools/perf/Build10
-rw-r--r--tools/perf/Documentation/perf-c2c.txt16
-rw-r--r--tools/perf/Documentation/perf-config.txt31
-rw-r--r--tools/perf/Documentation/perf-diff.txt56
-rw-r--r--tools/perf/Documentation/perf-mem.txt2
-rw-r--r--tools/perf/Documentation/perf-record.txt19
-rw-r--r--tools/perf/Documentation/perf-script.txt6
-rw-r--r--tools/perf/Documentation/perf-trace.txt8
-rw-r--r--tools/perf/Documentation/perf.data-file-format.txt11
-rw-r--r--tools/perf/Makefile.config14
-rw-r--r--tools/perf/Makefile.perf28
-rw-r--r--tools/perf/arch/Build4
-rw-r--r--tools/perf/arch/arm/Build4
-rw-r--r--tools/perf/arch/arm/tests/Build8
-rw-r--r--tools/perf/arch/arm/tests/dwarf-unwind.c1
-rw-r--r--tools/perf/arch/arm/util/Build8
-rw-r--r--tools/perf/arch/arm/util/cs-etm.c98
-rw-r--r--tools/perf/arch/arm/util/cs-etm.h3
-rw-r--r--tools/perf/arch/arm/util/pmu.c3
-rw-r--r--tools/perf/arch/arm64/Build4
-rw-r--r--tools/perf/arch/arm64/annotate/instructions.c2
-rw-r--r--tools/perf/arch/arm64/tests/Build6
-rw-r--r--tools/perf/arch/arm64/tests/dwarf-unwind.c1
-rw-r--r--tools/perf/arch/arm64/util/Build12
-rw-r--r--tools/perf/arch/nds32/Build2
-rw-r--r--tools/perf/arch/nds32/util/Build2
-rw-r--r--tools/perf/arch/powerpc/Build4
-rw-r--r--tools/perf/arch/powerpc/tests/Build6
-rw-r--r--tools/perf/arch/powerpc/tests/dwarf-unwind.c1
-rw-r--r--tools/perf/arch/powerpc/util/Build17
-rw-r--r--tools/perf/arch/powerpc/util/kvm-stat.c2
-rw-r--r--tools/perf/arch/powerpc/util/mem-events.c11
-rw-r--r--tools/perf/arch/powerpc/util/skip-callchain-idx.c3
-rw-r--r--tools/perf/arch/s390/Build2
-rw-r--r--tools/perf/arch/s390/annotate/instructions.c2
-rw-r--r--tools/perf/arch/s390/util/Build12
-rw-r--r--tools/perf/arch/s390/util/kvm-stat.c1
-rw-r--r--tools/perf/arch/sh/Build2
-rw-r--r--tools/perf/arch/sh/util/Build2
-rw-r--r--tools/perf/arch/sparc/Build2
-rw-r--r--tools/perf/arch/sparc/util/Build2
-rw-r--r--tools/perf/arch/x86/Build4
-rw-r--r--tools/perf/arch/x86/tests/Build14
-rw-r--r--tools/perf/arch/x86/tests/dwarf-unwind.c1
-rw-r--r--tools/perf/arch/x86/util/Build30
-rw-r--r--tools/perf/arch/x86/util/kvm-stat.c1
-rw-r--r--tools/perf/arch/xtensa/Build2
-rw-r--r--tools/perf/arch/xtensa/util/Build2
-rw-r--r--tools/perf/bench/numa.c7
-rw-r--r--tools/perf/builtin-annotate.c9
-rw-r--r--tools/perf/builtin-buildid-cache.c4
-rw-r--r--tools/perf/builtin-buildid-list.c8
-rw-r--r--tools/perf/builtin-c2c.c31
-rw-r--r--tools/perf/builtin-diff.c190
-rw-r--r--tools/perf/builtin-evlist.c4
-rw-r--r--tools/perf/builtin-inject.c12
-rw-r--r--tools/perf/builtin-kallsyms.c1
-rw-r--r--tools/perf/builtin-kmem.c7
-rw-r--r--tools/perf/builtin-kvm.c8
-rw-r--r--tools/perf/builtin-list.c8
-rw-r--r--tools/perf/builtin-lock.c8
-rw-r--r--tools/perf/builtin-mem.c9
-rw-r--r--tools/perf/builtin-probe.c1
-rw-r--r--tools/perf/builtin-record.c74
-rw-r--r--tools/perf/builtin-report.c75
-rw-r--r--tools/perf/builtin-sched.c63
-rw-r--r--tools/perf/builtin-script.c70
-rw-r--r--tools/perf/builtin-stat.c16
-rw-r--r--tools/perf/builtin-timechart.c8
-rw-r--r--tools/perf/builtin-top.c23
-rw-r--r--tools/perf/builtin-trace.c63
-rw-r--r--tools/perf/design.txt4
-rw-r--r--tools/perf/examples/bpf/augmented_raw_syscalls.c18
-rw-r--r--tools/perf/examples/bpf/augmented_syscalls.c22
-rw-r--r--tools/perf/examples/bpf/etcsnoop.c18
-rw-r--r--tools/perf/include/bpf/bpf.h22
-rw-r--r--tools/perf/perf.h9
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/metrics.json2245
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/metrics.json1982
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json2
-rw-r--r--tools/perf/scripts/Build4
-rw-r--r--tools/perf/scripts/perl/Perf-Trace-Util/Build2
-rw-r--r--tools/perf/scripts/python/Perf-Trace-Util/Build2
-rw-r--r--tools/perf/scripts/python/check-perf-trace.py76
-rw-r--r--tools/perf/scripts/python/compaction-times.py8
-rw-r--r--tools/perf/scripts/python/event_analyzing_sample.py48
-rw-r--r--tools/perf/scripts/python/export-to-postgresql.py16
-rw-r--r--tools/perf/scripts/python/export-to-sqlite.py14
-rwxr-xr-xtools/perf/scripts/python/exported-sql-viewer.py857
-rw-r--r--tools/perf/scripts/python/failed-syscalls-by-pid.py43
-rw-r--r--tools/perf/scripts/python/futex-contention.py10
-rw-r--r--tools/perf/scripts/python/intel-pt-events.py60
-rw-r--r--tools/perf/scripts/python/mem-phys-addr.py25
-rwxr-xr-xtools/perf/scripts/python/net_dropmonitor.py12
-rw-r--r--tools/perf/scripts/python/netdev-times.py92
-rw-r--r--tools/perf/scripts/python/powerpc-hcalls.py18
-rw-r--r--tools/perf/scripts/python/sched-migration.py8
-rw-r--r--tools/perf/scripts/python/sctop.py27
-rwxr-xr-xtools/perf/scripts/python/stackcollapse.py9
-rw-r--r--tools/perf/scripts/python/stat-cpi.py11
-rw-r--r--tools/perf/scripts/python/syscall-counts-by-pid.py51
-rw-r--r--tools/perf/scripts/python/syscall-counts.py35
-rw-r--r--tools/perf/tests/attr.py33
-rw-r--r--tools/perf/tests/bp_account.c1
-rw-r--r--tools/perf/tests/code-reading.c2
-rw-r--r--tools/perf/tests/dwarf-unwind.c1
-rw-r--r--tools/perf/tests/evsel-tp-sched.c8
-rw-r--r--tools/perf/tests/hists_common.c9
-rw-r--r--tools/perf/tests/hists_cumulate.c15
-rw-r--r--tools/perf/tests/hists_filter.c1
-rw-r--r--tools/perf/tests/hists_link.c8
-rw-r--r--tools/perf/tests/hists_output.c33
-rw-r--r--tools/perf/tests/mmap-thread-lookup.c1
-rw-r--r--tools/perf/tests/parse-events.c30
-rw-r--r--tools/perf/tests/pmu.c2
-rw-r--r--tools/perf/tests/sample-parsing.c2
-rw-r--r--tools/perf/tests/sdt.c1
-rw-r--r--tools/perf/tests/shell/lib/probe.sh5
-rwxr-xr-xtools/perf/tests/shell/trace+probe_vfs_getname.sh1
-rw-r--r--tools/perf/trace/beauty/Build26
-rw-r--r--tools/perf/trace/beauty/ioctl.c2
-rw-r--r--tools/perf/trace/beauty/msg_flags.c2
-rw-r--r--tools/perf/trace/beauty/waitid_options.c2
-rw-r--r--tools/perf/ui/Build18
-rw-r--r--tools/perf/ui/browsers/Build10
-rw-r--r--tools/perf/ui/browsers/annotate.c1
-rw-r--r--tools/perf/ui/browsers/header.c2
-rw-r--r--tools/perf/ui/browsers/hists.c23
-rw-r--r--tools/perf/ui/browsers/map.c1
-rw-r--r--tools/perf/ui/gtk/annotate.c3
-rw-r--r--tools/perf/ui/gtk/hists.c7
-rw-r--r--tools/perf/ui/hist.c1
-rw-r--r--tools/perf/ui/stdio/hist.c7
-rw-r--r--tools/perf/ui/tui/Build8
-rw-r--r--tools/perf/util/Build276
-rw-r--r--tools/perf/util/annotate.c80
-rw-r--r--tools/perf/util/annotate.h21
-rw-r--r--tools/perf/util/auxtrace.c30
-rw-r--r--tools/perf/util/auxtrace.h5
-rw-r--r--tools/perf/util/block-range.c2
-rw-r--r--tools/perf/util/block-range.h6
-rw-r--r--tools/perf/util/bpf-event.c263
-rw-r--r--tools/perf/util/bpf-event.h38
-rw-r--r--tools/perf/util/bpf-loader.c31
-rw-r--r--tools/perf/util/bpf-loader.h7
-rw-r--r--tools/perf/util/bpf_map.c72
-rw-r--r--tools/perf/util/bpf_map.h22
-rw-r--r--tools/perf/util/branch.h27
-rw-r--r--tools/perf/util/build-id.c14
-rw-r--r--tools/perf/util/build-id.h3
-rw-r--r--tools/perf/util/c++/Build4
-rw-r--r--tools/perf/util/c++/clang.cpp4
-rw-r--r--tools/perf/util/callchain.c17
-rw-r--r--tools/perf/util/callchain.h21
-rw-r--r--tools/perf/util/color.c39
-rw-r--r--tools/perf/util/color.h1
-rw-r--r--tools/perf/util/color_config.c47
-rw-r--r--tools/perf/util/comm.c1
-rw-r--r--tools/perf/util/comm.h4
-rw-r--r--tools/perf/util/config.c1
-rw-r--r--tools/perf/util/cpu-set-sched.h50
-rw-r--r--tools/perf/util/cpumap.c12
-rw-r--r--tools/perf/util/cpumap.h1
-rw-r--r--tools/perf/util/cputopo.c277
-rw-r--r--tools/perf/util/cputopo.h33
-rw-r--r--tools/perf/util/cs-etm-decoder/Build2
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.c41
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.h16
-rw-r--r--tools/perf/util/cs-etm.c832
-rw-r--r--tools/perf/util/cs-etm.h57
-rw-r--r--tools/perf/util/data-convert-bt.c4
-rw-r--r--tools/perf/util/data.c175
-rw-r--r--tools/perf/util/data.h16
-rw-r--r--tools/perf/util/db-export.c16
-rw-r--r--tools/perf/util/db-export.h3
-rw-r--r--tools/perf/util/drv_configs.c78
-rw-r--r--tools/perf/util/drv_configs.h26
-rw-r--r--tools/perf/util/dso.c11
-rw-r--r--tools/perf/util/dso.h17
-rw-r--r--tools/perf/util/event.c43
-rw-r--r--tools/perf/util/event.h60
-rw-r--r--tools/perf/util/evlist.c31
-rw-r--r--tools/perf/util/evlist.h6
-rw-r--r--tools/perf/util/evsel.c35
-rw-r--r--tools/perf/util/evsel.h4
-rw-r--r--tools/perf/util/header.c289
-rw-r--r--tools/perf/util/hist.c271
-rw-r--r--tools/perf/util/hist.h19
-rw-r--r--tools/perf/util/intel-bts.c26
-rw-r--r--tools/perf/util/intel-pt-decoder/Build2
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c39
-rw-r--r--tools/perf/util/intel-pt.c25
-rw-r--r--tools/perf/util/intlist.h2
-rw-r--r--tools/perf/util/jitdump.c1
-rw-r--r--tools/perf/util/kvm-stat.h7
-rw-r--r--tools/perf/util/machine.c112
-rw-r--r--tools/perf/util/machine.h17
-rw-r--r--tools/perf/util/map.c14
-rw-r--r--tools/perf/util/map.h100
-rw-r--r--tools/perf/util/map_groups.h91
-rw-r--r--tools/perf/util/map_symbol.h22
-rw-r--r--tools/perf/util/mem-events.c2
-rw-r--r--tools/perf/util/metricgroup.c10
-rw-r--r--tools/perf/util/metricgroup.h3
-rw-r--r--tools/perf/util/mmap.c105
-rw-r--r--tools/perf/util/mmap.h3
-rw-r--r--tools/perf/util/parse-events.c2
-rw-r--r--tools/perf/util/parse-events.y4
-rw-r--r--tools/perf/util/pmu.c16
-rw-r--r--tools/perf/util/pmu.h6
-rw-r--r--tools/perf/util/probe-event.c15
-rw-r--r--tools/perf/util/probe-event.h5
-rw-r--r--tools/perf/util/probe-file.c1
-rw-r--r--tools/perf/util/rb_resort.h8
-rw-r--r--tools/perf/util/rblist.c28
-rw-r--r--tools/perf/util/rblist.h2
-rw-r--r--tools/perf/util/s390-cpumcf-kernel.h62
-rw-r--r--tools/perf/util/s390-cpumsf.c89
-rw-r--r--tools/perf/util/s390-sample-raw.c222
-rw-r--r--tools/perf/util/sample-raw.c18
-rw-r--r--tools/perf/util/sample-raw.h14
-rw-r--r--tools/perf/util/scripting-engines/Build4
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c2
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c25
-rw-r--r--tools/perf/util/session.c114
-rw-r--r--tools/perf/util/setup.py7
-rw-r--r--tools/perf/util/sort.c18
-rw-r--r--tools/perf/util/sort.h7
-rw-r--r--tools/perf/util/srccode.h13
-rw-r--r--tools/perf/util/srcline.c45
-rw-r--r--tools/perf/util/srcline.h13
-rw-r--r--tools/perf/util/stat-display.c1
-rw-r--r--tools/perf/util/stat-shadow.c2
-rw-r--r--tools/perf/util/strlist.h2
-rw-r--r--tools/perf/util/symbol-elf.c25
-rw-r--r--tools/perf/util/symbol-minimal.c1
-rw-r--r--tools/perf/util/symbol.c90
-rw-r--r--tools/perf/util/symbol.h102
-rw-r--r--tools/perf/util/symbol_conf.h73
-rw-r--r--tools/perf/util/symbol_fprintf.c3
-rw-r--r--tools/perf/util/thread-stack.c251
-rw-r--r--tools/perf/util/thread-stack.h9
-rw-r--r--tools/perf/util/thread.c24
-rw-r--r--tools/perf/util/thread.h11
-rw-r--r--tools/perf/util/time-utils.c51
-rw-r--r--tools/perf/util/time-utils.h6
-rw-r--r--tools/perf/util/tool.h5
-rw-r--r--tools/perf/util/unwind-libdw.c2
-rw-r--r--tools/perf/util/unwind-libunwind-local.c1
-rw-r--r--tools/perf/util/unwind-libunwind.c1
-rw-r--r--tools/perf/util/util.c82
-rw-r--r--tools/perf/util/util.h3
-rw-r--r--tools/perf/util/vdso.c1
-rw-r--r--tools/perf/util/zlib.c1
-rw-r--r--tools/power/acpi/common/cmfsize.c2
-rw-r--r--tools/power/acpi/common/getopt.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/oslinuxtbl.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixdir.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixmap.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixxf.c2
-rw-r--r--tools/power/acpi/tools/acpidump/acpidump.h2
-rw-r--r--tools/power/acpi/tools/acpidump/apdump.c2
-rw-r--r--tools/power/acpi/tools/acpidump/apfiles.c2
-rw-r--r--tools/power/acpi/tools/acpidump/apmain.c2
-rw-r--r--tools/testing/selftests/Makefile4
-rw-r--r--tools/testing/selftests/bpf/.gitignore2
-rw-r--r--tools/testing/selftests/bpf/Makefile144
-rw-r--r--tools/testing/selftests/bpf/bpf_helpers.h40
-rw-r--r--tools/testing/selftests/bpf/bpf_util.h39
-rw-r--r--tools/testing/selftests/bpf/flow_dissector_load.c43
-rw-r--r--tools/testing/selftests/bpf/flow_dissector_load.h55
-rw-r--r--tools/testing/selftests/bpf/prog_tests/.gitignore1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c249
-rw-r--r--tools/testing/selftests/bpf/prog_tests/flow_dissector.c72
-rw-r--r--tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c139
-rw-r--r--tools/testing/selftests/bpf/prog_tests/l4lb_all.c90
-rw-r--r--tools/testing/selftests/bpf/prog_tests/map_lock.c75
-rw-r--r--tools/testing/selftests/bpf/prog_tests/obj_name.c71
-rw-r--r--tools/testing/selftests/bpf/prog_tests/pkt_access.c29
-rw-r--r--tools/testing/selftests/bpf/prog_tests/pkt_md_access.c24
-rw-r--r--tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c49
-rw-r--r--tools/testing/selftests/bpf/prog_tests/queue_stack_map.c103
-rw-r--r--tools/testing/selftests/bpf/prog_tests/reference_tracking.c48
-rw-r--r--tools/testing/selftests/bpf/prog_tests/signal_pending.c48
-rw-r--r--tools/testing/selftests/bpf/prog_tests/spinlock.c29
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c165
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c150
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_map.c103
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c59
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c78
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c82
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tcp_estats.c19
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tp_attach_query.c132
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp.c46
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c31
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_noinline.c82
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_flow.c (renamed from tools/testing/selftests/bpf/bpf_flow.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/connect4_prog.c (renamed from tools/testing/selftests/bpf/connect4_prog.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/connect6_prog.c (renamed from tools/testing/selftests/bpf/connect6_prog.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/dev_cgroup.c (renamed from tools/testing/selftests/bpf/dev_cgroup.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c (renamed from tools/testing/selftests/bpf/get_cgroup_id_kern.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/netcnt_prog.c (renamed from tools/testing/selftests/bpf/netcnt_prog.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/sample_map_ret0.c (renamed from tools/testing/selftests/bpf/sample_map_ret0.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/sample_ret0.c (renamed from tools/testing/selftests/bpf/sample_ret0.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/sendmsg4_prog.c (renamed from tools/testing/selftests/bpf/sendmsg4_prog.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/sendmsg6_prog.c (renamed from tools/testing/selftests/bpf/sendmsg6_prog.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/socket_cookie_prog.c (renamed from tools/testing/selftests/bpf/socket_cookie_prog.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/sockmap_parse_prog.c (renamed from tools/testing/selftests/bpf/sockmap_parse_prog.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c (renamed from tools/testing/selftests/bpf/sockmap_tcp_msg_prog.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c (renamed from tools/testing/selftests/bpf/sockmap_verdict_prog.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_adjust_tail.c (renamed from tools/testing/selftests/bpf/test_adjust_tail.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_haskv.c (renamed from tools/testing/selftests/bpf/test_btf_haskv.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_nokv.c (renamed from tools/testing/selftests/bpf/test_btf_nokv.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c (renamed from tools/testing/selftests/bpf/test_get_stack_rawtp.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_l4lb.c (renamed from tools/testing/selftests/bpf/test_l4lb.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_l4lb_noinline.c (renamed from tools/testing/selftests/bpf/test_l4lb_noinline.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_lirc_mode2_kern.c (renamed from tools/testing/selftests/bpf/test_lirc_mode2_kern.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_lwt_ip_encap.c85
-rw-r--r--tools/testing/selftests/bpf/progs/test_lwt_seg6local.c (renamed from tools/testing/selftests/bpf/test_lwt_seg6local.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_map_in_map.c (renamed from tools/testing/selftests/bpf/test_map_in_map.c)4
-rw-r--r--tools/testing/selftests/bpf/progs/test_map_lock.c66
-rw-r--r--tools/testing/selftests/bpf/progs/test_obj_id.c (renamed from tools/testing/selftests/bpf/test_obj_id.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_pkt_access.c (renamed from tools/testing/selftests/bpf/test_pkt_access.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_pkt_md_access.c (renamed from tools/testing/selftests/bpf/test_pkt_md_access.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_queue_map.c (renamed from tools/testing/selftests/bpf/test_queue_map.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c (renamed from tools/testing/selftests/bpf/test_select_reuseport_kern.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c (renamed from tools/testing/selftests/bpf/test_sk_lookup_kern.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c (renamed from tools/testing/selftests/bpf/test_skb_cgroup_id_kern.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_sock_fields_kern.c152
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockhash_kern.c (renamed from tools/testing/selftests/bpf/test_sockhash_kern.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_kern.c (renamed from tools/testing/selftests/bpf/test_sockmap_kern.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_spin_lock.c108
-rw-r--r--tools/testing/selftests/bpf/progs/test_stack_map.c (renamed from tools/testing/selftests/bpf/test_stack_map.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c (renamed from tools/testing/selftests/bpf/test_stacktrace_build_id.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_stacktrace_map.c (renamed from tools/testing/selftests/bpf/test_stacktrace_map.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_estats.c (renamed from tools/testing/selftests/bpf/test_tcp_estats.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c (renamed from tools/testing/selftests/bpf/test_tcpbpf_kern.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c (renamed from tools/testing/selftests/bpf/test_tcpnotify_kern.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_tracepoint.c (renamed from tools/testing/selftests/bpf/test_tracepoint.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_tunnel_kern.c (renamed from tools/testing/selftests/bpf/test_tunnel_kern.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp.c (renamed from tools/testing/selftests/bpf/test_xdp.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_meta.c (renamed from tools/testing/selftests/bpf/test_xdp_meta.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_noinline.c (renamed from tools/testing/selftests/bpf/test_xdp_noinline.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_redirect.c (renamed from tools/testing/selftests/bpf/test_xdp_redirect.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_vlan.c (renamed from tools/testing/selftests/bpf/test_xdp_vlan.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_dummy.c (renamed from tools/testing/selftests/bpf/xdp_dummy.c)0
-rwxr-xr-xtools/testing/selftests/bpf/tcp_client.py3
-rwxr-xr-xtools/testing/selftests/bpf/tcp_server.py5
-rw-r--r--tools/testing/selftests/bpf/test_btf.c1300
-rw-r--r--tools/testing/selftests/bpf/test_flow_dissector.c2
-rw-r--r--tools/testing/selftests/bpf/test_libbpf_open.c32
-rw-r--r--tools/testing/selftests/bpf/test_lpm_map.c10
-rwxr-xr-xtools/testing/selftests/bpf/test_lwt_ip_encap.sh426
-rw-r--r--tools/testing/selftests/bpf/test_maps.c41
-rwxr-xr-xtools/testing/selftests/bpf/test_offload.py135
-rw-r--r--tools/testing/selftests/bpf/test_progs.c1824
-rw-r--r--tools/testing/selftests/bpf/test_progs.h93
-rw-r--r--tools/testing/selftests/bpf/test_sock.c9
-rw-r--r--tools/testing/selftests/bpf/test_sock_fields.c328
-rw-r--r--tools/testing/selftests/bpf/test_socket_cookie.c4
-rw-r--r--tools/testing/selftests/bpf/test_sockmap.c1
-rw-r--r--tools/testing/selftests/bpf/test_tcpnotify_user.c6
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c15575
-rw-r--r--tools/testing/selftests/bpf/verifier/.gitignore1
-rw-r--r--tools/testing/selftests/bpf/verifier/and.c50
-rw-r--r--tools/testing/selftests/bpf/verifier/array_access.c219
-rw-r--r--tools/testing/selftests/bpf/verifier/basic.c23
-rw-r--r--tools/testing/selftests/bpf/verifier/basic_call.c50
-rw-r--r--tools/testing/selftests/bpf/verifier/basic_instr.c134
-rw-r--r--tools/testing/selftests/bpf/verifier/basic_stack.c64
-rw-r--r--tools/testing/selftests/bpf/verifier/basic_stx_ldx.c45
-rw-r--r--tools/testing/selftests/bpf/verifier/bounds.c508
-rw-r--r--tools/testing/selftests/bpf/verifier/bounds_deduction.c124
-rw-r--r--tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c406
-rw-r--r--tools/testing/selftests/bpf/verifier/bpf_get_stack.c44
-rw-r--r--tools/testing/selftests/bpf/verifier/calls.c1942
-rw-r--r--tools/testing/selftests/bpf/verifier/cfg.c70
-rw-r--r--tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c72
-rw-r--r--tools/testing/selftests/bpf/verifier/cgroup_skb.c197
-rw-r--r--tools/testing/selftests/bpf/verifier/cgroup_storage.c220
-rw-r--r--tools/testing/selftests/bpf/verifier/const_or.c60
-rw-r--r--tools/testing/selftests/bpf/verifier/ctx.c93
-rw-r--r--tools/testing/selftests/bpf/verifier/ctx_sk_msg.c181
-rw-r--r--tools/testing/selftests/bpf/verifier/ctx_skb.c1034
-rw-r--r--tools/testing/selftests/bpf/verifier/dead_code.c159
-rw-r--r--tools/testing/selftests/bpf/verifier/direct_packet_access.c633
-rw-r--r--tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c40
-rw-r--r--tools/testing/selftests/bpf/verifier/div0.c184
-rw-r--r--tools/testing/selftests/bpf/verifier/div_overflow.c104
-rw-r--r--tools/testing/selftests/bpf/verifier/helper_access_var_len.c614
-rw-r--r--tools/testing/selftests/bpf/verifier/helper_packet_access.c460
-rw-r--r--tools/testing/selftests/bpf/verifier/helper_value_access.c953
-rw-r--r--tools/testing/selftests/bpf/verifier/jit.c88
-rw-r--r--tools/testing/selftests/bpf/verifier/jmp32.c746
-rw-r--r--tools/testing/selftests/bpf/verifier/jset.c167
-rw-r--r--tools/testing/selftests/bpf/verifier/jump.c180
-rw-r--r--tools/testing/selftests/bpf/verifier/junk_insn.c45
-rw-r--r--tools/testing/selftests/bpf/verifier/ld_abs.c286
-rw-r--r--tools/testing/selftests/bpf/verifier/ld_dw.c36
-rw-r--r--tools/testing/selftests/bpf/verifier/ld_imm64.c154
-rw-r--r--tools/testing/selftests/bpf/verifier/ld_ind.c72
-rw-r--r--tools/testing/selftests/bpf/verifier/leak_ptr.c67
-rw-r--r--tools/testing/selftests/bpf/verifier/lwt.c189
-rw-r--r--tools/testing/selftests/bpf/verifier/map_in_map.c62
-rw-r--r--tools/testing/selftests/bpf/verifier/map_ptr_mixing.c100
-rw-r--r--tools/testing/selftests/bpf/verifier/map_ret_val.c65
-rw-r--r--tools/testing/selftests/bpf/verifier/masking.c322
-rw-r--r--tools/testing/selftests/bpf/verifier/meta_access.c235
-rw-r--r--tools/testing/selftests/bpf/verifier/perf_event_sample_period.c59
-rw-r--r--tools/testing/selftests/bpf/verifier/prevent_map_lookup.c74
-rw-r--r--tools/testing/selftests/bpf/verifier/raw_stack.c305
-rw-r--r--tools/testing/selftests/bpf/verifier/ref_tracking.c607
-rw-r--r--tools/testing/selftests/bpf/verifier/runtime_jit.c80
-rw-r--r--tools/testing/selftests/bpf/verifier/search_pruning.c156
-rw-r--r--tools/testing/selftests/bpf/verifier/sock.c384
-rw-r--r--tools/testing/selftests/bpf/verifier/spill_fill.c76
-rw-r--r--tools/testing/selftests/bpf/verifier/spin_lock.c333
-rw-r--r--tools/testing/selftests/bpf/verifier/stack_ptr.c317
-rw-r--r--tools/testing/selftests/bpf/verifier/uninit.c39
-rw-r--r--tools/testing/selftests/bpf/verifier/unpriv.c522
-rw-r--r--tools/testing/selftests/bpf/verifier/value.c104
-rw-r--r--tools/testing/selftests/bpf/verifier/value_adj_spill.c43
-rw-r--r--tools/testing/selftests/bpf/verifier/value_illegal_alu.c94
-rw-r--r--tools/testing/selftests/bpf/verifier/value_or_null.c152
-rw-r--r--tools/testing/selftests/bpf/verifier/value_ptr_arith.c838
-rw-r--r--tools/testing/selftests/bpf/verifier/var_off.c66
-rw-r--r--tools/testing/selftests/bpf/verifier/xadd.c97
-rw-r--r--tools/testing/selftests/bpf/verifier/xdp.c14
-rw-r--r--tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c900
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/blackhole_routes.sh200
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh3
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh3
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh459
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh1
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/vxlan_fdb_veto.sh126
-rw-r--r--tools/testing/selftests/filesystems/binderfs/.gitignore1
-rw-r--r--tools/testing/selftests/filesystems/binderfs/Makefile6
-rw-r--r--tools/testing/selftests/filesystems/binderfs/binderfs_test.c275
-rw-r--r--tools/testing/selftests/filesystems/binderfs/config3
-rw-r--r--tools/testing/selftests/firmware/config1
-rwxr-xr-xtools/testing/selftests/firmware/fw_filesystem.sh9
-rwxr-xr-xtools/testing/selftests/firmware/fw_lib.sh2
-rwxr-xr-xtools/testing/selftests/ftrace/ftracetest21
-rw-r--r--tools/testing/selftests/ir/ir_loopback.c15
-rwxr-xr-xtools/testing/selftests/ir/ir_loopback.sh5
-rw-r--r--tools/testing/selftests/kselftest.h1
-rw-r--r--tools/testing/selftests/kselftest_harness.h10
-rw-r--r--tools/testing/selftests/livepatch/Makefile8
-rw-r--r--tools/testing/selftests/livepatch/README43
-rw-r--r--tools/testing/selftests/livepatch/config1
-rw-r--r--tools/testing/selftests/livepatch/functions.sh198
-rwxr-xr-xtools/testing/selftests/livepatch/test-callbacks.sh587
-rwxr-xr-xtools/testing/selftests/livepatch/test-livepatch.sh168
-rwxr-xr-xtools/testing/selftests/livepatch/test-shadow-vars.sh60
-rw-r--r--tools/testing/selftests/memfd/memfd_test.c74
-rw-r--r--tools/testing/selftests/net/config3
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh1
-rw-r--r--tools/testing/selftests/net/forwarding/config2
-rw-r--r--tools/testing/selftests/net/forwarding/devlink_lib.sh2
-rw-r--r--tools/testing/selftests/net/forwarding/forwarding.config.sample3
-rwxr-xr-xtools/testing/selftests/net/forwarding/ipip_flat_gre.sh63
-rwxr-xr-xtools/testing/selftests/net/forwarding/ipip_flat_gre_key.sh63
-rwxr-xr-xtools/testing/selftests/net/forwarding/ipip_flat_gre_keys.sh63
-rwxr-xr-xtools/testing/selftests/net/forwarding/ipip_hier_gre.sh63
-rwxr-xr-xtools/testing/selftests/net/forwarding/ipip_hier_gre_key.sh63
-rwxr-xr-xtools/testing/selftests/net/forwarding/ipip_hier_gre_keys.sh63
-rw-r--r--tools/testing/selftests/net/forwarding/ipip_lib.sh349
-rw-r--r--tools/testing/selftests/net/forwarding/lib.sh9
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh11
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_changes.sh1
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_flower.sh4
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh88
-rw-r--r--tools/testing/selftests/net/forwarding/mirror_lib.sh5
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_broadcast.sh3
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_asymmetric.sh567
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_symmetric.sh551
-rw-r--r--tools/testing/selftests/net/ip_defrag.c69
-rwxr-xr-xtools/testing/selftests/net/ip_defrag.sh16
-rwxr-xr-xtools/testing/selftests/net/pmtu.sh96
-rwxr-xr-xtools/testing/selftests/net/rtnetlink.sh140
-rw-r--r--tools/testing/selftests/net/tls.c164
-rwxr-xr-xtools/testing/selftests/net/udpgro.sh8
-rw-r--r--tools/testing/selftests/net/udpgso.c1
-rw-r--r--tools/testing/selftests/net/udpgso_bench_rx.c42
-rw-r--r--tools/testing/selftests/netfilter/Makefile2
-rw-r--r--tools/testing/selftests/netfilter/config2
-rwxr-xr-xtools/testing/selftests/netfilter/nft_nat.sh762
-rw-r--r--tools/testing/selftests/networking/timestamping/Makefile3
-rw-r--r--tools/testing/selftests/networking/timestamping/rxtimestamp.c1
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/null_syscall.c2
-rw-r--r--tools/testing/selftests/powerpc/include/reg.h8
-rw-r--r--tools/testing/selftests/powerpc/include/utils.h2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/fork_cleanup_test.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/tm/Makefile4
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-context-force-tm.c184
-rw-r--r--tools/testing/selftests/proc/.gitignore1
-rw-r--r--tools/testing/selftests/proc/Makefile1
-rw-r--r--tools/testing/selftests/proc/proc-loadavg-001.c2
-rw-r--r--tools/testing/selftests/proc/proc-pid-vm.c406
-rw-r--r--tools/testing/selftests/proc/proc-self-map-files-002.c2
-rw-r--r--tools/testing/selftests/proc/proc-self-syscall.c3
-rw-r--r--tools/testing/selftests/proc/proc-self-wchan.c2
-rw-r--r--tools/testing/selftests/proc/read.c14
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/mkinitrd.sh27
-rw-r--r--tools/testing/selftests/safesetid/.gitignore1
-rw-r--r--tools/testing/selftests/safesetid/Makefile8
-rw-r--r--tools/testing/selftests/safesetid/config2
-rw-r--r--tools/testing/selftests/safesetid/safesetid-test.c334
-rwxr-xr-xtools/testing/selftests/safesetid/safesetid-test.sh26
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c47
-rw-r--r--tools/testing/selftests/tc-testing/.gitignore1
-rw-r--r--tools/testing/selftests/tc-testing/TdcPlugin.py4
-rw-r--r--tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt5
-rw-r--r--tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py16
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/concurrency.json177
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc.py15
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc_batch.py58
-rw-r--r--tools/testing/selftests/tc-testing/tdc_config.py1
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc_multibatch.py65
-rw-r--r--tools/testing/selftests/tmpfs/.gitignore1
-rw-r--r--tools/testing/selftests/tmpfs/Makefile7
-rw-r--r--tools/testing/selftests/tmpfs/bug-link-o-tmpfile.c67
-rw-r--r--tools/testing/selftests/tpm2/Makefile4
-rwxr-xr-xtools/testing/selftests/tpm2/test_smoke.sh4
-rwxr-xr-xtools/testing/selftests/tpm2/test_space.sh4
-rw-r--r--tools/testing/selftests/tpm2/tpm2.py696
-rw-r--r--tools/testing/selftests/tpm2/tpm2_tests.py227
-rw-r--r--tools/testing/selftests/vm/map_hugetlb.c29
-rwxr-xr-xtools/testing/selftests/vm/run_vmtests16
-rwxr-xr-xtools/testing/selftests/vm/test_vmalloc.sh176
-rw-r--r--tools/vm/page-types.c2
-rw-r--r--tools/vm/slabinfo.c35
624 files changed, 47925 insertions, 21028 deletions
diff --git a/tools/Makefile b/tools/Makefile
index 77f1aee8ea01..3dfd72ae6c1a 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -12,6 +12,7 @@ help:
@echo ' acpi - ACPI tools'
@echo ' cgroup - cgroup tools'
@echo ' cpupower - a tool for all things x86 CPU power'
+ @echo ' debugging - tools for debugging'
@echo ' firewire - the userspace part of nosy, an IEEE-1394 traffic sniffer'
@echo ' firmware - Firmware tools'
@echo ' freefall - laptop accelerometer program for disk protection'
@@ -61,7 +62,7 @@ acpi: FORCE
cpupower: FORCE
$(call descend,power/$@)
-cgroup firewire hv guest spi usb virtio vm bpf iio gpio objtool leds wmi pci firmware: FORCE
+cgroup firewire hv guest spi usb virtio vm bpf iio gpio objtool leds wmi pci firmware debugging: FORCE
$(call descend,$@)
liblockdep: FORCE
@@ -96,7 +97,8 @@ kvm_stat: FORCE
all: acpi cgroup cpupower gpio hv firewire liblockdep \
perf selftests spi turbostat usb \
virtio vm bpf x86_energy_perf_policy \
- tmon freefall iio objtool kvm_stat wmi pci
+ tmon freefall iio objtool kvm_stat wmi \
+ pci debugging
acpi_install:
$(call descend,power/$(@:_install=),install)
@@ -104,7 +106,7 @@ acpi_install:
cpupower_install:
$(call descend,power/$(@:_install=),install)
-cgroup_install firewire_install gpio_install hv_install iio_install perf_install spi_install usb_install virtio_install vm_install bpf_install objtool_install wmi_install pci_install:
+cgroup_install firewire_install gpio_install hv_install iio_install perf_install spi_install usb_install virtio_install vm_install bpf_install objtool_install wmi_install pci_install debugging_install:
$(call descend,$(@:_install=),install)
liblockdep_install:
@@ -130,7 +132,7 @@ install: acpi_install cgroup_install cpupower_install gpio_install \
perf_install selftests_install turbostat_install usb_install \
virtio_install vm_install bpf_install x86_energy_perf_policy_install \
tmon_install freefall_install objtool_install kvm_stat_install \
- wmi_install pci_install
+ wmi_install pci_install debugging_install
acpi_clean:
$(call descend,power/acpi,clean)
@@ -138,7 +140,7 @@ acpi_clean:
cpupower_clean:
$(call descend,power/cpupower,clean)
-cgroup_clean hv_clean firewire_clean spi_clean usb_clean virtio_clean vm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean:
+cgroup_clean hv_clean firewire_clean spi_clean usb_clean virtio_clean vm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean debugging_clean:
$(call descend,$(@:_clean=),clean)
liblockdep_clean:
@@ -176,6 +178,6 @@ clean: acpi_clean cgroup_clean cpupower_clean hv_clean firewire_clean \
perf_clean selftests_clean turbostat_clean spi_clean usb_clean virtio_clean \
vm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
freefall_clean build_clean libbpf_clean libsubcmd_clean liblockdep_clean \
- gpio_clean objtool_clean leds_clean wmi_clean pci_clean firmware_clean
+ gpio_clean objtool_clean leds_clean wmi_clean pci_clean firmware_clean debugging_clean
.PHONY: FORCE
diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
index d07ccf8a23f7..9bb9ace54ba8 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
@@ -17,8 +17,8 @@ SYNOPSIS
*COMMANDS* :=
{ **show** | **list** | **tree** | **attach** | **detach** | **help** }
-MAP COMMANDS
-=============
+CGROUP COMMANDS
+===============
| **bpftool** **cgroup { show | list }** *CGROUP*
| **bpftool** **cgroup tree** [*CGROUP_ROOT*]
@@ -142,5 +142,6 @@ SEE ALSO
**bpftool**\ (8),
**bpftool-prog**\ (8),
**bpftool-map**\ (8),
+ **bpftool-feature**\ (8),
**bpftool-net**\ (8),
**bpftool-perf**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-feature.rst b/tools/bpf/bpftool/Documentation/bpftool-feature.rst
new file mode 100644
index 000000000000..82de03dd8f52
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-feature.rst
@@ -0,0 +1,85 @@
+===============
+bpftool-feature
+===============
+-------------------------------------------------------------------------------
+tool for inspection of eBPF-related parameters for Linux kernel or net device
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+SYNOPSIS
+========
+
+ **bpftool** [*OPTIONS*] **feature** *COMMAND*
+
+ *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] }
+
+ *COMMANDS* := { **probe** | **help** }
+
+FEATURE COMMANDS
+================
+
+| **bpftool** **feature probe** [*COMPONENT*] [**macros** [**prefix** *PREFIX*]]
+| **bpftool** **feature help**
+|
+| *COMPONENT* := { **kernel** | **dev** *NAME* }
+
+DESCRIPTION
+===========
+ **bpftool feature probe** [**kernel**] [**macros** [**prefix** *PREFIX*]]
+ Probe the running kernel and dump a number of eBPF-related
+ parameters, such as availability of the **bpf()** system call,
+ JIT status, eBPF program types availability, eBPF helper
+ functions availability, and more.
+
+ If the **macros** keyword (but not the **-j** option) is
+ passed, a subset of the output is dumped as a list of
+ **#define** macros that are ready to be included in a C
+ header file, for example. If, additionally, **prefix** is
+ used to define a *PREFIX*, the provided string will be used
+ as a prefix to the names of the macros: this can be used to
+ avoid conflicts on macro names when including the output of
+ this command as a header file.
+
+ Keyword **kernel** can be omitted. If no probe target is
+ specified, probing the kernel is the default behaviour.
+
+ Note that when probed, some eBPF helpers (e.g.
+ **bpf_trace_printk**\ () or **bpf_probe_write_user**\ ()) may
+ print warnings to kernel logs.
+
+ **bpftool feature probe dev** *NAME* [**macros** [**prefix** *PREFIX*]]
+ Probe network device for supported eBPF features and dump
+ results to the console.
+
+ The two keywords **macros** and **prefix** have the same
+ role as when probing the kernel.
+
+ **bpftool feature help**
+ Print short help message.
+
+OPTIONS
+=======
+ -h, --help
+ Print short generic help message (similar to **bpftool help**).
+
+ -v, --version
+ Print version number (similar to **bpftool version**).
+
+ -j, --json
+ Generate JSON output. For commands that cannot produce JSON, this
+ option has no effect.
+
+ -p, --pretty
+ Generate human-readable JSON output. Implies **-j**.
+
+SEE ALSO
+========
+ **bpf**\ (2),
+ **bpf-helpers**\ (7),
+ **bpftool**\ (8),
+ **bpftool-prog**\ (8),
+ **bpftool-map**\ (8),
+ **bpftool-cgroup**\ (8),
+ **bpftool-net**\ (8),
+ **bpftool-perf**\ (8)
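
For orientation, a sketch of how the probe subcommand documented in the new page above might be invoked, following its SYNOPSIS and DESCRIPTION; the interface name and the macro prefix below are placeholders, not values mandated by the tool:

    # Probe the running kernel and print results as plain text
    # (the "kernel" keyword may be omitted, per DESCRIPTION).
    bpftool feature probe kernel

    # Dump a subset of the results as #define macros, with a prefix to
    # avoid name clashes when the output is used as a C header.
    # BPFTOOL_ is an arbitrary example prefix.
    bpftool feature probe macros prefix BPFTOOL_ > bpf_features.h

    # Probe a network device for supported eBPF features.
    # "em1" is a placeholder interface name.
    bpftool feature probe dev em1
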
diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst
index 64b001b4f777..5c984ffc9f01 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-map.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst
@@ -25,12 +25,17 @@ MAP COMMANDS
| **bpftool** **map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* \
| **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**dev** *NAME*]
| **bpftool** **map dump** *MAP*
-| **bpftool** **map update** *MAP* **key** *DATA* **value** *VALUE* [*UPDATE_FLAGS*]
-| **bpftool** **map lookup** *MAP* **key** *DATA*
+| **bpftool** **map update** *MAP* [**key** *DATA*] [**value** *VALUE*] [*UPDATE_FLAGS*]
+| **bpftool** **map lookup** *MAP* [**key** *DATA*]
| **bpftool** **map getnext** *MAP* [**key** *DATA*]
| **bpftool** **map delete** *MAP* **key** *DATA*
| **bpftool** **map pin** *MAP* *FILE*
| **bpftool** **map event_pipe** *MAP* [**cpu** *N* **index** *M*]
+| **bpftool** **map peek** *MAP*
+| **bpftool** **map push** *MAP* **value** *VALUE*
+| **bpftool** **map pop** *MAP*
+| **bpftool** **map enqueue** *MAP* **value** *VALUE*
+| **bpftool** **map dequeue** *MAP*
| **bpftool** **map help**
|
| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* }
@@ -62,7 +67,7 @@ DESCRIPTION
**bpftool map dump** *MAP*
Dump all entries in a given *MAP*.
- **bpftool map update** *MAP* **key** *DATA* **value** *VALUE* [*UPDATE_FLAGS*]
+ **bpftool map update** *MAP* [**key** *DATA*] [**value** *VALUE*] [*UPDATE_FLAGS*]
Update map entry for a given *KEY*.
*UPDATE_FLAGS* can be one of: **any** update existing entry
@@ -75,7 +80,7 @@ DESCRIPTION
the bytes are parsed as decimal values, unless a "0x" prefix
(for hexadecimal) or a "0" prefix (for octal) is provided.
- **bpftool map lookup** *MAP* **key** *DATA*
+ **bpftool map lookup** *MAP* [**key** *DATA*]
Lookup **key** in the map.
**bpftool map getnext** *MAP* [**key** *DATA*]
@@ -107,6 +112,21 @@ DESCRIPTION
replace any existing ring. Any other application will stop
receiving events if it installed its rings earlier.
+ **bpftool map peek** *MAP*
+ Peek next **value** in the queue or stack.
+
+ **bpftool map push** *MAP* **value** *VALUE*
+ Push **value** onto the stack.
+
+ **bpftool map pop** *MAP*
+ Pop and print **value** from the stack.
+
+ **bpftool map enqueue** *MAP* **value** *VALUE*
+ Enqueue **value** into the queue.
+
+ **bpftool map dequeue** *MAP*
+ Dequeue and print **value** from the queue.
+
**bpftool map help**
Print short help message.
@@ -236,5 +256,6 @@ SEE ALSO
**bpftool**\ (8),
**bpftool-prog**\ (8),
**bpftool-cgroup**\ (8),
+ **bpftool-feature**\ (8),
**bpftool-net**\ (8),
**bpftool-perf**\ (8)
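
A sketch of the new queue/stack operations described above, assuming a stack map and a queue map are already pinned under /sys/fs/bpf; the pin paths and the 4-byte values are illustrative only, and the byte format follows the DATA rules quoted earlier (a "0x" prefix selects hexadecimal):

    # Stack map: push a value, peek at the top, then pop it.
    bpftool map push pinned /sys/fs/bpf/my_stack value 0x01 0x00 0x00 0x00
    bpftool map peek pinned /sys/fs/bpf/my_stack   # print next value, leave it in place
    bpftool map pop  pinned /sys/fs/bpf/my_stack   # print next value and remove it

    # Queue map: enqueue a value, then dequeue it.
    bpftool map enqueue pinned /sys/fs/bpf/my_queue value 0x2a 0x00 0x00 0x00
    bpftool map dequeue pinned /sys/fs/bpf/my_queue
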
diff --git a/tools/bpf/bpftool/Documentation/bpftool-net.rst b/tools/bpf/bpftool/Documentation/bpftool-net.rst
index ed87c9b619ad..779dab3650ee 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-net.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-net.rst
@@ -142,4 +142,5 @@ SEE ALSO
**bpftool-prog**\ (8),
**bpftool-map**\ (8),
**bpftool-cgroup**\ (8),
+ **bpftool-feature**\ (8),
**bpftool-perf**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-perf.rst b/tools/bpf/bpftool/Documentation/bpftool-perf.rst
index f4c5e5538bb8..bca5590a80d0 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-perf.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-perf.rst
@@ -84,4 +84,5 @@ SEE ALSO
**bpftool-prog**\ (8),
**bpftool-map**\ (8),
**bpftool-cgroup**\ (8),
+ **bpftool-feature**\ (8),
**bpftool-net**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index 58c8369b77dd..9386bd6e0396 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -18,7 +18,7 @@ SYNOPSIS
{ **show** | **list** | **dump xlated** | **dump jited** | **pin** | **load**
| **loadall** | **help** }
-MAP COMMANDS
+PROG COMMANDS
=============
| **bpftool** **prog { show | list }** [*PROG*]
@@ -42,7 +42,7 @@ MAP COMMANDS
| **cgroup/connect4** | **cgroup/connect6** | **cgroup/sendmsg4** | **cgroup/sendmsg6**
| }
| *ATTACH_TYPE* := {
-| **msg_verdict** | **skb_verdict** | **skb_parse** | **flow_dissector**
+| **msg_verdict** | **stream_verdict** | **stream_parser** | **flow_dissector**
| }
@@ -171,7 +171,7 @@ EXAMPLES
::
- 10: xdp name some_prog tag 005a3d2123620c8b gpl
+ 10: xdp name some_prog tag 005a3d2123620c8b gpl run_time_ns 81632 run_cnt 10
loaded_at 2017-09-29T20:11:00+0000 uid 0
xlated 528B jited 370B memlock 4096B map_ids 10
@@ -184,6 +184,8 @@ EXAMPLES
"type": "xdp",
"tag": "005a3d2123620c8b",
"gpl_compatible": true,
+ "run_time_ns": 81632,
+ "run_cnt": 10,
"loaded_at": 1506715860,
"uid": 0,
"bytes_xlated": 528,
@@ -258,5 +260,6 @@ SEE ALSO
**bpftool**\ (8),
**bpftool-map**\ (8),
**bpftool-cgroup**\ (8),
+ **bpftool-feature**\ (8),
**bpftool-net**\ (8),
**bpftool-perf**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst
index e1677e81ed59..4f2188845dd8 100644
--- a/tools/bpf/bpftool/Documentation/bpftool.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool.rst
@@ -16,7 +16,7 @@ SYNOPSIS
**bpftool** **version**
- *OBJECT* := { **map** | **program** | **cgroup** | **perf** | **net** }
+ *OBJECT* := { **map** | **program** | **cgroup** | **perf** | **net** | **feature** }
*OPTIONS* := { { **-V** | **--version** } | { **-h** | **--help** }
| { **-j** | **--json** } [{ **-p** | **--pretty** }] }
@@ -34,6 +34,8 @@ SYNOPSIS
*NET-COMMANDS* := { **show** | **list** | **help** }
+ *FEATURE-COMMANDS* := { **probe** | **help** }
+
DESCRIPTION
===========
*bpftool* allows for inspection and simple modification of BPF objects
@@ -72,5 +74,6 @@ SEE ALSO
**bpftool-prog**\ (8),
**bpftool-map**\ (8),
**bpftool-cgroup**\ (8),
+ **bpftool-feature**\ (8),
**bpftool-net**\ (8),
**bpftool-perf**\ (8)
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index e4e4fab1b8c7..b803827d01e8 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -50,14 +50,15 @@ _bpftool_get_map_ids()
command sed -n 's/.*"id": \(.*\),$/\1/p' )" -- "$cur" ) )
}
-_bpftool_get_perf_map_ids()
+# Takes map type and adds matching map ids to the list of suggestions.
+_bpftool_get_map_ids_for_type()
{
+ local type="$1"
COMPREPLY+=( $( compgen -W "$( bpftool -jp map 2>&1 | \
- command grep -C2 perf_event_array | \
+ command grep -C2 "$type" | \
command sed -n 's/.*"id": \(.*\),$/\1/p' )" -- "$cur" ) )
}
-
_bpftool_get_prog_ids()
{
COMPREPLY+=( $( compgen -W "$( bpftool -jp prog 2>&1 | \
@@ -99,15 +100,25 @@ _sysfs_get_netdevs()
"$cur" ) )
}
-# For bpftool map update: retrieve type of the map to update.
-_bpftool_map_update_map_type()
+# Retrieve type of the map that we are operating on.
+_bpftool_map_guess_map_type()
{
local keyword ref
for (( idx=3; idx < ${#words[@]}-1; idx++ )); do
- if [[ ${words[$((idx-2))]} == "update" ]]; then
- keyword=${words[$((idx-1))]}
- ref=${words[$((idx))]}
- fi
+ case "${words[$((idx-2))]}" in
+ lookup|update)
+ keyword=${words[$((idx-1))]}
+ ref=${words[$((idx))]}
+ ;;
+ push)
+ printf "stack"
+ return 0
+ ;;
+ enqueue)
+ printf "queue"
+ return 0
+ ;;
+ esac
done
[[ -z $ref ]] && return 0
@@ -119,6 +130,8 @@ _bpftool_map_update_map_type()
_bpftool_map_update_get_id()
{
+ local command="$1"
+
# Is it the map to update, or a map to insert into the map to update?
# Search for "value" keyword.
local idx value
@@ -128,11 +141,24 @@ _bpftool_map_update_get_id()
break
fi
done
- [[ $value -eq 0 ]] && _bpftool_get_map_ids && return 0
+ if [[ $value -eq 0 ]]; then
+ case "$command" in
+ push)
+ _bpftool_get_map_ids_for_type stack
+ ;;
+ enqueue)
+ _bpftool_get_map_ids_for_type queue
+ ;;
+ *)
+ _bpftool_get_map_ids
+ ;;
+ esac
+ return 0
+ fi
# Id to complete is for a value. It can be either prog id or map id. This
# depends on the type of the map to update.
- local type=$(_bpftool_map_update_map_type)
+ local type=$(_bpftool_map_guess_map_type)
case $type in
array_of_maps|hash_of_maps)
_bpftool_get_map_ids
@@ -285,8 +311,8 @@ _bpftool()
return 0
;;
5)
- COMPREPLY=( $( compgen -W 'msg_verdict skb_verdict \
- skb_parse flow_dissector' -- "$cur" ) )
+ COMPREPLY=( $( compgen -W 'msg_verdict stream_verdict \
+ stream_parser flow_dissector' -- "$cur" ) )
return 0
;;
6)
@@ -382,14 +408,28 @@ _bpftool()
map)
local MAP_TYPE='id pinned'
case $command in
- show|list|dump)
+ show|list|dump|peek|pop|dequeue)
case $prev in
$command)
COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
return 0
;;
id)
- _bpftool_get_map_ids
+ case "$command" in
+ peek)
+ _bpftool_get_map_ids_for_type stack
+ _bpftool_get_map_ids_for_type queue
+ ;;
+ pop)
+ _bpftool_get_map_ids_for_type stack
+ ;;
+ dequeue)
+ _bpftool_get_map_ids_for_type queue
+ ;;
+ *)
+ _bpftool_get_map_ids
+ ;;
+ esac
return 0
;;
*)
@@ -447,19 +487,25 @@ _bpftool()
COMPREPLY+=( $( compgen -W 'hex' -- "$cur" ) )
;;
*)
+ case $(_bpftool_map_guess_map_type) in
+ queue|stack)
+ return 0
+ ;;
+ esac
+
_bpftool_once_attr 'key'
return 0
;;
esac
;;
- update)
+ update|push|enqueue)
case $prev in
$command)
COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
return 0
;;
id)
- _bpftool_map_update_get_id
+ _bpftool_map_update_get_id $command
return 0
;;
key)
@@ -468,7 +514,7 @@ _bpftool()
value)
# We can have bytes, or references to a prog or a
# map, depending on the type of the map to update.
- case $(_bpftool_map_update_map_type) in
+ case "$(_bpftool_map_guess_map_type)" in
array_of_maps|hash_of_maps)
local MAP_TYPE='id pinned'
COMPREPLY+=( $( compgen -W "$MAP_TYPE" \
@@ -490,6 +536,13 @@ _bpftool()
return 0
;;
*)
+ case $(_bpftool_map_guess_map_type) in
+ queue|stack)
+ _bpftool_once_attr 'value'
+ return 0;
+ ;;
+ esac
+
_bpftool_once_attr 'key'
local UPDATE_FLAGS='any exist noexist'
for (( idx=3; idx < ${#words[@]}-1; idx++ )); do
@@ -508,6 +561,7 @@ _bpftool()
return 0
fi
done
+
return 0
;;
esac
@@ -527,7 +581,7 @@ _bpftool()
return 0
;;
id)
- _bpftool_get_perf_map_ids
+ _bpftool_get_map_ids_for_type perf_event_array
return 0
;;
cpu)
@@ -546,7 +600,8 @@ _bpftool()
*)
[[ $prev == $object ]] && \
COMPREPLY=( $( compgen -W 'delete dump getnext help \
- lookup pin event_pipe show list update create' -- \
+ lookup pin event_pipe show list update create \
+ peek push enqueue pop dequeue' -- \
"$cur" ) )
;;
esac
@@ -624,6 +679,25 @@ _bpftool()
;;
esac
;;
+ feature)
+ case $command in
+ probe)
+ [[ $prev == "dev" ]] && _sysfs_get_netdevs && return 0
+ [[ $prev == "prefix" ]] && return 0
+ if _bpftool_search_list 'macros'; then
+ COMPREPLY+=( $( compgen -W 'prefix' -- "$cur" ) )
+ else
+ COMPREPLY+=( $( compgen -W 'macros' -- "$cur" ) )
+ fi
+ _bpftool_one_of_list 'kernel dev'
+ return 0
+ ;;
+ *)
+ [[ $prev == $object ]] && \
+ COMPREPLY=( $( compgen -W 'help probe' -- "$cur" ) )
+ ;;
+ esac
+ ;;
esac
} &&
complete -F _bpftool bpftool
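
The type filtering added to the completion script boils down to the grep/sed pipeline in _bpftool_get_map_ids_for_type: list the ids of maps whose JSON description mentions a given type. Run standalone it looks roughly like this, with "stack" as an example type:

    # Rough standalone equivalent of _bpftool_get_map_ids_for_type.
    type=stack
    bpftool -jp map 2>&1 | \
        grep -C2 "$type" | \
        sed -n 's/.*"id": \(.*\),$/\1/p'
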
diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c
index 6ba5f567a9d8..e63bce0755eb 100644
--- a/tools/bpf/bpftool/btf_dumper.c
+++ b/tools/bpf/bpftool/btf_dumper.c
@@ -73,35 +73,104 @@ static int btf_dumper_array(const struct btf_dumper *d, __u32 type_id,
return ret;
}
+static void btf_int128_print(json_writer_t *jw, const void *data,
+ bool is_plain_text)
+{
+ /* data points to a __int128 number.
+ * Suppose
+ * int128_num = *(__int128 *)data;
+ * The below formulas show what upper_num and lower_num represent:
+ * upper_num = int128_num >> 64;
+ * lower_num = int128_num & 0xffffffffFFFFFFFFULL;
+ */
+ __u64 upper_num, lower_num;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ upper_num = *(__u64 *)data;
+ lower_num = *(__u64 *)(data + 8);
+#else
+ upper_num = *(__u64 *)(data + 8);
+ lower_num = *(__u64 *)data;
+#endif
+
+ if (is_plain_text) {
+ if (upper_num == 0)
+ jsonw_printf(jw, "0x%llx", lower_num);
+ else
+ jsonw_printf(jw, "0x%llx%016llx", upper_num, lower_num);
+ } else {
+ if (upper_num == 0)
+ jsonw_printf(jw, "\"0x%llx\"", lower_num);
+ else
+ jsonw_printf(jw, "\"0x%llx%016llx\"", upper_num, lower_num);
+ }
+}
+
+static void btf_int128_shift(__u64 *print_num, u16 left_shift_bits,
+ u16 right_shift_bits)
+{
+ __u64 upper_num, lower_num;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ upper_num = print_num[0];
+ lower_num = print_num[1];
+#else
+ upper_num = print_num[1];
+ lower_num = print_num[0];
+#endif
+
+ /* shake out un-needed bits by shift/or operations */
+ if (left_shift_bits >= 64) {
+ upper_num = lower_num << (left_shift_bits - 64);
+ lower_num = 0;
+ } else {
+ upper_num = (upper_num << left_shift_bits) |
+ (lower_num >> (64 - left_shift_bits));
+ lower_num = lower_num << left_shift_bits;
+ }
+
+ if (right_shift_bits >= 64) {
+ lower_num = upper_num >> (right_shift_bits - 64);
+ upper_num = 0;
+ } else {
+ lower_num = (lower_num >> right_shift_bits) |
+ (upper_num << (64 - right_shift_bits));
+ upper_num = upper_num >> right_shift_bits;
+ }
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ print_num[0] = upper_num;
+ print_num[1] = lower_num;
+#else
+ print_num[0] = lower_num;
+ print_num[1] = upper_num;
+#endif
+}
+
static void btf_dumper_bitfield(__u32 nr_bits, __u8 bit_offset,
const void *data, json_writer_t *jw,
bool is_plain_text)
{
int left_shift_bits, right_shift_bits;
+ __u64 print_num[2] = {};
int bytes_to_copy;
int bits_to_copy;
- __u64 print_num;
bits_to_copy = bit_offset + nr_bits;
bytes_to_copy = BITS_ROUNDUP_BYTES(bits_to_copy);
- print_num = 0;
- memcpy(&print_num, data, bytes_to_copy);
+ memcpy(print_num, data, bytes_to_copy);
#if defined(__BIG_ENDIAN_BITFIELD)
left_shift_bits = bit_offset;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
- left_shift_bits = 64 - bits_to_copy;
+ left_shift_bits = 128 - bits_to_copy;
#else
#error neither big nor little endian
#endif
- right_shift_bits = 64 - nr_bits;
+ right_shift_bits = 128 - nr_bits;
- print_num <<= left_shift_bits;
- print_num >>= right_shift_bits;
- if (is_plain_text)
- jsonw_printf(jw, "0x%llx", print_num);
- else
- jsonw_printf(jw, "%llu", print_num);
+ btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
+ btf_int128_print(jw, print_num, is_plain_text);
}
@@ -113,7 +182,7 @@ static void btf_dumper_int_bits(__u32 int_type, __u8 bit_offset,
int total_bits_offset;
/* bits_offset is at most 7.
- * BTF_INT_OFFSET() cannot exceed 64 bits.
+ * BTF_INT_OFFSET() cannot exceed 128 bits.
*/
total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type);
data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
@@ -139,6 +208,11 @@ static int btf_dumper_int(const struct btf_type *t, __u8 bit_offset,
return 0;
}
+ if (nr_bits == 128) {
+ btf_int128_print(jw, data, is_plain_text);
+ return 0;
+ }
+
switch (BTF_INT_ENCODING(*int_type)) {
case 0:
if (BTF_INT_BITS(*int_type) == 64)
diff --git a/tools/bpf/bpftool/cfg.c b/tools/bpf/bpftool/cfg.c
index 31f0db41513f..3e21f994f262 100644
--- a/tools/bpf/bpftool/cfg.c
+++ b/tools/bpf/bpftool/cfg.c
@@ -157,6 +157,11 @@ static bool cfg_partition_funcs(struct cfg *cfg, struct bpf_insn *cur,
return false;
}
+static bool is_jmp_insn(u8 code)
+{
+ return BPF_CLASS(code) == BPF_JMP || BPF_CLASS(code) == BPF_JMP32;
+}
+
static bool func_partition_bb_head(struct func_node *func)
{
struct bpf_insn *cur, *end;
@@ -170,7 +175,7 @@ static bool func_partition_bb_head(struct func_node *func)
return true;
for (; cur <= end; cur++) {
- if (BPF_CLASS(cur->code) == BPF_JMP) {
+ if (is_jmp_insn(cur->code)) {
u8 opcode = BPF_OP(cur->code);
if (opcode == BPF_EXIT || opcode == BPF_CALL)
@@ -296,7 +301,7 @@ static bool func_add_bb_edges(struct func_node *func)
e->src = bb;
insn = bb->tail;
- if (BPF_CLASS(insn->code) != BPF_JMP ||
+ if (!is_jmp_insn(insn->code) ||
BPF_OP(insn->code) == BPF_EXIT) {
e->dst = bb_next(bb);
e->flags |= EDGE_FLAG_FALLTHROUGH;
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 897483457bf0..f7261fad45c1 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -297,10 +297,8 @@ char *get_fdinfo(int fd, const char *key)
snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
fdi = fopen(path, "r");
- if (!fdi) {
- p_err("can't open fdinfo: %s", strerror(errno));
+ if (!fdi)
return NULL;
- }
while ((n = getline(&line, &line_n, fdi)) > 0) {
char *value;
@@ -313,7 +311,6 @@ char *get_fdinfo(int fd, const char *key)
value = strchr(line, '\t');
if (!value || !value[1]) {
- p_err("malformed fdinfo!?");
free(line);
return NULL;
}
@@ -326,7 +323,6 @@ char *get_fdinfo(int fd, const char *key)
return line;
}
- p_err("key '%s' not found in fdinfo", key);
free(line);
fclose(fdi);
return NULL;
diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
new file mode 100644
index 000000000000..d672d9086fff
--- /dev/null
+++ b/tools/bpf/bpftool/feature.c
@@ -0,0 +1,764 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (c) 2019 Netronome Systems, Inc. */
+
+#include <ctype.h>
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+#include <net/if.h>
+#include <sys/utsname.h>
+#include <sys/vfs.h>
+
+#include <linux/filter.h>
+#include <linux/limits.h>
+
+#include <bpf.h>
+#include <libbpf.h>
+
+#include "main.h"
+
+#ifndef PROC_SUPER_MAGIC
+# define PROC_SUPER_MAGIC 0x9fa0
+#endif
+
+enum probe_component {
+ COMPONENT_UNSPEC,
+ COMPONENT_KERNEL,
+ COMPONENT_DEVICE,
+};
+
+#define BPF_HELPER_MAKE_ENTRY(name) [BPF_FUNC_ ## name] = "bpf_" # name
+static const char * const helper_name[] = {
+ __BPF_FUNC_MAPPER(BPF_HELPER_MAKE_ENTRY)
+};
+
+#undef BPF_HELPER_MAKE_ENTRY
+
+/* Miscellaneous utility functions */
+
+static bool check_procfs(void)
+{
+ struct statfs st_fs;
+
+ if (statfs("/proc", &st_fs) < 0)
+ return false;
+ if ((unsigned long)st_fs.f_type != PROC_SUPER_MAGIC)
+ return false;
+
+ return true;
+}
+
+static void uppercase(char *str, size_t len)
+{
+ size_t i;
+
+ for (i = 0; i < len && str[i] != '\0'; i++)
+ str[i] = toupper(str[i]);
+}
+
+/* Printing utility functions */
+
+static void
+print_bool_feature(const char *feat_name, const char *plain_name,
+ const char *define_name, bool res, const char *define_prefix)
+{
+ if (json_output)
+ jsonw_bool_field(json_wtr, feat_name, res);
+ else if (define_prefix)
+ printf("#define %s%sHAVE_%s\n", define_prefix,
+ res ? "" : "NO_", define_name);
+ else
+ printf("%s is %savailable\n", plain_name, res ? "" : "NOT ");
+}
+
+static void print_kernel_option(const char *name, const char *value)
+{
+ char *endptr;
+ int res;
+
+	/* No support for C-style output */
+
+ if (json_output) {
+ if (!value) {
+ jsonw_null_field(json_wtr, name);
+ return;
+ }
+ errno = 0;
+ res = strtol(value, &endptr, 0);
+ if (!errno && *endptr == '\n')
+ jsonw_int_field(json_wtr, name, res);
+ else
+ jsonw_string_field(json_wtr, name, value);
+ } else {
+ if (value)
+ printf("%s is set to %s\n", name, value);
+ else
+ printf("%s is not set\n", name);
+ }
+}
+
+static void
+print_start_section(const char *json_title, const char *plain_title,
+ const char *define_comment, const char *define_prefix)
+{
+ if (json_output) {
+ jsonw_name(json_wtr, json_title);
+ jsonw_start_object(json_wtr);
+ } else if (define_prefix) {
+ printf("%s\n", define_comment);
+ } else {
+ printf("%s\n", plain_title);
+ }
+}
+
+static void
+print_end_then_start_section(const char *json_title, const char *plain_title,
+ const char *define_comment,
+ const char *define_prefix)
+{
+ if (json_output)
+ jsonw_end_object(json_wtr);
+ else
+ printf("\n");
+
+ print_start_section(json_title, plain_title, define_comment,
+ define_prefix);
+}
+
+/* Probing functions */
+
+static int read_procfs(const char *path)
+{
+ char *endptr, *line = NULL;
+ size_t len = 0;
+ FILE *fd;
+ int res;
+
+ fd = fopen(path, "r");
+ if (!fd)
+ return -1;
+
+ res = getline(&line, &len, fd);
+ fclose(fd);
+ if (res < 0)
+ return -1;
+
+ errno = 0;
+ res = strtol(line, &endptr, 10);
+ if (errno || *line == '\0' || *endptr != '\n')
+ res = -1;
+ free(line);
+
+ return res;
+}
+
+static void probe_unprivileged_disabled(void)
+{
+ int res;
+
+	/* No support for C-style output */
+
+ res = read_procfs("/proc/sys/kernel/unprivileged_bpf_disabled");
+ if (json_output) {
+ jsonw_int_field(json_wtr, "unprivileged_bpf_disabled", res);
+ } else {
+ switch (res) {
+ case 0:
+ printf("bpf() syscall for unprivileged users is enabled\n");
+ break;
+ case 1:
+ printf("bpf() syscall restricted to privileged users\n");
+ break;
+ case -1:
+ printf("Unable to retrieve required privileges for bpf() syscall\n");
+ break;
+ default:
+ printf("bpf() syscall restriction has unknown value %d\n", res);
+ }
+ }
+}
+
+static void probe_jit_enable(void)
+{
+ int res;
+
+	/* No support for C-style output */
+
+ res = read_procfs("/proc/sys/net/core/bpf_jit_enable");
+ if (json_output) {
+ jsonw_int_field(json_wtr, "bpf_jit_enable", res);
+ } else {
+ switch (res) {
+ case 0:
+ printf("JIT compiler is disabled\n");
+ break;
+ case 1:
+ printf("JIT compiler is enabled\n");
+ break;
+ case 2:
+ printf("JIT compiler is enabled with debugging traces in kernel logs\n");
+ break;
+ case -1:
+ printf("Unable to retrieve JIT-compiler status\n");
+ break;
+ default:
+ printf("JIT-compiler status has unknown value %d\n",
+ res);
+ }
+ }
+}
+
+static void probe_jit_harden(void)
+{
+ int res;
+
+	/* No support for C-style output */
+
+ res = read_procfs("/proc/sys/net/core/bpf_jit_harden");
+ if (json_output) {
+ jsonw_int_field(json_wtr, "bpf_jit_harden", res);
+ } else {
+ switch (res) {
+ case 0:
+ printf("JIT compiler hardening is disabled\n");
+ break;
+ case 1:
+ printf("JIT compiler hardening is enabled for unprivileged users\n");
+ break;
+ case 2:
+ printf("JIT compiler hardening is enabled for all users\n");
+ break;
+ case -1:
+ printf("Unable to retrieve JIT hardening status\n");
+ break;
+ default:
+ printf("JIT hardening status has unknown value %d\n",
+ res);
+ }
+ }
+}
+
+static void probe_jit_kallsyms(void)
+{
+ int res;
+
+	/* No support for C-style output */
+
+ res = read_procfs("/proc/sys/net/core/bpf_jit_kallsyms");
+ if (json_output) {
+ jsonw_int_field(json_wtr, "bpf_jit_kallsyms", res);
+ } else {
+ switch (res) {
+ case 0:
+ printf("JIT compiler kallsyms exports are disabled\n");
+ break;
+ case 1:
+ printf("JIT compiler kallsyms exports are enabled for root\n");
+ break;
+ case -1:
+ printf("Unable to retrieve JIT kallsyms export status\n");
+ break;
+ default:
+ printf("JIT kallsyms exports status has unknown value %d\n", res);
+ }
+ }
+}
+
+static void probe_jit_limit(void)
+{
+ int res;
+
+	/* No support for C-style output */
+
+ res = read_procfs("/proc/sys/net/core/bpf_jit_limit");
+ if (json_output) {
+ jsonw_int_field(json_wtr, "bpf_jit_limit", res);
+ } else {
+ switch (res) {
+ case -1:
+ printf("Unable to retrieve global memory limit for JIT compiler for unprivileged users\n");
+ break;
+ default:
+ printf("Global memory limit for JIT compiler for unprivileged users is %d bytes\n", res);
+ }
+ }
+}
+
+static char *get_kernel_config_option(FILE *fd, const char *option)
+{
+ size_t line_n = 0, optlen = strlen(option);
+ char *res, *strval, *line = NULL;
+ ssize_t n;
+
+ rewind(fd);
+ while ((n = getline(&line, &line_n, fd)) > 0) {
+ if (strncmp(line, option, optlen))
+ continue;
+ /* Check we have at least '=', value, and '\n' */
+ if (strlen(line) < optlen + 3)
+ continue;
+ if (*(line + optlen) != '=')
+ continue;
+
+ /* Trim ending '\n' */
+ line[strlen(line) - 1] = '\0';
+
+ /* Copy and return config option value */
+ strval = line + optlen + 1;
+ res = strdup(strval);
+ free(line);
+ return res;
+ }
+ free(line);
+
+ return NULL;
+}
+
+static void probe_kernel_image_config(void)
+{
+ static const char * const options[] = {
+ /* Enable BPF */
+ "CONFIG_BPF",
+ /* Enable bpf() syscall */
+ "CONFIG_BPF_SYSCALL",
+ /* Does selected architecture support eBPF JIT compiler */
+ "CONFIG_HAVE_EBPF_JIT",
+ /* Compile eBPF JIT compiler */
+ "CONFIG_BPF_JIT",
+ /* Avoid compiling eBPF interpreter (use JIT only) */
+ "CONFIG_BPF_JIT_ALWAYS_ON",
+
+ /* cgroups */
+ "CONFIG_CGROUPS",
+ /* BPF programs attached to cgroups */
+ "CONFIG_CGROUP_BPF",
+ /* bpf_get_cgroup_classid() helper */
+ "CONFIG_CGROUP_NET_CLASSID",
+ /* bpf_skb_{,ancestor_}cgroup_id() helpers */
+ "CONFIG_SOCK_CGROUP_DATA",
+
+ /* Tracing: attach BPF to kprobes, tracepoints, etc. */
+ "CONFIG_BPF_EVENTS",
+ /* Kprobes */
+ "CONFIG_KPROBE_EVENTS",
+ /* Uprobes */
+ "CONFIG_UPROBE_EVENTS",
+ /* Tracepoints */
+ "CONFIG_TRACING",
+ /* Syscall tracepoints */
+ "CONFIG_FTRACE_SYSCALLS",
+ /* bpf_override_return() helper support for selected arch */
+ "CONFIG_FUNCTION_ERROR_INJECTION",
+ /* bpf_override_return() helper */
+ "CONFIG_BPF_KPROBE_OVERRIDE",
+
+ /* Network */
+ "CONFIG_NET",
+ /* AF_XDP sockets */
+ "CONFIG_XDP_SOCKETS",
+ /* BPF_PROG_TYPE_LWT_* and related helpers */
+ "CONFIG_LWTUNNEL_BPF",
+ /* BPF_PROG_TYPE_SCHED_ACT, TC (traffic control) actions */
+ "CONFIG_NET_ACT_BPF",
+ /* BPF_PROG_TYPE_SCHED_CLS, TC filters */
+ "CONFIG_NET_CLS_BPF",
+ /* TC clsact qdisc */
+ "CONFIG_NET_CLS_ACT",
+ /* Ingress filtering with TC */
+ "CONFIG_NET_SCH_INGRESS",
+ /* bpf_skb_get_xfrm_state() helper */
+ "CONFIG_XFRM",
+ /* bpf_get_route_realm() helper */
+ "CONFIG_IP_ROUTE_CLASSID",
+ /* BPF_PROG_TYPE_LWT_SEG6_LOCAL and related helpers */
+ "CONFIG_IPV6_SEG6_BPF",
+ /* BPF_PROG_TYPE_LIRC_MODE2 and related helpers */
+ "CONFIG_BPF_LIRC_MODE2",
+ /* BPF stream parser and BPF socket maps */
+ "CONFIG_BPF_STREAM_PARSER",
+ /* xt_bpf module for passing BPF programs to netfilter */
+ "CONFIG_NETFILTER_XT_MATCH_BPF",
+ /* bpfilter back-end for iptables */
+ "CONFIG_BPFILTER",
+		/* bpfilter module with "user mode helper" */
+ "CONFIG_BPFILTER_UMH",
+
+ /* test_bpf module for BPF tests */
+ "CONFIG_TEST_BPF",
+ };
+ char *value, *buf = NULL;
+ struct utsname utsn;
+ char path[PATH_MAX];
+ size_t i, n;
+ ssize_t ret;
+ FILE *fd;
+
+ if (uname(&utsn))
+ goto no_config;
+
+ snprintf(path, sizeof(path), "/boot/config-%s", utsn.release);
+
+ fd = fopen(path, "r");
+ if (!fd && errno == ENOENT) {
+ /* Some distributions put the config file at /proc/config, give
+ * it a try.
+ * Sometimes it is also at /proc/config.gz but we do not try
+ * this one for now, it would require linking against libz.
+ */
+ fd = fopen("/proc/config", "r");
+ }
+ if (!fd) {
+ p_info("skipping kernel config, can't open file: %s",
+ strerror(errno));
+ goto no_config;
+ }
+ /* Sanity checks */
+ ret = getline(&buf, &n, fd);
+ ret = getline(&buf, &n, fd);
+ if (!buf || !ret) {
+ p_info("skipping kernel config, can't read from file: %s",
+ strerror(errno));
+ free(buf);
+ goto no_config;
+ }
+ if (strcmp(buf, "# Automatically generated file; DO NOT EDIT.\n")) {
+ p_info("skipping kernel config, can't find correct file");
+ free(buf);
+ goto no_config;
+ }
+ free(buf);
+
+ for (i = 0; i < ARRAY_SIZE(options); i++) {
+ value = get_kernel_config_option(fd, options[i]);
+ print_kernel_option(options[i], value);
+ free(value);
+ }
+ fclose(fd);
+ return;
+
+no_config:
+ for (i = 0; i < ARRAY_SIZE(options); i++)
+ print_kernel_option(options[i], NULL);
+}
+
+static bool probe_bpf_syscall(const char *define_prefix)
+{
+ bool res;
+
+ bpf_load_program(BPF_PROG_TYPE_UNSPEC, NULL, 0, NULL, 0, NULL, 0);
+ res = (errno != ENOSYS);
+
+ print_bool_feature("have_bpf_syscall",
+ "bpf() syscall",
+ "BPF_SYSCALL",
+ res, define_prefix);
+
+ return res;
+}
+
+static void
+probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types,
+ const char *define_prefix, __u32 ifindex)
+{
+ char feat_name[128], plain_desc[128], define_name[128];
+ const char *plain_comment = "eBPF program_type ";
+ size_t maxlen;
+ bool res;
+
+ if (ifindex)
+ /* Only test offload-able program types */
+ switch (prog_type) {
+ case BPF_PROG_TYPE_SCHED_CLS:
+ case BPF_PROG_TYPE_XDP:
+ break;
+ default:
+ return;
+ }
+
+ res = bpf_probe_prog_type(prog_type, ifindex);
+
+ supported_types[prog_type] |= res;
+
+ maxlen = sizeof(plain_desc) - strlen(plain_comment) - 1;
+ if (strlen(prog_type_name[prog_type]) > maxlen) {
+ p_info("program type name too long");
+ return;
+ }
+
+ sprintf(feat_name, "have_%s_prog_type", prog_type_name[prog_type]);
+ sprintf(define_name, "%s_prog_type", prog_type_name[prog_type]);
+ uppercase(define_name, sizeof(define_name));
+ sprintf(plain_desc, "%s%s", plain_comment, prog_type_name[prog_type]);
+ print_bool_feature(feat_name, plain_desc, define_name, res,
+ define_prefix);
+}
+
+static void
+probe_map_type(enum bpf_map_type map_type, const char *define_prefix,
+ __u32 ifindex)
+{
+ char feat_name[128], plain_desc[128], define_name[128];
+ const char *plain_comment = "eBPF map_type ";
+ size_t maxlen;
+ bool res;
+
+ res = bpf_probe_map_type(map_type, ifindex);
+
+ maxlen = sizeof(plain_desc) - strlen(plain_comment) - 1;
+ if (strlen(map_type_name[map_type]) > maxlen) {
+ p_info("map type name too long");
+ return;
+ }
+
+ sprintf(feat_name, "have_%s_map_type", map_type_name[map_type]);
+ sprintf(define_name, "%s_map_type", map_type_name[map_type]);
+ uppercase(define_name, sizeof(define_name));
+ sprintf(plain_desc, "%s%s", plain_comment, map_type_name[map_type]);
+ print_bool_feature(feat_name, plain_desc, define_name, res,
+ define_prefix);
+}
+
+static void
+probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
+ const char *define_prefix, __u32 ifindex)
+{
+ const char *ptype_name = prog_type_name[prog_type];
+ char feat_name[128];
+ unsigned int id;
+ bool res;
+
+ if (ifindex)
+ /* Only test helpers for offload-able program types */
+ switch (prog_type) {
+ case BPF_PROG_TYPE_SCHED_CLS:
+ case BPF_PROG_TYPE_XDP:
+ break;
+ default:
+ return;
+ }
+
+ if (json_output) {
+ sprintf(feat_name, "%s_available_helpers", ptype_name);
+ jsonw_name(json_wtr, feat_name);
+ jsonw_start_array(json_wtr);
+ } else if (!define_prefix) {
+ printf("eBPF helpers supported for program type %s:",
+ ptype_name);
+ }
+
+ for (id = 1; id < ARRAY_SIZE(helper_name); id++) {
+ if (!supported_type)
+ res = false;
+ else
+ res = bpf_probe_helper(id, prog_type, ifindex);
+
+ if (json_output) {
+ if (res)
+ jsonw_string(json_wtr, helper_name[id]);
+ } else if (define_prefix) {
+ printf("#define %sBPF__PROG_TYPE_%s__HELPER_%s %s\n",
+ define_prefix, ptype_name, helper_name[id],
+ res ? "1" : "0");
+ } else {
+ if (res)
+ printf("\n\t- %s", helper_name[id]);
+ }
+ }
+
+ if (json_output)
+ jsonw_end_array(json_wtr);
+ else if (!define_prefix)
+ printf("\n");
+}
+
+static int do_probe(int argc, char **argv)
+{
+ enum probe_component target = COMPONENT_UNSPEC;
+ const char *define_prefix = NULL;
+ bool supported_types[128] = {};
+ __u32 ifindex = 0;
+ unsigned int i;
+ char *ifname;
+
+ /* Detection assumes user has sufficient privileges (CAP_SYS_ADMIN).
+ * Let's approximate, and restrict usage to root user only.
+ */
+ if (geteuid()) {
+ p_err("please run this command as root user");
+ return -1;
+ }
+
+ set_max_rlimit();
+
+ while (argc) {
+ if (is_prefix(*argv, "kernel")) {
+ if (target != COMPONENT_UNSPEC) {
+ p_err("component to probe already specified");
+ return -1;
+ }
+ target = COMPONENT_KERNEL;
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "dev")) {
+ NEXT_ARG();
+
+ if (target != COMPONENT_UNSPEC || ifindex) {
+ p_err("component to probe already specified");
+ return -1;
+ }
+ if (!REQ_ARGS(1))
+ return -1;
+
+ target = COMPONENT_DEVICE;
+ ifname = GET_ARG();
+ ifindex = if_nametoindex(ifname);
+ if (!ifindex) {
+ p_err("unrecognized netdevice '%s': %s", ifname,
+ strerror(errno));
+ return -1;
+ }
+ } else if (is_prefix(*argv, "macros") && !define_prefix) {
+ define_prefix = "";
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "prefix")) {
+ if (!define_prefix) {
+				p_err("'prefix' argument can only be used after 'macros'");
+ return -1;
+ }
+ if (strcmp(define_prefix, "")) {
+ p_err("'prefix' already defined");
+ return -1;
+ }
+ NEXT_ARG();
+
+ if (!REQ_ARGS(1))
+ return -1;
+ define_prefix = GET_ARG();
+ } else {
+ p_err("expected no more arguments, 'kernel', 'dev', 'macros' or 'prefix', got: '%s'?",
+ *argv);
+ return -1;
+ }
+ }
+
+ if (json_output) {
+ define_prefix = NULL;
+ jsonw_start_object(json_wtr);
+ }
+
+ switch (target) {
+ case COMPONENT_KERNEL:
+ case COMPONENT_UNSPEC:
+ if (define_prefix)
+ break;
+
+ print_start_section("system_config",
+ "Scanning system configuration...",
+ NULL, /* define_comment never used here */
+ NULL); /* define_prefix always NULL here */
+ if (check_procfs()) {
+ probe_unprivileged_disabled();
+ probe_jit_enable();
+ probe_jit_harden();
+ probe_jit_kallsyms();
+ probe_jit_limit();
+ } else {
+ p_info("/* procfs not mounted, skipping related probes */");
+ }
+ probe_kernel_image_config();
+ if (json_output)
+ jsonw_end_object(json_wtr);
+ else
+ printf("\n");
+ break;
+ default:
+ break;
+ }
+
+ print_start_section("syscall_config",
+ "Scanning system call availability...",
+ "/*** System call availability ***/",
+ define_prefix);
+
+ if (!probe_bpf_syscall(define_prefix))
+ /* bpf() syscall unavailable, don't probe other BPF features */
+ goto exit_close_json;
+
+ print_end_then_start_section("program_types",
+ "Scanning eBPF program types...",
+ "/*** eBPF program types ***/",
+ define_prefix);
+
+ for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++)
+ probe_prog_type(i, supported_types, define_prefix, ifindex);
+
+ print_end_then_start_section("map_types",
+ "Scanning eBPF map types...",
+ "/*** eBPF map types ***/",
+ define_prefix);
+
+ for (i = BPF_MAP_TYPE_UNSPEC + 1; i < map_type_name_size; i++)
+ probe_map_type(i, define_prefix, ifindex);
+
+ print_end_then_start_section("helpers",
+ "Scanning eBPF helper functions...",
+ "/*** eBPF helper functions ***/",
+ define_prefix);
+
+ if (define_prefix)
+ printf("/*\n"
+ " * Use %sHAVE_PROG_TYPE_HELPER(prog_type_name, helper_name)\n"
+ " * to determine if <helper_name> is available for <prog_type_name>,\n"
+ " * e.g.\n"
+ " * #if %sHAVE_PROG_TYPE_HELPER(xdp, bpf_redirect)\n"
+ " * // do stuff with this helper\n"
+ " * #elif\n"
+ " * // use a workaround\n"
+ " * #endif\n"
+ " */\n"
+ "#define %sHAVE_PROG_TYPE_HELPER(prog_type, helper) \\\n"
+ " %sBPF__PROG_TYPE_ ## prog_type ## __HELPER_ ## helper\n",
+ define_prefix, define_prefix, define_prefix,
+ define_prefix);
+ for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++)
+ probe_helpers_for_progtype(i, supported_types[i],
+ define_prefix, ifindex);
+
+exit_close_json:
+ if (json_output) {
+ /* End current "section" of probes */
+ jsonw_end_object(json_wtr);
+ /* End root object */
+ jsonw_end_object(json_wtr);
+ }
+
+ return 0;
+}
+
+static int do_help(int argc, char **argv)
+{
+ if (json_output) {
+ jsonw_null(json_wtr);
+ return 0;
+ }
+
+ fprintf(stderr,
+ "Usage: %s %s probe [COMPONENT] [macros [prefix PREFIX]]\n"
+ " %s %s help\n"
+ "\n"
+ " COMPONENT := { kernel | dev NAME }\n"
+ "",
+ bin_name, argv[-2], bin_name, argv[-2]);
+
+ return 0;
+}
+
+static const struct cmd cmds[] = {
+ { "probe", do_probe },
+ { "help", do_help },
+ { 0 }
+};
+
+int do_feature(int argc, char **argv)
+{
+ return cmd_select(cmds, argc, argv, do_help);
+}
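
The probing itself is delegated to the new libbpf helpers (tools/lib/bpf/libbpf_probes.c in this series). For illustration, a minimal sketch of how an application could call them directly; the include path and the exact prototypes (bool bpf_probe_prog_type(enum bpf_prog_type, __u32 ifindex), bool bpf_probe_map_type(enum bpf_map_type, __u32 ifindex), bool bpf_probe_helper(enum bpf_func_id, enum bpf_prog_type, __u32 ifindex)) are assumed from this series, and the probes generally need CAP_SYS_ADMIN, as noted in do_probe() above.

#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/libbpf.h>	/* assumed install location of the libbpf headers */

int main(void)
{
	__u32 ifindex = 0;	/* 0 probes the kernel itself rather than an offload device */

	if (bpf_probe_prog_type(BPF_PROG_TYPE_XDP, ifindex))
		printf("XDP program type is available\n");

	if (bpf_probe_map_type(BPF_MAP_TYPE_QUEUE, ifindex))
		printf("queue map type is available\n");

	if (bpf_probe_helper(BPF_FUNC_map_lookup_elem, BPF_PROG_TYPE_XDP, ifindex))
		printf("bpf_map_lookup_elem() is usable from XDP\n");

	return 0;
}
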
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index f44a1c2c4ea0..a9d5e9e6a732 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -56,7 +56,7 @@ static int do_help(int argc, char **argv)
" %s batch file FILE\n"
" %s version\n"
"\n"
- " OBJECT := { prog | map | cgroup | perf | net }\n"
+ " OBJECT := { prog | map | cgroup | perf | net | feature }\n"
" " HELP_SPEC_OPTIONS "\n"
"",
bin_name, bin_name, bin_name);
@@ -187,6 +187,7 @@ static const struct cmd cmds[] = {
{ "cgroup", do_cgroup },
{ "perf", do_perf },
{ "net", do_net },
+ { "feature", do_feature },
{ "version", do_version },
{ 0 }
};
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 052c91d4dc55..d7dd84d3c660 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -75,6 +75,9 @@ static const char * const prog_type_name[] = {
[BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector",
};
+extern const char * const map_type_name[];
+extern const size_t map_type_name_size;
+
enum bpf_obj_type {
BPF_OBJ_UNKNOWN,
BPF_OBJ_PROG,
@@ -145,6 +148,7 @@ int do_cgroup(int argc, char **arg);
int do_perf(int argc, char **arg);
int do_net(int argc, char **arg);
int do_tracelog(int argc, char **arg);
+int do_feature(int argc, char **argv);
int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what);
int prog_parse_fd(int *argc, char ***argv);
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index 2037e3dc864b..e0c650d91784 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -21,7 +21,7 @@
#include "json_writer.h"
#include "main.h"
-static const char * const map_type_name[] = {
+const char * const map_type_name[] = {
[BPF_MAP_TYPE_UNSPEC] = "unspec",
[BPF_MAP_TYPE_HASH] = "hash",
[BPF_MAP_TYPE_ARRAY] = "array",
@@ -48,6 +48,8 @@ static const char * const map_type_name[] = {
[BPF_MAP_TYPE_STACK] = "stack",
};
+const size_t map_type_name_size = ARRAY_SIZE(map_type_name);
+
static bool map_is_per_cpu(__u32 type)
{
return type == BPF_MAP_TYPE_PERCPU_HASH ||
@@ -285,16 +287,21 @@ static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
single_line = info->key_size + info->value_size <= 24 &&
!break_names;
- printf("key:%c", break_names ? '\n' : ' ');
- fprint_hex(stdout, key, info->key_size, " ");
+ if (info->key_size) {
+ printf("key:%c", break_names ? '\n' : ' ');
+ fprint_hex(stdout, key, info->key_size, " ");
- printf(single_line ? " " : "\n");
+ printf(single_line ? " " : "\n");
+ }
- printf("value:%c", break_names ? '\n' : ' ');
- if (value)
- fprint_hex(stdout, value, info->value_size, " ");
- else
- printf("<no entry>");
+ if (info->value_size) {
+ printf("value:%c", break_names ? '\n' : ' ');
+ if (value)
+ fprint_hex(stdout, value, info->value_size,
+ " ");
+ else
+ printf("<no entry>");
+ }
printf("\n");
} else {
@@ -303,19 +310,23 @@ static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
n = get_possible_cpus();
step = round_up(info->value_size, 8);
- printf("key:\n");
- fprint_hex(stdout, key, info->key_size, " ");
- printf("\n");
- for (i = 0; i < n; i++) {
- printf("value (CPU %02d):%c",
- i, info->value_size > 16 ? '\n' : ' ');
- if (value)
- fprint_hex(stdout, value + i * step,
- info->value_size, " ");
- else
- printf("<no entry>");
+ if (info->key_size) {
+ printf("key:\n");
+ fprint_hex(stdout, key, info->key_size, " ");
printf("\n");
}
+ if (info->value_size) {
+ for (i = 0; i < n; i++) {
+ printf("value (CPU %02d):%c",
+ i, info->value_size > 16 ? '\n' : ' ');
+ if (value)
+ fprint_hex(stdout, value + i * step,
+ info->value_size, " ");
+ else
+ printf("<no entry>");
+ printf("\n");
+ }
+ }
}
}
@@ -347,6 +358,20 @@ static char **parse_bytes(char **argv, const char *name, unsigned char *val,
return argv + i;
}
+/* on per-CPU maps we must copy the provided value to all value instances */
+static void fill_per_cpu_value(struct bpf_map_info *info, void *value)
+{
+ unsigned int i, n, step;
+
+ if (!map_is_per_cpu(info->type))
+ return;
+
+ n = get_possible_cpus();
+ step = round_up(info->value_size, 8);
+ for (i = 1; i < n; i++)
+ memcpy(value + i * step, value, info->value_size);
+}
+
static int parse_elem(char **argv, struct bpf_map_info *info,
void *key, void *value, __u32 key_size, __u32 value_size,
__u32 *flags, __u32 **value_fd)
@@ -415,6 +440,9 @@ static int parse_elem(char **argv, struct bpf_map_info *info,
p_err("not enough value arguments for map of progs");
return -1;
}
+ if (is_prefix(*argv, "id"))
+ p_info("Warning: updating program array via MAP_ID, make sure this map is kept open\n"
+ " by some process or pinned otherwise update will be lost");
fd = prog_parse_fd(&argc, &argv);
if (fd < 0)
@@ -426,6 +454,8 @@ static int parse_elem(char **argv, struct bpf_map_info *info,
argv = parse_bytes(argv, "value", value, value_size);
if (!argv)
return -1;
+
+ fill_per_cpu_value(info, value);
}
return parse_elem(argv, info, key, NULL, key_size, value_size,
@@ -497,10 +527,9 @@ static int show_map_close_json(int fd, struct bpf_map_info *info)
jsonw_uint_field(json_wtr, "owner_prog_type",
prog_type);
}
- if (atoi(owner_jited))
- jsonw_bool_field(json_wtr, "owner_jited", true);
- else
- jsonw_bool_field(json_wtr, "owner_jited", false);
+ if (owner_jited)
+ jsonw_bool_field(json_wtr, "owner_jited",
+ !!atoi(owner_jited));
free(owner_prog_type);
free(owner_jited);
@@ -553,7 +582,8 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
char *owner_prog_type = get_fdinfo(fd, "owner_prog_type");
char *owner_jited = get_fdinfo(fd, "owner_jited");
- printf("\n\t");
+ if (owner_prog_type || owner_jited)
+ printf("\n\t");
if (owner_prog_type) {
unsigned int prog_type = atoi(owner_prog_type);
@@ -563,10 +593,9 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
else
printf("owner_prog_type %d ", prog_type);
}
- if (atoi(owner_jited))
- printf("owner jited");
- else
- printf("owner not jited");
+ if (owner_jited)
+ printf("owner%s jited",
+ atoi(owner_jited) ? "" : " not");
free(owner_prog_type);
free(owner_jited);
@@ -779,6 +808,32 @@ exit_free:
return err;
}
+static int alloc_key_value(struct bpf_map_info *info, void **key, void **value)
+{
+ *key = NULL;
+ *value = NULL;
+
+ if (info->key_size) {
+ *key = malloc(info->key_size);
+ if (!*key) {
+ p_err("key mem alloc failed");
+ return -1;
+ }
+ }
+
+ if (info->value_size) {
+ *value = alloc_value(info);
+ if (!*value) {
+ p_err("value mem alloc failed");
+ free(*key);
+ *key = NULL;
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
static int do_update(int argc, char **argv)
{
struct bpf_map_info info = {};
@@ -795,13 +850,9 @@ static int do_update(int argc, char **argv)
if (fd < 0)
return -1;
- key = malloc(info.key_size);
- value = alloc_value(&info);
- if (!key || !value) {
- p_err("mem alloc failed");
- err = -1;
+ err = alloc_key_value(&info, &key, &value);
+ if (err)
goto exit_free;
- }
err = parse_elem(argv, &info, key, value, info.key_size,
info.value_size, &flags, &value_fd);
@@ -826,12 +877,51 @@ exit_free:
return err;
}
+static void print_key_value(struct bpf_map_info *info, void *key,
+ void *value)
+{
+ json_writer_t *btf_wtr;
+ struct btf *btf = NULL;
+ int err;
+
+ err = btf__get_from_id(info->btf_id, &btf);
+ if (err) {
+ p_err("failed to get btf");
+ return;
+ }
+
+ if (json_output) {
+ print_entry_json(info, key, value, btf);
+ } else if (btf) {
+		/* if we get here, json_wtr has not been initialised,
+		 * so let's create a separate writer for btf
+		 */
+ btf_wtr = get_btf_writer();
+ if (!btf_wtr) {
+ p_info("failed to create json writer for btf. falling back to plain output");
+ btf__free(btf);
+ btf = NULL;
+ print_entry_plain(info, key, value);
+ } else {
+ struct btf_dumper d = {
+ .btf = btf,
+ .jw = btf_wtr,
+ .is_plain_text = true,
+ };
+
+ do_dump_btf(&d, info, key, value);
+ jsonw_destroy(&btf_wtr);
+ }
+ } else {
+ print_entry_plain(info, key, value);
+ }
+ btf__free(btf);
+}
+
static int do_lookup(int argc, char **argv)
{
struct bpf_map_info info = {};
__u32 len = sizeof(info);
- json_writer_t *btf_wtr;
- struct btf *btf = NULL;
void *key, *value;
int err;
int fd;
@@ -843,13 +933,9 @@ static int do_lookup(int argc, char **argv)
if (fd < 0)
return -1;
- key = malloc(info.key_size);
- value = alloc_value(&info);
- if (!key || !value) {
- p_err("mem alloc failed");
- err = -1;
+ err = alloc_key_value(&info, &key, &value);
+ if (err)
goto exit_free;
- }
err = parse_elem(argv, &info, key, NULL, info.key_size, 0, NULL, NULL);
if (err)
@@ -873,43 +959,12 @@ static int do_lookup(int argc, char **argv)
}
/* here means bpf_map_lookup_elem() succeeded */
- err = btf__get_from_id(info.btf_id, &btf);
- if (err) {
- p_err("failed to get btf");
- goto exit_free;
- }
-
- if (json_output) {
- print_entry_json(&info, key, value, btf);
- } else if (btf) {
- /* if here json_wtr wouldn't have been initialised,
- * so let's create separate writer for btf
- */
- btf_wtr = get_btf_writer();
- if (!btf_wtr) {
- p_info("failed to create json writer for btf. falling back to plain output");
- btf__free(btf);
- btf = NULL;
- print_entry_plain(&info, key, value);
- } else {
- struct btf_dumper d = {
- .btf = btf,
- .jw = btf_wtr,
- .is_plain_text = true,
- };
-
- do_dump_btf(&d, &info, key, value);
- jsonw_destroy(&btf_wtr);
- }
- } else {
- print_entry_plain(&info, key, value);
- }
+ print_key_value(&info, key, value);
exit_free:
free(key);
free(value);
close(fd);
- btf__free(btf);
return err;
}
@@ -1122,6 +1177,49 @@ static int do_create(int argc, char **argv)
return 0;
}
+static int do_pop_dequeue(int argc, char **argv)
+{
+ struct bpf_map_info info = {};
+ __u32 len = sizeof(info);
+ void *key, *value;
+ int err;
+ int fd;
+
+ if (argc < 2)
+ usage();
+
+ fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
+ if (fd < 0)
+ return -1;
+
+ err = alloc_key_value(&info, &key, &value);
+ if (err)
+ goto exit_free;
+
+ err = bpf_map_lookup_and_delete_elem(fd, key, value);
+ if (err) {
+ if (errno == ENOENT) {
+ if (json_output)
+ jsonw_null(json_wtr);
+ else
+ printf("Error: empty map\n");
+ } else {
+ p_err("pop failed: %s", strerror(errno));
+ }
+
+ goto exit_free;
+ }
+
+ print_key_value(&info, key, value);
+
+exit_free:
+ free(key);
+ free(value);
+ close(fd);
+
+ return err;
+}
+
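The new peek/push/pop/enqueue/dequeue subcommands are thin wrappers around map update and the recently added bpf_map_lookup_and_delete_elem(). For illustration, a hedged C sketch of the same round trip on a BPF_MAP_TYPE_QUEUE map through libbpf (API names assumed from the libbpf of this era); queue and stack maps take no key, so NULL is passed where a key would normally go.

#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>	/* assumed install location of the libbpf headers */

int main(void)
{
	__u32 val = 42, out = 0;
	int fd;

	/* queue maps have no key: key_size is 0 */
	fd = bpf_create_map(BPF_MAP_TYPE_QUEUE, 0, sizeof(val), 16, 0);
	if (fd < 0) {
		perror("bpf_create_map");
		return 1;
	}

	/* "push": update with a NULL key */
	if (bpf_map_update_elem(fd, NULL, &val, BPF_ANY))
		perror("push");

	/* "pop": lookup and delete, again with a NULL key */
	if (bpf_map_lookup_and_delete_elem(fd, NULL, &out))
		perror("pop");
	else
		printf("popped %u\n", out);

	return 0;
}
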
static int do_help(int argc, char **argv)
{
if (json_output) {
@@ -1135,12 +1233,17 @@ static int do_help(int argc, char **argv)
" entries MAX_ENTRIES name NAME [flags FLAGS] \\\n"
" [dev NAME]\n"
" %s %s dump MAP\n"
- " %s %s update MAP key DATA value VALUE [UPDATE_FLAGS]\n"
- " %s %s lookup MAP key DATA\n"
+ " %s %s update MAP [key DATA] [value VALUE] [UPDATE_FLAGS]\n"
+ " %s %s lookup MAP [key DATA]\n"
" %s %s getnext MAP [key DATA]\n"
" %s %s delete MAP key DATA\n"
" %s %s pin MAP FILE\n"
" %s %s event_pipe MAP [cpu N index M]\n"
+ " %s %s peek MAP\n"
+ " %s %s push MAP value VALUE\n"
+ " %s %s pop MAP\n"
+ " %s %s enqueue MAP value VALUE\n"
+ " %s %s dequeue MAP\n"
" %s %s help\n"
"\n"
" " HELP_SPEC_MAP "\n"
@@ -1158,7 +1261,8 @@ static int do_help(int argc, char **argv)
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
- bin_name, argv[-2]);
+ bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
+ bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2]);
return 0;
}
@@ -1175,6 +1279,11 @@ static const struct cmd cmds[] = {
{ "pin", do_pin },
{ "event_pipe", do_event_pipe },
{ "create", do_create },
+ { "peek", do_lookup },
+ { "push", do_update },
+ { "enqueue", do_update },
+ { "pop", do_pop_dequeue },
+ { "dequeue", do_pop_dequeue },
{ 0 }
};
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 2d1bb7d6ff51..8ef80d65a474 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -78,13 +78,14 @@ static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
static int prog_fd_by_tag(unsigned char *tag)
{
- struct bpf_prog_info info = {};
- __u32 len = sizeof(info);
unsigned int id = 0;
int err;
int fd;
while (true) {
+ struct bpf_prog_info info = {};
+ __u32 len = sizeof(info);
+
err = bpf_prog_get_next_id(id, &id);
if (err) {
p_err("%s", strerror(errno));
@@ -213,6 +214,10 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
jsonw_bool_field(json_wtr, "gpl_compatible", info->gpl_compatible);
+ if (info->run_time_ns) {
+ jsonw_uint_field(json_wtr, "run_time_ns", info->run_time_ns);
+ jsonw_uint_field(json_wtr, "run_cnt", info->run_cnt);
+ }
print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
@@ -276,6 +281,9 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
printf("%s", info->gpl_compatible ? " gpl" : "");
+ if (info->run_time_ns)
+ printf(" run_time_ns %lld run_cnt %lld",
+ info->run_time_ns, info->run_cnt);
printf("\n");
if (info->load_time) {
@@ -930,10 +938,9 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
err = libbpf_prog_type_by_name(type, &attr.prog_type,
&expected_attach_type);
free(type);
- if (err < 0) {
- p_err("unknown program type '%s'", *argv);
+ if (err < 0)
goto err_free_reuse_maps;
- }
+
NEXT_ARG();
} else if (is_prefix(*argv, "map")) {
void *new_map_replace;
@@ -1028,11 +1035,8 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
err = libbpf_prog_type_by_name(sec_name, &prog_type,
&expected_attach_type);
- if (err < 0) {
- p_err("failed to guess program type based on section name %s\n",
- sec_name);
+ if (err < 0)
goto err_close_obj;
- }
}
bpf_program__set_ifindex(pos, ifindex);
@@ -1049,7 +1053,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
j = 0;
while (j < old_map_fds && map_replace[j].name) {
i = 0;
- bpf_map__for_each(map, obj) {
+ bpf_object__for_each_map(map, obj) {
if (!strcmp(bpf_map__name(map), map_replace[j].name)) {
map_replace[j].idx = i;
break;
@@ -1070,7 +1074,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
/* Set ifindex and name reuse */
j = 0;
idx = 0;
- bpf_map__for_each(map, obj) {
+ bpf_object__for_each_map(map, obj) {
if (!bpf_map__is_offload_neutral(map))
bpf_map__set_ifindex(map, ifindex);
@@ -1202,7 +1206,7 @@ static int do_help(int argc, char **argv)
" cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n"
" cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n"
" cgroup/sendmsg4 | cgroup/sendmsg6 }\n"
- " ATTACH_TYPE := { msg_verdict | skb_verdict | skb_parse |\n"
+ " ATTACH_TYPE := { msg_verdict | stream_verdict | stream_parser |\n"
" flow_dissector }\n"
" " HELP_SPEC_OPTIONS "\n"
"",
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index 5467c6bf9ceb..61e46d54a67c 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -53,10 +53,6 @@ FEATURE_TESTS_BASIC := \
libslang \
libcrypto \
libunwind \
- libunwind-x86 \
- libunwind-x86_64 \
- libunwind-arm \
- libunwind-aarch64 \
pthread-attr-setaffinity-np \
pthread-barrier \
reallocarray \
@@ -70,7 +66,6 @@ FEATURE_TESTS_BASIC := \
sched_getcpu \
sdt \
setns \
- libopencsd \
libaio
# FEATURE_TESTS_BASIC + FEATURE_TESTS_EXTRA is the complete list
@@ -84,6 +79,11 @@ FEATURE_TESTS_EXTRA := \
libbabeltrace \
libbfd-liberty \
libbfd-liberty-z \
+ libopencsd \
+ libunwind-x86 \
+ libunwind-x86_64 \
+ libunwind-arm \
+ libunwind-aarch64 \
libunwind-debug-frame \
libunwind-debug-frame-arm \
libunwind-debug-frame-aarch64 \
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
index 20cdaa4fc112..e903b86b742f 100644
--- a/tools/build/feature/test-all.c
+++ b/tools/build/feature/test-all.c
@@ -170,14 +170,14 @@
# include "test-setns.c"
#undef main
-#define main main_test_libopencsd
-# include "test-libopencsd.c"
-#undef main
-
#define main main_test_libaio
# include "test-libaio.c"
#undef main
+#define main main_test_reallocarray
+# include "test-reallocarray.c"
+#undef main
+
int main(int argc, char *argv[])
{
main_test_libpython();
@@ -217,8 +217,8 @@ int main(int argc, char *argv[])
main_test_sched_getcpu();
main_test_sdt();
main_test_setns();
- main_test_libopencsd();
main_test_libaio();
+ main_test_reallocarray();
return 0;
}
diff --git a/tools/build/feature/test-get_current_dir_name.c b/tools/build/feature/test-get_current_dir_name.c
index 573000f93212..c3c201691b4f 100644
--- a/tools/build/feature/test-get_current_dir_name.c
+++ b/tools/build/feature/test-get_current_dir_name.c
@@ -8,3 +8,4 @@ int main(void)
free(get_current_dir_name());
return 0;
}
+#undef _GNU_SOURCE
diff --git a/tools/build/feature/test-libpython.c b/tools/build/feature/test-libpython.c
index 0c1641b0d9a7..371c9113e49d 100644
--- a/tools/build/feature/test-libpython.c
+++ b/tools/build/feature/test-libpython.c
@@ -7,3 +7,4 @@ int main(void)
return 0;
}
+#undef _GNU_SOURCE
diff --git a/tools/build/feature/test-reallocarray.c b/tools/build/feature/test-reallocarray.c
index 8170de35150d..8f6743e31da7 100644
--- a/tools/build/feature/test-reallocarray.c
+++ b/tools/build/feature/test-reallocarray.c
@@ -6,3 +6,5 @@ int main(void)
{
return !!reallocarray(NULL, 1, 1);
}
+
+#undef _GNU_SOURCE
diff --git a/tools/build/feature/test-sched_getcpu.c b/tools/build/feature/test-sched_getcpu.c
index e448deb4124c..48995ac7911e 100644
--- a/tools/build/feature/test-sched_getcpu.c
+++ b/tools/build/feature/test-sched_getcpu.c
@@ -8,3 +8,5 @@ int main(void)
{
return sched_getcpu();
}
+
+#undef _GNU_SOURCE
diff --git a/tools/build/feature/test-setns.c b/tools/build/feature/test-setns.c
index 1f714d2a658b..4a1581ae7a55 100644
--- a/tools/build/feature/test-setns.c
+++ b/tools/build/feature/test-setns.c
@@ -5,3 +5,4 @@ int main(void)
{
return setns(0, 0);
}
+#undef _GNU_SOURCE
diff --git a/tools/debugging/Makefile b/tools/debugging/Makefile
new file mode 100644
index 000000000000..e2b7c1a6fb8f
--- /dev/null
+++ b/tools/debugging/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for debugging tools
+
+PREFIX ?= /usr
+BINDIR ?= bin
+INSTALL ?= install
+
+TARGET = kernel-chktaint
+
+all: $(TARGET)
+
+clean:
+
+install: kernel-chktaint
+ $(INSTALL) -D -m 755 $(TARGET) $(DESTDIR)$(PREFIX)/$(BINDIR)/$(TARGET)
+
diff --git a/tools/debugging/kernel-chktaint b/tools/debugging/kernel-chktaint
new file mode 100755
index 000000000000..2240cb56e6e5
--- /dev/null
+++ b/tools/debugging/kernel-chktaint
@@ -0,0 +1,202 @@
+#! /bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Randy Dunlap <rdunlap@infradead.org>, 2018
+# Thorsten Leemhuis <linux@leemhuis.info>, 2018
+
+usage()
+{
+ cat <<EOF
+usage: ${0##*/}
+ ${0##*/} <int>
+
+Call without parameters to decode /proc/sys/kernel/tainted.
+
+Call with a positive integer as parameter to decode a value you
+retrieved from /proc/sys/kernel/tainted on another system.
+
+EOF
+}
+
+if [ "$1"x != "x" ]; then
+	if [ "$1"x = "--helpx" ] || [ "$1"x = "-hx" ] ; then
+ usage
+ exit 1
+ elif [ $1 -ge 0 ] 2>/dev/null ; then
+ taint=$1
+ else
+		echo "Error: Parameter '$1' not a positive integer. Aborting." >&2
+ exit 1
+ fi
+else
+ TAINTFILE="/proc/sys/kernel/tainted"
+ if [ ! -r $TAINTFILE ]; then
+ echo "No file: $TAINTFILE"
+ exit
+ fi
+
+ taint=`cat $TAINTFILE`
+fi
+
+if [ $taint -eq 0 ]; then
+ echo "Kernel not Tainted"
+ exit
+else
+ echo "Kernel is \"tainted\" for the following reasons:"
+fi
+
+T=$taint
+out=
+
+addout() {
+ out=$out$1
+}
+
+if [ `expr $T % 2` -eq 0 ]; then
+ addout "G"
+else
+ addout "P"
+ echo " * proprietary module was loaded (#0)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "F"
+ echo " * module was force loaded (#1)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "S"
+ echo " * SMP kernel oops on an officially SMP incapable processor (#2)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "R"
+ echo " * module was force unloaded (#3)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "M"
+ echo " * processor reported a Machine Check Exception (MCE) (#4)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "B"
+ echo " * bad page referenced or some unexpected page flags (#5)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "U"
+ echo " * taint requested by userspace application (#6)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "D"
+ echo " * kernel died recently, i.e. there was an OOPS or BUG (#7)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "A"
+ echo " * an ACPI table was overridden by user (#8)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "W"
+ echo " * kernel issued warning (#9)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "C"
+ echo " * staging driver was loaded (#10)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "I"
+ echo " * workaround for bug in platform firmware applied (#11)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "O"
+ echo " * externally-built ('out-of-tree') module was loaded (#12)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "E"
+ echo " * unsigned module was loaded (#13)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "L"
+ echo " * soft lockup occurred (#14)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "K"
+ echo " * kernel has been live patched (#15)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "X"
+ echo " * auxiliary taint, defined for and used by distros (#16)"
+
+fi
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "T"
+ echo " * kernel was built with the struct randomization plugin (#17)"
+fi
+
+echo "For a more detailed explanation of the various taint flags see"
+echo " Documentation/admin-guide/tainted-kernels.rst in the the Linux kernel sources"
+echo " or https://kernel.org/doc/html/latest/admin-guide/tainted-kernels.html"
+echo "Raw taint value as int/string: $taint/'$out'"
+#EOF#
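
To make the decoding concrete, consider a hypothetical taint value of 4609: it decomposes as 4096 + 512 + 1, i.e. bits #12, #9 and #0, so the script would report an externally-built ('out-of-tree') module, a kernel warning and a proprietary module, and the raw flag string would contain 'O', 'W' and 'P'.
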
diff --git a/tools/firmware/ihex2fw.c b/tools/firmware/ihex2fw.c
index b58dd061e978..8925b60e51f5 100644
--- a/tools/firmware/ihex2fw.c
+++ b/tools/firmware/ihex2fw.c
@@ -24,6 +24,10 @@
#include <getopt.h>
+#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
+#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
+#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
+
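
ALIGN(x, a) rounds x up to the next multiple of a, where a must be a power of two; in this patch it replaces the open-coded "(x + 3) & ~3" used for both the record allocation size and the write length. A tiny illustration, values hypothetical:

#include <stdio.h>

#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))

int main(void)
{
	/* (45 + 3) & ~3 = 48; 48 is already a multiple of 4 */
	printf("%u %u\n", ALIGN(45U, 4), ALIGN(48U, 4));	/* prints "48 48" */
	return 0;
}
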
struct ihex_binrec {
struct ihex_binrec *next; /* not part of the real data structure */
uint32_t addr;
@@ -131,6 +135,7 @@ int main(int argc, char **argv)
static int process_ihex(uint8_t *data, ssize_t size)
{
struct ihex_binrec *record;
+ size_t record_size;
uint32_t offset = 0;
uint32_t data32;
uint8_t type, crc = 0, crcbyte = 0;
@@ -157,12 +162,13 @@ next_record:
len <<= 8;
len += hex(data + i, &crc); i += 2;
}
- record = malloc((sizeof (*record) + len + 3) & ~3);
+ record_size = ALIGN(sizeof(*record) + len, 4);
+ record = malloc(record_size);
if (!record) {
fprintf(stderr, "out of memory for records\n");
return -ENOMEM;
}
- memset(record, 0, (sizeof(*record) + len + 3) & ~3);
+ memset(record, 0, record_size);
record->len = len;
/* now check if we have enough data to read everything */
@@ -259,13 +265,18 @@ static void file_record(struct ihex_binrec *record)
*p = record;
}
+static uint16_t ihex_binrec_size(struct ihex_binrec *p)
+{
+ return p->len + sizeof(p->addr) + sizeof(p->len);
+}
+
static int output_records(int outfd)
{
unsigned char zeroes[6] = {0, 0, 0, 0, 0, 0};
struct ihex_binrec *p = records;
while (p) {
- uint16_t writelen = (p->len + 9) & ~3;
+ uint16_t writelen = ALIGN(ihex_binrec_size(p), 4);
p->addr = htonl(p->addr);
p->len = htons(p->len);
diff --git a/tools/iio/iio_event_monitor.c b/tools/iio/iio_event_monitor.c
index ac2de6b7e89f..7bf9bde28bcc 100644
--- a/tools/iio/iio_event_monitor.c
+++ b/tools/iio/iio_event_monitor.c
@@ -60,6 +60,7 @@ static const char * const iio_chan_type_name_spec[] = {
[IIO_GRAVITY] = "gravity",
[IIO_POSITIONRELATIVE] = "positionrelative",
[IIO_PHASE] = "phase",
+ [IIO_MASSCONCENTRATION] = "massconcentration",
};
static const char * const iio_ev_type_text[] = {
@@ -114,7 +115,13 @@ static const char * const iio_modifier_names[] = {
[IIO_MOD_I] = "i",
[IIO_MOD_Q] = "q",
[IIO_MOD_CO2] = "co2",
+ [IIO_MOD_ETHANOL] = "ethanol",
+ [IIO_MOD_H2] = "h2",
[IIO_MOD_VOC] = "voc",
+ [IIO_MOD_PM1] = "pm1",
+ [IIO_MOD_PM2P5] = "pm2p5",
+ [IIO_MOD_PM4] = "pm4",
+ [IIO_MOD_PM10] = "pm10",
};
static bool event_is_known(struct iio_event_data *event)
@@ -156,6 +163,7 @@ static bool event_is_known(struct iio_event_data *event)
case IIO_GRAVITY:
case IIO_POSITIONRELATIVE:
case IIO_PHASE:
+ case IIO_MASSCONCENTRATION:
break;
default:
return false;
@@ -199,7 +207,13 @@ static bool event_is_known(struct iio_event_data *event)
case IIO_MOD_I:
case IIO_MOD_Q:
case IIO_MOD_CO2:
+ case IIO_MOD_ETHANOL:
+ case IIO_MOD_H2:
case IIO_MOD_VOC:
+ case IIO_MOD_PM1:
+ case IIO_MOD_PM2P5:
+ case IIO_MOD_PM4:
+ case IIO_MOD_PM10:
break;
default:
return false;
diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
index 3040830d7797..84545666a09c 100644
--- a/tools/iio/iio_generic_buffer.c
+++ b/tools/iio/iio_generic_buffer.c
@@ -330,7 +330,7 @@ static const struct option longopts[] = {
int main(int argc, char **argv)
{
- unsigned long long num_loops = 2;
+ long long num_loops = 2;
unsigned long timedelay = 1000000;
unsigned long buf_len = 128;
diff --git a/tools/include/linux/filter.h b/tools/include/linux/filter.h
index af55acf73e75..cce0b02c0e28 100644
--- a/tools/include/linux/filter.h
+++ b/tools/include/linux/filter.h
@@ -199,6 +199,16 @@
.off = OFF, \
.imm = 0 })
+/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */
+
+#define BPF_JMP32_REG(OP, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
@@ -209,6 +219,16 @@
.off = OFF, \
.imm = IMM })
+/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */
+
+#define BPF_JMP32_IMM(OP, DST, IMM, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = OFF, \
+ .imm = IMM })
+
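For illustration only, a small sketch using the new macro to build a raw instruction sequence that compares only the low 32 bits of r1. How the program would then be loaded is left out; it assumes the tools/ copies of linux/bpf.h and linux/filter.h are on the include path.

#include <stdio.h>
#include <linux/bpf.h>
#include <linux/filter.h>	/* this tools/include copy, providing the insn macros */

int main(void)
{
	/* return 1 if the low 32 bits of r1 equal 7, else 0 */
	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_JMP32_IMM(BPF_JEQ, BPF_REG_1, 7, 1),	/* 32-bit compare, skip next insn if equal */
		BPF_EXIT_INSN(),
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
	};

	printf("%zu instructions\n", sizeof(prog) / sizeof(prog[0]));
	return 0;
}
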
/* Unconditional jumps, goto pc + off16 */
#define BPF_JMP_A(OFF) \
diff --git a/tools/include/linux/numa.h b/tools/include/linux/numa.h
new file mode 100644
index 000000000000..110b0e5d0fb0
--- /dev/null
+++ b/tools/include/linux/numa.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_NUMA_H
+#define _LINUX_NUMA_H
+
+
+#ifdef CONFIG_NODES_SHIFT
+#define NODES_SHIFT CONFIG_NODES_SHIFT
+#else
+#define NODES_SHIFT 0
+#endif
+
+#define MAX_NUMNODES (1 << NODES_SHIFT)
+
+#define NUMA_NO_NODE (-1)
+
+#endif /* _LINUX_NUMA_H */
diff --git a/tools/include/linux/rbtree.h b/tools/include/linux/rbtree.h
index 112582253dd0..8e9ed4786269 100644
--- a/tools/include/linux/rbtree.h
+++ b/tools/include/linux/rbtree.h
@@ -43,13 +43,28 @@ struct rb_root {
struct rb_node *rb_node;
};
+/*
+ * Leftmost-cached rbtrees.
+ *
+ * We do not cache the rightmost node based on footprint
+ * size vs number of potential users that could benefit
+ * from O(1) rb_last(). Just not worth it, users that want
+ * this feature can always implement the logic explicitly.
+ * Furthermore, users that want to cache both pointers may
+ * find it a bit asymmetric, but that's ok.
+ */
+struct rb_root_cached {
+ struct rb_root rb_root;
+ struct rb_node *rb_leftmost;
+};
#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3))
#define RB_ROOT (struct rb_root) { NULL, }
+#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
#define rb_entry(ptr, type, member) container_of(ptr, type, member)
-#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
+#define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL)
/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
#define RB_EMPTY_NODE(node) \
@@ -68,6 +83,12 @@ extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);
+extern void rb_insert_color_cached(struct rb_node *,
+ struct rb_root_cached *, bool);
+extern void rb_erase_cached(struct rb_node *node, struct rb_root_cached *);
+/* Same as rb_first(), but O(1) */
+#define rb_first_cached(root) (root)->rb_leftmost
+
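For illustration, a sketch of the usual rbtree insert pattern adapted to the cached variant (names are hypothetical; it would be compiled against this header together with tools/lib/rbtree.c): the caller tracks whether the new node ended up leftmost and passes that to rb_insert_color_cached(), after which rb_first_cached() returns the smallest node without walking down the tree.

#include <stdio.h>
#include <stdbool.h>
#include <linux/rbtree.h>	/* this tools/include copy */

struct item {
	struct rb_node node;
	long key;
};

static void item_insert(struct rb_root_cached *root, struct item *new)
{
	struct rb_node **link = &root->rb_root.rb_node, *parent = NULL;
	bool leftmost = true;

	while (*link) {
		struct item *cur = rb_entry(*link, struct item, node);

		parent = *link;
		if (new->key < cur->key) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = false;	/* went right at least once */
		}
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color_cached(&new->node, root, leftmost);
}

int main(void)
{
	struct rb_root_cached root = RB_ROOT_CACHED;
	struct item a = { .key = 2 }, b = { .key = 1 };

	item_insert(&root, &a);
	item_insert(&root, &b);

	/* smallest key in O(1), no left-spine walk */
	printf("leftmost key: %ld\n",
	       rb_entry(rb_first_cached(&root), struct item, node)->key);
	return 0;
}
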
/* Postorder iteration - always visit the parent after its children */
extern struct rb_node *rb_first_postorder(const struct rb_root *);
extern struct rb_node *rb_next_postorder(const struct rb_node *);
@@ -75,6 +96,8 @@ extern struct rb_node *rb_next_postorder(const struct rb_node *);
/* Fast replacement of a single node without remove/rebalance/add/rebalance */
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
+extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
+ struct rb_root_cached *root);
static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
struct rb_node **rb_link)
@@ -90,12 +113,29 @@ static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
____ptr ? rb_entry(____ptr, type, member) : NULL; \
})
-
-/*
- * Handy for checking that we are not deleting an entry that is
- * already in a list, found in block/{blk-throttle,cfq-iosched}.c,
- * probably should be moved to lib/rbtree.c...
+/**
+ * rbtree_postorder_for_each_entry_safe - iterate in post-order over rb_root of
+ * given type allowing the backing memory of @pos to be invalidated
+ *
+ * @pos: the 'type *' to use as a loop cursor.
+ * @n: another 'type *' to use as temporary storage
+ * @root: 'rb_root *' of the rbtree.
+ * @field: the name of the rb_node field within 'type'.
+ *
+ * rbtree_postorder_for_each_entry_safe() provides a similar guarantee as
+ * list_for_each_entry_safe() and allows the iteration to continue independent
+ * of changes to @pos by the body of the loop.
+ *
+ * Note, however, that it cannot handle other modifications that re-order the
+ * rbtree it is iterating over. This includes calling rb_erase() on @pos, as
+ * rb_erase() may rebalance the tree, causing us to miss some nodes.
*/
+#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
+ for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
+ pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \
+ typeof(*pos), field); 1; }); \
+ pos = n)
+
static inline void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
rb_erase(n, root);
diff --git a/tools/include/linux/rbtree_augmented.h b/tools/include/linux/rbtree_augmented.h
index 43be941db695..d008e1404580 100644
--- a/tools/include/linux/rbtree_augmented.h
+++ b/tools/include/linux/rbtree_augmented.h
@@ -44,7 +44,9 @@ struct rb_augment_callbacks {
void (*rotate)(struct rb_node *old, struct rb_node *new);
};
-extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+extern void __rb_insert_augmented(struct rb_node *node,
+ struct rb_root *root,
+ bool newleft, struct rb_node **leftmost,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
/*
* Fixup the rbtree and update the augmented information when rebalancing.
@@ -60,7 +62,16 @@ static inline void
rb_insert_augmented(struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment)
{
- __rb_insert_augmented(node, root, augment->rotate);
+ __rb_insert_augmented(node, root, false, NULL, augment->rotate);
+}
+
+static inline void
+rb_insert_augmented_cached(struct rb_node *node,
+ struct rb_root_cached *root, bool newleft,
+ const struct rb_augment_callbacks *augment)
+{
+ __rb_insert_augmented(node, &root->rb_root,
+ newleft, &root->rb_leftmost, augment->rotate);
}
#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \
@@ -93,7 +104,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
old->rbaugmented = rbcompute(old); \
} \
rbstatic const struct rb_augment_callbacks rbname = { \
- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
+ .propagate = rbname ## _propagate, \
+ .copy = rbname ## _copy, \
+ .rotate = rbname ## _rotate \
};
@@ -126,11 +139,11 @@ __rb_change_child(struct rb_node *old, struct rb_node *new,
{
if (parent) {
if (parent->rb_left == old)
- parent->rb_left = new;
+ WRITE_ONCE(parent->rb_left, new);
else
- parent->rb_right = new;
+ WRITE_ONCE(parent->rb_right, new);
} else
- root->rb_node = new;
+ WRITE_ONCE(root->rb_node, new);
}
extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
@@ -138,12 +151,17 @@ extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
static __always_inline struct rb_node *
__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
+ struct rb_node **leftmost,
const struct rb_augment_callbacks *augment)
{
- struct rb_node *child = node->rb_right, *tmp = node->rb_left;
+ struct rb_node *child = node->rb_right;
+ struct rb_node *tmp = node->rb_left;
struct rb_node *parent, *rebalance;
unsigned long pc;
+ if (leftmost && node == *leftmost)
+ *leftmost = rb_next(node);
+
if (!tmp) {
/*
* Case 1: node to erase has no more than 1 child (easy!)
@@ -170,6 +188,7 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
tmp = parent;
} else {
struct rb_node *successor = child, *child2;
+
tmp = child->rb_left;
if (!tmp) {
/*
@@ -183,6 +202,7 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
*/
parent = successor;
child2 = successor->rb_right;
+
augment->copy(node, successor);
} else {
/*
@@ -204,19 +224,23 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
successor = tmp;
tmp = tmp->rb_left;
} while (tmp);
- parent->rb_left = child2 = successor->rb_right;
- successor->rb_right = child;
+ child2 = successor->rb_right;
+ WRITE_ONCE(parent->rb_left, child2);
+ WRITE_ONCE(successor->rb_right, child);
rb_set_parent(child, successor);
+
augment->copy(node, successor);
augment->propagate(parent, successor);
}
- successor->rb_left = tmp = node->rb_left;
+ tmp = node->rb_left;
+ WRITE_ONCE(successor->rb_left, tmp);
rb_set_parent(tmp, successor);
pc = node->__rb_parent_color;
tmp = __rb_parent(pc);
__rb_change_child(node, successor, tmp, root);
+
if (child2) {
successor->__rb_parent_color = pc;
rb_set_parent_color(child2, parent, RB_BLACK);
@@ -237,9 +261,21 @@ static __always_inline void
rb_erase_augmented(struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment)
{
- struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
+ struct rb_node *rebalance = __rb_erase_augmented(node, root,
+ NULL, augment);
if (rebalance)
__rb_erase_color(rebalance, root, augment->rotate);
}
-#endif /* _TOOLS_LINUX_RBTREE_AUGMENTED_H */
+static __always_inline void
+rb_erase_augmented_cached(struct rb_node *node, struct rb_root_cached *root,
+ const struct rb_augment_callbacks *augment)
+{
+ struct rb_node *rebalance = __rb_erase_augmented(node, &root->rb_root,
+ &root->rb_leftmost,
+ augment);
+ if (rebalance)
+ __rb_erase_color(rebalance, &root->rb_root, augment->rotate);
+}
+
+#endif /* _TOOLS_LINUX_RBTREE_AUGMENTED_H */
diff --git a/tools/testing/selftests/rcutorture/bin/nolibc.h b/tools/include/nolibc/nolibc.h
index f98f5b92d3eb..1708e9f9f8aa 100644
--- a/tools/testing/selftests/rcutorture/bin/nolibc.h
+++ b/tools/include/nolibc/nolibc.h
@@ -3,7 +3,85 @@
* Copyright (C) 2017-2018 Willy Tarreau <w@1wt.eu>
*/
-/* some archs (at least aarch64) don't expose the regular syscalls anymore by
+/*
+ * This file is designed to be used as a libc alternative for minimal programs
+ * with very limited requirements. It consists of a small number of syscall and
+ * type definitions, and the minimal startup code needed to call main().
+ * All syscalls are declared as static functions so that they can be optimized
+ * away by the compiler when not used.
+ *
+ * Syscalls are split into 3 levels:
+ *  - The lower level is the arch-specific syscall() definition, consisting of
+ * assembly code in compound expressions. These are called my_syscall0() to
+ * my_syscall6() depending on the number of arguments. The MIPS
+ * implementation is limited to 5 arguments. All input arguments are cast
+ * to a long stored in a register. These expressions always return the
+ * syscall's return value as a signed long value which is often either a
+ * pointer or the negated errno value.
+ *
+ * - The second level is mostly architecture-independent. It is made of
+ * static functions called sys_<name>() which rely on my_syscallN()
+ * depending on the syscall definition. These functions are responsible
+ * for exposing the appropriate types for the syscall arguments (int,
+ * pointers, etc) and for setting the appropriate return type (often int).
+ * A few of them are architecture-specific because the syscalls are not all
+ * mapped exactly the same among architectures. For example, some archs do
+ * not implement select() and need pselect6() instead, so the sys_select()
+ * function will have to abstract this.
+ *
+ * - The third level is the libc call definition. It exposes the lower raw
+ * sys_<name>() calls in a way that looks like what a libc usually does,
+ * takes care of specific input values, and of setting errno upon error.
+ * There can be minor variations compared to standard libc calls. For
+ * example the open() call always takes 3 args here.
+ *
+ * The errno variable is declared static and unused. This way it can be
+ * optimized away if not used. However this means that a program made of
+ * multiple C files may observe different errno values (one per C file). For
+ * the type of programs this project targets it usually is not a problem. The
+ * resulting program may even be reduced in size by defining the NOLIBC_IGNORE_ERRNO
+ * macro, in which case the errno value will never be assigned.
+ *
+ * Some stdint-like integer types are defined. These are valid on all currently
+ * supported architectures, because signs are enforced, ints are assumed to be
+ * 32 bits, longs the size of a pointer and long long 64 bits. If more
+ * architectures have to be supported, this may need to be adapted.
+ *
+ * Some macro definitions like the O_* values passed to open(), and some
+ * structures like the sys_stat struct depend on the architecture.
+ *
+ * The definitions start with the architecture-specific parts, which are picked
+ * based on what the compiler knows about the target architecture, and are
+ * completed with the generic code. Since it is the compiler which sets the
+ * target architecture, cross-compiling normally works out of the box without
+ * having to specify anything.
+ *
+ * Finally some very common libc-level functions are provided. It is the case
+ * for a few functions usually found in string.h, ctype.h, or stdlib.h. Nothing
+ * is currently provided regarding stdio emulation.
+ *
+ * The macro NOLIBC is always defined, so that it is possible for a program to
+ * check this macro to know if it is being built against nolibc and decide to
+ * disable some features or simply not to include some standard libc files.
+ *
+ * Ideally this file should be split into multiple files for easier long term
+ * maintenance, but provided as a single file as it is now, it's quite
+ * convenient to use. Maybe some variations involving a set of includes at the
+ * top could work.
+ *
+ * A simple static executable may be built this way:
+ * $ gcc -fno-asynchronous-unwind-tables -fno-ident -s -Os -nostdlib \
+ * -static -include nolibc.h -lgcc -o hello hello.c
+ *
+ * A very useful calling convention table may be found here:
+ * http://man7.org/linux/man-pages/man2/syscall.2.html
+ *
+ * This doc is quite convenient though not necessarily up to date:
+ * https://w3challs.com/syscalls/
+ *
+ */
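As a hedged illustration of the three levels described above (not code from this file; my_syscall3() and __NR_write are assumed to be provided by the arch-specific part of the header and the kernel headers), a write() call stacks up roughly like this:

	/* level 2: typed wrapper over the raw level-1 macro */
	static ssize_t sys_write(int fd, const void *buf, size_t count)
	{
		return my_syscall3(__NR_write, fd, buf, count);
	}

	/* level 3: libc-style call that folds the negated errno value */
	static ssize_t write(int fd, const void *buf, size_t count)
	{
		ssize_t ret = sys_write(fd, buf, count);

		if (ret < 0) {
	#ifndef NOLIBC_IGNORE_ERRNO
			errno = -ret;
	#endif
			ret = -1;
		}
		return ret;
	}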
+
+/* Some archs (at least aarch64) don't expose the regular syscalls anymore by
* default, either because they have an "_at" replacement, or because there are
* more modern alternatives. For now we'd rather still use them.
*/
@@ -19,18 +97,6 @@
#define NOLIBC
-/* Build a static executable this way :
- * $ gcc -fno-asynchronous-unwind-tables -fno-ident -s -Os -nostdlib \
- * -static -include nolibc.h -lgcc -o hello hello.c
- *
- * Useful calling convention table found here :
- * http://man7.org/linux/man-pages/man2/syscall.2.html
- *
- * This doc is even better :
- * https://w3challs.com/syscalls/
- */
-
-
/* this way it will be removed if unused */
static int errno;
@@ -81,9 +147,9 @@ typedef signed long time_t;
/* for poll() */
struct pollfd {
- int fd;
- short int events;
- short int revents;
+ int fd;
+ short int events;
+ short int revents;
};
/* for select() */
@@ -239,7 +305,7 @@ struct stat {
"syscall\n" \
: "=a" (_ret) \
: "0"(_num) \
- : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
+ : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
); \
_ret; \
})
@@ -255,7 +321,7 @@ struct stat {
: "=a" (_ret) \
: "r"(_arg1), \
"0"(_num) \
- : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
+ : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
); \
_ret; \
})
@@ -272,7 +338,7 @@ struct stat {
: "=a" (_ret) \
: "r"(_arg1), "r"(_arg2), \
"0"(_num) \
- : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
+ : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
); \
_ret; \
})
@@ -290,7 +356,7 @@ struct stat {
: "=a" (_ret) \
: "r"(_arg1), "r"(_arg2), "r"(_arg3), \
"0"(_num) \
- : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
+ : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
); \
_ret; \
})
@@ -1006,7 +1072,7 @@ struct sys_stat_struct {
: "=r"(_num), "=r"(_arg4) \
: "r"(_num) \
: "memory", "cc", "at", "v1", "hi", "lo", \
- \
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
); \
_arg4 ? -_num : _num; \
})
@@ -1025,7 +1091,7 @@ struct sys_stat_struct {
: "0"(_num), \
"r"(_arg1) \
: "memory", "cc", "at", "v1", "hi", "lo", \
- \
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
); \
_arg4 ? -_num : _num; \
})
@@ -1045,7 +1111,7 @@ struct sys_stat_struct {
: "0"(_num), \
"r"(_arg1), "r"(_arg2) \
: "memory", "cc", "at", "v1", "hi", "lo", \
- \
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
); \
_arg4 ? -_num : _num; \
})
@@ -1066,7 +1132,7 @@ struct sys_stat_struct {
: "0"(_num), \
"r"(_arg1), "r"(_arg2), "r"(_arg3) \
: "memory", "cc", "at", "v1", "hi", "lo", \
- \
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
); \
_arg4 ? -_num : _num; \
})
@@ -1087,7 +1153,7 @@ struct sys_stat_struct {
: "0"(_num), \
"r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4) \
: "memory", "cc", "at", "v1", "hi", "lo", \
- \
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
); \
_arg4 ? -_num : _num; \
})
@@ -1110,7 +1176,7 @@ struct sys_stat_struct {
: "0"(_num), \
"r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5) \
: "memory", "cc", "at", "v1", "hi", "lo", \
- \
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
); \
_arg4 ? -_num : _num; \
})
diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h
index fd92ce8388fc..57aaeaf8e192 100644
--- a/tools/include/uapi/asm/bitsperlong.h
+++ b/tools/include/uapi/asm/bitsperlong.h
@@ -15,6 +15,8 @@
#include "../../arch/ia64/include/uapi/asm/bitsperlong.h"
#elif defined(__riscv)
#include "../../arch/riscv/include/uapi/asm/bitsperlong.h"
+#elif defined(__alpha__)
+#include "../../arch/alpha/include/uapi/asm/bitsperlong.h"
#else
#include <asm-generic/bitsperlong.h>
#endif
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 91c43884f295..3c38ac9a92a7 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -14,6 +14,7 @@
/* Extended instruction set based on top of classic BPF */
/* instruction classes */
+#define BPF_JMP32 0x06 /* jmp mode in word width */
#define BPF_ALU64 0x07 /* alu mode in double word width */
/* ld/ldx fields */
@@ -266,6 +267,7 @@ enum bpf_attach_type {
#define BPF_ANY 0 /* create new element or update existing */
#define BPF_NOEXIST 1 /* create new element if it didn't exist */
#define BPF_EXIST 2 /* update existing element */
+#define BPF_F_LOCK 4 /* spin_lock-ed map_lookup/map_update */
/* flags for BPF_MAP_CREATE command */
#define BPF_F_NO_PREALLOC (1U << 0)
@@ -2014,6 +2016,19 @@ union bpf_attr {
* Only works if *skb* contains an IPv6 packet. Insert a
* Segment Routing Header (**struct ipv6_sr_hdr**) inside
* the IPv6 header.
+ * **BPF_LWT_ENCAP_IP**
+ * IP encapsulation (GRE/GUE/IPIP/etc). The outer header
+ * must be IPv4 or IPv6, followed by zero or more
+ * additional headers, up to LWT_BPF_MAX_HEADROOM total
+ * bytes in all prepended headers. Please note that
+ * if skb_is_gso(skb) is true, no more than two headers
+ * can be prepended, and the inner header, if present,
+ * should be either GRE or UDP/GUE.
+ *
+ * BPF_LWT_ENCAP_SEG6*** types can be called by bpf programs of
+ * type BPF_PROG_TYPE_LWT_IN; BPF_LWT_ENCAP_IP type can be called
+ * by bpf programs of types BPF_PROG_TYPE_LWT_IN and
+ * BPF_PROG_TYPE_LWT_XMIT.
*
* A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
@@ -2327,6 +2342,30 @@ union bpf_attr {
* "**y**".
* Return
* 0
+ *
+ * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
+ * Description
+ * This helper gets a **struct bpf_sock** pointer such
+ * that all the fields in bpf_sock can be accessed.
+ * Return
+ * A **struct bpf_sock** pointer on success, or NULL in
+ * case of failure.
+ *
+ * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
+ * Description
+ * This helper gets a **struct bpf_tcp_sock** pointer from a
+ * **struct bpf_sock** pointer.
+ *
+ * Return
+ * A **struct bpf_tcp_sock** pointer on success, or NULL in
+ * case of failure.
+ *
+ * int bpf_skb_ecn_set_ce(struct sk_buff *skb)
+ * Description
+ * Sets ECN of IP header to ce (congestion encountered) if
+ * current value is ect (ECN capable). Works with IPv6 and IPv4.
+ * Return
+ * 1 if set, 0 if not set.
*/
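As a hedged sketch of how the three new socket helpers above might be combined (SEC(), the helper declarations and IPPROTO_TCP come from libbpf's bpf_helpers.h and <linux/in.h>, which are assumptions here; skb->sk is the field added to struct __sk_buff further down in this patch):

	SEC("cgroup_skb/egress")
	int tcp_ecn_example(struct __sk_buff *skb)
	{
		struct bpf_sock *sk = skb->sk;
		struct bpf_tcp_sock *tp;

		if (!sk)
			return 1;
		sk = bpf_sk_fullsock(sk);		/* full sock, all fields readable */
		if (!sk || sk->protocol != IPPROTO_TCP)
			return 1;
		tp = bpf_tcp_sock(sk);			/* TCP-specific view */
		if (!tp)
			return 1;
		if (tp->snd_cwnd > 100)			/* arbitrary threshold for the example */
			bpf_skb_ecn_set_ce(skb);	/* mark CE if the packet is ECT */
		return 1;				/* 1 = allow in cgroup skb programs */
	}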
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -2421,7 +2460,12 @@ union bpf_attr {
FN(map_peek_elem), \
FN(msg_push_data), \
FN(msg_pop_data), \
- FN(rc_pointer_rel),
+ FN(rc_pointer_rel), \
+ FN(spin_lock), \
+ FN(spin_unlock), \
+ FN(sk_fullsock), \
+ FN(tcp_sock), \
+ FN(skb_ecn_set_ce),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -2494,7 +2538,8 @@ enum bpf_hdr_start_off {
/* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */
enum bpf_lwt_encap_mode {
BPF_LWT_ENCAP_SEG6,
- BPF_LWT_ENCAP_SEG6_INLINE
+ BPF_LWT_ENCAP_SEG6_INLINE,
+ BPF_LWT_ENCAP_IP,
};
#define __bpf_md_ptr(type, name) \
@@ -2540,6 +2585,8 @@ struct __sk_buff {
__bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
__u64 tstamp;
__u32 wire_len;
+ __u32 gso_segs;
+ __bpf_md_ptr(struct bpf_sock *, sk);
};
struct bpf_tunnel_key {
@@ -2581,7 +2628,15 @@ enum bpf_ret_code {
BPF_DROP = 2,
/* 3-6 reserved */
BPF_REDIRECT = 7,
- /* >127 are reserved for prog type specific return codes */
+ /* >127 are reserved for prog type specific return codes.
+ *
+ * BPF_LWT_REROUTE: used by BPF_PROG_TYPE_LWT_IN and
+ * BPF_PROG_TYPE_LWT_XMIT to indicate that skb had been
+ * changed and should be routed based on its new L3 header.
+ * (This is an L3 redirect, as opposed to L2 redirect
+ * represented by BPF_REDIRECT above).
+ */
+ BPF_LWT_REROUTE = 128,
};
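The new encap mode and return code are meant to be used together from an LWT program; a hedged sketch (the bpf_lwt_push_encap() signature, bpf_htons()/bpf_htonl() and the placeholder addresses are assumptions, not part of this diff):

	SEC("lwt_xmit")
	int ipip_encap(struct __sk_buff *skb)
	{
		struct iphdr hdr = {
			.version  = 4,
			.ihl      = 5,
			.ttl      = 64,
			.protocol = IPPROTO_IPIP,
			.saddr    = bpf_htonl(0x0a000001),	/* 10.0.0.1, placeholder */
			.daddr    = bpf_htonl(0x0a000002),	/* 10.0.0.2, placeholder */
		};

		hdr.tot_len = bpf_htons((__u16)(skb->len + sizeof(hdr)));

		if (bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_IP, &hdr, sizeof(hdr)))
			return BPF_DROP;

		/* the L3 header changed, so ask the stack to re-route the skb */
		return BPF_LWT_REROUTE;
	}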
struct bpf_sock {
@@ -2591,14 +2646,52 @@ struct bpf_sock {
__u32 protocol;
__u32 mark;
__u32 priority;
- __u32 src_ip4; /* Allows 1,2,4-byte read.
- * Stored in network byte order.
+ /* IP address also allows 1 and 2 bytes access */
+ __u32 src_ip4;
+ __u32 src_ip6[4];
+ __u32 src_port; /* host byte order */
+ __u32 dst_port; /* network byte order */
+ __u32 dst_ip4;
+ __u32 dst_ip6[4];
+ __u32 state;
+};
+
+struct bpf_tcp_sock {
+ __u32 snd_cwnd; /* Sending congestion window */
+ __u32 srtt_us; /* smoothed round trip time << 3 in usecs */
+ __u32 rtt_min;
+ __u32 snd_ssthresh; /* Slow start size threshold */
+ __u32 rcv_nxt; /* What we want to receive next */
+ __u32 snd_nxt; /* Next sequence we send */
+ __u32 snd_una; /* First byte we want an ack for */
+ __u32 mss_cache; /* Cached effective mss, not including SACKS */
+ __u32 ecn_flags; /* ECN status bits. */
+ __u32 rate_delivered; /* saved rate sample: packets delivered */
+ __u32 rate_interval_us; /* saved rate sample: time elapsed */
+ __u32 packets_out; /* Packets which are "in flight" */
+ __u32 retrans_out; /* Retransmitted packets out */
+ __u32 total_retrans; /* Total retransmits for entire connection */
+ __u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn
+ * total number of segments in.
*/
- __u32 src_ip6[4]; /* Allows 1,2,4-byte read.
- * Stored in network byte order.
+ __u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn
+ * total number of data segments in.
*/
- __u32 src_port; /* Allows 4-byte read.
- * Stored in host byte order
+ __u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut
+ * The total number of segments sent.
+ */
+ __u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut
+ * total number of data segments sent.
+ */
+ __u32 lost_out; /* Lost packets */
+ __u32 sacked_out; /* SACK'd packets */
+ __u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived
+ * sum(delta(rcv_nxt)), or how many bytes
+ * were received.
+ */
+ __u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
+ * sum(delta(snd_una)), or how many bytes
+ * were acked.
*/
};
@@ -2728,6 +2821,8 @@ struct bpf_prog_info {
__u32 jited_line_info_rec_size;
__u32 nr_prog_tags;
__aligned_u64 prog_tags;
+ __u64 run_time_ns;
+ __u64 run_cnt;
} __attribute__((aligned(8)));
struct bpf_map_info {
@@ -3054,4 +3149,7 @@ struct bpf_line_info {
__u32 line_col;
};
+struct bpf_spin_lock {
+ __u32 val;
+};
#endif /* _UAPI__LINUX_BPF_H__ */
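struct bpf_spin_lock above is embedded in a map value and driven by the new bpf_spin_lock()/bpf_spin_unlock() helpers, with BPF_F_LOCK letting user space read or update such a value atomically. A hedged sketch of the in-program pattern; the helper declarations are assumed to come from bpf_helpers.h, and the map holding the value must be created with BTF describing the value type so the verifier can locate the lock:

	struct flow_stats {
		struct bpf_spin_lock lock;
		__u64 packets;
		__u64 bytes;
	};

	/* val points at a looked-up map value of type struct flow_stats */
	static __always_inline void update_stats(struct flow_stats *val, __u32 len)
	{
		bpf_spin_lock(&val->lock);
		val->packets += 1;
		val->bytes += len;
		bpf_spin_unlock(&val->lock);
	}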
diff --git a/tools/include/uapi/linux/ethtool.h b/tools/include/uapi/linux/ethtool.h
new file mode 100644
index 000000000000..c86c3e942df9
--- /dev/null
+++ b/tools/include/uapi/linux/ethtool.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * ethtool.h: Defines for Linux ethtool.
+ *
+ * Copyright (C) 1998 David S. Miller (davem@redhat.com)
+ * Copyright 2001 Jeff Garzik <jgarzik@pobox.com>
+ * Portions Copyright 2001 Sun Microsystems (thockin@sun.com)
+ * Portions Copyright 2002 Intel (eli.kupermann@intel.com,
+ * christopher.leech@intel.com,
+ * scott.feldman@intel.com)
+ * Portions Copyright (C) Sun Microsystems 2008
+ */
+
+#ifndef _UAPI_LINUX_ETHTOOL_H
+#define _UAPI_LINUX_ETHTOOL_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#define ETHTOOL_GCHANNELS 0x0000003c /* Get no of channels */
+
+/**
+ * struct ethtool_channels - configuring number of network channels
+ * @cmd: ETHTOOL_{G,S}CHANNELS
+ * @max_rx: Read only. Maximum number of receive channels the driver supports.
+ * @max_tx: Read only. Maximum number of transmit channels the driver supports.
+ * @max_other: Read only. Maximum number of other channels the driver supports.
+ * @max_combined: Read only. Maximum number of combined channels the driver
+ * supports. A combined channel is a set of RX, TX and other queues.
+ * @rx_count: Valid values are in the range 1 to the max_rx.
+ * @tx_count: Valid values are in the range 1 to the max_tx.
+ * @other_count: Valid values are in the range 1 to the max_other.
+ * @combined_count: Valid values are in the range 1 to the max_combined.
+ *
+ * This can be used to configure RX, TX and other channels.
+ */
+
+struct ethtool_channels {
+ __u32 cmd;
+ __u32 max_rx;
+ __u32 max_tx;
+ __u32 max_other;
+ __u32 max_combined;
+ __u32 rx_count;
+ __u32 tx_count;
+ __u32 other_count;
+ __u32 combined_count;
+};
+
+#endif /* _UAPI_LINUX_ETHTOOL_H */
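A hedged user-space sketch of reading these counts with ETHTOOL_GCHANNELS (struct ifreq, SIOCETHTOOL and the AF_INET control socket come from other headers and are assumptions here):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/sockios.h>
	#include <linux/ethtool.h>

	/* sock is any AF_INET datagram socket, used only as an ioctl handle */
	static int get_channels(int sock, const char *ifname, struct ethtool_channels *ch)
	{
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		memset(ch, 0, sizeof(*ch));
		ch->cmd = ETHTOOL_GCHANNELS;
		ifr.ifr_data = (void *)ch;
		return ioctl(sock, SIOCETHTOOL, &ifr);
	}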
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
index d6533828123a..5b225ff63b48 100644
--- a/tools/include/uapi/linux/if_link.h
+++ b/tools/include/uapi/linux/if_link.h
@@ -925,6 +925,7 @@ enum {
enum {
LINK_XSTATS_TYPE_UNSPEC,
LINK_XSTATS_TYPE_BRIDGE,
+ LINK_XSTATS_TYPE_BOND,
__LINK_XSTATS_TYPE_MAX
};
#define LINK_XSTATS_TYPE_MAX (__LINK_XSTATS_TYPE_MAX - 1)
diff --git a/tools/include/uapi/linux/if_xdp.h b/tools/include/uapi/linux/if_xdp.h
new file mode 100644
index 000000000000..caed8b1614ff
--- /dev/null
+++ b/tools/include/uapi/linux/if_xdp.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * if_xdp: XDP socket user-space interface
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ * Author(s): Björn Töpel <bjorn.topel@intel.com>
+ * Magnus Karlsson <magnus.karlsson@intel.com>
+ */
+
+#ifndef _LINUX_IF_XDP_H
+#define _LINUX_IF_XDP_H
+
+#include <linux/types.h>
+
+/* Options for the sxdp_flags field */
+#define XDP_SHARED_UMEM (1 << 0)
+#define XDP_COPY (1 << 1) /* Force copy-mode */
+#define XDP_ZEROCOPY (1 << 2) /* Force zero-copy mode */
+
+struct sockaddr_xdp {
+ __u16 sxdp_family;
+ __u16 sxdp_flags;
+ __u32 sxdp_ifindex;
+ __u32 sxdp_queue_id;
+ __u32 sxdp_shared_umem_fd;
+};
+
+struct xdp_ring_offset {
+ __u64 producer;
+ __u64 consumer;
+ __u64 desc;
+};
+
+struct xdp_mmap_offsets {
+ struct xdp_ring_offset rx;
+ struct xdp_ring_offset tx;
+ struct xdp_ring_offset fr; /* Fill */
+ struct xdp_ring_offset cr; /* Completion */
+};
+
+/* XDP socket options */
+#define XDP_MMAP_OFFSETS 1
+#define XDP_RX_RING 2
+#define XDP_TX_RING 3
+#define XDP_UMEM_REG 4
+#define XDP_UMEM_FILL_RING 5
+#define XDP_UMEM_COMPLETION_RING 6
+#define XDP_STATISTICS 7
+
+struct xdp_umem_reg {
+ __u64 addr; /* Start of packet data area */
+ __u64 len; /* Length of packet data area */
+ __u32 chunk_size;
+ __u32 headroom;
+};
+
+struct xdp_statistics {
+ __u64 rx_dropped; /* Dropped for reasons other than invalid desc */
+ __u64 rx_invalid_descs; /* Dropped due to invalid descriptor */
+ __u64 tx_invalid_descs; /* Dropped due to invalid descriptor */
+};
+
+/* Pgoff for mmaping the rings */
+#define XDP_PGOFF_RX_RING 0
+#define XDP_PGOFF_TX_RING 0x80000000
+#define XDP_UMEM_PGOFF_FILL_RING 0x100000000ULL
+#define XDP_UMEM_PGOFF_COMPLETION_RING 0x180000000ULL
+
+/* Rx/Tx descriptor */
+struct xdp_desc {
+ __u64 addr;
+ __u32 len;
+ __u32 options;
+};
+
+/* UMEM descriptor is __u64 */
+
+#endif /* _LINUX_IF_XDP_H */
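A hedged sketch of the setup flow this header implies (AF_XDP and SOL_XDP come from the socket headers and are assumptions here; in practice the fill/completion and RX/TX rings must also be sized via the XDP_*_RING options and mmap()ed using struct xdp_mmap_offsets before traffic flows, which is omitted):

	#include <sys/socket.h>
	#include <linux/if_xdp.h>

	/* umem_area is an assumed page-aligned buffer of umem_size bytes */
	static int xsk_example(void *umem_area, __u64 umem_size, int ifindex)
	{
		struct xdp_umem_reg reg = {
			.addr       = (__u64)(unsigned long)umem_area,
			.len        = umem_size,
			.chunk_size = 2048,
			.headroom   = 0,
		};
		struct sockaddr_xdp sxdp = {
			.sxdp_family   = AF_XDP,
			.sxdp_ifindex  = ifindex,
			.sxdp_queue_id = 0,
			.sxdp_flags    = XDP_COPY,	/* force copy mode for the example */
		};
		int fd = socket(AF_XDP, SOCK_RAW, 0);

		if (fd < 0)
			return -1;
		if (setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &reg, sizeof(reg)) < 0)
			return -1;
		/* ... create and mmap the rings here ... */
		return bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp)) < 0 ? -1 : fd;
	}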
diff --git a/tools/include/uapi/linux/in.h b/tools/include/uapi/linux/in.h
index f6052e70bf40..a55cb8b10165 100644
--- a/tools/include/uapi/linux/in.h
+++ b/tools/include/uapi/linux/in.h
@@ -268,7 +268,7 @@ struct sockaddr_in {
#define IN_MULTICAST(a) IN_CLASSD(a)
#define IN_MULTICAST_NET 0xe0000000
-#define IN_BADCLASS(a) ((((long int) (a) ) == 0xffffffff)
+#define IN_BADCLASS(a) (((long int) (a) ) == (long int)0xffffffff)
#define IN_EXPERIMENTAL(a) IN_BADCLASS((a))
#define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000)
diff --git a/tools/include/uapi/linux/lirc.h b/tools/include/uapi/linux/lirc.h
index f189931042a7..45fcbf99d72e 100644
--- a/tools/include/uapi/linux/lirc.h
+++ b/tools/include/uapi/linux/lirc.h
@@ -134,6 +134,12 @@
#define LIRC_SET_WIDEBAND_RECEIVER _IOW('i', 0x00000023, __u32)
/*
+ * Return the recording timeout, which is either set by
+ * the ioctl LIRC_SET_REC_TIMEOUT or by the kernel after setting the protocols.
+ */
+#define LIRC_GET_REC_TIMEOUT _IOR('i', 0x00000024, __u32)
+
+/*
* struct lirc_scancode - decoded scancode with protocol for use with
* LIRC_MODE_SCANCODE
*
@@ -186,6 +192,9 @@ struct lirc_scancode {
* @RC_PROTO_XMP: XMP protocol
* @RC_PROTO_CEC: CEC protocol
* @RC_PROTO_IMON: iMon Pad protocol
+ * @RC_PROTO_RCMM12: RC-MM protocol 12 bits
+ * @RC_PROTO_RCMM24: RC-MM protocol 24 bits
+ * @RC_PROTO_RCMM32: RC-MM protocol 32 bits
*/
enum rc_proto {
RC_PROTO_UNKNOWN = 0,
@@ -212,6 +221,9 @@ enum rc_proto {
RC_PROTO_XMP = 21,
RC_PROTO_CEC = 22,
RC_PROTO_IMON = 23,
+ RC_PROTO_RCMM12 = 24,
+ RC_PROTO_RCMM24 = 25,
+ RC_PROTO_RCMM32 = 26,
};
#endif
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index 9de8780ac8d9..7198ddd0c6b1 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -372,7 +372,9 @@ struct perf_event_attr {
context_switch : 1, /* context switch data */
write_backward : 1, /* Write ring buffer from end to beginning */
namespaces : 1, /* include namespaces data */
- __reserved_1 : 35;
+ ksymbol : 1, /* include ksymbol events */
+ bpf_event : 1, /* include bpf events */
+ __reserved_1 : 33;
union {
__u32 wakeup_events; /* wakeup every n events */
@@ -445,8 +447,6 @@ struct perf_event_query_bpf {
__u32 ids[0];
};
-#define perf_flags(attr) (*(&(attr)->read_format + 1))
-
/*
* Ioctls that can be done on a perf event fd:
*/
@@ -965,9 +965,58 @@ enum perf_event_type {
*/
PERF_RECORD_NAMESPACES = 16,
+ /*
+ * Record ksymbol register/unregister events:
+ *
+ * struct {
+ * struct perf_event_header header;
+ * u64 addr;
+ * u32 len;
+ * u16 ksym_type;
+ * u16 flags;
+ * char name[];
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_KSYMBOL = 17,
+
+ /*
+ * Record bpf events:
+ * enum perf_bpf_event_type {
+ * PERF_BPF_EVENT_UNKNOWN = 0,
+ * PERF_BPF_EVENT_PROG_LOAD = 1,
+ * PERF_BPF_EVENT_PROG_UNLOAD = 2,
+ * };
+ *
+ * struct {
+ * struct perf_event_header header;
+ * u16 type;
+ * u16 flags;
+ * u32 id;
+ * u8 tag[BPF_TAG_SIZE];
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_BPF_EVENT = 18,
+
PERF_RECORD_MAX, /* non-ABI */
};
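The record layouts documented in the comments above map directly onto structs a tool can overlay on the mmap()ed ring buffer; a hedged sketch (BPF_TAG_SIZE comes from <linux/bpf.h>, and the trailing sample_id block is ignored here):

	struct ksymbol_event {
		struct perf_event_header header;	/* header.type == PERF_RECORD_KSYMBOL */
		__u64 addr;
		__u32 len;
		__u16 ksym_type;			/* enum perf_record_ksymbol_type */
		__u16 flags;				/* PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER */
		char name[];
	};

	struct bpf_event {
		struct perf_event_header header;	/* header.type == PERF_RECORD_BPF_EVENT */
		__u16 type;				/* enum perf_bpf_event_type */
		__u16 flags;
		__u32 id;
		__u8 tag[BPF_TAG_SIZE];
	};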
+enum perf_record_ksymbol_type {
+ PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0,
+ PERF_RECORD_KSYMBOL_TYPE_BPF = 1,
+ PERF_RECORD_KSYMBOL_TYPE_MAX /* non-ABI */
+};
+
+#define PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER (1 << 0)
+
+enum perf_bpf_event_type {
+ PERF_BPF_EVENT_UNKNOWN = 0,
+ PERF_BPF_EVENT_PROG_LOAD = 1,
+ PERF_BPF_EVENT_PROG_UNLOAD = 2,
+ PERF_BPF_EVENT_MAX, /* non-ABI */
+};
+
#define PERF_MAX_STACK_DEPTH 127
#define PERF_MAX_CONTEXTS_PER_STACK 8
diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
index b4875a93363a..094bb03b9cc2 100644
--- a/tools/include/uapi/linux/prctl.h
+++ b/tools/include/uapi/linux/prctl.h
@@ -219,6 +219,7 @@ struct prctl_mm_map {
# define PR_SPEC_ENABLE (1UL << 1)
# define PR_SPEC_DISABLE (1UL << 2)
# define PR_SPEC_FORCE_DISABLE (1UL << 3)
+# define PR_SPEC_DISABLE_NOEXEC (1UL << 4)
/* Reset arm64 pointer authentication keys */
#define PR_PAC_RESET_KEYS 54
diff --git a/tools/include/uapi/linux/tc_act/tc_bpf.h b/tools/include/uapi/linux/tc_act/tc_bpf.h
index 6e89a5df49a4..653c4f94f76e 100644
--- a/tools/include/uapi/linux/tc_act/tc_bpf.h
+++ b/tools/include/uapi/linux/tc_act/tc_bpf.h
@@ -13,8 +13,6 @@
#include <linux/pkt_cls.h>
-#define TCA_ACT_BPF 13
-
struct tc_act_bpf {
tc_gen;
};
diff --git a/tools/io_uring/Makefile b/tools/io_uring/Makefile
new file mode 100644
index 000000000000..f79522fc37b5
--- /dev/null
+++ b/tools/io_uring/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for io_uring test tools
+CFLAGS += -Wall -Wextra -g -D_GNU_SOURCE
+LDLIBS += -lpthread
+
+all: io_uring-cp io_uring-bench
+%: %.c
+ $(CC) $(CFLAGS) -o $@ $^
+
+io_uring-bench: syscall.o io_uring-bench.o
+ $(CC) $(CFLAGS) $(LDLIBS) -o $@ $^
+
+io_uring-cp: setup.o syscall.o queue.o
+
+clean:
+ $(RM) io_uring-cp io_uring-bench *.o
+
+.PHONY: all clean
diff --git a/tools/io_uring/README b/tools/io_uring/README
new file mode 100644
index 000000000000..67fd70115cff
--- /dev/null
+++ b/tools/io_uring/README
@@ -0,0 +1,29 @@
+This directory includes a few programs that demonstrate how to use io_uring
+in an application. The examples are:
+
+io_uring-cp
+ A very basic io_uring implementation of cp(1). It takes two
+ arguments, copies the first argument to the second. This example
+ is part of liburing, and hence uses the simplified liburing API
+ for setting up an io_uring instance, submitting IO, completing IO,
+ etc. The support functions in queue.c and setup.c are straight
+ out of liburing.
+
+io_uring-bench
+ Benchmark program that does random reads on a number of files. This
+ app demonstrates the various features of io_uring, like fixed files,
+ fixed buffers, and polled IO. There are options in the program to
+ control which features to use. The arguments are the file (or files) that
+ io_uring-bench should operate on. This uses the raw io_uring
+ interface.
+
+liburing can be cloned with git here:
+
+ git://git.kernel.dk/liburing
+
+and contains a number of unit tests as well for testing io_uring. It also
+comes with man pages for the three system calls.
+
+Fio includes an io_uring engine; you can clone fio here:
+
+ git://git.kernel.dk/fio
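Example invocations (paths are placeholders; io_uring-bench defaults to polled O_DIRECT reads and an unlimited memlock rlimit, so it is normally pointed at a block device and run with sufficient privileges):

	./io_uring-cp infile outfile
	./io_uring-bench /dev/nvme0n1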
diff --git a/tools/io_uring/barrier.h b/tools/io_uring/barrier.h
new file mode 100644
index 000000000000..ef00f6722ba9
--- /dev/null
+++ b/tools/io_uring/barrier.h
@@ -0,0 +1,16 @@
+#ifndef LIBURING_BARRIER_H
+#define LIBURING_BARRIER_H
+
+#if defined(__x86_64) || defined(__i386__)
+#define read_barrier() __asm__ __volatile__("":::"memory")
+#define write_barrier() __asm__ __volatile__("":::"memory")
+#else
+/*
+ * Add arch appropriate definitions. Be safe and use full barriers for
+ * archs we don't have support for.
+ */
+#define read_barrier() __sync_synchronize()
+#define write_barrier() __sync_synchronize()
+#endif
+
+#endif
diff --git a/tools/io_uring/io_uring-bench.c b/tools/io_uring/io_uring-bench.c
new file mode 100644
index 000000000000..512306a37531
--- /dev/null
+++ b/tools/io_uring/io_uring-bench.c
@@ -0,0 +1,616 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Simple benchmark program that uses the various features of io_uring
+ * to provide fast random access to a device/file. It has various
+ * options that control how we use io_uring; see the OPTIONS section
+ * below. This uses the raw io_uring interface.
+ *
+ * Copyright (C) 2018-2019 Jens Axboe
+ */
+#include <stdio.h>
+#include <errno.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <signal.h>
+#include <inttypes.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/syscall.h>
+#include <sys/resource.h>
+#include <sys/mman.h>
+#include <sys/uio.h>
+#include <linux/fs.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+#include <pthread.h>
+#include <sched.h>
+
+#include "liburing.h"
+#include "barrier.h"
+
+#ifndef IOCQE_FLAG_CACHEHIT
+#define IOCQE_FLAG_CACHEHIT (1U << 0)
+#endif
+
+#define min(a, b) ((a < b) ? (a) : (b))
+
+struct io_sq_ring {
+ unsigned *head;
+ unsigned *tail;
+ unsigned *ring_mask;
+ unsigned *ring_entries;
+ unsigned *flags;
+ unsigned *array;
+};
+
+struct io_cq_ring {
+ unsigned *head;
+ unsigned *tail;
+ unsigned *ring_mask;
+ unsigned *ring_entries;
+ struct io_uring_cqe *cqes;
+};
+
+#define DEPTH 128
+
+#define BATCH_SUBMIT 32
+#define BATCH_COMPLETE 32
+
+#define BS 4096
+
+#define MAX_FDS 16
+
+static unsigned sq_ring_mask, cq_ring_mask;
+
+struct file {
+ unsigned long max_blocks;
+ unsigned pending_ios;
+ int real_fd;
+ int fixed_fd;
+};
+
+struct submitter {
+ pthread_t thread;
+ int ring_fd;
+ struct drand48_data rand;
+ struct io_sq_ring sq_ring;
+ struct io_uring_sqe *sqes;
+ struct iovec iovecs[DEPTH];
+ struct io_cq_ring cq_ring;
+ int inflight;
+ unsigned long reaps;
+ unsigned long done;
+ unsigned long calls;
+ unsigned long cachehit, cachemiss;
+ volatile int finish;
+
+ __s32 *fds;
+
+ struct file files[MAX_FDS];
+ unsigned nr_files;
+ unsigned cur_file;
+};
+
+static struct submitter submitters[1];
+static volatile int finish;
+
+/*
+ * OPTIONS: Set these to test the various features of io_uring.
+ */
+static int polled = 1; /* use IO polling */
+static int fixedbufs = 1; /* use fixed user buffers */
+static int register_files = 1; /* use fixed files */
+static int buffered = 0; /* use buffered IO, not O_DIRECT */
+static int sq_thread_poll = 0; /* use kernel submission/poller thread */
+static int sq_thread_cpu = -1; /* pin above thread to this CPU */
+static int do_nop = 0; /* no-op SQ ring commands */
+
+static int io_uring_register_buffers(struct submitter *s)
+{
+ if (do_nop)
+ return 0;
+
+ return io_uring_register(s->ring_fd, IORING_REGISTER_BUFFERS, s->iovecs,
+ DEPTH);
+}
+
+static int io_uring_register_files(struct submitter *s)
+{
+ unsigned i;
+
+ if (do_nop)
+ return 0;
+
+ s->fds = calloc(s->nr_files, sizeof(__s32));
+ for (i = 0; i < s->nr_files; i++) {
+ s->fds[i] = s->files[i].real_fd;
+ s->files[i].fixed_fd = i;
+ }
+
+ return io_uring_register(s->ring_fd, IORING_REGISTER_FILES, s->fds,
+ s->nr_files);
+}
+
+static int gettid(void)
+{
+ return syscall(__NR_gettid);
+}
+
+static unsigned file_depth(struct submitter *s)
+{
+ return (DEPTH + s->nr_files - 1) / s->nr_files;
+}
+
+static void init_io(struct submitter *s, unsigned index)
+{
+ struct io_uring_sqe *sqe = &s->sqes[index];
+ unsigned long offset;
+ struct file *f;
+ long r;
+
+ if (do_nop) {
+ sqe->opcode = IORING_OP_NOP;
+ return;
+ }
+
+ if (s->nr_files == 1) {
+ f = &s->files[0];
+ } else {
+ f = &s->files[s->cur_file];
+ if (f->pending_ios >= file_depth(s)) {
+ s->cur_file++;
+ if (s->cur_file == s->nr_files)
+ s->cur_file = 0;
+ f = &s->files[s->cur_file];
+ }
+ }
+ f->pending_ios++;
+
+ lrand48_r(&s->rand, &r);
+ offset = (r % (f->max_blocks - 1)) * BS;
+
+ if (register_files) {
+ sqe->flags = IOSQE_FIXED_FILE;
+ sqe->fd = f->fixed_fd;
+ } else {
+ sqe->flags = 0;
+ sqe->fd = f->real_fd;
+ }
+ if (fixedbufs) {
+ sqe->opcode = IORING_OP_READ_FIXED;
+ sqe->addr = (unsigned long) s->iovecs[index].iov_base;
+ sqe->len = BS;
+ sqe->buf_index = index;
+ } else {
+ sqe->opcode = IORING_OP_READV;
+ sqe->addr = (unsigned long) &s->iovecs[index];
+ sqe->len = 1;
+ sqe->buf_index = 0;
+ }
+ sqe->ioprio = 0;
+ sqe->off = offset;
+ sqe->user_data = (unsigned long) f;
+}
+
+static int prep_more_ios(struct submitter *s, unsigned max_ios)
+{
+ struct io_sq_ring *ring = &s->sq_ring;
+ unsigned index, tail, next_tail, prepped = 0;
+
+ next_tail = tail = *ring->tail;
+ do {
+ next_tail++;
+ read_barrier();
+ if (next_tail == *ring->head)
+ break;
+
+ index = tail & sq_ring_mask;
+ init_io(s, index);
+ ring->array[index] = index;
+ prepped++;
+ tail = next_tail;
+ } while (prepped < max_ios);
+
+ if (*ring->tail != tail) {
+ /* order tail store with writes to sqes above */
+ write_barrier();
+ *ring->tail = tail;
+ write_barrier();
+ }
+ return prepped;
+}
+
+static int get_file_size(struct file *f)
+{
+ struct stat st;
+
+ if (fstat(f->real_fd, &st) < 0)
+ return -1;
+ if (S_ISBLK(st.st_mode)) {
+ unsigned long long bytes;
+
+ if (ioctl(f->real_fd, BLKGETSIZE64, &bytes) != 0)
+ return -1;
+
+ f->max_blocks = bytes / BS;
+ return 0;
+ } else if (S_ISREG(st.st_mode)) {
+ f->max_blocks = st.st_size / BS;
+ return 0;
+ }
+
+ return -1;
+}
+
+static int reap_events(struct submitter *s)
+{
+ struct io_cq_ring *ring = &s->cq_ring;
+ struct io_uring_cqe *cqe;
+ unsigned head, reaped = 0;
+
+ head = *ring->head;
+ do {
+ struct file *f;
+
+ read_barrier();
+ if (head == *ring->tail)
+ break;
+ cqe = &ring->cqes[head & cq_ring_mask];
+ if (!do_nop) {
+ f = (struct file *) (uintptr_t) cqe->user_data;
+ f->pending_ios--;
+ if (cqe->res != BS) {
+ printf("io: unexpected ret=%d\n", cqe->res);
+ if (polled && cqe->res == -EOPNOTSUPP)
+ printf("Your filesystem doesn't support poll\n");
+ return -1;
+ }
+ }
+ if (cqe->flags & IOCQE_FLAG_CACHEHIT)
+ s->cachehit++;
+ else
+ s->cachemiss++;
+ reaped++;
+ head++;
+ } while (1);
+
+ s->inflight -= reaped;
+ *ring->head = head;
+ write_barrier();
+ return reaped;
+}
+
+static void *submitter_fn(void *data)
+{
+ struct submitter *s = data;
+ struct io_sq_ring *ring = &s->sq_ring;
+ int ret, prepped;
+
+ printf("submitter=%d\n", gettid());
+
+ srand48_r(pthread_self(), &s->rand);
+
+ prepped = 0;
+ do {
+ int to_wait, to_submit, this_reap, to_prep;
+
+ if (!prepped && s->inflight < DEPTH) {
+ to_prep = min(DEPTH - s->inflight, BATCH_SUBMIT);
+ prepped = prep_more_ios(s, to_prep);
+ }
+ s->inflight += prepped;
+submit_more:
+ to_submit = prepped;
+submit:
+ if (to_submit && (s->inflight + to_submit <= DEPTH))
+ to_wait = 0;
+ else
+ to_wait = min(s->inflight + to_submit, BATCH_COMPLETE);
+
+ /*
+ * Only need to call io_uring_enter if we're not using SQ thread
+ * poll, or if IORING_SQ_NEED_WAKEUP is set.
+ */
+ if (!sq_thread_poll || (*ring->flags & IORING_SQ_NEED_WAKEUP)) {
+ unsigned flags = 0;
+
+ if (to_wait)
+ flags = IORING_ENTER_GETEVENTS;
+ if ((*ring->flags & IORING_SQ_NEED_WAKEUP))
+ flags |= IORING_ENTER_SQ_WAKEUP;
+ ret = io_uring_enter(s->ring_fd, to_submit, to_wait,
+ flags, NULL);
+ s->calls++;
+ }
+
+ /*
+ * For non SQ thread poll, we already got the events we needed
+ * through the io_uring_enter() above. For SQ thread poll, we
+ * need to loop here until we find enough events.
+ */
+ this_reap = 0;
+ do {
+ int r;
+ r = reap_events(s);
+ if (r == -1) {
+ s->finish = 1;
+ break;
+ } else if (r > 0)
+ this_reap += r;
+ } while (sq_thread_poll && this_reap < to_wait);
+ s->reaps += this_reap;
+
+ if (ret >= 0) {
+ if (!ret) {
+ to_submit = 0;
+ if (s->inflight)
+ goto submit;
+ continue;
+ } else if (ret < to_submit) {
+ int diff = to_submit - ret;
+
+ s->done += ret;
+ prepped -= diff;
+ goto submit_more;
+ }
+ s->done += ret;
+ prepped = 0;
+ continue;
+ } else if (ret < 0) {
+ if (errno == EAGAIN) {
+ if (s->finish)
+ break;
+ if (this_reap)
+ goto submit;
+ to_submit = 0;
+ goto submit;
+ }
+ printf("io_submit: %s\n", strerror(errno));
+ break;
+ }
+ } while (!s->finish);
+
+ finish = 1;
+ return NULL;
+}
+
+static void sig_int(int sig)
+{
+ printf("Exiting on signal %d\n", sig);
+ submitters[0].finish = 1;
+ finish = 1;
+}
+
+static void arm_sig_int(void)
+{
+ struct sigaction act;
+
+ memset(&act, 0, sizeof(act));
+ act.sa_handler = sig_int;
+ act.sa_flags = SA_RESTART;
+ sigaction(SIGINT, &act, NULL);
+}
+
+static int setup_ring(struct submitter *s)
+{
+ struct io_sq_ring *sring = &s->sq_ring;
+ struct io_cq_ring *cring = &s->cq_ring;
+ struct io_uring_params p;
+ int ret, fd;
+ void *ptr;
+
+ memset(&p, 0, sizeof(p));
+
+ if (polled && !do_nop)
+ p.flags |= IORING_SETUP_IOPOLL;
+ if (sq_thread_poll) {
+ p.flags |= IORING_SETUP_SQPOLL;
+ if (sq_thread_cpu != -1) {
+ p.flags |= IORING_SETUP_SQ_AFF;
+ p.sq_thread_cpu = sq_thread_cpu;
+ }
+ }
+
+ fd = io_uring_setup(DEPTH, &p);
+ if (fd < 0) {
+ perror("io_uring_setup");
+ return 1;
+ }
+ s->ring_fd = fd;
+
+ if (fixedbufs) {
+ ret = io_uring_register_buffers(s);
+ if (ret < 0) {
+ perror("io_uring_register_buffers");
+ return 1;
+ }
+ }
+
+ if (register_files) {
+ ret = io_uring_register_files(s);
+ if (ret < 0) {
+ perror("io_uring_register_files");
+ return 1;
+ }
+ }
+
+ ptr = mmap(0, p.sq_off.array + p.sq_entries * sizeof(__u32),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
+ IORING_OFF_SQ_RING);
+ printf("sq_ring ptr = 0x%p\n", ptr);
+ sring->head = ptr + p.sq_off.head;
+ sring->tail = ptr + p.sq_off.tail;
+ sring->ring_mask = ptr + p.sq_off.ring_mask;
+ sring->ring_entries = ptr + p.sq_off.ring_entries;
+ sring->flags = ptr + p.sq_off.flags;
+ sring->array = ptr + p.sq_off.array;
+ sq_ring_mask = *sring->ring_mask;
+
+ s->sqes = mmap(0, p.sq_entries * sizeof(struct io_uring_sqe),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
+ IORING_OFF_SQES);
+ printf("sqes ptr = 0x%p\n", s->sqes);
+
+ ptr = mmap(0, p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
+ IORING_OFF_CQ_RING);
+ printf("cq_ring ptr = 0x%p\n", ptr);
+ cring->head = ptr + p.cq_off.head;
+ cring->tail = ptr + p.cq_off.tail;
+ cring->ring_mask = ptr + p.cq_off.ring_mask;
+ cring->ring_entries = ptr + p.cq_off.ring_entries;
+ cring->cqes = ptr + p.cq_off.cqes;
+ cq_ring_mask = *cring->ring_mask;
+ return 0;
+}
+
+static void file_depths(char *buf)
+{
+ struct submitter *s = &submitters[0];
+ unsigned i;
+ char *p;
+
+ buf[0] = '\0';
+ p = buf;
+ for (i = 0; i < s->nr_files; i++) {
+ struct file *f = &s->files[i];
+
+ if (i + 1 == s->nr_files)
+ p += sprintf(p, "%d", f->pending_ios);
+ else
+ p += sprintf(p, "%d, ", f->pending_ios);
+ }
+}
+
+int main(int argc, char *argv[])
+{
+ struct submitter *s = &submitters[0];
+ unsigned long done, calls, reap, cache_hit, cache_miss;
+ int err, i, flags, fd;
+ char *fdepths;
+ void *ret;
+
+ if (!do_nop && argc < 2) {
+ printf("%s: filename\n", argv[0]);
+ return 1;
+ }
+
+ flags = O_RDONLY | O_NOATIME;
+ if (!buffered)
+ flags |= O_DIRECT;
+
+ i = 1;
+ while (!do_nop && i < argc) {
+ struct file *f;
+
+ if (s->nr_files == MAX_FDS) {
+ printf("Max number of files (%d) reached\n", MAX_FDS);
+ break;
+ }
+ fd = open(argv[i], flags);
+ if (fd < 0) {
+ perror("open");
+ return 1;
+ }
+
+ f = &s->files[s->nr_files];
+ f->real_fd = fd;
+ if (get_file_size(f)) {
+ printf("failed getting size of device/file\n");
+ return 1;
+ }
+ if (f->max_blocks <= 1) {
+ printf("Zero file/device size?\n");
+ return 1;
+ }
+ f->max_blocks--;
+
+ printf("Added file %s\n", argv[i]);
+ s->nr_files++;
+ i++;
+ }
+
+ if (fixedbufs) {
+ struct rlimit rlim;
+
+ rlim.rlim_cur = RLIM_INFINITY;
+ rlim.rlim_max = RLIM_INFINITY;
+ if (setrlimit(RLIMIT_MEMLOCK, &rlim) < 0) {
+ perror("setrlimit");
+ return 1;
+ }
+ }
+
+ arm_sig_int();
+
+ for (i = 0; i < DEPTH; i++) {
+ void *buf;
+
+ if (posix_memalign(&buf, BS, BS)) {
+ printf("failed alloc\n");
+ return 1;
+ }
+ s->iovecs[i].iov_base = buf;
+ s->iovecs[i].iov_len = BS;
+ }
+
+ err = setup_ring(s);
+ if (err) {
+ printf("ring setup failed: %s, %d\n", strerror(errno), err);
+ return 1;
+ }
+ printf("polled=%d, fixedbufs=%d, buffered=%d", polled, fixedbufs, buffered);
+ printf(" QD=%d, sq_ring=%d, cq_ring=%d\n", DEPTH, *s->sq_ring.ring_entries, *s->cq_ring.ring_entries);
+
+ pthread_create(&s->thread, NULL, submitter_fn, s);
+
+ fdepths = malloc(8 * s->nr_files);
+ cache_hit = cache_miss = reap = calls = done = 0;
+ do {
+ unsigned long this_done = 0;
+ unsigned long this_reap = 0;
+ unsigned long this_call = 0;
+ unsigned long this_cache_hit = 0;
+ unsigned long this_cache_miss = 0;
+ unsigned long rpc = 0, ipc = 0;
+ double hit = 0.0;
+
+ sleep(1);
+ this_done += s->done;
+ this_call += s->calls;
+ this_reap += s->reaps;
+ this_cache_hit += s->cachehit;
+ this_cache_miss += s->cachemiss;
+ if (this_cache_hit && this_cache_miss) {
+ unsigned long hits, total;
+
+ hits = this_cache_hit - cache_hit;
+ total = hits + this_cache_miss - cache_miss;
+ hit = (double) hits / (double) total;
+ hit *= 100.0;
+ }
+ if (this_call - calls) {
+ rpc = (this_done - done) / (this_call - calls);
+ ipc = (this_reap - reap) / (this_call - calls);
+ } else
+ rpc = ipc = -1;
+ file_depths(fdepths);
+ printf("IOPS=%lu, IOS/call=%ld/%ld, inflight=%u (%s), Cachehit=%0.2f%%\n",
+ this_done - done, rpc, ipc, s->inflight,
+ fdepths, hit);
+ done = this_done;
+ calls = this_call;
+ reap = this_reap;
+ cache_hit = s->cachehit;
+ cache_miss = s->cachemiss;
+ } while (!finish);
+
+ pthread_join(s->thread, &ret);
+ close(s->ring_fd);
+ free(fdepths);
+ return 0;
+}
diff --git a/tools/io_uring/io_uring-cp.c b/tools/io_uring/io_uring-cp.c
new file mode 100644
index 000000000000..633f65bb43a7
--- /dev/null
+++ b/tools/io_uring/io_uring-cp.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Simple test program that demonstrates a file copy through io_uring. This
+ * uses the API exposed by liburing.
+ *
+ * Copyright (C) 2018-2019 Jens Axboe
+ */
+#include <stdio.h>
+#include <fcntl.h>
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+
+#include "liburing.h"
+
+#define QD 64
+#define BS (32*1024)
+
+static int infd, outfd;
+
+struct io_data {
+ int read;
+ off_t first_offset, offset;
+ size_t first_len;
+ struct iovec iov;
+};
+
+static int setup_context(unsigned entries, struct io_uring *ring)
+{
+ int ret;
+
+ ret = io_uring_queue_init(entries, ring, 0);
+ if (ret < 0) {
+ fprintf(stderr, "queue_init: %s\n", strerror(-ret));
+ return -1;
+ }
+
+ return 0;
+}
+
+static int get_file_size(int fd, off_t *size)
+{
+ struct stat st;
+
+ if (fstat(fd, &st) < 0)
+ return -1;
+ if (S_ISREG(st.st_mode)) {
+ *size = st.st_size;
+ return 0;
+ } else if (S_ISBLK(st.st_mode)) {
+ unsigned long long bytes;
+
+ if (ioctl(fd, BLKGETSIZE64, &bytes) != 0)
+ return -1;
+
+ *size = bytes;
+ return 0;
+ }
+
+ return -1;
+}
+
+static void queue_prepped(struct io_uring *ring, struct io_data *data)
+{
+ struct io_uring_sqe *sqe;
+
+ sqe = io_uring_get_sqe(ring);
+ assert(sqe);
+
+ if (data->read)
+ io_uring_prep_readv(sqe, infd, &data->iov, 1, data->offset);
+ else
+ io_uring_prep_writev(sqe, outfd, &data->iov, 1, data->offset);
+
+ io_uring_sqe_set_data(sqe, data);
+}
+
+static int queue_read(struct io_uring *ring, off_t size, off_t offset)
+{
+ struct io_uring_sqe *sqe;
+ struct io_data *data;
+
+ sqe = io_uring_get_sqe(ring);
+ if (!sqe)
+ return 1;
+
+ data = malloc(size + sizeof(*data));
+ data->read = 1;
+ data->offset = data->first_offset = offset;
+
+ data->iov.iov_base = data + 1;
+ data->iov.iov_len = size;
+ data->first_len = size;
+
+ io_uring_prep_readv(sqe, infd, &data->iov, 1, offset);
+ io_uring_sqe_set_data(sqe, data);
+ return 0;
+}
+
+static void queue_write(struct io_uring *ring, struct io_data *data)
+{
+ data->read = 0;
+ data->offset = data->first_offset;
+
+ data->iov.iov_base = data + 1;
+ data->iov.iov_len = data->first_len;
+
+ queue_prepped(ring, data);
+ io_uring_submit(ring);
+}
+
+static int copy_file(struct io_uring *ring, off_t insize)
+{
+ unsigned long reads, writes;
+ struct io_uring_cqe *cqe;
+ off_t write_left, offset;
+ int ret;
+
+ write_left = insize;
+ writes = reads = offset = 0;
+
+ while (insize || write_left) {
+ unsigned long had_reads;
+ int got_comp;
+
+ /*
+ * Queue up as many reads as we can
+ */
+ had_reads = reads;
+ while (insize) {
+ off_t this_size = insize;
+
+ if (reads + writes >= QD)
+ break;
+ if (this_size > BS)
+ this_size = BS;
+ else if (!this_size)
+ break;
+
+ if (queue_read(ring, this_size, offset))
+ break;
+
+ insize -= this_size;
+ offset += this_size;
+ reads++;
+ }
+
+ if (had_reads != reads) {
+ ret = io_uring_submit(ring);
+ if (ret < 0) {
+ fprintf(stderr, "io_uring_submit: %s\n", strerror(-ret));
+ break;
+ }
+ }
+
+ /*
+ * Queue is full at this point. Find at least one completion.
+ */
+ got_comp = 0;
+ while (write_left) {
+ struct io_data *data;
+
+ if (!got_comp) {
+ ret = io_uring_wait_completion(ring, &cqe);
+ got_comp = 1;
+ } else
+ ret = io_uring_get_completion(ring, &cqe);
+ if (ret < 0) {
+ fprintf(stderr, "io_uring_get_completion: %s\n",
+ strerror(-ret));
+ return 1;
+ }
+ if (!cqe)
+ break;
+
+ data = (struct io_data *) (uintptr_t) cqe->user_data;
+ if (cqe->res < 0) {
+ if (cqe->res == -EAGAIN) {
+ queue_prepped(ring, data);
+ continue;
+ }
+ fprintf(stderr, "cqe failed: %s\n",
+ strerror(-cqe->res));
+ return 1;
+ } else if ((size_t) cqe->res != data->iov.iov_len) {
+ /* Short read/write, adjust and requeue */
+ data->iov.iov_base += cqe->res;
+ data->iov.iov_len -= cqe->res;
+ data->offset += cqe->res;
+ queue_prepped(ring, data);
+ continue;
+ }
+
+ /*
+ * All done. if write, nothing else to do. if read,
+ * queue up corresponding write.
+ */
+ if (data->read) {
+ queue_write(ring, data);
+ write_left -= data->first_len;
+ reads--;
+ writes++;
+ } else {
+ free(data);
+ writes--;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ struct io_uring ring;
+ off_t insize;
+ int ret;
+
+ if (argc < 3) {
+ printf("%s: infile outfile\n", argv[0]);
+ return 1;
+ }
+
+ infd = open(argv[1], O_RDONLY);
+ if (infd < 0) {
+ perror("open infile");
+ return 1;
+ }
+ outfd = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
+ if (outfd < 0) {
+ perror("open outfile");
+ return 1;
+ }
+
+ if (setup_context(QD, &ring))
+ return 1;
+ if (get_file_size(infd, &insize))
+ return 1;
+
+ ret = copy_file(&ring, insize);
+
+ close(infd);
+ close(outfd);
+ io_uring_queue_exit(&ring);
+ return ret;
+}
diff --git a/tools/io_uring/liburing.h b/tools/io_uring/liburing.h
new file mode 100644
index 000000000000..cab0f50257ba
--- /dev/null
+++ b/tools/io_uring/liburing.h
@@ -0,0 +1,143 @@
+#ifndef LIB_URING_H
+#define LIB_URING_H
+
+#include <sys/uio.h>
+#include <signal.h>
+#include <string.h>
+#include "../../include/uapi/linux/io_uring.h"
+
+/*
+ * Library interface to io_uring
+ */
+struct io_uring_sq {
+ unsigned *khead;
+ unsigned *ktail;
+ unsigned *kring_mask;
+ unsigned *kring_entries;
+ unsigned *kflags;
+ unsigned *kdropped;
+ unsigned *array;
+ struct io_uring_sqe *sqes;
+
+ unsigned sqe_head;
+ unsigned sqe_tail;
+
+ size_t ring_sz;
+};
+
+struct io_uring_cq {
+ unsigned *khead;
+ unsigned *ktail;
+ unsigned *kring_mask;
+ unsigned *kring_entries;
+ unsigned *koverflow;
+ struct io_uring_cqe *cqes;
+
+ size_t ring_sz;
+};
+
+struct io_uring {
+ struct io_uring_sq sq;
+ struct io_uring_cq cq;
+ int ring_fd;
+};
+
+/*
+ * System calls
+ */
+extern int io_uring_setup(unsigned entries, struct io_uring_params *p);
+extern int io_uring_enter(unsigned fd, unsigned to_submit,
+ unsigned min_complete, unsigned flags, sigset_t *sig);
+extern int io_uring_register(int fd, unsigned int opcode, void *arg,
+ unsigned int nr_args);
+
+/*
+ * Library interface
+ */
+extern int io_uring_queue_init(unsigned entries, struct io_uring *ring,
+ unsigned flags);
+extern int io_uring_queue_mmap(int fd, struct io_uring_params *p,
+ struct io_uring *ring);
+extern void io_uring_queue_exit(struct io_uring *ring);
+extern int io_uring_get_completion(struct io_uring *ring,
+ struct io_uring_cqe **cqe_ptr);
+extern int io_uring_wait_completion(struct io_uring *ring,
+ struct io_uring_cqe **cqe_ptr);
+extern int io_uring_submit(struct io_uring *ring);
+extern struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring);
+
+/*
+ * Command prep helpers
+ */
+static inline void io_uring_sqe_set_data(struct io_uring_sqe *sqe, void *data)
+{
+ sqe->user_data = (unsigned long) data;
+}
+
+static inline void io_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd,
+ void *addr, unsigned len, off_t offset)
+{
+ memset(sqe, 0, sizeof(*sqe));
+ sqe->opcode = op;
+ sqe->fd = fd;
+ sqe->off = offset;
+ sqe->addr = (unsigned long) addr;
+ sqe->len = len;
+}
+
+static inline void io_uring_prep_readv(struct io_uring_sqe *sqe, int fd,
+ struct iovec *iovecs, unsigned nr_vecs,
+ off_t offset)
+{
+ io_uring_prep_rw(IORING_OP_READV, sqe, fd, iovecs, nr_vecs, offset);
+}
+
+static inline void io_uring_prep_read_fixed(struct io_uring_sqe *sqe, int fd,
+ void *buf, unsigned nbytes,
+ off_t offset)
+{
+ io_uring_prep_rw(IORING_OP_READ_FIXED, sqe, fd, buf, nbytes, offset);
+}
+
+static inline void io_uring_prep_writev(struct io_uring_sqe *sqe, int fd,
+ struct iovec *iovecs, unsigned nr_vecs,
+ off_t offset)
+{
+ io_uring_prep_rw(IORING_OP_WRITEV, sqe, fd, iovecs, nr_vecs, offset);
+}
+
+static inline void io_uring_prep_write_fixed(struct io_uring_sqe *sqe, int fd,
+ void *buf, unsigned nbytes,
+ off_t offset)
+{
+ io_uring_prep_rw(IORING_OP_WRITE_FIXED, sqe, fd, buf, nbytes, offset);
+}
+
+static inline void io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd,
+ short poll_mask)
+{
+ memset(sqe, 0, sizeof(*sqe));
+ sqe->opcode = IORING_OP_POLL_ADD;
+ sqe->fd = fd;
+ sqe->poll_events = poll_mask;
+}
+
+static inline void io_uring_prep_poll_remove(struct io_uring_sqe *sqe,
+ void *user_data)
+{
+ memset(sqe, 0, sizeof(*sqe));
+ sqe->opcode = IORING_OP_POLL_REMOVE;
+ sqe->addr = (unsigned long) user_data;
+}
+
+static inline void io_uring_prep_fsync(struct io_uring_sqe *sqe, int fd,
+ int datasync)
+{
+ memset(sqe, 0, sizeof(*sqe));
+ sqe->opcode = IORING_OP_FSYNC;
+ sqe->fd = fd;
+ if (datasync)
+ sqe->fsync_flags = IORING_FSYNC_DATASYNC;
+}
+
+#endif
diff --git a/tools/io_uring/queue.c b/tools/io_uring/queue.c
new file mode 100644
index 000000000000..88505e873ad9
--- /dev/null
+++ b/tools/io_uring/queue.c
@@ -0,0 +1,164 @@
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+
+#include "liburing.h"
+#include "barrier.h"
+
+static int __io_uring_get_completion(struct io_uring *ring,
+ struct io_uring_cqe **cqe_ptr, int wait)
+{
+ struct io_uring_cq *cq = &ring->cq;
+ const unsigned mask = *cq->kring_mask;
+ unsigned head;
+ int ret;
+
+ *cqe_ptr = NULL;
+ head = *cq->khead;
+ do {
+ /*
+ * It's necessary to use a read_barrier() before reading
+ * the CQ tail, since the kernel updates it locklessly. The
+ * kernel has the matching store barrier for the update. The
+ * kernel also ensures that previous stores to CQEs are ordered
+ * with the tail update.
+ */
+ read_barrier();
+ if (head != *cq->ktail) {
+ *cqe_ptr = &cq->cqes[head & mask];
+ break;
+ }
+ if (!wait)
+ break;
+ ret = io_uring_enter(ring->ring_fd, 0, 1,
+ IORING_ENTER_GETEVENTS, NULL);
+ if (ret < 0)
+ return -errno;
+ } while (1);
+
+ if (*cqe_ptr) {
+ *cq->khead = head + 1;
+ /*
+ * Ensure that the kernel sees our new head, the kernel has
+ * the matching read barrier.
+ */
+ write_barrier();
+ }
+
+ return 0;
+}
+
+/*
+ * Return an IO completion, if one is readily available
+ */
+int io_uring_get_completion(struct io_uring *ring,
+ struct io_uring_cqe **cqe_ptr)
+{
+ return __io_uring_get_completion(ring, cqe_ptr, 0);
+}
+
+/*
+ * Return an IO completion, waiting for it if necessary
+ */
+int io_uring_wait_completion(struct io_uring *ring,
+ struct io_uring_cqe **cqe_ptr)
+{
+ return __io_uring_get_completion(ring, cqe_ptr, 1);
+}
+
+/*
+ * Submit sqes acquired from io_uring_get_sqe() to the kernel.
+ *
+ * Returns number of sqes submitted
+ */
+int io_uring_submit(struct io_uring *ring)
+{
+ struct io_uring_sq *sq = &ring->sq;
+ const unsigned mask = *sq->kring_mask;
+ unsigned ktail, ktail_next, submitted;
+ int ret;
+
+ /*
+ * If we have pending IO in the kring, submit it first. We need a
+ * read barrier here to match the kernel's store barrier when updating
+ * the SQ head.
+ */
+ read_barrier();
+ if (*sq->khead != *sq->ktail) {
+ submitted = *sq->kring_entries;
+ goto submit;
+ }
+
+ if (sq->sqe_head == sq->sqe_tail)
+ return 0;
+
+ /*
+ * Fill in sqes that we have queued up, adding them to the kernel ring
+ */
+ submitted = 0;
+ ktail = ktail_next = *sq->ktail;
+ while (sq->sqe_head < sq->sqe_tail) {
+ ktail_next++;
+ read_barrier();
+
+ sq->array[ktail & mask] = sq->sqe_head & mask;
+ ktail = ktail_next;
+
+ sq->sqe_head++;
+ submitted++;
+ }
+
+ if (!submitted)
+ return 0;
+
+ if (*sq->ktail != ktail) {
+ /*
+ * First write barrier ensures that the SQE stores are updated
+ * with the tail update. This is needed so that the kernel
+ * will never see a tail update without the preceding SQE
+ * stores being done.
+ */
+ write_barrier();
+ *sq->ktail = ktail;
+ /*
+ * The kernel has the matching read barrier for reading the
+ * SQ tail.
+ */
+ write_barrier();
+ }
+
+submit:
+ ret = io_uring_enter(ring->ring_fd, submitted, 0,
+ IORING_ENTER_GETEVENTS, NULL);
+ if (ret < 0)
+ return -errno;
+
+ return 0;
+}
+
+/*
+ * Return an sqe to fill. Application must later call io_uring_submit()
+ * when it's ready to tell the kernel about it. The caller may call this
+ * function multiple times before calling io_uring_submit().
+ *
+ * Returns a vacant sqe, or NULL if we're full.
+ */
+struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring)
+{
+ struct io_uring_sq *sq = &ring->sq;
+ unsigned next = sq->sqe_tail + 1;
+ struct io_uring_sqe *sqe;
+
+ /*
+ * All sqes are used
+ */
+ if (next - sq->sqe_head > *sq->kring_entries)
+ return NULL;
+
+ sqe = &sq->sqes[sq->sqe_tail & *sq->kring_mask];
+ sq->sqe_tail = next;
+ return sqe;
+}
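As a usage illustration of the interface implemented above (a hedged sketch, not code from this tree; fd is assumed to be an open file descriptor and buf an allocated buffer):

	static int read_once(int fd, void *buf, size_t len)
	{
		struct io_uring ring;
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;
		struct iovec iov = { .iov_base = buf, .iov_len = len };
		int ret, res;

		if (io_uring_queue_init(8, &ring, 0) < 0)
			return -1;
		sqe = io_uring_get_sqe(&ring);
		if (!sqe) {
			io_uring_queue_exit(&ring);
			return -1;
		}
		io_uring_prep_readv(sqe, fd, &iov, 1, 0);
		io_uring_sqe_set_data(sqe, &iov);
		io_uring_submit(&ring);
		ret = io_uring_wait_completion(&ring, &cqe);
		res = (ret < 0 || !cqe) ? -1 : cqe->res;	/* bytes read or -errno */
		io_uring_queue_exit(&ring);
		return res;
	}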
diff --git a/tools/io_uring/setup.c b/tools/io_uring/setup.c
new file mode 100644
index 000000000000..4da19a77132c
--- /dev/null
+++ b/tools/io_uring/setup.c
@@ -0,0 +1,103 @@
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+
+#include "liburing.h"
+
+static int io_uring_mmap(int fd, struct io_uring_params *p,
+ struct io_uring_sq *sq, struct io_uring_cq *cq)
+{
+ size_t size;
+ void *ptr;
+ int ret;
+
+ sq->ring_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
+ ptr = mmap(0, sq->ring_sz, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
+ if (ptr == MAP_FAILED)
+ return -errno;
+ sq->khead = ptr + p->sq_off.head;
+ sq->ktail = ptr + p->sq_off.tail;
+ sq->kring_mask = ptr + p->sq_off.ring_mask;
+ sq->kring_entries = ptr + p->sq_off.ring_entries;
+ sq->kflags = ptr + p->sq_off.flags;
+ sq->kdropped = ptr + p->sq_off.dropped;
+ sq->array = ptr + p->sq_off.array;
+
+ size = p->sq_entries * sizeof(struct io_uring_sqe);
+ sq->sqes = mmap(0, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE, fd,
+ IORING_OFF_SQES);
+ if (sq->sqes == MAP_FAILED) {
+ ret = -errno;
+err:
+ munmap(sq->khead, sq->ring_sz);
+ return ret;
+ }
+
+ cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
+ ptr = mmap(0, cq->ring_sz, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
+ if (ptr == MAP_FAILED) {
+ ret = -errno;
+ munmap(sq->sqes, p->sq_entries * sizeof(struct io_uring_sqe));
+ goto err;
+ }
+ cq->khead = ptr + p->cq_off.head;
+ cq->ktail = ptr + p->cq_off.tail;
+ cq->kring_mask = ptr + p->cq_off.ring_mask;
+ cq->kring_entries = ptr + p->cq_off.ring_entries;
+ cq->koverflow = ptr + p->cq_off.overflow;
+ cq->cqes = ptr + p->cq_off.cqes;
+ return 0;
+}
+
+/*
+ * For users that want to specify sq_thread_cpu or sq_thread_idle, this
+ * interface is a convenient helper for mmap()ing the rings.
+ * Returns -1 on error, or zero on success. On success, 'ring'
+ * contains the necessary information to read/write to the rings.
+ */
+int io_uring_queue_mmap(int fd, struct io_uring_params *p, struct io_uring *ring)
+{
+ int ret;
+
+ memset(ring, 0, sizeof(*ring));
+ ret = io_uring_mmap(fd, p, &ring->sq, &ring->cq);
+ if (!ret)
+ ring->ring_fd = fd;
+ return ret;
+}
+
+/*
+ * Returns -1 on error, or zero on success. On success, 'ring'
+ * contains the necessary information to read/write to the rings.
+ */
+int io_uring_queue_init(unsigned entries, struct io_uring *ring, unsigned flags)
+{
+ struct io_uring_params p;
+ int fd;
+
+ memset(&p, 0, sizeof(p));
+ p.flags = flags;
+
+ fd = io_uring_setup(entries, &p);
+ if (fd < 0)
+ return fd;
+
+ return io_uring_queue_mmap(fd, &p, ring);
+}
+
+void io_uring_queue_exit(struct io_uring *ring)
+{
+ struct io_uring_sq *sq = &ring->sq;
+ struct io_uring_cq *cq = &ring->cq;
+
+ munmap(sq->sqes, *sq->kring_entries * sizeof(struct io_uring_sqe));
+ munmap(sq->khead, sq->ring_sz);
+ munmap(cq->khead, cq->ring_sz);
+ close(ring->ring_fd);
+}
diff --git a/tools/io_uring/syscall.c b/tools/io_uring/syscall.c
new file mode 100644
index 000000000000..6b835e5c6a5b
--- /dev/null
+++ b/tools/io_uring/syscall.c
@@ -0,0 +1,40 @@
+/*
+ * Will go away once libc support is there
+ */
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <sys/uio.h>
+#include <signal.h>
+#include "liburing.h"
+
+#if defined(__x86_64) || defined(__i386__)
+#ifndef __NR_sys_io_uring_setup
+#define __NR_sys_io_uring_setup 425
+#endif
+#ifndef __NR_sys_io_uring_enter
+#define __NR_sys_io_uring_enter 426
+#endif
+#ifndef __NR_sys_io_uring_register
+#define __NR_sys_io_uring_register 427
+#endif
+#else
+#error "Arch not supported yet"
+#endif
+
+int io_uring_register(int fd, unsigned int opcode, void *arg,
+ unsigned int nr_args)
+{
+ return syscall(__NR_sys_io_uring_register, fd, opcode, arg, nr_args);
+}
+
+int io_uring_setup(unsigned entries, struct io_uring_params *p)
+{
+ return syscall(__NR_sys_io_uring_setup, entries, p);
+}
+
+int io_uring_enter(unsigned fd, unsigned to_submit, unsigned min_complete,
+ unsigned flags, sigset_t *sig)
+{
+ return syscall(__NR_sys_io_uring_enter, fd, to_submit, min_complete,
+ flags, sig, _NSIG / 8);
+}
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index 197b40f5b5c6..ee9d5362f35b 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1 +1 @@
-libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o netlink.o bpf_prog_linfo.o
+libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o netlink.o bpf_prog_linfo.o libbpf_probes.o xsk.o
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index 34d9c3619c96..61aaacf0cfa1 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -14,21 +14,6 @@ srctree := $(patsubst %/,%,$(dir $(srctree)))
#$(info Determined 'srctree' to be $(srctree))
endif
-# Makefiles suck: This macro sets a default value of $(2) for the
-# variable named by $(1), unless the variable has been set by
-# environment or command line. This is necessary for CC and AR
-# because make sets default values, so the simpler ?= approach
-# won't work as expected.
-define allow-override
- $(if $(or $(findstring environment,$(origin $(1))),\
- $(findstring command line,$(origin $(1)))),,\
- $(eval $(1) = $(2)))
-endef
-
-# Allow setting CC and AR, or setting CROSS_COMPILE as a prefix.
-$(call allow-override,CC,$(CROSS_COMPILE)gcc)
-$(call allow-override,AR,$(CROSS_COMPILE)ar)
-
INSTALL = install
# Use DESTDIR for installing into a different root directory.
@@ -54,7 +39,7 @@ man_dir_SQ = '$(subst ','\'',$(man_dir))'
export man_dir man_dir_SQ INSTALL
export DESTDIR DESTDIR_SQ
-include ../../scripts/Makefile.include
+include $(srctree)/tools/scripts/Makefile.include
# copy a bit from Linux kbuild
@@ -147,9 +132,9 @@ BPF_IN := $(OUTPUT)libbpf-in.o
LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
VERSION_SCRIPT := libbpf.map
-GLOBAL_SYM_COUNT = $(shell readelf -s $(BPF_IN) | \
+GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN) | \
awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {s++} END{print s}')
-VERSIONED_SYM_COUNT = $(shell readelf -s $(OUTPUT)libbpf.so | \
+VERSIONED_SYM_COUNT = $(shell readelf -s --wide $(OUTPUT)libbpf.so | \
grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l)
CMD_TARGETS = $(LIB_FILE)
@@ -162,7 +147,8 @@ endif
TARGETS = $(CMD_TARGETS)
-all: fixdep all_cmd
+all: fixdep
+ $(Q)$(MAKE) all_cmd
all_cmd: $(CMD_TARGETS) check
@@ -179,6 +165,9 @@ $(BPF_IN): force elfdep bpfdep
@(test -f ../../include/uapi/linux/if_link.h -a -f ../../../include/uapi/linux/if_link.h && ( \
(diff -B ../../include/uapi/linux/if_link.h ../../../include/uapi/linux/if_link.h >/dev/null) || \
echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_link.h' differs from latest version at 'include/uapi/linux/if_link.h'" >&2 )) || true
+ @(test -f ../../include/uapi/linux/if_xdp.h -a -f ../../../include/uapi/linux/if_xdp.h && ( \
+ (diff -B ../../include/uapi/linux/if_xdp.h ../../../include/uapi/linux/if_xdp.h >/dev/null) || \
+ echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true
$(Q)$(MAKE) $(build)=libbpf
$(OUTPUT)libbpf.so: $(BPF_IN)
@@ -189,7 +178,7 @@ $(OUTPUT)libbpf.a: $(BPF_IN)
$(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
$(OUTPUT)test_libbpf: test_libbpf.cpp $(OUTPUT)libbpf.a
- $(QUIET_LINK)$(CXX) $^ -lelf -o $@
+ $(QUIET_LINK)$(CXX) $(INCLUDES) $^ -lelf -o $@
check: check_abi
diff --git a/tools/lib/bpf/README.rst b/tools/lib/bpf/README.rst
index 607aae40f4ed..5788479384ca 100644
--- a/tools/lib/bpf/README.rst
+++ b/tools/lib/bpf/README.rst
@@ -9,7 +9,7 @@ described here. It's recommended to follow these conventions whenever a
new function or type is added to keep libbpf API clean and consistent.
All types and functions provided by libbpf API should have one of the
-following prefixes: ``bpf_``, ``btf_``, ``libbpf_``.
+following prefixes: ``bpf_``, ``btf_``, ``libbpf_``, ``xsk_``.
System call wrappers
--------------------
@@ -62,6 +62,19 @@ Auxiliary functions and types that don't fit well in any of categories
described above should have ``libbpf_`` prefix, e.g.
``libbpf_get_error`` or ``libbpf_prog_type_by_name``.
+AF_XDP functions
+-------------------
+
+AF_XDP functions should have an ``xsk_`` prefix, e.g.
+``xsk_umem__get_data`` or ``xsk_umem__create``. The interface consists
+of both low-level ring access functions and high-level configuration
+functions. These can be mixed and matched. Note that these functions
+are not reentrant for performance reasons.
+
+Please refer to Documentation/networking/af_xdp.rst in the Linux kernel
+source tree for details on how to use XDP sockets, and for common
+mistakes to check for if you do not see any traffic reaching user space.
+
libbpf ABI
==========
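
As a rough illustration of the high-level configuration functions, the sketch below creates a UMEM with the default configuration and translates a frame address into a pointer. It is not part of this patch; the xsk_umem__create(), xsk_umem__get_data() and xsk_umem__delete() signatures are assumptions based on the xsk.h header added elsewhere in this series and should be checked against that header.

#include <stdlib.h>
#include <stdio.h>
#include "xsk.h"

#define NUM_FRAMES	1024
#define FRAME_SIZE	4096

int main(void)
{
	struct xsk_ring_prod fill;
	struct xsk_ring_cons comp;
	struct xsk_umem *umem;
	void *bufs, *pkt;
	int ret;

	/* Packet buffer area backing the UMEM; must be page aligned. */
	if (posix_memalign(&bufs, 4096, NUM_FRAMES * FRAME_SIZE))
		return 1;

	/* NULL config is assumed to select the library defaults. */
	ret = xsk_umem__create(&umem, bufs, NUM_FRAMES * FRAME_SIZE,
			       &fill, &comp, NULL);
	if (ret) {
		fprintf(stderr, "xsk_umem__create failed: %d\n", ret);
		return 1;
	}

	/* Translate the address of frame 0 into a pointer. */
	pkt = xsk_umem__get_data(bufs, 0);
	(void)pkt;

	xsk_umem__delete(umem);
	free(bufs);
	return 0;
}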
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 88cbd110ae58..9cd015574e83 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -22,6 +22,7 @@
*/
#include <stdlib.h>
+#include <string.h>
#include <memory.h>
#include <unistd.h>
#include <asm/unistd.h>
@@ -214,23 +215,35 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
{
void *finfo = NULL, *linfo = NULL;
union bpf_attr attr;
+ __u32 log_level;
__u32 name_len;
int fd;
- if (!load_attr)
+ if (!load_attr || !log_buf != !log_buf_sz)
+ return -EINVAL;
+
+ log_level = load_attr->log_level;
+ if (log_level > 2 || (log_level && !log_buf))
return -EINVAL;
name_len = load_attr->name ? strlen(load_attr->name) : 0;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.prog_type = load_attr->prog_type;
attr.expected_attach_type = load_attr->expected_attach_type;
attr.insn_cnt = (__u32)load_attr->insns_cnt;
attr.insns = ptr_to_u64(load_attr->insns);
attr.license = ptr_to_u64(load_attr->license);
- attr.log_buf = ptr_to_u64(NULL);
- attr.log_size = 0;
- attr.log_level = 0;
+
+ attr.log_level = log_level;
+ if (log_level) {
+ attr.log_buf = ptr_to_u64(log_buf);
+ attr.log_size = log_buf_sz;
+ } else {
+ attr.log_buf = ptr_to_u64(NULL);
+ attr.log_size = 0;
+ }
+
attr.kern_version = load_attr->kern_version;
attr.prog_ifindex = load_attr->prog_ifindex;
attr.prog_btf_fd = load_attr->prog_btf_fd;
@@ -286,7 +299,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
goto done;
}
- if (!log_buf || !log_buf_sz)
+ if (log_level || !log_buf)
goto done;
/* Try again with log */
@@ -327,7 +340,7 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.prog_type = type;
attr.insn_cnt = (__u32)insns_cnt;
attr.insns = ptr_to_u64(insns);
@@ -347,7 +360,7 @@ int bpf_map_update_elem(int fd, const void *key, const void *value,
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
attr.value = ptr_to_u64(value);
@@ -360,7 +373,7 @@ int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
attr.value = ptr_to_u64(value);
@@ -368,11 +381,24 @@ int bpf_map_lookup_elem(int fd, const void *key, void *value)
return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}
+int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags)
+{
+ union bpf_attr attr;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.map_fd = fd;
+ attr.key = ptr_to_u64(key);
+ attr.value = ptr_to_u64(value);
+ attr.flags = flags;
+
+ return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
+}
+
int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
attr.value = ptr_to_u64(value);
@@ -384,7 +410,7 @@ int bpf_map_delete_elem(int fd, const void *key)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
@@ -395,7 +421,7 @@ int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
attr.next_key = ptr_to_u64(next_key);
@@ -407,7 +433,7 @@ int bpf_obj_pin(int fd, const char *pathname)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.pathname = ptr_to_u64((void *)pathname);
attr.bpf_fd = fd;
@@ -418,7 +444,7 @@ int bpf_obj_get(const char *pathname)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.pathname = ptr_to_u64((void *)pathname);
return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
@@ -429,7 +455,7 @@ int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.target_fd = target_fd;
attr.attach_bpf_fd = prog_fd;
attr.attach_type = type;
@@ -442,7 +468,7 @@ int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.target_fd = target_fd;
attr.attach_type = type;
@@ -453,7 +479,7 @@ int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.target_fd = target_fd;
attr.attach_bpf_fd = prog_fd;
attr.attach_type = type;
@@ -467,7 +493,7 @@ int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
union bpf_attr attr;
int ret;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.query.target_fd = target_fd;
attr.query.attach_type = type;
attr.query.query_flags = query_flags;
@@ -488,7 +514,7 @@ int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
union bpf_attr attr;
int ret;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.test.prog_fd = prog_fd;
attr.test.data_in = ptr_to_u64(data);
attr.test.data_out = ptr_to_u64(data_out);
@@ -513,7 +539,7 @@ int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr)
if (!test_attr->data_out && test_attr->data_size_out > 0)
return -EINVAL;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.test.prog_fd = test_attr->prog_fd;
attr.test.data_in = ptr_to_u64(test_attr->data_in);
attr.test.data_out = ptr_to_u64(test_attr->data_out);
@@ -533,7 +559,7 @@ int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
union bpf_attr attr;
int err;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.start_id = start_id;
err = sys_bpf(BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr));
@@ -548,7 +574,7 @@ int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
union bpf_attr attr;
int err;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.start_id = start_id;
err = sys_bpf(BPF_MAP_GET_NEXT_ID, &attr, sizeof(attr));
@@ -562,7 +588,7 @@ int bpf_prog_get_fd_by_id(__u32 id)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.prog_id = id;
return sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
@@ -572,7 +598,7 @@ int bpf_map_get_fd_by_id(__u32 id)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.map_id = id;
return sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
@@ -582,7 +608,7 @@ int bpf_btf_get_fd_by_id(__u32 id)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.btf_id = id;
return sys_bpf(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
@@ -593,7 +619,7 @@ int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len)
union bpf_attr attr;
int err;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.info.bpf_fd = prog_fd;
attr.info.info_len = *info_len;
attr.info.info = ptr_to_u64(info);
@@ -609,7 +635,7 @@ int bpf_raw_tracepoint_open(const char *name, int prog_fd)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.raw_tracepoint.name = ptr_to_u64(name);
attr.raw_tracepoint.prog_fd = prog_fd;
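
A hedged sketch of using the new bpf_map_lookup_elem_flags() wrapper added above; BPF_F_LOCK is assumed to come from the uapi/linux/bpf.h update elsewhere in this series, and map_fd stands for an already created map whose value embeds a struct bpf_spin_lock.

#include <stdio.h>
#include <linux/bpf.h>
#include "bpf.h"

static int read_locked(int map_fd, __u32 key, void *value)
{
	/* Copy the value out while the kernel holds the element's lock. */
	int err = bpf_map_lookup_elem_flags(map_fd, &key, value, BPF_F_LOCK);

	if (err)
		fprintf(stderr, "lookup with BPF_F_LOCK failed: %d\n", err);
	return err;
}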
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 8f09de482839..6ffdd79bea89 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -85,6 +85,7 @@ struct bpf_load_program_attr {
__u32 line_info_rec_size;
const void *line_info;
__u32 line_info_cnt;
+ __u32 log_level;
};
/* Flags to direct loading requirements */
@@ -110,6 +111,8 @@ LIBBPF_API int bpf_map_update_elem(int fd, const void *key, const void *value,
__u64 flags);
LIBBPF_API int bpf_map_lookup_elem(int fd, const void *key, void *value);
+LIBBPF_API int bpf_map_lookup_elem_flags(int fd, const void *key, void *value,
+ __u64 flags);
LIBBPF_API int bpf_map_lookup_and_delete_elem(int fd, const void *key,
void *value);
LIBBPF_API int bpf_map_delete_elem(int fd, const void *key);
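
A minimal sketch of the new log_level field in struct bpf_load_program_attr; the two-instruction "return 0" program and BPF_PROG_TYPE_SOCKET_FILTER are illustrative placeholders, not taken from this patch.

#include <stdio.h>
#include <linux/bpf.h>
#include "bpf.h"

int load_with_log(void)
{
	struct bpf_insn insns[] = {
		/* r0 = 0 */
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 },
		/* exit */
		{ .code = BPF_JMP | BPF_EXIT },
	};
	struct bpf_load_program_attr attr = {
		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
		.insns = insns,
		.insns_cnt = 2,
		.license = "GPL",
		.log_level = 2,	/* most verbose verifier log */
	};
	char log[65536];
	int fd;

	fd = bpf_load_program_xattr(&attr, log, sizeof(log));
	if (fd < 0)
		fprintf(stderr, "verifier log:\n%s\n", log);
	return fd;
}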
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index d682d3b8f7b9..1b8d8cdd3575 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2018 Facebook */
+#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
@@ -9,12 +10,14 @@
#include <linux/btf.h>
#include "btf.h"
#include "bpf.h"
+#include "libbpf.h"
+#include "libbpf_util.h"
-#define elog(fmt, ...) { if (err_log) err_log(fmt, ##__VA_ARGS__); }
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))
-#define BTF_MAX_NR_TYPES 65535
+#define BTF_MAX_NR_TYPES 0x7fffffff
+#define BTF_MAX_STR_OFFSET 0x7fffffff
#define IS_MODIFIER(k) (((k) == BTF_KIND_TYPEDEF) || \
((k) == BTF_KIND_VOLATILE) || \
@@ -39,9 +42,8 @@ struct btf {
struct btf_ext_info {
/*
- * info points to a deep copy of the individual info section
- * (e.g. func_info and line_info) from the .BTF.ext.
- * It does not include the __u32 rec_size.
+ * info points to the individual info section (e.g. func_info and
+ * line_info) from the .BTF.ext. It does not include the __u32 rec_size.
*/
void *info;
__u32 rec_size;
@@ -49,8 +51,13 @@ struct btf_ext_info {
};
struct btf_ext {
+ union {
+ struct btf_ext_header *hdr;
+ void *data;
+ };
struct btf_ext_info func_info;
struct btf_ext_info line_info;
+ __u32 data_size;
};
struct btf_ext_info_sec {
@@ -107,54 +114,54 @@ static int btf_add_type(struct btf *btf, struct btf_type *t)
return 0;
}
-static int btf_parse_hdr(struct btf *btf, btf_print_fn_t err_log)
+static int btf_parse_hdr(struct btf *btf)
{
const struct btf_header *hdr = btf->hdr;
__u32 meta_left;
if (btf->data_size < sizeof(struct btf_header)) {
- elog("BTF header not found\n");
+ pr_debug("BTF header not found\n");
return -EINVAL;
}
if (hdr->magic != BTF_MAGIC) {
- elog("Invalid BTF magic:%x\n", hdr->magic);
+ pr_debug("Invalid BTF magic:%x\n", hdr->magic);
return -EINVAL;
}
if (hdr->version != BTF_VERSION) {
- elog("Unsupported BTF version:%u\n", hdr->version);
+ pr_debug("Unsupported BTF version:%u\n", hdr->version);
return -ENOTSUP;
}
if (hdr->flags) {
- elog("Unsupported BTF flags:%x\n", hdr->flags);
+ pr_debug("Unsupported BTF flags:%x\n", hdr->flags);
return -ENOTSUP;
}
meta_left = btf->data_size - sizeof(*hdr);
if (!meta_left) {
- elog("BTF has no data\n");
+ pr_debug("BTF has no data\n");
return -EINVAL;
}
if (meta_left < hdr->type_off) {
- elog("Invalid BTF type section offset:%u\n", hdr->type_off);
+ pr_debug("Invalid BTF type section offset:%u\n", hdr->type_off);
return -EINVAL;
}
if (meta_left < hdr->str_off) {
- elog("Invalid BTF string section offset:%u\n", hdr->str_off);
+ pr_debug("Invalid BTF string section offset:%u\n", hdr->str_off);
return -EINVAL;
}
if (hdr->type_off >= hdr->str_off) {
- elog("BTF type section offset >= string section offset. No type?\n");
+ pr_debug("BTF type section offset >= string section offset. No type?\n");
return -EINVAL;
}
if (hdr->type_off & 0x02) {
- elog("BTF type section is not aligned to 4 bytes\n");
+ pr_debug("BTF type section is not aligned to 4 bytes\n");
return -EINVAL;
}
@@ -163,15 +170,15 @@ static int btf_parse_hdr(struct btf *btf, btf_print_fn_t err_log)
return 0;
}
-static int btf_parse_str_sec(struct btf *btf, btf_print_fn_t err_log)
+static int btf_parse_str_sec(struct btf *btf)
{
const struct btf_header *hdr = btf->hdr;
const char *start = btf->nohdr_data + hdr->str_off;
const char *end = start + btf->hdr->str_len;
- if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
+ if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET ||
start[0] || end[-1]) {
- elog("Invalid BTF string section\n");
+ pr_debug("Invalid BTF string section\n");
return -EINVAL;
}
@@ -180,7 +187,38 @@ static int btf_parse_str_sec(struct btf *btf, btf_print_fn_t err_log)
return 0;
}
-static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log)
+static int btf_type_size(struct btf_type *t)
+{
+ int base_size = sizeof(struct btf_type);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+
+ switch (BTF_INFO_KIND(t->info)) {
+ case BTF_KIND_FWD:
+ case BTF_KIND_CONST:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_PTR:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_FUNC:
+ return base_size;
+ case BTF_KIND_INT:
+ return base_size + sizeof(__u32);
+ case BTF_KIND_ENUM:
+ return base_size + vlen * sizeof(struct btf_enum);
+ case BTF_KIND_ARRAY:
+ return base_size + sizeof(struct btf_array);
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ return base_size + vlen * sizeof(struct btf_member);
+ case BTF_KIND_FUNC_PROTO:
+ return base_size + vlen * sizeof(struct btf_param);
+ default:
+ pr_debug("Unsupported BTF_KIND:%u\n", BTF_INFO_KIND(t->info));
+ return -EINVAL;
+ }
+}
+
+static int btf_parse_type_sec(struct btf *btf)
{
struct btf_header *hdr = btf->hdr;
void *nohdr_data = btf->nohdr_data;
@@ -189,41 +227,13 @@ static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log)
while (next_type < end_type) {
struct btf_type *t = next_type;
- __u16 vlen = BTF_INFO_VLEN(t->info);
+ int type_size;
int err;
- next_type += sizeof(*t);
- switch (BTF_INFO_KIND(t->info)) {
- case BTF_KIND_INT:
- next_type += sizeof(int);
- break;
- case BTF_KIND_ARRAY:
- next_type += sizeof(struct btf_array);
- break;
- case BTF_KIND_STRUCT:
- case BTF_KIND_UNION:
- next_type += vlen * sizeof(struct btf_member);
- break;
- case BTF_KIND_ENUM:
- next_type += vlen * sizeof(struct btf_enum);
- break;
- case BTF_KIND_FUNC_PROTO:
- next_type += vlen * sizeof(struct btf_param);
- break;
- case BTF_KIND_FUNC:
- case BTF_KIND_TYPEDEF:
- case BTF_KIND_PTR:
- case BTF_KIND_FWD:
- case BTF_KIND_VOLATILE:
- case BTF_KIND_CONST:
- case BTF_KIND_RESTRICT:
- break;
- default:
- elog("Unsupported BTF_KIND:%u\n",
- BTF_INFO_KIND(t->info));
- return -EINVAL;
- }
-
+ type_size = btf_type_size(t);
+ if (type_size < 0)
+ return type_size;
+ next_type += type_size;
err = btf_add_type(btf, t);
if (err)
return err;
@@ -232,6 +242,11 @@ static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log)
return 0;
}
+__u32 btf__get_nr_types(const struct btf *btf)
+{
+ return btf->nr_types;
+}
+
const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
{
if (type_id > btf->nr_types)
@@ -250,21 +265,6 @@ static bool btf_type_is_void_or_null(const struct btf_type *t)
return !t || btf_type_is_void(t);
}
-static __s64 btf_type_size(const struct btf_type *t)
-{
- switch (BTF_INFO_KIND(t->info)) {
- case BTF_KIND_INT:
- case BTF_KIND_STRUCT:
- case BTF_KIND_UNION:
- case BTF_KIND_ENUM:
- return t->size;
- case BTF_KIND_PTR:
- return sizeof(void *);
- default:
- return -EINVAL;
- }
-}
-
#define MAX_RESOLVE_DEPTH 32
__s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
@@ -278,11 +278,16 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
t = btf__type_by_id(btf, type_id);
for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t);
i++) {
- size = btf_type_size(t);
- if (size >= 0)
- break;
-
switch (BTF_INFO_KIND(t->info)) {
+ case BTF_KIND_INT:
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ case BTF_KIND_ENUM:
+ size = t->size;
+ goto done;
+ case BTF_KIND_PTR:
+ size = sizeof(void *);
+ goto done;
case BTF_KIND_TYPEDEF:
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
@@ -306,6 +311,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
if (size < 0)
return -EINVAL;
+done:
if (nelems && size > UINT32_MAX / nelems)
return -E2BIG;
@@ -363,10 +369,8 @@ void btf__free(struct btf *btf)
free(btf);
}
-struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log)
+struct btf *btf__new(__u8 *data, __u32 size)
{
- __u32 log_buf_size = 0;
- char *log_buf = NULL;
struct btf *btf;
int err;
@@ -376,16 +380,6 @@ struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log)
btf->fd = -1;
- if (err_log) {
- log_buf = malloc(BPF_LOG_BUF_SIZE);
- if (!log_buf) {
- err = -ENOMEM;
- goto done;
- }
- *log_buf = 0;
- log_buf_size = BPF_LOG_BUF_SIZE;
- }
-
btf->data = malloc(size);
if (!btf->data) {
err = -ENOMEM;
@@ -395,30 +389,17 @@ struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log)
memcpy(btf->data, data, size);
btf->data_size = size;
- btf->fd = bpf_load_btf(btf->data, btf->data_size,
- log_buf, log_buf_size, false);
-
- if (btf->fd == -1) {
- err = -errno;
- elog("Error loading BTF: %s(%d)\n", strerror(errno), errno);
- if (log_buf && *log_buf)
- elog("%s\n", log_buf);
- goto done;
- }
-
- err = btf_parse_hdr(btf, err_log);
+ err = btf_parse_hdr(btf);
if (err)
goto done;
- err = btf_parse_str_sec(btf, err_log);
+ err = btf_parse_str_sec(btf);
if (err)
goto done;
- err = btf_parse_type_sec(btf, err_log);
+ err = btf_parse_type_sec(btf);
done:
- free(log_buf);
-
if (err) {
btf__free(btf);
return ERR_PTR(err);
@@ -427,11 +408,47 @@ done:
return btf;
}
+int btf__load(struct btf *btf)
+{
+ __u32 log_buf_size = BPF_LOG_BUF_SIZE;
+ char *log_buf = NULL;
+ int err = 0;
+
+ if (btf->fd >= 0)
+ return -EEXIST;
+
+ log_buf = malloc(log_buf_size);
+ if (!log_buf)
+ return -ENOMEM;
+
+ *log_buf = 0;
+
+ btf->fd = bpf_load_btf(btf->data, btf->data_size,
+ log_buf, log_buf_size, false);
+ if (btf->fd < 0) {
+ err = -errno;
+ pr_warning("Error loading BTF: %s(%d)\n", strerror(errno), errno);
+ if (*log_buf)
+ pr_warning("%s\n", log_buf);
+ goto done;
+ }
+
+done:
+ free(log_buf);
+ return err;
+}
+
int btf__fd(const struct btf *btf)
{
return btf->fd;
}
+const void *btf__get_raw_data(const struct btf *btf, __u32 *size)
+{
+ *size = btf->data_size;
+ return btf->data;
+}
+
const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
{
if (offset < btf->hdr->str_len)
@@ -467,7 +484,7 @@ int btf__get_from_id(__u32 id, struct btf **btf)
goto exit_free;
}
- bzero(ptr, last_size);
+ memset(ptr, 0, last_size);
btf_info.btf = ptr_to_u64(ptr);
err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
@@ -481,7 +498,7 @@ int btf__get_from_id(__u32 id, struct btf **btf)
goto exit_free;
}
ptr = temp_ptr;
- bzero(ptr, last_size);
+ memset(ptr, 0, last_size);
btf_info.btf = ptr_to_u64(ptr);
err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
}
@@ -491,7 +508,7 @@ int btf__get_from_id(__u32 id, struct btf **btf)
goto exit_free;
}
- *btf = btf__new((__u8 *)(long)btf_info.btf, btf_info.btf_size, NULL);
+ *btf = btf__new((__u8 *)(long)btf_info.btf, btf_info.btf_size);
if (IS_ERR(*btf)) {
err = PTR_ERR(*btf);
*btf = NULL;
@@ -504,7 +521,79 @@ exit_free:
return err;
}
-struct btf_ext_sec_copy_param {
+int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
+ __u32 expected_key_size, __u32 expected_value_size,
+ __u32 *key_type_id, __u32 *value_type_id)
+{
+ const struct btf_type *container_type;
+ const struct btf_member *key, *value;
+ const size_t max_name = 256;
+ char container_name[max_name];
+ __s64 key_size, value_size;
+ __s32 container_id;
+
+ if (snprintf(container_name, max_name, "____btf_map_%s", map_name) ==
+ max_name) {
+ pr_warning("map:%s length of '____btf_map_%s' is too long\n",
+ map_name, map_name);
+ return -EINVAL;
+ }
+
+ container_id = btf__find_by_name(btf, container_name);
+ if (container_id < 0) {
+ pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
+ map_name, container_name);
+ return container_id;
+ }
+
+ container_type = btf__type_by_id(btf, container_id);
+ if (!container_type) {
+ pr_warning("map:%s cannot find BTF type for container_id:%u\n",
+ map_name, container_id);
+ return -EINVAL;
+ }
+
+ if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT ||
+ BTF_INFO_VLEN(container_type->info) < 2) {
+ pr_warning("map:%s container_name:%s is an invalid container struct\n",
+ map_name, container_name);
+ return -EINVAL;
+ }
+
+ key = (struct btf_member *)(container_type + 1);
+ value = key + 1;
+
+ key_size = btf__resolve_size(btf, key->type);
+ if (key_size < 0) {
+ pr_warning("map:%s invalid BTF key_type_size\n", map_name);
+ return key_size;
+ }
+
+ if (expected_key_size != key_size) {
+ pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
+ map_name, (__u32)key_size, expected_key_size);
+ return -EINVAL;
+ }
+
+ value_size = btf__resolve_size(btf, value->type);
+ if (value_size < 0) {
+ pr_warning("map:%s invalid BTF value_type_size\n", map_name);
+ return value_size;
+ }
+
+ if (expected_value_size != value_size) {
+ pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
+ map_name, (__u32)value_size, expected_value_size);
+ return -EINVAL;
+ }
+
+ *key_type_id = key->type;
+ *value_type_id = value->type;
+
+ return 0;
+}
+
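
A hypothetical caller of btf__get_map_kv_tids(); the map name "my_map", the value layout and the presence of a ____btf_map_my_map container struct in the BTF (normally emitted by a BPF_ANNOTATE_KV_PAIR-style macro on the BPF program side) are all illustrative assumptions.

#include <stdio.h>
#include "btf.h"

struct my_value { long counter; };	/* illustrative value layout */

static void show_map_btf_ids(const struct btf *btf)
{
	__u32 key_tid, value_tid;
	int err;

	err = btf__get_map_kv_tids(btf, "my_map", sizeof(__u32),
				   sizeof(struct my_value),
				   &key_tid, &value_tid);
	if (err)
		return;
	printf("key type id %u, value type id %u\n", key_tid, value_tid);
}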
+struct btf_ext_sec_setup_param {
__u32 off;
__u32 len;
__u32 min_rec_size;
@@ -512,41 +601,33 @@ struct btf_ext_sec_copy_param {
const char *desc;
};
-static int btf_ext_copy_info(struct btf_ext *btf_ext,
- __u8 *data, __u32 data_size,
- struct btf_ext_sec_copy_param *ext_sec,
- btf_print_fn_t err_log)
+static int btf_ext_setup_info(struct btf_ext *btf_ext,
+ struct btf_ext_sec_setup_param *ext_sec)
{
- const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
const struct btf_ext_info_sec *sinfo;
struct btf_ext_info *ext_info;
__u32 info_left, record_size;
/* The start of the info sec (including the __u32 record_size). */
- const void *info;
-
- /* data and data_size do not include btf_ext_header from now on */
- data = data + hdr->hdr_len;
- data_size -= hdr->hdr_len;
+ void *info;
if (ext_sec->off & 0x03) {
- elog(".BTF.ext %s section is not aligned to 4 bytes\n",
+ pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
ext_sec->desc);
return -EINVAL;
}
- if (data_size < ext_sec->off ||
- ext_sec->len > data_size - ext_sec->off) {
- elog("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
- ext_sec->desc, ext_sec->off, ext_sec->len);
+ info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
+ info_left = ext_sec->len;
+
+ if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) {
+ pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
+ ext_sec->desc, ext_sec->off, ext_sec->len);
return -EINVAL;
}
- info = data + ext_sec->off;
- info_left = ext_sec->len;
-
/* At least a record size */
if (info_left < sizeof(__u32)) {
- elog(".BTF.ext %s record size not found\n", ext_sec->desc);
+ pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc);
return -EINVAL;
}
@@ -554,8 +635,8 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext,
record_size = *(__u32 *)info;
if (record_size < ext_sec->min_rec_size ||
record_size & 0x03) {
- elog("%s section in .BTF.ext has invalid record size %u\n",
- ext_sec->desc, record_size);
+ pr_debug("%s section in .BTF.ext has invalid record size %u\n",
+ ext_sec->desc, record_size);
return -EINVAL;
}
@@ -564,7 +645,7 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext,
/* If no records, return failure now so .BTF.ext won't be used. */
if (!info_left) {
- elog("%s section in .BTF.ext has no records", ext_sec->desc);
+ pr_debug("%s section in .BTF.ext has no records", ext_sec->desc);
return -EINVAL;
}
@@ -574,14 +655,14 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext,
__u32 num_records;
if (info_left < sec_hdrlen) {
- elog("%s section header is not found in .BTF.ext\n",
+ pr_debug("%s section header is not found in .BTF.ext\n",
ext_sec->desc);
return -EINVAL;
}
num_records = sinfo->num_info;
if (num_records == 0) {
- elog("%s section has incorrect num_records in .BTF.ext\n",
+ pr_debug("%s section has incorrect num_records in .BTF.ext\n",
ext_sec->desc);
return -EINVAL;
}
@@ -589,7 +670,7 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext,
total_record_size = sec_hdrlen +
(__u64)num_records * record_size;
if (info_left < total_record_size) {
- elog("%s section has incorrect num_records in .BTF.ext\n",
+ pr_debug("%s section has incorrect num_records in .BTF.ext\n",
ext_sec->desc);
return -EINVAL;
}
@@ -601,74 +682,64 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext,
ext_info = ext_sec->ext_info;
ext_info->len = ext_sec->len - sizeof(__u32);
ext_info->rec_size = record_size;
- ext_info->info = malloc(ext_info->len);
- if (!ext_info->info)
- return -ENOMEM;
- memcpy(ext_info->info, info + sizeof(__u32), ext_info->len);
+ ext_info->info = info + sizeof(__u32);
return 0;
}
-static int btf_ext_copy_func_info(struct btf_ext *btf_ext,
- __u8 *data, __u32 data_size,
- btf_print_fn_t err_log)
+static int btf_ext_setup_func_info(struct btf_ext *btf_ext)
{
- const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
- struct btf_ext_sec_copy_param param = {
- .off = hdr->func_info_off,
- .len = hdr->func_info_len,
+ struct btf_ext_sec_setup_param param = {
+ .off = btf_ext->hdr->func_info_off,
+ .len = btf_ext->hdr->func_info_len,
.min_rec_size = sizeof(struct bpf_func_info_min),
.ext_info = &btf_ext->func_info,
.desc = "func_info"
};
- return btf_ext_copy_info(btf_ext, data, data_size, &param, err_log);
+ return btf_ext_setup_info(btf_ext, &param);
}
-static int btf_ext_copy_line_info(struct btf_ext *btf_ext,
- __u8 *data, __u32 data_size,
- btf_print_fn_t err_log)
+static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
{
- const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
- struct btf_ext_sec_copy_param param = {
- .off = hdr->line_info_off,
- .len = hdr->line_info_len,
+ struct btf_ext_sec_setup_param param = {
+ .off = btf_ext->hdr->line_info_off,
+ .len = btf_ext->hdr->line_info_len,
.min_rec_size = sizeof(struct bpf_line_info_min),
.ext_info = &btf_ext->line_info,
.desc = "line_info",
};
- return btf_ext_copy_info(btf_ext, data, data_size, &param, err_log);
+ return btf_ext_setup_info(btf_ext, &param);
}
-static int btf_ext_parse_hdr(__u8 *data, __u32 data_size,
- btf_print_fn_t err_log)
+static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
{
const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
if (data_size < offsetof(struct btf_ext_header, func_info_off) ||
data_size < hdr->hdr_len) {
- elog("BTF.ext header not found");
+ pr_debug("BTF.ext header not found");
return -EINVAL;
}
if (hdr->magic != BTF_MAGIC) {
- elog("Invalid BTF.ext magic:%x\n", hdr->magic);
+ pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
return -EINVAL;
}
if (hdr->version != BTF_VERSION) {
- elog("Unsupported BTF.ext version:%u\n", hdr->version);
+ pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
return -ENOTSUP;
}
if (hdr->flags) {
- elog("Unsupported BTF.ext flags:%x\n", hdr->flags);
+ pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags);
return -ENOTSUP;
}
if (data_size == hdr->hdr_len) {
- elog("BTF.ext has no data\n");
+ pr_debug("BTF.ext has no data\n");
return -EINVAL;
}
@@ -679,18 +750,16 @@ void btf_ext__free(struct btf_ext *btf_ext)
{
if (!btf_ext)
return;
-
- free(btf_ext->func_info.info);
- free(btf_ext->line_info.info);
+ free(btf_ext->data);
free(btf_ext);
}
-struct btf_ext *btf_ext__new(__u8 *data, __u32 size, btf_print_fn_t err_log)
+struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
{
struct btf_ext *btf_ext;
int err;
- err = btf_ext_parse_hdr(data, size, err_log);
+ err = btf_ext_parse_hdr(data, size);
if (err)
return ERR_PTR(err);
@@ -698,13 +767,23 @@ struct btf_ext *btf_ext__new(__u8 *data, __u32 size, btf_print_fn_t err_log)
if (!btf_ext)
return ERR_PTR(-ENOMEM);
- err = btf_ext_copy_func_info(btf_ext, data, size, err_log);
- if (err) {
- btf_ext__free(btf_ext);
- return ERR_PTR(err);
+ btf_ext->data_size = size;
+ btf_ext->data = malloc(size);
+ if (!btf_ext->data) {
+ err = -ENOMEM;
+ goto done;
}
+ memcpy(btf_ext->data, data, size);
+
+ err = btf_ext_setup_func_info(btf_ext);
+ if (err)
+ goto done;
+
+ err = btf_ext_setup_line_info(btf_ext);
+ if (err)
+ goto done;
- err = btf_ext_copy_line_info(btf_ext, data, size, err_log);
+done:
if (err) {
btf_ext__free(btf_ext);
return ERR_PTR(err);
@@ -713,6 +792,12 @@ struct btf_ext *btf_ext__new(__u8 *data, __u32 size, btf_print_fn_t err_log)
return btf_ext;
}
+const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size)
+{
+ *size = btf_ext->data_size;
+ return btf_ext->data;
+}
+
static int btf_ext_reloc_info(const struct btf *btf,
const struct btf_ext_info *ext_info,
const char *sec_name, __u32 insns_cnt,
@@ -761,7 +846,8 @@ static int btf_ext_reloc_info(const struct btf *btf,
return -ENOENT;
}
-int btf_ext__reloc_func_info(const struct btf *btf, const struct btf_ext *btf_ext,
+int btf_ext__reloc_func_info(const struct btf *btf,
+ const struct btf_ext *btf_ext,
const char *sec_name, __u32 insns_cnt,
void **func_info, __u32 *cnt)
{
@@ -769,7 +855,8 @@ int btf_ext__reloc_func_info(const struct btf *btf, const struct btf_ext *btf_ex
insns_cnt, func_info, cnt);
}
-int btf_ext__reloc_line_info(const struct btf *btf, const struct btf_ext *btf_ext,
+int btf_ext__reloc_line_info(const struct btf *btf,
+ const struct btf_ext *btf_ext,
const char *sec_name, __u32 insns_cnt,
void **line_info, __u32 *cnt)
{
@@ -786,3 +873,1778 @@ __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext)
{
return btf_ext->line_info.rec_size;
}
+
+struct btf_dedup;
+
+static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
+ const struct btf_dedup_opts *opts);
+static void btf_dedup_free(struct btf_dedup *d);
+static int btf_dedup_strings(struct btf_dedup *d);
+static int btf_dedup_prim_types(struct btf_dedup *d);
+static int btf_dedup_struct_types(struct btf_dedup *d);
+static int btf_dedup_ref_types(struct btf_dedup *d);
+static int btf_dedup_compact_types(struct btf_dedup *d);
+static int btf_dedup_remap_types(struct btf_dedup *d);
+
+/*
+ * Deduplicate BTF types and strings.
+ *
+ * BTF dedup algorithm takes as an input `struct btf` representing `.BTF` ELF
+ * section with all BTF type descriptors and string data. It overwrites that
+ * memory in-place with deduplicated types and strings without any loss of
+ * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section
+ * is provided, all the strings referenced from .BTF.ext section are honored
+ * and updated to point to the right offsets after deduplication.
+ *
+ * If function returns with error, type/string data might be garbled and should
+ * be discarded.
+ *
+ * More verbose and detailed description of both problem btf_dedup is solving,
+ * as well as solution could be found at:
+ * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html
+ *
+ * Problem description and justification
+ * =====================================
+ *
+ * BTF type information is typically emitted either as a result of conversion
+ * from DWARF to BTF or directly by compiler. In both cases, each compilation
+ * unit contains information about a subset of all the types that are used
+ * in an application. These subsets are frequently overlapping and contain a lot
+ * of duplicated information when later concatenated together into a single
+ * binary. This algorithm ensures that each unique type is represented by a
+ * single BTF type descriptor, greatly reducing the resulting size of BTF data.
+ *
+ * Compilation unit isolation and subsequent duplication of data are not the
+ * only problems. The same type hierarchy (e.g., a struct and all the types
+ * that struct references) in different compilation units can be represented
+ * in BTF to various degrees of completeness (or, rather, incompleteness) due
+ * to struct/union forward declarations.
+ *
+ * Let's take a look at an example that we'll use to better understand the
+ * problem (and solution). Suppose we have two compilation units, each using
+ * same `struct S`, but each of them having incomplete type information about
+ * struct's fields:
+ *
+ * // CU #1:
+ * struct S;
+ * struct A {
+ * int a;
+ * struct A* self;
+ * struct S* parent;
+ * };
+ * struct B;
+ * struct S {
+ * struct A* a_ptr;
+ * struct B* b_ptr;
+ * };
+ *
+ * // CU #2:
+ * struct S;
+ * struct A;
+ * struct B {
+ * int b;
+ * struct B* self;
+ * struct S* parent;
+ * };
+ * struct S {
+ * struct A* a_ptr;
+ * struct B* b_ptr;
+ * };
+ *
+ * In case of CU #1, BTF data will know only that `struct B` exists (but no
+ * more), but will know the complete type information about `struct A`. While
+ * for CU #2, it will know full type information about `struct B`, but will
+ * only know about the forward declaration of `struct A` (in BTF terms, it will
+ * have a `BTF_KIND_FWD` type descriptor with name `A`).
+ *
+ * This compilation unit isolation means that it's possible that there is no
+ * single CU with complete type information describing structs `S`, `A`, and
+ * `B`. Also, we might get tons of duplicated and redundant type information.
+ *
+ * Additional complication we need to keep in mind comes from the fact that
+ * types, in general, can form graphs containing cycles, not just DAGs.
+ *
+ * While the algorithm does deduplication, it also merges and resolves type
+ * information (unless disabled through `struct btf_dedup_opts`), whenever
+ * possible.
+ * E.g., in the example above with two compilation units having partial type
+ * information for structs `A` and `B`, the output of algorithm will emit
+ * a single copy of each BTF type that describes structs `A`, `B`, and `S`
+ * (as well as type information for `int` and pointers), as if they were defined
+ * in a single compilation unit as:
+ *
+ * struct A {
+ * int a;
+ * struct A* self;
+ * struct S* parent;
+ * };
+ * struct B {
+ * int b;
+ * struct B* self;
+ * struct S* parent;
+ * };
+ * struct S {
+ * struct A* a_ptr;
+ * struct B* b_ptr;
+ * };
+ *
+ * Algorithm summary
+ * =================
+ *
+ * Algorithm completes its work in 6 separate passes:
+ *
+ * 1. Strings deduplication.
+ * 2. Primitive types deduplication (int, enum, fwd).
+ * 3. Struct/union types deduplication.
+ * 4. Reference types deduplication (pointers, typedefs, arrays, funcs, func
+ * protos, and const/volatile/restrict modifiers).
+ * 5. Types compaction.
+ * 6. Types remapping.
+ *
+ * Algorithm determines canonical type descriptor, which is a single
+ * representative type for each truly unique type. This canonical type is the
+ * one that will go into final deduplicated BTF type information. For
+ * struct/unions, it is also the type that algorithm will merge additional type
+ * information into (while resolving FWDs), as it discovers it from data in
+ * other CUs. Each input BTF type eventually gets either mapped to itself, if
+ * that type is canonical, or to some other type, if that type is equivalent
+ * and was chosen as canonical representative. This mapping is stored in
+ * `btf_dedup->map` array. This map is also used to record STRUCT/UNION that
+ * FWD type got resolved to.
+ *
+ * To facilitate fast discovery of canonical types, we also maintain canonical
+ * index (`btf_dedup->dedup_table`), which maps type descriptor's signature hash
+ * (i.e., hashed kind, name, size, fields, etc) into a list of canonical types
+ * that match that signature. With sufficiently good choice of type signature
+ * hashing function, we can limit number of canonical types for each unique type
+ * signature to a very small number, allowing to find canonical type for any
+ * duplicated type very quickly.
+ *
+ * Struct/union deduplication is the most critical part and algorithm for
+ * deduplicating structs/unions is described in greater details in comments for
+ * `btf_dedup_is_equiv` function.
+ */
+int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
+ const struct btf_dedup_opts *opts)
+{
+ struct btf_dedup *d = btf_dedup_new(btf, btf_ext, opts);
+ int err;
+
+ if (IS_ERR(d)) {
+ pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
+ return -EINVAL;
+ }
+
+ err = btf_dedup_strings(d);
+ if (err < 0) {
+ pr_debug("btf_dedup_strings failed:%d\n", err);
+ goto done;
+ }
+ err = btf_dedup_prim_types(d);
+ if (err < 0) {
+ pr_debug("btf_dedup_prim_types failed:%d\n", err);
+ goto done;
+ }
+ err = btf_dedup_struct_types(d);
+ if (err < 0) {
+ pr_debug("btf_dedup_struct_types failed:%d\n", err);
+ goto done;
+ }
+ err = btf_dedup_ref_types(d);
+ if (err < 0) {
+ pr_debug("btf_dedup_ref_types failed:%d\n", err);
+ goto done;
+ }
+ err = btf_dedup_compact_types(d);
+ if (err < 0) {
+ pr_debug("btf_dedup_compact_types failed:%d\n", err);
+ goto done;
+ }
+ err = btf_dedup_remap_types(d);
+ if (err < 0) {
+ pr_debug("btf_dedup_remap_types failed:%d\n", err);
+ goto done;
+ }
+
+done:
+ btf_dedup_free(d);
+ return err;
+}
+
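
Putting the reworked flow together: btf__new() now only parses the raw data, btf__dedup() shrinks it in place, and btf__load() is a separate, explicit step that hands it to the kernel. A sketch under the same assumptions as this file (btf.h plus the IS_ERR()/ERR_PTR() helpers used throughout); raw_data/raw_size stand for a .BTF section read by the caller.

static struct btf *parse_dedup_and_load(__u8 *raw_data, __u32 raw_size)
{
	struct btf *btf;
	int err;

	btf = btf__new(raw_data, raw_size);	/* parse only, no syscalls */
	if (IS_ERR(btf))
		return btf;

	/* NULL opts: forwards are resolved, default dedup table size. */
	err = btf__dedup(btf, NULL, NULL);
	if (err)
		goto err_out;

	/* Separate step: load the (now smaller) data into the kernel. */
	err = btf__load(btf);
	if (err)
		goto err_out;

	return btf;	/* btf__fd(btf) now returns the kernel BTF fd */

err_out:
	btf__free(btf);
	return ERR_PTR(err);
}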
+#define BTF_DEDUP_TABLE_DEFAULT_SIZE (1 << 14)
+#define BTF_DEDUP_TABLE_MAX_SIZE_LOG 31
+#define BTF_UNPROCESSED_ID ((__u32)-1)
+#define BTF_IN_PROGRESS_ID ((__u32)-2)
+
+struct btf_dedup_node {
+ struct btf_dedup_node *next;
+ __u32 type_id;
+};
+
+struct btf_dedup {
+ /* .BTF section to be deduped in-place */
+ struct btf *btf;
+ /*
+ * Optional .BTF.ext section. When provided, any strings referenced
+ * from it will be taken into account when deduping strings
+ */
+ struct btf_ext *btf_ext;
+ /*
+ * This is a map from any type's signature hash to a list of possible
+ * canonical representative type candidates. Hash collisions are
+ * ignored, so even types of various kinds can share same list of
+ * candidates, which is fine because we rely on subsequent
+ * btf_xxx_equal() checks to authoritatively verify type equality.
+ */
+ struct btf_dedup_node **dedup_table;
+ /* Canonical types map */
+ __u32 *map;
+ /* Hypothetical mapping, used during type graph equivalence checks */
+ __u32 *hypot_map;
+ __u32 *hypot_list;
+ size_t hypot_cnt;
+ size_t hypot_cap;
+ /* Various option modifying behavior of algorithm */
+ struct btf_dedup_opts opts;
+};
+
+struct btf_str_ptr {
+ const char *str;
+ __u32 new_off;
+ bool used;
+};
+
+struct btf_str_ptrs {
+ struct btf_str_ptr *ptrs;
+ const char *data;
+ __u32 cnt;
+ __u32 cap;
+};
+
+static inline __u32 hash_combine(__u32 h, __u32 value)
+{
+/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
+#define GOLDEN_RATIO_PRIME 0x9e370001UL
+ return h * 37 + value * GOLDEN_RATIO_PRIME;
+#undef GOLDEN_RATIO_PRIME
+}
+
+#define for_each_dedup_cand(d, hash, node) \
+ for (node = d->dedup_table[hash & (d->opts.dedup_table_size - 1)]; \
+ node; \
+ node = node->next)
+
+static int btf_dedup_table_add(struct btf_dedup *d, __u32 hash, __u32 type_id)
+{
+ struct btf_dedup_node *node = malloc(sizeof(struct btf_dedup_node));
+ int bucket = hash & (d->opts.dedup_table_size - 1);
+
+ if (!node)
+ return -ENOMEM;
+ node->type_id = type_id;
+ node->next = d->dedup_table[bucket];
+ d->dedup_table[bucket] = node;
+ return 0;
+}
+
+static int btf_dedup_hypot_map_add(struct btf_dedup *d,
+ __u32 from_id, __u32 to_id)
+{
+ if (d->hypot_cnt == d->hypot_cap) {
+ __u32 *new_list;
+
+ d->hypot_cap += max(16, d->hypot_cap / 2);
+ new_list = realloc(d->hypot_list, sizeof(__u32) * d->hypot_cap);
+ if (!new_list)
+ return -ENOMEM;
+ d->hypot_list = new_list;
+ }
+ d->hypot_list[d->hypot_cnt++] = from_id;
+ d->hypot_map[from_id] = to_id;
+ return 0;
+}
+
+static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
+{
+ int i;
+
+ for (i = 0; i < d->hypot_cnt; i++)
+ d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
+ d->hypot_cnt = 0;
+}
+
+static void btf_dedup_table_free(struct btf_dedup *d)
+{
+ struct btf_dedup_node *head, *tmp;
+ int i;
+
+ if (!d->dedup_table)
+ return;
+
+ for (i = 0; i < d->opts.dedup_table_size; i++) {
+ while (d->dedup_table[i]) {
+ tmp = d->dedup_table[i];
+ d->dedup_table[i] = tmp->next;
+ free(tmp);
+ }
+
+ head = d->dedup_table[i];
+ while (head) {
+ tmp = head;
+ head = head->next;
+ free(tmp);
+ }
+ }
+
+ free(d->dedup_table);
+ d->dedup_table = NULL;
+}
+
+static void btf_dedup_free(struct btf_dedup *d)
+{
+ btf_dedup_table_free(d);
+
+ free(d->map);
+ d->map = NULL;
+
+ free(d->hypot_map);
+ d->hypot_map = NULL;
+
+ free(d->hypot_list);
+ d->hypot_list = NULL;
+
+ free(d);
+}
+
+/* Find the closest power of two >= size, capped at 2^max_size_log */
+static __u32 roundup_pow2_max(__u32 size, int max_size_log)
+{
+ int i;
+
+ for (i = 0; i < max_size_log && (1U << i) < size; i++)
+ ;
+ return 1U << i;
+}
+
+
+static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
+ const struct btf_dedup_opts *opts)
+{
+ struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
+ int i, err = 0;
+ __u32 sz;
+
+ if (!d)
+ return ERR_PTR(-ENOMEM);
+
+ d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds;
+ sz = opts && opts->dedup_table_size ? opts->dedup_table_size
+ : BTF_DEDUP_TABLE_DEFAULT_SIZE;
+ sz = roundup_pow2_max(sz, BTF_DEDUP_TABLE_MAX_SIZE_LOG);
+ d->opts.dedup_table_size = sz;
+
+ d->btf = btf;
+ d->btf_ext = btf_ext;
+
+ d->dedup_table = calloc(d->opts.dedup_table_size,
+ sizeof(struct btf_dedup_node *));
+ if (!d->dedup_table) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ d->map = malloc(sizeof(__u32) * (1 + btf->nr_types));
+ if (!d->map) {
+ err = -ENOMEM;
+ goto done;
+ }
+ /* special BTF "void" type is made canonical immediately */
+ d->map[0] = 0;
+ for (i = 1; i <= btf->nr_types; i++)
+ d->map[i] = BTF_UNPROCESSED_ID;
+
+ d->hypot_map = malloc(sizeof(__u32) * (1 + btf->nr_types));
+ if (!d->hypot_map) {
+ err = -ENOMEM;
+ goto done;
+ }
+ for (i = 0; i <= btf->nr_types; i++)
+ d->hypot_map[i] = BTF_UNPROCESSED_ID;
+
+done:
+ if (err) {
+ btf_dedup_free(d);
+ return ERR_PTR(err);
+ }
+
+ return d;
+}
+
+typedef int (*str_off_fn_t)(__u32 *str_off_ptr, void *ctx);
+
+/*
+ * Iterate over all possible places in .BTF and .BTF.ext that can reference
+ * string and pass pointer to it to a provided callback `fn`.
+ */
+static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx)
+{
+ void *line_data_cur, *line_data_end;
+ int i, j, r, rec_size;
+ struct btf_type *t;
+
+ for (i = 1; i <= d->btf->nr_types; i++) {
+ t = d->btf->types[i];
+ r = fn(&t->name_off, ctx);
+ if (r)
+ return r;
+
+ switch (BTF_INFO_KIND(t->info)) {
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION: {
+ struct btf_member *m = (struct btf_member *)(t + 1);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+
+ for (j = 0; j < vlen; j++) {
+ r = fn(&m->name_off, ctx);
+ if (r)
+ return r;
+ m++;
+ }
+ break;
+ }
+ case BTF_KIND_ENUM: {
+ struct btf_enum *m = (struct btf_enum *)(t + 1);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+
+ for (j = 0; j < vlen; j++) {
+ r = fn(&m->name_off, ctx);
+ if (r)
+ return r;
+ m++;
+ }
+ break;
+ }
+ case BTF_KIND_FUNC_PROTO: {
+ struct btf_param *m = (struct btf_param *)(t + 1);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+
+ for (j = 0; j < vlen; j++) {
+ r = fn(&m->name_off, ctx);
+ if (r)
+ return r;
+ m++;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ if (!d->btf_ext)
+ return 0;
+
+ line_data_cur = d->btf_ext->line_info.info;
+ line_data_end = d->btf_ext->line_info.info + d->btf_ext->line_info.len;
+ rec_size = d->btf_ext->line_info.rec_size;
+
+ while (line_data_cur < line_data_end) {
+ struct btf_ext_info_sec *sec = line_data_cur;
+ struct bpf_line_info_min *line_info;
+ __u32 num_info = sec->num_info;
+
+ r = fn(&sec->sec_name_off, ctx);
+ if (r)
+ return r;
+
+ line_data_cur += sizeof(struct btf_ext_info_sec);
+ for (i = 0; i < num_info; i++) {
+ line_info = line_data_cur;
+ r = fn(&line_info->file_name_off, ctx);
+ if (r)
+ return r;
+ r = fn(&line_info->line_off, ctx);
+ if (r)
+ return r;
+ line_data_cur += rec_size;
+ }
+ }
+
+ return 0;
+}
+
+static int str_sort_by_content(const void *a1, const void *a2)
+{
+ const struct btf_str_ptr *p1 = a1;
+ const struct btf_str_ptr *p2 = a2;
+
+ return strcmp(p1->str, p2->str);
+}
+
+static int str_sort_by_offset(const void *a1, const void *a2)
+{
+ const struct btf_str_ptr *p1 = a1;
+ const struct btf_str_ptr *p2 = a2;
+
+ if (p1->str != p2->str)
+ return p1->str < p2->str ? -1 : 1;
+ return 0;
+}
+
+static int btf_dedup_str_ptr_cmp(const void *str_ptr, const void *pelem)
+{
+ const struct btf_str_ptr *p = pelem;
+
+ if (str_ptr != p->str)
+ return (const char *)str_ptr < p->str ? -1 : 1;
+ return 0;
+}
+
+static int btf_str_mark_as_used(__u32 *str_off_ptr, void *ctx)
+{
+ struct btf_str_ptrs *strs;
+ struct btf_str_ptr *s;
+
+ if (*str_off_ptr == 0)
+ return 0;
+
+ strs = ctx;
+ s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
+ sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);
+ if (!s)
+ return -EINVAL;
+ s->used = true;
+ return 0;
+}
+
+static int btf_str_remap_offset(__u32 *str_off_ptr, void *ctx)
+{
+ struct btf_str_ptrs *strs;
+ struct btf_str_ptr *s;
+
+ if (*str_off_ptr == 0)
+ return 0;
+
+ strs = ctx;
+ s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
+ sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);
+ if (!s)
+ return -EINVAL;
+ *str_off_ptr = s->new_off;
+ return 0;
+}
+
+/*
+ * Dedup string and filter out those that are not referenced from either .BTF
+ * or .BTF.ext (if provided) sections.
+ *
+ * This is done by building index of all strings in BTF's string section,
+ * then iterating over all entities that can reference strings (e.g., type
+ * names, struct field names, .BTF.ext line info, etc) and marking corresponding
+ * strings as used. After that all used strings are deduped and compacted into
+ * sequential blob of memory and new offsets are calculated. Then all the string
+ * references are iterated again and rewritten using new offsets.
+ */
+static int btf_dedup_strings(struct btf_dedup *d)
+{
+ const struct btf_header *hdr = d->btf->hdr;
+ char *start = (char *)d->btf->nohdr_data + hdr->str_off;
+ char *end = start + d->btf->hdr->str_len;
+ char *p = start, *tmp_strs = NULL;
+ struct btf_str_ptrs strs = {
+ .cnt = 0,
+ .cap = 0,
+ .ptrs = NULL,
+ .data = start,
+ };
+ int i, j, err = 0, grp_idx;
+ bool grp_used;
+
+ /* build index of all strings */
+ while (p < end) {
+ if (strs.cnt + 1 > strs.cap) {
+ struct btf_str_ptr *new_ptrs;
+
+ strs.cap += max(strs.cnt / 2, 16);
+ new_ptrs = realloc(strs.ptrs,
+ sizeof(strs.ptrs[0]) * strs.cap);
+ if (!new_ptrs) {
+ err = -ENOMEM;
+ goto done;
+ }
+ strs.ptrs = new_ptrs;
+ }
+
+ strs.ptrs[strs.cnt].str = p;
+ strs.ptrs[strs.cnt].used = false;
+
+ p += strlen(p) + 1;
+ strs.cnt++;
+ }
+
+ /* temporary storage for deduplicated strings */
+ tmp_strs = malloc(d->btf->hdr->str_len);
+ if (!tmp_strs) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ /* mark all used strings */
+ strs.ptrs[0].used = true;
+ err = btf_for_each_str_off(d, btf_str_mark_as_used, &strs);
+ if (err)
+ goto done;
+
+ /* sort strings by content, so that we can identify duplicates */
+ qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_content);
+
+ /*
+ * iterate groups of equal strings and if any instance in a group was
+ * referenced, emit single instance and remember new offset
+ */
+ p = tmp_strs;
+ grp_idx = 0;
+ grp_used = strs.ptrs[0].used;
+ /* iterate past end to avoid code duplication after loop */
+ for (i = 1; i <= strs.cnt; i++) {
+ /*
+ * when i == strs.cnt, we want to skip string comparison and go
+ * straight to handling last group of strings (otherwise we'd
+ * need to handle last group after the loop w/ duplicated code)
+ */
+ if (i < strs.cnt &&
+ !strcmp(strs.ptrs[i].str, strs.ptrs[grp_idx].str)) {
+ grp_used = grp_used || strs.ptrs[i].used;
+ continue;
+ }
+
+ /*
+ * this check would have been required after the loop to handle
+ * last group of strings, but due to <= condition in a loop
+ * we avoid that duplication
+ */
+ if (grp_used) {
+ int new_off = p - tmp_strs;
+ __u32 len = strlen(strs.ptrs[grp_idx].str);
+
+ memmove(p, strs.ptrs[grp_idx].str, len + 1);
+ for (j = grp_idx; j < i; j++)
+ strs.ptrs[j].new_off = new_off;
+ p += len + 1;
+ }
+
+ if (i < strs.cnt) {
+ grp_idx = i;
+ grp_used = strs.ptrs[i].used;
+ }
+ }
+
+ /* replace original strings with deduped ones */
+ d->btf->hdr->str_len = p - tmp_strs;
+ memmove(start, tmp_strs, d->btf->hdr->str_len);
+ end = start + d->btf->hdr->str_len;
+
+ /* restore original order for further binary search lookups */
+ qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_offset);
+
+ /* remap string offsets */
+ err = btf_for_each_str_off(d, btf_str_remap_offset, &strs);
+ if (err)
+ goto done;
+
+ d->btf->hdr->str_len = end - start;
+
+done:
+ free(tmp_strs);
+ free(strs.ptrs);
+ return err;
+}
+
+static __u32 btf_hash_common(struct btf_type *t)
+{
+ __u32 h;
+
+ h = hash_combine(0, t->name_off);
+ h = hash_combine(h, t->info);
+ h = hash_combine(h, t->size);
+ return h;
+}
+
+static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
+{
+ return t1->name_off == t2->name_off &&
+ t1->info == t2->info &&
+ t1->size == t2->size;
+}
+
+/* Calculate type signature hash of INT. */
+static __u32 btf_hash_int(struct btf_type *t)
+{
+ __u32 info = *(__u32 *)(t + 1);
+ __u32 h;
+
+ h = btf_hash_common(t);
+ h = hash_combine(h, info);
+ return h;
+}
+
+/* Check structural equality of two INTs. */
+static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2)
+{
+ __u32 info1, info2;
+
+ if (!btf_equal_common(t1, t2))
+ return false;
+ info1 = *(__u32 *)(t1 + 1);
+ info2 = *(__u32 *)(t2 + 1);
+ return info1 == info2;
+}
+
+/* Calculate type signature hash of ENUM. */
+static __u32 btf_hash_enum(struct btf_type *t)
+{
+ struct btf_enum *member = (struct btf_enum *)(t + 1);
+ __u32 vlen = BTF_INFO_VLEN(t->info);
+ __u32 h = btf_hash_common(t);
+ int i;
+
+ for (i = 0; i < vlen; i++) {
+ h = hash_combine(h, member->name_off);
+ h = hash_combine(h, member->val);
+ member++;
+ }
+ return h;
+}
+
+/* Check structural equality of two ENUMs. */
+static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
+{
+ struct btf_enum *m1, *m2;
+ __u16 vlen;
+ int i;
+
+ if (!btf_equal_common(t1, t2))
+ return false;
+
+ vlen = BTF_INFO_VLEN(t1->info);
+ m1 = (struct btf_enum *)(t1 + 1);
+ m2 = (struct btf_enum *)(t2 + 1);
+ for (i = 0; i < vlen; i++) {
+ if (m1->name_off != m2->name_off || m1->val != m2->val)
+ return false;
+ m1++;
+ m2++;
+ }
+ return true;
+}
+
+/*
+ * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
+ * as referenced type IDs equivalence is established separately during type
+ * graph equivalence check algorithm.
+ */
+static __u32 btf_hash_struct(struct btf_type *t)
+{
+ struct btf_member *member = (struct btf_member *)(t + 1);
+ __u32 vlen = BTF_INFO_VLEN(t->info);
+ __u32 h = btf_hash_common(t);
+ int i;
+
+ for (i = 0; i < vlen; i++) {
+ h = hash_combine(h, member->name_off);
+ h = hash_combine(h, member->offset);
+ /* no hashing of referenced type ID, it can be unresolved yet */
+ member++;
+ }
+ return h;
+}
+
+/*
+ * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
+ * IDs. This check is performed during type graph equivalence check and
+ * referenced types equivalence is checked separately.
+ */
+static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
+{
+ struct btf_member *m1, *m2;
+ __u16 vlen;
+ int i;
+
+ if (!btf_equal_common(t1, t2))
+ return false;
+
+ vlen = BTF_INFO_VLEN(t1->info);
+ m1 = (struct btf_member *)(t1 + 1);
+ m2 = (struct btf_member *)(t2 + 1);
+ for (i = 0; i < vlen; i++) {
+ if (m1->name_off != m2->name_off || m1->offset != m2->offset)
+ return false;
+ m1++;
+ m2++;
+ }
+ return true;
+}
+
+/*
+ * Calculate type signature hash of ARRAY, including referenced type IDs,
+ * under assumption that they were already resolved to canonical type IDs and
+ * are not going to change.
+ */
+static __u32 btf_hash_array(struct btf_type *t)
+{
+ struct btf_array *info = (struct btf_array *)(t + 1);
+ __u32 h = btf_hash_common(t);
+
+ h = hash_combine(h, info->type);
+ h = hash_combine(h, info->index_type);
+ h = hash_combine(h, info->nelems);
+ return h;
+}
+
+/*
+ * Check exact equality of two ARRAYs, taking into account referenced
+ * type IDs, under assumption that they were already resolved to canonical
+ * type IDs and are not going to change.
+ * This function is called during reference types deduplication to compare
+ * ARRAY to potential canonical representative.
+ */
+static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
+{
+ struct btf_array *info1, *info2;
+
+ if (!btf_equal_common(t1, t2))
+ return false;
+
+ info1 = (struct btf_array *)(t1 + 1);
+ info2 = (struct btf_array *)(t2 + 1);
+ return info1->type == info2->type &&
+ info1->index_type == info2->index_type &&
+ info1->nelems == info2->nelems;
+}
+
+/*
+ * Check structural compatibility of two ARRAYs, ignoring referenced type
+ * IDs. This check is performed during type graph equivalence check and
+ * referenced types equivalence is checked separately.
+ */
+static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
+{
+ struct btf_array *info1, *info2;
+
+ if (!btf_equal_common(t1, t2))
+ return false;
+
+ info1 = (struct btf_array *)(t1 + 1);
+ info2 = (struct btf_array *)(t2 + 1);
+ return info1->nelems == info2->nelems;
+}
+
+/*
+ * Calculate type signature hash of FUNC_PROTO, including referenced type IDs,
+ * under assumption that they were already resolved to canonical type IDs and
+ * are not going to change.
+ */
+static inline __u32 btf_hash_fnproto(struct btf_type *t)
+{
+ struct btf_param *member = (struct btf_param *)(t + 1);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+ __u32 h = btf_hash_common(t);
+ int i;
+
+ for (i = 0; i < vlen; i++) {
+ h = hash_combine(h, member->name_off);
+ h = hash_combine(h, member->type);
+ member++;
+ }
+ return h;
+}
+
+/*
+ * Check exact equality of two FUNC_PROTOs, taking into account referenced
+ * type IDs, under assumption that they were already resolved to canonical
+ * type IDs and are not going to change.
+ * This function is called during reference types deduplication to compare
+ * FUNC_PROTO to potential canonical representative.
+ */
+static inline bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
+{
+ struct btf_param *m1, *m2;
+ __u16 vlen;
+ int i;
+
+ if (!btf_equal_common(t1, t2))
+ return false;
+
+ vlen = BTF_INFO_VLEN(t1->info);
+ m1 = (struct btf_param *)(t1 + 1);
+ m2 = (struct btf_param *)(t2 + 1);
+ for (i = 0; i < vlen; i++) {
+ if (m1->name_off != m2->name_off || m1->type != m2->type)
+ return false;
+ m1++;
+ m2++;
+ }
+ return true;
+}
+
+/*
+ * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
+ * IDs. This check is performed during type graph equivalence check and
+ * referenced types equivalence is checked separately.
+ */
+static inline bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
+{
+ struct btf_param *m1, *m2;
+ __u16 vlen;
+ int i;
+
+ /* skip return type ID */
+ if (t1->name_off != t2->name_off || t1->info != t2->info)
+ return false;
+
+ vlen = BTF_INFO_VLEN(t1->info);
+ m1 = (struct btf_param *)(t1 + 1);
+ m2 = (struct btf_param *)(t2 + 1);
+ for (i = 0; i < vlen; i++) {
+ if (m1->name_off != m2->name_off)
+ return false;
+ m1++;
+ m2++;
+ }
+ return true;
+}
+
+/*
+ * Deduplicate primitive types that can't reference other types by calculating
+ * their type signature hash and comparing it with any possible canonical
+ * candidate. If no canonical candidate matches, the type itself is marked as
+ * canonical and is added into `btf_dedup->dedup_table` as another candidate.
+ */
+static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
+{
+ struct btf_type *t = d->btf->types[type_id];
+ struct btf_type *cand;
+ struct btf_dedup_node *cand_node;
+ /* if we don't find equivalent type, then we are canonical */
+ __u32 new_id = type_id;
+ __u32 h;
+
+ switch (BTF_INFO_KIND(t->info)) {
+ case BTF_KIND_CONST:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_PTR:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_ARRAY:
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ case BTF_KIND_FUNC:
+ case BTF_KIND_FUNC_PROTO:
+ return 0;
+
+ case BTF_KIND_INT:
+ h = btf_hash_int(t);
+ for_each_dedup_cand(d, h, cand_node) {
+ cand = d->btf->types[cand_node->type_id];
+ if (btf_equal_int(t, cand)) {
+ new_id = cand_node->type_id;
+ break;
+ }
+ }
+ break;
+
+ case BTF_KIND_ENUM:
+ h = btf_hash_enum(t);
+ for_each_dedup_cand(d, h, cand_node) {
+ cand = d->btf->types[cand_node->type_id];
+ if (btf_equal_enum(t, cand)) {
+ new_id = cand_node->type_id;
+ break;
+ }
+ }
+ break;
+
+ case BTF_KIND_FWD:
+ h = btf_hash_common(t);
+ for_each_dedup_cand(d, h, cand_node) {
+ cand = d->btf->types[cand_node->type_id];
+ if (btf_equal_common(t, cand)) {
+ new_id = cand_node->type_id;
+ break;
+ }
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ d->map[type_id] = new_id;
+ if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int btf_dedup_prim_types(struct btf_dedup *d)
+{
+ int i, err;
+
+ for (i = 1; i <= d->btf->nr_types; i++) {
+ err = btf_dedup_prim_type(d, i);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+/*
+ * Check whether type is already mapped into canonical one (could be to itself).
+ */
+static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id)
+{
+ return d->map[type_id] <= BTF_MAX_NR_TYPES;
+}
+
+/*
+ * Resolve type ID into its canonical type ID, if any; otherwise return original
+ * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow
+ * STRUCT/UNION link and resolve it into canonical type ID as well.
+ */
+static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id)
+{
+ while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
+ type_id = d->map[type_id];
+ return type_id;
+}
+
+/*
+ * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original
+ * type ID.
+ */
+static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
+{
+ __u32 orig_type_id = type_id;
+
+ if (BTF_INFO_KIND(d->btf->types[type_id]->info) != BTF_KIND_FWD)
+ return type_id;
+
+ while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
+ type_id = d->map[type_id];
+
+ if (BTF_INFO_KIND(d->btf->types[type_id]->info) != BTF_KIND_FWD)
+ return type_id;
+
+ return orig_type_id;
+}
+
+static inline __u16 btf_fwd_kind(struct btf_type *t)
+{
+ return BTF_INFO_KFLAG(t->info) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
+}
+
+/*
+ * Check equivalence of BTF type graph formed by candidate struct/union (we'll
+ * call it "candidate graph" in this description for brevity) to a type graph
+ * formed by (potential) canonical struct/union ("canonical graph" for brevity
+ * here, though keep in mind that not all types in canonical graph are
+ * necessarily canonical representatives themselves; some of them might be
+ * duplicates or their uniqueness might not have been established yet).
+ * Returns:
+ * - >0, if type graphs are equivalent;
+ * - 0, if not equivalent;
+ * - <0, on error.
+ *
+ * Algorithm performs side-by-side DFS traversal of both type graphs and checks
+ * equivalence of BTF types at each step. If at any point BTF types in candidate
+ * and canonical graphs are not compatible structurally, whole graphs are
+ * incompatible. If types are structurally equivalent (i.e., all information
+ * except referenced type IDs is exactly the same), a mapping from `canon_id` to
+ * a `cand_id` is recorded in the hypothetical mapping (`btf_dedup->hypot_map`).
+ * If a type references other types, then those referenced types are checked
+ * for equivalence recursively.
+ *
+ * During DFS traversal, if we find that for current `canon_id` type we
+ * already have some mapping in hypothetical map, we check for two possible
+ * situations:
+ * - `canon_id` is mapped to exactly the same type as `cand_id`. This will
+ * happen when type graphs have cycles. In this case we assume those two
+ * types are equivalent.
+ * - `canon_id` is mapped to a different type. This is a contradiction in our
+ * hypothetical mapping, because the same type in the canonical graph would
+ * correspond to two different types in the candidate graph, which for
+ * equivalent type graphs shouldn't happen. This condition terminates the
+ * equivalence check with a negative result.
+ *
+ * If traversal of both type graphs exhausts all types to check and finds no
+ * contradiction, then the type graphs are equivalent.
+ *
+ * When checking types for equivalence, there is one special case: FWD types.
+ * If FWD type resolution is allowed and one of the types (either from canonical
+ * or candidate graph) is FWD and the other is STRUCT/UNION (depending on FWD's
+ * kind flag) and their names match, the hypothetical mapping is updated to point
+ * from FWD to STRUCT/UNION. If the graphs are later determined to be equivalent,
+ * this mapping will be used to record the FWD -> STRUCT/UNION mapping permanently.
+ *
+ * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution,
+ * if there are two identically named (or anonymous) structs/unions that are
+ * structurally compatible, one of which has a FWD field, while the other is a
+ * concrete STRUCT/UNION, but according to C sources they are different
+ * structs/unions that reference different types with the same name. This is
+ * extremely unlikely to happen, but the btf_dedup API allows disabling FWD
+ * resolution if this logic is causing problems.
+ *
+ * Doing FWD resolution means that both the candidate and the canonical graphs
+ * can consist of portions that come from multiple compilation units. This is
+ * because types within a single compilation unit are always deduplicated and
+ * FWDs are already resolved, if the referenced struct/union definition is
+ * available. So, if we had an unresolved FWD and found a corresponding
+ * STRUCT/UNION, they will be from different compilation units. This
+ * consequently means that when we "link" FWD to corresponding STRUCT/UNION,
+ * type graph will likely have at least two different BTF types that describe
+ * same type (e.g., most probably there will be two different BTF types for the
+ * same 'int' primitive type) and could even have "overlapping" parts of type
+ * graph that describe same subset of types.
+ *
+ * This in turn means that our assumption that each type in canonical graph
+ * must correspond to exactly one type in candidate graph might not hold
+ * anymore and will make it harder to detect contradictions using hypothetical
+ * map. To handle this problem, we allow following FWD -> STRUCT/UNION
+ * resolution only in the canonical graph. FWDs in the candidate graph are never
+ * resolved. To see why this is OK, let's check all possible situations w.r.t. FWDs
+ * that can occur:
+ * - Both types in canonical and candidate graphs are FWDs. If they are
+ * structurally equivalent, then they can either be both resolved to the
+ * same STRUCT/UNION or not resolved at all. In both cases they are
+ * equivalent and there is no need to resolve FWD on candidate side.
+ * - Both types in canonical and candidate graphs are concrete STRUCT/UNION,
+ * so nothing to resolve as well, algorithm will check equivalence anyway.
+ * - Type in canonical graph is FWD, while type in candidate is concrete
+ * STRUCT/UNION. In this case candidate graph comes from single compilation
+ * unit, so there is exactly one BTF type for each unique C type. After
+ * resolving FWD into STRUCT/UNION, there might be more than one BTF type
+ * in canonical graph mapping to single BTF type in candidate graph, but
+ * because hypothetical mapping maps from canonical to candidate types, it's
+ * alright, and we still maintain the property of having single `canon_id`
+ * mapping to single `cand_id` (there could be two different `canon_id`
+ * mapped to the same `cand_id`, but it's not contradictory).
+ * - Type in canonical graph is concrete STRUCT/UNION, while type in candidate
+ * graph is FWD. In this case we are just going to check compatibility of
+ * STRUCT/UNION and corresponding FWD, and if they are compatible, we'll
+ * assume that whatever STRUCT/UNION FWD resolves to must be equivalent to
+ * a concrete STRUCT/UNION from canonical graph. If the rest of type graphs
+ * turn out equivalent, we'll re-resolve FWD to concrete STRUCT/UNION from
+ * canonical graph.
+ */
+static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
+ __u32 canon_id)
+{
+ struct btf_type *cand_type;
+ struct btf_type *canon_type;
+ __u32 hypot_type_id;
+ __u16 cand_kind;
+ __u16 canon_kind;
+ int i, eq;
+
+ /* if both resolve to the same canonical, they must be equivalent */
+ if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id))
+ return 1;
+
+ canon_id = resolve_fwd_id(d, canon_id);
+
+ hypot_type_id = d->hypot_map[canon_id];
+ if (hypot_type_id <= BTF_MAX_NR_TYPES)
+ return hypot_type_id == cand_id;
+
+ if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
+ return -ENOMEM;
+
+ cand_type = d->btf->types[cand_id];
+ canon_type = d->btf->types[canon_id];
+ cand_kind = BTF_INFO_KIND(cand_type->info);
+ canon_kind = BTF_INFO_KIND(canon_type->info);
+
+ if (cand_type->name_off != canon_type->name_off)
+ return 0;
+
+ /* FWD <--> STRUCT/UNION equivalence check, if enabled */
+ if (!d->opts.dont_resolve_fwds
+ && (cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
+ && cand_kind != canon_kind) {
+ __u16 real_kind;
+ __u16 fwd_kind;
+
+ if (cand_kind == BTF_KIND_FWD) {
+ real_kind = canon_kind;
+ fwd_kind = btf_fwd_kind(cand_type);
+ } else {
+ real_kind = cand_kind;
+ fwd_kind = btf_fwd_kind(canon_type);
+ }
+ return fwd_kind == real_kind;
+ }
+
+ if (cand_type->info != canon_type->info)
+ return 0;
+
+ switch (cand_kind) {
+ case BTF_KIND_INT:
+ return btf_equal_int(cand_type, canon_type);
+
+ case BTF_KIND_ENUM:
+ return btf_equal_enum(cand_type, canon_type);
+
+ case BTF_KIND_FWD:
+ return btf_equal_common(cand_type, canon_type);
+
+ case BTF_KIND_CONST:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_PTR:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_FUNC:
+ return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
+
+ case BTF_KIND_ARRAY: {
+ struct btf_array *cand_arr, *canon_arr;
+
+ if (!btf_compat_array(cand_type, canon_type))
+ return 0;
+ cand_arr = (struct btf_array *)(cand_type + 1);
+ canon_arr = (struct btf_array *)(canon_type + 1);
+ eq = btf_dedup_is_equiv(d,
+ cand_arr->index_type, canon_arr->index_type);
+ if (eq <= 0)
+ return eq;
+ return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type);
+ }
+
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION: {
+ struct btf_member *cand_m, *canon_m;
+ __u16 vlen;
+
+ if (!btf_shallow_equal_struct(cand_type, canon_type))
+ return 0;
+ vlen = BTF_INFO_VLEN(cand_type->info);
+ cand_m = (struct btf_member *)(cand_type + 1);
+ canon_m = (struct btf_member *)(canon_type + 1);
+ for (i = 0; i < vlen; i++) {
+ eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);
+ if (eq <= 0)
+ return eq;
+ cand_m++;
+ canon_m++;
+ }
+
+ return 1;
+ }
+
+ case BTF_KIND_FUNC_PROTO: {
+ struct btf_param *cand_p, *canon_p;
+ __u16 vlen;
+
+ if (!btf_compat_fnproto(cand_type, canon_type))
+ return 0;
+ eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
+ if (eq <= 0)
+ return eq;
+ vlen = BTF_INFO_VLEN(cand_type->info);
+ cand_p = (struct btf_param *)(cand_type + 1);
+ canon_p = (struct btf_param *)(canon_type + 1);
+ for (i = 0; i < vlen; i++) {
+ eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
+ if (eq <= 0)
+ return eq;
+ cand_p++;
+ canon_p++;
+ }
+ return 1;
+ }
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Use hypothetical mapping, produced by successful type graph equivalence
+ * check, to augment existing struct/union canonical mapping, where possible.
+ *
+ * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record
+ * FWD -> STRUCT/UNION correspondence as well. FWD resolution is bidirectional:
+ * it doesn't matter if FWD type was part of canonical graph or candidate one,
+ * we are recording the mapping anyway. Unlike the care required for the
+ * struct/union correspondence mapping (described below), for FWD resolution
+ * ordering is not important, as by the time a FWD type (a reference type) is
+ * deduplicated, all structs/unions will already have been deduped anyway.
+ *
+ * Recording STRUCT/UNION mapping is purely a performance optimization and is
+ * not required for correctness. It needs to be done carefully to ensure that
+ * struct/union from candidate's type graph is not mapped into corresponding
+ * struct/union from canonical type graph that itself hasn't been resolved into
+ * canonical representative. The only guarantee we have is that canonical
+ * struct/union was determined as canonical and that won't change. But any
+ * types referenced through that struct/union's fields might not have been
+ * resolved yet, in which case it's too early to establish any kind of
+ * correspondence between structs/unions.
+ *
+ * No canonical correspondence is derived for primitive types (they are
+ * completely deduplicated already anyway) or reference types (they rely on
+ * stability of struct/union canonical relationship for equivalence checks).
+ */
+static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
+{
+ __u32 cand_type_id, targ_type_id;
+ __u16 t_kind, c_kind;
+ __u32 t_id, c_id;
+ int i;
+
+ for (i = 0; i < d->hypot_cnt; i++) {
+ cand_type_id = d->hypot_list[i];
+ targ_type_id = d->hypot_map[cand_type_id];
+ t_id = resolve_type_id(d, targ_type_id);
+ c_id = resolve_type_id(d, cand_type_id);
+ t_kind = BTF_INFO_KIND(d->btf->types[t_id]->info);
+ c_kind = BTF_INFO_KIND(d->btf->types[c_id]->info);
+ /*
+ * Resolve FWD into STRUCT/UNION.
+ * It's ok to resolve FWD into STRUCT/UNION that's not yet
+ * mapped to canonical representative (as opposed to
+ * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because
+ * eventually that struct is going to be mapped and all resolved
+ * FWDs will automatically resolve to correct canonical
+ * representative. This will happen before ref type deduping,
+ * which critically depends on stability of these mappings. This
+ * stability is not a requirement for STRUCT/UNION equivalence
+ * checks, though.
+ */
+ if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD)
+ d->map[c_id] = t_id;
+ else if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
+ d->map[t_id] = c_id;
+
+ if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) &&
+ c_kind != BTF_KIND_FWD &&
+ is_type_mapped(d, c_id) &&
+ !is_type_mapped(d, t_id)) {
+ /*
+ * as a perf optimization, we can map struct/union
+ * that's part of type graph we just verified for
+ * equivalence. We can do that for struct/union that has
+ * canonical representative only, though.
+ */
+ d->map[t_id] = c_id;
+ }
+ }
+}
+
+/*
+ * Deduplicate struct/union types.
+ *
+ * For each struct/union type its type signature hash is calculated, taking
+ * into account the type's name, size, and the number, order, and names of its
+ * fields, but ignoring the type IDs referenced from fields, because they might
+ * not be deduped completely until after the reference types deduplication
+ * phase. This type hash is used to iterate over all potential canonical types
+ * sharing the same hash.
+ * For each canonical candidate we check whether type graphs that they form
+ * (through referenced types in fields and so on) are equivalent using algorithm
+ * implemented in `btf_dedup_is_equiv`. If such equivalence is found and
+ * BTF_KIND_FWD resolution is allowed, then hypothetical mapping
+ * (btf_dedup->hypot_map) produced by aforementioned type graph equivalence
+ * algorithm is used to record FWD -> STRUCT/UNION mapping. It's also used to
+ * potentially map other structs/unions to their canonical representatives,
+ * if such relationship hasn't yet been established. This speeds up algorithm
+ * by eliminating some of the duplicate work.
+ *
+ * If no matching canonical representative was found, struct/union is marked
+ * as canonical for itself and is added into btf_dedup->dedup_table hash map
+ * for further look ups.
+ */
+static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
+{
+ struct btf_dedup_node *cand_node;
+ struct btf_type *cand_type, *t;
+ /* if we don't find equivalent type, then we are canonical */
+ __u32 new_id = type_id;
+ __u16 kind;
+ __u32 h;
+
+ /* already deduped or is in process of deduping (loop detected) */
+ if (d->map[type_id] <= BTF_MAX_NR_TYPES)
+ return 0;
+
+ t = d->btf->types[type_id];
+ kind = BTF_INFO_KIND(t->info);
+
+ if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
+ return 0;
+
+ h = btf_hash_struct(t);
+ for_each_dedup_cand(d, h, cand_node) {
+ int eq;
+
+ /*
+ * Even though btf_dedup_is_equiv() checks for
+ * btf_shallow_equal_struct() internally when checking two
+ * structs (unions) for equivalence, we need to guard here
+ * against picking a matching FWD type as a dedup candidate.
+ * This can happen due to a hash collision. In such a case, just
+ * relying on btf_dedup_is_equiv() would lead to potentially
+ * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
+ * FWD and compatible STRUCT/UNION are considered equivalent.
+ */
+ cand_type = d->btf->types[cand_node->type_id];
+ if (!btf_shallow_equal_struct(t, cand_type))
+ continue;
+
+ btf_dedup_clear_hypot_map(d);
+ eq = btf_dedup_is_equiv(d, type_id, cand_node->type_id);
+ if (eq < 0)
+ return eq;
+ if (!eq)
+ continue;
+ new_id = cand_node->type_id;
+ btf_dedup_merge_hypot_map(d);
+ break;
+ }
+
+ d->map[type_id] = new_id;
+ if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int btf_dedup_struct_types(struct btf_dedup *d)
+{
+ int i, err;
+
+ for (i = 1; i <= d->btf->nr_types; i++) {
+ err = btf_dedup_struct_type(d, i);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+/*
+ * Deduplicate reference type.
+ *
+ * Once all primitive and struct/union types have been deduplicated, we can easily
+ * deduplicate all other (reference) BTF types. This is done in two steps:
+ *
+ * 1. Resolve all referenced type IDs into their canonical type IDs. This
+ * resolution can be done either immediately for primitive or struct/union types
+ * (because they were deduped in previous two phases) or recursively for
+ * reference types. Recursion will always terminate at either primitive or
+ * struct/union type, at which point we can "unwind" chain of reference types
+ * one by one. There is no danger of encountering cycles because in the C type
+ * system the only way to form a type cycle is through a struct/union, so any
+ * chain of reference types, even those taking part in a type cycle, will
+ * inevitably reach a struct/union at some point.
+ *
+ * 2. Once all referenced type IDs are resolved into canonical ones, BTF type
+ * becomes "stable", in the sense that no further deduplication will cause
+ * any changes to it. With that, it's now possible to calculate type's signature
+ * hash (this time taking into account referenced type IDs) and loop over all
+ * potential canonical representatives. If no match was found, current type
+ * will become canonical representative of itself and will be added into
+ * btf_dedup->dedup_table as another possible canonical representative.
+ */
+static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
+{
+ struct btf_dedup_node *cand_node;
+ struct btf_type *t, *cand;
+ /* if we don't find equivalent type, then we are representative type */
+ __u32 new_id = type_id;
+ int ref_type_id;
+ __u32 h;
+
+ if (d->map[type_id] == BTF_IN_PROGRESS_ID)
+ return -ELOOP;
+ if (d->map[type_id] <= BTF_MAX_NR_TYPES)
+ return resolve_type_id(d, type_id);
+
+ t = d->btf->types[type_id];
+ d->map[type_id] = BTF_IN_PROGRESS_ID;
+
+ switch (BTF_INFO_KIND(t->info)) {
+ case BTF_KIND_CONST:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_PTR:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_FUNC:
+ ref_type_id = btf_dedup_ref_type(d, t->type);
+ if (ref_type_id < 0)
+ return ref_type_id;
+ t->type = ref_type_id;
+
+ h = btf_hash_common(t);
+ for_each_dedup_cand(d, h, cand_node) {
+ cand = d->btf->types[cand_node->type_id];
+ if (btf_equal_common(t, cand)) {
+ new_id = cand_node->type_id;
+ break;
+ }
+ }
+ break;
+
+ case BTF_KIND_ARRAY: {
+ struct btf_array *info = (struct btf_array *)(t + 1);
+
+ ref_type_id = btf_dedup_ref_type(d, info->type);
+ if (ref_type_id < 0)
+ return ref_type_id;
+ info->type = ref_type_id;
+
+ ref_type_id = btf_dedup_ref_type(d, info->index_type);
+ if (ref_type_id < 0)
+ return ref_type_id;
+ info->index_type = ref_type_id;
+
+ h = btf_hash_array(t);
+ for_each_dedup_cand(d, h, cand_node) {
+ cand = d->btf->types[cand_node->type_id];
+ if (btf_equal_array(t, cand)) {
+ new_id = cand_node->type_id;
+ break;
+ }
+ }
+ break;
+ }
+
+ case BTF_KIND_FUNC_PROTO: {
+ struct btf_param *param;
+ __u16 vlen;
+ int i;
+
+ ref_type_id = btf_dedup_ref_type(d, t->type);
+ if (ref_type_id < 0)
+ return ref_type_id;
+ t->type = ref_type_id;
+
+ vlen = BTF_INFO_VLEN(t->info);
+ param = (struct btf_param *)(t + 1);
+ for (i = 0; i < vlen; i++) {
+ ref_type_id = btf_dedup_ref_type(d, param->type);
+ if (ref_type_id < 0)
+ return ref_type_id;
+ param->type = ref_type_id;
+ param++;
+ }
+
+ h = btf_hash_fnproto(t);
+ for_each_dedup_cand(d, h, cand_node) {
+ cand = d->btf->types[cand_node->type_id];
+ if (btf_equal_fnproto(t, cand)) {
+ new_id = cand_node->type_id;
+ break;
+ }
+ }
+ break;
+ }
+
+ default:
+ return -EINVAL;
+ }
+
+ d->map[type_id] = new_id;
+ if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
+ return -ENOMEM;
+
+ return new_id;
+}
+
+static int btf_dedup_ref_types(struct btf_dedup *d)
+{
+ int i, err;
+
+ for (i = 1; i <= d->btf->nr_types; i++) {
+ err = btf_dedup_ref_type(d, i);
+ if (err < 0)
+ return err;
+ }
+ btf_dedup_table_free(d);
+ return 0;
+}
+
+/*
+ * Compact types.
+ *
+ * After we have established for each type its corresponding canonical
+ * representative type, we can eliminate types that are not canonical and leave
+ * only canonical ones laid out sequentially in memory by copying them over
+ * duplicates. During compaction, the btf_dedup->hypot_map array is reused to
+ * store a map from original type IDs to new compacted type IDs, which is used
+ * during the next phase to "fix up" type IDs referenced from struct/union and
+ * reference types.
+ */
+static int btf_dedup_compact_types(struct btf_dedup *d)
+{
+ struct btf_type **new_types;
+ __u32 next_type_id = 1;
+ char *types_start, *p;
+ int i, len;
+
+ /* we are going to reuse hypot_map to store compaction remapping */
+ d->hypot_map[0] = 0;
+ for (i = 1; i <= d->btf->nr_types; i++)
+ d->hypot_map[i] = BTF_UNPROCESSED_ID;
+
+ types_start = d->btf->nohdr_data + d->btf->hdr->type_off;
+ p = types_start;
+
+ for (i = 1; i <= d->btf->nr_types; i++) {
+ if (d->map[i] != i)
+ continue;
+
+ len = btf_type_size(d->btf->types[i]);
+ if (len < 0)
+ return len;
+
+ memmove(p, d->btf->types[i], len);
+ d->hypot_map[i] = next_type_id;
+ d->btf->types[next_type_id] = (struct btf_type *)p;
+ p += len;
+ next_type_id++;
+ }
+
+ /* shrink struct btf's internal types index and update btf_header */
+ d->btf->nr_types = next_type_id - 1;
+ d->btf->types_size = d->btf->nr_types;
+ d->btf->hdr->type_len = p - types_start;
+ new_types = realloc(d->btf->types,
+ (1 + d->btf->nr_types) * sizeof(struct btf_type *));
+ if (!new_types)
+ return -ENOMEM;
+ d->btf->types = new_types;
+
+ /* make sure string section follows type information without gaps */
+ d->btf->hdr->str_off = p - (char *)d->btf->nohdr_data;
+ memmove(p, d->btf->strings, d->btf->hdr->str_len);
+ d->btf->strings = p;
+ p += d->btf->hdr->str_len;
+
+ d->btf->data_size = p - (char *)d->btf->data;
+ return 0;
+}
+
+/*
+ * Figure out final (deduplicated and compacted) type ID for provided original
+ * `type_id` by first resolving it into corresponding canonical type ID and
+ * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map,
+ * which is populated during compaction phase.
+ */
+static int btf_dedup_remap_type_id(struct btf_dedup *d, __u32 type_id)
+{
+ __u32 resolved_type_id, new_type_id;
+
+ resolved_type_id = resolve_type_id(d, type_id);
+ new_type_id = d->hypot_map[resolved_type_id];
+ if (new_type_id > BTF_MAX_NR_TYPES)
+ return -EINVAL;
+ return new_type_id;
+}
+
+/*
+ * Remap referenced type IDs into deduped type IDs.
+ *
+ * After BTF types are deduplicated and compacted, their final type IDs may
+ * differ from original ones. The map from original to a corresponding
+ * deduped type ID is stored in btf_dedup->hypot_map and is populated during
+ * compaction phase. During remapping phase we are rewriting all type IDs
+ * referenced from any BTF type (e.g., struct fields, func proto args, etc) to
+ * their final deduped type IDs.
+ */
+static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id)
+{
+ struct btf_type *t = d->btf->types[type_id];
+ int i, r;
+
+ switch (BTF_INFO_KIND(t->info)) {
+ case BTF_KIND_INT:
+ case BTF_KIND_ENUM:
+ break;
+
+ case BTF_KIND_FWD:
+ case BTF_KIND_CONST:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_PTR:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_FUNC:
+ r = btf_dedup_remap_type_id(d, t->type);
+ if (r < 0)
+ return r;
+ t->type = r;
+ break;
+
+ case BTF_KIND_ARRAY: {
+ struct btf_array *arr_info = (struct btf_array *)(t + 1);
+
+ r = btf_dedup_remap_type_id(d, arr_info->type);
+ if (r < 0)
+ return r;
+ arr_info->type = r;
+ r = btf_dedup_remap_type_id(d, arr_info->index_type);
+ if (r < 0)
+ return r;
+ arr_info->index_type = r;
+ break;
+ }
+
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION: {
+ struct btf_member *member = (struct btf_member *)(t + 1);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+
+ for (i = 0; i < vlen; i++) {
+ r = btf_dedup_remap_type_id(d, member->type);
+ if (r < 0)
+ return r;
+ member->type = r;
+ member++;
+ }
+ break;
+ }
+
+ case BTF_KIND_FUNC_PROTO: {
+ struct btf_param *param = (struct btf_param *)(t + 1);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+
+ r = btf_dedup_remap_type_id(d, t->type);
+ if (r < 0)
+ return r;
+ t->type = r;
+
+ for (i = 0; i < vlen; i++) {
+ r = btf_dedup_remap_type_id(d, param->type);
+ if (r < 0)
+ return r;
+ param->type = r;
+ param++;
+ }
+ break;
+ }
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int btf_dedup_remap_types(struct btf_dedup *d)
+{
+ int i, r;
+
+ for (i = 1; i <= d->btf->nr_types; i++) {
+ r = btf_dedup_remap_type(d, i);
+ if (r < 0)
+ return r;
+ }
+ return 0;
+}
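
Taken together, the dedup phases added above run in a fixed order. A minimal sketch of a driver follows (illustrative only, not part of the patch; the real entry point also deduplicates strings before the type passes, and the helper name below is hypothetical):

	static int btf_dedup_run_type_passes(struct btf_dedup *d)
	{
		int err;

		/* 1. canonicalize INT/ENUM/FWD types */
		err = btf_dedup_prim_types(d);
		if (err < 0)
			return err;
		/* 2. canonicalize STRUCT/UNION via type graph equivalence */
		err = btf_dedup_struct_types(d);
		if (err < 0)
			return err;
		/* 3. canonicalize reference types (PTR, TYPEDEF, ARRAY, ...) */
		err = btf_dedup_ref_types(d);
		if (err < 0)
			return err;
		/* 4. drop duplicates and lay out survivors contiguously */
		err = btf_dedup_compact_types(d);
		if (err < 0)
			return err;
		/* 5. rewrite all referenced type IDs to their compacted values */
		return btf_dedup_remap_types(d);
	}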
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index b0610dcdae6b..28a1e1e59861 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -55,33 +55,47 @@ struct btf_ext_header {
__u32 line_info_len;
};
-typedef int (*btf_print_fn_t)(const char *, ...)
- __attribute__((format(printf, 1, 2)));
-
LIBBPF_API void btf__free(struct btf *btf);
-LIBBPF_API struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log);
+LIBBPF_API struct btf *btf__new(__u8 *data, __u32 size);
+LIBBPF_API int btf__load(struct btf *btf);
LIBBPF_API __s32 btf__find_by_name(const struct btf *btf,
const char *type_name);
+LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf);
LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf,
__u32 id);
LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__fd(const struct btf *btf);
+LIBBPF_API const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset);
LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf);
+LIBBPF_API int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
+ __u32 expected_key_size,
+ __u32 expected_value_size,
+ __u32 *key_type_id, __u32 *value_type_id);
+
+LIBBPF_API struct btf_ext *btf_ext__new(__u8 *data, __u32 size);
+LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext);
+LIBBPF_API const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext,
+ __u32 *size);
+LIBBPF_API int btf_ext__reloc_func_info(const struct btf *btf,
+ const struct btf_ext *btf_ext,
+ const char *sec_name, __u32 insns_cnt,
+ void **func_info, __u32 *cnt);
+LIBBPF_API int btf_ext__reloc_line_info(const struct btf *btf,
+ const struct btf_ext *btf_ext,
+ const char *sec_name, __u32 insns_cnt,
+ void **line_info, __u32 *cnt);
+LIBBPF_API __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext);
+LIBBPF_API __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext);
+
+struct btf_dedup_opts {
+ unsigned int dedup_table_size;
+ bool dont_resolve_fwds;
+};
-struct btf_ext *btf_ext__new(__u8 *data, __u32 size, btf_print_fn_t err_log);
-void btf_ext__free(struct btf_ext *btf_ext);
-int btf_ext__reloc_func_info(const struct btf *btf,
- const struct btf_ext *btf_ext,
- const char *sec_name, __u32 insns_cnt,
- void **func_info, __u32 *func_info_len);
-int btf_ext__reloc_line_info(const struct btf *btf,
- const struct btf_ext *btf_ext,
- const char *sec_name, __u32 insns_cnt,
- void **line_info, __u32 *cnt);
-__u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext);
-__u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext);
+LIBBPF_API int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
+ const struct btf_dedup_opts *opts);
#ifdef __cplusplus
} /* extern "C" */
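
For context, a minimal usage sketch of the new btf.h API above (illustrative only; `data`/`size` are assumed to hold a valid raw .BTF section, and the ERR_PTR return convention of btf__new() is assumed from its use elsewhere in libbpf):

	static int dedup_raw_btf(__u8 *data, __u32 size)
	{
		struct btf_dedup_opts opts = { .dont_resolve_fwds = false };
		__u32 dedup_size;
		struct btf *btf;
		int err;

		btf = btf__new(data, size);
		if (IS_ERR(btf))
			return PTR_ERR(btf);

		err = btf__dedup(btf, NULL /* no .BTF.ext */, &opts);
		if (!err) {
			const void *raw = btf__get_raw_data(btf, &dedup_size);

			/* raw now points at the deduplicated, compacted image */
			(void)raw;
		}

		btf__free(btf);
		return err;
	}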
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 169e347c76f6..d5b830d60601 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -42,6 +42,7 @@
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
+#include "libbpf_util.h"
#ifndef EM_BPF
#define EM_BPF 247
@@ -53,39 +54,33 @@
#define __printf(a, b) __attribute__((format(printf, a, b)))
-__printf(1, 2)
-static int __base_pr(const char *format, ...)
+static int __base_pr(enum libbpf_print_level level, const char *format,
+ va_list args)
{
- va_list args;
- int err;
+ if (level == LIBBPF_DEBUG)
+ return 0;
- va_start(args, format);
- err = vfprintf(stderr, format, args);
- va_end(args);
- return err;
+ return vfprintf(stderr, format, args);
}
-static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
-static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
-static __printf(1, 2) libbpf_print_fn_t __pr_debug;
-
-#define __pr(func, fmt, ...) \
-do { \
- if ((func)) \
- (func)("libbpf: " fmt, ##__VA_ARGS__); \
-} while (0)
+static libbpf_print_fn_t __libbpf_pr = __base_pr;
-#define pr_warning(fmt, ...) __pr(__pr_warning, fmt, ##__VA_ARGS__)
-#define pr_info(fmt, ...) __pr(__pr_info, fmt, ##__VA_ARGS__)
-#define pr_debug(fmt, ...) __pr(__pr_debug, fmt, ##__VA_ARGS__)
+void libbpf_set_print(libbpf_print_fn_t fn)
+{
+ __libbpf_pr = fn;
+}
-void libbpf_set_print(libbpf_print_fn_t warn,
- libbpf_print_fn_t info,
- libbpf_print_fn_t debug)
+__printf(2, 3)
+void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
- __pr_warning = warn;
- __pr_info = info;
- __pr_debug = debug;
+ va_list args;
+
+ if (!__libbpf_pr)
+ return;
+
+ va_start(args, format);
+ __libbpf_pr(level, format, args);
+ va_end(args);
}
#define STRERR_BUFSIZE 128
@@ -312,7 +307,7 @@ bpf_program__init(void *data, size_t size, char *section_name, int idx,
return -EINVAL;
}
- bzero(prog, sizeof(*prog));
+ memset(prog, 0, sizeof(*prog));
prog->section_name = strdup(section_name);
if (!prog->section_name) {
@@ -839,11 +834,12 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
else if (strcmp(name, "maps") == 0)
obj->efile.maps_shndx = idx;
else if (strcmp(name, BTF_ELF_SEC) == 0) {
- obj->btf = btf__new(data->d_buf, data->d_size,
- __pr_debug);
- if (IS_ERR(obj->btf)) {
+ obj->btf = btf__new(data->d_buf, data->d_size);
+ if (IS_ERR(obj->btf) || btf__load(obj->btf)) {
pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
BTF_ELF_SEC, PTR_ERR(obj->btf));
+ if (!IS_ERR(obj->btf))
+ btf__free(obj->btf);
obj->btf = NULL;
}
} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
@@ -915,8 +911,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
BTF_EXT_ELF_SEC, BTF_ELF_SEC);
} else {
obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
- btf_ext_data->d_size,
- __pr_debug);
+ btf_ext_data->d_size);
if (IS_ERR(obj->btf_ext)) {
pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
BTF_EXT_ELF_SEC,
@@ -1057,72 +1052,18 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
{
- const struct btf_type *container_type;
- const struct btf_member *key, *value;
struct bpf_map_def *def = &map->def;
- const size_t max_name = 256;
- char container_name[max_name];
- __s64 key_size, value_size;
- __s32 container_id;
-
- if (snprintf(container_name, max_name, "____btf_map_%s", map->name) ==
- max_name) {
- pr_warning("map:%s length of '____btf_map_%s' is too long\n",
- map->name, map->name);
- return -EINVAL;
- }
-
- container_id = btf__find_by_name(btf, container_name);
- if (container_id < 0) {
- pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
- map->name, container_name);
- return container_id;
- }
-
- container_type = btf__type_by_id(btf, container_id);
- if (!container_type) {
- pr_warning("map:%s cannot find BTF type for container_id:%u\n",
- map->name, container_id);
- return -EINVAL;
- }
-
- if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT ||
- BTF_INFO_VLEN(container_type->info) < 2) {
- pr_warning("map:%s container_name:%s is an invalid container struct\n",
- map->name, container_name);
- return -EINVAL;
- }
-
- key = (struct btf_member *)(container_type + 1);
- value = key + 1;
-
- key_size = btf__resolve_size(btf, key->type);
- if (key_size < 0) {
- pr_warning("map:%s invalid BTF key_type_size\n",
- map->name);
- return key_size;
- }
-
- if (def->key_size != key_size) {
- pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
- map->name, (__u32)key_size, def->key_size);
- return -EINVAL;
- }
-
- value_size = btf__resolve_size(btf, value->type);
- if (value_size < 0) {
- pr_warning("map:%s invalid BTF value_type_size\n", map->name);
- return value_size;
- }
+ __u32 key_type_id, value_type_id;
+ int ret;
- if (def->value_size != value_size) {
- pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
- map->name, (__u32)value_size, def->value_size);
- return -EINVAL;
- }
+ ret = btf__get_map_kv_tids(btf, map->name, def->key_size,
+ def->value_size, &key_type_id,
+ &value_type_id);
+ if (ret)
+ return ret;
- map->btf_key_type_id = key->type;
- map->btf_value_type_id = value->type;
+ map->btf_key_type_id = key_type_id;
+ map->btf_value_type_id = value_type_id;
return 0;
}
@@ -1174,6 +1115,20 @@ err_free_new_name:
return -errno;
}
+int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
+{
+ if (!map || !max_entries)
+ return -EINVAL;
+
+ /* If map already created, its attributes can't be changed. */
+ if (map->fd >= 0)
+ return -EBUSY;
+
+ map->def.max_entries = max_entries;
+
+ return 0;
+}
+
static int
bpf_object__probe_name(struct bpf_object *obj)
{
@@ -1637,7 +1592,7 @@ bpf_program__load(struct bpf_program *prog,
struct bpf_prog_prep_result result;
bpf_program_prep_t preprocessor = prog->preprocessor;
- bzero(&result, sizeof(result));
+ memset(&result, 0, sizeof(result));
err = preprocessor(prog, i, prog->insns,
prog->insns_cnt, &result);
if (err) {
@@ -2147,7 +2102,7 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
if (err)
return err;
- bpf_map__for_each(map, obj) {
+ bpf_object__for_each_map(map, obj) {
char buf[PATH_MAX];
int len;
@@ -2194,7 +2149,7 @@ int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
if (!obj)
return -ENOENT;
- bpf_map__for_each(map, obj) {
+ bpf_object__for_each_map(map, obj) {
char buf[PATH_MAX];
int len;
@@ -2378,6 +2333,11 @@ unsigned int bpf_object__kversion(struct bpf_object *obj)
return obj ? obj->kern_version : 0;
}
+struct btf *bpf_object__btf(struct bpf_object *obj)
+{
+ return obj ? obj->btf : NULL;
+}
+
int bpf_object__btf_fd(const struct bpf_object *obj)
{
return obj->btf ? btf__fd(obj->btf) : -1;
@@ -2667,9 +2627,38 @@ static const struct {
#undef BPF_EAPROG_SEC
#undef BPF_APROG_COMPAT
+#define MAX_TYPE_NAME_SIZE 32
+
+static char *libbpf_get_type_names(bool attach_type)
+{
+ int i, len = ARRAY_SIZE(section_names) * MAX_TYPE_NAME_SIZE;
+ char *buf;
+
+ buf = malloc(len);
+ if (!buf)
+ return NULL;
+
+ buf[0] = '\0';
+ /* Forge string buf with all available names */
+ for (i = 0; i < ARRAY_SIZE(section_names); i++) {
+ if (attach_type && !section_names[i].is_attachable)
+ continue;
+
+ if (strlen(buf) + strlen(section_names[i].sec) + 2 > len) {
+ free(buf);
+ return NULL;
+ }
+ strcat(buf, " ");
+ strcat(buf, section_names[i].sec);
+ }
+
+ return buf;
+}
+
int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
enum bpf_attach_type *expected_attach_type)
{
+ char *type_names;
int i;
if (!name)
@@ -2682,12 +2671,20 @@ int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
*expected_attach_type = section_names[i].expected_attach_type;
return 0;
}
+ pr_warning("failed to guess program type based on ELF section name '%s'\n", name);
+ type_names = libbpf_get_type_names(false);
+ if (type_names != NULL) {
+ pr_info("supported section(type) names are:%s\n", type_names);
+ free(type_names);
+ }
+
return -EINVAL;
}
int libbpf_attach_type_by_name(const char *name,
enum bpf_attach_type *attach_type)
{
+ char *type_names;
int i;
if (!name)
@@ -2701,6 +2698,13 @@ int libbpf_attach_type_by_name(const char *name,
*attach_type = section_names[i].attach_type;
return 0;
}
+ pr_warning("failed to guess attach type based on ELF section name '%s'\n", name);
+ type_names = libbpf_get_type_names(true);
+ if (type_names != NULL) {
+ pr_info("attachable section(type) names are:%s\n", type_names);
+ free(type_names);
+ }
+
return -EINVAL;
}
@@ -2833,13 +2837,19 @@ bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
{
struct bpf_map *pos;
- bpf_map__for_each(pos, obj) {
+ bpf_object__for_each_map(pos, obj) {
if (pos->name && !strcmp(pos->name, name))
return pos;
}
return NULL;
}
+int
+bpf_object__find_map_fd_by_name(struct bpf_object *obj, const char *name)
+{
+ return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
+}
+
struct bpf_map *
bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
{
@@ -2907,8 +2917,6 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
err = bpf_program__identify_section(prog, &prog_type,
&expected_attach_type);
if (err < 0) {
- pr_warning("failed to guess program type based on section name %s\n",
- prog->section_name);
bpf_object__close(obj);
return -EINVAL;
}
@@ -2922,7 +2930,7 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
first_prog = prog;
}
- bpf_map__for_each(map, obj) {
+ bpf_object__for_each_map(map, obj) {
if (!bpf_map__is_offload_neutral(map))
map->map_ifindex = attr->ifindex;
}
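
A short sketch of how the two map helpers added above might be used from an application (illustrative; the object file path and map name are hypothetical, and error handling is trimmed):

	struct bpf_object *obj;
	struct bpf_map *map;
	int map_fd;

	obj = bpf_object__open("prog.o");	/* hypothetical object file */

	/* resizing must happen before bpf_object__load(), i.e. before map creation */
	map = bpf_object__find_map_by_name(obj, "counters");	/* hypothetical map */
	if (map)
		bpf_map__resize(map, 1024);

	bpf_object__load(obj);
	map_fd = bpf_object__find_map_fd_by_name(obj, "counters");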
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 5f68d7b75215..b4652aa1a58a 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -47,17 +47,16 @@ enum libbpf_errno {
LIBBPF_API int libbpf_strerror(int err, char *buf, size_t size);
-/*
- * __printf is defined in include/linux/compiler-gcc.h. However,
- * it would be better if libbpf.h didn't depend on Linux header files.
- * So instead of __printf, here we use gcc attribute directly.
- */
-typedef int (*libbpf_print_fn_t)(const char *, ...)
- __attribute__((format(printf, 1, 2)));
+enum libbpf_print_level {
+ LIBBPF_WARN,
+ LIBBPF_INFO,
+ LIBBPF_DEBUG,
+};
-LIBBPF_API void libbpf_set_print(libbpf_print_fn_t warn,
- libbpf_print_fn_t info,
- libbpf_print_fn_t debug);
+typedef int (*libbpf_print_fn_t)(enum libbpf_print_level level,
+ const char *, va_list ap);
+
+LIBBPF_API void libbpf_set_print(libbpf_print_fn_t fn);
/* Hide internal to user */
struct bpf_object;
@@ -90,6 +89,9 @@ LIBBPF_API int bpf_object__load(struct bpf_object *obj);
LIBBPF_API int bpf_object__unload(struct bpf_object *obj);
LIBBPF_API const char *bpf_object__name(struct bpf_object *obj);
LIBBPF_API unsigned int bpf_object__kversion(struct bpf_object *obj);
+
+struct btf;
+LIBBPF_API struct btf *bpf_object__btf(struct bpf_object *obj);
LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj);
LIBBPF_API struct bpf_program *
@@ -264,6 +266,9 @@ struct bpf_map;
LIBBPF_API struct bpf_map *
bpf_object__find_map_by_name(struct bpf_object *obj, const char *name);
+LIBBPF_API int
+bpf_object__find_map_fd_by_name(struct bpf_object *obj, const char *name);
+
/*
* Get bpf_map through the offset of corresponding struct bpf_map_def
* in the BPF object file.
@@ -273,10 +278,11 @@ bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset);
LIBBPF_API struct bpf_map *
bpf_map__next(struct bpf_map *map, struct bpf_object *obj);
-#define bpf_map__for_each(pos, obj) \
+#define bpf_object__for_each_map(pos, obj) \
for ((pos) = bpf_map__next(NULL, (obj)); \
(pos) != NULL; \
(pos) = bpf_map__next((pos), (obj)))
+#define bpf_map__for_each bpf_object__for_each_map
LIBBPF_API struct bpf_map *
bpf_map__prev(struct bpf_map *map, struct bpf_object *obj);
@@ -292,6 +298,7 @@ LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv,
bpf_map_clear_priv_t clear_priv);
LIBBPF_API void *bpf_map__priv(struct bpf_map *map);
LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
+LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries);
LIBBPF_API bool bpf_map__is_offload_neutral(struct bpf_map *map);
LIBBPF_API void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path);
@@ -314,6 +321,7 @@ LIBBPF_API int bpf_prog_load(const char *file, enum bpf_prog_type type,
struct bpf_object **pobj, int *prog_fd);
LIBBPF_API int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags);
+LIBBPF_API int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags);
enum bpf_perf_event_ret {
LIBBPF_PERF_EVENT_DONE = 0,
@@ -355,6 +363,20 @@ LIBBPF_API const struct bpf_line_info *
bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo,
__u32 insn_off, __u32 nr_skip);
+/*
+ * Probe for supported system features
+ *
+ * Note that running many of these probes in a short amount of time can cause
+ * the kernel to reach the maximal size of lockable memory allowed for the
+ * user, causing subsequent probes to fail. In this case, the caller may want
+ * to adjust that limit with setrlimit().
+ */
+LIBBPF_API bool bpf_probe_prog_type(enum bpf_prog_type prog_type,
+ __u32 ifindex);
+LIBBPF_API bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex);
+LIBBPF_API bool bpf_probe_helper(enum bpf_func_id id,
+ enum bpf_prog_type prog_type, __u32 ifindex);
+
#ifdef __cplusplus
} /* extern "C" */
#endif
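
For reference, a minimal callback matching the new libbpf_print_fn_t signature above (a sketch; the filtering policy is just an example):

	#include <stdarg.h>
	#include <stdio.h>

	static int my_libbpf_print(enum libbpf_print_level level,
				   const char *format, va_list args)
	{
		/* silence debug-level output, forward everything else to stderr */
		if (level == LIBBPF_DEBUG)
			return 0;
		return vfprintf(stderr, format, args);
	}

	/* somewhere in application setup: */
	libbpf_set_print(my_libbpf_print);
	/* passing NULL disables all libbpf output */
	libbpf_set_print(NULL);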
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index cd02cd4e2cc3..778a26702a70 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -124,3 +124,33 @@ LIBBPF_0.0.1 {
local:
*;
};
+
+LIBBPF_0.0.2 {
+ global:
+ bpf_probe_helper;
+ bpf_probe_map_type;
+ bpf_probe_prog_type;
+ bpf_map__resize;
+ bpf_map_lookup_elem_flags;
+ bpf_object__btf;
+ bpf_object__find_map_fd_by_name;
+ bpf_get_link_xdp_id;
+ btf__dedup;
+ btf__get_map_kv_tids;
+ btf__get_nr_types;
+ btf__get_raw_data;
+ btf__load;
+ btf_ext__free;
+ btf_ext__func_info_rec_size;
+ btf_ext__get_raw_data;
+ btf_ext__line_info_rec_size;
+ btf_ext__new;
+ btf_ext__reloc_func_info;
+ btf_ext__reloc_line_info;
+ xsk_umem__create;
+ xsk_socket__create;
+ xsk_umem__delete;
+ xsk_socket__delete;
+ xsk_umem__fd;
+ xsk_socket__fd;
+} LIBBPF_0.0.1;
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
new file mode 100644
index 000000000000..8c3a1c04dcb2
--- /dev/null
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+/* Copyright (c) 2019 Netronome Systems, Inc. */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <net/if.h>
+#include <sys/utsname.h>
+
+#include <linux/filter.h>
+#include <linux/kernel.h>
+
+#include "bpf.h"
+#include "libbpf.h"
+
+static bool grep(const char *buffer, const char *pattern)
+{
+ return !!strstr(buffer, pattern);
+}
+
+static int get_vendor_id(int ifindex)
+{
+ char ifname[IF_NAMESIZE], path[64], buf[8];
+ ssize_t len;
+ int fd;
+
+ if (!if_indextoname(ifindex, ifname))
+ return -1;
+
+ snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname);
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ return -1;
+
+ len = read(fd, buf, sizeof(buf));
+ close(fd);
+ if (len < 0)
+ return -1;
+ if (len >= (ssize_t)sizeof(buf))
+ return -1;
+ buf[len] = '\0';
+
+ return strtol(buf, NULL, 0);
+}
+
+static int get_kernel_version(void)
+{
+ int version, subversion, patchlevel;
+ struct utsname utsn;
+
+ /* Return 0 on failure, and attempt to probe with empty kversion */
+ if (uname(&utsn))
+ return 0;
+
+ if (sscanf(utsn.release, "%d.%d.%d",
+ &version, &subversion, &patchlevel) != 3)
+ return 0;
+
+ return (version << 16) + (subversion << 8) + patchlevel;
+}
+
+static void
+probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
+ size_t insns_cnt, char *buf, size_t buf_len, __u32 ifindex)
+{
+ struct bpf_load_program_attr xattr = {};
+ int fd;
+
+ switch (prog_type) {
+ case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
+ xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
+ break;
+ case BPF_PROG_TYPE_KPROBE:
+ xattr.kern_version = get_kernel_version();
+ break;
+ case BPF_PROG_TYPE_UNSPEC:
+ case BPF_PROG_TYPE_SOCKET_FILTER:
+ case BPF_PROG_TYPE_SCHED_CLS:
+ case BPF_PROG_TYPE_SCHED_ACT:
+ case BPF_PROG_TYPE_TRACEPOINT:
+ case BPF_PROG_TYPE_XDP:
+ case BPF_PROG_TYPE_PERF_EVENT:
+ case BPF_PROG_TYPE_CGROUP_SKB:
+ case BPF_PROG_TYPE_CGROUP_SOCK:
+ case BPF_PROG_TYPE_LWT_IN:
+ case BPF_PROG_TYPE_LWT_OUT:
+ case BPF_PROG_TYPE_LWT_XMIT:
+ case BPF_PROG_TYPE_SOCK_OPS:
+ case BPF_PROG_TYPE_SK_SKB:
+ case BPF_PROG_TYPE_CGROUP_DEVICE:
+ case BPF_PROG_TYPE_SK_MSG:
+ case BPF_PROG_TYPE_RAW_TRACEPOINT:
+ case BPF_PROG_TYPE_LWT_SEG6LOCAL:
+ case BPF_PROG_TYPE_LIRC_MODE2:
+ case BPF_PROG_TYPE_SK_REUSEPORT:
+ case BPF_PROG_TYPE_FLOW_DISSECTOR:
+ default:
+ break;
+ }
+
+ xattr.prog_type = prog_type;
+ xattr.insns = insns;
+ xattr.insns_cnt = insns_cnt;
+ xattr.license = "GPL";
+ xattr.prog_ifindex = ifindex;
+
+ fd = bpf_load_program_xattr(&xattr, buf, buf_len);
+ if (fd >= 0)
+ close(fd);
+}
+
+bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
+{
+ struct bpf_insn insns[2] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN()
+ };
+
+ if (ifindex && prog_type == BPF_PROG_TYPE_SCHED_CLS)
+ /* nfp returns -EINVAL on exit(0) with TC offload */
+ insns[0].imm = 2;
+
+ errno = 0;
+ probe_load(prog_type, insns, ARRAY_SIZE(insns), NULL, 0, ifindex);
+
+ return errno != EINVAL && errno != EOPNOTSUPP;
+}
+
+bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
+{
+ int key_size, value_size, max_entries, map_flags;
+ struct bpf_create_map_attr attr = {};
+ int fd = -1, fd_inner;
+
+ key_size = sizeof(__u32);
+ value_size = sizeof(__u32);
+ max_entries = 1;
+ map_flags = 0;
+
+ switch (map_type) {
+ case BPF_MAP_TYPE_STACK_TRACE:
+ value_size = sizeof(__u64);
+ break;
+ case BPF_MAP_TYPE_LPM_TRIE:
+ key_size = sizeof(__u64);
+ value_size = sizeof(__u64);
+ map_flags = BPF_F_NO_PREALLOC;
+ break;
+ case BPF_MAP_TYPE_CGROUP_STORAGE:
+ case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
+ key_size = sizeof(struct bpf_cgroup_storage_key);
+ value_size = sizeof(__u64);
+ max_entries = 0;
+ break;
+ case BPF_MAP_TYPE_QUEUE:
+ case BPF_MAP_TYPE_STACK:
+ key_size = 0;
+ break;
+ case BPF_MAP_TYPE_UNSPEC:
+ case BPF_MAP_TYPE_HASH:
+ case BPF_MAP_TYPE_ARRAY:
+ case BPF_MAP_TYPE_PROG_ARRAY:
+ case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
+ case BPF_MAP_TYPE_PERCPU_HASH:
+ case BPF_MAP_TYPE_PERCPU_ARRAY:
+ case BPF_MAP_TYPE_CGROUP_ARRAY:
+ case BPF_MAP_TYPE_LRU_HASH:
+ case BPF_MAP_TYPE_LRU_PERCPU_HASH:
+ case BPF_MAP_TYPE_ARRAY_OF_MAPS:
+ case BPF_MAP_TYPE_HASH_OF_MAPS:
+ case BPF_MAP_TYPE_DEVMAP:
+ case BPF_MAP_TYPE_SOCKMAP:
+ case BPF_MAP_TYPE_CPUMAP:
+ case BPF_MAP_TYPE_XSKMAP:
+ case BPF_MAP_TYPE_SOCKHASH:
+ case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
+ default:
+ break;
+ }
+
+ if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
+ map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
+ /* TODO: probe for device, once libbpf has a function to create
+ * map-in-map for offload
+ */
+ if (ifindex)
+ return false;
+
+ fd_inner = bpf_create_map(BPF_MAP_TYPE_HASH,
+ sizeof(__u32), sizeof(__u32), 1, 0);
+ if (fd_inner < 0)
+ return false;
+ fd = bpf_create_map_in_map(map_type, NULL, sizeof(__u32),
+ fd_inner, 1, 0);
+ close(fd_inner);
+ } else {
+ /* Note: No other restriction on map type probes for offload */
+ attr.map_type = map_type;
+ attr.key_size = key_size;
+ attr.value_size = value_size;
+ attr.max_entries = max_entries;
+ attr.map_flags = map_flags;
+ attr.map_ifindex = ifindex;
+
+ fd = bpf_create_map_xattr(&attr);
+ }
+ if (fd >= 0)
+ close(fd);
+
+ return fd >= 0;
+}
+
+bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type,
+ __u32 ifindex)
+{
+ struct bpf_insn insns[2] = {
+ BPF_EMIT_CALL(id),
+ BPF_EXIT_INSN()
+ };
+ char buf[4096] = {};
+ bool res;
+
+ probe_load(prog_type, insns, ARRAY_SIZE(insns), buf, sizeof(buf),
+ ifindex);
+ res = !grep(buf, "invalid func ") && !grep(buf, "unknown func ");
+
+ if (ifindex) {
+ switch (get_vendor_id(ifindex)) {
+ case 0x19ee: /* Netronome specific */
+ res = res && !grep(buf, "not supported by FW") &&
+ !grep(buf, "unsupported function id");
+ break;
+ default:
+ break;
+ }
+ }
+
+ return res;
+}
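
A brief sketch of how these probes might be called (illustrative; ifindex 0 targets the host kernel rather than an offload device):

	bool have_xdp, have_lpm, have_ktime;

	have_xdp = bpf_probe_prog_type(BPF_PROG_TYPE_XDP, 0);
	have_lpm = bpf_probe_map_type(BPF_MAP_TYPE_LPM_TRIE, 0);
	have_ktime = bpf_probe_helper(BPF_FUNC_ktime_get_ns,
				      BPF_PROG_TYPE_SOCKET_FILTER, 0);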
diff --git a/tools/lib/bpf/libbpf_util.h b/tools/lib/bpf/libbpf_util.h
new file mode 100644
index 000000000000..81ecda0cb9c9
--- /dev/null
+++ b/tools/lib/bpf/libbpf_util.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+/* Copyright (c) 2019 Facebook */
+
+#ifndef __LIBBPF_LIBBPF_UTIL_H
+#define __LIBBPF_LIBBPF_UTIL_H
+
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern void libbpf_print(enum libbpf_print_level level,
+ const char *format, ...)
+ __attribute__((format(printf, 2, 3)));
+
+#define __pr(level, fmt, ...) \
+do { \
+ libbpf_print(level, "libbpf: " fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define pr_warning(fmt, ...) __pr(LIBBPF_WARN, fmt, ##__VA_ARGS__)
+#define pr_info(fmt, ...) __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__)
+#define pr_debug(fmt, ...) __pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__)
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif
diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
index 0ce67aea8f3b..ce3ec81b71c0 100644
--- a/tools/lib/bpf/netlink.c
+++ b/tools/lib/bpf/netlink.c
@@ -21,6 +21,12 @@
typedef int (*__dump_nlmsg_t)(struct nlmsghdr *nlmsg, libbpf_dump_nlmsg_t,
void *cookie);
+struct xdp_id_md {
+ int ifindex;
+ __u32 flags;
+ __u32 id;
+};
+
int libbpf_netlink_open(__u32 *nl_pid)
{
struct sockaddr_nl sa;
@@ -196,6 +202,85 @@ static int __dump_link_nlmsg(struct nlmsghdr *nlh,
return dump_link_nlmsg(cookie, ifi, tb);
}
+static unsigned char get_xdp_id_attr(unsigned char mode, __u32 flags)
+{
+ if (mode != XDP_ATTACHED_MULTI)
+ return IFLA_XDP_PROG_ID;
+ if (flags & XDP_FLAGS_DRV_MODE)
+ return IFLA_XDP_DRV_PROG_ID;
+ if (flags & XDP_FLAGS_HW_MODE)
+ return IFLA_XDP_HW_PROG_ID;
+ if (flags & XDP_FLAGS_SKB_MODE)
+ return IFLA_XDP_SKB_PROG_ID;
+
+ return IFLA_XDP_UNSPEC;
+}
+
+static int get_xdp_id(void *cookie, void *msg, struct nlattr **tb)
+{
+ struct nlattr *xdp_tb[IFLA_XDP_MAX + 1];
+ struct xdp_id_md *xdp_id = cookie;
+ struct ifinfomsg *ifinfo = msg;
+ unsigned char mode, xdp_attr;
+ int ret;
+
+ if (xdp_id->ifindex && xdp_id->ifindex != ifinfo->ifi_index)
+ return 0;
+
+ if (!tb[IFLA_XDP])
+ return 0;
+
+ ret = libbpf_nla_parse_nested(xdp_tb, IFLA_XDP_MAX, tb[IFLA_XDP], NULL);
+ if (ret)
+ return ret;
+
+ if (!xdp_tb[IFLA_XDP_ATTACHED])
+ return 0;
+
+ mode = libbpf_nla_getattr_u8(xdp_tb[IFLA_XDP_ATTACHED]);
+ if (mode == XDP_ATTACHED_NONE)
+ return 0;
+
+ xdp_attr = get_xdp_id_attr(mode, xdp_id->flags);
+ if (!xdp_attr || !xdp_tb[xdp_attr])
+ return 0;
+
+ xdp_id->id = libbpf_nla_getattr_u32(xdp_tb[xdp_attr]);
+
+ return 0;
+}
+
+int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags)
+{
+ struct xdp_id_md xdp_id = {};
+ int sock, ret;
+ __u32 nl_pid;
+ __u32 mask;
+
+ if (flags & ~XDP_FLAGS_MASK)
+ return -EINVAL;
+
+ /* Check whether the single {HW,DRV,SKB} mode is set */
+ flags &= (XDP_FLAGS_SKB_MODE | XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE);
+ mask = flags - 1;
+ if (flags && flags & mask)
+ return -EINVAL;
+
+ sock = libbpf_netlink_open(&nl_pid);
+ if (sock < 0)
+ return sock;
+
+ xdp_id.ifindex = ifindex;
+ xdp_id.flags = flags;
+
+ ret = libbpf_nl_get_link(sock, nl_pid, get_xdp_id, &xdp_id);
+ if (!ret)
+ *prog_id = xdp_id.id;
+
+ close(sock);
+ return ret;
+}
+
int libbpf_nl_get_link(int sock, unsigned int nl_pid,
libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie)
{
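
A minimal usage sketch for the new bpf_get_link_xdp_id() helper above (illustrative; the interface name is hypothetical, and <net/if.h>/<stdio.h> are assumed):

	__u32 prog_id = 0;
	int ifindex = if_nametoindex("eth0");	/* hypothetical interface */
	int err;

	/* flags == 0 reports the program id for single-program attachment modes */
	err = bpf_get_link_xdp_id(ifindex, &prog_id, 0);
	if (!err && prog_id)
		printf("XDP program %u attached\n", prog_id);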
diff --git a/tools/lib/bpf/test_libbpf.cpp b/tools/lib/bpf/test_libbpf.cpp
index abf3fc25c9fa..fc134873bb6d 100644
--- a/tools/lib/bpf/test_libbpf.cpp
+++ b/tools/lib/bpf/test_libbpf.cpp
@@ -8,11 +8,11 @@
int main(int argc, char *argv[])
{
/* libbpf.h */
- libbpf_set_print(NULL, NULL, NULL);
+ libbpf_set_print(NULL);
/* bpf.h */
bpf_prog_get_fd_by_id(0);
/* btf.h */
- btf__new(NULL, 0, NULL);
+ btf__new(NULL, 0);
}
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
new file mode 100644
index 000000000000..f98ac82c9aea
--- /dev/null
+++ b/tools/lib/bpf/xsk.c
@@ -0,0 +1,723 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+/*
+ * AF_XDP user-space access library.
+ *
+ * Copyright(c) 2018 - 2019 Intel Corporation.
+ *
+ * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
+ */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <arpa/inet.h>
+#include <asm/barrier.h>
+#include <linux/compiler.h>
+#include <linux/ethtool.h>
+#include <linux/filter.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/if_xdp.h>
+#include <linux/sockios.h>
+#include <net/if.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+
+#include "bpf.h"
+#include "libbpf.h"
+#include "libbpf_util.h"
+#include "xsk.h"
+
+#ifndef SOL_XDP
+ #define SOL_XDP 283
+#endif
+
+#ifndef AF_XDP
+ #define AF_XDP 44
+#endif
+
+#ifndef PF_XDP
+ #define PF_XDP AF_XDP
+#endif
+
+struct xsk_umem {
+ struct xsk_ring_prod *fill;
+ struct xsk_ring_cons *comp;
+ char *umem_area;
+ struct xsk_umem_config config;
+ int fd;
+ int refcount;
+};
+
+struct xsk_socket {
+ struct xsk_ring_cons *rx;
+ struct xsk_ring_prod *tx;
+ __u64 outstanding_tx;
+ struct xsk_umem *umem;
+ struct xsk_socket_config config;
+ int fd;
+ int xsks_map;
+ int ifindex;
+ int prog_fd;
+ int qidconf_map_fd;
+ int xsks_map_fd;
+ __u32 queue_id;
+ char ifname[IFNAMSIZ];
+};
+
+struct xsk_nl_info {
+ bool xdp_prog_attached;
+ int ifindex;
+ int fd;
+};
+
+/* For 32-bit systems, we need to use mmap2 as the offsets are 64-bit.
+ * Unfortunately, it is not part of glibc.
+ */
+static inline void *xsk_mmap(void *addr, size_t length, int prot, int flags,
+ int fd, __u64 offset)
+{
+#ifdef __NR_mmap2
+ unsigned int page_shift = __builtin_ffs(getpagesize()) - 1;
+ long ret = syscall(__NR_mmap2, addr, length, prot, flags, fd,
+ (off_t)(offset >> page_shift));
+
+ return (void *)ret;
+#else
+ return mmap(addr, length, prot, flags, fd, offset);
+#endif
+}
+
+int xsk_umem__fd(const struct xsk_umem *umem)
+{
+ return umem ? umem->fd : -EINVAL;
+}
+
+int xsk_socket__fd(const struct xsk_socket *xsk)
+{
+ return xsk ? xsk->fd : -EINVAL;
+}
+
+static bool xsk_page_aligned(void *buffer)
+{
+ unsigned long addr = (unsigned long)buffer;
+
+ return !(addr & (getpagesize() - 1));
+}
+
+static void xsk_set_umem_config(struct xsk_umem_config *cfg,
+ const struct xsk_umem_config *usr_cfg)
+{
+ if (!usr_cfg) {
+ cfg->fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
+ cfg->comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
+ cfg->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
+ cfg->frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM;
+ return;
+ }
+
+ cfg->fill_size = usr_cfg->fill_size;
+ cfg->comp_size = usr_cfg->comp_size;
+ cfg->frame_size = usr_cfg->frame_size;
+ cfg->frame_headroom = usr_cfg->frame_headroom;
+}
+
+static void xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
+ const struct xsk_socket_config *usr_cfg)
+{
+ if (!usr_cfg) {
+ cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
+ cfg->tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
+ cfg->libbpf_flags = 0;
+ cfg->xdp_flags = 0;
+ cfg->bind_flags = 0;
+ return;
+ }
+
+ cfg->rx_size = usr_cfg->rx_size;
+ cfg->tx_size = usr_cfg->tx_size;
+ cfg->libbpf_flags = usr_cfg->libbpf_flags;
+ cfg->xdp_flags = usr_cfg->xdp_flags;
+ cfg->bind_flags = usr_cfg->bind_flags;
+}
+
+int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size,
+ struct xsk_ring_prod *fill, struct xsk_ring_cons *comp,
+ const struct xsk_umem_config *usr_config)
+{
+ struct xdp_mmap_offsets off;
+ struct xdp_umem_reg mr;
+ struct xsk_umem *umem;
+ socklen_t optlen;
+ void *map;
+ int err;
+
+ if (!umem_area || !umem_ptr || !fill || !comp)
+ return -EFAULT;
+ if (!size && !xsk_page_aligned(umem_area))
+ return -EINVAL;
+
+ umem = calloc(1, sizeof(*umem));
+ if (!umem)
+ return -ENOMEM;
+
+ umem->fd = socket(AF_XDP, SOCK_RAW, 0);
+ if (umem->fd < 0) {
+ err = -errno;
+ goto out_umem_alloc;
+ }
+
+ umem->umem_area = umem_area;
+ xsk_set_umem_config(&umem->config, usr_config);
+
+ mr.addr = (uintptr_t)umem_area;
+ mr.len = size;
+ mr.chunk_size = umem->config.frame_size;
+ mr.headroom = umem->config.frame_headroom;
+
+ err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
+ if (err) {
+ err = -errno;
+ goto out_socket;
+ }
+ err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_FILL_RING,
+ &umem->config.fill_size,
+ sizeof(umem->config.fill_size));
+ if (err) {
+ err = -errno;
+ goto out_socket;
+ }
+ err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_COMPLETION_RING,
+ &umem->config.comp_size,
+ sizeof(umem->config.comp_size));
+ if (err) {
+ err = -errno;
+ goto out_socket;
+ }
+
+ optlen = sizeof(off);
+ err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
+ if (err) {
+ err = -errno;
+ goto out_socket;
+ }
+
+ map = xsk_mmap(NULL, off.fr.desc +
+ umem->config.fill_size * sizeof(__u64),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
+ umem->fd, XDP_UMEM_PGOFF_FILL_RING);
+ if (map == MAP_FAILED) {
+ err = -errno;
+ goto out_socket;
+ }
+
+ umem->fill = fill;
+ fill->mask = umem->config.fill_size - 1;
+ fill->size = umem->config.fill_size;
+ fill->producer = map + off.fr.producer;
+ fill->consumer = map + off.fr.consumer;
+ fill->ring = map + off.fr.desc;
+ fill->cached_cons = umem->config.fill_size;
+
+ map = xsk_mmap(NULL,
+ off.cr.desc + umem->config.comp_size * sizeof(__u64),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
+ umem->fd, XDP_UMEM_PGOFF_COMPLETION_RING);
+ if (map == MAP_FAILED) {
+ err = -errno;
+ goto out_mmap;
+ }
+
+ umem->comp = comp;
+ comp->mask = umem->config.comp_size - 1;
+ comp->size = umem->config.comp_size;
+ comp->producer = map + off.cr.producer;
+ comp->consumer = map + off.cr.consumer;
+ comp->ring = map + off.cr.desc;
+
+ *umem_ptr = umem;
+ return 0;
+
+out_mmap:
+ munmap(umem->fill,
+ off.fr.desc + umem->config.fill_size * sizeof(__u64));
+out_socket:
+ close(umem->fd);
+out_umem_alloc:
+ free(umem);
+ return err;
+}
+
+static int xsk_load_xdp_prog(struct xsk_socket *xsk)
+{
+ char bpf_log_buf[BPF_LOG_BUF_SIZE];
+ int err, prog_fd;
+
+ /* This is the C-program:
+ * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
+ * {
+ * int *qidconf, index = ctx->rx_queue_index;
+ *
+ * // A set entry here means that the corresponding queue_id
+ * // has an active AF_XDP socket bound to it.
+ * qidconf = bpf_map_lookup_elem(&qidconf_map, &index);
+ * if (!qidconf)
+ * return XDP_ABORTED;
+ *
+ * if (*qidconf)
+ * return bpf_redirect_map(&xsks_map, index, 0);
+ *
+ * return XDP_PASS;
+ * }
+ */
+ struct bpf_insn prog[] = {
+ /* r1 = *(u32 *)(r1 + 16) */
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 16),
+ /* *(u32 *)(r10 - 4) = r1 */
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_1, -4),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, xsk->qidconf_map_fd),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ /* if r1 == 0 goto +8 */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
+ BPF_MOV32_IMM(BPF_REG_0, 2),
+ /* r1 = *(u32 *)(r1 + 0) */
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 0),
+ /* if r1 == 0 goto +5 */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 5),
+ /* r2 = *(u32 *)(r10 - 4) */
+ BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, -4),
+ BPF_MOV32_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_redirect_map),
+ /* The jumps are to this instruction */
+ BPF_EXIT_INSN(),
+ };
+ size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
+
+ prog_fd = bpf_load_program(BPF_PROG_TYPE_XDP, prog, insns_cnt,
+ "LGPL-2.1 or BSD-2-Clause", 0, bpf_log_buf,
+ BPF_LOG_BUF_SIZE);
+ if (prog_fd < 0) {
+ pr_warning("BPF log buffer:\n%s", bpf_log_buf);
+ return prog_fd;
+ }
+
+ err = bpf_set_link_xdp_fd(xsk->ifindex, prog_fd, xsk->config.xdp_flags);
+ if (err) {
+ close(prog_fd);
+ return err;
+ }
+
+ xsk->prog_fd = prog_fd;
+ return 0;
+}
+
+static int xsk_get_max_queues(struct xsk_socket *xsk)
+{
+ struct ethtool_channels channels;
+ struct ifreq ifr;
+ int fd, err, ret;
+
+ fd = socket(AF_INET, SOCK_DGRAM, 0);
+ if (fd < 0)
+ return -errno;
+
+ channels.cmd = ETHTOOL_GCHANNELS;
+ ifr.ifr_data = (void *)&channels;
+ strncpy(ifr.ifr_name, xsk->ifname, IFNAMSIZ);
+ err = ioctl(fd, SIOCETHTOOL, &ifr);
+ if (err && errno != EOPNOTSUPP) {
+ ret = -errno;
+ goto out;
+ }
+
+ if (channels.max_combined == 0 || errno == EOPNOTSUPP)
+ /* If the device says it has no channels, then all traffic
+ * is sent to a single stream, so max queues = 1.
+ */
+ ret = 1;
+ else
+ ret = channels.max_combined;
+
+out:
+ close(fd);
+ return ret;
+}
+
+static int xsk_create_bpf_maps(struct xsk_socket *xsk)
+{
+ int max_queues;
+ int fd;
+
+ max_queues = xsk_get_max_queues(xsk);
+ if (max_queues < 0)
+ return max_queues;
+
+ fd = bpf_create_map_name(BPF_MAP_TYPE_ARRAY, "qidconf_map",
+ sizeof(int), sizeof(int), max_queues, 0);
+ if (fd < 0)
+ return fd;
+ xsk->qidconf_map_fd = fd;
+
+ fd = bpf_create_map_name(BPF_MAP_TYPE_XSKMAP, "xsks_map",
+ sizeof(int), sizeof(int), max_queues, 0);
+ if (fd < 0) {
+ close(xsk->qidconf_map_fd);
+ return fd;
+ }
+ xsk->xsks_map_fd = fd;
+
+ return 0;
+}
+
+static void xsk_delete_bpf_maps(struct xsk_socket *xsk)
+{
+ close(xsk->qidconf_map_fd);
+ close(xsk->xsks_map_fd);
+}
+
+static int xsk_update_bpf_maps(struct xsk_socket *xsk, int qidconf_value,
+ int xsks_value)
+{
+ bool qidconf_map_updated = false, xsks_map_updated = false;
+ struct bpf_prog_info prog_info = {};
+ __u32 prog_len = sizeof(prog_info);
+ struct bpf_map_info map_info;
+ __u32 map_len = sizeof(map_info);
+ __u32 *map_ids;
+ int reset_value = 0;
+ __u32 num_maps;
+ unsigned int i;
+ int err;
+
+ err = bpf_obj_get_info_by_fd(xsk->prog_fd, &prog_info, &prog_len);
+ if (err)
+ return err;
+
+ num_maps = prog_info.nr_map_ids;
+
+ map_ids = calloc(prog_info.nr_map_ids, sizeof(*map_ids));
+ if (!map_ids)
+ return -ENOMEM;
+
+ memset(&prog_info, 0, prog_len);
+ prog_info.nr_map_ids = num_maps;
+ prog_info.map_ids = (__u64)(unsigned long)map_ids;
+
+ err = bpf_obj_get_info_by_fd(xsk->prog_fd, &prog_info, &prog_len);
+ if (err)
+ goto out_map_ids;
+
+ for (i = 0; i < prog_info.nr_map_ids; i++) {
+ int fd;
+
+ fd = bpf_map_get_fd_by_id(map_ids[i]);
+ if (fd < 0) {
+ err = -errno;
+ goto out_maps;
+ }
+
+ err = bpf_obj_get_info_by_fd(fd, &map_info, &map_len);
+ if (err)
+ goto out_maps;
+
+ if (!strcmp(map_info.name, "qidconf_map")) {
+ err = bpf_map_update_elem(fd, &xsk->queue_id,
+ &qidconf_value, 0);
+ if (err)
+ goto out_maps;
+ qidconf_map_updated = true;
+ xsk->qidconf_map_fd = fd;
+ } else if (!strcmp(map_info.name, "xsks_map")) {
+ err = bpf_map_update_elem(fd, &xsk->queue_id,
+ &xsks_value, 0);
+ if (err)
+ goto out_maps;
+ xsks_map_updated = true;
+ xsk->xsks_map_fd = fd;
+ }
+
+ if (qidconf_map_updated && xsks_map_updated)
+ break;
+ }
+
+ if (!(qidconf_map_updated && xsks_map_updated)) {
+ err = -ENOENT;
+ goto out_maps;
+ }
+
+ err = 0;
+ goto out_success;
+
+out_maps:
+ if (qidconf_map_updated)
+ (void)bpf_map_update_elem(xsk->qidconf_map_fd, &xsk->queue_id,
+ &reset_value, 0);
+ if (xsks_map_updated)
+ (void)bpf_map_update_elem(xsk->xsks_map_fd, &xsk->queue_id,
+ &reset_value, 0);
+out_success:
+ if (qidconf_map_updated)
+ close(xsk->qidconf_map_fd);
+ if (xsks_map_updated)
+ close(xsk->xsks_map_fd);
+out_map_ids:
+ free(map_ids);
+ return err;
+}
+
+static int xsk_setup_xdp_prog(struct xsk_socket *xsk)
+{
+ bool prog_attached = false;
+ __u32 prog_id = 0;
+ int err;
+
+ err = bpf_get_link_xdp_id(xsk->ifindex, &prog_id,
+ xsk->config.xdp_flags);
+ if (err)
+ return err;
+
+ if (!prog_id) {
+ prog_attached = true;
+ err = xsk_create_bpf_maps(xsk);
+ if (err)
+ return err;
+
+ err = xsk_load_xdp_prog(xsk);
+ if (err)
+ goto out_maps;
+ } else {
+ xsk->prog_fd = bpf_prog_get_fd_by_id(prog_id);
+ }
+
+ err = xsk_update_bpf_maps(xsk, true, xsk->fd);
+ if (err)
+ goto out_load;
+
+ return 0;
+
+out_load:
+ if (prog_attached)
+ close(xsk->prog_fd);
+out_maps:
+ if (prog_attached)
+ xsk_delete_bpf_maps(xsk);
+ return err;
+}
+
+int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
+ __u32 queue_id, struct xsk_umem *umem,
+ struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
+ const struct xsk_socket_config *usr_config)
+{
+ struct sockaddr_xdp sxdp = {};
+ struct xdp_mmap_offsets off;
+ struct xsk_socket *xsk;
+ socklen_t optlen;
+ void *map;
+ int err;
+
+ if (!umem || !xsk_ptr || !rx || !tx)
+ return -EFAULT;
+
+ if (umem->refcount) {
+ pr_warning("Error: shared umems not supported by libbpf.\n");
+ return -EBUSY;
+ }
+
+ xsk = calloc(1, sizeof(*xsk));
+ if (!xsk)
+ return -ENOMEM;
+
+ if (umem->refcount++ > 0) {
+ xsk->fd = socket(AF_XDP, SOCK_RAW, 0);
+ if (xsk->fd < 0) {
+ err = -errno;
+ goto out_xsk_alloc;
+ }
+ } else {
+ xsk->fd = umem->fd;
+ }
+
+ xsk->outstanding_tx = 0;
+ xsk->queue_id = queue_id;
+ xsk->umem = umem;
+ xsk->ifindex = if_nametoindex(ifname);
+ if (!xsk->ifindex) {
+ err = -errno;
+ goto out_socket;
+ }
+ strncpy(xsk->ifname, ifname, IFNAMSIZ);
+
+ xsk_set_xdp_socket_config(&xsk->config, usr_config);
+
+ if (rx) {
+ err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
+ &xsk->config.rx_size,
+ sizeof(xsk->config.rx_size));
+ if (err) {
+ err = -errno;
+ goto out_socket;
+ }
+ }
+ if (tx) {
+ err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING,
+ &xsk->config.tx_size,
+ sizeof(xsk->config.tx_size));
+ if (err) {
+ err = -errno;
+ goto out_socket;
+ }
+ }
+
+ optlen = sizeof(off);
+ err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
+ if (err) {
+ err = -errno;
+ goto out_socket;
+ }
+
+ if (rx) {
+ map = xsk_mmap(NULL, off.rx.desc +
+ xsk->config.rx_size * sizeof(struct xdp_desc),
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE,
+ xsk->fd, XDP_PGOFF_RX_RING);
+ if (map == MAP_FAILED) {
+ err = -errno;
+ goto out_socket;
+ }
+
+ rx->mask = xsk->config.rx_size - 1;
+ rx->size = xsk->config.rx_size;
+ rx->producer = map + off.rx.producer;
+ rx->consumer = map + off.rx.consumer;
+ rx->ring = map + off.rx.desc;
+ }
+ xsk->rx = rx;
+
+ if (tx) {
+ map = xsk_mmap(NULL, off.tx.desc +
+ xsk->config.tx_size * sizeof(struct xdp_desc),
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE,
+ xsk->fd, XDP_PGOFF_TX_RING);
+ if (map == MAP_FAILED) {
+ err = -errno;
+ goto out_mmap_rx;
+ }
+
+ tx->mask = xsk->config.tx_size - 1;
+ tx->size = xsk->config.tx_size;
+ tx->producer = map + off.tx.producer;
+ tx->consumer = map + off.tx.consumer;
+ tx->ring = map + off.tx.desc;
+ tx->cached_cons = xsk->config.tx_size;
+ }
+ xsk->tx = tx;
+
+ sxdp.sxdp_family = PF_XDP;
+ sxdp.sxdp_ifindex = xsk->ifindex;
+ sxdp.sxdp_queue_id = xsk->queue_id;
+ sxdp.sxdp_flags = xsk->config.bind_flags;
+
+ err = bind(xsk->fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
+ if (err) {
+ err = -errno;
+ goto out_mmap_tx;
+ }
+
+ if (!(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) {
+ err = xsk_setup_xdp_prog(xsk);
+ if (err)
+ goto out_mmap_tx;
+ }
+
+ *xsk_ptr = xsk;
+ return 0;
+
+out_mmap_tx:
+ if (tx)
+ munmap(xsk->tx,
+ off.tx.desc +
+ xsk->config.tx_size * sizeof(struct xdp_desc));
+out_mmap_rx:
+ if (rx)
+ munmap(xsk->rx,
+ off.rx.desc +
+ xsk->config.rx_size * sizeof(struct xdp_desc));
+out_socket:
+ if (--umem->refcount)
+ close(xsk->fd);
+out_xsk_alloc:
+ free(xsk);
+ return err;
+}
+
+int xsk_umem__delete(struct xsk_umem *umem)
+{
+ struct xdp_mmap_offsets off;
+ socklen_t optlen;
+ int err;
+
+ if (!umem)
+ return 0;
+
+ if (umem->refcount)
+ return -EBUSY;
+
+ optlen = sizeof(off);
+ err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
+ if (!err) {
+ munmap(umem->fill->ring,
+ off.fr.desc + umem->config.fill_size * sizeof(__u64));
+ munmap(umem->comp->ring,
+ off.cr.desc + umem->config.comp_size * sizeof(__u64));
+ }
+
+ close(umem->fd);
+ free(umem);
+
+ return 0;
+}
+
+void xsk_socket__delete(struct xsk_socket *xsk)
+{
+ struct xdp_mmap_offsets off;
+ socklen_t optlen;
+ int err;
+
+ if (!xsk)
+ return;
+
+ (void)xsk_update_bpf_maps(xsk, 0, 0);
+
+ optlen = sizeof(off);
+ err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
+ if (!err) {
+ if (xsk->rx)
+ munmap(xsk->rx->ring,
+ off.rx.desc +
+ xsk->config.rx_size * sizeof(struct xdp_desc));
+ if (xsk->tx)
+ munmap(xsk->tx->ring,
+ off.tx.desc +
+ xsk->config.tx_size * sizeof(struct xdp_desc));
+ }
+
+ xsk->umem->refcount--;
+ /* Do not close an fd that also has an associated umem connected
+ * to it.
+ */
+ if (xsk->fd != xsk->umem->fd)
+ close(xsk->fd);
+ free(xsk);
+}
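To illustrate the intended call sequence, a minimal bring-up sketch using the default configurations (NULL configs); the interface name, frame count and file-scope variables are placeholders, and error handling is abbreviated:

#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include "xsk.h"		/* assumed include path for the new API */

#define NUM_FRAMES 4096		/* arbitrary example value */

static struct xsk_ring_prod fill;
static struct xsk_ring_cons comp;
static struct xsk_ring_cons rx;
static struct xsk_ring_prod tx;
static struct xsk_umem *umem;
static struct xsk_socket *xsk;

static int xsk_bringup(const char *ifname)
{
	size_t size = NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE;
	void *bufs;
	int err;

	/* Page-aligned buffer area that backs the umem */
	if (posix_memalign(&bufs, getpagesize(), size))
		return -ENOMEM;

	/* NULL config: defaults from xsk_set_umem_config() */
	err = xsk_umem__create(&umem, bufs, size, &fill, &comp, NULL);
	if (err)
		return err;

	/* NULL config: defaults, so the built-in XDP program and maps
	 * are set up on queue 0 of the given interface.
	 */
	return xsk_socket__create(&xsk, ifname, 0, umem, &rx, &tx, NULL);
}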
diff --git a/tools/lib/bpf/xsk.h b/tools/lib/bpf/xsk.h
new file mode 100644
index 000000000000..a497f00e2962
--- /dev/null
+++ b/tools/lib/bpf/xsk.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+
+/*
+ * AF_XDP user-space access library.
+ *
+ * Copyright(c) 2018 - 2019 Intel Corporation.
+ *
+ * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
+ */
+
+#ifndef __LIBBPF_XSK_H
+#define __LIBBPF_XSK_H
+
+#include <stdio.h>
+#include <stdint.h>
+#include <linux/if_xdp.h>
+
+#include "libbpf.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Do not access these members directly. Use the functions below. */
+#define DEFINE_XSK_RING(name) \
+struct name { \
+ __u32 cached_prod; \
+ __u32 cached_cons; \
+ __u32 mask; \
+ __u32 size; \
+ __u32 *producer; \
+ __u32 *consumer; \
+ void *ring; \
+}
+
+DEFINE_XSK_RING(xsk_ring_prod);
+DEFINE_XSK_RING(xsk_ring_cons);
+
+struct xsk_umem;
+struct xsk_socket;
+
+static inline __u64 *xsk_ring_prod__fill_addr(struct xsk_ring_prod *fill,
+ __u32 idx)
+{
+ __u64 *addrs = (__u64 *)fill->ring;
+
+ return &addrs[idx & fill->mask];
+}
+
+static inline const __u64 *
+xsk_ring_cons__comp_addr(const struct xsk_ring_cons *comp, __u32 idx)
+{
+ const __u64 *addrs = (const __u64 *)comp->ring;
+
+ return &addrs[idx & comp->mask];
+}
+
+static inline struct xdp_desc *xsk_ring_prod__tx_desc(struct xsk_ring_prod *tx,
+ __u32 idx)
+{
+ struct xdp_desc *descs = (struct xdp_desc *)tx->ring;
+
+ return &descs[idx & tx->mask];
+}
+
+static inline const struct xdp_desc *
+xsk_ring_cons__rx_desc(const struct xsk_ring_cons *rx, __u32 idx)
+{
+ const struct xdp_desc *descs = (const struct xdp_desc *)rx->ring;
+
+ return &descs[idx & rx->mask];
+}
+
+static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb)
+{
+ __u32 free_entries = r->cached_cons - r->cached_prod;
+
+ if (free_entries >= nb)
+ return free_entries;
+
+ /* Refresh the local tail pointer.
+ * cached_cons is r->size bigger than the real consumer pointer so
+ * that this addition can be avoided in the more frequently
+ * executed code that computes free_entries at the beginning of
+ * this function. Without this optimization it would have been
+ * free_entries = r->cached_prod - r->cached_cons + r->size.
+ */
+ r->cached_cons = *r->consumer + r->size;
+
+ return r->cached_cons - r->cached_prod;
+}
+
+static inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb)
+{
+ __u32 entries = r->cached_prod - r->cached_cons;
+
+ if (entries == 0) {
+ r->cached_prod = *r->producer;
+ entries = r->cached_prod - r->cached_cons;
+ }
+
+ return (entries > nb) ? nb : entries;
+}
+
+static inline size_t xsk_ring_prod__reserve(struct xsk_ring_prod *prod,
+ size_t nb, __u32 *idx)
+{
+ if (unlikely(xsk_prod_nb_free(prod, nb) < nb))
+ return 0;
+
+ *idx = prod->cached_prod;
+ prod->cached_prod += nb;
+
+ return nb;
+}
+
+static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, size_t nb)
+{
+ /* Make sure everything has been written to the ring before signalling
+ * this to the kernel.
+ */
+ smp_wmb();
+
+ *prod->producer += nb;
+}
+
+static inline size_t xsk_ring_cons__peek(struct xsk_ring_cons *cons,
+ size_t nb, __u32 *idx)
+{
+ size_t entries = xsk_cons_nb_avail(cons, nb);
+
+ if (likely(entries > 0)) {
+ /* Make sure we do not speculatively read the data before
+ * we have received the packet buffers from the ring.
+ */
+ smp_rmb();
+
+ *idx = cons->cached_cons;
+ cons->cached_cons += entries;
+ }
+
+ return entries;
+}
+
+static inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, size_t nb)
+{
+ *cons->consumer += nb;
+}
+
+static inline void *xsk_umem__get_data(void *umem_area, __u64 addr)
+{
+ return &((char *)umem_area)[addr];
+}
+
+LIBBPF_API int xsk_umem__fd(const struct xsk_umem *umem);
+LIBBPF_API int xsk_socket__fd(const struct xsk_socket *xsk);
+
+#define XSK_RING_CONS__DEFAULT_NUM_DESCS 2048
+#define XSK_RING_PROD__DEFAULT_NUM_DESCS 2048
+#define XSK_UMEM__DEFAULT_FRAME_SHIFT 11 /* 2048 bytes */
+#define XSK_UMEM__DEFAULT_FRAME_SIZE (1 << XSK_UMEM__DEFAULT_FRAME_SHIFT)
+#define XSK_UMEM__DEFAULT_FRAME_HEADROOM 0
+
+struct xsk_umem_config {
+ __u32 fill_size;
+ __u32 comp_size;
+ __u32 frame_size;
+ __u32 frame_headroom;
+};
+
+/* Flags for the libbpf_flags field. */
+#define XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD (1 << 0)
+
+struct xsk_socket_config {
+ __u32 rx_size;
+ __u32 tx_size;
+ __u32 libbpf_flags;
+ __u32 xdp_flags;
+ __u16 bind_flags;
+};
+
+/* Set config to NULL to get the default configuration. */
+LIBBPF_API int xsk_umem__create(struct xsk_umem **umem,
+ void *umem_area, __u64 size,
+ struct xsk_ring_prod *fill,
+ struct xsk_ring_cons *comp,
+ const struct xsk_umem_config *config);
+LIBBPF_API int xsk_socket__create(struct xsk_socket **xsk,
+ const char *ifname, __u32 queue_id,
+ struct xsk_umem *umem,
+ struct xsk_ring_cons *rx,
+ struct xsk_ring_prod *tx,
+ const struct xsk_socket_config *config);
+
+/* Returns 0 for success and -EBUSY if the umem is still in use. */
+LIBBPF_API int xsk_umem__delete(struct xsk_umem *umem);
+LIBBPF_API void xsk_socket__delete(struct xsk_socket *xsk);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* __LIBBPF_XSK_H */
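The ring accessors above compose into the usual produce/consume pattern. A sketch of one receive iteration follows; the batch size, the frame addresses handed to the fill ring and the packet handling itself are application-specific assumptions:

#include "xsk.h"		/* assumed include path */

/* Hand frames back to the kernel via the fill ring and drain the RX ring.
 * A sketch only: frame accounting and the actual packet processing are
 * application specific and omitted here.
 */
static void rx_loop_once(struct xsk_ring_prod *fill, struct xsk_ring_cons *rx,
			 void *umem_area)
{
	__u32 idx_fill = 0, idx_rx = 0;
	size_t i, rcvd, reserved;

	/* Refill with up to 64 example frame addresses */
	reserved = xsk_ring_prod__reserve(fill, 64, &idx_fill);
	for (i = 0; i < reserved; i++)
		*xsk_ring_prod__fill_addr(fill, idx_fill++) =
			i * XSK_UMEM__DEFAULT_FRAME_SIZE;
	xsk_ring_prod__submit(fill, reserved);

	/* Consume whatever the kernel has produced on the RX ring */
	rcvd = xsk_ring_cons__peek(rx, 64, &idx_rx);
	for (i = 0; i < rcvd; i++) {
		const struct xdp_desc *desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
		void *pkt = xsk_umem__get_data(umem_area, desc->addr);

		/* ... process desc->len bytes at pkt ... */
		(void)pkt;
	}
	xsk_ring_cons__release(rx, rcvd);
}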
diff --git a/tools/lib/lockdep/include/liblockdep/common.h b/tools/lib/lockdep/include/liblockdep/common.h
index d640a9761f09..a81d91d4fc78 100644
--- a/tools/lib/lockdep/include/liblockdep/common.h
+++ b/tools/lib/lockdep/include/liblockdep/common.h
@@ -45,6 +45,8 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
void lock_release(struct lockdep_map *lock, int nested,
unsigned long ip);
void lockdep_reset_lock(struct lockdep_map *lock);
+void lockdep_register_key(struct lock_class_key *key);
+void lockdep_unregister_key(struct lock_class_key *key);
extern void debug_check_no_locks_freed(const void *from, unsigned long len);
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
diff --git a/tools/lib/lockdep/include/liblockdep/mutex.h b/tools/lib/lockdep/include/liblockdep/mutex.h
index 2073d4e1f2f0..783dd0df06f9 100644
--- a/tools/lib/lockdep/include/liblockdep/mutex.h
+++ b/tools/lib/lockdep/include/liblockdep/mutex.h
@@ -7,6 +7,7 @@
struct liblockdep_pthread_mutex {
pthread_mutex_t mutex;
+ struct lock_class_key key;
struct lockdep_map dep_map;
};
@@ -27,11 +28,10 @@ static inline int __mutex_init(liblockdep_pthread_mutex_t *lock,
return pthread_mutex_init(&lock->mutex, __mutexattr);
}
-#define liblockdep_pthread_mutex_init(mutex, mutexattr) \
-({ \
- static struct lock_class_key __key; \
- \
- __mutex_init((mutex), #mutex, &__key, (mutexattr)); \
+#define liblockdep_pthread_mutex_init(mutex, mutexattr) \
+({ \
+ lockdep_register_key(&(mutex)->key); \
+ __mutex_init((mutex), #mutex, &(mutex)->key, (mutexattr)); \
})
static inline int liblockdep_pthread_mutex_lock(liblockdep_pthread_mutex_t *lock)
@@ -55,6 +55,7 @@ static inline int liblockdep_pthread_mutex_trylock(liblockdep_pthread_mutex_t *l
static inline int liblockdep_pthread_mutex_destroy(liblockdep_pthread_mutex_t *lock)
{
lockdep_reset_lock(&lock->dep_map);
+ lockdep_unregister_key(&lock->key);
return pthread_mutex_destroy(&lock->mutex);
}
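A minimal sketch of the resulting lifecycle from the caller's side, assuming the liblockdep wrapper API and the header path used by this file; each init now registers the mutex's own lock_class_key and destroy unregisters it, so mutexes embedded in reused heap memory stay valid:

#include <liblockdep/mutex.h>	/* assumed include path */

static void example(void)
{
	liblockdep_pthread_mutex_t m;

	liblockdep_pthread_mutex_init(&m, NULL);	/* registers m.key */
	liblockdep_pthread_mutex_lock(&m);
	/* ... critical section ... */
	liblockdep_pthread_mutex_unlock(&m);
	liblockdep_pthread_mutex_destroy(&m);		/* unregisters m.key */
}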
diff --git a/tools/lib/lockdep/run_tests.sh b/tools/lib/lockdep/run_tests.sh
index c8fbd0306960..11f425662b43 100755
--- a/tools/lib/lockdep/run_tests.sh
+++ b/tools/lib/lockdep/run_tests.sh
@@ -11,7 +11,7 @@ find tests -name '*.c' | sort | while read -r i; do
testname=$(basename "$i" .c)
echo -ne "$testname... "
if gcc -o "tests/$testname" -pthread "$i" liblockdep.a -Iinclude -D__USE_LIBLOCKDEP &&
- timeout 1 "tests/$testname" 2>&1 | "tests/${testname}.sh"; then
+ timeout 1 "tests/$testname" 2>&1 | /bin/bash "tests/${testname}.sh"; then
echo "PASSED!"
else
echo "FAILED!"
@@ -24,7 +24,7 @@ find tests -name '*.c' | sort | while read -r i; do
echo -ne "(PRELOAD) $testname... "
if gcc -o "tests/$testname" -pthread -Iinclude "$i" &&
timeout 1 ./lockdep "tests/$testname" 2>&1 |
- "tests/${testname}.sh"; then
+ /bin/bash "tests/${testname}.sh"; then
echo "PASSED!"
else
echo "FAILED!"
@@ -37,7 +37,7 @@ find tests -name '*.c' | sort | while read -r i; do
echo -ne "(PRELOAD + Valgrind) $testname... "
if gcc -o "tests/$testname" -pthread -Iinclude "$i" &&
{ timeout 10 valgrind --read-var-info=yes ./lockdep "./tests/$testname" >& "tests/${testname}.vg.out"; true; } &&
- "tests/${testname}.sh" < "tests/${testname}.vg.out" &&
+ /bin/bash "tests/${testname}.sh" < "tests/${testname}.vg.out" &&
! grep -Eq '(^==[0-9]*== (Invalid |Uninitialised ))|Mismatched free|Source and destination overlap| UME ' "tests/${testname}.vg.out"; then
echo "PASSED!"
else
diff --git a/tools/lib/lockdep/tests/ABBA.c b/tools/lib/lockdep/tests/ABBA.c
index 623313f54720..543789bc3e37 100644
--- a/tools/lib/lockdep/tests/ABBA.c
+++ b/tools/lib/lockdep/tests/ABBA.c
@@ -14,4 +14,13 @@ void main(void)
pthread_mutex_destroy(&b);
pthread_mutex_destroy(&a);
+
+ pthread_mutex_init(&a, NULL);
+ pthread_mutex_init(&b, NULL);
+
+ LOCK_UNLOCK_2(a, b);
+ LOCK_UNLOCK_2(b, a);
+
+ pthread_mutex_destroy(&b);
+ pthread_mutex_destroy(&a);
}
diff --git a/tools/lib/rbtree.c b/tools/lib/rbtree.c
index 17c2b596f043..904adb70a4f0 100644
--- a/tools/lib/rbtree.c
+++ b/tools/lib/rbtree.c
@@ -22,6 +22,7 @@
*/
#include <linux/rbtree_augmented.h>
+#include <linux/export.h>
/*
* red-black trees properties: http://en.wikipedia.org/wiki/Rbtree
@@ -43,6 +44,30 @@
* parentheses and have some accompanying text comment.
*/
+/*
+ * Notes on lockless lookups:
+ *
+ * All stores to the tree structure (rb_left and rb_right) must be done using
+ * WRITE_ONCE(). And we must not inadvertently cause (temporary) loops in the
+ * tree structure as seen in program order.
+ *
+ * These two requirements will allow lockless iteration of the tree -- not
+ * correct iteration mind you, tree rotations are not atomic so a lookup might
+ * miss entire subtrees.
+ *
+ * But they do guarantee that any such traversal will only see valid elements
+ * and that it will indeed complete -- does not get stuck in a loop.
+ *
+ * It also guarantees that if the lookup returns an element it is the 'correct'
+ * one. But not returning an element does _NOT_ mean it's not present.
+ *
+ * NOTE:
+ *
+ * Stores to __rb_parent_color are not important for simple lookups so those
+ * are left undone as of now. Nor did I check for loops involving parent
+ * pointers.
+ */
+
static inline void rb_set_black(struct rb_node *rb)
{
rb->__rb_parent_color |= RB_BLACK;
@@ -70,22 +95,35 @@ __rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
static __always_inline void
__rb_insert(struct rb_node *node, struct rb_root *root,
+ bool newleft, struct rb_node **leftmost,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
+ if (newleft)
+ *leftmost = node;
+
while (true) {
/*
- * Loop invariant: node is red
- *
- * If there is a black parent, we are done.
- * Otherwise, take some corrective action as we don't
- * want a red root or two consecutive red nodes.
+ * Loop invariant: node is red.
*/
- if (!parent) {
+ if (unlikely(!parent)) {
+ /*
+ * The inserted node is root. Either this is the
+ * first node, or we recursed at Case 1 below and
+ * are no longer violating 4).
+ */
rb_set_parent_color(node, NULL, RB_BLACK);
break;
- } else if (rb_is_black(parent))
+ }
+
+ /*
+ * If there is a black parent, we are done.
+ * Otherwise, take some corrective action as,
+ * per 4), we don't want a red root or two
+ * consecutive red nodes.
+ */
+ if (rb_is_black(parent))
break;
gparent = rb_red_parent(parent);
@@ -94,7 +132,7 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
if (parent != tmp) { /* parent == gparent->rb_left */
if (tmp && rb_is_red(tmp)) {
/*
- * Case 1 - color flips
+ * Case 1 - node's uncle is red (color flips).
*
* G g
* / \ / \
@@ -117,7 +155,8 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
tmp = parent->rb_right;
if (node == tmp) {
/*
- * Case 2 - left rotate at parent
+ * Case 2 - node's uncle is black and node is
+ * the parent's right child (left rotate at parent).
*
* G G
* / \ / \
@@ -128,8 +167,9 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
* This still leaves us in violation of 4), the
* continuation into Case 3 will fix that.
*/
- parent->rb_right = tmp = node->rb_left;
- node->rb_left = parent;
+ tmp = node->rb_left;
+ WRITE_ONCE(parent->rb_right, tmp);
+ WRITE_ONCE(node->rb_left, parent);
if (tmp)
rb_set_parent_color(tmp, parent,
RB_BLACK);
@@ -140,7 +180,8 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
}
/*
- * Case 3 - right rotate at gparent
+ * Case 3 - node's uncle is black and node is
+ * the parent's left child (right rotate at gparent).
*
* G P
* / \ / \
@@ -148,8 +189,8 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
* / \
* n U
*/
- gparent->rb_left = tmp; /* == parent->rb_right */
- parent->rb_right = gparent;
+ WRITE_ONCE(gparent->rb_left, tmp); /* == parent->rb_right */
+ WRITE_ONCE(parent->rb_right, gparent);
if (tmp)
rb_set_parent_color(tmp, gparent, RB_BLACK);
__rb_rotate_set_parents(gparent, parent, root, RB_RED);
@@ -170,8 +211,9 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
tmp = parent->rb_left;
if (node == tmp) {
/* Case 2 - right rotate at parent */
- parent->rb_left = tmp = node->rb_right;
- node->rb_right = parent;
+ tmp = node->rb_right;
+ WRITE_ONCE(parent->rb_left, tmp);
+ WRITE_ONCE(node->rb_right, parent);
if (tmp)
rb_set_parent_color(tmp, parent,
RB_BLACK);
@@ -182,8 +224,8 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
}
/* Case 3 - left rotate at gparent */
- gparent->rb_right = tmp; /* == parent->rb_left */
- parent->rb_left = gparent;
+ WRITE_ONCE(gparent->rb_right, tmp); /* == parent->rb_left */
+ WRITE_ONCE(parent->rb_left, gparent);
if (tmp)
rb_set_parent_color(tmp, gparent, RB_BLACK);
__rb_rotate_set_parents(gparent, parent, root, RB_RED);
@@ -223,8 +265,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
* / \ / \
* Sl Sr N Sl
*/
- parent->rb_right = tmp1 = sibling->rb_left;
- sibling->rb_left = parent;
+ tmp1 = sibling->rb_left;
+ WRITE_ONCE(parent->rb_right, tmp1);
+ WRITE_ONCE(sibling->rb_left, parent);
rb_set_parent_color(tmp1, parent, RB_BLACK);
__rb_rotate_set_parents(parent, sibling, root,
RB_RED);
@@ -268,15 +311,31 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
*
* (p) (p)
* / \ / \
- * N S --> N Sl
+ * N S --> N sl
* / \ \
- * sl Sr s
+ * sl Sr S
* \
* Sr
+ *
+ * Note: p might be red, and then both
+ * p and sl are red after rotation (which
+ * breaks property 4). This is fixed in
+ * Case 4 (in __rb_rotate_set_parents(),
+ * which sets sl to the color of p
+ * and sets p to RB_BLACK)
+ *
+ * (p) (sl)
+ * / \ / \
+ * N sl --> P S
+ * \ / \
+ * S N Sr
+ * \
+ * Sr
*/
- sibling->rb_left = tmp1 = tmp2->rb_right;
- tmp2->rb_right = sibling;
- parent->rb_right = tmp2;
+ tmp1 = tmp2->rb_right;
+ WRITE_ONCE(sibling->rb_left, tmp1);
+ WRITE_ONCE(tmp2->rb_right, sibling);
+ WRITE_ONCE(parent->rb_right, tmp2);
if (tmp1)
rb_set_parent_color(tmp1, sibling,
RB_BLACK);
@@ -296,8 +355,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
* / \ / \
* (sl) sr N (sl)
*/
- parent->rb_right = tmp2 = sibling->rb_left;
- sibling->rb_left = parent;
+ tmp2 = sibling->rb_left;
+ WRITE_ONCE(parent->rb_right, tmp2);
+ WRITE_ONCE(sibling->rb_left, parent);
rb_set_parent_color(tmp1, sibling, RB_BLACK);
if (tmp2)
rb_set_parent(tmp2, parent);
@@ -309,8 +369,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
sibling = parent->rb_left;
if (rb_is_red(sibling)) {
/* Case 1 - right rotate at parent */
- parent->rb_left = tmp1 = sibling->rb_right;
- sibling->rb_right = parent;
+ tmp1 = sibling->rb_right;
+ WRITE_ONCE(parent->rb_left, tmp1);
+ WRITE_ONCE(sibling->rb_right, parent);
rb_set_parent_color(tmp1, parent, RB_BLACK);
__rb_rotate_set_parents(parent, sibling, root,
RB_RED);
@@ -334,10 +395,11 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
}
break;
}
- /* Case 3 - right rotate at sibling */
- sibling->rb_right = tmp1 = tmp2->rb_left;
- tmp2->rb_left = sibling;
- parent->rb_left = tmp2;
+ /* Case 3 - left rotate at sibling */
+ tmp1 = tmp2->rb_left;
+ WRITE_ONCE(sibling->rb_right, tmp1);
+ WRITE_ONCE(tmp2->rb_left, sibling);
+ WRITE_ONCE(parent->rb_left, tmp2);
if (tmp1)
rb_set_parent_color(tmp1, sibling,
RB_BLACK);
@@ -345,9 +407,10 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
tmp1 = sibling;
sibling = tmp2;
}
- /* Case 4 - left rotate at parent + color flips */
- parent->rb_left = tmp2 = sibling->rb_right;
- sibling->rb_right = parent;
+ /* Case 4 - right rotate at parent + color flips */
+ tmp2 = sibling->rb_right;
+ WRITE_ONCE(parent->rb_left, tmp2);
+ WRITE_ONCE(sibling->rb_right, parent);
rb_set_parent_color(tmp1, sibling, RB_BLACK);
if (tmp2)
rb_set_parent(tmp2, parent);
@@ -378,22 +441,41 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
static const struct rb_augment_callbacks dummy_callbacks = {
- dummy_propagate, dummy_copy, dummy_rotate
+ .propagate = dummy_propagate,
+ .copy = dummy_copy,
+ .rotate = dummy_rotate
};
void rb_insert_color(struct rb_node *node, struct rb_root *root)
{
- __rb_insert(node, root, dummy_rotate);
+ __rb_insert(node, root, false, NULL, dummy_rotate);
}
void rb_erase(struct rb_node *node, struct rb_root *root)
{
struct rb_node *rebalance;
- rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
+ rebalance = __rb_erase_augmented(node, root,
+ NULL, &dummy_callbacks);
if (rebalance)
____rb_erase_color(rebalance, root, dummy_rotate);
}
+void rb_insert_color_cached(struct rb_node *node,
+ struct rb_root_cached *root, bool leftmost)
+{
+ __rb_insert(node, &root->rb_root, leftmost,
+ &root->rb_leftmost, dummy_rotate);
+}
+
+void rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
+{
+ struct rb_node *rebalance;
+ rebalance = __rb_erase_augmented(node, &root->rb_root,
+ &root->rb_leftmost, &dummy_callbacks);
+ if (rebalance)
+ ____rb_erase_color(rebalance, &root->rb_root, dummy_rotate);
+}
+
/*
* Augmented rbtree manipulation functions.
*
@@ -402,9 +484,10 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
*/
void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+ bool newleft, struct rb_node **leftmost,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
- __rb_insert(node, root, augment_rotate);
+ __rb_insert(node, root, newleft, leftmost, augment_rotate);
}
/*
@@ -498,15 +581,24 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new,
{
struct rb_node *parent = rb_parent(victim);
+ /* Copy the pointers/colour from the victim to the replacement */
+ *new = *victim;
+
/* Set the surrounding nodes to point to the replacement */
- __rb_change_child(victim, new, parent, root);
if (victim->rb_left)
rb_set_parent(victim->rb_left, new);
if (victim->rb_right)
rb_set_parent(victim->rb_right, new);
+ __rb_change_child(victim, new, parent, root);
+}
- /* Copy the pointers/colour from the victim to the replacement */
- *new = *victim;
+void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
+ struct rb_root_cached *root)
+{
+ rb_replace_node(victim, new, &root->rb_root);
+
+ if (root->rb_leftmost == victim)
+ root->rb_leftmost = new;
}
static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
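For the new cached-rbtree entry points, a sketch of a hypothetical integer-keyed user; the item structure and helpers are illustrative only, and keep the leftmost (smallest) element reachable in O(1) through the cached root:

#include <linux/rbtree.h>

struct item {
	struct rb_node node;
	int key;
};

static void item_insert(struct rb_root_cached *root, struct item *new)
{
	struct rb_node **link = &root->rb_root.rb_node, *parent = NULL;
	bool leftmost = true;

	while (*link) {
		struct item *cur = rb_entry(*link, struct item, node);

		parent = *link;
		if (new->key < cur->key) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = false;	/* went right at least once */
		}
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color_cached(&new->node, root, leftmost);
}

static void item_remove(struct rb_root_cached *root, struct item *old)
{
	rb_erase_cached(&old->node, root);
}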
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index abd4fa5d3088..87494c7c619d 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -2457,7 +2457,7 @@ static int arg_num_eval(struct tep_print_arg *arg, long long *val)
static char *arg_eval (struct tep_print_arg *arg)
{
long long val;
- static char buf[20];
+ static char buf[24];
switch (arg->type) {
case TEP_PRINT_ATOM:
diff --git a/tools/memory-model/.gitignore b/tools/memory-model/.gitignore
new file mode 100644
index 000000000000..b1d34c52f3c3
--- /dev/null
+++ b/tools/memory-model/.gitignore
@@ -0,0 +1 @@
+litmus
diff --git a/tools/memory-model/README b/tools/memory-model/README
index acf9077cffaa..0f2c366518c6 100644
--- a/tools/memory-model/README
+++ b/tools/memory-model/README
@@ -156,6 +156,8 @@ lock.cat
README
This file.
+scripts Various scripts, see scripts/README.
+
===========
LIMITATIONS
diff --git a/tools/memory-model/linux-kernel.bell b/tools/memory-model/linux-kernel.bell
index b84fb2f67109..796513362c05 100644
--- a/tools/memory-model/linux-kernel.bell
+++ b/tools/memory-model/linux-kernel.bell
@@ -29,7 +29,8 @@ enum Barriers = 'wmb (*smp_wmb*) ||
'sync-rcu (*synchronize_rcu*) ||
'before-atomic (*smp_mb__before_atomic*) ||
'after-atomic (*smp_mb__after_atomic*) ||
- 'after-spinlock (*smp_mb__after_spinlock*)
+ 'after-spinlock (*smp_mb__after_spinlock*) ||
+ 'after-unlock-lock (*smp_mb__after_unlock_lock*)
instructions F[Barriers]
(* Compute matching pairs of nested Rcu-lock and Rcu-unlock *)
diff --git a/tools/memory-model/linux-kernel.cat b/tools/memory-model/linux-kernel.cat
index 882fc33274ac..8f23c74a96fd 100644
--- a/tools/memory-model/linux-kernel.cat
+++ b/tools/memory-model/linux-kernel.cat
@@ -30,7 +30,9 @@ let wmb = [W] ; fencerel(Wmb) ; [W]
let mb = ([M] ; fencerel(Mb) ; [M]) |
([M] ; fencerel(Before-atomic) ; [RMW] ; po? ; [M]) |
([M] ; po? ; [RMW] ; fencerel(After-atomic) ; [M]) |
- ([M] ; po? ; [LKW] ; fencerel(After-spinlock) ; [M])
+ ([M] ; po? ; [LKW] ; fencerel(After-spinlock) ; [M]) |
+ ([M] ; po ; [UL] ; (co | po) ; [LKW] ;
+ fencerel(After-unlock-lock) ; [M])
let gp = po ; [Sync-rcu] ; po?
let strong-fence = mb | gp
diff --git a/tools/memory-model/linux-kernel.def b/tools/memory-model/linux-kernel.def
index 6fa3eb28d40b..b27911cc087d 100644
--- a/tools/memory-model/linux-kernel.def
+++ b/tools/memory-model/linux-kernel.def
@@ -23,6 +23,7 @@ smp_wmb() { __fence{wmb}; }
smp_mb__before_atomic() { __fence{before-atomic}; }
smp_mb__after_atomic() { __fence{after-atomic}; }
smp_mb__after_spinlock() { __fence{after-spinlock}; }
+smp_mb__after_unlock_lock() { __fence{after-unlock-lock}; }
// Exchange
xchg(X,V) __xchg{mb}(X,V)
diff --git a/tools/memory-model/scripts/README b/tools/memory-model/scripts/README
new file mode 100644
index 000000000000..29375a1fbbfa
--- /dev/null
+++ b/tools/memory-model/scripts/README
@@ -0,0 +1,70 @@
+ ============
+ LKMM SCRIPTS
+ ============
+
+
+These scripts are run from the tools/memory-model directory.
+
+checkalllitmus.sh
+
+ Run all litmus tests in the litmus-tests directory, checking
+ the results against the expected results recorded in the
+ "Result:" comment lines.
+
+checkghlitmus.sh
+
+ Run all litmus tests in the https://github.com/paulmckrcu/litmus
+ archive that are C-language and that have "Result:" comment lines
+ documenting expected results, comparing the actual results to
+ those expected.
+
+checklitmushist.sh
+
+ Run all litmus tests having .litmus.out files from previous
+ initlitmushist.sh or newlitmushist.sh runs, comparing the
+ herd output to that of the original runs.
+
+checklitmus.sh
+
+ Check a single litmus test against its "Result:" expected result.
+
+cmplitmushist.sh
+
+ Compare output from two different runs of the same litmus tests,
+ with the absolute pathnames of the tests to run provided one
+ name per line on standard input. Not normally run manually,
+ provided instead for use by other scripts.
+
+initlitmushist.sh
+
+ Run all litmus tests having no more than the specified number
+ of processes given a specified timeout, recording the results
+ in .litmus.out files.
+
+judgelitmus.sh
+
+ Given a .litmus file and its .litmus.out herd output, check the
+ .litmus.out file against the .litmus file's "Result:" comment to
+ judge whether the test ran correctly. Not normally run manually,
+ provided instead for use by other scripts.
+
+newlitmushist.sh
+
+ For all new or updated litmus tests having no more than the
+ specified number of processes given a specified timeout, run
+ and record the results in .litmus.out files.
+
+parseargs.sh
+
+ Parse command-line arguments. Not normally run manually,
+ provided instead for use by other scripts.
+
+runlitmushist.sh
+
+ Run the litmus tests whose absolute pathnames are provided one
+ name per line on standard input. Not normally run manually,
+ provided instead for use by other scripts.
+
+README
+
+ This file
diff --git a/tools/memory-model/scripts/checkalllitmus.sh b/tools/memory-model/scripts/checkalllitmus.sh
index ca528f9a24d4..b35fcd61ecf6 100755
--- a/tools/memory-model/scripts/checkalllitmus.sh
+++ b/tools/memory-model/scripts/checkalllitmus.sh
@@ -1,42 +1,27 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
#
-# Run herd tests on all .litmus files in the specified directory (which
-# defaults to litmus-tests) and check each file's result against a "Result:"
-# comment within that litmus test. If the verification result does not
-# match that specified in the litmus test, this script prints an error
-# message prefixed with "^^^". It also outputs verification results to
-# a file whose name is that of the specified litmus test, but with ".out"
-# appended.
+# Run herd tests on all .litmus files in the litmus-tests directory
+# and check each file's result against a "Result:" comment within that
+# litmus test. If the verification result does not match that specified
+# in the litmus test, this script prints an error message prefixed with
+# "^^^". It also outputs verification results to a file whose name is
+# that of the specified litmus test, but with ".out" appended.
#
# Usage:
-# checkalllitmus.sh [ directory ]
+# checkalllitmus.sh
#
-# The LINUX_HERD_OPTIONS environment variable may be used to specify
-# arguments to herd, whose default is defined by the checklitmus.sh script.
-# Thus, one would normally run this in the directory containing the memory
-# model, specifying the pathname of the litmus test to check.
+# Run this in the directory containing the memory model.
#
# This script makes no attempt to run the litmus tests concurrently.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright IBM Corporation, 2018
#
# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-litmusdir=${1-litmus-tests}
+. scripts/parseargs.sh
+
+litmusdir=litmus-tests
if test -d "$litmusdir" -a -r "$litmusdir" -a -x "$litmusdir"
then
:
@@ -45,6 +30,14 @@ else
exit 255
fi
+# Create any new directories that have appeared in the github litmus
+# repo since the last run.
+if test "$LKMM_DESTDIR" != "."
+then
+ find $litmusdir -type d -print |
+ ( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh )
+fi
+
# Find the checklitmus script. If it is not where we expect it, then
# assume that the caller has the PATH environment variable set
# appropriately.
@@ -57,7 +50,7 @@ fi
# Run the script on all the litmus tests in the specified directory
ret=0
-for i in litmus-tests/*.litmus
+for i in $litmusdir/*.litmus
do
if ! $clscript $i
then
@@ -66,8 +59,8 @@ do
done
if test "$ret" -ne 0
then
- echo " ^^^ VERIFICATION MISMATCHES"
+ echo " ^^^ VERIFICATION MISMATCHES" 1>&2
else
- echo All litmus tests verified as was expected.
+ echo All litmus tests verified as was expected. 1>&2
fi
exit $ret
diff --git a/tools/memory-model/scripts/checkghlitmus.sh b/tools/memory-model/scripts/checkghlitmus.sh
new file mode 100644
index 000000000000..6589fbb6f653
--- /dev/null
+++ b/tools/memory-model/scripts/checkghlitmus.sh
@@ -0,0 +1,65 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Runs the C-language litmus tests having no more than the specified
+# maximum number of processes (defaults to 6).
+#
+# sh checkghlitmus.sh
+#
+# Run from the Linux kernel tools/memory-model directory. See the
+# parseargs.sh scripts for arguments.
+
+. scripts/parseargs.sh
+
+T=/tmp/checkghlitmus.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+# Clone the repository if it is not already present.
+if test -d litmus
+then
+ :
+else
+ git clone https://github.com/paulmckrcu/litmus
+ ( cd litmus; git checkout origin/master )
+fi
+
+# Create any new directories that have appeared in the github litmus
+# repo since the last run.
+if test "$LKMM_DESTDIR" != "."
+then
+ find litmus -type d -print |
+ ( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh )
+fi
+
+# Create a list of the C-language litmus tests previously run.
+( cd $LKMM_DESTDIR; find litmus -name '*.litmus.out' -print ) |
+ sed -e 's/\.out$//' |
+ xargs -r egrep -l '^ \* Result: (Never|Sometimes|Always|DEADLOCK)' |
+ xargs -r grep -L "^P${LKMM_PROCS}"> $T/list-C-already
+
+# Create a list of C-language litmus tests with "Result:" comments and
+# no more than the specified number of processes.
+find litmus -name '*.litmus' -exec grep -l -m 1 "^C " {} \; > $T/list-C
+xargs < $T/list-C -r egrep -l '^ \* Result: (Never|Sometimes|Always|DEADLOCK)' > $T/list-C-result
+xargs < $T/list-C-result -r grep -L "^P${LKMM_PROCS}" > $T/list-C-result-short
+
+# Form list of tests without corresponding .litmus.out files
+sort $T/list-C-already $T/list-C-result-short | uniq -u > $T/list-C-needed
+
+# Run any needed tests.
+if scripts/runlitmushist.sh < $T/list-C-needed > $T/run.stdout 2> $T/run.stderr
+then
+ errs=
+else
+ errs=1
+fi
+
+sed < $T/list-C-result-short -e 's,^,scripts/judgelitmus.sh ,' |
+ sh > $T/judge.stdout 2> $T/judge.stderr
+
+if test -n "$errs"
+then
+ cat $T/run.stderr 1>&2
+fi
+grep '!!!' $T/judge.stdout
diff --git a/tools/memory-model/scripts/checklitmus.sh b/tools/memory-model/scripts/checklitmus.sh
index bf12a75c0719..dd08801a30b0 100755
--- a/tools/memory-model/scripts/checklitmus.sh
+++ b/tools/memory-model/scripts/checklitmus.sh
@@ -1,40 +1,24 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
#
-# Run a herd test and check the result against a "Result:" comment within
-# the litmus test. If the verification result does not match that specified
-# in the litmus test, this script prints an error message prefixed with
-# "^^^" and exits with a non-zero status. It also outputs verification
+# Run a herd test and invokes judgelitmus.sh to check the result against
+# a "Result:" comment within the litmus test. It also outputs verification
# results to a file whose name is that of the specified litmus test, but
# with ".out" appended.
#
# Usage:
# checklitmus.sh file.litmus
#
-# The LINUX_HERD_OPTIONS environment variable may be used to specify
-# arguments to herd, which default to "-conf linux-kernel.cfg". Thus,
-# one would normally run this in the directory containing the memory model,
-# specifying the pathname of the litmus test to check.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
+# Run this in the directory containing the memory model, specifying the
+# pathname of the litmus test to check. The caller is expected to have
+# properly set up the LKMM environment variables.
#
# Copyright IBM Corporation, 2018
#
# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
litmus=$1
-herdoptions=${LINUX_HERD_OPTIONS--conf linux-kernel.cfg}
+herdoptions=${LKMM_HERD_OPTIONS--conf linux-kernel.cfg}
if test -f "$litmus" -a -r "$litmus"
then
@@ -43,44 +27,8 @@ else
echo ' --- ' error: \"$litmus\" is not a readable file
exit 255
fi
-if grep -q '^ \* Result: ' $litmus
-then
- outcome=`grep -m 1 '^ \* Result: ' $litmus | awk '{ print $3 }'`
-else
- outcome=specified
-fi
-echo Herd options: $herdoptions > $litmus.out
-/usr/bin/time herd7 -o ~/tmp $herdoptions $litmus >> $litmus.out 2>&1
-grep "Herd options:" $litmus.out
-grep '^Observation' $litmus.out
-if grep -q '^Observation' $litmus.out
-then
- :
-else
- cat $litmus.out
- echo ' ^^^ Verification error'
- echo ' ^^^ Verification error' >> $litmus.out 2>&1
- exit 255
-fi
-if test "$outcome" = DEADLOCK
-then
- echo grep 3 and 4
- if grep '^Observation' $litmus.out | grep -q 'Never 0 0$'
- then
- ret=0
- else
- echo " ^^^ Unexpected non-$outcome verification"
- echo " ^^^ Unexpected non-$outcome verification" >> $litmus.out 2>&1
- ret=1
- fi
-elif grep '^Observation' $litmus.out | grep -q $outcome || test "$outcome" = Maybe
-then
- ret=0
-else
- echo " ^^^ Unexpected non-$outcome verification"
- echo " ^^^ Unexpected non-$outcome verification" >> $litmus.out 2>&1
- ret=1
-fi
-tail -2 $litmus.out | head -1
-exit $ret
+echo Herd options: $herdoptions > $LKMM_DESTDIR/$litmus.out
+/usr/bin/time $LKMM_TIMEOUT_CMD herd7 $herdoptions $litmus >> $LKMM_DESTDIR/$litmus.out 2>&1
+
+scripts/judgelitmus.sh $litmus
diff --git a/tools/memory-model/scripts/checklitmushist.sh b/tools/memory-model/scripts/checklitmushist.sh
new file mode 100644
index 000000000000..1d210ffb7c8a
--- /dev/null
+++ b/tools/memory-model/scripts/checklitmushist.sh
@@ -0,0 +1,60 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Reruns the C-language litmus tests previously run that match the
+# specified criteria, and compares the result to that of the previous
+# runs from initlitmushist.sh and/or newlitmushist.sh.
+#
+# sh checklitmushist.sh
+#
+# Run from the Linux kernel tools/memory-model directory.
+# See scripts/parseargs.sh for list of arguments.
+#
+# Copyright IBM Corporation, 2018
+#
+# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+. scripts/parseargs.sh
+
+T=/tmp/checklitmushist.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+if test -d litmus
+then
+ :
+else
+ echo Run scripts/initlitmushist.sh first, need litmus repo.
+ exit 1
+fi
+
+# Create the results directory and populate it with subdirectories.
+# The initial output is created here to avoid clobbering the output
+# generated earlier.
+mkdir $T/results
+find litmus -type d -print | ( cd $T/results; sed -e 's/^/mkdir -p /' | sh )
+
+# Create the list of litmus tests already run, then remove those that
+# are excluded by this run's --procs argument.
+( cd $LKMM_DESTDIR; find litmus -name '*.litmus.out' -print ) |
+ sed -e 's/\.out$//' |
+ xargs -r grep -L "^P${LKMM_PROCS}"> $T/list-C-already
+xargs < $T/list-C-already -r grep -L "^P${LKMM_PROCS}" > $T/list-C-short
+
+# Redirect output, run tests, then restore destination directory.
+destdir="$LKMM_DESTDIR"
+LKMM_DESTDIR=$T/results; export LKMM_DESTDIR
+scripts/runlitmushist.sh < $T/list-C-short > $T/runlitmushist.sh.out 2>&1
+LKMM_DESTDIR="$destdir"; export LKMM_DESTDIR
+
+# Move the newly generated .litmus.out files to .litmus.out.new files
+# in the destination directory.
+cdir=`pwd`
+ddir=`awk -v c="$cdir" -v d="$LKMM_DESTDIR" \
+ 'END { if (d ~ /^\//) print d; else print c "/" d; }' < /dev/null`
+( cd $T/results; find litmus -type f -name '*.litmus.out' -print |
+ sed -e 's,^.*$,cp & '"$ddir"'/&.new,' | sh )
+
+sed < $T/list-C-short -e 's,^,'"$LKMM_DESTDIR/"',' |
+ sh scripts/cmplitmushist.sh
+exit $?
diff --git a/tools/memory-model/scripts/cmplitmushist.sh b/tools/memory-model/scripts/cmplitmushist.sh
new file mode 100644
index 000000000000..0f498aeeccf5
--- /dev/null
+++ b/tools/memory-model/scripts/cmplitmushist.sh
@@ -0,0 +1,87 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Compares .out and .out.new files for each name on standard input,
+# one full pathname per line. Outputs comparison results followed by
+# a summary.
+#
+# sh cmplitmushist.sh
+
+T=/tmp/cmplitmushist.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+# comparetest oldpath newpath
+perfect=0
+obsline=0
+noobsline=0
+obsresult=0
+badcompare=0
+comparetest () {
+ grep -v 'maxresident)k\|minor)pagefaults\|^Time' $1 > $T/oldout
+ grep -v 'maxresident)k\|minor)pagefaults\|^Time' $2 > $T/newout
+ if cmp -s $T/oldout $T/newout && grep -q '^Observation' $1
+ then
+ echo Exact output match: $2
+ perfect=`expr "$perfect" + 1`
+ return 0
+ fi
+
+ grep '^Observation' $1 > $T/oldout
+ grep '^Observation' $2 > $T/newout
+ if test -s $T/oldout -o -s $T/newout
+ then
+ if cmp -s $T/oldout $T/newout
+ then
+ echo Matching Observation result and counts: $2
+ obsline=`expr "$obsline" + 1`
+ return 0
+ fi
+ else
+ echo Missing Observation line "(e.g., herd7 timeout)": $2
+ noobsline=`expr "$noobsline" + 1`
+ return 0
+ fi
+
+ grep '^Observation' $1 | awk '{ print $3 }' > $T/oldout
+ grep '^Observation' $2 | awk '{ print $3 }' > $T/newout
+ if cmp -s $T/oldout $T/newout
+ then
+ echo Matching Observation Always/Sometimes/Never result: $2
+ obsresult=`expr "$obsresult" + 1`
+ return 0
+ fi
+ echo ' !!!' Result changed: $2
+ badcompare=`expr "$badcompare" + 1`
+ return 1
+}
+
+sed -e 's/^.*$/comparetest &.out &.out.new/' > $T/cmpscript
+. $T/cmpscript > $T/cmpscript.out
+cat $T/cmpscript.out
+
+echo ' ---' Summary: 1>&2
+grep '!!!' $T/cmpscript.out 1>&2
+if test "$perfect" -ne 0
+then
+ echo Exact output matches: $perfect 1>&2
+fi
+if test "$obsline" -ne 0
+then
+ echo Matching Observation result and counts: $obsline 1>&2
+fi
+if test "$noobsline" -ne 0
+then
+ echo Missing Observation line "(e.g., herd7 timeout)": $noobsline 1>&2
+fi
+if test "$obsresult" -ne 0
+then
+ echo Matching Observation Always/Sometimes/Never result: $obsresult 1>&2
+fi
+if test "$badcompare" -ne 0
+then
+ echo "!!!" Result changed: $badcompare 1>&2
+ exit 1
+fi
+
+exit 0
diff --git a/tools/memory-model/scripts/initlitmushist.sh b/tools/memory-model/scripts/initlitmushist.sh
new file mode 100644
index 000000000000..956b6957484d
--- /dev/null
+++ b/tools/memory-model/scripts/initlitmushist.sh
@@ -0,0 +1,68 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Runs the C-language litmus tests matching the specified criteria.
+# Generates the output for each .litmus file into a corresponding
+# .litmus.out file, and does not judge the result.
+#
+# sh initlitmushist.sh
+#
+# Run from the Linux kernel tools/memory-model directory.
+# See scripts/parseargs.sh for list of arguments.
+#
+# This script can consume significant wallclock time and CPU, especially as
+# the value of --procs rises. On a four-core (eight hardware threads)
+# 2.5GHz x86 with a one-minute per-run timeout:
+#
+# --procs wallclock CPU timeouts tests
+# 1 0m11.241s 0m1.086s 0 19
+# 2 1m12.598s 2m8.459s 2 393
+# 3 1m30.007s 6m2.479s 4 2291
+# 4 3m26.042s 18m5.139s 9 3217
+# 5 4m26.661s 23m54.128s 13 3784
+# 6 4m41.900s 26m4.721s 13 4352
+# 7 5m51.463s 35m50.868s 13 4626
+# 8 10m5.235s 68m43.672s 34 5117
+# 9 15m57.80s 105m58.101s 69 5156
+# 10 16m14.13s 103m35.009s 69 5165
+# 20 27m48.55s 198m3.286s 156 5269
+#
+# Increasing the timeout on the 20-process run to five minutes increases
+# the runtime to about 90 minutes with the CPU time rising to about
+# 10 hours. On the other hand, it decreases the number of timeouts to 101.
+#
+# Note that there are historical tests for which herd7 will fail
+# completely, for example, litmus/manual/atomic/C-unlock-wait-00.litmus
+# contains a call to spin_unlock_wait(), which no longer exists in either
+# the kernel or LKMM.
+
+. scripts/parseargs.sh
+
+T=/tmp/initlitmushist.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+if test -d litmus
+then
+ :
+else
+ git clone https://github.com/paulmckrcu/litmus
+ ( cd litmus; git checkout origin/master )
+fi
+
+# Create any new directories that have appeared in the github litmus
+# repo since the last run.
+if test "$LKMM_DESTDIR" != "."
+then
+ find litmus -type d -print |
+ ( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh )
+fi
+
+# Create a list of the C-language litmus tests with no more than the
+# specified number of processes (per the --procs argument).
+find litmus -name '*.litmus' -exec grep -l -m 1 "^C " {} \; > $T/list-C
+xargs < $T/list-C -r grep -L "^P${LKMM_PROCS}" > $T/list-C-short
+
+scripts/runlitmushist.sh < $T/list-C-short
+
+exit 0
diff --git a/tools/memory-model/scripts/judgelitmus.sh b/tools/memory-model/scripts/judgelitmus.sh
new file mode 100644
index 000000000000..0cc63875e395
--- /dev/null
+++ b/tools/memory-model/scripts/judgelitmus.sh
@@ -0,0 +1,78 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Given a .litmus test and the corresponding .litmus.out file, check
+# the .litmus.out file against the "Result:" comment to judge whether
+# the test ran correctly.
+#
+# Usage:
+# judgelitmus.sh file.litmus
+#
+# Run this in the directory containing the memory model, specifying the
+# pathname of the litmus test to check.
+#
+# Copyright IBM Corporation, 2018
+#
+# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+litmus=$1
+
+if test -f "$litmus" -a -r "$litmus"
+then
+ :
+else
+ echo ' --- ' error: \"$litmus\" is not a readable file
+ exit 255
+fi
+if test -f "$LKMM_DESTDIR/$litmus".out -a -r "$LKMM_DESTDIR/$litmus".out
+then
+ :
+else
+ echo ' --- ' error: \"$LKMM_DESTDIR/$litmus\".out is not a readable file
+ exit 255
+fi
+if grep -q '^ \* Result: ' $litmus
+then
+ outcome=`grep -m 1 '^ \* Result: ' $litmus | awk '{ print $3 }'`
+else
+ outcome=specified
+fi
+
+grep '^Observation' $LKMM_DESTDIR/$litmus.out
+if grep -q '^Observation' $LKMM_DESTDIR/$litmus.out
+then
+ :
+else
+ echo ' !!! Verification error' $litmus
+ if ! grep -q '!!!' $LKMM_DESTDIR/$litmus.out
+ then
+ echo ' !!! Verification error' >> $LKMM_DESTDIR/$litmus.out 2>&1
+ fi
+ exit 255
+fi
+if test "$outcome" = DEADLOCK
+then
+ if grep '^Observation' $LKMM_DESTDIR/$litmus.out | grep -q 'Never 0 0$'
+ then
+ ret=0
+ else
+ echo " !!! Unexpected non-$outcome verification" $litmus
+ if ! grep -q '!!!' $LKMM_DESTDIR/$litmus.out
+ then
+ echo " !!! Unexpected non-$outcome verification" >> $LKMM_DESTDIR/$litmus.out 2>&1
+ fi
+ ret=1
+ fi
+elif grep '^Observation' $LKMM_DESTDIR/$litmus.out | grep -q $outcome || test "$outcome" = Maybe
+then
+ ret=0
+else
+ echo " !!! Unexpected non-$outcome verification" $litmus
+ if ! grep -q '!!!' $LKMM_DESTDIR/$litmus.out
+ then
+ echo " !!! Unexpected non-$outcome verification" >> $LKMM_DESTDIR/$litmus.out 2>&1
+ fi
+ ret=1
+fi
+tail -2 $LKMM_DESTDIR/$litmus.out | head -1
+exit $ret
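
As a standalone usage sketch (the test pathname is hypothetical and the LKMM_*
variables are assumed to have been set by scripts/parseargs.sh), a single
already-run test can be judged and its exit status acted upon:

    . scripts/parseargs.sh
    if sh scripts/judgelitmus.sh litmus/auto/C-LB-example.litmus
    then
        echo "verification outcome matches the Result: comment"
    else
        echo "mismatch, missing file, or missing Observation line"
    fi
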
diff --git a/tools/memory-model/scripts/newlitmushist.sh b/tools/memory-model/scripts/newlitmushist.sh
new file mode 100644
index 000000000000..991f8f814881
--- /dev/null
+++ b/tools/memory-model/scripts/newlitmushist.sh
@@ -0,0 +1,61 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Runs the C-language litmus tests matching the specified criteria
+# that do not already have a corresponding .litmus.out file, and does
+# not judge the result.
+#
+# sh newlitmushist.sh
+#
+# Run from the Linux kernel tools/memory-model directory.
+# See scripts/parseargs.sh for list of arguments.
+#
+# Copyright IBM Corporation, 2018
+#
+# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+. scripts/parseargs.sh
+
+T=/tmp/newlitmushist.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+if test -d litmus
+then
+ :
+else
+	echo Run scripts/initlitmushist.sh first: the litmus repo is needed.
+ exit 1
+fi
+
+# Create any new directories that have appeared in the github litmus
+# repo since the last run.
+if test "$LKMM_DESTDIR" != "."
+then
+ find litmus -type d -print |
+ ( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh )
+fi
+
+# Create a list of the C-language litmus tests previously run.
+( cd $LKMM_DESTDIR; find litmus -name '*.litmus.out' -print ) |
+ sed -e 's/\.out$//' |
+ xargs -r grep -L "^P${LKMM_PROCS}"> $T/list-C-already
+
+# Form full list of litmus tests with no more than the specified
+# number of processes (per the --procs argument).
+find litmus -name '*.litmus' -exec grep -l -m 1 "^C " {} \; > $T/list-C-all
+xargs < $T/list-C-all -r grep -L "^P${LKMM_PROCS}" > $T/list-C-short
+
+# Form list of new tests. Note: This does not handle litmus-test deletion!
+sort $T/list-C-already $T/list-C-short | uniq -u > $T/list-C-new
+
+# Form list of litmus tests that have changed since the last run.
+sed < $T/list-C-short -e 's,^.*$,if test & -nt '"$LKMM_DESTDIR"'/&.out; then echo &; fi,' > $T/list-C-script
+sh $T/list-C-script > $T/list-C-newer
+
+# Merge the list of new and of updated litmus tests: These must be (re)run.
+sort -u $T/list-C-new $T/list-C-newer > $T/list-C-needed
+
+scripts/runlitmushist.sh < $T/list-C-needed
+
+exit 0
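
A minimal incremental-workflow sketch, assuming the same arguments are used for
both runs (the directory and limits are illustrative):

    sh scripts/initlitmushist.sh --destdir /tmp/lkmm --procs 5   # full initial run
    # ...later, after new or modified tests appear in the litmus repo...
    sh scripts/newlitmushist.sh --destdir /tmp/lkmm --procs 5    # rerun only what is needed
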
diff --git a/tools/memory-model/scripts/parseargs.sh b/tools/memory-model/scripts/parseargs.sh
new file mode 100644
index 000000000000..859e1d581e05
--- /dev/null
+++ b/tools/memory-model/scripts/parseargs.sh
@@ -0,0 +1,136 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Parse the arguments common to the litmus-test scripts and export the LKMM_* variables.
+#
+# . scripts/parseargs.sh
+#
+# Include into other Linux kernel tools/memory-model scripts.
+#
+# Copyright IBM Corporation, 2018
+#
+# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+T=/tmp/parseargs.sh.$$
+mkdir $T
+
+# Initialize one parameter: initparam name default
+initparam () {
+ echo if test -z '"$'$1'"' > $T/s
+ echo then >> $T/s
+ echo $1='"'$2'"' >> $T/s
+ echo export $1 >> $T/s
+ echo fi >> $T/s
+ echo $1_DEF='$'$1 >> $T/s
+ . $T/s
+}
+
+initparam LKMM_DESTDIR "."
+initparam LKMM_HERD_OPTIONS "-conf linux-kernel.cfg"
+initparam LKMM_JOBS `getconf _NPROCESSORS_ONLN`
+initparam LKMM_PROCS "3"
+initparam LKMM_TIMEOUT "1m"
+
+scriptname=$0
+
+usagehelp () {
+ echo "Usage $scriptname [ arguments ]"
+ echo " --destdir path (place for .litmus.out, default by .litmus)"
+ echo " --herdopts -conf linux-kernel.cfg ..."
+ echo " --jobs N (number of jobs, default one per CPU)"
+ echo " --procs N (litmus tests with at most this many processes)"
+ echo " --timeout N (herd7 timeout (e.g., 10s, 1m, 2hr, 1d, '')"
+ echo "Defaults: --destdir '$LKMM_DESTDIR_DEF' --herdopts '$LKMM_HERD_OPTIONS_DEF' --jobs '$LKMM_JOBS_DEF' --procs '$LKMM_PROCS_DEF' --timeout '$LKMM_TIMEOUT_DEF'"
+ exit 1
+}
+
+usage () {
+ usagehelp 1>&2
+}
+
+# checkarg --argname argtype $# arg mustmatch cannotmatch
+checkarg () {
+ if test $3 -le 1
+ then
+ echo $1 needs argument $2 matching \"$5\"
+ usage
+ fi
+ if echo "$4" | grep -q -e "$5"
+ then
+ :
+ else
+ echo $1 $2 \"$4\" must match \"$5\"
+ usage
+ fi
+ if echo "$4" | grep -q -e "$6"
+ then
+ echo $1 $2 \"$4\" must not match \"$6\"
+ usage
+ fi
+}
+
+while test $# -gt 0
+do
+ case "$1" in
+ --destdir)
+ checkarg --destdir "(path to directory)" "$#" "$2" '.\+' '^--'
+ LKMM_DESTDIR="$2"
+ mkdir $LKMM_DESTDIR > /dev/null 2>&1
+ if ! test -e "$LKMM_DESTDIR"
+ then
+ echo "Cannot create directory --destdir '$LKMM_DESTDIR'"
+ usage
+ fi
+ if test -d "$LKMM_DESTDIR" -a -w "$LKMM_DESTDIR" -a -x "$LKMM_DESTDIR"
+ then
+ :
+ else
+ echo "Directory --destdir '$LKMM_DESTDIR' insufficient permissions to create files"
+ usage
+ fi
+ shift
+ ;;
+ --herdopts|--herdopt)
+ checkarg --destdir "(herd options)" "$#" "$2" '.*' '^--'
+ LKMM_HERD_OPTIONS="$2"
+ shift
+ ;;
+ -j[1-9]*)
+ njobs="`echo $1 | sed -e 's/^-j//'`"
+ trailchars="`echo $njobs | sed -e 's/[0-9]\+\(.*\)$/\1/'`"
+ if test -n "$trailchars"
+ then
+ echo $1 trailing characters "'$trailchars'"
+ usagehelp
+ fi
+ LKMM_JOBS="`echo $njobs | sed -e 's/^\([0-9]\+\).*$/\1/'`"
+ ;;
+ --jobs|--job|-j)
+ checkarg --jobs "(number)" "$#" "$2" '^[1-9][0-9]*$' '^--'
+ LKMM_JOBS="$2"
+ shift
+ ;;
+ --procs|--proc)
+ checkarg --procs "(number)" "$#" "$2" '^[0-9]\+$' '^--'
+ LKMM_PROCS="$2"
+ shift
+ ;;
+ --timeout)
+ checkarg --timeout "(timeout spec)" "$#" "$2" '^\([0-9]\+[smhd]\?\|\)$' '^--'
+ LKMM_TIMEOUT="$2"
+ shift
+ ;;
+ *)
+ echo Unknown argument $1
+ usage
+ ;;
+ esac
+ shift
+done
+if test -z "$LKMM_TIMEOUT"
+then
+ LKMM_TIMEOUT_CMD=""; export LKMM_TIMEOUT_CMD
+else
+ LKMM_TIMEOUT_CMD="timeout $LKMM_TIMEOUT"; export LKMM_TIMEOUT_CMD
+fi
+rm -rf $T
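
A sketch of how a caller uses this file: sourcing it consumes the command-line
arguments and exports the LKMM_* variables, which can then drive herd7 directly
(the test pathname is hypothetical):

    . scripts/parseargs.sh
    echo "jobs=$LKMM_JOBS procs=$LKMM_PROCS timeout=$LKMM_TIMEOUT destdir=$LKMM_DESTDIR"
    $LKMM_TIMEOUT_CMD herd7 $LKMM_HERD_OPTIONS litmus/auto/C-LB-example.litmus
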
diff --git a/tools/memory-model/scripts/runlitmushist.sh b/tools/memory-model/scripts/runlitmushist.sh
new file mode 100644
index 000000000000..e507f5f933d5
--- /dev/null
+++ b/tools/memory-model/scripts/runlitmushist.sh
@@ -0,0 +1,87 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Runs the C-language litmus tests specified on standard input, using up
+# to the specified number of CPUs (defaulting to all of them) and placing
+# the results in the specified directory (defaulting to the same place
+# the litmus test came from).
+#
+# sh runlitmushist.sh
+#
+# Run from the Linux kernel tools/memory-model directory.
+# This script uses environment variables produced by parseargs.sh.
+#
+# Copyright IBM Corporation, 2018
+#
+# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+T=/tmp/runlitmushist.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+if test -d litmus
+then
+ :
+else
+ echo Directory \"litmus\" missing, aborting run.
+ exit 1
+fi
+
+# Prefixes for per-CPU scripts
+for ((i=0;i<$LKMM_JOBS;i++))
+do
+ echo dir="$LKMM_DESTDIR" > $T/$i.sh
+ echo T=$T >> $T/$i.sh
+ echo herdoptions=\"$LKMM_HERD_OPTIONS\" >> $T/$i.sh
+ cat << '___EOF___' >> $T/$i.sh
+ runtest () {
+ echo ' ... ' /usr/bin/time $LKMM_TIMEOUT_CMD herd7 $herdoptions $1 '>' $dir/$1.out '2>&1'
+ if /usr/bin/time $LKMM_TIMEOUT_CMD herd7 $herdoptions $1 > $dir/$1.out 2>&1
+ then
+ if ! grep -q '^Observation ' $dir/$1.out
+ then
+ echo ' !!! Herd failed, no Observation:' $1
+ fi
+ else
+ exitcode=$?
+ if test "$exitcode" -eq 124
+ then
+ exitmsg="timed out"
+ else
+ exitmsg="failed, exit code $exitcode"
+ fi
+ echo ' !!! Herd' ${exitmsg}: $1
+ fi
+ }
+___EOF___
+done
+
+awk -v q="'" -v b='\\' '
+{
+ print "echo `grep " q "^P[0-9]" b "+(" q " " $0 " | tail -1 | sed -e " q "s/^P" b "([0-9]" b "+" b ")(.*$/" b "1/" q "` " $0
+}' | bash |
+sort -k1n |
+awk -v ncpu=$LKMM_JOBS -v t=$T '
+{
+ print "runtest " $2 >> t "/" NR % ncpu ".sh";
+}
+
+END {
+ for (i = 0; i < ncpu; i++) {
+ print "sh " t "/" i ".sh > " t "/" i ".sh.out 2>&1 &";
+ close(t "/" i ".sh");
+ }
+ print "wait";
+}' | sh
+cat $T/*.sh.out
+if grep -q '!!!' $T/*.sh.out
+then
+ echo ' ---' Summary: 1>&2
+ grep '!!!' $T/*.sh.out 1>&2
+ nfail="`grep '!!!' $T/*.sh.out | wc -l`"
+ echo 'Number of failed herd runs (e.g., timeout): ' $nfail 1>&2
+ exit 1
+else
+ echo All runs completed successfully. 1>&2
+ exit 0
+fi
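
Because the script reads litmus-test pathnames from standard input and relies on
the LKMM_* environment variables, a hand-rolled run over a subset of C-language
tests might look like this (the name pattern is illustrative):

    . scripts/parseargs.sh
    find litmus -name 'C-LB*.litmus' | sh scripts/runlitmushist.sh
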
diff --git a/tools/perf/Build b/tools/perf/Build
index e5232d567611..5f392dbb88fc 100644
--- a/tools/perf/Build
+++ b/tools/perf/Build
@@ -46,10 +46,10 @@ CFLAGS_builtin-trace.o += -DSTRACE_GROUPS_DIR="BUILD_STR($(STRACE_GROUPS_DIR_
CFLAGS_builtin-report.o += -DTIPDIR="BUILD_STR($(tipdir_SQ))"
CFLAGS_builtin-report.o += -DDOCDIR="BUILD_STR($(srcdir_SQ)/Documentation)"
-libperf-y += util/
-libperf-y += arch/
-libperf-y += ui/
-libperf-y += scripts/
-libperf-$(CONFIG_TRACE) += trace/beauty/
+perf-y += util/
+perf-y += arch/
+perf-y += ui/
+perf-y += scripts/
+perf-$(CONFIG_TRACE) += trace/beauty/
gtk-y += ui/gtk/
diff --git a/tools/perf/Documentation/perf-c2c.txt b/tools/perf/Documentation/perf-c2c.txt
index 095aebdc5bb7..e6150f21267d 100644
--- a/tools/perf/Documentation/perf-c2c.txt
+++ b/tools/perf/Documentation/perf-c2c.txt
@@ -19,8 +19,11 @@ C2C stands for Cache To Cache.
The perf c2c tool provides means for Shared Data C2C/HITM analysis. It allows
you to track down the cacheline contentions.
-The tool is based on x86's load latency and precise store facility events
-provided by Intel CPUs. These events provide:
+On x86, the tool is based on load latency and precise store facility events
+provided by Intel CPUs. On PowerPC, the tool uses random instruction sampling
+with the thresholding feature.
+
+These events provide:
- memory address of the access
- type of the access (load and store details)
- latency (in cycles) of the load access
@@ -46,7 +49,7 @@ RECORD OPTIONS
-l::
--ldlat::
- Configure mem-loads latency.
+ Configure mem-loads latency. (x86 only)
-k::
--all-kernel::
@@ -119,11 +122,16 @@ Following perf record options are configured by default:
-W,-d,--phys-data,--sample-cpu
Unless specified otherwise with '-e' option, following events are monitored by
-default:
+default on x86:
cpu/mem-loads,ldlat=30/P
cpu/mem-stores/P
+and the following on PowerPC:
+
+ cpu/mem-loads/
+ cpu/mem-stores/
+
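
In other words, on PowerPC a plain 'perf c2c record' behaves roughly like the
following explicit event selection (a sketch; the workload is a placeholder and
on x86 the ldlat-qualified defaults above apply instead):

    perf c2c record -e cpu/mem-loads/,cpu/mem-stores/ ./workload
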
User can pass any 'perf record' option behind '--' mark, like (to enable
callchains and system wide monitoring):
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
index 4ac7775fbc11..86f3dcc15f83 100644
--- a/tools/perf/Documentation/perf-config.txt
+++ b/tools/perf/Documentation/perf-config.txt
@@ -120,6 +120,10 @@ Given a $HOME/.perfconfig like this:
children = true
group = true
+ [llvm]
+ dump-obj = true
+ clang-opt = -g
+
You can hide source code of annotate feature setting the config to false with
% perf config annotate.hide_src_code=true
@@ -553,6 +557,33 @@ trace.*::
trace.show_zeros::
Do not suppress syscall arguments that are equal to zero.
+llvm.*::
+ llvm.clang-path::
+ Path to clang. If omitted, it is searched for in $PATH.
+
+ llvm.clang-bpf-cmd-template::
+ Command line template. The lines below show its default value. Environment
+ variables are used to pass options.
+ "$CLANG_EXEC -D__KERNEL__ $CLANG_OPTIONS $KERNEL_INC_OPTIONS \
+ -Wno-unused-value -Wno-pointer-sign -working-directory \
+ $WORKING_DIR -c $CLANG_SOURCE -target bpf -O2 -o -"
+
+ llvm.clang-opt::
+ Options passed to clang.
+
+ llvm.kbuild-dir::
+ kbuild directory. If not set, use /lib/modules/`uname -r`/build.
+ If set to "" deliberately, skip kernel header auto-detector.
+
+ llvm.kbuild-opts::
+ Options passed to 'make' when detecting kernel header options.
+
+ llvm.dump-obj::
+ Enable perf to dump the BPF object files compiled by LLVM.
+
+ llvm.opts::
+ Options passed to llc.
+
SEE ALSO
--------
linkperf:perf[1]
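
The llvm.* keys can also be set with 'perf config' rather than by editing the
file by hand, in the same way as the annotate example earlier in this page; the
values here are only illustrative:

    % perf config llvm.dump-obj=true
    % perf config llvm.clang-opt=-g
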
diff --git a/tools/perf/Documentation/perf-diff.txt b/tools/perf/Documentation/perf-diff.txt
index a79c84ae61aa..da7809b15cc9 100644
--- a/tools/perf/Documentation/perf-diff.txt
+++ b/tools/perf/Documentation/perf-diff.txt
@@ -118,6 +118,62 @@ OPTIONS
sum of shown entries will be always 100%. "absolute" means it retains
the original value before and after the filter is applied.
+--time::
+ Analyze samples within given time window. It supports time
+ percent with multiple time ranges. Time string is 'a%/n,b%/m,...'
+ or 'a%-b%,c%-d%,...'.
+
+ For example:
+
+ Select the second 10% time slice to diff:
+
+ perf diff --time 10%/2
+
+ Select from 0% to 10% time slice to diff:
+
+ perf diff --time 0%-10%
+
+ Select the first and the second 10% time slices to diff:
+
+ perf diff --time 10%/1,10%/2
+
+ Select from 0% to 10% and 30% to 40% slices to diff:
+
+ perf diff --time 0%-10%,30%-40%
+
+ It also supports analyzing samples within a given time window
+ <start>,<stop>. Times have the format seconds.microseconds. If 'start'
+ is not given (i.e., time string is ',x.y') then analysis starts at
+ the beginning of the file. If stop time is not given (i.e, time
+ string is 'x.y,') then analysis goes to the end of the file. Time string is
+ 'a1.b1,c1.d1:a2.b2,c2.d2'. Use ':' to separate timestamps for different
+ perf.data files.
+
+ For example, we get the timestamp information from 'perf script'.
+
+ perf script -i perf.data.old
+ mgen 13940 [000] 3946.361400: ...
+
+ perf script -i perf.data
+ mgen 13940 [000] 3971.150589 ...
+
+ perf diff --time 3946.361400,:3971.150589,
+
+ It analyzes the perf.data.old from the timestamp 3946.361400 to
+ the end of perf.data.old and analyzes the perf.data from the
+ timestamp 3971.150589 to the end of perf.data.
+
+--cpu:: Only diff samples for the list of CPUs provided. Multiple CPUs can
+ be provided as a comma-separated list with no space: 0,1. Ranges of
+ CPUs are specified with -: 0-2. Default is to report samples on all
+ CPUs.
+
+--pid=::
+ Only diff samples for given process ID (comma separated list).
+
+--tid=::
+ Only diff samples for given thread ID (comma separated list).
+
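
These filters compose with the existing options; for example, a sketch
restricting the comparison to a few CPUs, one process, and the first 10% of each
file (the pid and CPU list are illustrative):

    perf diff --cpu 0-2 --pid 2134 --time 0%-10% perf.data.old perf.data
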
COMPARISON
----------
The comparison is governed by the baseline file. The baseline perf.data
diff --git a/tools/perf/Documentation/perf-mem.txt b/tools/perf/Documentation/perf-mem.txt
index f8d2167cf3e7..199ea0f0a6c0 100644
--- a/tools/perf/Documentation/perf-mem.txt
+++ b/tools/perf/Documentation/perf-mem.txt
@@ -82,7 +82,7 @@ RECORD OPTIONS
Be more verbose (show counter open errors, etc)
--ldlat <n>::
- Specify desired latency for loads event.
+ Specify desired latency for loads event. (x86 only)
In addition, for report all perf report options are valid, and for record
all perf record options.
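
For example, on x86 the load-latency threshold can be raised above its default
as a record option (a sketch; the threshold and workload are illustrative):

    perf mem record --ldlat 50 ./workload
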
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index d232b13ea713..8f0c2be34848 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -88,6 +88,20 @@ OPTIONS
If you want to profile write accesses in [0x1000~1008), just set
'mem:0x1000/8:w'.
+ - a BPF source file (ending in .c) or a precompiled object file (ending
+ in .o) selects one or more BPF events.
+ The BPF program can attach to various perf events based on the ELF section
+ names.
+
+ When processing a '.c' file, perf searches an installed LLVM to compile it
+ into an object file first. Optional clang options can be passed via the
+ '--clang-opt' command line option, e.g.:
+
+ perf record --clang-opt "-DLINUX_VERSION_CODE=0x50000" \
+ -e tests/bpf-script-example.c
+
+ Note: '--clang-opt' must be placed before '--event/-e'.
+
- a group of events surrounded by a pair of brace ("{event1,event2,...}").
Each event is separated by commas and the group should be quoted to
prevent the shell interpretation. You also need to use --group on
@@ -440,6 +454,11 @@ Use <n> control blocks in asynchronous (Posix AIO) trace writing mode (default:
Asynchronous mode is supported only when linking Perf tool with libc library
providing implementation for Posix AIO API.
+--affinity=mode::
+Set the affinity mask of the trace reading thread according to the policy defined by the 'mode' value:
+ node - thread affinity mask is set to NUMA node cpu mask of the processed mmap buffer
+ cpu - thread affinity mask is set to cpu of the processed mmap buffer
+
--all-kernel::
Configure all used events to run in kernel space.
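
For instance, a sketch pinning the trace-reading thread to the NUMA node of each
mmap buffer being processed (the event and workload are illustrative):

    perf record --affinity=node -e cycles -- ./workload
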
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 9e4def08d569..2e19fd7ffe35 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -159,6 +159,12 @@ OPTIONS
the override, and the result of the above is that only S/W and H/W
events are displayed with the given fields.
+ It's possible to add/remove fields only for a specific event type:
+
+ -Fsw:-cpu,-period
+
+ removes cpu and period from software events.
+
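
The same syntax works for the other event classes; for instance, a sketch
removing the same fields from hardware events only:

    perf script -Fhw:-cpu,-period
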
For the 'wildcard' option if a user selected field is invalid for an
event type, a message is displayed to the user that the option is
ignored for that type. For example:
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index 631e687be4eb..fc6e43262c41 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -210,6 +210,14 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
may happen, for instance, when a thread gets migrated to a different CPU
while processing a syscall.
+--map-dump::
+ Dump BPF maps set up by events passed via -e, for instance the augmented_raw_syscalls
+ program living in tools/perf/examples/bpf/augmented_raw_syscalls.c. For now this
+ dumps just boolean map values and integer keys; in time it will print in hex
+ by default and use BTF when available, as well as use the existing 'perf trace'
+ syscall arg beautifiers to pretty-print integer arguments as strings (pid to
+ comm, syscall id to syscall name, etc).
+
PAGEFAULTS
----------
diff --git a/tools/perf/Documentation/perf.data-file-format.txt b/tools/perf/Documentation/perf.data-file-format.txt
index dfb218feaad9..593ef49b273c 100644
--- a/tools/perf/Documentation/perf.data-file-format.txt
+++ b/tools/perf/Documentation/perf.data-file-format.txt
@@ -43,11 +43,10 @@ struct perf_file_section {
Flags section:
-The header is followed by different optional headers, described by the bits set
-in flags. Only headers for which the bit is set are included. Each header
-consists of a perf_file_section located after the initial header.
-The respective perf_file_section points to the data of the additional
-header and defines its size.
+For each of the optional features a perf_file_section is placed after the data
+section if the feature bit is set in the perf_header flags bitset. The
+respective perf_file_section points to the data of the additional header and
+defines its size.
Some headers consist of strings, which are defined like this:
@@ -131,7 +130,7 @@ An uint64_t with the total memory in bytes.
HEADER_CMDLINE = 11,
-A perf_header_string with the perf command line used to collect the data.
+A perf_header_string_list with the perf arg-vector used to collect the data.
HEADER_EVENT_DESC = 12,
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index b441c88cafa1..0f11d5891301 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -109,6 +109,13 @@ FEATURE_CHECK_LDFLAGS-libunwind = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
FEATURE_CHECK_CFLAGS-libunwind-debug-frame = $(LIBUNWIND_CFLAGS)
FEATURE_CHECK_LDFLAGS-libunwind-debug-frame = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
+FEATURE_CHECK_LDFLAGS-libunwind-arm = -lunwind -lunwind-arm
+FEATURE_CHECK_LDFLAGS-libunwind-aarch64 = -lunwind -lunwind-aarch64
+FEATURE_CHECK_LDFLAGS-libunwind-x86 = -lunwind -llzma -lunwind-x86
+FEATURE_CHECK_LDFLAGS-libunwind-x86_64 = -lunwind -llzma -lunwind-x86_64
+
+FEATURE_CHECK_LDFLAGS-libcrypto = -lcrypto
+
ifdef CSINCLUDES
LIBOPENCSD_CFLAGS := -I$(CSINCLUDES)
endif
@@ -218,6 +225,8 @@ FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS)
FEATURE_CHECK_CFLAGS-libpython-version := $(PYTHON_EMBED_CCOPTS)
FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS)
+FEATURE_CHECK_LDFLAGS-libaio = -lrt
+
CFLAGS += -fno-omit-frame-pointer
CFLAGS += -ggdb3
CFLAGS += -funwind-tables
@@ -386,7 +395,8 @@ ifeq ($(feature-setns), 1)
$(call detected,CONFIG_SETNS)
endif
-ifndef NO_CORESIGHT
+ifdef CORESIGHT
+ $(call feature_check,libopencsd)
ifeq ($(feature-libopencsd), 1)
CFLAGS += -DHAVE_CSTRACE_SUPPORT $(LIBOPENCSD_CFLAGS)
LDFLAGS += $(LIBOPENCSD_LDFLAGS)
@@ -482,6 +492,7 @@ endif
ifndef NO_LIBUNWIND
have_libunwind :=
+ $(call feature_check,libunwind-x86)
ifeq ($(feature-libunwind-x86), 1)
$(call detected,CONFIG_LIBUNWIND_X86)
CFLAGS += -DHAVE_LIBUNWIND_X86_SUPPORT
@@ -490,6 +501,7 @@ ifndef NO_LIBUNWIND
have_libunwind = 1
endif
+ $(call feature_check,libunwind-aarch64)
ifeq ($(feature-libunwind-aarch64), 1)
$(call detected,CONFIG_LIBUNWIND_AARCH64)
CFLAGS += -DHAVE_LIBUNWIND_AARCH64_SUPPORT
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 0ee6795d82cc..01f7555fd933 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -102,7 +102,7 @@ include ../scripts/utilities.mak
# When selected, pass LLVM_CONFIG=/path/to/llvm-config to `make' if
# llvm-config is not in $PATH.
#
-# Define NO_CORESIGHT if you do not want support for CoreSight trace decoding.
+# Define CORESIGHT if you DO WANT support for CoreSight trace decoding.
#
# Define NO_AIO if you do not want support of Posix AIO based trace
# streaming for record mode. Currently Posix AIO trace streaming is
@@ -344,9 +344,9 @@ endif
export PERL_PATH
-LIB_FILE=$(OUTPUT)libperf.a
+LIBPERF_A=$(OUTPUT)libperf.a
-PERFLIBS = $(LIB_FILE) $(LIBAPI) $(LIBTRACEEVENT) $(LIBSUBCMD)
+PERFLIBS = $(LIBAPI) $(LIBTRACEEVENT) $(LIBSUBCMD)
ifndef NO_LIBBPF
PERFLIBS += $(LIBBPF)
endif
@@ -549,6 +549,8 @@ JEVENTS_IN := $(OUTPUT)pmu-events/jevents-in.o
PMU_EVENTS_IN := $(OUTPUT)pmu-events/pmu-events-in.o
+LIBPERF_IN := $(OUTPUT)libperf-in.o
+
export JEVENTS
build := -f $(srctree)/tools/build/Makefile.build dir=. obj
@@ -565,9 +567,12 @@ $(JEVENTS): $(JEVENTS_IN)
$(PMU_EVENTS_IN): $(JEVENTS) FORCE
$(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=pmu-events obj=pmu-events
-$(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(PMU_EVENTS_IN) $(LIBTRACEEVENT_DYNAMIC_LIST)
+$(LIBPERF_IN): prepare FORCE
+ $(Q)$(MAKE) $(build)=libperf
+
+$(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(PMU_EVENTS_IN) $(LIBPERF_IN) $(LIBTRACEEVENT_DYNAMIC_LIST)
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS) \
- $(PERF_IN) $(PMU_EVENTS_IN) $(LIBS) -o $@
+ $(PERF_IN) $(PMU_EVENTS_IN) $(LIBPERF_IN) $(LIBS) -o $@
$(GTK_IN): FORCE
$(Q)$(MAKE) $(build)=gtk
@@ -683,12 +688,7 @@ endif
$(patsubst perf-%,%.o,$(PROGRAMS)): $(wildcard */*.h)
-LIBPERF_IN := $(OUTPUT)libperf-in.o
-
-$(LIBPERF_IN): prepare FORCE
- $(Q)$(MAKE) $(build)=libperf
-
-$(LIB_FILE): $(LIBPERF_IN)
+$(LIBPERF_A): $(LIBPERF_IN)
$(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIBPERF_IN) $(LIB_OBJS)
LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ) 'EXTRA_CFLAGS=$(EXTRA_CFLAGS)' 'LDFLAGS=$(LDFLAGS)'
@@ -863,8 +863,8 @@ ifndef NO_LIBPYTHON
$(call QUIET_INSTALL, python-scripts) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'; \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'; \
- $(INSTALL) scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'; \
- $(INSTALL) scripts/python/*.py -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python'; \
+ $(INSTALL) scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -m 644 -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'; \
+ $(INSTALL) scripts/python/*.py -m 644 -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python'; \
$(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'
endif
$(call QUIET_INSTALL, perf_completion-script) \
@@ -910,7 +910,7 @@ python-clean:
$(python-clean)
clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean config-clean fixdep-clean python-clean
- $(call QUIET_CLEAN, core-objs) $(RM) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(LANG_BINDINGS)
+ $(call QUIET_CLEAN, core-objs) $(RM) $(LIBPERF_A) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(LANG_BINDINGS)
$(Q)find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
$(Q)$(RM) $(OUTPUT).config-detected
$(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32 $(OUTPUT)pmu-events/jevents $(OUTPUT)$(LIBJVMTI).so
diff --git a/tools/perf/arch/Build b/tools/perf/arch/Build
index d9b6af837c7d..688818844c11 100644
--- a/tools/perf/arch/Build
+++ b/tools/perf/arch/Build
@@ -1,2 +1,2 @@
-libperf-y += common.o
-libperf-y += $(SRCARCH)/
+perf-y += common.o
+perf-y += $(SRCARCH)/
diff --git a/tools/perf/arch/arm/Build b/tools/perf/arch/arm/Build
index 41bf61da476a..36222e64bbf7 100644
--- a/tools/perf/arch/arm/Build
+++ b/tools/perf/arch/arm/Build
@@ -1,2 +1,2 @@
-libperf-y += util/
-libperf-$(CONFIG_DWARF_UNWIND) += tests/
+perf-y += util/
+perf-$(CONFIG_DWARF_UNWIND) += tests/
diff --git a/tools/perf/arch/arm/tests/Build b/tools/perf/arch/arm/tests/Build
index d9ae2733f9cc..bc8e97380c82 100644
--- a/tools/perf/arch/arm/tests/Build
+++ b/tools/perf/arch/arm/tests/Build
@@ -1,5 +1,5 @@
-libperf-y += regs_load.o
-libperf-y += dwarf-unwind.o
-libperf-y += vectors-page.o
+perf-y += regs_load.o
+perf-y += dwarf-unwind.o
+perf-y += vectors-page.o
-libperf-y += arch-tests.o
+perf-y += arch-tests.o
diff --git a/tools/perf/arch/arm/tests/dwarf-unwind.c b/tools/perf/arch/arm/tests/dwarf-unwind.c
index 9a0242e74cfc..2c35e532bc9a 100644
--- a/tools/perf/arch/arm/tests/dwarf-unwind.c
+++ b/tools/perf/arch/arm/tests/dwarf-unwind.c
@@ -3,6 +3,7 @@
#include "perf_regs.h"
#include "thread.h"
#include "map.h"
+#include "map_groups.h"
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
diff --git a/tools/perf/arch/arm/util/Build b/tools/perf/arch/arm/util/Build
index e64c5f216448..296f0eac5e18 100644
--- a/tools/perf/arch/arm/util/Build
+++ b/tools/perf/arch/arm/util/Build
@@ -1,6 +1,6 @@
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
-libperf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
-libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
+perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-libperf-$(CONFIG_AUXTRACE) += pmu.o auxtrace.o cs-etm.o
+perf-$(CONFIG_AUXTRACE) += pmu.o auxtrace.o cs-etm.o
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index 2f595cd73da6..911426721170 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -5,6 +5,7 @@
*/
#include <api/fs/fs.h>
+#include <linux/bits.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/coresight-pmu.h>
@@ -22,12 +23,10 @@
#include "../../util/thread_map.h"
#include "../../util/cs-etm.h"
+#include <errno.h>
#include <stdlib.h>
#include <sys/stat.h>
-#define ENABLE_SINK_MAX 128
-#define CS_BUS_DEVICE_PATH "/bus/coresight/devices/"
-
struct cs_etm_recording {
struct auxtrace_record itr;
struct perf_pmu *cs_etm_pmu;
@@ -60,10 +59,48 @@ static int cs_etm_parse_snapshot_options(struct auxtrace_record *itr,
return 0;
}
+static int cs_etm_set_sink_attr(struct perf_pmu *pmu,
+ struct perf_evsel *evsel)
+{
+ char msg[BUFSIZ], path[PATH_MAX], *sink;
+ struct perf_evsel_config_term *term;
+ int ret = -EINVAL;
+ u32 hash;
+
+ if (evsel->attr.config2 & GENMASK(31, 0))
+ return 0;
+
+ list_for_each_entry(term, &evsel->config_terms, list) {
+ if (term->type != PERF_EVSEL__CONFIG_TERM_DRV_CFG)
+ continue;
+
+ sink = term->val.drv_cfg;
+ snprintf(path, PATH_MAX, "sinks/%s", sink);
+
+ ret = perf_pmu__scan_file(pmu, path, "%x", &hash);
+ if (ret != 1) {
+ pr_err("failed to set sink \"%s\" on event %s with %d (%s)\n",
+ sink, perf_evsel__name(evsel), errno,
+ str_error_r(errno, msg, sizeof(msg)));
+ return ret;
+ }
+
+ evsel->attr.config2 |= hash;
+ return 0;
+ }
+
+ /*
+ * No sink was provided on the command line - for _now_ treat
+ * this as an error.
+ */
+ return ret;
+}
+
static int cs_etm_recording_options(struct auxtrace_record *itr,
struct perf_evlist *evlist,
struct record_opts *opts)
{
+ int ret;
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
@@ -92,6 +129,10 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
if (!cs_etm_evsel)
return 0;
+ ret = cs_etm_set_sink_attr(cs_etm_pmu, cs_etm_evsel);
+ if (ret)
+ return ret;
+
if (opts->use_clockid) {
pr_err("Cannot use clockid (-k option) with %s\n",
CORESIGHT_ETM_PMU_NAME);
@@ -598,54 +639,3 @@ struct auxtrace_record *cs_etm_record_init(int *err)
out:
return NULL;
}
-
-static FILE *cs_device__open_file(const char *name)
-{
- struct stat st;
- char path[PATH_MAX];
- const char *sysfs;
-
- sysfs = sysfs__mountpoint();
- if (!sysfs)
- return NULL;
-
- snprintf(path, PATH_MAX,
- "%s" CS_BUS_DEVICE_PATH "%s", sysfs, name);
-
- if (stat(path, &st) < 0)
- return NULL;
-
- return fopen(path, "w");
-
-}
-
-static int __printf(2, 3) cs_device__print_file(const char *name, const char *fmt, ...)
-{
- va_list args;
- FILE *file;
- int ret = -EINVAL;
-
- va_start(args, fmt);
- file = cs_device__open_file(name);
- if (file) {
- ret = vfprintf(file, fmt, args);
- fclose(file);
- }
- va_end(args);
- return ret;
-}
-
-int cs_etm_set_drv_config(struct perf_evsel_config_term *term)
-{
- int ret;
- char enable_sink[ENABLE_SINK_MAX];
-
- snprintf(enable_sink, ENABLE_SINK_MAX, "%s/%s",
- term->val.drv_cfg, "enable_sink");
-
- ret = cs_device__print_file(enable_sink, "%d", 1);
- if (ret < 0)
- return ret;
-
- return 0;
-}
diff --git a/tools/perf/arch/arm/util/cs-etm.h b/tools/perf/arch/arm/util/cs-etm.h
index 1a12e64f5127..a3354bda4fe8 100644
--- a/tools/perf/arch/arm/util/cs-etm.h
+++ b/tools/perf/arch/arm/util/cs-etm.h
@@ -7,9 +7,6 @@
#ifndef INCLUDE__PERF_CS_ETM_H__
#define INCLUDE__PERF_CS_ETM_H__
-#include "../../util/evsel.h"
-
struct auxtrace_record *cs_etm_record_init(int *err);
-int cs_etm_set_drv_config(struct perf_evsel_config_term *term);
#endif
diff --git a/tools/perf/arch/arm/util/pmu.c b/tools/perf/arch/arm/util/pmu.c
index e047571e6080..bbc297a7e2e3 100644
--- a/tools/perf/arch/arm/util/pmu.c
+++ b/tools/perf/arch/arm/util/pmu.c
@@ -7,8 +7,8 @@
#include <string.h>
#include <linux/coresight-pmu.h>
#include <linux/perf_event.h>
+#include <linux/string.h>
-#include "cs-etm.h"
#include "arm-spe.h"
#include "../../util/pmu.h"
@@ -19,7 +19,6 @@ struct perf_event_attr
if (!strcmp(pmu->name, CORESIGHT_ETM_PMU_NAME)) {
/* add ETM default config here */
pmu->selectable = true;
- pmu->set_drv_config = cs_etm_set_drv_config;
#if defined(__aarch64__)
} else if (strstarts(pmu->name, ARM_SPE_PMU_NAME)) {
return arm_spe_pmu_default_config(pmu);
diff --git a/tools/perf/arch/arm64/Build b/tools/perf/arch/arm64/Build
index 41bf61da476a..36222e64bbf7 100644
--- a/tools/perf/arch/arm64/Build
+++ b/tools/perf/arch/arm64/Build
@@ -1,2 +1,2 @@
-libperf-y += util/
-libperf-$(CONFIG_DWARF_UNWIND) += tests/
+perf-y += util/
+perf-$(CONFIG_DWARF_UNWIND) += tests/
diff --git a/tools/perf/arch/arm64/annotate/instructions.c b/tools/perf/arch/arm64/annotate/instructions.c
index 76c6345a57d5..8f70a1b282df 100644
--- a/tools/perf/arch/arm64/annotate/instructions.c
+++ b/tools/perf/arch/arm64/annotate/instructions.c
@@ -58,7 +58,7 @@ out_free_source:
}
static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops);
+ struct ins_operands *ops, int max_ins_name);
static struct ins_ops arm64_mov_ops = {
.parse = arm64_mov__parse,
diff --git a/tools/perf/arch/arm64/tests/Build b/tools/perf/arch/arm64/tests/Build
index 883c57ff0c08..41707fea74b3 100644
--- a/tools/perf/arch/arm64/tests/Build
+++ b/tools/perf/arch/arm64/tests/Build
@@ -1,4 +1,4 @@
-libperf-y += regs_load.o
-libperf-y += dwarf-unwind.o
+perf-y += regs_load.o
+perf-y += dwarf-unwind.o
-libperf-y += arch-tests.o
+perf-y += arch-tests.o
diff --git a/tools/perf/arch/arm64/tests/dwarf-unwind.c b/tools/perf/arch/arm64/tests/dwarf-unwind.c
index 5522ce384723..a6a407fa1b8b 100644
--- a/tools/perf/arch/arm64/tests/dwarf-unwind.c
+++ b/tools/perf/arch/arm64/tests/dwarf-unwind.c
@@ -3,6 +3,7 @@
#include "perf_regs.h"
#include "thread.h"
#include "map.h"
+#include "map_groups.h"
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
diff --git a/tools/perf/arch/arm64/util/Build b/tools/perf/arch/arm64/util/Build
index 68f8a8eb3ad0..3cde540d2fcf 100644
--- a/tools/perf/arch/arm64/util/Build
+++ b/tools/perf/arch/arm64/util/Build
@@ -1,10 +1,10 @@
-libperf-y += header.o
-libperf-y += sym-handling.o
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
-libperf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
-libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-y += header.o
+perf-y += sym-handling.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
+perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-libperf-$(CONFIG_AUXTRACE) += ../../arm/util/pmu.o \
+perf-$(CONFIG_AUXTRACE) += ../../arm/util/pmu.o \
../../arm/util/auxtrace.o \
../../arm/util/cs-etm.o \
arm-spe.o
diff --git a/tools/perf/arch/nds32/Build b/tools/perf/arch/nds32/Build
index 54afe4a467e7..e4e5f33c84d8 100644
--- a/tools/perf/arch/nds32/Build
+++ b/tools/perf/arch/nds32/Build
@@ -1 +1 @@
-libperf-y += util/
+perf-y += util/
diff --git a/tools/perf/arch/nds32/util/Build b/tools/perf/arch/nds32/util/Build
index ca623bbf993c..d0bc205fe49a 100644
--- a/tools/perf/arch/nds32/util/Build
+++ b/tools/perf/arch/nds32/util/Build
@@ -1 +1 @@
-libperf-y += header.o
+perf-y += header.o
diff --git a/tools/perf/arch/powerpc/Build b/tools/perf/arch/powerpc/Build
index db52fa22d3a1..a7dd46a5b678 100644
--- a/tools/perf/arch/powerpc/Build
+++ b/tools/perf/arch/powerpc/Build
@@ -1,2 +1,2 @@
-libperf-y += util/
-libperf-y += tests/
+perf-y += util/
+perf-y += tests/
diff --git a/tools/perf/arch/powerpc/tests/Build b/tools/perf/arch/powerpc/tests/Build
index d827ef384b33..3526ab0af9f9 100644
--- a/tools/perf/arch/powerpc/tests/Build
+++ b/tools/perf/arch/powerpc/tests/Build
@@ -1,4 +1,4 @@
-libperf-$(CONFIG_DWARF_UNWIND) += regs_load.o
-libperf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
+perf-$(CONFIG_DWARF_UNWIND) += regs_load.o
+perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
-libperf-y += arch-tests.o
+perf-y += arch-tests.o
diff --git a/tools/perf/arch/powerpc/tests/dwarf-unwind.c b/tools/perf/arch/powerpc/tests/dwarf-unwind.c
index 5f39efef0856..5c178e4a1995 100644
--- a/tools/perf/arch/powerpc/tests/dwarf-unwind.c
+++ b/tools/perf/arch/powerpc/tests/dwarf-unwind.c
@@ -3,6 +3,7 @@
#include "perf_regs.h"
#include "thread.h"
#include "map.h"
+#include "map_groups.h"
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
diff --git a/tools/perf/arch/powerpc/util/Build b/tools/perf/arch/powerpc/util/Build
index 2e6595310420..7cf0b8803097 100644
--- a/tools/perf/arch/powerpc/util/Build
+++ b/tools/perf/arch/powerpc/util/Build
@@ -1,10 +1,11 @@
-libperf-y += header.o
-libperf-y += sym-handling.o
-libperf-y += kvm-stat.o
-libperf-y += perf_regs.o
+perf-y += header.o
+perf-y += sym-handling.o
+perf-y += kvm-stat.o
+perf-y += perf_regs.o
+perf-y += mem-events.o
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
-libperf-$(CONFIG_DWARF) += skip-callchain-idx.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += skip-callchain-idx.o
-libperf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
-libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
+perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
diff --git a/tools/perf/arch/powerpc/util/kvm-stat.c b/tools/perf/arch/powerpc/util/kvm-stat.c
index 596ad6aedaac..f9db341c47b6 100644
--- a/tools/perf/arch/powerpc/util/kvm-stat.c
+++ b/tools/perf/arch/powerpc/util/kvm-stat.c
@@ -3,6 +3,8 @@
#include "util/kvm-stat.h"
#include "util/parse-events.h"
#include "util/debug.h"
+#include "util/evsel.h"
+#include "util/evlist.h"
#include "book3s_hv_exits.h"
#include "book3s_hcalls.h"
diff --git a/tools/perf/arch/powerpc/util/mem-events.c b/tools/perf/arch/powerpc/util/mem-events.c
new file mode 100644
index 000000000000..d08311f04e95
--- /dev/null
+++ b/tools/perf/arch/powerpc/util/mem-events.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "mem-events.h"
+
+/* PowerPC does not support 'ldlat' parameter. */
+char *perf_mem_events__name(int i)
+{
+ if (i == PERF_MEM_EVENTS__LOAD)
+ return (char *) "cpu/mem-loads/";
+
+ return (char *) "cpu/mem-stores/";
+}
diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
index 7c6eeb4633fe..2918bb16c892 100644
--- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c
+++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
@@ -16,6 +16,9 @@
#include "util/thread.h"
#include "util/callchain.h"
#include "util/debug.h"
+#include "util/dso.h"
+#include "util/map.h"
+#include "util/symbol.h"
/*
* When saving the callchain on Power, the kernel conservatively saves
diff --git a/tools/perf/arch/s390/Build b/tools/perf/arch/s390/Build
index 54afe4a467e7..e4e5f33c84d8 100644
--- a/tools/perf/arch/s390/Build
+++ b/tools/perf/arch/s390/Build
@@ -1 +1 @@
-libperf-y += util/
+perf-y += util/
diff --git a/tools/perf/arch/s390/annotate/instructions.c b/tools/perf/arch/s390/annotate/instructions.c
index de0dd66dbb48..89bb8f2c54ce 100644
--- a/tools/perf/arch/s390/annotate/instructions.c
+++ b/tools/perf/arch/s390/annotate/instructions.c
@@ -46,7 +46,7 @@ static int s390_call__parse(struct arch *arch, struct ins_operands *ops,
}
static int call__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops);
+ struct ins_operands *ops, int max_ins_name);
static struct ins_ops s390_call_ops = {
.parse = s390_call__parse,
diff --git a/tools/perf/arch/s390/util/Build b/tools/perf/arch/s390/util/Build
index 4a233683c684..22797f043b84 100644
--- a/tools/perf/arch/s390/util/Build
+++ b/tools/perf/arch/s390/util/Build
@@ -1,9 +1,9 @@
-libperf-y += header.o
-libperf-y += kvm-stat.o
+perf-y += header.o
+perf-y += kvm-stat.o
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
-libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-libperf-y += machine.o
+perf-y += machine.o
-libperf-$(CONFIG_AUXTRACE) += auxtrace.o
+perf-$(CONFIG_AUXTRACE) += auxtrace.o
diff --git a/tools/perf/arch/s390/util/kvm-stat.c b/tools/perf/arch/s390/util/kvm-stat.c
index aaabab5e2830..7e3961a4b292 100644
--- a/tools/perf/arch/s390/util/kvm-stat.c
+++ b/tools/perf/arch/s390/util/kvm-stat.c
@@ -11,6 +11,7 @@
#include <errno.h>
#include "../../util/kvm-stat.h"
+#include "../../util/evsel.h"
#include <asm/sie.h>
define_exit_reasons_table(sie_exit_reasons, sie_intercept_code);
diff --git a/tools/perf/arch/sh/Build b/tools/perf/arch/sh/Build
index 54afe4a467e7..e4e5f33c84d8 100644
--- a/tools/perf/arch/sh/Build
+++ b/tools/perf/arch/sh/Build
@@ -1 +1 @@
-libperf-y += util/
+perf-y += util/
diff --git a/tools/perf/arch/sh/util/Build b/tools/perf/arch/sh/util/Build
index 954e287bbb89..e813e618954b 100644
--- a/tools/perf/arch/sh/util/Build
+++ b/tools/perf/arch/sh/util/Build
@@ -1 +1 @@
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
diff --git a/tools/perf/arch/sparc/Build b/tools/perf/arch/sparc/Build
index 54afe4a467e7..e4e5f33c84d8 100644
--- a/tools/perf/arch/sparc/Build
+++ b/tools/perf/arch/sparc/Build
@@ -1 +1 @@
-libperf-y += util/
+perf-y += util/
diff --git a/tools/perf/arch/sparc/util/Build b/tools/perf/arch/sparc/util/Build
index 954e287bbb89..e813e618954b 100644
--- a/tools/perf/arch/sparc/util/Build
+++ b/tools/perf/arch/sparc/util/Build
@@ -1 +1 @@
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
diff --git a/tools/perf/arch/x86/Build b/tools/perf/arch/x86/Build
index db52fa22d3a1..a7dd46a5b678 100644
--- a/tools/perf/arch/x86/Build
+++ b/tools/perf/arch/x86/Build
@@ -1,2 +1,2 @@
-libperf-y += util/
-libperf-y += tests/
+perf-y += util/
+perf-y += tests/
diff --git a/tools/perf/arch/x86/tests/Build b/tools/perf/arch/x86/tests/Build
index 586849ff83a0..3d83d0c6982d 100644
--- a/tools/perf/arch/x86/tests/Build
+++ b/tools/perf/arch/x86/tests/Build
@@ -1,8 +1,8 @@
-libperf-$(CONFIG_DWARF_UNWIND) += regs_load.o
-libperf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
+perf-$(CONFIG_DWARF_UNWIND) += regs_load.o
+perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
-libperf-y += arch-tests.o
-libperf-y += rdpmc.o
-libperf-y += perf-time-to-tsc.o
-libperf-$(CONFIG_AUXTRACE) += insn-x86.o
-libperf-$(CONFIG_X86_64) += bp-modify.o
+perf-y += arch-tests.o
+perf-y += rdpmc.o
+perf-y += perf-time-to-tsc.o
+perf-$(CONFIG_AUXTRACE) += insn-x86.o
+perf-$(CONFIG_X86_64) += bp-modify.o
diff --git a/tools/perf/arch/x86/tests/dwarf-unwind.c b/tools/perf/arch/x86/tests/dwarf-unwind.c
index 7879df34569a..6ad0a1cedb13 100644
--- a/tools/perf/arch/x86/tests/dwarf-unwind.c
+++ b/tools/perf/arch/x86/tests/dwarf-unwind.c
@@ -3,6 +3,7 @@
#include "perf_regs.h"
#include "thread.h"
#include "map.h"
+#include "map_groups.h"
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
diff --git a/tools/perf/arch/x86/util/Build b/tools/perf/arch/x86/util/Build
index 844b8f335532..7aab0be5fc5f 100644
--- a/tools/perf/arch/x86/util/Build
+++ b/tools/perf/arch/x86/util/Build
@@ -1,18 +1,18 @@
-libperf-y += header.o
-libperf-y += tsc.o
-libperf-y += pmu.o
-libperf-y += kvm-stat.o
-libperf-y += perf_regs.o
-libperf-y += group.o
-libperf-y += machine.o
-libperf-y += event.o
+perf-y += header.o
+perf-y += tsc.o
+perf-y += pmu.o
+perf-y += kvm-stat.o
+perf-y += perf_regs.o
+perf-y += group.o
+perf-y += machine.o
+perf-y += event.o
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
-libperf-$(CONFIG_BPF_PROLOGUE) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_BPF_PROLOGUE) += dwarf-regs.o
-libperf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
-libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
+perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-libperf-$(CONFIG_AUXTRACE) += auxtrace.o
-libperf-$(CONFIG_AUXTRACE) += intel-pt.o
-libperf-$(CONFIG_AUXTRACE) += intel-bts.o
+perf-$(CONFIG_AUXTRACE) += auxtrace.o
+perf-$(CONFIG_AUXTRACE) += intel-pt.o
+perf-$(CONFIG_AUXTRACE) += intel-bts.o
diff --git a/tools/perf/arch/x86/util/kvm-stat.c b/tools/perf/arch/x86/util/kvm-stat.c
index 081353d7b095..865a9762f22e 100644
--- a/tools/perf/arch/x86/util/kvm-stat.c
+++ b/tools/perf/arch/x86/util/kvm-stat.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include "../../util/kvm-stat.h"
+#include "../../util/evsel.h"
#include <asm/svm.h>
#include <asm/vmx.h>
#include <asm/kvm.h>
diff --git a/tools/perf/arch/xtensa/Build b/tools/perf/arch/xtensa/Build
index 54afe4a467e7..e4e5f33c84d8 100644
--- a/tools/perf/arch/xtensa/Build
+++ b/tools/perf/arch/xtensa/Build
@@ -1 +1 @@
-libperf-y += util/
+perf-y += util/
diff --git a/tools/perf/arch/xtensa/util/Build b/tools/perf/arch/xtensa/util/Build
index 954e287bbb89..e813e618954b 100644
--- a/tools/perf/arch/xtensa/util/Build
+++ b/tools/perf/arch/xtensa/util/Build
@@ -1 +1 @@
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index 44195514b19e..98ad783efc69 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -34,6 +34,7 @@
#include <sys/types.h>
#include <linux/kernel.h>
#include <linux/time64.h>
+#include <linux/numa.h>
#include <numa.h>
#include <numaif.h>
@@ -298,7 +299,7 @@ static cpu_set_t bind_to_node(int target_node)
CPU_ZERO(&mask);
- if (target_node == -1) {
+ if (target_node == NUMA_NO_NODE) {
for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
CPU_SET(cpu, &mask);
} else {
@@ -339,7 +340,7 @@ static void bind_to_memnode(int node)
unsigned long nodemask;
int ret;
- if (node == -1)
+ if (node == NUMA_NO_NODE)
return;
BUG_ON(g->p.nr_nodes > (int)sizeof(nodemask)*8);
@@ -1363,7 +1364,7 @@ static void init_thread_data(void)
int cpu;
/* Allow all nodes by default: */
- td->bind_node = -1;
+ td->bind_node = NUMA_NO_NODE;
/* Allow all CPUs by default: */
CPU_ZERO(&td->bind_cpumask);
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 93d679eaf1f4..67f9d9ffacfb 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -27,6 +27,7 @@
#include "util/thread.h"
#include "util/sort.h"
#include "util/hist.h"
+#include "util/map.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/data.h"
@@ -227,7 +228,7 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel,
* the DSO?
*/
if (al->sym != NULL) {
- rb_erase(&al->sym->rb_node,
+ rb_erase_cached(&al->sym->rb_node,
&al->map->dso->symbols);
symbol__delete(al->sym);
dso__reset_find_symbol_cache(al->map->dso);
@@ -305,7 +306,7 @@ static void hists__find_annotations(struct hists *hists,
struct perf_evsel *evsel,
struct perf_annotate *ann)
{
- struct rb_node *nd = rb_first(&hists->entries), *next;
+ struct rb_node *nd = rb_first_cached(&hists->entries), *next;
int key = K_RIGHT;
while (nd) {
@@ -440,7 +441,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
}
if (total_nr_samples == 0) {
- ui__error("The %s file has no samples!\n", session->data->file.path);
+ ui__error("The %s data has no samples!\n", session->data->path);
goto out;
}
@@ -577,7 +578,7 @@ int cmd_annotate(int argc, const char **argv)
if (quiet)
perf_quiet_option();
- data.file.path = input_name;
+ data.path = input_name;
annotate.session = perf_session__new(&data, false, &annotate.tool);
if (annotate.session == NULL)
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index 115110a4796a..10457b10e568 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -416,8 +416,8 @@ int cmd_buildid_cache(int argc, const char **argv)
nsi = nsinfo__new(ns_id);
if (missing_filename) {
- data.file.path = missing_filename;
- data.force = force;
+ data.path = missing_filename;
+ data.force = force;
session = perf_session__new(&data, false, NULL);
if (session == NULL)
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index 78abbe8d9d5f..f403e19488b5 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -52,11 +52,9 @@ static int perf_session__list_build_ids(bool force, bool with_hits)
{
struct perf_session *session;
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = force,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = force,
};
symbol__elf_init();
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index d340d2e42776..9e6cc868bdb4 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -33,6 +33,7 @@
#include "ui/browsers/hists.h"
#include "thread.h"
#include "mem2node.h"
+#include "symbol.h"
struct c2c_hists {
struct hists hists;
@@ -1969,7 +1970,7 @@ static void calc_width(struct c2c_hist_entry *c2c_he)
set_nodestr(c2c_he);
}
-static int filter_cb(struct hist_entry *he)
+static int filter_cb(struct hist_entry *he, void *arg __maybe_unused)
{
struct c2c_hist_entry *c2c_he;
@@ -1986,7 +1987,7 @@ static int filter_cb(struct hist_entry *he)
return 0;
}
-static int resort_cl_cb(struct hist_entry *he)
+static int resort_cl_cb(struct hist_entry *he, void *arg __maybe_unused)
{
struct c2c_hist_entry *c2c_he;
struct c2c_hists *c2c_hists;
@@ -2055,6 +2056,12 @@ static int setup_nodes(struct perf_session *session)
if (!set)
return -ENOMEM;
+ nodes[node] = set;
+
+ /* empty node, skip */
+ if (cpu_map__empty(map))
+ continue;
+
for (cpu = 0; cpu < map->nr; cpu++) {
set_bit(map->map[cpu], set);
@@ -2063,8 +2070,6 @@ static int setup_nodes(struct perf_session *session)
cpu2node[map->map[cpu]] = node;
}
-
- nodes[node] = set;
}
setup_nodes_header();
@@ -2073,7 +2078,7 @@ static int setup_nodes(struct perf_session *session)
#define HAS_HITMS(__h) ((__h)->stats.lcl_hitm || (__h)->stats.rmt_hitm)
-static int resort_hitm_cb(struct hist_entry *he)
+static int resort_hitm_cb(struct hist_entry *he, void *arg __maybe_unused)
{
struct c2c_hist_entry *c2c_he;
c2c_he = container_of(he, struct c2c_hist_entry, he);
@@ -2088,14 +2093,14 @@ static int resort_hitm_cb(struct hist_entry *he)
static int hists__iterate_cb(struct hists *hists, hists__resort_cb_t cb)
{
- struct rb_node *next = rb_first(&hists->entries);
+ struct rb_node *next = rb_first_cached(&hists->entries);
int ret = 0;
while (next) {
struct hist_entry *he;
he = rb_entry(next, struct hist_entry, rb_node);
- ret = cb(he);
+ ret = cb(he, NULL);
if (ret)
break;
next = rb_next(&he->rb_node);
@@ -2215,7 +2220,7 @@ static void print_pareto(FILE *out)
if (WARN_ONCE(ret, "failed to setup sort entries\n"))
return;
- nd = rb_first(&c2c.hists.hists.entries);
+ nd = rb_first_cached(&c2c.hists.hists.entries);
for (; nd; nd = rb_next(nd)) {
struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
@@ -2283,7 +2288,7 @@ static void perf_c2c__hists_fprintf(FILE *out, struct perf_session *session)
static void c2c_browser__update_nr_entries(struct hist_browser *hb)
{
u64 nr_entries = 0;
- struct rb_node *nd = rb_first(&hb->hists->entries);
+ struct rb_node *nd = rb_first_cached(&hb->hists->entries);
while (nd) {
struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
@@ -2343,7 +2348,7 @@ static int perf_c2c__browse_cacheline(struct hist_entry *he)
struct c2c_cacheline_browser *cl_browser;
struct hist_browser *browser;
int key = -1;
- const char help[] =
+ static const char help[] =
" ENTER Toggle callchains (if present) \n"
" n Toggle Node details info \n"
" s Toggle full length of symbol and source line columns \n"
@@ -2424,7 +2429,7 @@ static int perf_c2c__hists_browse(struct hists *hists)
{
struct hist_browser *browser;
int key = -1;
- const char help[] =
+ static const char help[] =
" d Display cacheline details \n"
" ENTER Toggle callchains (if present) \n"
" q Quit \n";
@@ -2749,8 +2754,8 @@ static int perf_c2c__report(int argc, const char **argv)
if (!input_name || !strlen(input_name))
input_name = "perf.data";
- data.file.path = input_name;
- data.force = symbol_conf.force;
+ data.path = input_name;
+ data.force = symbol_conf.force;
err = setup_display(display);
if (err)
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 39db2ee32d48..6e7920793729 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -19,12 +19,21 @@
#include "util/util.h"
#include "util/data.h"
#include "util/config.h"
+#include "util/time-utils.h"
#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <math.h>
+struct perf_diff {
+ struct perf_tool tool;
+ const char *time_str;
+ struct perf_time_interval *ptime_range;
+ int range_size;
+ int range_num;
+};
+
/* Diff command specific HPP columns. */
enum {
PERF_HPP_DIFF__BASELINE,
@@ -74,6 +83,9 @@ static unsigned int sort_compute = 1;
static s64 compute_wdiff_w1;
static s64 compute_wdiff_w2;
+static const char *cpu_list;
+static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
+
enum {
COMPUTE_DELTA,
COMPUTE_RATIO,
@@ -323,22 +335,33 @@ static int formula_fprintf(struct hist_entry *he, struct hist_entry *pair,
return -1;
}
-static int diff__process_sample_event(struct perf_tool *tool __maybe_unused,
+static int diff__process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine)
{
+ struct perf_diff *pdiff = container_of(tool, struct perf_diff, tool);
struct addr_location al;
struct hists *hists = evsel__hists(evsel);
int ret = -1;
+ if (perf_time__ranges_skip_sample(pdiff->ptime_range, pdiff->range_num,
+ sample->time)) {
+ return 0;
+ }
+
if (machine__resolve(machine, &al, sample) < 0) {
pr_warning("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
}
+ if (cpu_list && !test_bit(sample->cpu, cpu_bitmap)) {
+ ret = 0;
+ goto out_put;
+ }
+
if (!hists__add_entry(hists, &al, NULL, NULL, NULL, sample, true)) {
pr_warning("problem incrementing symbol period, skipping event\n");
goto out_put;
@@ -359,17 +382,19 @@ out_put:
return ret;
}
-static struct perf_tool tool = {
- .sample = diff__process_sample_event,
- .mmap = perf_event__process_mmap,
- .mmap2 = perf_event__process_mmap2,
- .comm = perf_event__process_comm,
- .exit = perf_event__process_exit,
- .fork = perf_event__process_fork,
- .lost = perf_event__process_lost,
- .namespaces = perf_event__process_namespaces,
- .ordered_events = true,
- .ordering_requires_timestamps = true,
+static struct perf_diff pdiff = {
+ .tool = {
+ .sample = diff__process_sample_event,
+ .mmap = perf_event__process_mmap,
+ .mmap2 = perf_event__process_mmap2,
+ .comm = perf_event__process_comm,
+ .exit = perf_event__process_exit,
+ .fork = perf_event__process_fork,
+ .lost = perf_event__process_lost,
+ .namespaces = perf_event__process_namespaces,
+ .ordered_events = true,
+ .ordering_requires_timestamps = true,
+ },
};
static struct perf_evsel *evsel_match(struct perf_evsel *evsel,
@@ -429,7 +454,7 @@ get_pair_fmt(struct hist_entry *he, struct diff_hpp_fmt *dfmt)
static void hists__baseline_only(struct hists *hists)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *next;
if (hists__has(hists, need_collapse))
@@ -437,13 +462,13 @@ static void hists__baseline_only(struct hists *hists)
else
root = hists->entries_in;
- next = rb_first(root);
+ next = rb_first_cached(root);
while (next != NULL) {
struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&he->rb_node_in);
if (!hist_entry__next_pair(he)) {
- rb_erase(&he->rb_node_in, root);
+ rb_erase_cached(&he->rb_node_in, root);
hist_entry__delete(he);
}
}
@@ -451,7 +476,7 @@ static void hists__baseline_only(struct hists *hists)
static void hists__precompute(struct hists *hists)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *next;
if (hists__has(hists, need_collapse))
@@ -459,7 +484,7 @@ static void hists__precompute(struct hists *hists)
else
root = hists->entries_in;
- next = rb_first(root);
+ next = rb_first_cached(root);
while (next != NULL) {
struct hist_entry *he, *pair;
struct data__file *d;
@@ -708,7 +733,7 @@ static void data__fprintf(void)
data__for_each_file(i, d)
fprintf(stdout, "# [%d] %s %s\n",
- d->idx, d->data.file.path,
+ d->idx, d->data.path,
!d->idx ? "(Baseline)" : "");
fprintf(stdout, "#\n");
@@ -771,26 +796,127 @@ static void data__free(struct data__file *d)
}
}
+static int abstime_str_dup(char **pstr)
+{
+ char *str = NULL;
+
+ if (pdiff.time_str && strchr(pdiff.time_str, ':')) {
+ str = strdup(pdiff.time_str);
+ if (!str)
+ return -ENOMEM;
+ }
+
+ *pstr = str;
+ return 0;
+}
+
+static int parse_absolute_time(struct data__file *d, char **pstr)
+{
+ char *p = *pstr;
+ int ret;
+
+ /*
+ * Absolute timestamp for one file has the format: a.b,c.d
+ * For multiple files, the format is: a.b,c.d:a.b,c.d
+ */
+ p = strchr(*pstr, ':');
+ if (p) {
+ if (p == *pstr) {
+ pr_err("Invalid time string\n");
+ return -EINVAL;
+ }
+
+ *p = 0;
+ p++;
+ if (*p == 0) {
+ pr_err("Invalid time string\n");
+ return -EINVAL;
+ }
+ }
+
+ ret = perf_time__parse_for_ranges(*pstr, d->session,
+ &pdiff.ptime_range,
+ &pdiff.range_size,
+ &pdiff.range_num);
+ if (ret < 0)
+ return ret;
+
+ if (!p || *p == 0)
+ *pstr = NULL;
+ else
+ *pstr = p;
+
+ return ret;
+}
+
+static int parse_percent_time(struct data__file *d)
+{
+ int ret;
+
+ ret = perf_time__parse_for_ranges(pdiff.time_str, d->session,
+ &pdiff.ptime_range,
+ &pdiff.range_size,
+ &pdiff.range_num);
+ return ret;
+}
+
+static int parse_time_str(struct data__file *d, char *abstime_ostr,
+ char **pabstime_tmp)
+{
+ int ret = 0;
+
+ if (abstime_ostr)
+ ret = parse_absolute_time(d, pabstime_tmp);
+ else if (pdiff.time_str)
+ ret = parse_percent_time(d);
+
+ return ret;
+}
+
static int __cmd_diff(void)
{
struct data__file *d;
- int ret = -EINVAL, i;
+ int ret, i;
+ char *abstime_ostr, *abstime_tmp;
+
+ ret = abstime_str_dup(&abstime_ostr);
+ if (ret)
+ return ret;
+
+ abstime_tmp = abstime_ostr;
+ ret = -EINVAL;
data__for_each_file(i, d) {
- d->session = perf_session__new(&d->data, false, &tool);
+ d->session = perf_session__new(&d->data, false, &pdiff.tool);
if (!d->session) {
- pr_err("Failed to open %s\n", d->data.file.path);
+ pr_err("Failed to open %s\n", d->data.path);
ret = -1;
goto out_delete;
}
+ if (pdiff.time_str) {
+ ret = parse_time_str(d, abstime_ostr, &abstime_tmp);
+ if (ret < 0)
+ goto out_delete;
+ }
+
+ if (cpu_list) {
+ ret = perf_session__cpu_bitmap(d->session, cpu_list,
+ cpu_bitmap);
+ if (ret < 0)
+ goto out_delete;
+ }
+
ret = perf_session__process_events(d->session);
if (ret) {
- pr_err("Failed to process %s\n", d->data.file.path);
+ pr_err("Failed to process %s\n", d->data.path);
goto out_delete;
}
perf_evlist__collapse_resort(d->session->evlist);
+
+ if (pdiff.ptime_range)
+ zfree(&pdiff.ptime_range);
}
data_process();
@@ -802,6 +928,13 @@ static int __cmd_diff(void)
}
free(data__files);
+
+ if (pdiff.ptime_range)
+ zfree(&pdiff.ptime_range);
+
+ if (abstime_ostr)
+ free(abstime_ostr);
+
return ret;
}
@@ -849,6 +982,13 @@ static const struct option options[] = {
OPT_UINTEGER('o', "order", &sort_compute, "Specify compute sorting."),
OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
"How to display percentage of filtered entries", parse_filter_percentage),
+ OPT_STRING(0, "time", &pdiff.time_str, "str",
+ "Time span (time percent or absolute timestamp)"),
+ OPT_STRING(0, "cpu", &cpu_list, "cpu", "list of cpus to profile"),
+ OPT_STRING(0, "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
+ "only consider symbols in these pids"),
+ OPT_STRING(0, "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
+ "only consider symbols in these tids"),
OPT_END()
};
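
A hedged usage sketch for the new --time option (file names and timestamps here are hypothetical, not from the patch): the absolute form follows the a.b,c.d[:a.b,c.d] layout described in parse_absolute_time() above, one range per data file with the baseline first, for example

	perf diff --time 1234.567,1234.789:2345.678,2345.890 perf.data.old perf.data

while a percent-of-trace string is handed unchanged to perf_time__parse_for_ranges(), the same helper perf report uses. --cpu, --pid and --tid narrow the considered samples and symbols as their help strings above describe.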
@@ -1289,9 +1429,9 @@ static int data_init(int argc, const char **argv)
data__for_each_file(i, d) {
struct perf_data *data = &d->data;
- data->file.path = use_default ? defaults[i] : argv[i];
- data->mode = PERF_DATA_MODE_READ,
- data->force = force,
+ data->path = use_default ? defaults[i] : argv[i];
+ data->mode = PERF_DATA_MODE_READ,
+ data->force = force,
d->idx = i;
}
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c
index e06e822ce634..6e4f63b0da4a 100644
--- a/tools/perf/builtin-evlist.c
+++ b/tools/perf/builtin-evlist.c
@@ -23,9 +23,7 @@ static int __cmd_evlist(const char *file_name, struct perf_attr_details *details
struct perf_session *session;
struct perf_evsel *pos;
struct perf_data data = {
- .file = {
- .path = file_name,
- },
+ .path = file_name,
.mode = PERF_DATA_MODE_READ,
.force = details->force,
};
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index eda41673c4f3..24086b7f1b14 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -12,6 +12,7 @@
#include "util/color.h"
#include "util/evlist.h"
#include "util/evsel.h"
+#include "util/map.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/debug.h"
@@ -19,6 +20,7 @@
#include "util/data.h"
#include "util/auxtrace.h"
#include "util/jit.h"
+#include "util/symbol.h"
#include "util/thread.h"
#include <subcmd/parse-options.h>
@@ -768,10 +770,8 @@ int cmd_inject(int argc, const char **argv)
.input_name = "-",
.samples = LIST_HEAD_INIT(inject.samples),
.output = {
- .file = {
- .path = "-",
- },
- .mode = PERF_DATA_MODE_WRITE,
+ .path = "-",
+ .mode = PERF_DATA_MODE_WRITE,
},
};
struct perf_data data = {
@@ -784,7 +784,7 @@ int cmd_inject(int argc, const char **argv)
"Inject build-ids into the output stream"),
OPT_STRING('i', "input", &inject.input_name, "file",
"input file name"),
- OPT_STRING('o', "output", &inject.output.file.path, "file",
+ OPT_STRING('o', "output", &inject.output.path, "file",
"output file name"),
OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat,
"Merge sched-stat and sched-switch for getting events "
@@ -832,7 +832,7 @@ int cmd_inject(int argc, const char **argv)
inject.tool.ordered_events = inject.sched_stat;
- data.file.path = inject.input_name;
+ data.path = inject.input_name;
inject.session = perf_session__new(&data, true, &inject.tool);
if (inject.session == NULL)
return -1;
diff --git a/tools/perf/builtin-kallsyms.c b/tools/perf/builtin-kallsyms.c
index 90d1a2305b72..bc7a2bc7aed7 100644
--- a/tools/perf/builtin-kallsyms.c
+++ b/tools/perf/builtin-kallsyms.c
@@ -13,6 +13,7 @@
#include <subcmd/parse-options.h>
#include "debug.h"
#include "machine.h"
+#include "map.h"
#include "symbol.h"
static int __cmd_kallsyms(int argc, const char **argv)
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index b63bca4b0c2a..fa520f4b8095 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -6,6 +6,7 @@
#include "util/evsel.h"
#include "util/util.h"
#include "util/config.h"
+#include "util/map.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
@@ -334,7 +335,7 @@ static int build_alloc_func_list(void)
struct alloc_func *func;
struct machine *machine = &kmem_session->machines.host;
regex_t alloc_func_regex;
- const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?";
+ static const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?";
ret = regcomp(&alloc_func_regex, pattern, REG_EXTENDED);
if (ret) {
@@ -1924,7 +1925,7 @@ int cmd_kmem(int argc, const char **argv)
NULL
};
struct perf_session *session;
- const char errmsg[] = "No %s allocation events found. Have you run 'perf kmem record --%s'?\n";
+ static const char errmsg[] = "No %s allocation events found. Have you run 'perf kmem record --%s'?\n";
int ret = perf_config(kmem_config, NULL);
if (ret)
@@ -1948,7 +1949,7 @@ int cmd_kmem(int argc, const char **argv)
return __cmd_record(argc, argv);
}
- data.file.path = input_name;
+ data.path = input_name;
kmem_session = session = perf_session__new(&data, false, &perf_kmem);
if (session == NULL)
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 3d4cbc4e87c7..dbb6f737a3e2 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -1080,11 +1080,9 @@ static int read_events(struct perf_kvm_stat *kvm)
.ordered_events = true,
};
struct perf_data file = {
- .file = {
- .path = kvm->file_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = kvm->force,
+ .path = kvm->file_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = kvm->force,
};
kvm->tool = eops;
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index ead221e49f00..c9f98d00c0e9 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -82,9 +82,9 @@ int cmd_list(int argc, const char **argv)
else if (strcmp(argv[i], "sdt") == 0)
print_sdt_events(NULL, NULL, raw_dump);
else if (strcmp(argv[i], "metric") == 0)
- metricgroup__print(true, false, NULL, raw_dump);
+ metricgroup__print(true, false, NULL, raw_dump, details_flag);
else if (strcmp(argv[i], "metricgroup") == 0)
- metricgroup__print(false, true, NULL, raw_dump);
+ metricgroup__print(false, true, NULL, raw_dump, details_flag);
else if ((sep = strchr(argv[i], ':')) != NULL) {
int sep_idx;
@@ -102,7 +102,7 @@ int cmd_list(int argc, const char **argv)
s[sep_idx] = '\0';
print_tracepoint_events(s, s + sep_idx + 1, raw_dump);
print_sdt_events(s, s + sep_idx + 1, raw_dump);
- metricgroup__print(true, true, s, raw_dump);
+ metricgroup__print(true, true, s, raw_dump, details_flag);
free(s);
} else {
if (asprintf(&s, "*%s*", argv[i]) < 0) {
@@ -119,7 +119,7 @@ int cmd_list(int argc, const char **argv)
details_flag);
print_tracepoint_events(NULL, s, raw_dump);
print_sdt_events(NULL, s, raw_dump);
- metricgroup__print(true, true, NULL, raw_dump);
+ metricgroup__print(true, true, NULL, raw_dump, details_flag);
free(s);
}
}
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 6e0189df2b3b..b9810a8d350a 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -866,11 +866,9 @@ static int __cmd_report(bool display_info)
.ordered_events = true,
};
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = force,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = force,
};
session = perf_session__new(&data, false, &eops);
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index 57393e94d156..f45c8b502f63 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -13,6 +13,7 @@
#include "util/data.h"
#include "util/mem-events.h"
#include "util/debug.h"
+#include "util/map.h"
#include "util/symbol.h"
#define MEM_OPERATION_LOAD 0x1
@@ -238,11 +239,9 @@ static int process_sample_event(struct perf_tool *tool,
static int report_raw_events(struct perf_mem *mem)
{
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = mem->force,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = mem->force,
};
int ret;
struct perf_session *session = perf_session__new(&data, false,
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 99de91698de1..46d3c2deeb40 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -32,6 +32,7 @@
#include "perf.h"
#include "builtin.h"
+#include "namespaces.h"
#include "util/util.h"
#include "util/strlist.h"
#include "util/strfilter.h"
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 882285fb9f64..f3f7f3100336 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -23,7 +23,6 @@
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
-#include "util/drv_configs.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
@@ -39,8 +38,10 @@
#include "util/bpf-loader.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
+#include "util/cpu-set-sched.h"
#include "util/time-utils.h"
#include "util/units.h"
+#include "util/bpf-event.h"
#include "asm/bug.h"
#include <errno.h>
@@ -81,12 +82,17 @@ struct record {
bool timestamp_boundary;
struct switch_output switch_output;
unsigned long long samples;
+ cpu_set_t affinity_mask;
};
static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);
+static const char *affinity_tags[PERF_AFFINITY_MAX] = {
+ "SYS", "NODE", "CPU"
+};
+
static bool switch_output_signal(struct record *rec)
{
return rec->switch_output.signal &&
@@ -531,9 +537,13 @@ static int record__mmap_evlist(struct record *rec,
struct record_opts *opts = &rec->opts;
char msg[512];
+ if (opts->affinity != PERF_AFFINITY_SYS)
+ cpu__setup_cpunode_map();
+
if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
opts->auxtrace_mmap_pages,
- opts->auxtrace_snapshot_mode, opts->nr_cblocks) < 0) {
+ opts->auxtrace_snapshot_mode,
+ opts->nr_cblocks, opts->affinity) < 0) {
if (errno == EPERM) {
pr_err("Permission error mapping pages.\n"
"Consider increasing "
@@ -566,7 +576,6 @@ static int record__open(struct record *rec)
struct perf_evlist *evlist = rec->evlist;
struct perf_session *session = rec->session;
struct record_opts *opts = &rec->opts;
- struct perf_evsel_config_term *err_term;
int rc = 0;
/*
@@ -619,14 +628,6 @@ try_again:
goto out;
}
- if (perf_evlist__apply_drv_configs(evlist, &pos, &err_term)) {
- pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
- err_term->val.drv_cfg, perf_evsel__name(pos), errno,
- str_error_r(errno, msg, sizeof(msg)));
- rc = -1;
- goto out;
- }
-
rc = record__mmap(rec);
if (rc)
goto out;
@@ -659,10 +660,9 @@ static int process_sample_event(struct perf_tool *tool,
static int process_buildids(struct record *rec)
{
- struct perf_data *data = &rec->data;
struct perf_session *session = rec->session;
- if (data->size == 0)
+ if (perf_data__size(&rec->data) == 0)
return 0;
/*
@@ -722,6 +722,16 @@ static struct perf_event_header finished_round_event = {
.type = PERF_RECORD_FINISHED_ROUND,
};
+static void record__adjust_affinity(struct record *rec, struct perf_mmap *map)
+{
+ if (rec->opts.affinity != PERF_AFFINITY_SYS &&
+ !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
+ CPU_ZERO(&rec->affinity_mask);
+ CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
+ sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
+ }
+}
+
static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
bool overwrite)
{
@@ -749,6 +759,7 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
struct perf_mmap *map = &maps[i];
if (map->base) {
+ record__adjust_affinity(rec, map);
if (!record__aio_enabled(rec)) {
if (perf_mmap__push(map, rec, record__pushfn) != 0) {
rc = -1;
@@ -839,7 +850,7 @@ record__finish_output(struct record *rec)
return;
rec->session->header.data_size += rec->bytes_written;
- data->size = lseek(perf_data__fd(data), 0, SEEK_CUR);
+ data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);
if (!rec->no_buildid) {
process_buildids(rec);
@@ -907,7 +918,7 @@ record__switch_output(struct record *rec, bool at_exit)
if (!quiet)
fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
- data->file.path, timestamp);
+ data->path, timestamp);
/* Output tracking events */
if (!at_exit) {
@@ -1082,6 +1093,11 @@ static int record__synthesize(struct record *rec, bool tail)
return err;
}
+ err = perf_event__synthesize_bpf_events(tool, process_synthesized_event,
+ machine, opts);
+ if (err < 0)
+ pr_warning("Couldn't synthesize bpf events.\n");
+
err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
process_synthesized_event, opts->sample_address,
1);
@@ -1445,7 +1461,7 @@ out_child:
fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
perf_data__size(data) / 1024.0 / 1024.0,
- data->file.path, postfix, samples);
+ data->path, postfix, samples);
}
out_delete_session:
@@ -1639,6 +1655,21 @@ static int parse_clockid(const struct option *opt, const char *str, int unset)
return -1;
}
+static int record__parse_affinity(const struct option *opt, const char *str, int unset)
+{
+ struct record_opts *opts = (struct record_opts *)opt->value;
+
+ if (unset || !str)
+ return 0;
+
+ if (!strcasecmp(str, "node"))
+ opts->affinity = PERF_AFFINITY_NODE;
+ else if (!strcasecmp(str, "cpu"))
+ opts->affinity = PERF_AFFINITY_CPU;
+
+ return 0;
+}
+
static int record__parse_mmap_pages(const struct option *opt,
const char *str,
int unset __maybe_unused)
@@ -1831,7 +1862,7 @@ static struct option __record_options[] = {
OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
"list of cpus to monitor"),
OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
- OPT_STRING('o', "output", &record.data.file.path, "file",
+ OPT_STRING('o', "output", &record.data.path, "file",
"output file name"),
OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
&record.opts.no_inherit_set,
@@ -1839,6 +1870,7 @@ static struct option __record_options[] = {
OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
"synthesize non-sample events at the end of output"),
OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
+ OPT_BOOLEAN(0, "bpf-event", &record.opts.bpf_event, "record bpf events"),
OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
"Fail if the specified frequency can't be used"),
OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
@@ -1946,6 +1978,9 @@ static struct option __record_options[] = {
&nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
record__aio_parse),
#endif
+ OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
+ "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
+ record__parse_affinity),
OPT_END()
};
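
A hedged usage sketch for the new --affinity option (the invocation is assumed, not text from the patch): perf record --affinity=node moves the trace-reading thread onto the CPU mask of the NUMA node that owns the mmap buffer being flushed, --affinity=cpu pins it to that buffer's CPU, and the default (sys) leaves the thread's mask untouched, which is why record__adjust_affinity() above only acts when opts->affinity != PERF_AFFINITY_SYS.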
@@ -1980,6 +2015,9 @@ int cmd_record(int argc, const char **argv)
# undef REASON
#endif
+ CPU_ZERO(&rec->affinity_mask);
+ rec->opts.affinity = PERF_AFFINITY_SYS;
+
rec->evlist = perf_evlist__new();
if (rec->evlist == NULL)
return -ENOMEM;
@@ -2143,6 +2181,8 @@ int cmd_record(int argc, const char **argv)
if (verbose > 0)
pr_info("nr_cblocks: %d\n", rec->opts.nr_cblocks);
+ pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
+
err = __cmd_record(&record, argc, argv);
out:
perf_evlist__delete(rec->evlist);
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 4958095be4fc..ee93c18a6685 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -16,6 +16,7 @@
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/err.h>
+#include "util/map.h"
#include "util/symbol.h"
#include "util/callchain.h"
#include "util/values.h"
@@ -615,6 +616,21 @@ static int report__collapse_hists(struct report *rep)
return ret;
}
+static int hists__resort_cb(struct hist_entry *he, void *arg)
+{
+ struct report *rep = arg;
+ struct symbol *sym = he->ms.sym;
+
+ if (rep->symbol_ipc && sym && !sym->annotate2) {
+ struct perf_evsel *evsel = hists_to_evsel(he->hists);
+
+ symbol__annotate2(sym, he->ms.map, evsel,
+ &annotation__default_options, NULL);
+ }
+
+ return 0;
+}
+
static void report__output_resort(struct report *rep)
{
struct ui_progress prog;
@@ -622,8 +638,10 @@ static void report__output_resort(struct report *rep)
ui_progress__init(&prog, rep->nr_entries, "Sorting events for output...");
- evlist__for_each_entry(rep->session->evlist, pos)
- perf_evsel__output_resort(pos, &prog);
+ evlist__for_each_entry(rep->session->evlist, pos) {
+ perf_evsel__output_resort_cb(pos, &prog,
+ hists__resort_cb, rep);
+ }
ui_progress__finish();
}
@@ -753,7 +771,8 @@ static int tasks_print(struct report *rep, FILE *fp)
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads *threads = &machine->threads[i];
- for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&threads->entries); nd;
+ nd = rb_next(nd)) {
task = tasks + itask++;
task->thread = rb_entry(nd, struct thread, rb_node);
@@ -880,7 +899,7 @@ static int __cmd_report(struct report *rep)
rep->nr_entries += evsel__hists(pos)->nr_entries;
if (rep->nr_entries == 0) {
- ui__error("The %s file has no samples!\n", data->file.path);
+ ui__error("The %s data has no samples!\n", data->path);
return 0;
}
@@ -956,9 +975,9 @@ int cmd_report(int argc, const char **argv)
int branch_mode = -1;
bool branch_call_mode = false;
#define CALLCHAIN_DEFAULT_OPT "graph,0.5,caller,function,percent"
- const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
- CALLCHAIN_REPORT_HELP
- "\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT;
+ static const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
+ CALLCHAIN_REPORT_HELP
+ "\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT;
char callchain_default_opt[] = CALLCHAIN_DEFAULT_OPT;
const char * const report_usage[] = {
"perf report [<options>]",
@@ -1188,8 +1207,8 @@ int cmd_report(int argc, const char **argv)
input_name = "perf.data";
}
- data.file.path = input_name;
- data.force = symbol_conf.force;
+ data.path = input_name;
+ data.force = symbol_conf.force;
repeat:
session = perf_session__new(&data, false, &report.tool);
@@ -1356,36 +1375,13 @@ repeat:
if (symbol__init(&session->header.env) < 0)
goto error;
- report.ptime_range = perf_time__range_alloc(report.time_str,
- &report.range_size);
- if (!report.ptime_range) {
- ret = -ENOMEM;
- goto error;
- }
-
- if (perf_time__parse_str(report.ptime_range, report.time_str) != 0) {
- if (session->evlist->first_sample_time == 0 &&
- session->evlist->last_sample_time == 0) {
- pr_err("HINT: no first/last sample time found in perf data.\n"
- "Please use latest perf binary to execute 'perf record'\n"
- "(if '--buildid-all' is enabled, please set '--timestamp-boundary').\n");
- ret = -EINVAL;
- goto error;
- }
-
- report.range_num = perf_time__percent_parse_str(
- report.ptime_range, report.range_size,
- report.time_str,
- session->evlist->first_sample_time,
- session->evlist->last_sample_time);
-
- if (report.range_num < 0) {
- pr_err("Invalid time string\n");
- ret = -EINVAL;
+ if (report.time_str) {
+ ret = perf_time__parse_for_ranges(report.time_str, session,
+ &report.ptime_range,
+ &report.range_size,
+ &report.range_num);
+ if (ret < 0)
goto error;
- }
- } else {
- report.range_num = 1;
}
if (session->tevent.pevent &&
@@ -1407,7 +1403,8 @@ repeat:
ret = 0;
error:
- zfree(&report.ptime_range);
+ if (report.ptime_range)
+ zfree(&report.ptime_range);
perf_session__delete(session);
return ret;
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index cbf39dab19c1..275f2d92a7bf 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -213,7 +213,7 @@ struct perf_sched {
u64 all_runtime;
u64 all_count;
u64 cpu_last_switched[MAX_CPUS];
- struct rb_root atom_root, sorted_atom_root, merged_atom_root;
+ struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root;
struct list_head sort_list, cmp_pid;
bool force;
bool skip_merge;
@@ -271,7 +271,7 @@ struct evsel_runtime {
struct idle_thread_runtime {
struct thread_runtime tr;
struct thread *last_thread;
- struct rb_root sorted_root;
+ struct rb_root_cached sorted_root;
struct callchain_root callchain;
struct callchain_cursor cursor;
};
@@ -950,10 +950,10 @@ thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *
}
static struct work_atoms *
-thread_atoms_search(struct rb_root *root, struct thread *thread,
+thread_atoms_search(struct rb_root_cached *root, struct thread *thread,
struct list_head *sort_list)
{
- struct rb_node *node = root->rb_node;
+ struct rb_node *node = root->rb_root.rb_node;
struct work_atoms key = { .thread = thread };
while (node) {
@@ -976,10 +976,11 @@ thread_atoms_search(struct rb_root *root, struct thread *thread,
}
static void
-__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
+__thread_latency_insert(struct rb_root_cached *root, struct work_atoms *data,
struct list_head *sort_list)
{
- struct rb_node **new = &(root->rb_node), *parent = NULL;
+ struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
+ bool leftmost = true;
while (*new) {
struct work_atoms *this;
@@ -992,12 +993,14 @@ __thread_latency_insert(struct rb_root *root, struct work_atoms *data,
if (cmp > 0)
new = &((*new)->rb_left);
- else
+ else {
new = &((*new)->rb_right);
+ leftmost = false;
+ }
}
rb_link_node(&data->node, parent, new);
- rb_insert_color(&data->node, root);
+ rb_insert_color_cached(&data->node, root, leftmost);
}
static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
@@ -1447,15 +1450,15 @@ static int sort_dimension__add(const char *tok, struct list_head *list)
static void perf_sched__sort_lat(struct perf_sched *sched)
{
struct rb_node *node;
- struct rb_root *root = &sched->atom_root;
+ struct rb_root_cached *root = &sched->atom_root;
again:
for (;;) {
struct work_atoms *data;
- node = rb_first(root);
+ node = rb_first_cached(root);
if (!node)
break;
- rb_erase(node, root);
+ rb_erase_cached(node, root);
data = rb_entry(node, struct work_atoms, node);
__thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
}
@@ -1782,11 +1785,9 @@ static int perf_sched__read_events(struct perf_sched *sched)
};
struct perf_session *session;
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = sched->force,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = sched->force,
};
int rc = -1;
@@ -2762,12 +2763,12 @@ static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
return ret;
}
-static size_t timehist_print_idlehist_callchain(struct rb_root *root)
+static size_t timehist_print_idlehist_callchain(struct rb_root_cached *root)
{
size_t ret = 0;
FILE *fp = stdout;
struct callchain_node *chain;
- struct rb_node *rb_node = rb_first(root);
+ struct rb_node *rb_node = rb_first_cached(root);
printf(" %16s %8s %s\n", "Idle time (msec)", "Count", "Callchains");
printf(" %.16s %.8s %.50s\n", graph_dotted_line, graph_dotted_line,
@@ -2868,7 +2869,7 @@ static void timehist_print_summary(struct perf_sched *sched,
if (itr == NULL)
continue;
- callchain_param.sort(&itr->sorted_root, &itr->callchain,
+ callchain_param.sort(&itr->sorted_root.rb_root, &itr->callchain,
0, &callchain_param);
printf(" CPU %2d:", i);
@@ -2955,11 +2956,9 @@ static int perf_sched__timehist(struct perf_sched *sched)
{ "sched:sched_migrate_task", timehist_migrate_task_event, },
};
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = sched->force,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = sched->force,
};
struct perf_session *session;
@@ -3074,11 +3073,12 @@ static void print_bad_events(struct perf_sched *sched)
}
}
-static void __merge_work_atoms(struct rb_root *root, struct work_atoms *data)
+static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *data)
{
- struct rb_node **new = &(root->rb_node), *parent = NULL;
+ struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
struct work_atoms *this;
const char *comm = thread__comm_str(data->thread), *this_comm;
+ bool leftmost = true;
while (*new) {
int cmp;
@@ -3092,6 +3092,7 @@ static void __merge_work_atoms(struct rb_root *root, struct work_atoms *data)
new = &((*new)->rb_left);
} else if (cmp < 0) {
new = &((*new)->rb_right);
+ leftmost = false;
} else {
this->num_merged++;
this->total_runtime += data->total_runtime;
@@ -3109,7 +3110,7 @@ static void __merge_work_atoms(struct rb_root *root, struct work_atoms *data)
data->num_merged++;
rb_link_node(&data->node, parent, new);
- rb_insert_color(&data->node, root);
+ rb_insert_color_cached(&data->node, root, leftmost);
}
static void perf_sched__merge_lat(struct perf_sched *sched)
@@ -3120,8 +3121,8 @@ static void perf_sched__merge_lat(struct perf_sched *sched)
if (sched->skip_merge)
return;
- while ((node = rb_first(&sched->atom_root))) {
- rb_erase(node, &sched->atom_root);
+ while ((node = rb_first_cached(&sched->atom_root))) {
+ rb_erase_cached(node, &sched->atom_root);
data = rb_entry(node, struct work_atoms, node);
__merge_work_atoms(&sched->merged_atom_root, data);
}
@@ -3143,7 +3144,7 @@ static int perf_sched__lat(struct perf_sched *sched)
printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n");
printf(" -----------------------------------------------------------------------------------------------------------------\n");
- next = rb_first(&sched->sorted_atom_root);
+ next = rb_first_cached(&sched->sorted_atom_root);
while (next) {
struct work_atoms *work_list;
@@ -3336,7 +3337,7 @@ static int __cmd_record(int argc, const char **argv)
int cmd_sched(int argc, const char **argv)
{
- const char default_sort_order[] = "avg, max, switch, runtime";
+ static const char default_sort_order[] = "avg, max, switch, runtime";
struct perf_sched sched = {
.tool = {
.sample = perf_sched__process_tracepoint_sample,
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index ac221f137ed2..53f78cf3113f 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -10,6 +10,7 @@
#include "util/perf_regs.h"
#include "util/session.h"
#include "util/tool.h"
+#include "util/map.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/trace-event.h"
@@ -148,6 +149,7 @@ static struct {
unsigned int print_ip_opts;
u64 fields;
u64 invalid_fields;
+ u64 user_set_fields;
} output[OUTPUT_TYPE_MAX] = {
[PERF_TYPE_HARDWARE] = {
@@ -344,7 +346,7 @@ static int perf_evsel__do_check_stype(struct perf_evsel *evsel,
if (attr->sample_type & sample_type)
return 0;
- if (output[type].user_set) {
+ if (output[type].user_set_fields & field) {
if (allow_user_set)
return 0;
evname = perf_evsel__name(evsel);
@@ -2559,6 +2561,10 @@ static int parse_output_fields(const struct option *opt __maybe_unused,
pr_warning("Overriding previous field request for %s events.\n",
event_type(type));
+ /* Don't override defaults for +- */
+ if (strchr(tok, '+') || strchr(tok, '-'))
+ goto parse;
+
output[type].fields = 0;
output[type].user_set = true;
output[type].wildcard_set = false;
@@ -2627,10 +2633,13 @@ parse:
pr_warning("\'%s\' not valid for %s events. Ignoring.\n",
all_output_options[i].str, event_type(j));
} else {
- if (change == REMOVE)
+ if (change == REMOVE) {
output[j].fields &= ~all_output_options[i].field;
- else
+ output[j].user_set_fields &= ~all_output_options[i].field;
+ } else {
output[j].fields |= all_output_options[i].field;
+ output[j].user_set_fields |= all_output_options[i].field;
+ }
output[j].user_set = true;
output[j].wildcard_set = true;
}
@@ -2643,6 +2652,10 @@ parse:
rc = -EINVAL;
goto out;
}
+ if (change == REMOVE)
+ output[type].fields &= ~all_output_options[i].field;
+ else
+ output[type].fields |= all_output_options[i].field;
output[type].user_set = true;
output[type].wildcard_set = true;
}
@@ -2942,10 +2955,8 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
DIR *scripts_dir, *lang_dir;
struct perf_session *session;
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
};
char *temp;
int i = 0;
@@ -3418,8 +3429,8 @@ int cmd_script(int argc, const char **argv)
argc = parse_options_subcommand(argc, argv, options, script_subcommands, script_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
- data.file.path = input_name;
- data.force = symbol_conf.force;
+ data.path = input_name;
+ data.force = symbol_conf.force;
if (argc > 1 && !strncmp(argv[0], "rec", strlen("rec"))) {
rec_script_path = get_script_path(argv[1], RECORD_SUFFIX);
@@ -3645,7 +3656,7 @@ int cmd_script(int argc, const char **argv)
goto out_delete;
}
- input = open(data.file.path, O_RDONLY); /* input_name */
+ input = open(data.path, O_RDONLY); /* input_name */
if (input < 0) {
err = -errno;
perror("failed to open file");
@@ -3688,37 +3699,13 @@ int cmd_script(int argc, const char **argv)
if (err < 0)
goto out_delete;
- script.ptime_range = perf_time__range_alloc(script.time_str,
- &script.range_size);
- if (!script.ptime_range) {
- err = -ENOMEM;
- goto out_delete;
- }
-
- /* needs to be parsed after looking up reference time */
- if (perf_time__parse_str(script.ptime_range, script.time_str) != 0) {
- if (session->evlist->first_sample_time == 0 &&
- session->evlist->last_sample_time == 0) {
- pr_err("HINT: no first/last sample time found in perf data.\n"
- "Please use latest perf binary to execute 'perf record'\n"
- "(if '--buildid-all' is enabled, please set '--timestamp-boundary').\n");
- err = -EINVAL;
- goto out_delete;
- }
-
- script.range_num = perf_time__percent_parse_str(
- script.ptime_range, script.range_size,
- script.time_str,
- session->evlist->first_sample_time,
- session->evlist->last_sample_time);
-
- if (script.range_num < 0) {
- pr_err("Invalid time string\n");
- err = -EINVAL;
+ if (script.time_str) {
+ err = perf_time__parse_for_ranges(script.time_str, session,
+ &script.ptime_range,
+ &script.range_size,
+ &script.range_num);
+ if (err < 0)
goto out_delete;
- }
- } else {
- script.range_num = 1;
}
err = __cmd_script(&script);
@@ -3726,7 +3713,8 @@ int cmd_script(int argc, const char **argv)
flush_scripting();
out_delete:
- zfree(&script.ptime_range);
+ if (script.ptime_range)
+ zfree(&script.ptime_range);
perf_evlist__free_stats(session->evlist);
perf_session__delete(session);
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 63a3afc7f32b..7b8f09b0b8bf 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -52,7 +52,6 @@
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
-#include "util/drv_configs.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/header.h"
@@ -83,7 +82,6 @@
#include <unistd.h>
#include <sys/time.h>
#include <sys/resource.h>
-#include <sys/wait.h>
#include "sane_ctype.h"
@@ -418,7 +416,6 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
int status = 0;
const bool forks = (argc > 0);
bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
- struct perf_evsel_config_term *err_term;
if (interval) {
ts.tv_sec = interval / USEC_PER_MSEC;
@@ -515,13 +512,6 @@ try_again:
return -1;
}
- if (perf_evlist__apply_drv_configs(evsel_list, &counter, &err_term)) {
- pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
- err_term->val.drv_cfg, perf_evsel__name(counter), errno,
- str_error_r(errno, msg, sizeof(msg)));
- return -1;
- }
-
if (STAT_RECORD) {
int err, fd = perf_data__fd(&perf_stat.data);
@@ -1332,7 +1322,7 @@ static int __cmd_record(int argc, const char **argv)
PARSE_OPT_STOP_AT_NON_OPTION);
if (output_name)
- data->file.path = output_name;
+ data->path = output_name;
if (stat_config.run_count != 1 || forever) {
pr_err("Cannot use -r option with perf stat record.\n");
@@ -1533,8 +1523,8 @@ static int __cmd_report(int argc, const char **argv)
input_name = "perf.data";
}
- perf_stat.data.file.path = input_name;
- perf_stat.data.mode = PERF_DATA_MODE_READ;
+ perf_stat.data.path = input_name;
+ perf_stat.data.mode = PERF_DATA_MODE_READ;
session = perf_session__new(&perf_stat.data, false, &perf_stat.tool);
if (session == NULL)
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 775b99833e51..9b98687a27b9 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -1602,11 +1602,9 @@ static int __cmd_timechart(struct timechart *tchart, const char *output_name)
{ "syscalls:sys_exit_select", process_exit_poll },
};
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = tchart->force,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = tchart->force,
};
struct perf_session *session = perf_session__new(&data, false,
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index f64e312db787..231a90daa958 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -22,13 +22,14 @@
#include "perf.h"
#include "util/annotate.h"
+#include "util/bpf-event.h"
#include "util/config.h"
#include "util/color.h"
-#include "util/drv_configs.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/event.h"
#include "util/machine.h"
+#include "util/map.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"
@@ -366,7 +367,7 @@ static void perf_top__prompt_symbol(struct perf_top *top, const char *msg)
if (p)
*p = 0;
- next = rb_first(&hists->entries);
+ next = rb_first_cached(&hists->entries);
while (next) {
n = rb_entry(next, struct hist_entry, rb_node);
if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) {
@@ -1184,10 +1185,6 @@ static void init_process_thread(struct perf_top *top)
static int __cmd_top(struct perf_top *top)
{
- char msg[512];
- struct perf_evsel *pos;
- struct perf_evsel_config_term *err_term;
- struct perf_evlist *evlist = top->evlist;
struct record_opts *opts = &top->record_opts;
pthread_t thread, thread_process;
int ret;
@@ -1215,6 +1212,12 @@ static int __cmd_top(struct perf_top *top)
init_process_thread(top);
+ ret = perf_event__synthesize_bpf_events(&top->tool, perf_event__process,
+ &top->session->machines.host,
+ &top->record_opts);
+ if (ret < 0)
+ pr_warning("Couldn't synthesize bpf events.\n");
+
machine__synthesize_threads(&top->session->machines.host, &opts->target,
top->evlist->threads, false,
top->nr_threads_synthesize);
@@ -1232,14 +1235,6 @@ static int __cmd_top(struct perf_top *top)
if (ret)
goto out_delete;
- ret = perf_evlist__apply_drv_configs(evlist, &pos, &err_term);
- if (ret) {
- pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
- err_term->val.drv_cfg, perf_evsel__name(pos), errno,
- str_error_r(errno, msg, sizeof(msg)));
- goto out_delete;
- }
-
top->session->evlist = top->evlist;
perf_session__set_id_hdr_size(top->session);
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index ed4583128b9c..f5b3a1e9c1dd 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -19,6 +19,7 @@
#include <traceevent/event-parse.h>
#include <api/fs/tracing_path.h>
#include <bpf/bpf.h>
+#include "util/bpf_map.h"
#include "builtin.h"
#include "util/cgroup.h"
#include "util/color.h"
@@ -29,6 +30,8 @@
#include "util/evlist.h"
#include <subcmd/exec-cmd.h>
#include "util/machine.h"
+#include "util/map.h"
+#include "util/symbol.h"
#include "util/path.h"
#include "util/session.h"
#include "util/thread.h"
@@ -85,6 +88,9 @@ struct trace {
*augmented;
} events;
} syscalls;
+ struct {
+ struct bpf_map *map;
+ } dump;
struct record_opts opts;
struct perf_evlist *evlist;
struct machine *host;
@@ -1039,6 +1045,9 @@ static const size_t trace__entry_str_size = 2048;
static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
{
+ if (fd < 0)
+ return NULL;
+
if (fd > ttrace->files.max) {
struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));
@@ -2514,19 +2523,30 @@ static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname");
+ bool found = false;
+ struct perf_evsel *evsel, *tmp;
+ struct parse_events_error err = { .idx = 0, };
+ int ret = parse_events(evlist, "probe:vfs_getname*", &err);
- if (IS_ERR(evsel))
+ if (ret)
return false;
- if (perf_evsel__field(evsel, "pathname") == NULL) {
+ evlist__for_each_entry_safe(evlist, evsel, tmp) {
+ if (!strstarts(perf_evsel__name(evsel), "probe:vfs_getname"))
+ continue;
+
+ if (perf_evsel__field(evsel, "pathname")) {
+ evsel->handler = trace__vfs_getname;
+ found = true;
+ continue;
+ }
+
+ list_del_init(&evsel->node);
+ evsel->evlist = NULL;
perf_evsel__delete(evsel);
- return false;
}
- evsel->handler = trace__vfs_getname;
- perf_evlist__add(evlist, evsel);
- return true;
+ return found;
}
static struct perf_evsel *perf_evsel__new_pgfault(u64 config)
@@ -2755,7 +2775,8 @@ static int trace__set_filter_loop_pids(struct trace *trace)
if (parent == NULL)
break;
- if (!strcmp(thread__comm_str(parent), "sshd")) {
+ if (!strcmp(thread__comm_str(parent), "sshd") ||
+ strstarts(thread__comm_str(parent), "gnome-terminal")) {
pids[nr++] = parent->tid;
break;
}
@@ -2980,6 +3001,9 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
if (err < 0)
goto out_error_apply_filters;
+ if (trace->dump.map)
+ bpf_map__fprintf(trace->dump.map, trace->output);
+
err = perf_evlist__mmap(evlist, trace->opts.mmap_pages);
if (err < 0)
goto out_error_mmap;
@@ -3130,11 +3154,9 @@ static int trace__replay(struct trace *trace)
{ "probe:vfs_getname", trace__vfs_getname, },
};
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = trace->force,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = trace->force,
};
struct perf_session *session;
struct perf_evsel *evsel;
@@ -3669,6 +3691,7 @@ int cmd_trace(int argc, const char **argv)
.max_stack = UINT_MAX,
.max_events = ULONG_MAX,
};
+ const char *map_dump_str = NULL;
const char *output_name = NULL;
const struct option trace_options[] = {
OPT_CALLBACK('e', "event", &trace, "event",
@@ -3701,6 +3724,9 @@ int cmd_trace(int argc, const char **argv)
OPT_CALLBACK(0, "duration", &trace, "float",
"show only events with duration > N.M ms",
trace__set_duration),
+#ifdef HAVE_LIBBPF_SUPPORT
+ OPT_STRING(0, "map-dump", &map_dump_str, "BPF map", "BPF map to periodically dump"),
+#endif
OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
OPT_INCR('v', "verbose", &verbose, "be more verbose"),
OPT_BOOLEAN('T', "time", &trace.full_time,
@@ -3795,6 +3821,14 @@ int cmd_trace(int argc, const char **argv)
err = -1;
+ if (map_dump_str) {
+ trace.dump.map = bpf__find_map_by_name(map_dump_str);
+ if (trace.dump.map == NULL) {
+ pr_err("ERROR: BPF map \"%s\" not found\n", map_dump_str);
+ goto out;
+ }
+ }
+
if (trace.trace_pgfaults) {
trace.opts.sample_address = true;
trace.opts.sample_time = true;
@@ -3854,7 +3888,8 @@ int cmd_trace(int argc, const char **argv)
goto init_augmented_syscall_tp;
}
- if (strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_enter") == 0) {
+ if (trace.syscalls.events.augmented->priv == NULL &&
+ strstr(perf_evsel__name(evsel), "syscalls:sys_enter")) {
struct perf_evsel *augmented = trace.syscalls.events.augmented;
if (perf_evsel__init_augmented_syscall_tp(augmented, evsel) ||
perf_evsel__init_augmented_syscall_tp_args(augmented))
diff --git a/tools/perf/design.txt b/tools/perf/design.txt
index a28dca2582aa..0453ba26cdbd 100644
--- a/tools/perf/design.txt
+++ b/tools/perf/design.txt
@@ -222,6 +222,10 @@ The 'exclude_user', 'exclude_kernel' and 'exclude_hv' bits provide a
way to request that counting of events be restricted to times when the
CPU is in user, kernel and/or hypervisor mode.
+Furthermore the 'exclude_host' and 'exclude_guest' bits provide a way
+to request counting of events restricted to guest and host contexts when
+using Linux as the hypervisor.
+
The 'mmap' and 'munmap' bits allow recording of PROT_EXEC mmap/munmap
operations, these can be used to relate userspace IP addresses to actual
code, even after the mapping (or even the whole process) is gone,
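
A minimal user-space sketch of these attribute bits (not part of the patch; it only uses the standard perf_event_open() interface the text describes, and the event choice is illustrative):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

/* Count CPU cycles for the calling task, restricted to guest context. */
static int open_guest_only_cycles(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.exclude_host  = 1;	/* drop host-context counts */
	attr.exclude_guest = 0;	/* keep guest-context counts */

	/* pid = 0 (calling task), cpu = -1 (any CPU), no group, no flags */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}

Setting exclude_guest instead of exclude_host inverts the restriction to host context only.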
diff --git a/tools/perf/examples/bpf/augmented_raw_syscalls.c b/tools/perf/examples/bpf/augmented_raw_syscalls.c
index 53c233370fae..f9b2161e1ca4 100644
--- a/tools/perf/examples/bpf/augmented_raw_syscalls.c
+++ b/tools/perf/examples/bpf/augmented_raw_syscalls.c
@@ -18,23 +18,13 @@
#include <pid_filter.h>
/* bpf-output associated map */
-struct bpf_map SEC("maps") __augmented_syscalls__ = {
- .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(u32),
- .max_entries = __NR_CPUS__,
-};
+bpf_map(__augmented_syscalls__, PERF_EVENT_ARRAY, int, u32, __NR_CPUS__);
struct syscall {
bool enabled;
};
-struct bpf_map SEC("maps") syscalls = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(struct syscall),
- .max_entries = 512,
-};
+bpf_map(syscalls, ARRAY, int, struct syscall, 512);
struct syscall_enter_args {
unsigned long long common_tp_fields;
@@ -141,8 +131,8 @@ int sys_enter(struct syscall_enter_args *args)
len = sizeof(augmented_args.args);
}
- perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, &augmented_args, len);
- return 0;
+ /* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */
+ return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, &augmented_args, len);
}
SEC("raw_syscalls:sys_exit")
diff --git a/tools/perf/examples/bpf/augmented_syscalls.c b/tools/perf/examples/bpf/augmented_syscalls.c
index 2ae44813ef2d..524fdb8534b3 100644
--- a/tools/perf/examples/bpf/augmented_syscalls.c
+++ b/tools/perf/examples/bpf/augmented_syscalls.c
@@ -19,12 +19,8 @@
#include <stdio.h>
#include <linux/socket.h>
-struct bpf_map SEC("maps") __augmented_syscalls__ = {
- .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(u32),
- .max_entries = __NR_CPUS__,
-};
+/* bpf-output associated map */
+bpf_map(__augmented_syscalls__, PERF_EVENT_ARRAY, int, u32, __NR_CPUS__);
struct syscall_exit_args {
unsigned long long common_tp_fields;
@@ -55,9 +51,9 @@ int syscall_enter(syscall)(struct syscall_enter_##syscall##_args *args) \
len -= sizeof(augmented_args.filename.value) - augmented_args.filename.size; \
len &= sizeof(augmented_args.filename.value) - 1; \
} \
- perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
- &augmented_args, len); \
- return 0; \
+ /* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */ \
+ return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
+ &augmented_args, len); \
} \
int syscall_exit(syscall)(struct syscall_exit_args *args) \
{ \
@@ -125,10 +121,10 @@ int syscall_enter(syscall)(struct syscall_enter_##syscall##_args *args) \
/* addrlen = augmented_args.args.addrlen; */ \
/* */ \
probe_read(&augmented_args.addr, addrlen, args->addr_ptr); \
- perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
- &augmented_args, \
- sizeof(augmented_args) - sizeof(augmented_args.addr) + addrlen); \
- return 0; \
+ /* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */ \
+ return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
+ &augmented_args, \
+ sizeof(augmented_args) - sizeof(augmented_args.addr) + addrlen);\
} \
int syscall_exit(syscall)(struct syscall_exit_args *args) \
{ \
diff --git a/tools/perf/examples/bpf/etcsnoop.c b/tools/perf/examples/bpf/etcsnoop.c
index b59e8812ee8c..e81b535346c0 100644
--- a/tools/perf/examples/bpf/etcsnoop.c
+++ b/tools/perf/examples/bpf/etcsnoop.c
@@ -21,12 +21,8 @@
#include <stdio.h>
-struct bpf_map SEC("maps") __augmented_syscalls__ = {
- .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(u32),
- .max_entries = __NR_CPUS__,
-};
+/* bpf-output associated map */
+bpf_map(__augmented_syscalls__, PERF_EVENT_ARRAY, int, u32, __NR_CPUS__);
struct augmented_filename {
int size;
@@ -49,11 +45,11 @@ int syscall_enter(syscall)(struct syscall_enter_##syscall##_args *args) \
args->filename_ptr); \
if (__builtin_memcmp(augmented_args.filename.value, etc, 4) != 0) \
return 0; \
- perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
- &augmented_args, \
- (sizeof(augmented_args) - sizeof(augmented_args.filename.value) + \
- augmented_args.filename.size)); \
- return 0; \
+ /* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */ \
+ return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
+ &augmented_args, \
+ (sizeof(augmented_args) - sizeof(augmented_args.filename.value) + \
+ augmented_args.filename.size)); \
}
struct syscall_enter_openat_args {
diff --git a/tools/perf/include/bpf/bpf.h b/tools/perf/include/bpf/bpf.h
index e667577207dc..2eac6d804b2d 100644
--- a/tools/perf/include/bpf/bpf.h
+++ b/tools/perf/include/bpf/bpf.h
@@ -18,6 +18,20 @@ struct bpf_map {
unsigned int numa_node;
};
+#define bpf_map(name, _type, type_key, type_val, _max_entries) \
+struct bpf_map SEC("maps") name = { \
+ .type = BPF_MAP_TYPE_##_type, \
+ .key_size = sizeof(type_key), \
+ .value_size = sizeof(type_val), \
+ .max_entries = _max_entries, \
+}; \
+struct ____btf_map_##name { \
+ type_key key; \
+ type_val value; \
+}; \
+struct ____btf_map_##name __attribute__((section(".maps." #name), used)) \
+ ____btf_map_##name = { }
+
/*
* FIXME: this should receive .max_entries as a parameter, as careful
* tuning of these limits is needed to avoid hitting limits that
@@ -26,13 +40,7 @@ struct bpf_map {
* For the current need, 'perf trace --filter-pids', 64 should
* be good enough, but this surely needs to be revisited.
*/
-#define pid_map(name, value_type) \
-struct bpf_map SEC("maps") name = { \
- .type = BPF_MAP_TYPE_HASH, \
- .key_size = sizeof(pid_t), \
- .value_size = sizeof(value_type), \
- .max_entries = 64, \
-}
+#define pid_map(name, value_type) bpf_map(name, HASH, pid_t, value_type, 64)
static int (*bpf_map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags) = (void *)BPF_FUNC_map_update_elem;
static void *(*bpf_map_lookup_elem)(struct bpf_map *map, void *key) = (void *)BPF_FUNC_map_lookup_elem;
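
As a sketch of what the new helper produces (hand-expanded here, not text from the patch), bpf_map(syscalls, ARRAY, int, struct syscall, 512) from augmented_raw_syscalls.c above expands to roughly:

struct bpf_map SEC("maps") syscalls = {
	.type        = BPF_MAP_TYPE_ARRAY,
	.key_size    = sizeof(int),
	.value_size  = sizeof(struct syscall),
	.max_entries = 512,
};
struct ____btf_map_syscalls {
	int key;
	struct syscall value;
};
struct ____btf_map_syscalls __attribute__((section(".maps.syscalls"), used))
	____btf_map_syscalls = { };

That is the same open-coded map definition the example scripts used before, plus a key/value shadow struct placed in a .maps.<name> section so type information for the map can be picked up, and pid_map() is now just bpf_map(name, HASH, pid_t, value_type, 64).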
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 388c6dd128b8..b120e547ddc7 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -66,6 +66,7 @@ struct record_opts {
bool ignore_missing_thread;
bool strict_freq;
bool sample_id;
+ bool bpf_event;
unsigned int freq;
unsigned int mmap_pages;
unsigned int auxtrace_mmap_pages;
@@ -83,6 +84,14 @@ struct record_opts {
clockid_t clockid;
u64 clockid_res_ns;
int nr_cblocks;
+ int affinity;
+};
+
+enum perf_affinity {
+ PERF_AFFINITY_SYS = 0,
+ PERF_AFFINITY_NODE,
+ PERF_AFFINITY_CPU,
+ PERF_AFFINITY_MAX
};
struct option;
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/metrics.json b/tools/perf/pmu-events/arch/powerpc/power8/metrics.json
new file mode 100644
index 000000000000..bffb2d4a6420
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power8/metrics.json
@@ -0,0 +1,2245 @@
+[
+ {
+ "BriefDescription": "% of finished branches that were treated as BC+8",
+ "MetricExpr": "PM_BR_BC_8_CONV / PM_BRU_FIN * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "bc_8_branch_ratio_percent"
+ },
+ {
+ "BriefDescription": "% of finished branches that were pairable but not treated as BC+8",
+ "MetricExpr": "PM_BR_BC_8 / PM_BRU_FIN * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "bc_8_not_converted_branch_ratio_percent"
+ },
+ {
+ "BriefDescription": "Percent of mispredicted branches out of all predicted (correctly and incorrectly) branches that completed",
+ "MetricExpr": "PM_BR_MPRED_CMPL / (PM_BR_PRED_BR0 + PM_BR_PRED_BR1) * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "br_misprediction_percent"
+ },
+ {
+ "BriefDescription": "% of Branch miss predictions per instruction",
+ "MetricExpr": "PM_BR_MPRED_CMPL / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "branch_mispredict_rate_percent"
+ },
+ {
+ "BriefDescription": "Count cache branch misprediction per instruction",
+ "MetricExpr": "PM_BR_MPRED_CCACHE / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "ccache_mispredict_rate_percent"
+ },
+ {
+ "BriefDescription": "Percent of count catch mispredictions out of all completed branches that required count cache predictionn",
+ "MetricExpr": "PM_BR_MPRED_CCACHE / (PM_BR_PRED_CCACHE_BR0 + PM_BR_PRED_CCACHE_BR1) * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "ccache_misprediction_percent"
+ },
+ {
+ "BriefDescription": "CR MisPredictions per Instruction",
+ "MetricExpr": "PM_BR_MPRED_CR / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "cr_mispredict_rate_percent"
+ },
+ {
+ "BriefDescription": "Link stack branch misprediction",
+ "MetricExpr": "(PM_BR_MPRED_TA - PM_BR_MPRED_CCACHE) / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "lstack_mispredict_rate_percent"
+ },
+ {
+ "BriefDescription": "Percent of link stack mispredictions out of all completed branches that required link stack prediction",
+ "MetricExpr": "(PM_BR_MPRED_TA - PM_BR_MPRED_CCACHE) / (PM_BR_PRED_LSTACK_BR0 + PM_BR_PRED_LSTACK_BR1) * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "lstack_misprediction_percent"
+ },
+ {
+ "BriefDescription": "TA MisPredictions per Instruction",
+ "MetricExpr": "PM_BR_MPRED_TA / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "ta_mispredict_rate_percent"
+ },
+ {
+ "BriefDescription": "Percent of target address mispredictions out of all completed branches that required address prediction",
+ "MetricExpr": "PM_BR_MPRED_TA / (PM_BR_PRED_CCACHE_BR0 + PM_BR_PRED_CCACHE_BR1 + PM_BR_PRED_LSTACK_BR0 + PM_BR_PRED_LSTACK_BR1) * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "ta_misprediction_percent"
+ },
+ {
+ "BriefDescription": "Percent of branches completed that were taken",
+ "MetricExpr": "PM_BR_TAKEN_CMPL * 100 / PM_BR_CMPL",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "taken_branches_percent"
+ },
+ {
+ "BriefDescription": "Percent of chip+group+sys pumps that were incorrectly predicted",
+ "MetricExpr": "PM_PUMP_MPRED * 100 / (PM_PUMP_CPRED + PM_PUMP_MPRED)",
+ "MetricGroup": "bus_stats",
+ "MetricName": "any_pump_mpred_percent"
+ },
+ {
+ "BriefDescription": "Percent of chip pumps that were correctly predicted as chip pumps the first time",
+ "MetricExpr": "PM_CHIP_PUMP_CPRED * 100 / PM_L2_CHIP_PUMP",
+ "MetricGroup": "bus_stats",
+ "MetricName": "chip_pump_cpred_percent"
+ },
+ {
+ "BriefDescription": "Percent of group pumps that were correctly predicted as group pumps the first time",
+ "MetricExpr": "PM_GRP_PUMP_CPRED * 100 / PM_L2_GROUP_PUMP",
+ "MetricGroup": "bus_stats",
+ "MetricName": "group_pump_cpred_percent"
+ },
+ {
+ "BriefDescription": "Percent of system pumps that were correctly predicted as group pumps the first time",
+ "MetricExpr": "PM_SYS_PUMP_CPRED * 100 / PM_L2_GROUP_PUMP",
+ "MetricGroup": "bus_stats",
+ "MetricName": "sys_pump_cpred_percent"
+ },
+ {
+ "BriefDescription": "Cycles stalled due to CRU or BRU operations",
+ "MetricExpr": "PM_CMPLU_STALL_BRU_CRU / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "bru_cru_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled due to ISU Branch Operations",
+ "MetricExpr": "PM_CMPLU_STALL_BRU / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "bru_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles in which a Group Completed",
+ "MetricExpr": "PM_GRP_CMPL / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "completion_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by CO queue full",
+ "MetricExpr": "PM_CMPLU_STALL_COQ_FULL / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "coq_full_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled due to CRU Operations",
+ "MetricExpr": "(PM_CMPLU_STALL_BRU_CRU - PM_CMPLU_STALL_BRU) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "cru_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by flushes",
+ "MetricExpr": "PM_CMPLU_STALL_FLUSH / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "flush_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by FXU Multi-Cycle Instructions",
+ "MetricExpr": "PM_CMPLU_STALL_FXLONG / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "fxu_multi_cyc_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by FXU",
+ "MetricExpr": "PM_CMPLU_STALL_FXU / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "fxu_stall_cpi"
+ },
+ {
+ "BriefDescription": "Other cycles stalled by FXU",
+ "MetricExpr": "(PM_CMPLU_STALL_FXU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_FXLONG / PM_RUN_INST_CMPL)",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "fxu_stall_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles GCT empty due to Branch Mispredicts",
+ "MetricExpr": "PM_GCT_NOSLOT_BR_MPRED / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_br_mpred_cpi"
+ },
+ {
+ "BriefDescription": "Cycles GCT empty due to Branch Mispredicts and Icache Misses",
+ "MetricExpr": "PM_GCT_NOSLOT_BR_MPRED_ICMISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_br_mpred_ic_miss_cpi"
+ },
+ {
+ "BriefDescription": "GCT empty cycles",
+ "MetricExpr": "PM_GCT_NOSLOT_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_cpi"
+ },
+ {
+ "BriefDescription": "Cycles GCT empty where dispatch was held",
+ "MetricExpr": "(PM_GCT_NOSLOT_DISP_HELD_MAP + PM_GCT_NOSLOT_DISP_HELD_SRQ + PM_GCT_NOSLOT_DISP_HELD_ISSQ + PM_GCT_NOSLOT_DISP_HELD_OTHER) / PM_RUN_INST_CMPL)",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_disp_held_cpi"
+ },
+ {
+ "BriefDescription": "Cycles GCT empty where dispatch was held due to issue queue",
+ "MetricExpr": "PM_GCT_NOSLOT_DISP_HELD_ISSQ / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_disp_held_issq_cpi"
+ },
+ {
+ "BriefDescription": "Cycles GCT empty where dispatch was held due to maps",
+ "MetricExpr": "PM_GCT_NOSLOT_DISP_HELD_MAP / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_disp_held_map_cpi"
+ },
+ {
+ "BriefDescription": "Cycles GCT empty where dispatch was held due to syncs and other effects",
+ "MetricExpr": "PM_GCT_NOSLOT_DISP_HELD_OTHER / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_disp_held_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles GCT empty where dispatch was held due to SRQ",
+ "MetricExpr": "PM_GCT_NOSLOT_DISP_HELD_SRQ / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_disp_held_srq_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by GCT empty due to Icache misses",
+ "MetricExpr": "PM_GCT_NOSLOT_IC_MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_ic_miss_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by GCT empty due to Icache misses that resolve in the local L2 or L3",
+ "MetricExpr": "(PM_GCT_NOSLOT_IC_MISS - PM_GCT_NOSLOT_IC_L3MISS) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_ic_miss_l2l3_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by GCT empty due to Icache misses that resolve off-chip",
+ "MetricExpr": "PM_GCT_NOSLOT_IC_L3MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_ic_miss_l3miss_cpi"
+ },
+ {
+ "BriefDescription": "Other GCT empty cycles",
+ "MetricExpr": "(PM_GCT_NOSLOT_CYC / PM_RUN_INST_CMPL) - (PM_GCT_NOSLOT_IC_MISS / PM_RUN_INST_CMPL) - (PM_GCT_NOSLOT_BR_MPRED / PM_RUN_INST_CMPL) - (PM_GCT_NOSLOT_BR_MPRED_ICMISS / PM_RUN_INST_CMPL) - ((PM_GCT_NOSLOT_DISP_HELD_MAP / PM_RUN_INST_CMPL) + (PM_GCT_NOSLOT_DISP_HELD_SRQ / PM_RUN_INST_CMPL) + (PM_GCT_NOSLOT_DISP_HELD_ISSQ / PM_RUN_INST_CMPL) + (PM_GCT_NOSLOT_DISP_HELD_OTHER / PM_RUN_INST_CMPL))",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by heavyweight syncs",
+ "MetricExpr": "PM_CMPLU_STALL_HWSYNC / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "hwsync_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by LSU",
+ "MetricExpr": "PM_CMPLU_STALL_LSU / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses",
+ "MetricExpr": "PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in distant interventions and memory",
+ "MetricExpr": "(PM_CMPLU_STALL_DMISS_L3MISS - PM_CMPLU_STALL_DMISS_LMEM - PM_CMPLU_STALL_DMISS_L21_L31 - PM_CMPLU_STALL_DMISS_REMOTE) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_distant_cpi"
+ },
+ {
+    "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in another core's L2 or L3 on the same chip",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L21_L31 / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_l21l31_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in the local L2 or L3, where there was a conflict",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L2L3_CONFLICT / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_l2l3_conflict_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in the local L2 or L3",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L2L3 / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_l2l3_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in the local L2 or L3, where there was no conflict",
+ "MetricExpr": "(PM_CMPLU_STALL_DMISS_L2L3 - PM_CMPLU_STALL_DMISS_L2L3_CONFLICT) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_l2l3_noconflict_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in other core's caches or memory",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L3MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_l3miss_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in local memory or local L4",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_LMEM / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_lmem_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in remote interventions and memory",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_REMOTE / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_remote_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by ERAT Translation rejects",
+ "MetricExpr": "PM_CMPLU_STALL_ERAT_MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_erat_miss_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by LSU load finishes",
+ "MetricExpr": "PM_CMPLU_STALL_LOAD_FINISH / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_ld_fin_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by LHS rejects",
+ "MetricExpr": "PM_CMPLU_STALL_REJECT_LHS / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_lhs_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by LMQ Full rejects",
+ "MetricExpr": "PM_CMPLU_STALL_REJ_LMQ_FULL / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_lmq_full_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by Other LSU Operations",
+ "MetricExpr": "(PM_CMPLU_STALL_LSU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_REJECT / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_STORE / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_LOAD_FINISH / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_ST_FWD / PM_RUN_INST_CMPL)",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by LSU Rejects",
+ "MetricExpr": "PM_CMPLU_STALL_REJECT / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_reject_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by Other LSU Rejects",
+ "MetricExpr": "(PM_CMPLU_STALL_REJECT / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_REJECT_LHS / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_ERAT_MISS / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_REJ_LMQ_FULL / PM_RUN_INST_CMPL)",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_reject_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by LSU store forwarding",
+ "MetricExpr": "PM_CMPLU_STALL_ST_FWD / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_st_fwd_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by LSU Stores",
+ "MetricExpr": "PM_CMPLU_STALL_STORE / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_store_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by lightweight syncs",
+ "MetricExpr": "PM_CMPLU_STALL_LWSYNC / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lwsync_stall_cpi"
+ },
+ {
+ "MetricExpr": "PM_CMPLU_STALL_MEM_ECC_DELAY / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "mem_ecc_delay_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by nops (nothing next to finish)",
+ "MetricExpr": "PM_CMPLU_STALL_NO_NTF / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "no_ntf_stall_cpi"
+ },
+ {
+ "MetricExpr": "PM_NTCG_ALL_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntcg_all_fin_cpi"
+ },
+ {
+ "MetricExpr": "PM_CMPLU_STALL_NTCG_FLUSH / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntcg_flush_cpi"
+ },
+ {
+ "BriefDescription": "Other thread block stall cycles",
+ "MetricExpr": "(PM_CMPLU_STALL_THRD - PM_CMPLU_STALL_LWSYNC - PM_CMPLU_STALL_HWSYNC - PM_CMPLU_STALL_MEM_ECC_DELAY - PM_CMPLU_STALL_FLUSH - PM_CMPLU_STALL_COQ_FULL) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "other_block_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles unaccounted for",
+ "MetricExpr": "(PM_RUN_CYC / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL / PM_RUN_INST_CMPL) - (PM_GCT_NOSLOT_CYC / PM_RUN_INST_CMPL) - (PM_NTCG_ALL_FIN / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_THRD / PM_RUN_INST_CMPL) - (PM_GRP_CMPL / PM_RUN_INST_CMPL)",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "other_cpi"
+ },
+ {
+ "BriefDescription": "Stall cycles unaccounted for",
+ "MetricExpr": "(PM_CMPLU_STALL / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_BRU_CRU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_FXU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_VSU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_LSU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_NTCG_FLUSH / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_NO_NTF / PM_RUN_INST_CMPL)",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Run cycles per run instruction",
+ "MetricExpr": "PM_RUN_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "run_cpi"
+ },
+ {
+ "BriefDescription": "Completion Stall Cycles",
+ "MetricExpr": "PM_CMPLU_STALL / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles a thread was blocked",
+ "MetricExpr": "PM_CMPLU_STALL_THRD / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "thread_block_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by VSU",
+ "MetricExpr": "PM_CMPLU_STALL_VSU / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by other VSU Operations",
+ "MetricExpr": "(PM_CMPLU_STALL_VSU - PM_CMPLU_STALL_VECTOR - PM_CMPLU_STALL_SCALAR) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by VSU Scalar Operations",
+ "MetricExpr": "PM_CMPLU_STALL_SCALAR / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_scalar_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by VSU Scalar Long Operations",
+ "MetricExpr": "PM_CMPLU_STALL_SCALAR_LONG / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_scalar_long_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by Other VSU Scalar Operations",
+ "MetricExpr": "(PM_CMPLU_STALL_SCALAR / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_SCALAR_LONG / PM_RUN_INST_CMPL)",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_scalar_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by VSU Vector Operations",
+ "MetricExpr": "PM_CMPLU_STALL_VECTOR / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_vector_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by VSU Vector Long Operations",
+ "MetricExpr": "PM_CMPLU_STALL_VECTOR_LONG / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_vector_long_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by other VSU Vector Operations",
+ "MetricExpr": "(PM_CMPLU_STALL_VECTOR - PM_CMPLU_STALL_VECTOR_LONG) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_vector_other_cpi"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant L2 or L3 (Modified) per Inst",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_dl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant L2 or L3 (Shared) per Inst",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_dl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant L4 per Inst",
+ "MetricExpr": "PM_DATA_FROM_DL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_dl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_DMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_dmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L21_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l21_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L21_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l21_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L2 load hits per instruction where the L2 experienced a Load-Hit-Store conflict",
+ "MetricExpr": "PM_DATA_FROM_L2_DISP_CONFLICT_LDHITST * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l2_lhs_rate_percent"
+ },
+ {
+    "BriefDescription": "% of DL1 reloads from beyond the L2 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l2_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L2 load hits per instruction where the L2 did not experience a conflict",
+ "MetricExpr": "PM_DATA_FROM_L2_NO_CONFLICT * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l2_no_conflict_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L2 load hits per instruction where the L2 experienced some conflict other than Load-Hit-Store",
+ "MetricExpr": "PM_DATA_FROM_L2_DISP_CONFLICT_OTHER * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l2_other_conflict_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from L2 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3 M state, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L31_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l31_mod_rate_percent"
+ },
+ {
+    "BriefDescription": "% of DL1 reloads from Private L3 S state, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L31_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l31_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L3 load hits per instruction where the load collided with a pending prefetch",
+ "MetricExpr": "PM_DATA_FROM_L3_DISP_CONFLICT * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l3_conflict_rate_percent"
+ },
+ {
+    "BriefDescription": "% of DL1 reloads from beyond the L3 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l3_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L3 load hits per instruction where the L3 did not experience a conflict",
+ "MetricExpr": "PM_DATA_FROM_L3_NO_CONFLICT * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l3_no_conflict_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from L3 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Local L4 per Inst",
+ "MetricExpr": "PM_DATA_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Local Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_rl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_rl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Remote Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_RL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_rl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Remote Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_RMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_rmem_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 demand load misses per run instruction",
+ "MetricExpr": "PM_LD_MISS_L1 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "l1_ld_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 misses that result in a cache reload",
+ "MetricExpr": "PM_L1_DCACHE_RELOAD_VALID * 100 / PM_LD_MISS_L1",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_miss_reloads_percent"
+ },
+ {
+    "BriefDescription": "% of DL1 Reloads from Distant L2 or L3 (Modified)",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_dl2l3_mod_percent"
+ },
+ {
+    "BriefDescription": "% of DL1 Reloads from Distant L2 or L3 (Shared)",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_dl2l3_shr_percent"
+ },
+ {
+    "BriefDescription": "% of DL1 Reloads from Distant L4",
+ "MetricExpr": "PM_DATA_FROM_DL4 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_dl4_percent"
+ },
+ {
+    "BriefDescription": "% of DL1 Reloads from Distant Memory",
+ "MetricExpr": "PM_DATA_FROM_DMEM * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_dmem_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core",
+ "MetricExpr": "PM_DATA_FROM_L21_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l21_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core",
+ "MetricExpr": "PM_DATA_FROM_L21_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l21_shr_percent"
+ },
+ {
+ "BriefDescription": "Percentage of DL1 reloads from L2 with a Load-Hit-Store conflict",
+ "MetricExpr": "PM_DATA_FROM_L2_DISP_CONFLICT_LDHITST * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l2_lhs_percent"
+ },
+ {
+ "BriefDescription": "Percentage of DL1 reloads from L2 with no conflicts",
+ "MetricExpr": "PM_DATA_FROM_L2_NO_CONFLICT * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l2_no_conflict_percent"
+ },
+ {
+ "BriefDescription": "Percentage of DL1 reloads from L2 with some conflict other than Load-Hit-Store",
+ "MetricExpr": "PM_DATA_FROM_L2_DISP_CONFLICT_OTHER * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l2_other_conflict_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from L2",
+ "MetricExpr": "PM_DATA_FROM_L2 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l2_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core",
+ "MetricExpr": "PM_DATA_FROM_L31_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l31_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core",
+ "MetricExpr": "PM_DATA_FROM_L31_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l31_shr_percent"
+ },
+ {
+ "BriefDescription": "Percentage of DL1 reloads from L3 where the load collided with a pending prefetch",
+ "MetricExpr": "PM_DATA_FROM_L3_DISP_CONFLICT * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l3_conflict_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L3 load hits per instruction where the line was brought into the L3 by a prefetch operation",
+ "MetricExpr": "PM_DATA_FROM_L3_MEPF * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l3_mepf_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of DL1 reloads from L3 without conflicts",
+ "MetricExpr": "PM_DATA_FROM_L3_NO_CONFLICT * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l3_no_conflict_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from L3",
+ "MetricExpr": "PM_DATA_FROM_L3 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l3_percent"
+ },
+ {
+    "BriefDescription": "% of DL1 Reloads from Local L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_ll4_percent"
+ },
+ {
+    "BriefDescription": "% of DL1 Reloads from Local Memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_lmem_percent"
+ },
+ {
+    "BriefDescription": "% of DL1 Reloads from Remote L2 or L3 (Modified)",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_rl2l3_mod_percent"
+ },
+ {
+    "BriefDescription": "% of DL1 Reloads from Remote L2 or L3 (Shared)",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_rl2l3_shr_percent"
+ },
+ {
+    "BriefDescription": "% of DL1 Reloads from Remote L4",
+ "MetricExpr": "PM_DATA_FROM_RL4 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_rl4_percent"
+ },
+ {
+    "BriefDescription": "% of DL1 Reloads from Remote Memory",
+ "MetricExpr": "PM_DATA_FROM_RMEM * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_rmem_percent"
+ },
+ {
+ "BriefDescription": "dL1 miss portion of CPI",
+ "MetricExpr": "( (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)/ (PM_RUN_CYC / PM_RUN_INST_CMPL)) * 100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dcache_miss_cpi_percent"
+ },
+ {
+    "BriefDescription": "Estimate of dl2l3 distant MOD miss rates with measured DL2L3 MOD latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_DL2L3_MOD / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_DL2L3_MOD_CYC/ PM_MRK_DATA_FROM_DL2L3_MOD)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dl2l3_mod_cpi_percent"
+ },
+ {
+    "BriefDescription": "Estimate of dl2l3 distant SHR miss rates with measured DL2L3 SHR latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_DL2L3_SHR / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_DL2L3_SHR_CYC/ PM_MRK_DATA_FROM_DL2L3_SHR)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dl2l3_shr_cpi_percent"
+ },
+ {
+    "BriefDescription": "Estimate of distant L4 miss rates with measured DL4 latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_DL4 / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_DL4_CYC/ PM_MRK_DATA_FROM_DL4)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dl4_cpi_percent"
+ },
+ {
+    "BriefDescription": "Estimate of distant memory miss rates with measured DMEM latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_DMEM / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_DMEM_CYC/ PM_MRK_DATA_FROM_DMEM)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dmem_cpi_percent"
+ },
+ {
+    "BriefDescription": "Estimate of dl21 MOD miss rates with measured L21 MOD latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_L21_MOD / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L21_MOD_CYC/ PM_MRK_DATA_FROM_L21_MOD)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l21_mod_cpi_percent"
+ },
+ {
+    "BriefDescription": "Estimate of dl21 SHR miss rates with measured L21 SHR latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_L21_SHR / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L21_SHR_CYC/ PM_MRK_DATA_FROM_L21_SHR)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l21_shr_cpi_percent"
+ },
+ {
+    "BriefDescription": "Estimate of dl2 miss rates with measured L2 latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_L2 / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L2_CYC/ PM_MRK_DATA_FROM_L2)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL) ) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l2_cpi_percent"
+ },
+ {
+    "BriefDescription": "Estimate of dl31 MOD miss rates with measured L31 MOD latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_L31_MOD / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L31_MOD_CYC/ PM_MRK_DATA_FROM_L31_MOD)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l31_mod_cpi_percent"
+ },
+ {
+    "BriefDescription": "Estimate of dl31 SHR miss rates with measured L31 SHR latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_L31_SHR / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L31_SHR_CYC/ PM_MRK_DATA_FROM_L31_SHR)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l31_shr_cpi_percent"
+ },
+ {
+    "BriefDescription": "Estimate of dl3 miss rates with measured L3 latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_L3 / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L3_CYC/ PM_MRK_DATA_FROM_L3)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) * 100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l3_cpi_percent"
+ },
+ {
+    "BriefDescription": "Estimate of Local L4 miss rates with measured LL4 latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_LL4 / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_LL4_CYC/ PM_MRK_DATA_FROM_LL4)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "ll4_cpi_percent"
+ },
+ {
+    "BriefDescription": "Estimate of Local memory miss rates with measured LMEM latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_LMEM / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_LMEM_CYC/ PM_MRK_DATA_FROM_LMEM)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "lmem_cpi_percent"
+ },
+ {
+    "BriefDescription": "Estimate of dl2l3 remote MOD miss rates with measured RL2L3 MOD latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_RL2L3_MOD / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_RL2L3_MOD_CYC/ PM_MRK_DATA_FROM_RL2L3_MOD)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rl2l3_mod_cpi_percent"
+ },
+ {
+    "BriefDescription": "Estimate of dl2l3 shared miss rates with measured RL2L3 SHR latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_RL2L3_SHR / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_RL2L3_SHR_CYC/ PM_MRK_DATA_FROM_RL2L3_SHR)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) * 100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rl2l3_shr_cpi_percent"
+ },
+ {
+    "BriefDescription": "Estimate of remote L4 miss rates with measured RL4 latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_RL4 / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_RL4_CYC/ PM_MRK_DATA_FROM_RL4)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rl4_cpi_percent"
+ },
+ {
+    "BriefDescription": "Estimate of remote memory miss rates with measured RMEM latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_RMEM / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_RMEM_CYC/ PM_MRK_DATA_FROM_RMEM)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rmem_cpi_percent"
+ },
+ {
+ "BriefDescription": "Branch Mispredict flushes per instruction",
+ "MetricExpr": "PM_FLUSH_BR_MPRED / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "general",
+ "MetricName": "br_mpred_flush_rate_percent"
+ },
+ {
+ "BriefDescription": "Cycles per instruction",
+ "MetricExpr": "PM_CYC / PM_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "cpi"
+ },
+ {
+    "BriefDescription": "Percentage of cycles in which a group completed",
+ "MetricExpr": "PM_GRP_CMPL / PM_CYC * 100",
+ "MetricGroup": "general",
+ "MetricName": "cyc_grp_completed_percent"
+ },
+ {
+    "BriefDescription": "Percentage of cycles in which a group was dispatched",
+ "MetricExpr": "PM_1PLUS_PPC_DISP / PM_CYC * 100",
+ "MetricGroup": "general",
+ "MetricName": "cyc_grp_dispatched_percent"
+ },
+ {
+ "BriefDescription": "Cycles per group",
+ "MetricExpr": "PM_CYC / PM_1PLUS_PPC_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "cyc_per_group"
+ },
+ {
+    "BriefDescription": "Dispatch flush rate (per run instruction)(%)",
+ "MetricExpr": "(PM_FLUSH_DISP / PM_RUN_INST_CMPL) * 100",
+ "MetricGroup": "general",
+ "MetricName": "disp_flush_rate_percent"
+ },
+ {
+ "BriefDescription": "% DTLB miss rate per inst",
+ "MetricExpr": "PM_DTLB_MISS / PM_RUN_INST_CMPL *100",
+ "MetricGroup": "general",
+ "MetricName": "dtlb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Flush rate (%)",
+ "MetricExpr": "PM_FLUSH * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "flush_rate_percent"
+ },
+ {
+    "BriefDescription": "GCT slot utilization (11 to 14) as a % of cycles this thread had at least 1 slot valid",
+ "MetricExpr": "PM_GCT_UTIL_11_14_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "gct_util_11to14_slots_percent"
+ },
+ {
+    "BriefDescription": "GCT slot utilization (15 to 17) as a % of cycles this thread had at least 1 slot valid",
+ "MetricExpr": "PM_GCT_UTIL_15_17_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "gct_util_15to17_slots_percent"
+ },
+ {
+    "BriefDescription": "GCT slot utilization 18+ as a % of cycles this thread had at least 1 slot valid",
+ "MetricExpr": "PM_GCT_UTIL_18_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "gct_util_18plus_slots_percent"
+ },
+ {
+    "BriefDescription": "GCT slot utilization (1 to 2) as a % of cycles this thread had at least 1 slot valid",
+ "MetricExpr": "PM_GCT_UTIL_1_2_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "gct_util_1to2_slots_percent"
+ },
+ {
+    "BriefDescription": "GCT slot utilization (3 to 6) as a % of cycles this thread had at least 1 slot valid",
+ "MetricExpr": "PM_GCT_UTIL_3_6_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "gct_util_3to6_slots_percent"
+ },
+ {
+    "BriefDescription": "GCT slot utilization (7 to 10) as a % of cycles this thread had at least 1 slot valid",
+ "MetricExpr": "PM_GCT_UTIL_7_10_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "gct_util_7to10_slots_percent"
+ },
+ {
+ "BriefDescription": "Avg. group size",
+ "MetricExpr": "PM_INST_CMPL / PM_1PLUS_PPC_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "group_size"
+ },
+ {
+ "BriefDescription": "Instructions per group",
+ "MetricExpr": "PM_INST_CMPL / PM_1PLUS_PPC_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "inst_per_group"
+ },
+ {
+    "BriefDescription": "Instructions per cycle",
+ "MetricExpr": "PM_INST_CMPL / PM_CYC",
+ "MetricGroup": "general",
+ "MetricName": "ipc"
+ },
+ {
+ "BriefDescription": "% ITLB miss rate per inst",
+ "MetricExpr": "PM_ITLB_MISS / PM_RUN_INST_CMPL *100",
+ "MetricGroup": "general",
+ "MetricName": "itlb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 load misses per L1 load ref",
+ "MetricExpr": "PM_LD_MISS_L1 / PM_LD_REF_L1 * 100",
+ "MetricGroup": "general",
+ "MetricName": "l1_ld_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 store misses per run instruction",
+ "MetricExpr": "PM_ST_MISS_L1 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l1_st_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 store misses per L1 store ref",
+ "MetricExpr": "PM_ST_MISS_L1 / PM_ST_FIN * 100",
+ "MetricGroup": "general",
+ "MetricName": "l1_st_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "L2 Instruction Miss Rate (per instruction)(%)",
+ "MetricExpr": "PM_INST_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l2_inst_miss_rate_percent"
+ },
+ {
+    "BriefDescription": "L2 demand Load Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DATA_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l2_ld_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L2 PTEG Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DPTEG_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l2_pteg_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L2 store misses per run instruction",
+ "MetricExpr": "PM_ST_MISS_L1 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l2_st_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L3 Instruction Miss Rate (per instruction)(%)",
+ "MetricExpr": "PM_INST_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l3_inst_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L3 demand Load Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DATA_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l3_ld_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L3 PTEG Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DPTEG_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l3_pteg_miss_rate_percent"
+ },
+ {
+    "BriefDescription": "Percentage of cycles that were run cycles",
+ "MetricExpr": "PM_RUN_CYC / PM_CYC*100",
+ "MetricGroup": "general",
+ "MetricName": "run_cycles_percent"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in SMT2 Mode",
+ "MetricExpr": "(PM_RUN_CYC_SMT2_MODE/PM_RUN_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "smt2_cycles_percent"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in SMT4 Mode",
+ "MetricExpr": "(PM_RUN_CYC_SMT4_MODE/PM_RUN_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "smt4_cycles_percent"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in SMT8 Mode",
+ "MetricExpr": "(PM_RUN_CYC_SMT8_MODE/PM_RUN_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "smt8_cycles_percent"
+ },
+ {
+ "BriefDescription": "IPC of all instructions completed by the core while this thread was stalled",
+ "MetricExpr": "PM_CMPLU_STALL_OTHER_CMPL/PM_RUN_CYC",
+ "MetricGroup": "general",
+ "MetricName": "smt_benefit"
+ },
+ {
+ "BriefDescription": "Instruction dispatch-to-completion ratio",
+ "MetricExpr": "PM_INST_DISP / PM_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "speculation"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in Single Thread Mode",
+ "MetricExpr": "(PM_RUN_CYC_ST_MODE/PM_RUN_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "st_cycles_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Modified) per Inst",
+ "MetricExpr": "PM_INST_FROM_DL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Shared) per Inst",
+ "MetricExpr": "PM_INST_FROM_DL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L4 per Inst",
+ "MetricExpr": "PM_INST_FROM_DL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant Memory per Inst",
+ "MetricExpr": "PM_INST_FROM_DMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L21_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l21_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L21_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l21_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L2 per Inst",
+ "MetricExpr": "PM_INST_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3, other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L31_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l31_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3 other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L31_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l31_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L3 per Inst",
+ "MetricExpr": "PM_INST_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local L4 per Inst",
+ "MetricExpr": "PM_INST_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local Memory per Inst",
+ "MetricExpr": "PM_INST_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Modified) per Inst",
+ "MetricExpr": "PM_INST_FROM_RL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Shared) per Inst",
+ "MetricExpr": "PM_INST_FROM_RL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L4 per Inst",
+ "MetricExpr": "PM_INST_FROM_RL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote Memory per Inst",
+ "MetricExpr": "PM_INST_FROM_RMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rmem_rate_percent"
+ },
+ {
+ "BriefDescription": "Instruction Cache Miss Rate (Per run Instruction)(%)",
+ "MetricExpr": "PM_L1_ICACHE_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "l1_inst_miss_rate_percent"
+ },
+ {
+    "BriefDescription": "Branches per instruction",
+ "MetricExpr": "PM_BRU_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "branches_per_inst"
+ },
+ {
+    "BriefDescription": "Fixed point operations per instruction",
+ "MetricExpr": "(PM_FXU0_FIN + PM_FXU1_FIN)/PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fixed_per_inst"
+ },
+ {
+ "BriefDescription": "FXU0 balance",
+ "MetricExpr": "PM_FXU0_FIN / (PM_FXU0_FIN + PM_FXU1_FIN)",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fxu0_balance"
+ },
+ {
+ "BriefDescription": "Fraction of cycles that FXU0 is in use",
+ "MetricExpr": "PM_FXU0_FIN / PM_RUN_CYC",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fxu0_fin"
+ },
+ {
+ "BriefDescription": "FXU0 only Busy",
+ "MetricExpr": "PM_FXU0_BUSY_FXU1_IDLE / PM_CYC",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fxu0_only_busy"
+ },
+ {
+ "BriefDescription": "Fraction of cycles that FXU1 is in use",
+ "MetricExpr": "PM_FXU1_FIN / PM_RUN_CYC",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fxu1_fin"
+ },
+ {
+ "BriefDescription": "FXU1 only Busy",
+ "MetricExpr": "PM_FXU1_BUSY_FXU0_IDLE / PM_CYC",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fxu1_only_busy"
+ },
+ {
+ "BriefDescription": "Both FXU Busy",
+ "MetricExpr": "PM_FXU_BUSY / PM_CYC",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fxu_both_busy"
+ },
+ {
+ "BriefDescription": "Both FXU Idle",
+ "MetricExpr": "PM_FXU_IDLE / PM_CYC",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fxu_both_idle"
+ },
+ {
+    "BriefDescription": "Loads per instruction",
+ "MetricExpr": "PM_LD_REF_L1 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "loads_per_inst"
+ },
+ {
+    "BriefDescription": "Stores per instruction",
+ "MetricExpr": "PM_ST_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "stores_per_inst"
+ },
+ {
+    "BriefDescription": "Icache Fetches per Icache Miss",
+ "MetricExpr": "(PM_L1_ICACHE_MISS - PM_IC_PREF_WRITE) / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "icache_miss_reload"
+ },
+ {
+ "BriefDescription": "% of ICache reloads due to prefetch",
+ "MetricExpr": "PM_IC_PREF_WRITE * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "icache_pref_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Modified)",
+ "MetricExpr": "PM_INST_FROM_DL2L3_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Shared)",
+ "MetricExpr": "PM_INST_FROM_DL2L3_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L4",
+ "MetricExpr": "PM_INST_FROM_DL4 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dl4_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant Memory",
+ "MetricExpr": "PM_INST_FROM_DMEM * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dmem_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core",
+ "MetricExpr": "PM_INST_FROM_L21_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l21_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core",
+ "MetricExpr": "PM_INST_FROM_L21_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l21_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L2",
+ "MetricExpr": "PM_INST_FROM_L2 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l2_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3, other core",
+ "MetricExpr": "PM_INST_FROM_L31_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l31_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3, other core",
+ "MetricExpr": "PM_INST_FROM_L31_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l31_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L3",
+ "MetricExpr": "PM_INST_FROM_L3 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l3_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local L4",
+ "MetricExpr": "PM_INST_FROM_LL4 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_ll4_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local Memory",
+ "MetricExpr": "PM_INST_FROM_LMEM * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_lmem_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Modified)",
+ "MetricExpr": "PM_INST_FROM_RL2L3_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Shared)",
+ "MetricExpr": "PM_INST_FROM_RL2L3_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L4",
+ "MetricExpr": "PM_INST_FROM_RL4 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rl4_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote Memory",
+ "MetricExpr": "PM_INST_FROM_RMEM * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rmem_percent"
+ },
+ {
+ "BriefDescription": "Average number of stores that gather in the store buffer before being sent to an L2 RC machine",
+ "MetricExpr": "PM_ST_CMPL / (PM_L2_ST / 2)",
+ "MetricGroup": "l2_stats",
+ "MetricName": "avg_stores_gathered"
+ },
+ {
+ "BriefDescription": "L2 Store misses as a % of total L2 Store dispatches (per thread)",
+ "MetricExpr": "PM_L2_ST_MISS / PM_L2_ST * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_st_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L2 store misses per drained store. A drained store may contain multiple individual stores if they target the same line",
+ "MetricExpr": "PM_L2_ST_MISS / (PM_L2_ST / 2)",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_store_miss_ratio_percent"
+ },
+ {
+    "BriefDescription": "Average dL1 miss latency using marked events",
+ "MetricExpr": "PM_MRK_LD_MISS_L1_CYC / PM_MRK_LD_MISS_L1",
+ "MetricGroup": "latency",
+ "MetricName": "average_dl1miss_latency"
+ },
+ {
+ "BriefDescription": "Average icache miss latency",
+ "MetricExpr": "(PM_IC_DEMAND_CYC / PM_IC_DEMAND_REQ)",
+ "MetricGroup": "latency",
+ "MetricName": "average_il1_miss_latency"
+ },
+ {
+    "BriefDescription": "Average service time for SYNC",
+ "MetricExpr": "PM_LSU_SRQ_SYNC_CYC / PM_LSU_SRQ_SYNC",
+ "MetricGroup": "latency",
+ "MetricName": "average_sync_cyc"
+ },
+ {
+    "BriefDescription": "Average number of cycles LMQ slot 0 was active",
+ "MetricExpr": "PM_LSU_LMQ_S0_VALID / PM_LSU_LMQ_S0_ALLOC",
+ "MetricGroup": "latency",
+ "MetricName": "avg_lmq_life_time"
+ },
+ {
+ "BriefDescription": "Average number of cycles LRQ stays active for one load. Slot 0 is VALID ONLY FOR EVEN THREADS",
+ "MetricExpr": "PM_LSU_LRQ_S0_VALID / PM_LSU_LRQ_S0_ALLOC",
+ "MetricGroup": "latency",
+ "MetricName": "avg_lrq_life_time_even"
+ },
+ {
+ "BriefDescription": "Average number of cycles LRQ stays active for one load. Slot 43 is valid ONLY FOR ODD THREADS",
+ "MetricExpr": "PM_LSU_LRQ_S43_VALID / PM_LSU_LRQ_S43_ALLOC",
+ "MetricGroup": "latency",
+ "MetricName": "avg_lrq_life_time_odd"
+ },
+ {
+    "BriefDescription": "Average number of cycles SRQ stays active for one store. Slot 0 is VALID ONLY FOR EVEN THREADS",
+ "MetricExpr": "PM_LSU_SRQ_S0_VALID / PM_LSU_SRQ_S0_ALLOC",
+ "MetricGroup": "latency",
+ "MetricName": "avg_srq_life_time_even"
+ },
+ {
+    "BriefDescription": "Average number of cycles SRQ stays active for one store. Slot 39 is valid ONLY FOR ODD THREADS",
+ "MetricExpr": "PM_LSU_SRQ_S39_VALID / PM_LSU_SRQ_S39_ALLOC",
+ "MetricGroup": "latency",
+ "MetricName": "avg_srq_life_time_odd"
+ },
+ {
+ "BriefDescription": "Marked background kill latency, measured in L2",
+ "MetricExpr": "PM_MRK_FAB_RSP_BKILL_CYC / PM_MRK_FAB_RSP_BKILL",
+ "MetricGroup": "latency",
+ "MetricName": "bkill_latency"
+ },
+ {
+ "BriefDescription": "Marked dclaim latency, measured in L2",
+ "MetricExpr": "PM_MRK_FAB_RSP_DCLAIM_CYC / PM_MRK_FAB_RSP_DCLAIM",
+ "MetricGroup": "latency",
+ "MetricName": "dclaim_latency"
+ },
+ {
+    "BriefDescription": "Marked L2L3 distant Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DL2L3_MOD_CYC/ PM_MRK_DATA_FROM_DL2L3_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "dl2l3_mod_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 distant Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DL2L3_SHR_CYC/ PM_MRK_DATA_FROM_DL2L3_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "dl2l3_shr_latency"
+ },
+ {
+ "BriefDescription": "Distant L4 average load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DL4_CYC/ PM_MRK_DATA_FROM_DL4",
+ "MetricGroup": "latency",
+ "MetricName": "dl4_latency"
+ },
+ {
+ "BriefDescription": "Marked Dmem Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DMEM_CYC/ PM_MRK_DATA_FROM_DMEM",
+ "MetricGroup": "latency",
+ "MetricName": "dmem_latency"
+ },
+ {
+    "BriefDescription": "Estimated exposed miss latency for dL1 misses, i.e. load miss when we were NTC",
+ "MetricExpr": "PM_MRK_LD_MISS_EXPOSED_CYC / PM_MRK_LD_MISS_EXPOSED",
+ "MetricGroup": "latency",
+ "MetricName": "exposed_dl1miss_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from L2.1 in the M state",
+ "MetricExpr": "PM_MRK_DATA_FROM_L21_MOD_CYC/ PM_MRK_DATA_FROM_L21_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "l21_mod_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from L2.1 in the S state",
+ "MetricExpr": "PM_MRK_DATA_FROM_L21_SHR_CYC/ PM_MRK_DATA_FROM_L21_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "l21_shr_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from the L2 and suffered a conflict at RC machine dispatch time due to load-hit-store",
+ "MetricExpr": "PM_MRK_DATA_FROM_L2_DISP_CONFLICT_LDHITST_CYC/ PM_MRK_DATA_FROM_L2_DISP_CONFLICT_LDHITST",
+ "MetricGroup": "latency",
+ "MetricName": "l2_disp_conflict_ldhitst_latency"
+ },
+ {
+    "BriefDescription": "Average load latency for all marked demand loads that came from the L2 and suffered a conflict at RC machine dispatch time NOT due to load-hit-store",
+ "MetricExpr": "PM_MRK_DATA_FROM_L2_DISP_CONFLICT_OTHER_CYC/ PM_MRK_DATA_FROM_L2_DISP_CONFLICT_OTHER",
+ "MetricGroup": "latency",
+ "MetricName": "l2_disp_conflict_other_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from the L2",
+ "MetricExpr": "PM_MRK_DATA_FROM_L2_CYC/ PM_MRK_DATA_FROM_L2",
+ "MetricGroup": "latency",
+ "MetricName": "l2_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that were satisfied by lines prefetched into the L3. This information is forwarded from the L3",
+ "MetricExpr": "PM_MRK_DATA_FROM_L2_MEPF_CYC/ PM_MRK_DATA_FROM_L2",
+ "MetricGroup": "latency",
+ "MetricName": "l2_mepf_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from the L2 and suffered no conflicts",
+ "MetricExpr": "PM_MRK_DATA_FROM_L2_NO_CONFLICT_CYC/ PM_MRK_DATA_FROM_L2",
+ "MetricGroup": "latency",
+ "MetricName": "l2_no_conflict_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from the L3 and beyond",
+ "MetricExpr": "PM_MRK_DATA_FROM_L2MISS_CYC/ PM_MRK_DATA_FROM_L2MISS",
+ "MetricGroup": "latency",
+ "MetricName": "l2miss_latency"
+ },
+ {
+ "BriefDescription": "Marked L31 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L31_MOD_CYC/ PM_MRK_DATA_FROM_L31_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "l31_mod_latency"
+ },
+ {
+ "BriefDescription": "Marked L31 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L31_SHR_CYC/ PM_MRK_DATA_FROM_L31_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "l31_shr_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from the L3",
+ "MetricExpr": "PM_MRK_DATA_FROM_L3_CYC/ PM_MRK_DATA_FROM_L3",
+ "MetricGroup": "latency",
+ "MetricName": "l3_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from the L3 and suffered no conflicts",
+ "MetricExpr": "PM_MRK_DATA_FROM_L3_NO_CONFLICT_CYC/ PM_MRK_DATA_FROM_L2",
+ "MetricGroup": "latency",
+ "MetricName": "l3_no_conflict_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that come from beyond the L3",
+ "MetricExpr": "PM_MRK_DATA_FROM_L3MISS_CYC/ PM_MRK_DATA_FROM_L3MISS",
+ "MetricGroup": "latency",
+ "MetricName": "l3miss_latency"
+ },
+ {
+    "BriefDescription": "Average latency for marked reloads that hit in the L3 in the MEPF state, i.e. lines that were prefetched into the L3",
+ "MetricExpr": "PM_MRK_DATA_FROM_L3_MEPF_CYC/ PM_MRK_DATA_FROM_L3_MEPF",
+ "MetricGroup": "latency",
+ "MetricName": "l3pref_latency"
+ },
+ {
+ "BriefDescription": "Local L4 average load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_LL4_CYC/ PM_MRK_DATA_FROM_LL4",
+ "MetricGroup": "latency",
+ "MetricName": "ll4_latency"
+ },
+ {
+ "BriefDescription": "Marked Lmem Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_LMEM_CYC/ PM_MRK_DATA_FROM_LMEM",
+ "MetricGroup": "latency",
+ "MetricName": "lmem_latency"
+ },
+ {
+ "BriefDescription": "Latency for marked reloads that hit in the L2 or L3 of any other core on a different chip",
+ "MetricExpr": "PM_MRK_DATA_FROM_OFF_CHIP_CACHE_CYC/ PM_MRK_DATA_FROM_OFF_CHIP_CACHE",
+ "MetricGroup": "latency",
+ "MetricName": "off_chip_cache_latency"
+ },
+ {
+ "BriefDescription": "Latency for marked reloads that hit in the L2 or L3 of any other core on the same chip",
+ "MetricExpr": "PM_MRK_DATA_FROM_ON_CHIP_CACHE_CYC/ PM_MRK_DATA_FROM_ON_CHIP_CACHE",
+ "MetricGroup": "latency",
+ "MetricName": "on_chip_cache_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 remote Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RL2L3_MOD_CYC/ PM_MRK_DATA_FROM_RL2L3_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "rl2l3_mod_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 remote Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RL2L3_SHR_CYC/ PM_MRK_DATA_FROM_RL2L3_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "rl2l3_shr_latency"
+ },
+ {
+ "BriefDescription": "Remote L4 average load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RL4_CYC/ PM_MRK_DATA_FROM_RL4",
+ "MetricGroup": "latency",
+ "MetricName": "rl4_latency"
+ },
+ {
+ "BriefDescription": "Marked Rmem Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RMEM_CYC/ PM_MRK_DATA_FROM_RMEM",
+ "MetricGroup": "latency",
+ "MetricName": "rmem_latency"
+ },
+ {
+    "BriefDescription": "ERAT miss reject rate (per run instruction)(%)",
+ "MetricExpr": "PM_LSU_REJECT_ERAT_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "erat_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "ERAT miss reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_ERAT_MISS * 100 / (PM_LSU_FIN - PM_LSU_FX_FIN)",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "erat_reject_ratio_percent"
+ },
+ {
+    "BriefDescription": "LHS reject rate (per run instruction)(%)",
+ "MetricExpr": "PM_LSU_REJECT_LHS *100/ PM_RUN_INST_CMPL",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lhs_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "LHS reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_LHS *100/ (PM_LSU_FIN - PM_LSU_FX_FIN)",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lhs_reject_ratio_percent"
+ },
+ {
+ "BriefDescription": "LMQ full reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_LMQ_FULL * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lmq_full_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "ERAT miss reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_LMQ_FULL * 100 / PM_LD_REF_L1",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lmq_full_reject_ratio_percent"
+ },
+ {
+ "BriefDescription": "LSU reject ratio",
+ "MetricExpr": "PM_LSU_REJECT *100/ PM_RUN_INST_CMPL",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lsu_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "LSU reject ratio",
+ "MetricExpr": "PM_LSU_REJECT *100/ (PM_LSU_FIN - PM_LSU_FX_FIN)",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lsu_reject_ratio_percent"
+ },
+ {
+ "BriefDescription": "Ratio of reloads from local L4 to distant L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 / PM_DATA_FROM_DL4",
+ "MetricGroup": "memory",
+ "MetricName": "ld_ll4_per_ld_dmem"
+ },
+ {
+ "BriefDescription": "Ratio of reloads from local L4 to remote+distant L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 / (PM_DATA_FROM_DL4 + PM_DATA_FROM_RL4)",
+ "MetricGroup": "memory",
+ "MetricName": "ld_ll4_per_ld_mem"
+ },
+ {
+ "BriefDescription": "Ratio of reloads from local L4 to remote L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 / PM_DATA_FROM_RL4",
+ "MetricGroup": "memory",
+ "MetricName": "ld_ll4_per_ld_rl4"
+ },
+ {
+ "BriefDescription": "Number of loads from local memory per loads from distant memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM / PM_DATA_FROM_DMEM",
+ "MetricGroup": "memory",
+ "MetricName": "ld_lmem_per_ld_dmem"
+ },
+ {
+ "BriefDescription": "Number of loads from local memory per loads from remote and distant memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM / (PM_DATA_FROM_DMEM + PM_DATA_FROM_RMEM)",
+ "MetricGroup": "memory",
+ "MetricName": "ld_lmem_per_ld_mem"
+ },
+ {
+ "BriefDescription": "Number of loads from local memory per loads from remote memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM / PM_DATA_FROM_RMEM",
+ "MetricGroup": "memory",
+ "MetricName": "ld_lmem_per_ld_rmem"
+ },
+ {
+ "BriefDescription": "Number of loads from remote memory per loads from distant memory",
+ "MetricExpr": "PM_DATA_FROM_RMEM / PM_DATA_FROM_DMEM",
+ "MetricGroup": "memory",
+ "MetricName": "ld_rmem_per_ld_dmem"
+ },
+ {
+ "BriefDescription": "Memory locality",
+ "MetricExpr": "(PM_DATA_FROM_LL4 + PM_DATA_FROM_LMEM) * 100/ (PM_DATA_FROM_LMEM + PM_DATA_FROM_LL4 + PM_DATA_FROM_RMEM + PM_DATA_FROM_RL4 + PM_DATA_FROM_DMEM + PM_DATA_FROM_DL4)",
+ "MetricGroup": "memory",
+ "MetricName": "mem_locality_percent"
+ },
+ {
+ "BriefDescription": "DERAT Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_LSU_DERAT_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "derat_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Modified) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Shared) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L4 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant Memory per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L21_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l21_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L21_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l21_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L2 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L31_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l31_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L31_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l31_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L3 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local L4 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local Memory per inst",
+ "MetricExpr": "PM_DPTEG_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Modified) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Shared) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L4 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote Memory per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT misses that result in an ERAT reload",
+ "MetricExpr": "PM_DTLB_MISS * 100 / PM_LSU_DERAT_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "derat_miss_reload_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Modified)",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Shared)",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L4",
+ "MetricExpr": "PM_DPTEG_FROM_DL4 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dl4_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant Memory",
+ "MetricExpr": "PM_DPTEG_FROM_DMEM * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dmem_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L21_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l21_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L21_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l21_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L2",
+ "MetricExpr": "PM_DPTEG_FROM_L2 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l2_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L31_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l31_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L31_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l31_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L3",
+ "MetricExpr": "PM_DPTEG_FROM_L3 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l3_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local L4",
+ "MetricExpr": "PM_DPTEG_FROM_LL4 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_ll4_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local Memory",
+ "MetricExpr": "PM_DPTEG_FROM_LMEM * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_lmem_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Modified)",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Shared)",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L4",
+ "MetricExpr": "PM_DPTEG_FROM_RL4 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rl4_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote Memory",
+ "MetricExpr": "PM_DPTEG_FROM_RMEM * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rmem_percent"
+ },
+ {
+ "BriefDescription": "% DERAT miss ratio for 16G page per inst",
+ "MetricExpr": "100 * PM_DERAT_MISS_16G / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "derat_16g_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 16G page",
+ "MetricExpr": "PM_DERAT_MISS_16G / PM_LSU_DERAT_MISS",
+ "MetricGroup": "translation",
+ "MetricName": "derat_16g_miss_ratio"
+ },
+ {
+ "BriefDescription": "% DERAT miss rate for 16M page per inst",
+ "MetricExpr": "PM_DERAT_MISS_16M * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "derat_16m_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 16M page",
+ "MetricExpr": "PM_DERAT_MISS_16M / PM_LSU_DERAT_MISS",
+ "MetricGroup": "translation",
+ "MetricName": "derat_16m_miss_ratio"
+ },
+ {
+ "BriefDescription": "% DERAT miss rate for 4K page per inst",
+ "MetricExpr": "PM_DERAT_MISS_4K * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "derat_4k_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 4K page",
+ "MetricExpr": "PM_DERAT_MISS_4K / PM_LSU_DERAT_MISS",
+ "MetricGroup": "translation",
+ "MetricName": "derat_4k_miss_ratio"
+ },
+ {
+ "BriefDescription": "% DERAT miss ratio for 64K page per inst",
+ "MetricExpr": "PM_DERAT_MISS_64K * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "derat_64k_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 64K page",
+ "MetricExpr": "PM_DERAT_MISS_64K / PM_LSU_DERAT_MISS",
+ "MetricGroup": "translation",
+ "MetricName": "derat_64k_miss_ratio"
+ },
+ {
+ "BriefDescription": "% DSLB_Miss_Rate per inst",
+ "MetricExpr": "PM_DSLB_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "dslb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% ISLB miss rate per inst",
+ "MetricExpr": "PM_ISLB_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "islb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Fraction of hits on any Centaur (local, remote, or distant) on either L4 or DRAM per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_MEMORY / PM_LD_REF_L1",
+ "MetricName": "any_centaur_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Base Completion Cycles",
+ "MetricExpr": "PM_1PLUS_PPC_CMPL / PM_RUN_INST_CMPL",
+ "MetricName": "base_completion_cpi"
+ },
+ {
+ "BriefDescription": "Marked background kill latency, measured in L2",
+ "MetricExpr": "PM_MRK_FAB_RSP_BKILL_CYC / PM_MRK_FAB_RSP_BKILL",
+ "MetricName": "bkill_ratio_percent"
+ },
+ {
+ "BriefDescription": "cycles",
+ "MetricExpr": "PM_RUN_CYC",
+ "MetricName": "custom_secs"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a distant chip's Centaur (L4 or DRAM) per L1 load ref",
+ "MetricExpr": "(PM_DATA_FROM_DMEM + PM_DATA_FROM_DL4) / PM_LD_REF_L1",
+ "MetricName": "distant_centaur_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads that came from the L3 and beyond",
+ "MetricExpr": "PM_DATA_FROM_L2MISS * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricName": "dl1_reload_from_l2_miss_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core per Inst",
+ "MetricExpr": "(PM_DATA_FROM_L31_MOD + PM_DATA_FROM_L31_SHR) * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dl1_reload_from_l31_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of DL1 reloads from L3 where the lines were brought into the L3 by a prefetch operation",
+ "MetricExpr": "PM_DATA_FROM_L3_MEPF * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricName": "dl1_reload_from_l3_mepf_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from beyond the local L3",
+ "MetricExpr": "PM_DATA_FROM_L3MISS * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricName": "dl1_reload_from_l3_miss_percent"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the M (exclusive) state on the L2 or L3 of a core on a distant chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_MOD / PM_LD_REF_L1",
+ "MetricName": "dl2l3_mod_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the S state on the L2 or L3 of a core on a distant chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_SHR / PM_LD_REF_L1",
+ "MetricName": "dl2l3_shr_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a distant Centaur's cache per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_DL4 / PM_LD_REF_L1",
+ "MetricName": "dl4_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a distant Centaur's DRAM per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_DMEM / PM_LD_REF_L1",
+ "MetricName": "dmem_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Rate of DERAT reloads from L2",
+ "MetricExpr": "PM_DPTEG_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dpteg_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of DERAT reloads from L3",
+ "MetricExpr": "PM_DPTEG_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dpteg_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "Overhead of expansion cycles",
+ "MetricExpr": "(PM_GRP_CMPL / PM_RUN_INST_CMPL) - (PM_1PLUS_PPC_CMPL / PM_RUN_INST_CMPL)",
+ "MetricName": "expansion_overhead_cpi"
+ },
+ {
+ "BriefDescription": "Total Fixed point operations executded in the Load/Store Unit following a load/store operation",
+ "MetricExpr": "PM_LSU_FX_FIN/PM_RUN_INST_CMPL",
+ "MetricName": "fixed_in_lsu_per_inst"
+ },
+ {
+ "BriefDescription": "GCT empty cycles",
+ "MetricExpr": "(PM_GCT_NOSLOT_CYC / PM_RUN_CYC) * 100",
+ "MetricName": "gct_empty_percent"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from L2",
+ "MetricExpr": "PM_IPTEG_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from L3",
+ "MetricExpr": "PM_IPTEG_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from local memory",
+ "MetricExpr": "PM_IPTEG_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from local memory",
+ "MetricExpr": "PM_IPTEG_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "Fraction of L1 hits per load ref",
+ "MetricExpr": "(PM_LD_REF_L1 - PM_LD_MISS_L1) / PM_LD_REF_L1",
+ "MetricName": "l1_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L1 load misses per L1 load ref",
+ "MetricExpr": "PM_LD_MISS_L1 / PM_LD_REF_L1",
+ "MetricName": "l1_ld_miss_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on another core's L2 on the same chip per L1 load ref",
+ "MetricExpr": "(PM_DATA_FROM_L21_MOD + PM_DATA_FROM_L21_SHR) / PM_LD_REF_L1",
+ "MetricName": "l2_1_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the M (exclusive) state on another core's L2 on the same chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L21_MOD / PM_LD_REF_L1",
+ "MetricName": "l2_1_mod_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the S state on another core's L2 on the same chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L21_SHR / PM_LD_REF_L1",
+ "MetricName": "l2_1_shr_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Average number of Castout machines used. 1 of 16 CO machines is sampled every L2 cycle",
+ "MetricExpr": "(PM_CO_USAGE / PM_RUN_CYC) * 16",
+ "MetricName": "l2_co_usage"
+ },
+ {
+ "BriefDescription": "Fraction of L2 load hits per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L2 / PM_LD_REF_L1",
+ "MetricName": "l2_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L2 load misses per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L2MISS / PM_LD_REF_L1",
+ "MetricName": "l2_ld_miss_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L2 load hits per L1 load ref where the L2 experienced a Load-Hit-Store conflict",
+ "MetricExpr": "PM_DATA_FROM_L2_DISP_CONFLICT_LDHITST / PM_LD_REF_L1",
+ "MetricName": "l2_lhs_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L2 load hits per L1 load ref where the L2 did not experience a conflict",
+ "MetricExpr": "PM_DATA_FROM_L2_NO_CONFLICT / PM_LD_REF_L1",
+ "MetricName": "l2_no_conflict_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L2 load hits per L1 load ref where the L2 experienced some conflict other than Load-Hit-Store",
+ "MetricExpr": "PM_DATA_FROM_L2_DISP_CONFLICT_OTHER / PM_LD_REF_L1",
+ "MetricName": "l2_other_conflict_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Average number of Read/Claim machines used. 1 of 16 RC machines is sampled every L2 cycle",
+ "MetricExpr": "(PM_RC_USAGE / PM_RUN_CYC) * 16",
+ "MetricName": "l2_rc_usage"
+ },
+ {
+ "BriefDescription": "Average number of Snoop machines used. 1 of 8 SN machines is sampled every L2 cycle",
+ "MetricExpr": "(PM_SN_USAGE / PM_RUN_CYC) * 8",
+ "MetricName": "l2_sn_usage"
+ },
+ {
+ "BriefDescription": "Marked L31 Load latency",
+ "MetricExpr": "(PM_MRK_DATA_FROM_L31_SHR_CYC + PM_MRK_DATA_FROM_L31_MOD_CYC) / (PM_MRK_DATA_FROM_L31_SHR + PM_MRK_DATA_FROM_L31_MOD)",
+ "MetricName": "l31_latency"
+ },
+ {
+ "BriefDescription": "Fraction of hits on another core's L3 on the same chip per L1 load ref",
+ "MetricExpr": "(PM_DATA_FROM_L31_MOD + PM_DATA_FROM_L31_SHR) / PM_LD_REF_L1",
+ "MetricName": "l3_1_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the M (exclusive) state on another core's L3 on the same chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L31_MOD / PM_LD_REF_L1",
+ "MetricName": "l3_1_mod_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the S state on another core's L3 on the same chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L31_SHR / PM_LD_REF_L1",
+ "MetricName": "l3_1_shr_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L3 load hits per load ref where the demand load collided with a pending prefetch",
+ "MetricExpr": "PM_DATA_FROM_L3_DISP_CONFLICT / PM_LD_REF_L1",
+ "MetricName": "l3_conflict_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L3 load hits per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L3 / PM_LD_REF_L1",
+ "MetricName": "l3_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L3 load misses per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L3MISS / PM_LD_REF_L1",
+ "MetricName": "l3_ld_miss_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L3 load hits per load ref where the L3 did not experience a conflict",
+ "MetricExpr": "PM_DATA_FROM_L3_NO_CONFLICT / PM_LD_REF_L1",
+ "MetricName": "l3_no_conflict_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L3 hits on lines that were not in the MEPF state per L1 load ref",
+ "MetricExpr": "(PM_DATA_FROM_L3 - PM_DATA_FROM_L3_MEPF) / PM_LD_REF_L1",
+ "MetricName": "l3other_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L3 hits on lines that were recently prefetched into the L3 (MEPF state) per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L3_MEPF / PM_LD_REF_L1",
+ "MetricName": "l3pref_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a local Centaur's cache per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_LL4 / PM_LD_REF_L1",
+ "MetricName": "ll4_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a local Centaur's DRAM per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_LMEM / PM_LD_REF_L1",
+ "MetricName": "lmem_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a local Centaur (L4 or DRAM) per L1 load ref",
+ "MetricExpr": "(PM_DATA_FROM_LMEM + PM_DATA_FROM_LL4) / PM_LD_REF_L1",
+ "MetricName": "local_centaur_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Cycles stalled by Other LSU Operations",
+ "MetricExpr": "(PM_CMPLU_STALL_LSU - PM_CMPLU_STALL_REJECT - PM_CMPLU_STALL_DCACHE_MISS - PM_CMPLU_STALL_STORE) / (PM_LD_REF_L1 - PM_LD_MISS_L1)",
+ "MetricName": "lsu_stall_avg_cyc_per_l1hit_stfw"
+ },
+ {
+ "BriefDescription": "Fraction of hits on another core's L2 or L3 on a different chip (remote or distant) per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_OFF_CHIP_CACHE / PM_LD_REF_L1",
+ "MetricName": "off_chip_cache_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on another core's L2 or L3 on the same chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_ON_CHIP_CACHE / PM_LD_REF_L1",
+ "MetricName": "on_chip_cache_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a remote chip's Centaur (L4 or DRAM) per L1 load ref",
+ "MetricExpr": "(PM_DATA_FROM_RMEM + PM_DATA_FROM_RL4) / PM_LD_REF_L1",
+ "MetricName": "remote_centaur_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Percent of all FXU/VSU instructions that got rejected because of unavailable resources or facilities",
+ "MetricExpr": "PM_ISU_REJECT_RES_NA *100/ PM_RUN_INST_CMPL",
+ "MetricName": "resource_na_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the M (exclusive) state on the L2 or L3 of a core on a remote chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_MOD / PM_LD_REF_L1",
+ "MetricName": "rl2l3_mod_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the S state on the L2 or L3 of a core on a remote chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_SHR / PM_LD_REF_L1",
+ "MetricName": "rl2l3_shr_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a remote Centaur's cache per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_RL4 / PM_LD_REF_L1",
+ "MetricName": "rl4_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a remote Centaur's DRAM per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_RMEM / PM_LD_REF_L1",
+ "MetricName": "rmem_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Percent of all FXU/VSU instructions that got rejected due to SAR Bypass",
+ "MetricExpr": "PM_ISU_REJECT_SAR_BYPASS *100/ PM_RUN_INST_CMPL",
+ "MetricName": "sar_bypass_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "Percent of all FXU/VSU instructions that got rejected because of unavailable sources",
+ "MetricExpr": "PM_ISU_REJECT_SRC_NA *100/ PM_RUN_INST_CMPL",
+ "MetricName": "source_na_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "Store forward rate",
+ "MetricExpr": "100 * (PM_LSU0_SRQ_STFWD + PM_LSU1_SRQ_STFWD) / PM_RUN_INST_CMPL",
+ "MetricName": "store_forward_rate_percent"
+ },
+ {
+ "BriefDescription": "Store forward rate",
+ "MetricExpr": "100 * (PM_LSU0_SRQ_STFWD + PM_LSU1_SRQ_STFWD) / (PM_LD_REF_L1 - PM_LD_MISS_L1)",
+ "MetricName": "store_forward_ratio_percent"
+ },
+ {
+ "BriefDescription": "Marked store latency, from core completion to L2 RC machine completion",
+ "MetricExpr": "(PM_MRK_ST_L2DISP_TO_CMPL_CYC + PM_MRK_ST_DRAIN_TO_L2DISP_CYC) / PM_MRK_ST_NEST",
+ "MetricName": "store_latency"
+ },
+ {
+ "BriefDescription": "Cycles stalled by any sync",
+ "MetricExpr": "(PM_CMPLU_STALL_LWSYNC + PM_CMPLU_STALL_HWSYNC) / PM_RUN_INST_CMPL",
+ "MetricName": "sync_stall_cpi"
+ },
+ {
+ "BriefDescription": "Percentage of lines that were prefetched into the L3 and evicted before they were consumed",
+ "MetricExpr": "(PM_L3_CO_MEPF / 2) / PM_L3_PREF_ALL * 100",
+ "MetricName": "wasted_l3_prefetch_percent"
+ }
+]
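The metrics.json entries above are plain data: each object names a derived metric and gives a MetricExpr built from raw POWER8 PMU event counts, and perf evaluates that expression whenever the referenced events are counted together. As a rough illustration of how such an expression turns counts into a number (this sketch is not part of the patch, the event counts are invented, and perf carries its own metric-expression evaluator), a few lines of Python can evaluate the l3_latency entry defined above:

    #!/usr/bin/env python3
    # Illustrative sketch only, not part of the kernel patch: evaluate one
    # MetricExpr from the metrics.json data above against a dictionary of
    # raw event counts. perf has its own metric-expression evaluator; the
    # counts used here are made-up numbers chosen only for the example.
    import json
    import re

    def evaluate(metric_expr, counts):
        """Replace each event name with its count, then do the arithmetic."""
        expr = re.sub(r"[A-Za-z_][A-Za-z0-9_]*",
                      lambda m: str(counts[m.group(0)]), metric_expr)
        # MetricExpr strings here only use numbers, + - * / and parentheses.
        return eval(expr, {"__builtins__": {}}, {})

    # The l3_latency entry as it appears in the power8 metrics.json above.
    metric = json.loads("""
    {
      "BriefDescription": "Average load latency for all marked demand loads that came from the L3",
      "MetricExpr": "PM_MRK_DATA_FROM_L3_CYC/ PM_MRK_DATA_FROM_L3",
      "MetricGroup": "latency",
      "MetricName": "l3_latency"
    }
    """)

    # Hypothetical counts for the two events the expression references.
    counts = {
        "PM_MRK_DATA_FROM_L3_CYC": 1200000,
        "PM_MRK_DATA_FROM_L3": 40000,
    }

    print(metric["MetricName"], "=", evaluate(metric["MetricExpr"], counts))
    # -> l3_latency = 30.0 (cycles per marked load that hit in the L3)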
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/metrics.json b/tools/perf/pmu-events/arch/powerpc/power9/metrics.json
new file mode 100644
index 000000000000..811c2a8c1c9e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power9/metrics.json
@@ -0,0 +1,1982 @@
+[
+ {
+ "MetricExpr": "PM_BR_MPRED_CMPL / PM_BR_PRED * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "br_misprediction_percent"
+ },
+ {
+ "BriefDescription": "Count cache branch misprediction per instruction",
+ "MetricExpr": "PM_BR_MPRED_CCACHE / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "ccache_mispredict_rate_percent"
+ },
+ {
+ "BriefDescription": "Count cache branch misprediction",
+ "MetricExpr": "PM_BR_MPRED_CCACHE / PM_BR_PRED_CCACHE * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "ccache_misprediction_percent"
+ },
+ {
+ "BriefDescription": "Link stack branch misprediction",
+ "MetricExpr": "PM_BR_MPRED_LSTACK / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "lstack_mispredict_rate_percent"
+ },
+ {
+ "BriefDescription": "Link stack branch misprediction",
+ "MetricExpr": "PM_BR_MPRED_LSTACK/ PM_BR_PRED_LSTACK * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "lstack_misprediction_percent"
+ },
+ {
+ "BriefDescription": "% Branches Taken",
+ "MetricExpr": "PM_BR_TAKEN_CMPL * 100 / PM_BRU_FIN",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "taken_branches_percent"
+ },
+ {
+ "BriefDescription": "Completion stall due to a Branch Unit",
+ "MetricExpr": "PM_CMPLU_STALL_BRU/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "bru_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was routed to the crypto execution pipe and was waiting to finish",
+ "MetricExpr": "PM_CMPLU_STALL_CRYPTO/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "crypto_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a load that missed the L1 and was waiting for the data to return from the nest",
+ "MetricExpr": "PM_CMPLU_STALL_DCACHE_MISS/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dcache_miss_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a multi-cycle instruction issued to the Decimal Floating Point execution pipe and waiting to finish.",
+ "MetricExpr": "PM_CMPLU_STALL_DFLONG/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dflong_stall_cpi"
+ },
+ {
+ "BriefDescription": "Stalls due to short latency decimal floating ops.",
+ "MetricExpr": "(PM_CMPLU_STALL_DFU - PM_CMPLU_STALL_DFLONG)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dfu_other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was issued to the Decimal Floating Point execution pipe and waiting to finish.",
+ "MetricExpr": "PM_CMPLU_STALL_DFU/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dfu_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall by Dcache miss which resolved off node memory/cache",
+ "MetricExpr": "(PM_CMPLU_STALL_DMISS_L3MISS - PM_CMPLU_STALL_DMISS_L21_L31 - PM_CMPLU_STALL_DMISS_LMEM - PM_CMPLU_STALL_DMISS_REMOTE)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_distant_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall by Dcache miss which resolved on chip ( excluding local L2/L3)",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L21_L31/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_l21_l31_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to cache miss that resolves in the L2 or L3 with a conflict",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L2L3_CONFLICT/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_l2l3_conflict_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to cache miss that resolves in the L2 or L3 without conflict",
+ "MetricExpr": "(PM_CMPLU_STALL_DMISS_L2L3 - PM_CMPLU_STALL_DMISS_L2L3_CONFLICT)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_l2l3_noconflict_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall by Dcache miss which resolved in L2/L3",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L2L3/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_l2l3_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to cache miss resolving missed the L3",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L3MISS/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_l3miss_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to cache miss that resolves in local memory",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_LMEM/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_lmem_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall by Dcache miss which resolved outside of local memory",
+ "MetricExpr": "(PM_CMPLU_STALL_DMISS_L3MISS - PM_CMPLU_STALL_DMISS_L21_L31 - PM_CMPLU_STALL_DMISS_LMEM)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_non_local_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall by Dcache miss which resolved from remote chip (cache or memory)",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_REMOTE/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_remote_stall_cpi"
+ },
+ {
+ "BriefDescription": "Stalls due to short latency double precision ops.",
+ "MetricExpr": "(PM_CMPLU_STALL_DP - PM_CMPLU_STALL_DPLONG)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dp_other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a scalar instruction issued to the Double Precision execution pipe and waiting to finish. Includes binary floating point instructions in 32 and 64 bit binary floating point format.",
+ "MetricExpr": "PM_CMPLU_STALL_DP/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dp_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a scalar multi-cycle instruction issued to the Double Precision execution pipe and waiting to finish. Includes binary floating point instructions in 32 and 64 bit binary floating point format.",
+ "MetricExpr": "PM_CMPLU_STALL_DPLONG/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dplong_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction is an EIEIO waiting for response from L2",
+ "MetricExpr": "PM_CMPLU_STALL_EIEIO/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "eieio_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the next to finish instruction suffered an ERAT miss and the EMQ was full",
+ "MetricExpr": "PM_CMPLU_STALL_EMQ_FULL/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "emq_full_stall_cpi"
+ },
+ {
+ "MetricExpr": "(PM_CMPLU_STALL_ERAT_MISS + PM_CMPLU_STALL_EMQ_FULL)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "emq_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a load or store that suffered a translation miss",
+ "MetricExpr": "PM_CMPLU_STALL_ERAT_MISS/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "erat_miss_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles in which the NTC instruction is not allowed to complete because it was interrupted by ANY exception, which has to be serviced before the instruction can complete",
+ "MetricExpr": "PM_CMPLU_STALL_EXCEPTION/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "exception_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to execution units for other reasons.",
+ "MetricExpr": "(PM_CMPLU_STALL_EXEC_UNIT - PM_CMPLU_STALL_FXU - PM_CMPLU_STALL_DP - PM_CMPLU_STALL_DFU - PM_CMPLU_STALL_PM - PM_CMPLU_STALL_CRYPTO - PM_CMPLU_STALL_VFXU - PM_CMPLU_STALL_VDP)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "exec_unit_other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to execution units (FXU/VSU/CRU)",
+ "MetricExpr": "PM_CMPLU_STALL_EXEC_UNIT/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "exec_unit_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles in which the NTC instruction is not allowed to complete because any of the 4 threads in the same core suffered a flush, which blocks completion",
+ "MetricExpr": "PM_CMPLU_STALL_FLUSH_ANY_THREAD/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "flush_any_thread_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to a long latency scalar fixed point instruction (division, square root)",
+ "MetricExpr": "PM_CMPLU_STALL_FXLONG/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "fxlong_stall_cpi"
+ },
+ {
+ "BriefDescription": "Stalls due to short latency integer ops",
+ "MetricExpr": "(PM_CMPLU_STALL_FXU - PM_CMPLU_STALL_FXLONG)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "fxu_other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall due to a scalar fixed point or CR instruction in the execution pipeline. These instructions get routed to the ALU, ALU2, and DIV pipes",
+ "MetricExpr": "PM_CMPLU_STALL_FXU/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "fxu_stall_cpi"
+ },
+ {
+ "MetricExpr": "(PM_NTC_ISSUE_HELD_DARQ_FULL + PM_NTC_ISSUE_HELD_ARB + PM_NTC_ISSUE_HELD_OTHER)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "issue_hold_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a larx waiting to be satisfied",
+ "MetricExpr": "PM_CMPLU_STALL_LARX/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "larx_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a load that hit on an older store and it was waiting for store data",
+ "MetricExpr": "PM_CMPLU_STALL_LHS/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lhs_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a load that missed in the L1 and the LMQ was unable to accept this load miss request because it was full",
+ "MetricExpr": "PM_CMPLU_STALL_LMQ_FULL/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lmq_full_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a load instruction with all its dependencies satisfied just going through the LSU pipe to finish",
+ "MetricExpr": "PM_CMPLU_STALL_LOAD_FINISH/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "load_finish_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a load that was held in LSAQ because the LRQ was full",
+ "MetricExpr": "PM_CMPLU_STALL_LRQ_FULL/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lrq_full_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall due to LRQ miscellaneous reasons, lost arbitration to LMQ slot, bank collisions, set prediction cleanup, set prediction multihit and others",
+ "MetricExpr": "PM_CMPLU_STALL_LRQ_OTHER/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lrq_other_stall_cpi"
+ },
+ {
+ "MetricExpr": "(PM_CMPLU_STALL_LMQ_FULL + PM_CMPLU_STALL_ST_FWD + PM_CMPLU_STALL_LHS + PM_CMPLU_STALL_LSU_MFSPR + PM_CMPLU_STALL_LARX + PM_CMPLU_STALL_LRQ_OTHER)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lrq_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a load or store that was held in LSAQ because an older instruction from SRQ or LRQ won arbitration to the LSU pipe when this instruction tried to launch",
+ "MetricExpr": "PM_CMPLU_STALL_LSAQ_ARB/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsaq_arb_stall_cpi"
+ },
+ {
+ "MetricExpr": "(PM_CMPLU_STALL_LRQ_FULL + PM_CMPLU_STALL_SRQ_FULL + PM_CMPLU_STALL_LSAQ_ARB)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsaq_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was an LSU op (other than a load or a store) with all its dependencies met and just going through the LSU pipe to finish",
+ "MetricExpr": "PM_CMPLU_STALL_LSU_FIN/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_fin_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall of one cycle because the LSU requested to flush the next iop in the sequence. It takes 1 cycle for the ISU to process this request before the LSU instruction is allowed to complete",
+ "MetricExpr": "PM_CMPLU_STALL_LSU_FLUSH_NEXT/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_flush_next_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a mfspr instruction targeting an LSU SPR and it was waiting for the register data to be returned",
+ "MetricExpr": "PM_CMPLU_STALL_LSU_MFSPR/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_mfspr_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion LSU stall for other reasons",
+ "MetricExpr": "(PM_CMPLU_STALL_LSU - PM_CMPLU_STALL_LSU_FIN - PM_CMPLU_STALL_STORE_FINISH - PM_CMPLU_STALL_STORE_DATA - PM_CMPLU_STALL_EIEIO - PM_CMPLU_STALL_STCX - PM_CMPLU_STALL_SLB - PM_CMPLU_STALL_TEND - PM_CMPLU_STALL_PASTE - PM_CMPLU_STALL_TLBIE - PM_CMPLU_STALL_STORE_PIPE_ARB - PM_CMPLU_STALL_STORE_FIN_ARB - PM_CMPLU_STALL_LOAD_FINISH + PM_CMPLU_STALL_DCACHE_MISS - PM_CMPLU_STALL_LMQ_FULL - PM_CMPLU_STALL_ST_FWD - PM_CMPLU_STALL_LHS - PM_CMPLU_STALL_LSU_MFSPR - PM_CMPLU_STALL_LARX - PM_CMPLU_STALL_LRQ_OTHER + PM_CMPLU_STALL_ERAT_MISS + PM_CMPLU_STALL_EMQ_FULL - PM_CMPLU_STALL_LRQ_FULL - PM_CMPLU_STALL_SRQ_FULL - PM_CMPLU_STALL_LSAQ_ARB) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall by LSU instruction",
+ "MetricExpr": "PM_CMPLU_STALL_LSU/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall because the ISU is updating the register and notifying the Effective Address Table (EAT)",
+ "MetricExpr": "PM_CMPLU_STALL_MTFPSCR/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "mtfpscr_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall because the ISU is updating the TEXASR to keep track of the nested tbegin. This is a short delay, and it includes ROT",
+ "MetricExpr": "PM_CMPLU_STALL_NESTED_TBEGIN/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "nested_tbegin_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall because the ISU is updating the TEXASR to keep track of the nested tend and decrement the TEXASR nested level. This is a short delay",
+ "MetricExpr": "PM_CMPLU_STALL_NESTED_TEND/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "nested_tend_stall_cpi"
+ },
+ {
+ "BriefDescription": "Number of cycles the ICT has no itags assigned to this thread",
+ "MetricExpr": "PM_ICT_NOSLOT_CYC/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "nothing_dispatched_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was one that must finish at dispatch.",
+ "MetricExpr": "PM_CMPLU_STALL_NTC_DISP_FIN/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntc_disp_fin_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline (NTC) finishes. This event is used to account for cycles in which work is being completed in the CPI stack",
+ "MetricExpr": "PM_NTC_FIN/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntc_fin_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to ntc flush",
+ "MetricExpr": "PM_CMPLU_STALL_NTC_FLUSH/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntc_flush_stall_cpi"
+ },
+ {
+ "BriefDescription": "The NTC instruction is being held at dispatch because it lost arbitration onto the issue pipe to another instruction (from the same thread or a different thread)",
+ "MetricExpr": "PM_NTC_ISSUE_HELD_ARB/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntc_issue_held_arb_cpi"
+ },
+ {
+ "BriefDescription": "The NTC instruction is being held at dispatch because there are no slots in the DARQ for it",
+ "MetricExpr": "PM_NTC_ISSUE_HELD_DARQ_FULL/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntc_issue_held_darq_full_cpi"
+ },
+ {
+ "BriefDescription": "The NTC instruction is being held at dispatch during regular pipeline cycles, or because the VSU is busy with multi-cycle instructions, or because of a write-back collision with VSU",
+ "MetricExpr": "PM_NTC_ISSUE_HELD_OTHER/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntc_issue_held_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles unaccounted for.",
+ "MetricExpr": "(PM_RUN_CYC - PM_1PLUS_PPC_CMPL - PM_CMPLU_STALL_THRD - PM_CMPLU_STALL - PM_ICT_NOSLOT_CYC)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "other_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall for other reasons",
+ "MetricExpr": "PM_CMPLU_STALL - PM_CMPLU_STALL_NTC_DISP_FIN - PM_CMPLU_STALL_NTC_FLUSH - PM_CMPLU_STALL_LSU - PM_CMPLU_STALL_EXEC_UNIT - PM_CMPLU_STALL_BRU)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a paste waiting for response from L2",
+ "MetricExpr": "PM_CMPLU_STALL_PASTE/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "paste_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was issued to the Permute execution pipe and waiting to finish.",
+ "MetricExpr": "PM_CMPLU_STALL_PM/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "pm_stall_cpi"
+ },
+ {
+ "BriefDescription": "Run cycles per run instruction",
+ "MetricExpr": "PM_RUN_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "run_cpi"
+ },
+ {
+ "BriefDescription": "Run_cycles",
+ "MetricExpr": "PM_RUN_CYC/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "run_cyc_cpi"
+ },
+ {
+ "MetricExpr": "(PM_CMPLU_STALL_FXU + PM_CMPLU_STALL_DP + PM_CMPLU_STALL_DFU + PM_CMPLU_STALL_PM + PM_CMPLU_STALL_CRYPTO)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "scalar_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was awaiting L2 response for an SLB",
+ "MetricExpr": "PM_CMPLU_STALL_SLB/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "slb_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall while waiting for the non-speculative finish of either a stcx waiting for its result or a load waiting for non-critical sectors of data and ECC",
+ "MetricExpr": "PM_CMPLU_STALL_SPEC_FINISH/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "spec_finish_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a store that was held in LSAQ because the SRQ was full",
+ "MetricExpr": "PM_CMPLU_STALL_SRQ_FULL/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "srq_full_stall_cpi"
+ },
+ {
+ "MetricExpr": "(PM_CMPLU_STALL_STORE_DATA + PM_CMPLU_STALL_EIEIO + PM_CMPLU_STALL_STCX + PM_CMPLU_STALL_SLB + PM_CMPLU_STALL_TEND + PM_CMPLU_STALL_PASTE + PM_CMPLU_STALL_TLBIE + PM_CMPLU_STALL_STORE_PIPE_ARB + PM_CMPLU_STALL_STORE_FIN_ARB)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "srq_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to store forward",
+ "MetricExpr": "PM_CMPLU_STALL_ST_FWD/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "st_fwd_stall_cpi"
+ },
+ {
+ "BriefDescription": "Nothing completed and ICT not empty",
+ "MetricExpr": "PM_CMPLU_STALL/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a stcx waiting for response from L2",
+ "MetricExpr": "PM_CMPLU_STALL_STCX/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "stcx_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the next to finish instruction was a store waiting on data",
+ "MetricExpr": "PM_CMPLU_STALL_STORE_DATA/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "store_data_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a store waiting for a slot in the store finish pipe. This means the instruction is ready to finish but there are instructions ahead of it, using the finish pipe",
+ "MetricExpr": "PM_CMPLU_STALL_STORE_FIN_ARB/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "store_fin_arb_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a store with all its dependencies met, just waiting to go through the LSU pipe to finish",
+ "MetricExpr": "PM_CMPLU_STALL_STORE_FINISH/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "store_finish_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a store waiting for the next relaunch opportunity after an internal reject. This means the instruction is ready to relaunch and tried once but lost arbitration",
+ "MetricExpr": "PM_CMPLU_STALL_STORE_PIPE_ARB/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "store_pipe_arb_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a tend instruction awaiting response from L2",
+ "MetricExpr": "PM_CMPLU_STALL_TEND/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "tend_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion Stalled because the thread was blocked",
+ "MetricExpr": "PM_CMPLU_STALL_THRD/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "thread_block_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a tlbie waiting for response from L2",
+ "MetricExpr": "PM_CMPLU_STALL_TLBIE/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "tlbie_stall_cpi"
+ },
+ {
+ "BriefDescription": "Vector stalls due to small latency double precision ops",
+ "MetricExpr": "(PM_CMPLU_STALL_VDP - PM_CMPLU_STALL_VDPLONG)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vdp_other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a vector instruction issued to the Double Precision execution pipe and waiting to finish.",
+ "MetricExpr": "PM_CMPLU_STALL_VDP/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vdp_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a scalar multi-cycle instruction issued to the Double Precision execution pipe and waiting to finish. Includes binary floating point instructions in 32 and 64 bit binary floating point format.",
+ "MetricExpr": "PM_CMPLU_STALL_VDPLONG/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vdplong_stall_cpi"
+ },
+ {
+ "MetricExpr": "(PM_CMPLU_STALL_VFXU + PM_CMPLU_STALL_VDP)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vector_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to a long latency vector fixed point instruction (division, square root)",
+ "MetricExpr": "PM_CMPLU_STALL_VFXLONG/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vfxlong_stall_cpi"
+ },
+ {
+ "BriefDescription": "Vector stalls due to small latency integer ops",
+ "MetricExpr": "(PM_CMPLU_STALL_VFXU - PM_CMPLU_STALL_VFXLONG)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vfxu_other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall due to a vector fixed point instruction in the execution pipeline. These instructions get routed to the ALU, ALU2, and DIV pipes",
+ "MetricExpr": "PM_CMPLU_STALL_VFXU/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vfxu_stall_cpi"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant L2 or L3 (Modified) per Inst",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_dl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant L2 or L3 (Shared) per Inst",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_dl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_DMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_dmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L21_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l21_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L21_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l21_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from L2 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l2_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from L2 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3 M state, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L31_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l31_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3 S tate, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L31_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l31_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads that came from the L3 and were brought into the L3 by a prefetch, per instruction completed",
+ "MetricExpr": "PM_DATA_FROM_L3_MEPF * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l3_mepf_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from L3 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l3_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from L3 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Local Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_rl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_rl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Remote Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_RMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_rmem_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 demand load misses per run instruction",
+ "MetricExpr": "PM_LD_MISS_L1 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "l1_ld_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 misses that result in a cache reload",
+ "MetricExpr": "PM_L1_DCACHE_RELOAD_VALID * 100 / PM_LD_MISS_L1",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_miss_reloads_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Distant L2 or L3 (Modified)",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_dl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Distant L2 or L3 (Shared)",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_dl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Distant Memory",
+ "MetricExpr": "PM_DATA_FROM_DMEM * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_dmem_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core",
+ "MetricExpr": "PM_DATA_FROM_L21_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l21_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core",
+ "MetricExpr": "PM_DATA_FROM_L21_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l21_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from sources beyond the local L2",
+ "MetricExpr": "PM_DATA_FROM_L2MISS * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l2_miss_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from L2",
+ "MetricExpr": "PM_DATA_FROM_L2 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l2_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core",
+ "MetricExpr": "PM_DATA_FROM_L31_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l31_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core",
+ "MetricExpr": "PM_DATA_FROM_L31_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l31_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads that came from L3 and were brought into the L3 by a prefetch",
+ "MetricExpr": "PM_DATA_FROM_L3_MEPF * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l3_mepf_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from sources beyond the local L3",
+ "MetricExpr": "PM_DATA_FROM_L3MISS * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l3_miss_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from L3",
+ "MetricExpr": "PM_DATA_FROM_L3 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l3_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Local Memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_lmem_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Remote L2 or L3 (Modified)",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_rl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Remote L2 or L3 (Shared)",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_rl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Remote Memory",
+ "MetricExpr": "PM_DATA_FROM_RMEM * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_rmem_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2l3 distant MOD miss rates with measured DL2L3 MOD latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_MOD * PM_MRK_DATA_FROM_DL2L3_MOD_CYC / PM_MRK_DATA_FROM_DL2L3_MOD / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dl2l3_mod_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2l3 distant SHR miss rates with measured DL2L3 SHR latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_SHR * PM_MRK_DATA_FROM_DL2L3_SHR_CYC / PM_MRK_DATA_FROM_DL2L3_SHR / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dl2l3_shr_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of distant L4 miss rates with measured DL4 latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_DL4 * PM_MRK_DATA_FROM_DL4_CYC / PM_MRK_DATA_FROM_DL4 / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dl4_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of distant memory miss rates with measured DMEM latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_DMEM * PM_MRK_DATA_FROM_DMEM_CYC / PM_MRK_DATA_FROM_DMEM / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dmem_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl21 MOD miss rates with measured L21 MOD latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_L21_MOD * PM_MRK_DATA_FROM_L21_MOD_CYC / PM_MRK_DATA_FROM_L21_MOD / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l21_mod_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl21 SHR miss rates with measured L21 SHR latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_L21_SHR * PM_MRK_DATA_FROM_L21_SHR_CYC / PM_MRK_DATA_FROM_L21_SHR / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l21_shr_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2 miss rates with measured L2 latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_L2 * PM_MRK_DATA_FROM_L2_CYC / PM_MRK_DATA_FROM_L2 / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l2_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl31 MOD miss rates with measured L31 MOD latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_L31_MOD * PM_MRK_DATA_FROM_L31_MOD_CYC / PM_MRK_DATA_FROM_L31_MOD / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l31_mod_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl31 SHR miss rates with measured L31 SHR latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_L31_SHR * PM_MRK_DATA_FROM_L31_SHR_CYC / PM_MRK_DATA_FROM_L31_SHR / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l31_shr_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl3 miss rates with measured L3 latency as a % of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_L3 * PM_MRK_DATA_FROM_L3_CYC / PM_MRK_DATA_FROM_L3 / PM_CMPLU_STALL_DCACHE_MISS * 100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l3_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of Local memory miss rates with measured LMEM latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_LMEM * PM_MRK_DATA_FROM_LMEM_CYC / PM_MRK_DATA_FROM_LMEM / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "lmem_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2l3 remote MOD miss rates with measured RL2L3 MOD latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_MOD * PM_MRK_DATA_FROM_RL2L3_MOD_CYC / PM_MRK_DATA_FROM_RL2L3_MOD / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rl2l3_mod_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2l3 shared miss rates with measured RL2L3 SHR latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_SHR * PM_MRK_DATA_FROM_RL2L3_SHR_CYC / PM_MRK_DATA_FROM_RL2L3_SHR / PM_CMPLU_STALL_DCACHE_MISS * 100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rl2l3_shr_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of remote L4 miss rates with measured RL4 latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_RL4 * PM_MRK_DATA_FROM_RL4_CYC / PM_MRK_DATA_FROM_RL4 / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rl4_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of remote memory miss rates with measured RMEM latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_RMEM * PM_MRK_DATA_FROM_RMEM_CYC / PM_MRK_DATA_FROM_RMEM / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rmem_cpi_percent"
+ },
+ {
+ "BriefDescription": "Branch Mispredict flushes per instruction",
+ "MetricExpr": "PM_FLUSH_MPRED / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "general",
+ "MetricName": "br_mpred_flush_rate_percent"
+ },
+ {
+ "BriefDescription": "Cycles per instruction",
+ "MetricExpr": "PM_CYC / PM_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "cpi"
+ },
+ {
+ "BriefDescription": "GCT empty cycles",
+ "MetricExpr": "(PM_FLUSH_DISP / PM_RUN_INST_CMPL) * 100",
+ "MetricGroup": "general",
+ "MetricName": "disp_flush_rate_percent"
+ },
+ {
+ "BriefDescription": "% DTLB miss rate per inst",
+ "MetricExpr": "PM_DTLB_MISS / PM_RUN_INST_CMPL *100",
+ "MetricGroup": "general",
+ "MetricName": "dtlb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Flush rate (%)",
+ "MetricExpr": "PM_FLUSH * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "flush_rate_percent"
+ },
+ {
+ "BriefDescription": "Instructions per cycles",
+ "MetricExpr": "PM_INST_CMPL / PM_CYC",
+ "MetricGroup": "general",
+ "MetricName": "ipc"
+ },
+ {
+ "BriefDescription": "% ITLB miss rate per inst",
+ "MetricExpr": "PM_ITLB_MISS / PM_RUN_INST_CMPL *100",
+ "MetricGroup": "general",
+ "MetricName": "itlb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 load misses per L1 load ref",
+ "MetricExpr": "PM_LD_MISS_L1 / PM_LD_REF_L1 * 100",
+ "MetricGroup": "general",
+ "MetricName": "l1_ld_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 store misses per run instruction",
+ "MetricExpr": "PM_ST_MISS_L1 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l1_st_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 store misses per L1 store ref",
+ "MetricExpr": "PM_ST_MISS_L1 / PM_ST_FIN * 100",
+ "MetricGroup": "general",
+ "MetricName": "l1_st_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "L2 Instruction Miss Rate (per instruction)(%)",
+ "MetricExpr": "PM_INST_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l2_inst_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L2 dmand Load Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DATA_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l2_ld_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L2 PTEG Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DPTEG_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l2_pteg_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L3 Instruction Miss Rate (per instruction)(%)",
+ "MetricExpr": "PM_INST_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l3_inst_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L3 demand Load Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DATA_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l3_ld_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L3 PTEG Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DPTEG_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l3_pteg_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Run cycles per cycle",
+ "MetricExpr": "PM_RUN_CYC / PM_CYC*100",
+ "MetricGroup": "general",
+ "MetricName": "run_cycles_percent"
+ },
+ {
+ "BriefDescription": "Instruction dispatch-to-completion ratio",
+ "MetricExpr": "PM_INST_DISP / PM_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "speculation"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Modified) per Inst",
+ "MetricExpr": "PM_INST_FROM_DL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Shared) per Inst",
+ "MetricExpr": "PM_INST_FROM_DL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L4 per Inst",
+ "MetricExpr": "PM_INST_FROM_DL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant Memory per Inst",
+ "MetricExpr": "PM_INST_FROM_DMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L21_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l21_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L21_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l21_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L2 per Inst",
+ "MetricExpr": "PM_INST_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3, other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L31_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l31_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3 other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L31_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l31_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L3 per Inst",
+ "MetricExpr": "PM_INST_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local L4 per Inst",
+ "MetricExpr": "PM_INST_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local Memory per Inst",
+ "MetricExpr": "PM_INST_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Modified) per Inst",
+ "MetricExpr": "PM_INST_FROM_RL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Shared) per Inst",
+ "MetricExpr": "PM_INST_FROM_RL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L4 per Inst",
+ "MetricExpr": "PM_INST_FROM_RL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote Memory per Inst",
+ "MetricExpr": "PM_INST_FROM_RMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rmem_rate_percent"
+ },
+ {
+ "BriefDescription": "Instruction Cache Miss Rate (Per run Instruction)(%)",
+ "MetricExpr": "PM_L1_ICACHE_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "l1_inst_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Icache Fetchs per Icache Miss",
+ "MetricExpr": "(PM_L1_ICACHE_MISS - PM_IC_PREF_WRITE) / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "icache_miss_reload"
+ },
+ {
+ "BriefDescription": "% of ICache reloads due to prefetch",
+ "MetricExpr": "PM_IC_PREF_WRITE * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "icache_pref_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Modified)",
+ "MetricExpr": "PM_INST_FROM_DL2L3_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Shared)",
+ "MetricExpr": "PM_INST_FROM_DL2L3_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L4",
+ "MetricExpr": "PM_INST_FROM_DL4 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dl4_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant Memory",
+ "MetricExpr": "PM_INST_FROM_DMEM * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dmem_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core",
+ "MetricExpr": "PM_INST_FROM_L21_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l21_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core",
+ "MetricExpr": "PM_INST_FROM_L21_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l21_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L2",
+ "MetricExpr": "PM_INST_FROM_L2 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l2_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3, other core",
+ "MetricExpr": "PM_INST_FROM_L31_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l31_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3, other core",
+ "MetricExpr": "PM_INST_FROM_L31_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l31_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L3",
+ "MetricExpr": "PM_INST_FROM_L3 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l3_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local L4",
+ "MetricExpr": "PM_INST_FROM_LL4 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_ll4_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local Memory",
+ "MetricExpr": "PM_INST_FROM_LMEM * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_lmem_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Modified)",
+ "MetricExpr": "PM_INST_FROM_RL2L3_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Shared)",
+ "MetricExpr": "PM_INST_FROM_RL2L3_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L4",
+ "MetricExpr": "PM_INST_FROM_RL4 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rl4_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote Memory",
+ "MetricExpr": "PM_INST_FROM_RMEM * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rmem_percent"
+ },
+ {
+ "BriefDescription": "%L2 Modified CO Cache read Utilization (4 pclks per disp attempt)",
+ "MetricExpr": "((PM_L2_CASTOUT_MOD/2)*4)/ PM_RUN_CYC * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_co_m_rd_util"
+ },
+ {
+ "BriefDescription": "L2 dcache invalidates per run inst (per core)",
+ "MetricExpr": "(PM_L2_DC_INV / 2) / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_dc_inv_rate_percent"
+ },
+ {
+ "BriefDescription": "Demand load misses as a % of L2 LD dispatches (per thread)",
+ "MetricExpr": "PM_L1_DCACHE_RELOAD_VALID / (PM_L2_LD / 2) * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_dem_ld_disp_percent"
+ },
+ {
+ "BriefDescription": "L2 Icache invalidates per run inst (per core)",
+ "MetricExpr": "(PM_L2_IC_INV / 2) / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_ic_inv_rate_percent"
+ },
+ {
+ "BriefDescription": "L2 Inst misses as a % of total L2 Inst dispatches (per thread)",
+ "MetricExpr": "PM_L2_INST_MISS / PM_L2_INST * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_inst_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "Average number of cycles between L2 Load hits",
+ "MetricExpr": "(PM_L2_LD_HIT / PM_RUN_CYC) / 2",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_ld_hit_frequency"
+ },
+ {
+ "BriefDescription": "Average number of cycles between L2 Load misses",
+ "MetricExpr": "(PM_L2_LD_MISS / PM_RUN_CYC) / 2",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_ld_miss_frequency"
+ },
+ {
+ "BriefDescription": "L2 Load misses as a % of total L2 Load dispatches (per thread)",
+ "MetricExpr": "PM_L2_LD_MISS / PM_L2_LD * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_ld_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "% L2 load disp attempts Cache read Utilization (4 pclks per disp attempt)",
+ "MetricExpr": "((PM_L2_RCLD_DISP/2)*4)/ PM_RUN_CYC * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_ld_rd_util"
+ },
+ {
+ "BriefDescription": "L2 load misses that require a cache write (4 pclks per disp attempt) % of pclks",
+ "MetricExpr": "((( PM_L2_LD_DISP - PM_L2_LD_HIT)/2)*4)/ PM_RUN_CYC * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_ldmiss_wr_util"
+ },
+ {
+ "BriefDescription": "L2 local pump prediction success",
+ "MetricExpr": "PM_L2_LOC_GUESS_CORRECT / (PM_L2_LOC_GUESS_CORRECT + PM_L2_LOC_GUESS_WRONG) * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_local_pred_correct_percent"
+ },
+ {
+ "BriefDescription": "L2 COs that were in M,Me,Mu state as a % of all L2 COs",
+ "MetricExpr": "PM_L2_CASTOUT_MOD / (PM_L2_CASTOUT_MOD + PM_L2_CASTOUT_SHR) * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_mod_co_percent"
+ },
+ {
+ "BriefDescription": "% of L2 Load RC dispatch atampts that failed because of address collisions and cclass conflicts",
+ "MetricExpr": "(PM_L2_RCLD_DISP_FAIL_ADDR )/ PM_L2_RCLD_DISP * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_rc_ld_disp_addr_fail_percent"
+ },
+ {
+ "BriefDescription": "% of L2 Load RC dispatch attempts that failed",
+ "MetricExpr": "(PM_L2_RCLD_DISP_FAIL_ADDR + PM_L2_RCLD_DISP_FAIL_OTHER)/ PM_L2_RCLD_DISP * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_rc_ld_disp_fail_percent"
+ },
+ {
+ "BriefDescription": "% of L2 Store RC dispatch atampts that failed because of address collisions and cclass conflicts",
+ "MetricExpr": "PM_L2_RCST_DISP_FAIL_ADDR / PM_L2_RCST_DISP * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_rc_st_disp_addr_fail_percent"
+ },
+ {
+ "BriefDescription": "% of L2 Store RC dispatch attempts that failed",
+ "MetricExpr": "(PM_L2_RCST_DISP_FAIL_ADDR + PM_L2_RCST_DISP_FAIL_OTHER)/ PM_L2_RCST_DISP * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_rc_st_disp_fail_percent"
+ },
+ {
+ "BriefDescription": "L2 Cache Read Utilization (per core)",
+ "MetricExpr": "(((PM_L2_RCLD_DISP/2)*4)/ PM_RUN_CYC * 100) + (((PM_L2_RCST_DISP/2)*4)/PM_RUN_CYC * 100) + (((PM_L2_CASTOUT_MOD/2)*4)/PM_RUN_CYC * 100)",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_rd_util_percent"
+ },
+ {
+ "BriefDescription": "L2 COs that were in T,Te,Si,S state as a % of all L2 COs",
+ "MetricExpr": "PM_L2_CASTOUT_SHR / (PM_L2_CASTOUT_MOD + PM_L2_CASTOUT_SHR) * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_shr_co_percent"
+ },
+ {
+ "BriefDescription": "L2 Store misses as a % of total L2 Store dispatches (per thread)",
+ "MetricExpr": "PM_L2_ST_MISS / PM_L2_ST * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_st_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "% L2 store disp attempts Cache read Utilization (4 pclks per disp attempt)",
+ "MetricExpr": "((PM_L2_RCST_DISP/2)*4) / PM_RUN_CYC * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_st_rd_util"
+ },
+ {
+ "BriefDescription": "L2 stores that require a cache write (4 pclks per disp attempt) % of pclks",
+ "MetricExpr": "((PM_L2_ST_DISP/2)*4) / PM_RUN_CYC * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_st_wr_util"
+ },
+ {
+ "BriefDescription": "L2 Cache Write Utilization (per core)",
+ "MetricExpr": "((((PM_L2_LD_DISP - PM_L2_LD_HIT)/2)*4) / PM_RUN_CYC * 100) + (((PM_L2_ST_DISP/2)*4) / PM_RUN_CYC * 100)",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_wr_util_percent"
+ },
+ {
+ "BriefDescription": "Average number of cycles between L3 Load hits",
+ "MetricExpr": "(PM_L3_LD_HIT / PM_RUN_CYC) / 2",
+ "MetricGroup": "l3_stats",
+ "MetricName": "l3_ld_hit_frequency"
+ },
+ {
+ "BriefDescription": "Average number of cycles between L3 Load misses",
+ "MetricExpr": "(PM_L3_LD_MISS / PM_RUN_CYC) / 2",
+ "MetricGroup": "l3_stats",
+ "MetricName": "l3_ld_miss_frequency"
+ },
+ {
+ "BriefDescription": "Average number of Write-in machines used. 1 of 8 WI machines is sampled every L3 cycle",
+ "MetricExpr": "(PM_L3_WI_USAGE / PM_RUN_CYC) * 8",
+ "MetricGroup": "l3_stats",
+ "MetricName": "l3_wi_usage"
+ },
+ {
+ "BriefDescription": "Average icache miss latency",
+ "MetricExpr": "PM_IC_DEMAND_CYC / PM_IC_DEMAND_REQ",
+ "MetricGroup": "latency",
+ "MetricName": "average_il1_miss_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 remote Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DL2L3_MOD_CYC/ PM_MRK_DATA_FROM_DL2L3_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "dl2l3_mod_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 distant Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DL2L3_SHR_CYC/ PM_MRK_DATA_FROM_DL2L3_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "dl2l3_shr_latency"
+ },
+ {
+ "BriefDescription": "Distant L4 average load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DL4_CYC/ PM_MRK_DATA_FROM_DL4",
+ "MetricGroup": "latency",
+ "MetricName": "dl4_latency"
+ },
+ {
+ "BriefDescription": "Marked Dmem Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DMEM_CYC/ PM_MRK_DATA_FROM_DMEM",
+ "MetricGroup": "latency",
+ "MetricName": "dmem_latency"
+ },
+ {
+ "BriefDescription": "average L1 miss latency using marked events",
+ "MetricExpr": "PM_MRK_LD_MISS_L1_CYC / PM_MRK_LD_MISS_L1",
+ "MetricGroup": "latency",
+ "MetricName": "estimated_dl1miss_latency"
+ },
+ {
+ "BriefDescription": "Marked L21 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L21_MOD_CYC/ PM_MRK_DATA_FROM_L21_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "l21_mod_latency"
+ },
+ {
+ "BriefDescription": "Marked L21 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L21_SHR_CYC/ PM_MRK_DATA_FROM_L21_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "l21_shr_latency"
+ },
+ {
+ "BriefDescription": "Marked L2 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L2_CYC/ PM_MRK_DATA_FROM_L2",
+ "MetricGroup": "latency",
+ "MetricName": "l2_latency"
+ },
+ {
+ "BriefDescription": "Marked L31 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L31_MOD_CYC/ PM_MRK_DATA_FROM_L31_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "l31_mod_latency"
+ },
+ {
+ "BriefDescription": "Marked L31 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L31_SHR_CYC/ PM_MRK_DATA_FROM_L31_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "l31_shr_latency"
+ },
+ {
+ "BriefDescription": "Marked L3 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L3_CYC/ PM_MRK_DATA_FROM_L3",
+ "MetricGroup": "latency",
+ "MetricName": "l3_latency"
+ },
+ {
+ "BriefDescription": "Local L4 average load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_LL4_CYC/ PM_MRK_DATA_FROM_LL4",
+ "MetricGroup": "latency",
+ "MetricName": "ll4_latency"
+ },
+ {
+ "BriefDescription": "Marked Lmem Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_LMEM_CYC/ PM_MRK_DATA_FROM_LMEM",
+ "MetricGroup": "latency",
+ "MetricName": "lmem_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 remote Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RL2L3_MOD_CYC/ PM_MRK_DATA_FROM_RL2L3_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "rl2l3_mod_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 remote Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RL2L3_SHR_CYC/ PM_MRK_DATA_FROM_RL2L3_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "rl2l3_shr_latency"
+ },
+ {
+ "BriefDescription": "Remote L4 average load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RL4_CYC/ PM_MRK_DATA_FROM_RL4",
+ "MetricGroup": "latency",
+ "MetricName": "rl4_latency"
+ },
+ {
+ "BriefDescription": "Marked Rmem Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RMEM_CYC/ PM_MRK_DATA_FROM_RMEM",
+ "MetricGroup": "latency",
+ "MetricName": "rmem_latency"
+ },
+ {
+ "BriefDescription": "ERAT miss reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_ERAT_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "erat_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "LHS reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_LHS *100/ PM_RUN_INST_CMPL",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lhs_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "ERAT miss reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_LMQ_FULL * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lmq_full_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "ERAT miss reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_LMQ_FULL * 100 / PM_LD_REF_L1",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lmq_full_reject_ratio_percent"
+ },
+ {
+ "BriefDescription": "L4 locality(%)",
+ "MetricExpr": "PM_DATA_FROM_LL4 * 100 / (PM_DATA_FROM_LL4 + PM_DATA_FROM_RL4 + PM_DATA_FROM_DL4)",
+ "MetricGroup": "memory",
+ "MetricName": "l4_locality"
+ },
+ {
+ "BriefDescription": "Ratio of reloads from local L4 to distant L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 / PM_DATA_FROM_DL4",
+ "MetricGroup": "memory",
+ "MetricName": "ld_ll4_per_ld_dmem"
+ },
+ {
+ "BriefDescription": "Ratio of reloads from local L4 to remote+distant L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 / (PM_DATA_FROM_DL4 + PM_DATA_FROM_RL4)",
+ "MetricGroup": "memory",
+ "MetricName": "ld_ll4_per_ld_mem"
+ },
+ {
+ "BriefDescription": "Ratio of reloads from local L4 to remote L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 / PM_DATA_FROM_RL4",
+ "MetricGroup": "memory",
+ "MetricName": "ld_ll4_per_ld_rl4"
+ },
+ {
+ "BriefDescription": "Number of loads from local memory per loads from distant memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM / PM_DATA_FROM_DMEM",
+ "MetricGroup": "memory",
+ "MetricName": "ld_lmem_per_ld_dmem"
+ },
+ {
+ "BriefDescription": "Number of loads from local memory per loads from remote and distant memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM / (PM_DATA_FROM_DMEM + PM_DATA_FROM_RMEM)",
+ "MetricGroup": "memory",
+ "MetricName": "ld_lmem_per_ld_mem"
+ },
+ {
+ "BriefDescription": "Number of loads from local memory per loads from remote memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM / PM_DATA_FROM_RMEM",
+ "MetricGroup": "memory",
+ "MetricName": "ld_lmem_per_ld_rmem"
+ },
+ {
+ "BriefDescription": "Number of loads from remote memory per loads from distant memory",
+ "MetricExpr": "PM_DATA_FROM_RMEM / PM_DATA_FROM_DMEM",
+ "MetricGroup": "memory",
+ "MetricName": "ld_rmem_per_ld_dmem"
+ },
+ {
+ "BriefDescription": "Memory locality",
+ "MetricExpr": "PM_DATA_FROM_LMEM * 100/ (PM_DATA_FROM_LMEM + PM_DATA_FROM_RMEM + PM_DATA_FROM_DMEM)",
+ "MetricGroup": "memory",
+ "MetricName": "mem_locality_percent"
+ },
+ {
+ "BriefDescription": "L1 Prefetches issued by the prefetch machine per instruction (per thread)",
+ "MetricExpr": "PM_L1_PREF / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "prefetch",
+ "MetricName": "l1_prefetch_rate_percent"
+ },
+ {
+ "BriefDescription": "DERAT Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_LSU_DERAT_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "derat_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Modified) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Shared) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L4 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant Memory per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L21_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l21_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L21_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l21_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L2 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L31_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l31_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L31_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l31_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L3 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local L4 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local Memory per inst",
+ "MetricExpr": "PM_DPTEG_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Modified) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Shared) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L4 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote Memory per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT misses that result in an ERAT reload",
+ "MetricExpr": "PM_DTLB_MISS * 100 / PM_LSU_DERAT_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "derat_miss_reload_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Modified)",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Shared)",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L4",
+ "MetricExpr": "PM_DPTEG_FROM_DL4 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dl4_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant Memory",
+ "MetricExpr": "PM_DPTEG_FROM_DMEM * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dmem_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L21_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l21_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L21_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l21_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L2",
+ "MetricExpr": "PM_DPTEG_FROM_L2 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l2_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L31_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l31_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L31_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l31_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L3",
+ "MetricExpr": "PM_DPTEG_FROM_L3 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l3_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local L4",
+ "MetricExpr": "PM_DPTEG_FROM_LL4 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_ll4_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local Memory",
+ "MetricExpr": "PM_DPTEG_FROM_LMEM * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_lmem_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Modified)",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Shared)",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L4",
+ "MetricExpr": "PM_DPTEG_FROM_RL4 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rl4_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote Memory",
+ "MetricExpr": "PM_DPTEG_FROM_RMEM * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rmem_percent"
+ },
+ {
+ "BriefDescription": "% DERAT miss rate for 4K page per inst",
+ "MetricExpr": "PM_DERAT_MISS_4K * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "derat_4k_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 4K page",
+ "MetricExpr": "PM_DERAT_MISS_4K / PM_LSU_DERAT_MISS",
+ "MetricGroup": "translation",
+ "MetricName": "derat_4k_miss_ratio"
+ },
+ {
+ "BriefDescription": "% DERAT miss ratio for 64K page per inst",
+ "MetricExpr": "PM_DERAT_MISS_64K * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "derat_64k_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 64K page",
+ "MetricExpr": "PM_DERAT_MISS_64K / PM_LSU_DERAT_MISS",
+ "MetricGroup": "translation",
+ "MetricName": "derat_64k_miss_ratio"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio",
+ "MetricExpr": "PM_LSU_DERAT_MISS / PM_LSU_DERAT_MISS",
+ "MetricGroup": "translation",
+ "MetricName": "derat_miss_ratio"
+ },
+ {
+ "BriefDescription": "% DSLB_Miss_Rate per inst",
+ "MetricExpr": "PM_DSLB_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "dslb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% ISLB miss rate per inst",
+ "MetricExpr": "PM_ISLB_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "islb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "ANY_SYNC_STALL_CPI",
+ "MetricExpr": "PM_CMPLU_STALL_ANY_SYNC / PM_RUN_INST_CMPL",
+ "MetricName": "any_sync_stall_cpi"
+ },
+ {
+ "BriefDescription": "Avg. more than 1 instructions completed",
+ "MetricExpr": "PM_INST_CMPL / PM_1PLUS_PPC_CMPL",
+ "MetricName": "average_completed_instruction_set_size"
+ },
+ {
+ "BriefDescription": "% Branches per instruction",
+ "MetricExpr": "PM_BRU_FIN / PM_RUN_INST_CMPL",
+ "MetricName": "branches_per_inst"
+ },
+ {
+ "BriefDescription": "Cycles in which at least one instruction completes in this thread",
+ "MetricExpr": "PM_1PLUS_PPC_CMPL/PM_RUN_INST_CMPL",
+ "MetricName": "completion_cpi"
+ },
+ {
+ "BriefDescription": "cycles",
+ "MetricExpr": "PM_RUN_CYC",
+ "MetricName": "custom_secs"
+ },
+ {
+ "BriefDescription": "Percentage Cycles atleast one instruction dispatched",
+ "MetricExpr": "PM_1PLUS_PPC_DISP / PM_CYC * 100",
+ "MetricName": "cycles_atleast_one_inst_dispatched_percent"
+ },
+ {
+ "BriefDescription": "Cycles per instruction group",
+ "MetricExpr": "PM_CYC / PM_1PLUS_PPC_CMPL",
+ "MetricName": "cycles_per_completed_instructions_set"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Distant L4",
+ "MetricExpr": "PM_DATA_FROM_DL4 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricName": "dl1_reload_from_dl4_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant L4 per Inst",
+ "MetricExpr": "PM_DATA_FROM_DL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dl1_reload_from_dl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core per Inst",
+ "MetricExpr": "(PM_DATA_FROM_L31_MOD + PM_DATA_FROM_L31_SHR) * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dl1_reload_from_l31_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Local L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricName": "dl1_reload_from_ll4_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Local L4 per Inst",
+ "MetricExpr": "PM_DATA_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dl1_reload_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Remote L4",
+ "MetricExpr": "PM_DATA_FROM_RL4 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricName": "dl1_reload_from_rl4_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Remote Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_RL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dl1_reload_from_rl4_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of DERAT reloads from L2",
+ "MetricExpr": "PM_DPTEG_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dpteg_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of DERAT reloads from L3",
+ "MetricExpr": "PM_DPTEG_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dpteg_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "Cycles in which the oldest instruction is finished and ready to complete for waiting to get through the completion pipe",
+ "MetricExpr": "PM_NTC_ALL_FIN / PM_RUN_INST_CMPL",
+ "MetricName": "finish_to_cmpl_cpi"
+ },
+ {
+ "BriefDescription": "Total Fixed point operations",
+ "MetricExpr": "PM_FXU_FIN/PM_RUN_INST_CMPL",
+ "MetricName": "fixed_per_inst"
+ },
+ {
+ "BriefDescription": "All FXU Busy",
+ "MetricExpr": "PM_FXU_BUSY / PM_CYC",
+ "MetricName": "fxu_all_busy"
+ },
+ {
+ "BriefDescription": "All FXU Idle",
+ "MetricExpr": "PM_FXU_IDLE / PM_CYC",
+ "MetricName": "fxu_all_idle"
+ },
+ {
+ "BriefDescription": "Ict empty for this thread due to branch mispred",
+ "MetricExpr": "PM_ICT_NOSLOT_BR_MPRED/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_br_mpred_cpi"
+ },
+ {
+ "BriefDescription": "Ict empty for this thread due to Icache Miss and branch mispred",
+ "MetricExpr": "PM_ICT_NOSLOT_BR_MPRED_ICMISS/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_br_mpred_icmiss_cpi"
+ },
+ {
+ "BriefDescription": "ICT other stalls",
+ "MetricExpr": "(PM_ICT_NOSLOT_CYC - PM_ICT_NOSLOT_IC_MISS - PM_ICT_NOSLOT_BR_MPRED_ICMISS - PM_ICT_NOSLOT_BR_MPRED - PM_ICT_NOSLOT_DISP_HELD)/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_cyc_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles in which the NTC instruciton is held at dispatch for any reason",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_disp_held_cpi"
+ },
+ {
+ "BriefDescription": "Ict empty for this thread due to dispatch holds because the History Buffer was full. Could be GPR/VSR/VMR/FPR/CR/XVF",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_HB_FULL/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_disp_held_hb_full_cpi"
+ },
+ {
+ "BriefDescription": "Ict empty for this thread due to dispatch hold on this thread due to Issue q full, BRQ full, XVCF Full, Count cache, Link, Tar full",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_ISSQ/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_disp_held_issq_cpi"
+ },
+ {
+ "BriefDescription": "ICT_NOSLOT_DISP_HELD_OTHER_CPI",
+ "MetricExpr": "(PM_ICT_NOSLOT_DISP_HELD - PM_ICT_NOSLOT_DISP_HELD_HB_FULL - PM_ICT_NOSLOT_DISP_HELD_SYNC - PM_ICT_NOSLOT_DISP_HELD_TBEGIN - PM_ICT_NOSLOT_DISP_HELD_ISSQ)/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_disp_held_other_cpi"
+ },
+ {
+ "BriefDescription": "Dispatch held due to a synchronizing instruction at dispatch",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_SYNC/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_disp_held_sync_cpi"
+ },
+ {
+ "BriefDescription": "the NTC instruction is being held at dispatch because it is a tbegin instruction and there is an older tbegin in the pipeline that must complete before the younger tbegin can dispatch",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_TBEGIN/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_disp_held_tbegin_cpi"
+ },
+ {
+ "BriefDescription": "ICT_NOSLOT_IC_L2_CPI",
+ "MetricExpr": "(PM_ICT_NOSLOT_IC_MISS - PM_ICT_NOSLOT_IC_L3 - PM_ICT_NOSLOT_IC_L3MISS)/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_ic_l2_cpi"
+ },
+ {
+ "BriefDescription": "Ict empty for this thread due to icache misses that were sourced from the local L3",
+ "MetricExpr": "PM_ICT_NOSLOT_IC_L3/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_ic_l3_cpi"
+ },
+ {
+ "BriefDescription": "Ict empty for this thread due to icache misses that were sourced from beyond the local L3. The source could be local/remote/distant memory or another core's cache",
+ "MetricExpr": "PM_ICT_NOSLOT_IC_L3MISS/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_ic_l3miss_cpi"
+ },
+ {
+ "BriefDescription": "Ict empty for this thread due to Icache Miss",
+ "MetricExpr": "PM_ICT_NOSLOT_IC_MISS/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_ic_miss_cpi"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from L2",
+ "MetricExpr": "PM_IPTEG_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from L3",
+ "MetricExpr": "PM_IPTEG_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from local memory",
+ "MetricExpr": "PM_IPTEG_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from local memory",
+ "MetricExpr": "PM_IPTEG_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "Average number of Castout machines used. 1 of 16 CO machines is sampled every L2 cycle",
+ "MetricExpr": "PM_CO_USAGE / PM_RUN_CYC * 16",
+ "MetricName": "l2_co_usage"
+ },
+ {
+ "BriefDescription": "Percent of instruction reads out of all L2 commands",
+ "MetricExpr": "PM_ISIDE_DISP * 100 / (PM_L2_ST + PM_L2_LD + PM_ISIDE_DISP)",
+ "MetricName": "l2_instr_commands_percent"
+ },
+ {
+ "BriefDescription": "Percent of loads out of all L2 commands",
+ "MetricExpr": "PM_L2_LD * 100 / (PM_L2_ST + PM_L2_LD + PM_ISIDE_DISP)",
+ "MetricName": "l2_ld_commands_percent"
+ },
+ {
+ "BriefDescription": "Rate of L2 store dispatches that failed per core",
+ "MetricExpr": "100 * (PM_L2_RCST_DISP_FAIL_ADDR + PM_L2_RCST_DISP_FAIL_OTHER)/2 / PM_RUN_INST_CMPL",
+ "MetricName": "l2_rc_st_disp_fail_rate_percent"
+ },
+ {
+ "BriefDescription": "Average number of Read/Claim machines used. 1 of 16 RC machines is sampled every L2 cycle",
+ "MetricExpr": "PM_RC_USAGE / PM_RUN_CYC * 16",
+ "MetricName": "l2_rc_usage"
+ },
+ {
+ "BriefDescription": "Average number of Snoop machines used. 1 of 8 SN machines is sampled every L2 cycle",
+ "MetricExpr": "PM_SN_USAGE / PM_RUN_CYC * 8",
+ "MetricName": "l2_sn_usage"
+ },
+ {
+ "BriefDescription": "Percent of stores out of all L2 commands",
+ "MetricExpr": "PM_L2_ST * 100 / (PM_L2_ST + PM_L2_LD + PM_ISIDE_DISP)",
+ "MetricName": "l2_st_commands_percent"
+ },
+ {
+ "BriefDescription": "Rate of L2 store dispatches that failed per core",
+ "MetricExpr": "100 * (PM_L2_RCST_DISP_FAIL_ADDR + PM_L2_RCST_DISP_FAIL_OTHER)/2 / PM_RUN_INST_CMPL",
+ "MetricName": "l2_st_disp_fail_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of L2 dispatches per core",
+ "MetricExpr": "100 * PM_L2_RCST_DISP/2 / PM_RUN_INST_CMPL",
+ "MetricName": "l2_st_disp_rate_percent"
+ },
+ {
+ "BriefDescription": "Marked L31 Load latency",
+ "MetricExpr": "(PM_MRK_DATA_FROM_L31_SHR_CYC + PM_MRK_DATA_FROM_L31_MOD_CYC) / (PM_MRK_DATA_FROM_L31_SHR + PM_MRK_DATA_FROM_L31_MOD)",
+ "MetricName": "l31_latency"
+ },
+ {
+ "BriefDescription": "PCT instruction loads",
+ "MetricExpr": "PM_LD_REF_L1 / PM_RUN_INST_CMPL",
+ "MetricName": "loads_per_inst"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses",
+ "MetricExpr": "PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL",
+ "MetricName": "lsu_stall_dcache_miss_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall because a different thread was using the completion pipe",
+ "MetricExpr": "(PM_CMPLU_STALL_THRD - PM_CMPLU_STALL_EXCEPTION - PM_CMPLU_STALL_ANY_SYNC - PM_CMPLU_STALL_SYNC_PMU_INT - PM_CMPLU_STALL_SPEC_FINISH - PM_CMPLU_STALL_FLUSH_ANY_THREAD - PM_CMPLU_STALL_LSU_FLUSH_NEXT - PM_CMPLU_STALL_NESTED_TBEGIN - PM_CMPLU_STALL_NESTED_TEND - PM_CMPLU_STALL_MTFPSCR)/PM_RUN_INST_CMPL",
+ "MetricName": "other_thread_cmpl_stall"
+ },
+ {
+ "BriefDescription": "PCT instruction stores",
+ "MetricExpr": "PM_ST_FIN / PM_RUN_INST_CMPL",
+ "MetricName": "stores_per_inst"
+ },
+ {
+ "BriefDescription": "ANY_SYNC_STALL_CPI",
+ "MetricExpr": "PM_CMPLU_STALL_SYNC_PMU_INT / PM_RUN_INST_CMPL",
+ "MetricName": "sync_pmu_int_stall_cpi"
+ }
+]
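Each entry above pairs a MetricExpr string of raw POWER9 PMU counter names with a MetricName, and perf evaluates the expression once the events have been counted. A minimal sketch of that arithmetic in Python, using made-up counter totals (the names mirror the expressions above; the numbers are illustrative only, not measured values):

    # Hypothetical raw counter totals; real values come from the PMU via perf.
    counters = {
        "PM_CYC": 2000000,
        "PM_INST_CMPL": 1600000,
        "PM_MRK_DATA_FROM_LMEM_CYC": 90000,
        "PM_MRK_DATA_FROM_LMEM": 300,
    }

    # "cpi" above: PM_CYC / PM_INST_CMPL
    cpi = counters["PM_CYC"] / float(counters["PM_INST_CMPL"])        # 1.25

    # "lmem_latency" above: PM_MRK_DATA_FROM_LMEM_CYC / PM_MRK_DATA_FROM_LMEM
    lmem_latency = (counters["PM_MRK_DATA_FROM_LMEM_CYC"]
                    / float(counters["PM_MRK_DATA_FROM_LMEM"]))       # 300.0 cycles

    print("cpi=%.2f lmem_latency=%.1f" % (cpi, lmem_latency))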
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
index 36c903faed0b..71e9737f4614 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
@@ -73,7 +73,7 @@
},
{
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS_PS + MEM_LOAD_RETIRED.FB_HIT_PS )",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
diff --git a/tools/perf/scripts/Build b/tools/perf/scripts/Build
index 41efd7e368b3..68d4b54574ad 100644
--- a/tools/perf/scripts/Build
+++ b/tools/perf/scripts/Build
@@ -1,2 +1,2 @@
-libperf-$(CONFIG_LIBPERL) += perl/Perf-Trace-Util/
-libperf-$(CONFIG_LIBPYTHON) += python/Perf-Trace-Util/
+perf-$(CONFIG_LIBPERL) += perl/Perf-Trace-Util/
+perf-$(CONFIG_LIBPYTHON) += python/Perf-Trace-Util/
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Build b/tools/perf/scripts/perl/Perf-Trace-Util/Build
index 34faecf774ae..db0036129307 100644
--- a/tools/perf/scripts/perl/Perf-Trace-Util/Build
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/Build
@@ -1,4 +1,4 @@
-libperf-y += Context.o
+perf-y += Context.o
CFLAGS_Context.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes
CFLAGS_Context.o += -Wno-unused-parameter -Wno-nested-externs -Wno-undef
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/Build b/tools/perf/scripts/python/Perf-Trace-Util/Build
index aefc15c9444a..7d0e33ce6aba 100644
--- a/tools/perf/scripts/python/Perf-Trace-Util/Build
+++ b/tools/perf/scripts/python/Perf-Trace-Util/Build
@@ -1,3 +1,3 @@
-libperf-y += Context.o
+perf-y += Context.o
CFLAGS_Context.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs
diff --git a/tools/perf/scripts/python/check-perf-trace.py b/tools/perf/scripts/python/check-perf-trace.py
index 334599c6032c..d2c22954800d 100644
--- a/tools/perf/scripts/python/check-perf-trace.py
+++ b/tools/perf/scripts/python/check-perf-trace.py
@@ -7,6 +7,8 @@
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
+from __future__ import print_function
+
import os
import sys
@@ -19,64 +21,64 @@ from perf_trace_context import *
unhandled = autodict()
def trace_begin():
- print "trace_begin"
+ print("trace_begin")
pass
def trace_end():
- print_unhandled()
+ print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, vec):
- print_header(event_name, common_cpu, common_secs, common_nsecs,
- common_pid, common_comm)
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, vec):
+ print_header(event_name, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm)
- print_uncommon(context)
+ print_uncommon(context)
- print "vec=%s\n" % \
- (symbol_str("irq__softirq_entry", "vec", vec)),
+ print("vec=%s" % (symbol_str("irq__softirq_entry", "vec", vec)))
def kmem__kmalloc(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, call_site, ptr, bytes_req, bytes_alloc,
- gfp_flags):
- print_header(event_name, common_cpu, common_secs, common_nsecs,
- common_pid, common_comm)
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, call_site, ptr, bytes_req, bytes_alloc,
+ gfp_flags):
+ print_header(event_name, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm)
- print_uncommon(context)
+ print_uncommon(context)
- print "call_site=%u, ptr=%u, bytes_req=%u, " \
- "bytes_alloc=%u, gfp_flags=%s\n" % \
+ print("call_site=%u, ptr=%u, bytes_req=%u, "
+ "bytes_alloc=%u, gfp_flags=%s" %
(call_site, ptr, bytes_req, bytes_alloc,
-
- flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
+ flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)))
def trace_unhandled(event_name, context, event_fields_dict):
- try:
- unhandled[event_name] += 1
- except TypeError:
- unhandled[event_name] = 1
+ try:
+ unhandled[event_name] += 1
+ except TypeError:
+ unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
- print "%-20s %5u %05u.%09u %8u %-20s " % \
- (event_name, cpu, secs, nsecs, pid, comm),
+ print("%-20s %5u %05u.%09u %8u %-20s " %
+ (event_name, cpu, secs, nsecs, pid, comm),
+ end=' ')
# print trace fields not included in handler args
def print_uncommon(context):
- print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
- % (common_pc(context), trace_flag_str(common_flags(context)), \
- common_lock_depth(context))
+ print("common_preempt_count=%d, common_flags=%s, "
+ "common_lock_depth=%d, " %
+ (common_pc(context), trace_flag_str(common_flags(context)),
+ common_lock_depth(context)))
def print_unhandled():
- keys = unhandled.keys()
- if not keys:
- return
+ keys = unhandled.keys()
+ if not keys:
+ return
- print "\nunhandled events:\n\n",
+ print("\nunhandled events:\n")
- print "%-40s %10s\n" % ("event", "count"),
- print "%-40s %10s\n" % ("----------------------------------------", \
- "-----------"),
+ print("%-40s %10s" % ("event", "count"))
+ print("%-40s %10s" % ("----------------------------------------",
+ "-----------"))
- for event_name in keys:
- print "%-40s %10d\n" % (event_name, unhandled[event_name])
+ for event_name in keys:
+ print("%-40s %10d\n" % (event_name, unhandled[event_name]))
diff --git a/tools/perf/scripts/python/compaction-times.py b/tools/perf/scripts/python/compaction-times.py
index 239cb0568ec3..2560a042dc6f 100644
--- a/tools/perf/scripts/python/compaction-times.py
+++ b/tools/perf/scripts/python/compaction-times.py
@@ -216,15 +216,15 @@ def compaction__mm_compaction_migratepages(event_name, context, common_cpu,
pair(nr_migrated, nr_failed), None, None)
def compaction__mm_compaction_isolate_freepages(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
chead.increment_pending(common_pid,
None, pair(nr_scanned, nr_taken), None)
def compaction__mm_compaction_isolate_migratepages(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
chead.increment_pending(common_pid,
None, None, pair(nr_scanned, nr_taken))
diff --git a/tools/perf/scripts/python/event_analyzing_sample.py b/tools/perf/scripts/python/event_analyzing_sample.py
index 4e843b9864ec..aa1e2cfa26a6 100644
--- a/tools/perf/scripts/python/event_analyzing_sample.py
+++ b/tools/perf/scripts/python/event_analyzing_sample.py
@@ -15,6 +15,8 @@
# for a x86 HW PMU event: PEBS with load latency data.
#
+from __future__ import print_function
+
import os
import sys
import math
@@ -37,7 +39,7 @@ con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None
def trace_begin():
- print "In trace_begin:\n"
+ print("In trace_begin:\n")
#
# Will create several tables at the start, pebs_ll is for PEBS data with
@@ -76,12 +78,12 @@ def process_event(param_dict):
name = param_dict["ev_name"]
# Symbol and dso info are not always resolved
- if (param_dict.has_key("dso")):
+ if ("dso" in param_dict):
dso = param_dict["dso"]
else:
dso = "Unknown_dso"
- if (param_dict.has_key("symbol")):
+ if ("symbol" in param_dict):
symbol = param_dict["symbol"]
else:
symbol = "Unknown_symbol"
@@ -102,7 +104,7 @@ def insert_db(event):
event.ip, event.status, event.dse, event.dla, event.lat))
def trace_end():
- print "In trace_end:\n"
+ print("In trace_end:\n")
# We show the basic info for the 2 type of event classes
show_general_events()
show_pebs_ll()
@@ -123,29 +125,29 @@ def show_general_events():
# Check the total record number in the table
count = con.execute("select count(*) from gen_events")
for t in count:
- print "There is %d records in gen_events table" % t[0]
+ print("There is %d records in gen_events table" % t[0])
if t[0] == 0:
return
- print "Statistics about the general events grouped by thread/symbol/dso: \n"
+ print("Statistics about the general events grouped by thread/symbol/dso: \n")
# Group by thread
commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
- print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
+ print("\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42))
for row in commq:
- print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%16s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by symbol
- print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
+ print("\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58))
symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
for row in symbolq:
- print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by dso
- print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
+ print("\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74))
dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
for row in dsoq:
- print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%40s %8d %s" % (row[0], row[1], num2sym(row[1])))
#
# This function just shows the basic info, and we could do more with the
@@ -156,35 +158,35 @@ def show_pebs_ll():
count = con.execute("select count(*) from pebs_ll")
for t in count:
- print "There is %d records in pebs_ll table" % t[0]
+ print("There is %d records in pebs_ll table" % t[0])
if t[0] == 0:
return
- print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"
+ print("Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n")
# Group by thread
commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
- print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
+ print("\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42))
for row in commq:
- print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%16s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by symbol
- print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
+ print("\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58))
symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
for row in symbolq:
- print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by dse
dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
- print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
+ print("\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58))
for row in dseq:
- print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by latency
latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
- print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
+ print("\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58))
for row in latq:
- print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))
def trace_unhandled(event_name, context, event_fields_dict):
- print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
+ print (' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())]))
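
Besides the print() conversion, this file drops dict.has_key(), which no longer exists in Python 3. A small sketch of the equivalent membership test (the dictionary contents here are made up for illustration):

    param_dict = {"ev_name": "cycles", "dso": "libc.so"}

    # dict.has_key() was removed in Python 3; "in" works on both versions.
    if "dso" in param_dict:
        dso = param_dict["dso"]
    else:
        dso = "Unknown_dso"
    # dict.get() expresses the same fallback in one line:
    symbol = param_dict.get("symbol", "Unknown_symbol")
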
diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
index 0564dd7377f2..390a351d15ea 100644
--- a/tools/perf/scripts/python/export-to-postgresql.py
+++ b/tools/perf/scripts/python/export-to-postgresql.py
@@ -394,7 +394,8 @@ if perf_db_export_calls:
'call_id bigint,'
'return_id bigint,'
'parent_call_path_id bigint,'
- 'flags integer)')
+ 'flags integer,'
+ 'parent_id bigint)')
do_query(query, 'CREATE VIEW machines_view AS '
'SELECT '
@@ -478,8 +479,9 @@ if perf_db_export_calls:
'branch_count,'
'call_id,'
'return_id,'
- 'CASE WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' ELSE \'\' END AS flags,'
- 'parent_call_path_id'
+ 'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE CAST ( flags AS VARCHAR(6) ) END AS flags,'
+ 'parent_call_path_id,'
+ 'calls.parent_id'
' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')
do_query(query, 'CREATE VIEW samples_view AS '
@@ -575,6 +577,7 @@ def trace_begin():
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
if perf_db_export_calls or perf_db_export_callchains:
call_path_table(0, 0, 0, 0)
+ call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
unhandled_count = 0
@@ -657,6 +660,7 @@ def trace_end():
'ADD CONSTRAINT returnfk FOREIGN KEY (return_id) REFERENCES samples (id),'
'ADD CONSTRAINT parent_call_pathfk FOREIGN KEY (parent_call_path_id) REFERENCES call_paths (id)')
do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
+ do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
if (unhandled_count):
print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
@@ -728,7 +732,7 @@ def call_path_table(cp_id, parent_id, symbol_id, ip, *x):
value = struct.pack(fmt, 4, 8, cp_id, 8, parent_id, 8, symbol_id, 8, ip)
call_path_file.write(value)
-def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, *x):
- fmt = "!hiqiqiqiqiqiqiqiqiqiqii"
- value = struct.pack(fmt, 11, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags)
+def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, parent_id, *x):
+ fmt = "!hiqiqiqiqiqiqiqiqiqiqiiiq"
+ value = struct.pack(fmt, 12, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags, 8, parent_id)
call_file.write(value)
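
The new parent_id column also changes the binary COPY record written by call_return_table(): the leading field count becomes 12 and an extra 8-byte length plus bigint value is appended, hence the longer struct format string. A reduced sketch of that per-tuple encoding, assuming network byte order and a made-up three-field row:

    import struct

    # PostgreSQL COPY BINARY tuples: a 16-bit field count, then for every field
    # a 32-bit length followed by the value itself (8 for bigint, 4 for integer).
    fmt = "!hiqiqii"                        # 3 fields: two bigints, one integer
    row = struct.pack(fmt, 3, 8, 1, 8, 100, 4, 0)
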
diff --git a/tools/perf/scripts/python/export-to-sqlite.py b/tools/perf/scripts/python/export-to-sqlite.py
index 245caf2643ed..eb63e6c7107f 100644
--- a/tools/perf/scripts/python/export-to-sqlite.py
+++ b/tools/perf/scripts/python/export-to-sqlite.py
@@ -222,7 +222,8 @@ if perf_db_export_calls:
'call_id bigint,'
'return_id bigint,'
'parent_call_path_id bigint,'
- 'flags integer)')
+ 'flags integer,'
+ 'parent_id bigint)')
# printf was added to sqlite in version 3.8.3
sqlite_has_printf = False
@@ -320,8 +321,9 @@ if perf_db_export_calls:
'branch_count,'
'call_id,'
'return_id,'
- 'CASE WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' ELSE \'\' END AS flags,'
- 'parent_call_path_id'
+ 'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE flags END AS flags,'
+ 'parent_call_path_id,'
+ 'parent_id'
' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')
do_query(query, 'CREATE VIEW samples_view AS '
@@ -373,7 +375,7 @@ if perf_db_export_calls or perf_db_export_callchains:
call_path_query.prepare("INSERT INTO call_paths VALUES (?, ?, ?, ?)")
if perf_db_export_calls:
call_query = QSqlQuery(db)
- call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
+ call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
def trace_begin():
print datetime.datetime.today(), "Writing records..."
@@ -388,6 +390,7 @@ def trace_begin():
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
if perf_db_export_calls or perf_db_export_callchains:
call_path_table(0, 0, 0, 0)
+ call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
unhandled_count = 0
@@ -397,6 +400,7 @@ def trace_end():
print datetime.datetime.today(), "Adding indexes"
if perf_db_export_calls:
do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
+ do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
if (unhandled_count):
print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
@@ -452,4 +456,4 @@ def call_path_table(*x):
bind_exec(call_path_query, 4, x)
def call_return_table(*x):
- bind_exec(call_query, 11, x)
+ bind_exec(call_query, 12, x)
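
The SQLite exporter's calls table likewise gains a parent_id column, so the prepared INSERT needs a twelfth '?' placeholder and call_return_table() binds twelve values. A cut-down illustration using plain sqlite3 with a shortened column list (the real table has twelve columns):

    import sqlite3

    con = sqlite3.connect(":memory:")
    con.execute("CREATE TABLE calls (id integer, parent_call_path_id bigint,"
                " flags integer, parent_id bigint)")
    # One placeholder per column; the exporter does the same with twelve.
    con.execute("INSERT INTO calls VALUES (?, ?, ?, ?)", (1, 0, 0, 0))
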
diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
index f278ce5ebab7..afec9479ca7f 100755
--- a/tools/perf/scripts/python/exported-sql-viewer.py
+++ b/tools/perf/scripts/python/exported-sql-viewer.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
# SPDX-License-Identifier: GPL-2.0
# exported-sql-viewer.py: view data from sql database
# Copyright (c) 2014-2018, Intel Corporation.
@@ -167,9 +167,10 @@ class Thread(QThread):
class TreeModel(QAbstractItemModel):
- def __init__(self, root, parent=None):
+ def __init__(self, glb, parent=None):
super(TreeModel, self).__init__(parent)
- self.root = root
+ self.glb = glb
+ self.root = self.GetRoot()
self.last_row_read = 0
def Item(self, parent):
@@ -557,24 +558,12 @@ class CallGraphRootItem(CallGraphLevelItemBase):
self.child_items.append(child_item)
self.child_count += 1
-# Context-sensitive call graph data model
+# Context-sensitive call graph data model base
-class CallGraphModel(TreeModel):
+class CallGraphModelBase(TreeModel):
def __init__(self, glb, parent=None):
- super(CallGraphModel, self).__init__(CallGraphRootItem(glb), parent)
- self.glb = glb
-
- def columnCount(self, parent=None):
- return 7
-
- def columnHeader(self, column):
- headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
- return headers[column]
-
- def columnAlignment(self, column):
- alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
- return alignment[column]
+ super(CallGraphModelBase, self).__init__(glb, parent)
def FindSelect(self, value, pattern, query):
if pattern:
@@ -594,34 +583,7 @@ class CallGraphModel(TreeModel):
match = " GLOB '" + str(value) + "'"
else:
match = " = '" + str(value) + "'"
- QueryExec(query, "SELECT call_path_id, comm_id, thread_id"
- " FROM calls"
- " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
- " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
- " WHERE symbols.name" + match +
- " GROUP BY comm_id, thread_id, call_path_id"
- " ORDER BY comm_id, thread_id, call_path_id")
-
- def FindPath(self, query):
- # Turn the query result into a list of ids that the tree view can walk
- # to open the tree at the right place.
- ids = []
- parent_id = query.value(0)
- while parent_id:
- ids.insert(0, parent_id)
- q2 = QSqlQuery(self.glb.db)
- QueryExec(q2, "SELECT parent_id"
- " FROM call_paths"
- " WHERE id = " + str(parent_id))
- if not q2.next():
- break
- parent_id = q2.value(0)
- # The call path root is not used
- if ids[0] == 1:
- del ids[0]
- ids.insert(0, query.value(2))
- ids.insert(0, query.value(1))
- return ids
+ self.DoFindSelect(query, match)
def Found(self, query, found):
if found:
@@ -675,6 +637,201 @@ class CallGraphModel(TreeModel):
def FindDone(self, thread, callback, ids):
callback(ids)
+# Context-sensitive call graph data model
+
+class CallGraphModel(CallGraphModelBase):
+
+ def __init__(self, glb, parent=None):
+ super(CallGraphModel, self).__init__(glb, parent)
+
+ def GetRoot(self):
+ return CallGraphRootItem(self.glb)
+
+ def columnCount(self, parent=None):
+ return 7
+
+ def columnHeader(self, column):
+ headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
+ return headers[column]
+
+ def columnAlignment(self, column):
+ alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
+ return alignment[column]
+
+ def DoFindSelect(self, query, match):
+ QueryExec(query, "SELECT call_path_id, comm_id, thread_id"
+ " FROM calls"
+ " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
+ " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
+ " WHERE symbols.name" + match +
+ " GROUP BY comm_id, thread_id, call_path_id"
+ " ORDER BY comm_id, thread_id, call_path_id")
+
+ def FindPath(self, query):
+ # Turn the query result into a list of ids that the tree view can walk
+ # to open the tree at the right place.
+ ids = []
+ parent_id = query.value(0)
+ while parent_id:
+ ids.insert(0, parent_id)
+ q2 = QSqlQuery(self.glb.db)
+ QueryExec(q2, "SELECT parent_id"
+ " FROM call_paths"
+ " WHERE id = " + str(parent_id))
+ if not q2.next():
+ break
+ parent_id = q2.value(0)
+ # The call path root is not used
+ if ids[0] == 1:
+ del ids[0]
+ ids.insert(0, query.value(2))
+ ids.insert(0, query.value(1))
+ return ids
+
+# Call tree data model level 2+ item base
+
+class CallTreeLevelTwoPlusItemBase(CallGraphLevelItemBase):
+
+ def __init__(self, glb, row, comm_id, thread_id, calls_id, time, branch_count, parent_item):
+ super(CallTreeLevelTwoPlusItemBase, self).__init__(glb, row, parent_item)
+ self.comm_id = comm_id
+ self.thread_id = thread_id
+ self.calls_id = calls_id
+ self.branch_count = branch_count
+ self.time = time
+
+ def Select(self):
+ self.query_done = True;
+ if self.calls_id == 0:
+ comm_thread = " AND comm_id = " + str(self.comm_id) + " AND thread_id = " + str(self.thread_id)
+ else:
+ comm_thread = ""
+ query = QSqlQuery(self.glb.db)
+ QueryExec(query, "SELECT calls.id, name, short_name, call_time, return_time - call_time, branch_count"
+ " FROM calls"
+ " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
+ " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
+ " INNER JOIN dsos ON symbols.dso_id = dsos.id"
+ " WHERE calls.parent_id = " + str(self.calls_id) + comm_thread +
+ " ORDER BY call_time, calls.id")
+ while query.next():
+ child_item = CallTreeLevelThreeItem(self.glb, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), int(query.value(5)), self)
+ self.child_items.append(child_item)
+ self.child_count += 1
+
+# Call tree data model level three item
+
+class CallTreeLevelThreeItem(CallTreeLevelTwoPlusItemBase):
+
+ def __init__(self, glb, row, comm_id, thread_id, calls_id, name, dso, count, time, branch_count, parent_item):
+ super(CallTreeLevelThreeItem, self).__init__(glb, row, comm_id, thread_id, calls_id, time, branch_count, parent_item)
+ dso = dsoname(dso)
+ self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
+ self.dbid = calls_id
+
+# Call tree data model level two item
+
+class CallTreeLevelTwoItem(CallTreeLevelTwoPlusItemBase):
+
+ def __init__(self, glb, row, comm_id, thread_id, pid, tid, parent_item):
+ super(CallTreeLevelTwoItem, self).__init__(glb, row, comm_id, thread_id, 0, 0, 0, parent_item)
+ self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""]
+ self.dbid = thread_id
+
+ def Select(self):
+ super(CallTreeLevelTwoItem, self).Select()
+ for child_item in self.child_items:
+ self.time += child_item.time
+ self.branch_count += child_item.branch_count
+ for child_item in self.child_items:
+ child_item.data[4] = PercentToOneDP(child_item.time, self.time)
+ child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count)
+
+# Call tree data model level one item
+
+class CallTreeLevelOneItem(CallGraphLevelItemBase):
+
+ def __init__(self, glb, row, comm_id, comm, parent_item):
+ super(CallTreeLevelOneItem, self).__init__(glb, row, parent_item)
+ self.data = [comm, "", "", "", "", "", ""]
+ self.dbid = comm_id
+
+ def Select(self):
+ self.query_done = True;
+ query = QSqlQuery(self.glb.db)
+ QueryExec(query, "SELECT thread_id, pid, tid"
+ " FROM comm_threads"
+ " INNER JOIN threads ON thread_id = threads.id"
+ " WHERE comm_id = " + str(self.dbid))
+ while query.next():
+ child_item = CallTreeLevelTwoItem(self.glb, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self)
+ self.child_items.append(child_item)
+ self.child_count += 1
+
+# Call tree data model root item
+
+class CallTreeRootItem(CallGraphLevelItemBase):
+
+ def __init__(self, glb):
+ super(CallTreeRootItem, self).__init__(glb, 0, None)
+ self.dbid = 0
+ self.query_done = True;
+ query = QSqlQuery(glb.db)
+ QueryExec(query, "SELECT id, comm FROM comms")
+ while query.next():
+ if not query.value(0):
+ continue
+ child_item = CallTreeLevelOneItem(glb, self.child_count, query.value(0), query.value(1), self)
+ self.child_items.append(child_item)
+ self.child_count += 1
+
+# Call Tree data model
+
+class CallTreeModel(CallGraphModelBase):
+
+ def __init__(self, glb, parent=None):
+ super(CallTreeModel, self).__init__(glb, parent)
+
+ def GetRoot(self):
+ return CallTreeRootItem(self.glb)
+
+ def columnCount(self, parent=None):
+ return 7
+
+ def columnHeader(self, column):
+ headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
+ return headers[column]
+
+ def columnAlignment(self, column):
+ alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
+ return alignment[column]
+
+ def DoFindSelect(self, query, match):
+ QueryExec(query, "SELECT calls.id, comm_id, thread_id"
+ " FROM calls"
+ " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
+ " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
+ " WHERE symbols.name" + match +
+ " ORDER BY comm_id, thread_id, call_time, calls.id")
+
+ def FindPath(self, query):
+ # Turn the query result into a list of ids that the tree view can walk
+ # to open the tree at the right place.
+ ids = []
+ parent_id = query.value(0)
+ while parent_id:
+ ids.insert(0, parent_id)
+ q2 = QSqlQuery(self.glb.db)
+ QueryExec(q2, "SELECT parent_id"
+ " FROM calls"
+ " WHERE id = " + str(parent_id))
+ if not q2.next():
+ break
+ parent_id = q2.value(0)
+ ids.insert(0, query.value(2))
+ ids.insert(0, query.value(1))
+ return ids
+
# Vertical widget layout
class VBox():
@@ -693,28 +850,16 @@ class VBox():
def Widget(self):
return self.vbox
-# Context-sensitive call graph window
-
-class CallGraphWindow(QMdiSubWindow):
-
- def __init__(self, glb, parent=None):
- super(CallGraphWindow, self).__init__(parent)
-
- self.model = LookupCreateModel("Context-Sensitive Call Graph", lambda x=glb: CallGraphModel(x))
-
- self.view = QTreeView()
- self.view.setModel(self.model)
-
- for c, w in ((0, 250), (1, 100), (2, 60), (3, 70), (4, 70), (5, 100)):
- self.view.setColumnWidth(c, w)
-
- self.find_bar = FindBar(self, self)
+# Tree window base
- self.vbox = VBox(self.view, self.find_bar.Widget())
+class TreeWindowBase(QMdiSubWindow):
- self.setWidget(self.vbox.Widget())
+ def __init__(self, parent=None):
+ super(TreeWindowBase, self).__init__(parent)
- AddSubWindow(glb.mainwindow.mdi_area, self, "Context-Sensitive Call Graph")
+ self.model = None
+ self.view = None
+ self.find_bar = None
def DisplayFound(self, ids):
if not len(ids):
@@ -747,6 +892,53 @@ class CallGraphWindow(QMdiSubWindow):
if not found:
self.find_bar.NotFound()
+
+# Context-sensitive call graph window
+
+class CallGraphWindow(TreeWindowBase):
+
+ def __init__(self, glb, parent=None):
+ super(CallGraphWindow, self).__init__(parent)
+
+ self.model = LookupCreateModel("Context-Sensitive Call Graph", lambda x=glb: CallGraphModel(x))
+
+ self.view = QTreeView()
+ self.view.setModel(self.model)
+
+ for c, w in ((0, 250), (1, 100), (2, 60), (3, 70), (4, 70), (5, 100)):
+ self.view.setColumnWidth(c, w)
+
+ self.find_bar = FindBar(self, self)
+
+ self.vbox = VBox(self.view, self.find_bar.Widget())
+
+ self.setWidget(self.vbox.Widget())
+
+ AddSubWindow(glb.mainwindow.mdi_area, self, "Context-Sensitive Call Graph")
+
+# Call tree window
+
+class CallTreeWindow(TreeWindowBase):
+
+ def __init__(self, glb, parent=None):
+ super(CallTreeWindow, self).__init__(parent)
+
+ self.model = LookupCreateModel("Call Tree", lambda x=glb: CallTreeModel(x))
+
+ self.view = QTreeView()
+ self.view.setModel(self.model)
+
+ for c, w in ((0, 230), (1, 100), (2, 100), (3, 70), (4, 70), (5, 100)):
+ self.view.setColumnWidth(c, w)
+
+ self.find_bar = FindBar(self, self)
+
+ self.vbox = VBox(self.view, self.find_bar.Widget())
+
+ self.setWidget(self.vbox.Widget())
+
+ AddSubWindow(glb.mainwindow.mdi_area, self, "Call Tree")
+
# Child data item finder
class ChildDataItemFinder():
@@ -1327,8 +1519,7 @@ class BranchModel(TreeModel):
progress = Signal(object)
def __init__(self, glb, event_id, where_clause, parent=None):
- super(BranchModel, self).__init__(BranchRootItem(), parent)
- self.glb = glb
+ super(BranchModel, self).__init__(glb, parent)
self.event_id = event_id
self.more = True
self.populated = 0
@@ -1352,6 +1543,9 @@ class BranchModel(TreeModel):
self.fetcher.done.connect(self.Update)
self.fetcher.Fetch(glb_chunk_sz)
+ def GetRoot(self):
+ return BranchRootItem()
+
def columnCount(self, parent=None):
return 8
@@ -1398,18 +1592,28 @@ class BranchModel(TreeModel):
def HasMoreRecords(self):
return self.more
+# Report Variables
+
+class ReportVars():
+
+ def __init__(self, name = "", where_clause = "", limit = ""):
+ self.name = name
+ self.where_clause = where_clause
+ self.limit = limit
+
+ def UniqueId(self):
+ return str(self.where_clause + ";" + self.limit)
+
# Branch window
class BranchWindow(QMdiSubWindow):
- def __init__(self, glb, event_id, name, where_clause, parent=None):
+ def __init__(self, glb, event_id, report_vars, parent=None):
super(BranchWindow, self).__init__(parent)
- model_name = "Branch Events " + str(event_id)
- if len(where_clause):
- model_name = where_clause + " " + model_name
+ model_name = "Branch Events " + str(event_id) + " " + report_vars.UniqueId()
- self.model = LookupCreateModel(model_name, lambda: BranchModel(glb, event_id, where_clause))
+ self.model = LookupCreateModel(model_name, lambda: BranchModel(glb, event_id, report_vars.where_clause))
self.view = QTreeView()
self.view.setUniformRowHeights(True)
@@ -1427,7 +1631,7 @@ class BranchWindow(QMdiSubWindow):
self.setWidget(self.vbox.Widget())
- AddSubWindow(glb.mainwindow.mdi_area, self, name + " Branch Events")
+ AddSubWindow(glb.mainwindow.mdi_area, self, report_vars.name + " Branch Events")
def ResizeColumnToContents(self, column, n):
# Using the view's resizeColumnToContents() here is extremely slow
@@ -1472,47 +1676,134 @@ class BranchWindow(QMdiSubWindow):
else:
self.find_bar.NotFound()
-# Dialog data item converted and validated using a SQL table
+# Line edit data item
-class SQLTableDialogDataItem():
+class LineEditDataItem(object):
- def __init__(self, glb, label, placeholder_text, table_name, match_column, column_name1, column_name2, parent):
+ def __init__(self, glb, label, placeholder_text, parent, id = "", default = ""):
self.glb = glb
self.label = label
self.placeholder_text = placeholder_text
- self.table_name = table_name
- self.match_column = match_column
- self.column_name1 = column_name1
- self.column_name2 = column_name2
self.parent = parent
+ self.id = id
- self.value = ""
+ self.value = default
- self.widget = QLineEdit()
+ self.widget = QLineEdit(default)
self.widget.editingFinished.connect(self.Validate)
self.widget.textChanged.connect(self.Invalidate)
self.red = False
self.error = ""
self.validated = True
- self.last_id = 0
- self.first_time = 0
- self.last_time = 2 ** 64
- if self.table_name == "<timeranges>":
- query = QSqlQuery(self.glb.db)
- QueryExec(query, "SELECT id, time FROM samples ORDER BY id DESC LIMIT 1")
- if query.next():
- self.last_id = int(query.value(0))
- self.last_time = int(query.value(1))
- QueryExec(query, "SELECT time FROM samples WHERE time != 0 ORDER BY id LIMIT 1")
- if query.next():
- self.first_time = int(query.value(0))
- if placeholder_text:
- placeholder_text += ", between " + str(self.first_time) + " and " + str(self.last_time)
-
if placeholder_text:
self.widget.setPlaceholderText(placeholder_text)
+ def TurnTextRed(self):
+ if not self.red:
+ palette = QPalette()
+ palette.setColor(QPalette.Text,Qt.red)
+ self.widget.setPalette(palette)
+ self.red = True
+
+ def TurnTextNormal(self):
+ if self.red:
+ palette = QPalette()
+ self.widget.setPalette(palette)
+ self.red = False
+
+ def InvalidValue(self, value):
+ self.value = ""
+ self.TurnTextRed()
+ self.error = self.label + " invalid value '" + value + "'"
+ self.parent.ShowMessage(self.error)
+
+ def Invalidate(self):
+ self.validated = False
+
+ def DoValidate(self, input_string):
+ self.value = input_string.strip()
+
+ def Validate(self):
+ self.validated = True
+ self.error = ""
+ self.TurnTextNormal()
+ self.parent.ClearMessage()
+ input_string = self.widget.text()
+ if not len(input_string.strip()):
+ self.value = ""
+ return
+ self.DoValidate(input_string)
+
+ def IsValid(self):
+ if not self.validated:
+ self.Validate()
+ if len(self.error):
+ self.parent.ShowMessage(self.error)
+ return False
+ return True
+
+ def IsNumber(self, value):
+ try:
+ x = int(value)
+ except:
+ x = 0
+ return str(x) == value
+
+# Non-negative integer ranges dialog data item
+
+class NonNegativeIntegerRangesDataItem(LineEditDataItem):
+
+ def __init__(self, glb, label, placeholder_text, column_name, parent):
+ super(NonNegativeIntegerRangesDataItem, self).__init__(glb, label, placeholder_text, parent)
+
+ self.column_name = column_name
+
+ def DoValidate(self, input_string):
+ singles = []
+ ranges = []
+ for value in [x.strip() for x in input_string.split(",")]:
+ if "-" in value:
+ vrange = value.split("-")
+ if len(vrange) != 2 or not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
+ return self.InvalidValue(value)
+ ranges.append(vrange)
+ else:
+ if not self.IsNumber(value):
+ return self.InvalidValue(value)
+ singles.append(value)
+ ranges = [("(" + self.column_name + " >= " + r[0] + " AND " + self.column_name + " <= " + r[1] + ")") for r in ranges]
+ if len(singles):
+ ranges.append(self.column_name + " IN (" + ",".join(singles) + ")")
+ self.value = " OR ".join(ranges)
+
+# Positive integer dialog data item
+
+class PositiveIntegerDataItem(LineEditDataItem):
+
+ def __init__(self, glb, label, placeholder_text, parent, id = "", default = ""):
+ super(PositiveIntegerDataItem, self).__init__(glb, label, placeholder_text, parent, id, default)
+
+ def DoValidate(self, input_string):
+ if not self.IsNumber(input_string.strip()):
+ return self.InvalidValue(input_string)
+ value = int(input_string.strip())
+ if value <= 0:
+ return self.InvalidValue(input_string)
+ self.value = str(value)
+
+# Dialog data item converted and validated using a SQL table
+
+class SQLTableDataItem(LineEditDataItem):
+
+ def __init__(self, glb, label, placeholder_text, table_name, match_column, column_name1, column_name2, parent):
+ super(SQLTableDataItem, self).__init__(glb, label, placeholder_text, parent)
+
+ self.table_name = table_name
+ self.match_column = match_column
+ self.column_name1 = column_name1
+ self.column_name2 = column_name2
+
def ValueToIds(self, value):
ids = []
query = QSqlQuery(self.glb.db)
@@ -1523,6 +1814,42 @@ class SQLTableDialogDataItem():
ids.append(str(query.value(0)))
return ids
+ def DoValidate(self, input_string):
+ all_ids = []
+ for value in [x.strip() for x in input_string.split(",")]:
+ ids = self.ValueToIds(value)
+ if len(ids):
+ all_ids.extend(ids)
+ else:
+ return self.InvalidValue(value)
+ self.value = self.column_name1 + " IN (" + ",".join(all_ids) + ")"
+ if self.column_name2:
+ self.value = "( " + self.value + " OR " + self.column_name2 + " IN (" + ",".join(all_ids) + ") )"
+
+# Sample time ranges dialog data item converted and validated using 'samples' SQL table
+
+class SampleTimeRangesDataItem(LineEditDataItem):
+
+ def __init__(self, glb, label, placeholder_text, column_name, parent):
+ self.column_name = column_name
+
+ self.last_id = 0
+ self.first_time = 0
+ self.last_time = 2 ** 64
+
+ query = QSqlQuery(glb.db)
+ QueryExec(query, "SELECT id, time FROM samples ORDER BY id DESC LIMIT 1")
+ if query.next():
+ self.last_id = int(query.value(0))
+ self.last_time = int(query.value(1))
+ QueryExec(query, "SELECT time FROM samples WHERE time != 0 ORDER BY id LIMIT 1")
+ if query.next():
+ self.first_time = int(query.value(0))
+ if placeholder_text:
+ placeholder_text += ", between " + str(self.first_time) + " and " + str(self.last_time)
+
+ super(SampleTimeRangesDataItem, self).__init__(glb, label, placeholder_text, parent)
+
def IdBetween(self, query, lower_id, higher_id, order):
QueryExec(query, "SELECT id FROM samples WHERE id > " + str(lower_id) + " AND id < " + str(higher_id) + " ORDER BY id " + order + " LIMIT 1")
if query.next():
@@ -1560,7 +1887,6 @@ class SQLTableDialogDataItem():
return str(lower_id)
def ConvertRelativeTime(self, val):
- print "val ", val
mult = 1
suffix = val[-2:]
if suffix == "ms":
@@ -1582,29 +1908,23 @@ class SQLTableDialogDataItem():
return str(val)
def ConvertTimeRange(self, vrange):
- print "vrange ", vrange
if vrange[0] == "":
vrange[0] = str(self.first_time)
if vrange[1] == "":
vrange[1] = str(self.last_time)
vrange[0] = self.ConvertRelativeTime(vrange[0])
vrange[1] = self.ConvertRelativeTime(vrange[1])
- print "vrange2 ", vrange
if not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
return False
- print "ok1"
beg_range = max(int(vrange[0]), self.first_time)
end_range = min(int(vrange[1]), self.last_time)
if beg_range > self.last_time or end_range < self.first_time:
return False
- print "ok2"
vrange[0] = self.BinarySearchTime(0, self.last_id, beg_range, True)
vrange[1] = self.BinarySearchTime(1, self.last_id + 1, end_range, False)
- print "vrange3 ", vrange
return True
def AddTimeRange(self, value, ranges):
- print "value ", value
n = value.count("-")
if n == 1:
pass
@@ -1622,111 +1942,31 @@ class SQLTableDialogDataItem():
return True
return False
- def InvalidValue(self, value):
- self.value = ""
- palette = QPalette()
- palette.setColor(QPalette.Text,Qt.red)
- self.widget.setPalette(palette)
- self.red = True
- self.error = self.label + " invalid value '" + value + "'"
- self.parent.ShowMessage(self.error)
-
- def IsNumber(self, value):
- try:
- x = int(value)
- except:
- x = 0
- return str(x) == value
+ def DoValidate(self, input_string):
+ ranges = []
+ for value in [x.strip() for x in input_string.split(",")]:
+ if not self.AddTimeRange(value, ranges):
+ return self.InvalidValue(value)
+ ranges = [("(" + self.column_name + " >= " + r[0] + " AND " + self.column_name + " <= " + r[1] + ")") for r in ranges]
+ self.value = " OR ".join(ranges)
- def Invalidate(self):
- self.validated = False
+# Report Dialog Base
- def Validate(self):
- input_string = self.widget.text()
- self.validated = True
- if self.red:
- palette = QPalette()
- self.widget.setPalette(palette)
- self.red = False
- if not len(input_string.strip()):
- self.error = ""
- self.value = ""
- return
- if self.table_name == "<timeranges>":
- ranges = []
- for value in [x.strip() for x in input_string.split(",")]:
- if not self.AddTimeRange(value, ranges):
- return self.InvalidValue(value)
- ranges = [("(" + self.column_name1 + " >= " + r[0] + " AND " + self.column_name1 + " <= " + r[1] + ")") for r in ranges]
- self.value = " OR ".join(ranges)
- elif self.table_name == "<ranges>":
- singles = []
- ranges = []
- for value in [x.strip() for x in input_string.split(",")]:
- if "-" in value:
- vrange = value.split("-")
- if len(vrange) != 2 or not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
- return self.InvalidValue(value)
- ranges.append(vrange)
- else:
- if not self.IsNumber(value):
- return self.InvalidValue(value)
- singles.append(value)
- ranges = [("(" + self.column_name1 + " >= " + r[0] + " AND " + self.column_name1 + " <= " + r[1] + ")") for r in ranges]
- if len(singles):
- ranges.append(self.column_name1 + " IN (" + ",".join(singles) + ")")
- self.value = " OR ".join(ranges)
- elif self.table_name:
- all_ids = []
- for value in [x.strip() for x in input_string.split(",")]:
- ids = self.ValueToIds(value)
- if len(ids):
- all_ids.extend(ids)
- else:
- return self.InvalidValue(value)
- self.value = self.column_name1 + " IN (" + ",".join(all_ids) + ")"
- if self.column_name2:
- self.value = "( " + self.value + " OR " + self.column_name2 + " IN (" + ",".join(all_ids) + ") )"
- else:
- self.value = input_string.strip()
- self.error = ""
- self.parent.ClearMessage()
+class ReportDialogBase(QDialog):
- def IsValid(self):
- if not self.validated:
- self.Validate()
- if len(self.error):
- self.parent.ShowMessage(self.error)
- return False
- return True
-
-# Selected branch report creation dialog
-
-class SelectedBranchDialog(QDialog):
-
- def __init__(self, glb, parent=None):
- super(SelectedBranchDialog, self).__init__(parent)
+ def __init__(self, glb, title, items, partial, parent=None):
+ super(ReportDialogBase, self).__init__(parent)
self.glb = glb
- self.name = ""
- self.where_clause = ""
+ self.report_vars = ReportVars()
- self.setWindowTitle("Selected Branches")
+ self.setWindowTitle(title)
self.setMinimumWidth(600)
- items = (
- ("Report name:", "Enter a name to appear in the window title bar", "", "", "", ""),
- ("Time ranges:", "Enter time ranges", "<timeranges>", "", "samples.id", ""),
- ("CPUs:", "Enter CPUs or ranges e.g. 0,5-6", "<ranges>", "", "cpu", ""),
- ("Commands:", "Only branches with these commands will be included", "comms", "comm", "comm_id", ""),
- ("PIDs:", "Only branches with these process IDs will be included", "threads", "pid", "thread_id", ""),
- ("TIDs:", "Only branches with these thread IDs will be included", "threads", "tid", "thread_id", ""),
- ("DSOs:", "Only branches with these DSOs will be included", "dsos", "short_name", "samples.dso_id", "to_dso_id"),
- ("Symbols:", "Only branches with these symbols will be included", "symbols", "name", "symbol_id", "to_symbol_id"),
- ("Raw SQL clause: ", "Enter a raw SQL WHERE clause", "", "", "", ""),
- )
- self.data_items = [SQLTableDialogDataItem(glb, *x, parent=self) for x in items]
+ self.data_items = [x(glb, self) for x in items]
+
+ self.partial = partial
self.grid = QGridLayout()
@@ -1758,23 +1998,28 @@ class SelectedBranchDialog(QDialog):
self.setLayout(self.vbox);
def Ok(self):
- self.name = self.data_items[0].value
- if not self.name:
+ vars = self.report_vars
+ for d in self.data_items:
+ if d.id == "REPORTNAME":
+ vars.name = d.value
+ if not vars.name:
self.ShowMessage("Report name is required")
return
for d in self.data_items:
if not d.IsValid():
return
for d in self.data_items[1:]:
- if len(d.value):
- if len(self.where_clause):
- self.where_clause += " AND "
- self.where_clause += d.value
- if len(self.where_clause):
- self.where_clause = " AND ( " + self.where_clause + " ) "
- else:
- self.ShowMessage("No selection")
- return
+ if d.id == "LIMIT":
+ vars.limit = d.value
+ elif len(d.value):
+ if len(vars.where_clause):
+ vars.where_clause += " AND "
+ vars.where_clause += d.value
+ if len(vars.where_clause):
+ if self.partial:
+ vars.where_clause = " AND ( " + vars.where_clause + " ) "
+ else:
+ vars.where_clause = " WHERE " + vars.where_clause + " "
self.accept()
def ShowMessage(self, msg):
@@ -1783,6 +2028,23 @@ class SelectedBranchDialog(QDialog):
def ClearMessage(self):
self.status.setText("")
+# Selected branch report creation dialog
+
+class SelectedBranchDialog(ReportDialogBase):
+
+ def __init__(self, glb, parent=None):
+ title = "Selected Branches"
+ items = (lambda g, p: LineEditDataItem(g, "Report name:", "Enter a name to appear in the window title bar", p, "REPORTNAME"),
+ lambda g, p: SampleTimeRangesDataItem(g, "Time ranges:", "Enter time ranges", "samples.id", p),
+ lambda g, p: NonNegativeIntegerRangesDataItem(g, "CPUs:", "Enter CPUs or ranges e.g. 0,5-6", "cpu", p),
+ lambda g, p: SQLTableDataItem(g, "Commands:", "Only branches with these commands will be included", "comms", "comm", "comm_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "PIDs:", "Only branches with these process IDs will be included", "threads", "pid", "thread_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "TIDs:", "Only branches with these thread IDs will be included", "threads", "tid", "thread_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "DSOs:", "Only branches with these DSOs will be included", "dsos", "short_name", "samples.dso_id", "to_dso_id", p),
+ lambda g, p: SQLTableDataItem(g, "Symbols:", "Only branches with these symbols will be included", "symbols", "name", "symbol_id", "to_symbol_id", p),
+ lambda g, p: LineEditDataItem(g, "Raw SQL clause: ", "Enter a raw SQL WHERE clause", p))
+ super(SelectedBranchDialog, self).__init__(glb, title, items, True, parent)
+
# Event list
def GetEventList(db):
@@ -1793,6 +2055,16 @@ def GetEventList(db):
events.append(query.value(0))
return events
+# Is a table selectable
+
+def IsSelectable(db, table, sql = ""):
+ query = QSqlQuery(db)
+ try:
+ QueryExec(query, "SELECT * FROM " + table + " " + sql + " LIMIT 1")
+ except:
+ return False
+ return True
+
# SQL data preparation
def SQLTableDataPrep(query, count):
@@ -1818,12 +2090,13 @@ class SQLTableModel(TableModel):
progress = Signal(object)
- def __init__(self, glb, sql, column_count, parent=None):
+ def __init__(self, glb, sql, column_headers, parent=None):
super(SQLTableModel, self).__init__(parent)
self.glb = glb
self.more = True
self.populated = 0
- self.fetcher = SQLFetcher(glb, sql, lambda x, y=column_count: SQLTableDataPrep(x, y), self.AddSample)
+ self.column_headers = column_headers
+ self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): SQLTableDataPrep(x, y), self.AddSample)
self.fetcher.done.connect(self.Update)
self.fetcher.Fetch(glb_chunk_sz)
@@ -1861,6 +2134,12 @@ class SQLTableModel(TableModel):
def HasMoreRecords(self):
return self.more
+ def columnCount(self, parent=None):
+ return len(self.column_headers)
+
+ def columnHeader(self, column):
+ return self.column_headers[column]
+
# SQL automatic table data model
class SQLAutoTableModel(SQLTableModel):
@@ -1870,12 +2149,12 @@ class SQLAutoTableModel(SQLTableModel):
if table_name == "comm_threads_view":
# For now, comm_threads_view has no id column
sql = "SELECT * FROM " + table_name + " WHERE comm_id > $$last_id$$ ORDER BY comm_id LIMIT " + str(glb_chunk_sz)
- self.column_headers = []
+ column_headers = []
query = QSqlQuery(glb.db)
if glb.dbref.is_sqlite3:
QueryExec(query, "PRAGMA table_info(" + table_name + ")")
while query.next():
- self.column_headers.append(query.value(1))
+ column_headers.append(query.value(1))
if table_name == "sqlite_master":
sql = "SELECT * FROM " + table_name
else:
@@ -1888,14 +2167,8 @@ class SQLAutoTableModel(SQLTableModel):
schema = "public"
QueryExec(query, "SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' and table_name = '" + select_table_name + "'")
while query.next():
- self.column_headers.append(query.value(0))
- super(SQLAutoTableModel, self).__init__(glb, sql, len(self.column_headers), parent)
-
- def columnCount(self, parent=None):
- return len(self.column_headers)
-
- def columnHeader(self, column):
- return self.column_headers[column]
+ column_headers.append(query.value(0))
+ super(SQLAutoTableModel, self).__init__(glb, sql, column_headers, parent)
# Base class for custom ResizeColumnsToContents
@@ -1998,6 +2271,103 @@ def GetTableList(glb):
tables.append("information_schema.columns")
return tables
+# Top Calls data model
+
+class TopCallsModel(SQLTableModel):
+
+ def __init__(self, glb, report_vars, parent=None):
+ text = ""
+ if not glb.dbref.is_sqlite3:
+ text = "::text"
+ limit = ""
+ if len(report_vars.limit):
+ limit = " LIMIT " + report_vars.limit
+ sql = ("SELECT comm, pid, tid, name,"
+ " CASE"
+ " WHEN (short_name = '[kernel.kallsyms]') THEN '[kernel]'" + text +
+ " ELSE short_name"
+ " END AS dso,"
+ " call_time, return_time, (return_time - call_time) AS elapsed_time, branch_count, "
+ " CASE"
+ " WHEN (calls.flags = 1) THEN 'no call'" + text +
+ " WHEN (calls.flags = 2) THEN 'no return'" + text +
+ " WHEN (calls.flags = 3) THEN 'no call/return'" + text +
+ " ELSE ''" + text +
+ " END AS flags"
+ " FROM calls"
+ " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
+ " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
+ " INNER JOIN dsos ON symbols.dso_id = dsos.id"
+ " INNER JOIN comms ON calls.comm_id = comms.id"
+ " INNER JOIN threads ON calls.thread_id = threads.id" +
+ report_vars.where_clause +
+ " ORDER BY elapsed_time DESC" +
+ limit
+ )
+ column_headers = ("Command", "PID", "TID", "Symbol", "Object", "Call Time", "Return Time", "Elapsed Time (ns)", "Branch Count", "Flags")
+ self.alignment = (Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignLeft)
+ super(TopCallsModel, self).__init__(glb, sql, column_headers, parent)
+
+ def columnAlignment(self, column):
+ return self.alignment[column]
+
+# Top Calls report creation dialog
+
+class TopCallsDialog(ReportDialogBase):
+
+ def __init__(self, glb, parent=None):
+ title = "Top Calls by Elapsed Time"
+ items = (lambda g, p: LineEditDataItem(g, "Report name:", "Enter a name to appear in the window title bar", p, "REPORTNAME"),
+ lambda g, p: SQLTableDataItem(g, "Commands:", "Only calls with these commands will be included", "comms", "comm", "comm_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "PIDs:", "Only calls with these process IDs will be included", "threads", "pid", "thread_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "TIDs:", "Only calls with these thread IDs will be included", "threads", "tid", "thread_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "DSOs:", "Only calls with these DSOs will be included", "dsos", "short_name", "dso_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "Symbols:", "Only calls with these symbols will be included", "symbols", "name", "symbol_id", "", p),
+ lambda g, p: LineEditDataItem(g, "Raw SQL clause: ", "Enter a raw SQL WHERE clause", p),
+ lambda g, p: PositiveIntegerDataItem(g, "Record limit:", "Limit selection to this number of records", p, "LIMIT", "100"))
+ super(TopCallsDialog, self).__init__(glb, title, items, False, parent)
+
+# Top Calls window
+
+class TopCallsWindow(QMdiSubWindow, ResizeColumnsToContentsBase):
+
+ def __init__(self, glb, report_vars, parent=None):
+ super(TopCallsWindow, self).__init__(parent)
+
+ self.data_model = LookupCreateModel("Top Calls " + report_vars.UniqueId(), lambda: TopCallsModel(glb, report_vars))
+ self.model = self.data_model
+
+ self.view = QTableView()
+ self.view.setModel(self.model)
+ self.view.setEditTriggers(QAbstractItemView.NoEditTriggers)
+ self.view.verticalHeader().setVisible(False)
+
+ self.ResizeColumnsToContents()
+
+ self.find_bar = FindBar(self, self, True)
+
+ self.finder = ChildDataItemFinder(self.model)
+
+ self.fetch_bar = FetchMoreRecordsBar(self.data_model, self)
+
+ self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
+
+ self.setWidget(self.vbox.Widget())
+
+ AddSubWindow(glb.mainwindow.mdi_area, self, report_vars.name)
+
+ def Find(self, value, direction, pattern, context):
+ self.view.setFocus()
+ self.find_bar.Busy()
+ self.finder.Find(value, direction, pattern, context, self.FindDone)
+
+ def FindDone(self, row):
+ self.find_bar.Idle()
+ if row >= 0:
+ self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex()))
+ else:
+ self.find_bar.NotFound()
+
# Action Definition
def CreateAction(label, tip, callback, parent=None, shortcut=None):
@@ -2099,8 +2469,10 @@ p.c2 {
</style>
<p class=c1><a href=#reports>1. Reports</a></p>
<p class=c2><a href=#callgraph>1.1 Context-Sensitive Call Graph</a></p>
-<p class=c2><a href=#allbranches>1.2 All branches</a></p>
-<p class=c2><a href=#selectedbranches>1.3 Selected branches</a></p>
+<p class=c2><a href=#calltree>1.2 Call Tree</a></p>
+<p class=c2><a href=#allbranches>1.3 All branches</a></p>
+<p class=c2><a href=#selectedbranches>1.4 Selected branches</a></p>
+<p class=c2><a href=#topcallsbyelapsedtime>1.5 Top calls by elapsed time</a></p>
<p class=c1><a href=#tables>2. Tables</a></p>
<h1 id=reports>1. Reports</h1>
<h2 id=callgraph>1.1 Context-Sensitive Call Graph</h2>
@@ -2136,7 +2508,10 @@ v- ls
<h3>Find</h3>
Ctrl-F displays a Find bar which finds function names by either an exact match or a pattern match.
The pattern matching symbols are ? for any character and * for zero or more characters.
-<h2 id=allbranches>1.2 All branches</h2>
+<h2 id=calltree>1.2 Call Tree</h2>
+The Call Tree report is very similar to the Context-Sensitive Call Graph, but the data is not aggregated.
+Also the 'Count' column, which would always be 1, is replaced by 'Call Time'.
+<h2 id=allbranches>1.3 All branches</h2>
The All branches report displays all branches in chronological order.
Not all data is fetched immediately. More records can be fetched using the Fetch bar provided.
<h3>Disassembly</h3>
@@ -2162,10 +2537,10 @@ sudo ldconfig
Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
Refer to Python documentation for the regular expression syntax.
All columns are searched, but only currently fetched rows are searched.
-<h2 id=selectedbranches>1.3 Selected branches</h2>
+<h2 id=selectedbranches>1.4 Selected branches</h2>
This is the same as the <a href=#allbranches>All branches</a> report but with the data reduced
by various selection criteria. A dialog box displays available criteria which are AND'ed together.
-<h3>1.3.1 Time ranges</h3>
+<h3>1.4.1 Time ranges</h3>
The time ranges hint text shows the total time range. Relative time ranges can also be entered in
ms, us or ns. Also, negative values are relative to the end of trace. Examples:
<pre>
@@ -2176,6 +2551,10 @@ ms, us or ns. Also, negative values are relative to the end of trace. Examples:
-10ms- The last 10ms
</pre>
N.B. Due to the granularity of timestamps, there could be no branches in any given time range.
+<h2 id=topcallsbyelapsedtime>1.5 Top calls by elapsed time</h2>
+The Top calls by elapsed time report displays calls in descending order of time elapsed between when the function was called and when it returned.
+The data is reduced by various selection criteria. A dialog box displays available criteria which are AND'ed together.
+If not all data is fetched, a Fetch bar is provided. Ctrl-F displays a Find bar.
<h1 id=tables>2. Tables</h1>
The Tables menu shows all tables and views in the database. Most tables have an associated view
which displays the information in a more friendly way. Not all data for large tables is fetched
@@ -2305,10 +2684,17 @@ class MainWindow(QMainWindow):
edit_menu.addAction(CreateAction("&Enlarge Font", "Make text bigger", self.EnlargeFont, self, [QKeySequence("Ctrl++")]))
reports_menu = menu.addMenu("&Reports")
- reports_menu.addAction(CreateAction("Context-Sensitive Call &Graph", "Create a new window containing a context-sensitive call graph", self.NewCallGraph, self))
+ if IsSelectable(glb.db, "calls"):
+ reports_menu.addAction(CreateAction("Context-Sensitive Call &Graph", "Create a new window containing a context-sensitive call graph", self.NewCallGraph, self))
+
+ if IsSelectable(glb.db, "calls", "WHERE parent_id >= 0"):
+ reports_menu.addAction(CreateAction("Call &Tree", "Create a new window containing a call tree", self.NewCallTree, self))
self.EventMenu(GetEventList(glb.db), reports_menu)
+ if IsSelectable(glb.db, "calls"):
+ reports_menu.addAction(CreateAction("&Top calls by elapsed time", "Create a new window displaying top calls by elapsed time", self.NewTopCalls, self))
+
self.TableMenu(GetTableList(glb), menu)
self.window_menu = WindowMenu(self.mdi_area, menu)
@@ -2364,14 +2750,23 @@ class MainWindow(QMainWindow):
def NewCallGraph(self):
CallGraphWindow(self.glb, self)
+ def NewCallTree(self):
+ CallTreeWindow(self.glb, self)
+
+ def NewTopCalls(self):
+ dialog = TopCallsDialog(self.glb, self)
+ ret = dialog.exec_()
+ if ret:
+ TopCallsWindow(self.glb, dialog.report_vars, self)
+
def NewBranchView(self, event_id):
- BranchWindow(self.glb, event_id, "", "", self)
+ BranchWindow(self.glb, event_id, ReportVars(), self)
def NewSelectedBranchView(self, event_id):
dialog = SelectedBranchDialog(self.glb, self)
ret = dialog.exec_()
if ret:
- BranchWindow(self.glb, event_id, dialog.name, dialog.where_clause, self)
+ BranchWindow(self.glb, event_id, dialog.report_vars, self)
def NewTableView(self, table_name):
TableWindow(self.glb, table_name, self)
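
The viewer refactor above turns TreeModel into a small template-method base: __init__ now receives glb and asks a GetRoot() hook for the root item, so CallGraphModel, the new CallTreeModel and BranchModel differ only in the root they build. A stripped-down sketch of that shape (class and attribute names shortened for illustration):

    class TreeModelSketch(object):
        def __init__(self, glb):
            self.glb = glb
            self.root = self.GetRoot()      # hook supplied by each subclass

        def GetRoot(self):
            raise NotImplementedError

    class CallTreeModelSketch(TreeModelSketch):
        def GetRoot(self):
            return ("call tree root built from", self.glb)

    model = CallTreeModelSketch(glb="db handle")
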
diff --git a/tools/perf/scripts/python/failed-syscalls-by-pid.py b/tools/perf/scripts/python/failed-syscalls-by-pid.py
index cafeff3d74db..310efe5e7e23 100644
--- a/tools/perf/scripts/python/failed-syscalls-by-pid.py
+++ b/tools/perf/scripts/python/failed-syscalls-by-pid.py
@@ -5,6 +5,8 @@
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
+from __future__ import print_function
+
import os
import sys
@@ -32,7 +34,7 @@ if len(sys.argv) > 1:
syscalls = autodict()
def trace_begin():
- print "Press control+C to stop and show the summary"
+ print("Press control+C to stop and show the summary")
def trace_end():
print_error_totals()
@@ -56,23 +58,22 @@ def syscalls__sys_exit(event_name, context, common_cpu,
raw_syscalls__sys_exit(**locals())
def print_error_totals():
- if for_comm is not None:
- print "\nsyscall errors for %s:\n\n" % (for_comm),
- else:
- print "\nsyscall errors:\n\n",
-
- print "%-30s %10s\n" % ("comm [pid]", "count"),
- print "%-30s %10s\n" % ("------------------------------", \
- "----------"),
-
- comm_keys = syscalls.keys()
- for comm in comm_keys:
- pid_keys = syscalls[comm].keys()
- for pid in pid_keys:
- print "\n%s [%d]\n" % (comm, pid),
- id_keys = syscalls[comm][pid].keys()
- for id in id_keys:
- print " syscall: %-16s\n" % syscall_name(id),
- ret_keys = syscalls[comm][pid][id].keys()
- for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
- print " err = %-20s %10d\n" % (strerror(ret), val),
+ if for_comm is not None:
+ print("\nsyscall errors for %s:\n" % (for_comm))
+ else:
+ print("\nsyscall errors:\n")
+
+ print("%-30s %10s" % ("comm [pid]", "count"))
+ print("%-30s %10s" % ("------------------------------", "----------"))
+
+ comm_keys = syscalls.keys()
+ for comm in comm_keys:
+ pid_keys = syscalls[comm].keys()
+ for pid in pid_keys:
+ print("\n%s [%d]" % (comm, pid))
+ id_keys = syscalls[comm][pid].keys()
+ for id in id_keys:
+ print(" syscall: %-16s" % syscall_name(id))
+ ret_keys = syscalls[comm][pid][id].keys()
+ for ret, val in sorted(syscalls[comm][pid][id].items(), key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print(" err = %-20s %10d" % (strerror(ret), val))
diff --git a/tools/perf/scripts/python/futex-contention.py b/tools/perf/scripts/python/futex-contention.py
index 0f5cf437b602..0c4841acf75d 100644
--- a/tools/perf/scripts/python/futex-contention.py
+++ b/tools/perf/scripts/python/futex-contention.py
@@ -10,6 +10,8 @@
#
# Measures futex contention
+from __future__ import print_function
+
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
@@ -33,18 +35,18 @@ def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
nr, ret):
- if thread_blocktime.has_key(tid):
+ if tid in thread_blocktime:
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
- print "Press control+C to stop and show the summary"
+ print("Press control+C to stop and show the summary")
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
- print "%s[%d] lock %x contended %d times, %d avg ns" % \
- (process_names[tid], tid, lock, count, avg)
+ print("%s[%d] lock %x contended %d times, %d avg ns" %
+ (process_names[tid], tid, lock, count, avg))
diff --git a/tools/perf/scripts/python/intel-pt-events.py b/tools/perf/scripts/python/intel-pt-events.py
index b19172d673af..a73847c8f548 100644
--- a/tools/perf/scripts/python/intel-pt-events.py
+++ b/tools/perf/scripts/python/intel-pt-events.py
@@ -10,6 +10,8 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
+from __future__ import print_function
+
import os
import sys
import struct
@@ -22,34 +24,34 @@ sys.path.append(os.environ['PERF_EXEC_PATH'] + \
#from Core import *
def trace_begin():
- print "Intel PT Power Events and PTWRITE"
+ print("Intel PT Power Events and PTWRITE")
def trace_end():
- print "End"
+ print("End")
def trace_unhandled(event_name, context, event_fields_dict):
- print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
+ print(' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())]))
def print_ptwrite(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
flags = data[0]
payload = data[1]
exact_ip = flags & 1
- print "IP: %u payload: %#x" % (exact_ip, payload),
+ print("IP: %u payload: %#x" % (exact_ip, payload), end=' ')
def print_cbr(raw_buf):
data = struct.unpack_from("<BBBBII", raw_buf)
cbr = data[0]
f = (data[4] + 500) / 1000
p = ((cbr * 1000 / data[2]) + 5) / 10
- print "%3u freq: %4u MHz (%3u%%)" % (cbr, f, p),
+ print("%3u freq: %4u MHz (%3u%%)" % (cbr, f, p), end=' ')
def print_mwait(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
payload = data[1]
hints = payload & 0xff
extensions = (payload >> 32) & 0x3
- print "hints: %#x extensions: %#x" % (hints, extensions),
+ print("hints: %#x extensions: %#x" % (hints, extensions), end=' ')
def print_pwre(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
@@ -57,13 +59,14 @@ def print_pwre(raw_buf):
hw = (payload >> 7) & 1
cstate = (payload >> 12) & 0xf
subcstate = (payload >> 8) & 0xf
- print "hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate),
+ print("hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate),
+ end=' ')
def print_exstop(raw_buf):
data = struct.unpack_from("<I", raw_buf)
flags = data[0]
exact_ip = flags & 1
- print "IP: %u" % (exact_ip),
+ print("IP: %u" % (exact_ip), end=' ')
def print_pwrx(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
@@ -71,36 +74,39 @@ def print_pwrx(raw_buf):
deepest_cstate = payload & 0xf
last_cstate = (payload >> 4) & 0xf
wake_reason = (payload >> 8) & 0xf
- print "deepest cstate: %u last cstate: %u wake reason: %#x" % (deepest_cstate, last_cstate, wake_reason),
+ print("deepest cstate: %u last cstate: %u wake reason: %#x" %
+ (deepest_cstate, last_cstate, wake_reason), end=' ')
def print_common_start(comm, sample, name):
ts = sample["time"]
cpu = sample["cpu"]
pid = sample["pid"]
tid = sample["tid"]
- print "%16s %5u/%-5u [%03u] %9u.%09u %7s:" % (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name),
+ print("%16s %5u/%-5u [%03u] %9u.%09u %7s:" %
+ (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name),
+ end=' ')
def print_common_ip(sample, symbol, dso):
ip = sample["ip"]
- print "%16x %s (%s)" % (ip, symbol, dso)
+ print("%16x %s (%s)" % (ip, symbol, dso))
def process_event(param_dict):
- event_attr = param_dict["attr"]
- sample = param_dict["sample"]
- raw_buf = param_dict["raw_buf"]
- comm = param_dict["comm"]
- name = param_dict["ev_name"]
-
- # Symbol and dso info are not always resolved
- if (param_dict.has_key("dso")):
- dso = param_dict["dso"]
- else:
- dso = "[unknown]"
-
- if (param_dict.has_key("symbol")):
- symbol = param_dict["symbol"]
- else:
- symbol = "[unknown]"
+ event_attr = param_dict["attr"]
+ sample = param_dict["sample"]
+ raw_buf = param_dict["raw_buf"]
+ comm = param_dict["comm"]
+ name = param_dict["ev_name"]
+
+ # Symbol and dso info are not always resolved
+ if "dso" in param_dict:
+ dso = param_dict["dso"]
+ else:
+ dso = "[unknown]"
+
+ if "symbol" in param_dict:
+ symbol = param_dict["symbol"]
+ else:
+ symbol = "[unknown]"
if name == "ptwrite":
print_common_start(comm, sample, name)
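
Two idioms recur in this file: a Python 2 print statement ending in a comma (suppress the newline, emit a trailing space) becomes print(..., end=' '), and dict.has_key(k) becomes the k in dict test. A hedged sketch with an invented event dictionary:

    from __future__ import print_function

    param_dict = {"dso": "libc.so.6"}   # hypothetical event dictionary
    exact_ip = 1

    # Python 2: print "IP: %u" % exact_ip,    (trailing comma = no newline, trailing space)
    print("IP: %u" % exact_ip, end=' ')

    # Python 2: if param_dict.has_key("dso"): (has_key() was removed in Python 3)
    dso = param_dict["dso"] if "dso" in param_dict else "[unknown]"
    print(dso)
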
diff --git a/tools/perf/scripts/python/mem-phys-addr.py b/tools/perf/scripts/python/mem-phys-addr.py
index ebee2c5ae496..1f332e72b9b0 100644
--- a/tools/perf/scripts/python/mem-phys-addr.py
+++ b/tools/perf/scripts/python/mem-phys-addr.py
@@ -4,6 +4,8 @@
# Copyright (c) 2018, Intel Corporation.
from __future__ import division
+from __future__ import print_function
+
import os
import sys
import struct
@@ -31,21 +33,24 @@ def parse_iomem():
for i, j in enumerate(f):
m = re.split('-|:',j,2)
if m[2].strip() == 'System RAM':
- system_ram.append(long(m[0], 16))
- system_ram.append(long(m[1], 16))
+ system_ram.append(int(m[0], 16))
+ system_ram.append(int(m[1], 16))
if m[2].strip() == 'Persistent Memory':
- pmem.append(long(m[0], 16))
- pmem.append(long(m[1], 16))
+ pmem.append(int(m[0], 16))
+ pmem.append(int(m[1], 16))
def print_memory_type():
- print "Event: %s" % (event_name)
- print "%-40s %10s %10s\n" % ("Memory type", "count", "percentage"),
- print "%-40s %10s %10s\n" % ("----------------------------------------", \
+ print("Event: %s" % (event_name))
+ print("%-40s %10s %10s\n" % ("Memory type", "count", "percentage"), end='')
+ print("%-40s %10s %10s\n" % ("----------------------------------------",
"-----------", "-----------"),
+ end='');
total = sum(load_mem_type_cnt.values())
for mem_type, count in sorted(load_mem_type_cnt.most_common(), \
- key = lambda(k, v): (v, k), reverse = True):
- print "%-40s %10d %10.1f%%\n" % (mem_type, count, 100 * count / total),
+ key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print("%-40s %10d %10.1f%%\n" %
+ (mem_type, count, 100 * count / total),
+ end='')
def trace_begin():
parse_iomem()
@@ -80,7 +85,7 @@ def find_memory_type(phys_addr):
f.seek(0, 0)
for j in f:
m = re.split('-|:',j,2)
- if long(m[0], 16) <= phys_addr <= long(m[1], 16):
+ if int(m[0], 16) <= phys_addr <= int(m[1], 16):
return m[2]
return "N/A"
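
parse_iomem() above drops the Python 2 long() constructor; Python 3 has a single arbitrary-precision int, and int(s, 16) already handles addresses beyond 2**32 on Python 2 as well. Roughly, with a made-up /proc/iomem line:

    import re

    line = "100000000-17fffffff : System RAM"   # hypothetical /proc/iomem entry
    m = re.split('-|:', line, 2)
    # Python 2 used long(m[0], 16); plain int() works on both interpreters and is
    # arbitrary precision on Python 3, so physical addresses above 2**32 still fit.
    start, end = int(m[0], 16), int(m[1], 16)
    print("%#x-%#x %s" % (start, end, m[2].strip()))
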
diff --git a/tools/perf/scripts/python/net_dropmonitor.py b/tools/perf/scripts/python/net_dropmonitor.py
index a150164b44a3..101059971738 100755
--- a/tools/perf/scripts/python/net_dropmonitor.py
+++ b/tools/perf/scripts/python/net_dropmonitor.py
@@ -1,11 +1,13 @@
# Monitor the system for dropped packets and proudce a report of drop locations and counts
# SPDX-License-Identifier: GPL-2.0
+from __future__ import print_function
+
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
- '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
@@ -50,19 +52,19 @@ def get_sym(sloc):
return (None, 0)
def print_drop_table():
- print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
+ print("%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT"))
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
- print "%25s %25s %25s" % (sym, off, drop_log[i])
+ print("%25s %25s %25s" % (sym, off, drop_log[i]))
def trace_begin():
- print "Starting trace (Ctrl-C to dump results)"
+ print("Starting trace (Ctrl-C to dump results)")
def trace_end():
- print "Gathering kallsyms data"
+ print("Gathering kallsyms data")
get_kallsyms_table()
print_drop_table()
diff --git a/tools/perf/scripts/python/netdev-times.py b/tools/perf/scripts/python/netdev-times.py
index 9b2050f778f1..ea0c8b90a783 100644
--- a/tools/perf/scripts/python/netdev-times.py
+++ b/tools/perf/scripts/python/netdev-times.py
@@ -8,6 +8,8 @@
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
+from __future__ import print_function
+
import os
import sys
@@ -17,6 +19,7 @@ sys.path.append(os.environ['PERF_EXEC_PATH'] + \
from perf_trace_context import *
from Core import *
from Util import *
+from functools import cmp_to_key
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
@@ -61,12 +64,12 @@ def diff_msec(src, dst):
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
- print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
+ print("%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" %
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
- diff_msec(hunk['xmit_t'], hunk['free_t']))
+ diff_msec(hunk['xmit_t'], hunk['free_t'])))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
@@ -98,55 +101,57 @@ def print_receive(hunk):
if show_hunk == 0:
return
- print "%d.%06dsec cpu=%d" % \
- (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
+ print("%d.%06dsec cpu=%d" %
+ (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu))
for i in range(len(irq_list)):
- print PF_IRQ_ENTRY % \
+ print(PF_IRQ_ENTRY %
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
- irq_list[i]['irq'], irq_list[i]['name'])
- print PF_JOINT
+ irq_list[i]['irq'], irq_list[i]['name']))
+ print(PF_JOINT)
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
- print PF_NET_RX % \
+ print(PF_NET_RX %
(diff_msec(base_t, irq_event['time']),
- irq_event['skbaddr'])
- print PF_JOINT
- print PF_SOFT_ENTRY % \
- diff_msec(base_t, hunk['sirq_ent_t'])
- print PF_JOINT
+ irq_event['skbaddr']))
+ print(PF_JOINT)
+ print(PF_SOFT_ENTRY %
+ diff_msec(base_t, hunk['sirq_ent_t']))
+ print(PF_JOINT)
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
- print PF_NAPI_POLL % \
- (diff_msec(base_t, event['event_t']), event['dev'])
+ print(PF_NAPI_POLL %
+ (diff_msec(base_t, event['event_t']),
+ event['dev']))
if i == len(event_list) - 1:
- print ""
+ print("")
else:
- print PF_JOINT
+ print(PF_JOINT)
else:
- print PF_NET_RECV % \
- (diff_msec(base_t, event['event_t']), event['skbaddr'],
- event['len'])
+ print(PF_NET_RECV %
+ (diff_msec(base_t, event['event_t']),
+ event['skbaddr'],
+ event['len']))
if 'comm' in event.keys():
- print PF_WJOINT
- print PF_CPY_DGRAM % \
+ print(PF_WJOINT)
+ print(PF_CPY_DGRAM %
(diff_msec(base_t, event['comm_t']),
- event['pid'], event['comm'])
+ event['pid'], event['comm']))
elif 'handle' in event.keys():
- print PF_WJOINT
+ print(PF_WJOINT)
if event['handle'] == "kfree_skb":
- print PF_KFREE_SKB % \
+ print(PF_KFREE_SKB %
(diff_msec(base_t,
event['comm_t']),
- event['location'])
+ event['location']))
elif event['handle'] == "consume_skb":
- print PF_CONS_SKB % \
+ print(PF_CONS_SKB %
diff_msec(base_t,
- event['comm_t'])
- print PF_JOINT
+ event['comm_t']))
+ print(PF_JOINT)
def trace_begin():
global show_tx
@@ -172,8 +177,7 @@ def trace_begin():
def trace_end():
# order all events in time
- all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
- b[EINFO_IDX_TIME]))
+ all_event_list.sort(key=cmp_to_key(lambda a,b :a[EINFO_IDX_TIME] < b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
@@ -210,19 +214,19 @@ def trace_end():
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
- print " dev len Qdisc " \
- " netdevice free"
+ print(" dev len Qdisc "
+ " netdevice free")
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
- print "debug buffer status"
- print "----------------------------"
- print "xmit Qdisc:remain:%d overflow:%d" % \
- (len(tx_queue_list), of_count_tx_queue_list)
- print "xmit netdevice:remain:%d overflow:%d" % \
- (len(tx_xmit_list), of_count_tx_xmit_list)
- print "receive:remain:%d overflow:%d" % \
- (len(rx_skb_list), of_count_rx_skb_list)
+ print("debug buffer status")
+ print("----------------------------")
+ print("xmit Qdisc:remain:%d overflow:%d" %
+ (len(tx_queue_list), of_count_tx_queue_list))
+ print("xmit netdevice:remain:%d overflow:%d" %
+ (len(tx_xmit_list), of_count_tx_xmit_list))
+ print("receive:remain:%d overflow:%d" %
+ (len(rx_skb_list), of_count_rx_skb_list))
# called from perf, when it finds a correspoinding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
@@ -254,7 +258,7 @@ def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, i
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi,
- dev_name, work=None, budget=None):
+ dev_name, work=None, budget=None):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name, work, budget)
all_event_list.append(event_info)
@@ -351,7 +355,7 @@ def handle_irq_softirq_exit(event_info):
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
- 'irq_list':irq_list, 'event_list':event_list}
+ 'irq_list':irq_list, 'event_list':event_list}
# merge information realted to a NET_RX softirq
receive_hunk_list.append(rec_data)
@@ -388,7 +392,7 @@ def handle_netif_receive_skb(event_info):
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
- 'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
+ 'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
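
trace_end() above drops list.sort(cmp=...) in favour of functools.cmp_to_key(), since cmp() and the cmp argument no longer exist in Python 3. A sketch of the same move using a three-way comparator; the field index and event tuples are made up:

    from functools import cmp_to_key

    EINFO_IDX_TIME = 3                                     # index of the timestamp field
    events = [("irq", 0, 0, 200), ("softirq", 0, 0, 100)]  # hypothetical event tuples

    # Python 2: events.sort(lambda a, b: cmp(a[EINFO_IDX_TIME], b[EINFO_IDX_TIME]))
    # Wrap a comparator with cmp_to_key(), or simply pass key=lambda e: e[EINFO_IDX_TIME].
    events.sort(key=cmp_to_key(lambda a, b: (a[EINFO_IDX_TIME] > b[EINFO_IDX_TIME]) -
                                            (a[EINFO_IDX_TIME] < b[EINFO_IDX_TIME])))
    print(events)
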
diff --git a/tools/perf/scripts/python/powerpc-hcalls.py b/tools/perf/scripts/python/powerpc-hcalls.py
index 00e0e7476e55..8b78dc790adb 100644
--- a/tools/perf/scripts/python/powerpc-hcalls.py
+++ b/tools/perf/scripts/python/powerpc-hcalls.py
@@ -4,6 +4,8 @@
#
# Hypervisor call statisics
+from __future__ import print_function
+
import os
import sys
@@ -149,7 +151,7 @@ hcall_table = {
}
def hcall_table_lookup(opcode):
- if (hcall_table.has_key(opcode)):
+ if (opcode in hcall_table):
return hcall_table[opcode]
else:
return opcode
@@ -157,8 +159,8 @@ def hcall_table_lookup(opcode):
print_ptrn = '%-28s%10s%10s%10s%10s'
def trace_end():
- print print_ptrn % ('hcall', 'count', 'min(ns)', 'max(ns)', 'avg(ns)')
- print '-' * 68
+ print(print_ptrn % ('hcall', 'count', 'min(ns)', 'max(ns)', 'avg(ns)'))
+ print('-' * 68)
for opcode in output:
h_name = hcall_table_lookup(opcode)
time = output[opcode]['time']
@@ -166,14 +168,14 @@ def trace_end():
min_t = output[opcode]['min']
max_t = output[opcode]['max']
- print print_ptrn % (h_name, cnt, min_t, max_t, time/cnt)
+ print(print_ptrn % (h_name, cnt, min_t, max_t, time//cnt))
def powerpc__hcall_exit(name, context, cpu, sec, nsec, pid, comm, callchain,
opcode, retval):
- if (d_enter.has_key(cpu) and d_enter[cpu].has_key(opcode)):
+ if (cpu in d_enter and opcode in d_enter[cpu]):
diff = nsecs(sec, nsec) - d_enter[cpu][opcode]
- if (output.has_key(opcode)):
+ if (opcode in output):
output[opcode]['time'] += diff
output[opcode]['cnt'] += 1
if (output[opcode]['min'] > diff):
@@ -190,11 +192,11 @@ def powerpc__hcall_exit(name, context, cpu, sec, nsec, pid, comm, callchain,
del d_enter[cpu][opcode]
# else:
-# print "Can't find matching hcall_enter event. Ignoring sample"
+# print("Can't find matching hcall_enter event. Ignoring sample")
def powerpc__hcall_entry(event_name, context, cpu, sec, nsec, pid, comm,
callchain, opcode):
- if (d_enter.has_key(cpu)):
+ if (cpu in d_enter):
d_enter[cpu][opcode] = nsecs(sec, nsec)
else:
d_enter[cpu] = {opcode: nsecs(sec, nsec)}
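
The average printed in trace_end() switches from / to // because Python 3's / always produces a float; // keeps the integer-nanosecond arithmetic the Python 2 version produced. For instance, with made-up numbers:

    time_ns, cnt = 700, 3     # hypothetical accumulated duration and call count
    print(time_ns / cnt)      # Python 3: 233.333..., Python 2: 233
    print(time_ns // cnt)     # 233 on both interpreters
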
diff --git a/tools/perf/scripts/python/sched-migration.py b/tools/perf/scripts/python/sched-migration.py
index 3473e7f66081..8196e3087c9e 100644
--- a/tools/perf/scripts/python/sched-migration.py
+++ b/tools/perf/scripts/python/sched-migration.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
@@ -16,10 +14,10 @@ import sys
from collections import defaultdict
try:
- from UserList import UserList
+ from UserList import UserList
except ImportError:
- # Python 3: UserList moved to the collections package
- from collections import UserList
+ # Python 3: UserList moved to the collections package
+ from collections import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
diff --git a/tools/perf/scripts/python/sctop.py b/tools/perf/scripts/python/sctop.py
index 61621b93affb..6e0278dcb092 100644
--- a/tools/perf/scripts/python/sctop.py
+++ b/tools/perf/scripts/python/sctop.py
@@ -8,7 +8,14 @@
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
-import os, sys, thread, time
+from __future__ import print_function
+
+import os, sys, time
+
+try:
+ import thread
+except ImportError:
+ import _thread as thread
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
@@ -62,18 +69,20 @@ def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
- print "\nsyscall events for %s:\n\n" % (for_comm),
+ print("\nsyscall events for %s:\n" % (for_comm))
else:
- print "\nsyscall events:\n\n",
+ print("\nsyscall events:\n")
- print "%-40s %10s\n" % ("event", "count"),
- print "%-40s %10s\n" % ("----------------------------------------", \
- "----------"),
+ print("%-40s %10s" % ("event", "count"))
+ print("%-40s %10s" %
+ ("----------------------------------------",
+ "----------"))
- for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
- reverse = True):
+ for id, val in sorted(syscalls.items(),
+ key = lambda kv: (kv[1], kv[0]),
+ reverse = True):
try:
- print "%-40s %10d\n" % (syscall_name(id), val),
+ print("%-40s %10d" % (syscall_name(id), val))
except TypeError:
pass
syscalls.clear()
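
sctop.py imports the low-level thread module, which Python 3 renamed to _thread; the try/except ImportError shim above keeps a single source working on both. A minimal sketch of the same shim, with a throwaway worker function:

    try:
        import thread                  # Python 2 name of the low-level module
    except ImportError:
        import _thread as thread       # renamed to _thread in Python 3

    import time

    def tick():                        # hypothetical background worker
        time.sleep(0.1)

    thread.start_new_thread(tick, ())
    time.sleep(0.2)                    # give the worker a chance to run before exit
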
diff --git a/tools/perf/scripts/python/stackcollapse.py b/tools/perf/scripts/python/stackcollapse.py
index 1697b5e18c96..b1c4def1410a 100755
--- a/tools/perf/scripts/python/stackcollapse.py
+++ b/tools/perf/scripts/python/stackcollapse.py
@@ -19,13 +19,15 @@
# Written by Paolo Bonzini <pbonzini@redhat.com>
# Based on Brendan Gregg's stackcollapse-perf.pl script.
+from __future__ import print_function
+
import os
import sys
from collections import defaultdict
from optparse import OptionParser, make_option
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
- '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
@@ -120,7 +122,6 @@ def process_event(param_dict):
lines[stack_string] = lines[stack_string] + 1
def trace_end():
- list = lines.keys()
- list.sort()
+ list = sorted(lines)
for stack in list:
- print "%s %d" % (stack, lines[stack])
+ print("%s %d" % (stack, lines[stack]))
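
trace_end() can no longer sort lines.keys() in place: Python 3 returns a view object rather than a list, so the script builds a sorted list instead. For example, with invented collapsed stacks:

    lines = {"main;foo": 2, "main;bar": 5}   # hypothetical collapsed stacks
    # Python 2: ks = lines.keys(); ks.sort()  -- keys() is a view in Python 3, no .sort()
    for stack in sorted(lines):
        print("%s %d" % (stack, lines[stack]))
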
diff --git a/tools/perf/scripts/python/stat-cpi.py b/tools/perf/scripts/python/stat-cpi.py
index 8410672efb8b..01fa933ff3cf 100644
--- a/tools/perf/scripts/python/stat-cpi.py
+++ b/tools/perf/scripts/python/stat-cpi.py
@@ -1,6 +1,7 @@
-#!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0
+from __future__ import print_function
+
data = {}
times = []
threads = []
@@ -20,8 +21,8 @@ def store_key(time, cpu, thread):
threads.append(thread)
def store(time, event, cpu, thread, val, ena, run):
- #print "event %s cpu %d, thread %d, time %d, val %d, ena %d, run %d" % \
- # (event, cpu, thread, time, val, ena, run)
+ #print("event %s cpu %d, thread %d, time %d, val %d, ena %d, run %d" %
+ # (event, cpu, thread, time, val, ena, run))
store_key(time, cpu, thread)
key = get_key(time, event, cpu, thread)
@@ -59,7 +60,7 @@ def stat__interval(time):
if ins != 0:
cpi = cyc/float(ins)
- print "%15f: cpu %d, thread %d -> cpi %f (%d/%d)" % (time/(float(1000000000)), cpu, thread, cpi, cyc, ins)
+ print("%15f: cpu %d, thread %d -> cpi %f (%d/%d)" % (time/(float(1000000000)), cpu, thread, cpi, cyc, ins))
def trace_end():
pass
@@ -75,4 +76,4 @@ def trace_end():
# if ins != 0:
# cpi = cyc/float(ins)
#
-# print "time %.9f, cpu %d, thread %d -> cpi %f" % (time/(float(1000000000)), cpu, thread, cpi)
+# print("time %.9f, cpu %d, thread %d -> cpi %f" % (time/(float(1000000000)), cpu, thread, cpi))
diff --git a/tools/perf/scripts/python/syscall-counts-by-pid.py b/tools/perf/scripts/python/syscall-counts-by-pid.py
index daf314cc5dd3..f254e40c6f0f 100644
--- a/tools/perf/scripts/python/syscall-counts-by-pid.py
+++ b/tools/perf/scripts/python/syscall-counts-by-pid.py
@@ -5,6 +5,8 @@
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
+from __future__ import print_function
+
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
@@ -31,17 +33,16 @@ if len(sys.argv) > 1:
syscalls = autodict()
def trace_begin():
- print "Press control+C to stop and show the summary"
+ print("Press control+C to stop and show the summary")
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, id, args):
-
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, id, args):
if (for_comm and common_comm != for_comm) or \
- (for_pid and common_pid != for_pid ):
+ (for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
@@ -49,26 +50,26 @@ def raw_syscalls__sys_enter(event_name, context, common_cpu,
syscalls[common_comm][common_pid][id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- id, args):
+ common_secs, common_nsecs, common_pid, common_comm,
+ id, args):
raw_syscalls__sys_enter(**locals())
def print_syscall_totals():
- if for_comm is not None:
- print "\nsyscall events for %s:\n\n" % (for_comm),
- else:
- print "\nsyscall events by comm/pid:\n\n",
-
- print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
- print "%-40s %10s\n" % ("----------------------------------------", \
- "----------"),
-
- comm_keys = syscalls.keys()
- for comm in comm_keys:
- pid_keys = syscalls[comm].keys()
- for pid in pid_keys:
- print "\n%s [%d]\n" % (comm, pid),
- id_keys = syscalls[comm][pid].keys()
- for id, val in sorted(syscalls[comm][pid].iteritems(), \
- key = lambda(k, v): (v, k), reverse = True):
- print " %-38s %10d\n" % (syscall_name(id), val),
+ if for_comm is not None:
+ print("\nsyscall events for %s:\n" % (for_comm))
+ else:
+ print("\nsyscall events by comm/pid:\n")
+
+ print("%-40s %10s" % ("comm [pid]/syscalls", "count"))
+ print("%-40s %10s" % ("----------------------------------------",
+ "----------"))
+
+ comm_keys = syscalls.keys()
+ for comm in comm_keys:
+ pid_keys = syscalls[comm].keys()
+ for pid in pid_keys:
+ print("\n%s [%d]" % (comm, pid))
+ id_keys = syscalls[comm][pid].keys()
+ for id, val in sorted(syscalls[comm][pid].items(),
+ key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print(" %-38s %10d" % (syscall_name(id), val))
diff --git a/tools/perf/scripts/python/syscall-counts.py b/tools/perf/scripts/python/syscall-counts.py
index e66a7730aeb5..8adb95ff1664 100644
--- a/tools/perf/scripts/python/syscall-counts.py
+++ b/tools/perf/scripts/python/syscall-counts.py
@@ -5,6 +5,8 @@
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
+from __future__ import print_function
+
import os
import sys
@@ -28,14 +30,14 @@ if len(sys.argv) > 1:
syscalls = autodict()
def trace_begin():
- print "Press control+C to stop and show the summary"
+ print("Press control+C to stop and show the summary")
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, id, args):
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, id, args):
if for_comm is not None:
if common_comm != for_comm:
return
@@ -45,20 +47,19 @@ def raw_syscalls__sys_enter(event_name, context, common_cpu,
syscalls[id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- id, args):
+ common_secs, common_nsecs, common_pid, common_comm, id, args):
raw_syscalls__sys_enter(**locals())
def print_syscall_totals():
- if for_comm is not None:
- print "\nsyscall events for %s:\n\n" % (for_comm),
- else:
- print "\nsyscall events:\n\n",
-
- print "%-40s %10s\n" % ("event", "count"),
- print "%-40s %10s\n" % ("----------------------------------------", \
- "-----------"),
-
- for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
- reverse = True):
- print "%-40s %10d\n" % (syscall_name(id), val),
+ if for_comm is not None:
+ print("\nsyscall events for %s:\n" % (for_comm))
+ else:
+ print("\nsyscall events:\n")
+
+ print("%-40s %10s" % ("event", "count"))
+ print("%-40s %10s" % ("----------------------------------------",
+ "-----------"))
+
+ for id, val in sorted(syscalls.items(),
+ key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print("%-40s %10d" % (syscall_name(id), val))
diff --git a/tools/perf/tests/attr.py b/tools/perf/tests/attr.py
index 44090a9a19f3..cb39ac46bc73 100644
--- a/tools/perf/tests/attr.py
+++ b/tools/perf/tests/attr.py
@@ -1,6 +1,7 @@
-#! /usr/bin/python
# SPDX-License-Identifier: GPL-2.0
+from __future__ import print_function
+
import os
import sys
import glob
@@ -8,7 +9,11 @@ import optparse
import tempfile
import logging
import shutil
-import ConfigParser
+
+try:
+ import configparser
+except ImportError:
+ import ConfigParser as configparser
def data_equal(a, b):
# Allow multiple values in assignment separated by '|'
@@ -100,20 +105,20 @@ class Event(dict):
def equal(self, other):
for t in Event.terms:
log.debug(" [%s] %s %s" % (t, self[t], other[t]));
- if not self.has_key(t) or not other.has_key(t):
+ if t not in self or t not in other:
return False
if not data_equal(self[t], other[t]):
return False
return True
def optional(self):
- if self.has_key('optional') and self['optional'] == '1':
+ if 'optional' in self and self['optional'] == '1':
return True
return False
def diff(self, other):
for t in Event.terms:
- if not self.has_key(t) or not other.has_key(t):
+ if t not in self or t not in other:
continue
if not data_equal(self[t], other[t]):
log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
@@ -134,7 +139,7 @@ class Event(dict):
# - expected values assignments
class Test(object):
def __init__(self, path, options):
- parser = ConfigParser.SafeConfigParser()
+ parser = configparser.SafeConfigParser()
parser.read(path)
log.warning("running '%s'" % path)
@@ -193,7 +198,7 @@ class Test(object):
return True
def load_events(self, path, events):
- parser_event = ConfigParser.SafeConfigParser()
+ parser_event = configparser.SafeConfigParser()
parser_event.read(path)
# The event record section header contains 'event' word,
@@ -207,7 +212,7 @@ class Test(object):
# Read parent event if there's any
if (':' in section):
base = section[section.index(':') + 1:]
- parser_base = ConfigParser.SafeConfigParser()
+ parser_base = configparser.SafeConfigParser()
parser_base.read(self.test_dir + '/' + base)
base_items = parser_base.items('event')
@@ -322,9 +327,9 @@ def run_tests(options):
for f in glob.glob(options.test_dir + '/' + options.test):
try:
Test(f, options).run()
- except Unsup, obj:
+ except Unsup as obj:
log.warning("unsupp %s" % obj.getMsg())
- except Notest, obj:
+ except Notest as obj:
log.warning("skipped %s" % obj.getMsg())
def setup_log(verbose):
@@ -363,7 +368,7 @@ def main():
parser.add_option("-p", "--perf",
action="store", type="string", dest="perf")
parser.add_option("-v", "--verbose",
- action="count", dest="verbose")
+ default=0, action="count", dest="verbose")
options, args = parser.parse_args()
if args:
@@ -373,7 +378,7 @@ def main():
setup_log(options.verbose)
if not options.test_dir:
- print 'FAILED no -d option specified'
+ print('FAILED no -d option specified')
sys.exit(-1)
if not options.test:
@@ -382,8 +387,8 @@ def main():
try:
run_tests(options)
- except Fail, obj:
- print "FAILED %s" % obj.getMsg();
+ except Fail as obj:
+ print("FAILED %s" % obj.getMsg())
sys.exit(-1)
sys.exit(0)
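
attr.py picks up the same pair of compatibility changes as the scripts: the ConfigParser module is imported under its Python 3 name with a Python 2 fallback, and the old "except Fail, obj" syntax becomes "except Fail as obj". A small sketch under those assumptions; Fail here is a stand-in exception, not the harness's class:

    try:
        import configparser                     # Python 3 module name
    except ImportError:
        import ConfigParser as configparser     # Python 2 fallback

    class Fail(Exception):                      # hypothetical stand-in exception
        pass

    parser = configparser.ConfigParser()        # the in-tree code keeps the SafeConfigParser alias
    parser.add_section("event")
    parser.set("event", "name", "cycles")
    try:
        raise Fail("bad attr")
    except Fail as obj:                         # 'except Fail, obj' is a syntax error on Python 3
        print("FAILED %s" % obj)
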
diff --git a/tools/perf/tests/bp_account.c b/tools/perf/tests/bp_account.c
index a20cbc445426..57fc544aedb0 100644
--- a/tools/perf/tests/bp_account.c
+++ b/tools/perf/tests/bp_account.c
@@ -15,7 +15,6 @@
#include <sys/mman.h>
#include <linux/compiler.h>
#include <linux/hw_breakpoint.h>
-#include <sys/ioctl.h>
#include "tests.h"
#include "debug.h"
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index dbf2c69944d2..4ebd2681e760 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -15,6 +15,8 @@
#include "thread_map.h"
#include "cpumap.h"
#include "machine.h"
+#include "map.h"
+#include "symbol.h"
#include "event.h"
#include "thread.h"
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
index 7c8d2e422401..077c306c1cae 100644
--- a/tools/perf/tests/dwarf-unwind.c
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -10,6 +10,7 @@
#include "../util/unwind.h"
#include "perf_regs.h"
#include "map.h"
+#include "symbol.h"
#include "thread.h"
#include "callchain.h"
diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
index 5f8501c68da4..ea7acf403727 100644
--- a/tools/perf/tests/evsel-tp-sched.c
+++ b/tools/perf/tests/evsel-tp-sched.c
@@ -17,7 +17,7 @@ static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,
return -1;
}
- is_signed = !!(field->flags | TEP_FIELD_IS_SIGNED);
+ is_signed = !!(field->flags & TEP_FIELD_IS_SIGNED);
if (should_be_signed && !is_signed) {
pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n",
evsel->name, name, is_signed, should_be_signed);
@@ -43,7 +43,7 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
return -1;
}
- if (perf_evsel__test_field(evsel, "prev_comm", 16, true))
+ if (perf_evsel__test_field(evsel, "prev_comm", 16, false))
ret = -1;
if (perf_evsel__test_field(evsel, "prev_pid", 4, true))
@@ -55,7 +55,7 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
if (perf_evsel__test_field(evsel, "prev_state", sizeof(long), true))
ret = -1;
- if (perf_evsel__test_field(evsel, "next_comm", 16, true))
+ if (perf_evsel__test_field(evsel, "next_comm", 16, false))
ret = -1;
if (perf_evsel__test_field(evsel, "next_pid", 4, true))
@@ -73,7 +73,7 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
return -1;
}
- if (perf_evsel__test_field(evsel, "comm", 16, true))
+ if (perf_evsel__test_field(evsel, "comm", 16, false))
ret = -1;
if (perf_evsel__test_field(evsel, "pid", 4, true))
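
The first hunk in evsel-tp-sched.c is a real fix rather than a cleanup: ORing the flags with TEP_FIELD_IS_SIGNED yields a non-zero value no matter what the flags are, so is_signed was unconditionally true; the bit has to be tested with AND. The same distinction in a short Python sketch of the C pattern (the flag value is illustrative):

    TEP_FIELD_IS_SIGNED = 1 << 2        # illustrative value; only the bit test matters
    flags = 0                           # field flags with the signed bit clear

    print(bool(flags | TEP_FIELD_IS_SIGNED))   # True regardless of flags -- the old test
    print(bool(flags & TEP_FIELD_IS_SIGNED))   # False -- actually checks whether the bit is set
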
diff --git a/tools/perf/tests/hists_common.c b/tools/perf/tests/hists_common.c
index b889a28fd80b..469958cd7fe0 100644
--- a/tools/perf/tests/hists_common.c
+++ b/tools/perf/tests/hists_common.c
@@ -2,6 +2,7 @@
#include <inttypes.h>
#include "perf.h"
#include "util/debug.h"
+#include "util/map.h"
#include "util/symbol.h"
#include "util/sort.h"
#include "util/evsel.h"
@@ -161,7 +162,7 @@ out:
void print_hists_in(struct hists *hists)
{
int i = 0;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
if (hists__has(hists, need_collapse))
@@ -170,7 +171,7 @@ void print_hists_in(struct hists *hists)
root = hists->entries_in;
pr_info("----- %s --------\n", __func__);
- node = rb_first(root);
+ node = rb_first_cached(root);
while (node) {
struct hist_entry *he;
@@ -191,13 +192,13 @@ void print_hists_in(struct hists *hists)
void print_hists_out(struct hists *hists)
{
int i = 0;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
root = &hists->entries;
pr_info("----- %s --------\n", __func__);
- node = rb_first(root);
+ node = rb_first_cached(root);
while (node) {
struct hist_entry *he;
diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c
index 65fe02bebbee..7a2eed6c783e 100644
--- a/tools/perf/tests/hists_cumulate.c
+++ b/tools/perf/tests/hists_cumulate.c
@@ -2,6 +2,7 @@
#include "perf.h"
#include "util/debug.h"
#include "util/event.h"
+#include "util/map.h"
#include "util/symbol.h"
#include "util/sort.h"
#include "util/evsel.h"
@@ -125,8 +126,8 @@ out:
static void del_hist_entries(struct hists *hists)
{
struct hist_entry *he;
- struct rb_root *root_in;
- struct rb_root *root_out;
+ struct rb_root_cached *root_in;
+ struct rb_root_cached *root_out;
struct rb_node *node;
if (hists__has(hists, need_collapse))
@@ -136,12 +137,12 @@ static void del_hist_entries(struct hists *hists)
root_out = &hists->entries;
- while (!RB_EMPTY_ROOT(root_out)) {
- node = rb_first(root_out);
+ while (!RB_EMPTY_ROOT(&root_out->rb_root)) {
+ node = rb_first_cached(root_out);
he = rb_entry(node, struct hist_entry, rb_node);
- rb_erase(node, root_out);
- rb_erase(&he->rb_node_in, root_in);
+ rb_erase_cached(node, root_out);
+ rb_erase_cached(&he->rb_node_in, root_in);
hist_entry__delete(he);
}
}
@@ -198,7 +199,7 @@ static int do_test(struct hists *hists, struct result *expected, size_t nr_expec
print_hists_out(hists);
}
- root = &hists->entries;
+ root = &hists->entries.rb_root;
for (node = rb_first(root), i = 0;
node && (he = rb_entry(node, struct hist_entry, rb_node));
node = rb_next(node), i++) {
diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c
index 1c5bedab3c2c..975844807fe2 100644
--- a/tools/perf/tests/hists_filter.c
+++ b/tools/perf/tests/hists_filter.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include "perf.h"
#include "util/debug.h"
+#include "util/map.h"
#include "util/symbol.h"
#include "util/sort.h"
#include "util/evsel.h"
diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c
index 9a9d06cb0222..af633db63f4d 100644
--- a/tools/perf/tests/hists_link.c
+++ b/tools/perf/tests/hists_link.c
@@ -142,7 +142,7 @@ static int find_sample(struct sample *samples, size_t nr_samples,
static int __validate_match(struct hists *hists)
{
size_t count = 0;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
/*
@@ -153,7 +153,7 @@ static int __validate_match(struct hists *hists)
else
root = hists->entries_in;
- node = rb_first(root);
+ node = rb_first_cached(root);
while (node) {
struct hist_entry *he;
@@ -192,7 +192,7 @@ static int __validate_link(struct hists *hists, int idx)
size_t count = 0;
size_t count_pair = 0;
size_t count_dummy = 0;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
/*
@@ -205,7 +205,7 @@ static int __validate_link(struct hists *hists, int idx)
else
root = hists->entries_in;
- node = rb_first(root);
+ node = rb_first_cached(root);
while (node) {
struct hist_entry *he;
diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c
index faacb4f41460..0a510c524a5d 100644
--- a/tools/perf/tests/hists_output.c
+++ b/tools/perf/tests/hists_output.c
@@ -2,6 +2,7 @@
#include "perf.h"
#include "util/debug.h"
#include "util/event.h"
+#include "util/map.h"
#include "util/symbol.h"
#include "util/sort.h"
#include "util/evsel.h"
@@ -91,8 +92,8 @@ out:
static void del_hist_entries(struct hists *hists)
{
struct hist_entry *he;
- struct rb_root *root_in;
- struct rb_root *root_out;
+ struct rb_root_cached *root_in;
+ struct rb_root_cached *root_out;
struct rb_node *node;
if (hists__has(hists, need_collapse))
@@ -102,12 +103,12 @@ static void del_hist_entries(struct hists *hists)
root_out = &hists->entries;
- while (!RB_EMPTY_ROOT(root_out)) {
- node = rb_first(root_out);
+ while (!RB_EMPTY_ROOT(&root_out->rb_root)) {
+ node = rb_first_cached(root_out);
he = rb_entry(node, struct hist_entry, rb_node);
- rb_erase(node, root_out);
- rb_erase(&he->rb_node_in, root_in);
+ rb_erase_cached(node, root_out);
+ rb_erase_cached(&he->rb_node_in, root_in);
hist_entry__delete(he);
}
}
@@ -126,7 +127,7 @@ static int test1(struct perf_evsel *evsel, struct machine *machine)
int err;
struct hists *hists = evsel__hists(evsel);
struct hist_entry *he;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
field_order = NULL;
@@ -162,7 +163,7 @@ static int test1(struct perf_evsel *evsel, struct machine *machine)
}
root = &hists->entries;
- node = rb_first(root);
+ node = rb_first_cached(root);
he = rb_entry(node, struct hist_entry, rb_node);
TEST_ASSERT_VAL("Invalid hist entry",
!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
@@ -228,7 +229,7 @@ static int test2(struct perf_evsel *evsel, struct machine *machine)
int err;
struct hists *hists = evsel__hists(evsel);
struct hist_entry *he;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
field_order = "overhead,cpu";
@@ -262,7 +263,7 @@ static int test2(struct perf_evsel *evsel, struct machine *machine)
}
root = &hists->entries;
- node = rb_first(root);
+ node = rb_first_cached(root);
he = rb_entry(node, struct hist_entry, rb_node);
TEST_ASSERT_VAL("Invalid hist entry",
CPU(he) == 1 && PID(he) == 100 && he->stat.period == 300);
@@ -284,7 +285,7 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
int err;
struct hists *hists = evsel__hists(evsel);
struct hist_entry *he;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
field_order = "comm,overhead,dso";
@@ -316,7 +317,7 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
}
root = &hists->entries;
- node = rb_first(root);
+ node = rb_first_cached(root);
he = rb_entry(node, struct hist_entry, rb_node);
TEST_ASSERT_VAL("Invalid hist entry",
!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
@@ -358,7 +359,7 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
int err;
struct hists *hists = evsel__hists(evsel);
struct hist_entry *he;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
field_order = "dso,sym,comm,overhead,dso";
@@ -394,7 +395,7 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
}
root = &hists->entries;
- node = rb_first(root);
+ node = rb_first_cached(root);
he = rb_entry(node, struct hist_entry, rb_node);
TEST_ASSERT_VAL("Invalid hist entry",
!strcmp(DSO(he), "perf") && !strcmp(SYM(he), "cmd_record") &&
@@ -460,7 +461,7 @@ static int test5(struct perf_evsel *evsel, struct machine *machine)
int err;
struct hists *hists = evsel__hists(evsel);
struct hist_entry *he;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
field_order = "cpu,pid,comm,dso,sym";
@@ -497,7 +498,7 @@ static int test5(struct perf_evsel *evsel, struct machine *machine)
}
root = &hists->entries;
- node = rb_first(root);
+ node = rb_first_cached(root);
he = rb_entry(node, struct hist_entry, rb_node);
TEST_ASSERT_VAL("Invalid hist entry",
diff --git a/tools/perf/tests/mmap-thread-lookup.c b/tools/perf/tests/mmap-thread-lookup.c
index 5ede9b561d32..ba87e6e8d18c 100644
--- a/tools/perf/tests/mmap-thread-lookup.c
+++ b/tools/perf/tests/mmap-thread-lookup.c
@@ -11,6 +11,7 @@
#include "tests.h"
#include "machine.h"
#include "thread_map.h"
+#include "map.h"
#include "symbol.h"
#include "thread.h"
#include "util.h"
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 3b97ac018d5a..4a69c07f4101 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -1330,6 +1330,26 @@ static int test__checkevent_complex_name(struct perf_evlist *evlist)
return 0;
}
+static int test__sym_event_slash(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong type", evsel->attr.type == PERF_TYPE_HARDWARE);
+ TEST_ASSERT_VAL("wrong config", evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+ return 0;
+}
+
+static int test__sym_event_dc(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong type", evsel->attr.type == PERF_TYPE_HARDWARE);
+ TEST_ASSERT_VAL("wrong config", evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES);
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+ return 0;
+}
+
static int count_tracepoints(void)
{
struct dirent *events_ent;
@@ -1670,6 +1690,16 @@ static struct evlist_test test__events[] = {
.name = "cycles/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks'/Duk",
.check = test__checkevent_complex_name,
.id = 53
+ },
+ {
+ .name = "cycles//u",
+ .check = test__sym_event_slash,
+ .id = 54,
+ },
+ {
+ .name = "cycles:k",
+ .check = test__sym_event_dc,
+ .id = 55,
}
};
diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c
index 7bedf8608fdd..14a78898d79e 100644
--- a/tools/perf/tests/pmu.c
+++ b/tools/perf/tests/pmu.c
@@ -4,7 +4,9 @@
#include "util.h"
#include "tests.h"
#include <errno.h>
+#include <stdio.h>
#include <linux/kernel.h>
+#include <linux/limits.h>
/* Simulated format definitions. */
static struct test_format {
diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c
index 0e2d00d69e6e..236ce0d6c826 100644
--- a/tools/perf/tests/sample-parsing.c
+++ b/tools/perf/tests/sample-parsing.c
@@ -1,9 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
#include <stdbool.h>
#include <inttypes.h>
+#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/types.h>
+#include "branch.h"
#include "util.h"
#include "event.h"
#include "evsel.h"
diff --git a/tools/perf/tests/sdt.c b/tools/perf/tests/sdt.c
index 5059452d27dd..8bfaa630389c 100644
--- a/tools/perf/tests/sdt.c
+++ b/tools/perf/tests/sdt.c
@@ -3,6 +3,7 @@
#include <stdio.h>
#include <sys/epoll.h>
#include <util/evlist.h>
+#include <util/symbol.h>
#include <linux/filter.h>
#include "tests.h"
#include "debug.h"
diff --git a/tools/perf/tests/shell/lib/probe.sh b/tools/perf/tests/shell/lib/probe.sh
index 6293cc660947..e37787be672b 100644
--- a/tools/perf/tests/shell/lib/probe.sh
+++ b/tools/perf/tests/shell/lib/probe.sh
@@ -4,3 +4,8 @@ skip_if_no_perf_probe() {
perf probe 2>&1 | grep -q 'is not a perf-command' && return 2
return 0
}
+
+skip_if_no_perf_trace() {
+ perf trace -h 2>&1 | grep -q -e 'is not a perf-command' -e 'trace command not available' && return 2
+ return 0
+}
diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
index 50109f27ca07..147efeb6b195 100755
--- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
@@ -12,6 +12,7 @@
. $(dirname $0)/lib/probe.sh
skip_if_no_perf_probe || exit 2
+skip_if_no_perf_trace || exit 2
. $(dirname $0)/lib/probe_vfs_getname.sh
diff --git a/tools/perf/trace/beauty/Build b/tools/perf/trace/beauty/Build
index 637365099b7d..85f328ddf897 100644
--- a/tools/perf/trace/beauty/Build
+++ b/tools/perf/trace/beauty/Build
@@ -1,15 +1,15 @@
-libperf-y += clone.o
-libperf-y += fcntl.o
-libperf-y += flock.o
+perf-y += clone.o
+perf-y += fcntl.o
+perf-y += flock.o
ifeq ($(SRCARCH),$(filter $(SRCARCH),x86))
-libperf-y += ioctl.o
+perf-y += ioctl.o
endif
-libperf-y += kcmp.o
-libperf-y += mount_flags.o
-libperf-y += pkey_alloc.o
-libperf-y += arch_prctl.o
-libperf-y += prctl.o
-libperf-y += renameat.o
-libperf-y += sockaddr.o
-libperf-y += socket.o
-libperf-y += statx.o
+perf-y += kcmp.o
+perf-y += mount_flags.o
+perf-y += pkey_alloc.o
+perf-y += arch_prctl.o
+perf-y += prctl.o
+perf-y += renameat.o
+perf-y += sockaddr.o
+perf-y += socket.o
+perf-y += statx.o
diff --git a/tools/perf/trace/beauty/ioctl.c b/tools/perf/trace/beauty/ioctl.c
index 620350d41209..52242fa4072b 100644
--- a/tools/perf/trace/beauty/ioctl.c
+++ b/tools/perf/trace/beauty/ioctl.c
@@ -175,7 +175,7 @@ static size_t ioctl__scnprintf_cmd(unsigned long cmd, char *bf, size_t size, boo
size_t syscall_arg__scnprintf_ioctl_cmd(char *bf, size_t size, struct syscall_arg *arg)
{
unsigned long cmd = arg->val;
- unsigned int fd = syscall_arg__val(arg, 0);
+ int fd = syscall_arg__val(arg, 0);
struct file *file = thread__files_entry(arg->thread, fd);
if (file != NULL) {
diff --git a/tools/perf/trace/beauty/msg_flags.c b/tools/perf/trace/beauty/msg_flags.c
index d66c66315987..ea68db08b8e7 100644
--- a/tools/perf/trace/beauty/msg_flags.c
+++ b/tools/perf/trace/beauty/msg_flags.c
@@ -29,7 +29,7 @@ static size_t syscall_arg__scnprintf_msg_flags(char *bf, size_t size,
return scnprintf(bf, size, "NONE");
#define P_MSG_FLAG(n) \
if (flags & MSG_##n) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
+ printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
flags &= ~MSG_##n; \
}
diff --git a/tools/perf/trace/beauty/waitid_options.c b/tools/perf/trace/beauty/waitid_options.c
index 6897fab40dcc..d4d10b33ba0e 100644
--- a/tools/perf/trace/beauty/waitid_options.c
+++ b/tools/perf/trace/beauty/waitid_options.c
@@ -11,7 +11,7 @@ static size_t syscall_arg__scnprintf_waitid_options(char *bf, size_t size,
#define P_OPTION(n) \
if (options & W##n) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : #n); \
+ printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
options &= ~W##n; \
}
diff --git a/tools/perf/ui/Build b/tools/perf/ui/Build
index 0a73538c0441..3aff83c3275f 100644
--- a/tools/perf/ui/Build
+++ b/tools/perf/ui/Build
@@ -1,14 +1,14 @@
-libperf-y += setup.o
-libperf-y += helpline.o
-libperf-y += progress.o
-libperf-y += util.o
-libperf-y += hist.o
-libperf-y += stdio/hist.o
+perf-y += setup.o
+perf-y += helpline.o
+perf-y += progress.o
+perf-y += util.o
+perf-y += hist.o
+perf-y += stdio/hist.o
CFLAGS_setup.o += -DLIBDIR="BUILD_STR($(LIBDIR))"
-libperf-$(CONFIG_SLANG) += browser.o
-libperf-$(CONFIG_SLANG) += browsers/
-libperf-$(CONFIG_SLANG) += tui/
+perf-$(CONFIG_SLANG) += browser.o
+perf-$(CONFIG_SLANG) += browsers/
+perf-$(CONFIG_SLANG) += tui/
CFLAGS_browser.o += -DENABLE_SLFUTURE_CONST
diff --git a/tools/perf/ui/browsers/Build b/tools/perf/ui/browsers/Build
index de223f5bed58..8fee56b46502 100644
--- a/tools/perf/ui/browsers/Build
+++ b/tools/perf/ui/browsers/Build
@@ -1,8 +1,8 @@
-libperf-y += annotate.o
-libperf-y += hists.o
-libperf-y += map.o
-libperf-y += scripts.o
-libperf-y += header.o
+perf-y += annotate.o
+perf-y += hists.o
+perf-y += map.o
+perf-y += scripts.o
+perf-y += header.o
CFLAGS_annotate.o += -DENABLE_SLFUTURE_CONST
CFLAGS_hists.o += -DENABLE_SLFUTURE_CONST
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 82e16bf84466..35bdfd8b1e71 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -7,6 +7,7 @@
#include "../../util/annotate.h"
#include "../../util/hist.h"
#include "../../util/sort.h"
+#include "../../util/map.h"
#include "../../util/symbol.h"
#include "../../util/evsel.h"
#include "../../util/evlist.h"
diff --git a/tools/perf/ui/browsers/header.c b/tools/perf/ui/browsers/header.c
index d75492189acb..5aeb663dd184 100644
--- a/tools/perf/ui/browsers/header.c
+++ b/tools/perf/ui/browsers/header.c
@@ -35,7 +35,7 @@ static int list_menu__run(struct ui_browser *menu)
{
int key;
unsigned long offset;
- const char help[] =
+ static const char help[] =
"h/?/F1 Show this window\n"
"UP/DOWN/PGUP\n"
"PGDN/SPACE\n"
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index ffac1d54a3d4..aef800d97ea1 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -8,9 +8,12 @@
#include <linux/rbtree.h>
#include <sys/ttydefaults.h>
+#include "../../util/callchain.h"
#include "../../util/evsel.h"
#include "../../util/evlist.h"
#include "../../util/hist.h"
+#include "../../util/map.h"
+#include "../../util/symbol.h"
#include "../../util/pstack.h"
#include "../../util/sort.h"
#include "../../util/util.h"
@@ -49,7 +52,7 @@ static int hist_browser__get_folding(struct hist_browser *browser)
struct hists *hists = browser->hists;
int unfolded_rows = 0;
- for (nd = rb_first(&hists->entries);
+ for (nd = rb_first_cached(&hists->entries);
(nd = hists__filter_entries(nd, browser->min_pcnt)) != NULL;
nd = rb_hierarchy_next(nd)) {
struct hist_entry *he =
@@ -267,7 +270,7 @@ static int hierarchy_count_rows(struct hist_browser *hb, struct hist_entry *he,
if (he->has_no_entry)
return 1;
- node = rb_first(&he->hroot_out);
+ node = rb_first_cached(&he->hroot_out);
while (node) {
float percent;
@@ -372,7 +375,7 @@ static void hist_entry__init_have_children(struct hist_entry *he)
he->has_children = !RB_EMPTY_ROOT(&he->sorted_chain);
callchain__init_have_children(&he->sorted_chain);
} else {
- he->has_children = !RB_EMPTY_ROOT(&he->hroot_out);
+ he->has_children = !RB_EMPTY_ROOT(&he->hroot_out.rb_root);
}
he->init_have_children = true;
@@ -508,7 +511,7 @@ static int hierarchy_set_folding(struct hist_browser *hb, struct hist_entry *he,
struct hist_entry *child;
int n = 0;
- for (nd = rb_first(&he->hroot_out); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&he->hroot_out); nd; nd = rb_next(nd)) {
child = rb_entry(nd, struct hist_entry, rb_node);
percent = hist_entry__get_percent_limit(child);
if (!child->filtered && percent >= hb->min_pcnt)
@@ -566,7 +569,7 @@ __hist_browser__set_folding(struct hist_browser *browser, bool unfold)
struct rb_node *nd;
struct hist_entry *he;
- nd = rb_first(&browser->hists->entries);
+ nd = rb_first_cached(&browser->hists->entries);
while (nd) {
he = rb_entry(nd, struct hist_entry, rb_node);
@@ -1738,7 +1741,7 @@ static void ui_browser__hists_init_top(struct ui_browser *browser)
struct hist_browser *hb;
hb = container_of(browser, struct hist_browser, b);
- browser->top = rb_first(&hb->hists->entries);
+ browser->top = rb_first_cached(&hb->hists->entries);
}
}
@@ -2649,7 +2652,7 @@ add_socket_opt(struct hist_browser *browser, struct popup_action *act,
static void hist_browser__update_nr_entries(struct hist_browser *hb)
{
u64 nr_entries = 0;
- struct rb_node *nd = rb_first(&hb->hists->entries);
+ struct rb_node *nd = rb_first_cached(&hb->hists->entries);
if (hb->min_pcnt == 0 && !symbol_conf.report_hierarchy) {
hb->nr_non_filtered_entries = hb->hists->nr_non_filtered_entries;
@@ -2669,7 +2672,7 @@ static void hist_browser__update_percent_limit(struct hist_browser *hb,
double percent)
{
struct hist_entry *he;
- struct rb_node *nd = rb_first(&hb->hists->entries);
+ struct rb_node *nd = rb_first_cached(&hb->hists->entries);
u64 total = hists__total_period(hb->hists);
u64 min_callchain_hits = total * (percent / 100);
@@ -2748,7 +2751,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
"S Zoom into current Processor Socket\n" \
/* help messages are sorted by lexical order of the hotkey */
- const char report_help[] = HIST_BROWSER_HELP_COMMON
+ static const char report_help[] = HIST_BROWSER_HELP_COMMON
"i Show header information\n"
"P Print histograms to perf.hist.N\n"
"r Run available scripts\n"
@@ -2756,7 +2759,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
"t Zoom into current Thread\n"
"V Verbose (DSO names in callchains, etc)\n"
"/ Filter symbol by name";
- const char top_help[] = HIST_BROWSER_HELP_COMMON
+ static const char top_help[] = HIST_BROWSER_HELP_COMMON
"P Print histograms to perf.hist.N\n"
"t Zoom into current Thread\n"
"V Verbose (DSO names in callchains, etc)\n"
diff --git a/tools/perf/ui/browsers/map.c b/tools/perf/ui/browsers/map.c
index 5b8b8c637686..c70d9337405b 100644
--- a/tools/perf/ui/browsers/map.c
+++ b/tools/perf/ui/browsers/map.c
@@ -6,6 +6,7 @@
#include <linux/bitops.h>
#include "../../util/util.h"
#include "../../util/debug.h"
+#include "../../util/map.h"
#include "../../util/symbol.h"
#include "../browser.h"
#include "../helpline.h"
diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c
index 48428c9acd89..df49c9ba1785 100644
--- a/tools/perf/ui/gtk/annotate.c
+++ b/tools/perf/ui/gtk/annotate.c
@@ -1,8 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
#include "gtk.h"
+#include "util/sort.h"
#include "util/debug.h"
#include "util/annotate.h"
#include "util/evsel.h"
+#include "util/map.h"
+#include "util/symbol.h"
#include "ui/helpline.h"
#include <inttypes.h>
#include <signal.h>
diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c
index 4ab663ec3e5e..0c08890f006a 100644
--- a/tools/perf/ui/gtk/hists.c
+++ b/tools/perf/ui/gtk/hists.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include "../evlist.h"
#include "../cache.h"
+#include "../callchain.h"
#include "../evsel.h"
#include "../sort.h"
#include "../hist.h"
@@ -353,7 +354,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
g_object_unref(GTK_TREE_MODEL(store));
- for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
GtkTreeIter iter;
u64 total = hists__total_period(h->hists);
@@ -401,7 +402,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
}
static void perf_gtk__add_hierarchy_entries(struct hists *hists,
- struct rb_root *root,
+ struct rb_root_cached *root,
GtkTreeStore *store,
GtkTreeIter *parent,
struct perf_hpp *hpp,
@@ -415,7 +416,7 @@ static void perf_gtk__add_hierarchy_entries(struct hists *hists,
u64 total = hists__total_period(hists);
int size;
- for (node = rb_first(root); node; node = rb_next(node)) {
+ for (node = rb_first_cached(root); node; node = rb_next(node)) {
GtkTreeIter iter;
float percent;
char *bf;
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index fe3dfaa64a91..412d6f1626e3 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -3,6 +3,7 @@
#include <math.h>
#include <linux/compiler.h>
+#include "../util/callchain.h"
#include "../util/hist.h"
#include "../util/util.h"
#include "../util/sort.h"
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index 74c4ae1f0a05..a60f2993d390 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -2,8 +2,12 @@
#include <stdio.h>
#include <linux/string.h>
+#include "../../util/callchain.h"
#include "../../util/util.h"
#include "../../util/hist.h"
+#include "../../util/map.h"
+#include "../../util/map_groups.h"
+#include "../../util/symbol.h"
#include "../../util/sort.h"
#include "../../util/evsel.h"
#include "../../util/srcline.h"
@@ -788,7 +792,8 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
indent = hists__overhead_width(hists) + 4;
- for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
+ for (nd = rb_first_cached(&hists->entries); nd;
+ nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
float percent;
diff --git a/tools/perf/ui/tui/Build b/tools/perf/ui/tui/Build
index 9e4c6ca41a9f..f916df33a1a7 100644
--- a/tools/perf/ui/tui/Build
+++ b/tools/perf/ui/tui/Build
@@ -1,4 +1,4 @@
-libperf-y += setup.o
-libperf-y += util.o
-libperf-y += helpline.o
-libperf-y += progress.o
+perf-y += setup.o
+perf-y += util.o
+perf-y += helpline.o
+perf-y += progress.o
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index af72be7f5b3b..8dd3102301ea 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -1,158 +1,164 @@
-libperf-y += annotate.o
-libperf-y += block-range.o
-libperf-y += build-id.o
-libperf-y += config.o
-libperf-y += ctype.o
-libperf-y += db-export.o
-libperf-y += env.o
-libperf-y += event.o
-libperf-y += evlist.o
-libperf-y += evsel.o
-libperf-y += evsel_fprintf.o
-libperf-y += find_bit.o
-libperf-y += get_current_dir_name.o
-libperf-y += kallsyms.o
-libperf-y += levenshtein.o
-libperf-y += llvm-utils.o
-libperf-y += mmap.o
-libperf-y += memswap.o
-libperf-y += parse-events.o
-libperf-y += perf_regs.o
-libperf-y += path.o
-libperf-y += print_binary.o
-libperf-y += rbtree.o
-libperf-y += libstring.o
-libperf-y += bitmap.o
-libperf-y += hweight.o
-libperf-y += smt.o
-libperf-y += strbuf.o
-libperf-y += string.o
-libperf-y += strlist.o
-libperf-y += strfilter.o
-libperf-y += top.o
-libperf-y += usage.o
-libperf-y += dso.o
-libperf-y += symbol.o
-libperf-y += symbol_fprintf.o
-libperf-y += color.o
-libperf-y += metricgroup.o
-libperf-y += header.o
-libperf-y += callchain.o
-libperf-y += values.o
-libperf-y += debug.o
-libperf-y += machine.o
-libperf-y += map.o
-libperf-y += pstack.o
-libperf-y += session.o
-libperf-$(CONFIG_TRACE) += syscalltbl.o
-libperf-y += ordered-events.o
-libperf-y += namespaces.o
-libperf-y += comm.o
-libperf-y += thread.o
-libperf-y += thread_map.o
-libperf-y += trace-event-parse.o
-libperf-y += parse-events-flex.o
-libperf-y += parse-events-bison.o
-libperf-y += pmu.o
-libperf-y += pmu-flex.o
-libperf-y += pmu-bison.o
-libperf-y += trace-event-read.o
-libperf-y += trace-event-info.o
-libperf-y += trace-event-scripting.o
-libperf-y += trace-event.o
-libperf-y += svghelper.o
-libperf-y += sort.o
-libperf-y += hist.o
-libperf-y += util.o
-libperf-y += xyarray.o
-libperf-y += cpumap.o
-libperf-y += cgroup.o
-libperf-y += target.o
-libperf-y += rblist.o
-libperf-y += intlist.o
-libperf-y += vdso.o
-libperf-y += counts.o
-libperf-y += stat.o
-libperf-y += stat-shadow.o
-libperf-y += stat-display.o
-libperf-y += record.o
-libperf-y += srcline.o
-libperf-y += srccode.o
-libperf-y += data.o
-libperf-y += tsc.o
-libperf-y += cloexec.o
-libperf-y += call-path.o
-libperf-y += rwsem.o
-libperf-y += thread-stack.o
-libperf-$(CONFIG_AUXTRACE) += auxtrace.o
-libperf-$(CONFIG_AUXTRACE) += intel-pt-decoder/
-libperf-$(CONFIG_AUXTRACE) += intel-pt.o
-libperf-$(CONFIG_AUXTRACE) += intel-bts.o
-libperf-$(CONFIG_AUXTRACE) += arm-spe.o
-libperf-$(CONFIG_AUXTRACE) += arm-spe-pkt-decoder.o
-libperf-$(CONFIG_AUXTRACE) += s390-cpumsf.o
+perf-y += annotate.o
+perf-y += block-range.o
+perf-y += build-id.o
+perf-y += config.o
+perf-y += ctype.o
+perf-y += db-export.o
+perf-y += env.o
+perf-y += event.o
+perf-y += evlist.o
+perf-y += evsel.o
+perf-y += evsel_fprintf.o
+perf-y += find_bit.o
+perf-y += get_current_dir_name.o
+perf-y += kallsyms.o
+perf-y += levenshtein.o
+perf-y += llvm-utils.o
+perf-y += mmap.o
+perf-y += memswap.o
+perf-y += parse-events.o
+perf-y += perf_regs.o
+perf-y += path.o
+perf-y += print_binary.o
+perf-y += rbtree.o
+perf-y += libstring.o
+perf-y += bitmap.o
+perf-y += hweight.o
+perf-y += smt.o
+perf-y += strbuf.o
+perf-y += string.o
+perf-y += strlist.o
+perf-y += strfilter.o
+perf-y += top.o
+perf-y += usage.o
+perf-y += dso.o
+perf-y += symbol.o
+perf-y += symbol_fprintf.o
+perf-y += color.o
+perf-y += color_config.o
+perf-y += metricgroup.o
+perf-y += header.o
+perf-y += callchain.o
+perf-y += values.o
+perf-y += debug.o
+perf-y += machine.o
+perf-y += map.o
+perf-y += pstack.o
+perf-y += session.o
+perf-y += sample-raw.o
+perf-y += s390-sample-raw.o
+perf-$(CONFIG_TRACE) += syscalltbl.o
+perf-y += ordered-events.o
+perf-y += namespaces.o
+perf-y += comm.o
+perf-y += thread.o
+perf-y += thread_map.o
+perf-y += trace-event-parse.o
+perf-y += parse-events-flex.o
+perf-y += parse-events-bison.o
+perf-y += pmu.o
+perf-y += pmu-flex.o
+perf-y += pmu-bison.o
+perf-y += trace-event-read.o
+perf-y += trace-event-info.o
+perf-y += trace-event-scripting.o
+perf-y += trace-event.o
+perf-y += svghelper.o
+perf-y += sort.o
+perf-y += hist.o
+perf-y += util.o
+perf-y += xyarray.o
+perf-y += cpumap.o
+perf-y += cputopo.o
+perf-y += cgroup.o
+perf-y += target.o
+perf-y += rblist.o
+perf-y += intlist.o
+perf-y += vdso.o
+perf-y += counts.o
+perf-y += stat.o
+perf-y += stat-shadow.o
+perf-y += stat-display.o
+perf-y += record.o
+perf-y += srcline.o
+perf-y += srccode.o
+perf-y += data.o
+perf-y += tsc.o
+perf-y += cloexec.o
+perf-y += call-path.o
+perf-y += rwsem.o
+perf-y += thread-stack.o
+perf-$(CONFIG_AUXTRACE) += auxtrace.o
+perf-$(CONFIG_AUXTRACE) += intel-pt-decoder/
+perf-$(CONFIG_AUXTRACE) += intel-pt.o
+perf-$(CONFIG_AUXTRACE) += intel-bts.o
+perf-$(CONFIG_AUXTRACE) += arm-spe.o
+perf-$(CONFIG_AUXTRACE) += arm-spe-pkt-decoder.o
+perf-$(CONFIG_AUXTRACE) += s390-cpumsf.o
ifdef CONFIG_LIBOPENCSD
-libperf-$(CONFIG_AUXTRACE) += cs-etm.o
-libperf-$(CONFIG_AUXTRACE) += cs-etm-decoder/
+perf-$(CONFIG_AUXTRACE) += cs-etm.o
+perf-$(CONFIG_AUXTRACE) += cs-etm-decoder/
endif
-libperf-y += parse-branch-options.o
-libperf-y += dump-insn.o
-libperf-y += parse-regs-options.o
-libperf-y += term.o
-libperf-y += help-unknown-cmd.o
-libperf-y += mem-events.o
-libperf-y += vsprintf.o
-libperf-y += drv_configs.o
-libperf-y += units.o
-libperf-y += time-utils.o
-libperf-y += expr-bison.o
-libperf-y += branch.o
-libperf-y += mem2node.o
-
-libperf-$(CONFIG_LIBBPF) += bpf-loader.o
-libperf-$(CONFIG_BPF_PROLOGUE) += bpf-prologue.o
-libperf-$(CONFIG_LIBELF) += symbol-elf.o
-libperf-$(CONFIG_LIBELF) += probe-file.o
-libperf-$(CONFIG_LIBELF) += probe-event.o
+perf-y += parse-branch-options.o
+perf-y += dump-insn.o
+perf-y += parse-regs-options.o
+perf-y += term.o
+perf-y += help-unknown-cmd.o
+perf-y += mem-events.o
+perf-y += vsprintf.o
+perf-y += units.o
+perf-y += time-utils.o
+perf-y += expr-bison.o
+perf-y += branch.o
+perf-y += mem2node.o
+
+perf-$(CONFIG_LIBBPF) += bpf-loader.o
+perf-$(CONFIG_LIBBPF) += bpf_map.o
+perf-$(CONFIG_BPF_PROLOGUE) += bpf-prologue.o
+perf-$(CONFIG_LIBELF) += symbol-elf.o
+perf-$(CONFIG_LIBELF) += probe-file.o
+perf-$(CONFIG_LIBELF) += probe-event.o
ifndef CONFIG_LIBELF
-libperf-y += symbol-minimal.o
+perf-y += symbol-minimal.o
endif
ifndef CONFIG_SETNS
-libperf-y += setns.o
+perf-y += setns.o
endif
-libperf-$(CONFIG_DWARF) += probe-finder.o
-libperf-$(CONFIG_DWARF) += dwarf-aux.o
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += probe-finder.o
+perf-$(CONFIG_DWARF) += dwarf-aux.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
-libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-libperf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind-local.o
-libperf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
-libperf-$(CONFIG_LIBUNWIND_X86) += libunwind/x86_32.o
-libperf-$(CONFIG_LIBUNWIND_AARCH64) += libunwind/arm64.o
+perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind-local.o
+perf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
+perf-$(CONFIG_LIBUNWIND_X86) += libunwind/x86_32.o
+perf-$(CONFIG_LIBUNWIND_AARCH64) += libunwind/arm64.o
-libperf-$(CONFIG_LIBBABELTRACE) += data-convert-bt.o
+perf-$(CONFIG_LIBBABELTRACE) += data-convert-bt.o
-libperf-y += scripting-engines/
+perf-y += scripting-engines/
-libperf-$(CONFIG_ZLIB) += zlib.o
-libperf-$(CONFIG_LZMA) += lzma.o
-libperf-y += demangle-java.o
-libperf-y += demangle-rust.o
+perf-$(CONFIG_ZLIB) += zlib.o
+perf-$(CONFIG_LZMA) += lzma.o
+perf-y += demangle-java.o
+perf-y += demangle-rust.o
ifdef CONFIG_JITDUMP
-libperf-$(CONFIG_LIBELF) += jitdump.o
-libperf-$(CONFIG_LIBELF) += genelf.o
-libperf-$(CONFIG_DWARF) += genelf_debug.o
+perf-$(CONFIG_LIBELF) += jitdump.o
+perf-$(CONFIG_LIBELF) += genelf.o
+perf-$(CONFIG_DWARF) += genelf_debug.o
endif
-libperf-y += perf-hooks.o
+perf-y += perf-hooks.o
-libperf-$(CONFIG_CXX) += c++/
+perf-$(CONFIG_LIBBPF) += bpf-event.o
+
+perf-$(CONFIG_CXX) += c++/
CFLAGS_config.o += -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
CFLAGS_llvm-utils.o += -DPERF_INCLUDE_DIR="BUILD_STR($(perf_include_dir_SQ))"
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 70de8f6b3aee..5f6dbbf5d749 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -9,6 +9,7 @@
#include <errno.h>
#include <inttypes.h>
+#include <libgen.h>
#include "util.h"
#include "ui/ui.h"
#include "sort.h"
@@ -16,6 +17,7 @@
#include "color.h"
#include "config.h"
#include "cache.h"
+#include "map.h"
#include "symbol.h"
#include "units.h"
#include "debug.h"
@@ -196,18 +198,18 @@ static void ins__delete(struct ins_operands *ops)
}
static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops)
+ struct ins_operands *ops, int max_ins_name)
{
- return scnprintf(bf, size, "%-6s %s", ins->name, ops->raw);
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->raw);
}
int ins__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops)
+ struct ins_operands *ops, int max_ins_name)
{
if (ins->ops->scnprintf)
- return ins->ops->scnprintf(ins, bf, size, ops);
+ return ins->ops->scnprintf(ins, bf, size, ops, max_ins_name);
- return ins__raw_scnprintf(ins, bf, size, ops);
+ return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
}
bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2)
@@ -271,18 +273,18 @@ indirect_call:
}
static int call__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops)
+ struct ins_operands *ops, int max_ins_name)
{
if (ops->target.sym)
- return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.sym->name);
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.sym->name);
if (ops->target.addr == 0)
- return ins__raw_scnprintf(ins, bf, size, ops);
+ return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
if (ops->target.name)
- return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.name);
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.name);
- return scnprintf(bf, size, "%-6s *%" PRIx64, ins->name, ops->target.addr);
+ return scnprintf(bf, size, "%-*s *%" PRIx64, max_ins_name, ins->name, ops->target.addr);
}
static struct ins_ops call_ops = {
@@ -386,15 +388,15 @@ static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_s
}
static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops)
+ struct ins_operands *ops, int max_ins_name)
{
const char *c;
if (!ops->target.addr || ops->target.offset < 0)
- return ins__raw_scnprintf(ins, bf, size, ops);
+ return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
if (ops->target.outside && ops->target.sym != NULL)
- return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.sym->name);
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.sym->name);
c = strchr(ops->raw, ',');
c = validate_comma(c, ops);
@@ -413,7 +415,7 @@ static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
c++;
}
- return scnprintf(bf, size, "%-6s %.*s%" PRIx64,
+ return scnprintf(bf, size, "%-*s %.*s%" PRIx64, max_ins_name,
ins->name, c ? c - ops->raw : 0, ops->raw,
ops->target.offset);
}
@@ -481,16 +483,16 @@ out_free_ops:
}
static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops)
+ struct ins_operands *ops, int max_ins_name)
{
int printed;
if (ops->locked.ins.ops == NULL)
- return ins__raw_scnprintf(ins, bf, size, ops);
+ return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
- printed = scnprintf(bf, size, "%-6s ", ins->name);
+ printed = scnprintf(bf, size, "%-*s ", max_ins_name, ins->name);
return printed + ins__scnprintf(&ops->locked.ins, bf + printed,
- size - printed, ops->locked.ops);
+ size - printed, ops->locked.ops, max_ins_name);
}
static void lock__delete(struct ins_operands *ops)
@@ -562,9 +564,9 @@ out_free_source:
}
static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops)
+ struct ins_operands *ops, int max_ins_name)
{
- return scnprintf(bf, size, "%-6s %s,%s", ins->name,
+ return scnprintf(bf, size, "%-*s %s,%s", max_ins_name, ins->name,
ops->source.name ?: ops->source.raw,
ops->target.name ?: ops->target.raw);
}
@@ -602,9 +604,9 @@ static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops
}
static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops)
+ struct ins_operands *ops, int max_ins_name)
{
- return scnprintf(bf, size, "%-6s %s", ins->name,
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name,
ops->target.name ?: ops->target.raw);
}
@@ -614,9 +616,9 @@ static struct ins_ops dec_ops = {
};
static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
- struct ins_operands *ops __maybe_unused)
+ struct ins_operands *ops __maybe_unused, int max_ins_name)
{
- return scnprintf(bf, size, "%-6s", "nop");
+ return scnprintf(bf, size, "%-*s", max_ins_name, "nop");
}
static struct ins_ops nop_ops = {
@@ -1230,12 +1232,12 @@ void disasm_line__free(struct disasm_line *dl)
annotation_line__delete(&dl->al);
}
-int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)
+int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw, int max_ins_name)
{
if (raw || !dl->ins.ops)
- return scnprintf(bf, size, "%-6s %s", dl->ins.name, dl->ops.raw);
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, dl->ins.name, dl->ops.raw);
- return ins__scnprintf(&dl->ins, bf, size, &dl->ops);
+ return ins__scnprintf(&dl->ins, bf, size, &dl->ops, max_ins_name);
}
static void annotation_line__add(struct annotation_line *al, struct list_head *head)
@@ -1889,6 +1891,7 @@ int symbol__annotate(struct symbol *sym, struct map *map,
struct annotation_options *options,
struct arch **parch)
{
+ struct annotation *notes = symbol__annotation(sym);
struct annotate_args args = {
.privsize = privsize,
.evsel = evsel,
@@ -1919,6 +1922,7 @@ int symbol__annotate(struct symbol *sym, struct map *map,
args.ms.map = map;
args.ms.sym = sym;
+ notes->start = map__rip_2objdump(map, sym->start);
return symbol__disassemble(sym, &args);
}
@@ -2410,12 +2414,30 @@ static inline int width_jumps(int n)
return 1;
}
+static int annotation__max_ins_name(struct annotation *notes)
+{
+ int max_name = 0, len;
+ struct annotation_line *al;
+
+ list_for_each_entry(al, &notes->src->source, node) {
+ if (al->offset == -1)
+ continue;
+
+ len = strlen(disasm_line(al)->ins.name);
+ if (max_name < len)
+ max_name = len;
+ }
+
+ return max_name;
+}
+
void annotation__init_column_widths(struct annotation *notes, struct symbol *sym)
{
notes->widths.addr = notes->widths.target =
notes->widths.min_addr = hex_width(symbol__size(sym));
notes->widths.max_addr = hex_width(sym->end);
notes->widths.jumps = width_jumps(notes->max_jump_sources);
+ notes->widths.max_ins_name = annotation__max_ins_name(notes);
}
void annotation__update_column_widths(struct annotation *notes)
@@ -2579,7 +2601,7 @@ call_like:
obj__printf(obj, " ");
}
- disasm_line__scnprintf(dl, bf, size, !notes->options->use_offset);
+ disasm_line__scnprintf(dl, bf, size, !notes->options->use_offset, notes->widths.max_ins_name);
}
static void ipc_coverage_string(char *bf, int size, struct annotation *notes)
@@ -2794,8 +2816,6 @@ int symbol__annotate2(struct symbol *sym, struct map *map, struct perf_evsel *ev
symbol__calc_percent(sym, evsel);
- notes->start = map__rip_2objdump(map, sym->start);
-
annotation__set_offsets(notes, size);
annotation__mark_jump_targets(notes, sym);
annotation__compute_ipc(notes, size);
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index fb6463730ba4..df34fe483164 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -4,16 +4,24 @@
#include <stdbool.h>
#include <stdint.h>
+#include <stdio.h>
#include <linux/types.h>
-#include "symbol.h"
-#include "hist.h"
-#include "sort.h"
#include <linux/list.h>
#include <linux/rbtree.h>
#include <pthread.h>
#include <asm/bug.h>
+#include "symbol_conf.h"
+struct hist_browser_timer;
+struct hist_entry;
struct ins_ops;
+struct map;
+struct map_symbol;
+struct addr_map_symbol;
+struct option;
+struct perf_sample;
+struct perf_evsel;
+struct symbol;
struct ins {
const char *name;
@@ -51,14 +59,14 @@ struct ins_ops {
void (*free)(struct ins_operands *ops);
int (*parse)(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms);
int (*scnprintf)(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops);
+ struct ins_operands *ops, int max_ins_name);
};
bool ins__is_jump(const struct ins *ins);
bool ins__is_call(const struct ins *ins);
bool ins__is_ret(const struct ins *ins);
bool ins__is_lock(const struct ins *ins);
-int ins__scnprintf(struct ins *ins, char *bf, size_t size, struct ins_operands *ops);
+int ins__scnprintf(struct ins *ins, char *bf, size_t size, struct ins_operands *ops, int max_ins_name);
bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2);
#define ANNOTATION__IPC_WIDTH 6
@@ -211,7 +219,7 @@ int __annotation__scnprintf_samples_period(struct annotation *notes,
struct perf_evsel *evsel,
bool show_freq);
-int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw);
+int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw, int max_ins_name);
size_t disasm__fprintf(struct list_head *head, FILE *fp);
void symbol__calc_percent(struct symbol *sym, struct perf_evsel *evsel);
@@ -281,6 +289,7 @@ struct annotation {
u8 target;
u8 min_addr;
u8 max_addr;
+ u8 max_ins_name;
} widths;
bool have_cycles;
struct annotated_source *src;
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index f69961c4a4f3..fb76b6b232d4 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -27,6 +27,7 @@
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/string.h>
+#include <linux/time64.h>
#include <sys/param.h>
#include <stdlib.h>
@@ -41,6 +42,7 @@
#include "pmu.h"
#include "evsel.h"
#include "cpumap.h"
+#include "symbol.h"
#include "thread_map.h"
#include "asm/bug.h"
#include "auxtrace.h"
@@ -857,7 +859,7 @@ void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type,
int code, int cpu, pid_t pid, pid_t tid, u64 ip,
- const char *msg)
+ const char *msg, u64 timestamp)
{
size_t size;
@@ -869,7 +871,9 @@ void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type,
auxtrace_error->cpu = cpu;
auxtrace_error->pid = pid;
auxtrace_error->tid = tid;
+ auxtrace_error->fmt = 1;
auxtrace_error->ip = ip;
+ auxtrace_error->time = timestamp;
strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);
size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
@@ -1159,12 +1163,27 @@ static const char *auxtrace_error_name(int type)
size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
{
struct auxtrace_error_event *e = &event->auxtrace_error;
+ unsigned long long nsecs = e->time;
+ const char *msg = e->msg;
int ret;
ret = fprintf(fp, " %s error type %u",
auxtrace_error_name(e->type), e->type);
+
+ if (e->fmt && nsecs) {
+ unsigned long secs = nsecs / NSEC_PER_SEC;
+
+ nsecs -= secs * NSEC_PER_SEC;
+ ret += fprintf(fp, " time %lu.%09llu", secs, nsecs);
+ } else {
+ ret += fprintf(fp, " time 0");
+ }
+
+ if (!e->fmt)
+ msg = (const char *)&e->time;
+
ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRIx64" code %u: %s\n",
- e->cpu, e->pid, e->tid, e->ip, e->code, e->msg);
+ e->cpu, e->pid, e->tid, e->ip, e->code, msg);
return ret;
}
@@ -1278,9 +1297,9 @@ static int __auxtrace_mmap__read(struct perf_mmap *map,
}
/* padding must be written by fn() e.g. record__process_auxtrace() */
- padding = size & 7;
+ padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);
if (padding)
- padding = 8 - padding;
+ padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding;
memset(&ev, 0, sizeof(ev));
ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
@@ -1899,7 +1918,8 @@ static struct dso *load_dso(const char *name)
if (!map)
return NULL;
- map__load(map);
+ if (map__load(map) < 0)
+ pr_err("File '%s' not found or has no symbols.\n", name);
dso = dso__get(map->dso);
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index 8e50f96d4b23..c69bcd9a3091 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -40,6 +40,9 @@ struct record_opts;
struct auxtrace_info_event;
struct events_stats;
+/* Auxtrace records must have the same alignment as perf event records */
+#define PERF_AUXTRACE_RECORD_ALIGNMENT 8
+
enum auxtrace_type {
PERF_AUXTRACE_UNKNOWN,
PERF_AUXTRACE_INTEL_PT,
@@ -516,7 +519,7 @@ void auxtrace_index__free(struct list_head *head);
void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type,
int code, int cpu, pid_t pid, pid_t tid, u64 ip,
- const char *msg);
+ const char *msg, u64 timestamp);
int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
struct perf_tool *tool,
diff --git a/tools/perf/util/block-range.c b/tools/perf/util/block-range.c
index f1451c987eec..1be432657501 100644
--- a/tools/perf/util/block-range.c
+++ b/tools/perf/util/block-range.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include "block-range.h"
#include "annotate.h"
+#include <assert.h>
+#include <stdlib.h>
struct {
struct rb_root root;
diff --git a/tools/perf/util/block-range.h b/tools/perf/util/block-range.h
index a5ba719d69fb..ec0fb534bf56 100644
--- a/tools/perf/util/block-range.h
+++ b/tools/perf/util/block-range.h
@@ -2,7 +2,11 @@
#ifndef __PERF_BLOCK_RANGE_H
#define __PERF_BLOCK_RANGE_H
-#include "symbol.h"
+#include <stdbool.h>
+#include <linux/rbtree.h>
+#include <linux/types.h>
+
+struct symbol;
/*
* struct block_range - non-overlapping parts of basic blocks
diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
new file mode 100644
index 000000000000..028c8ec1f62a
--- /dev/null
+++ b/tools/perf/util/bpf-event.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
+#include <stdlib.h>
+#include <bpf/bpf.h>
+#include <bpf/btf.h>
+#include <linux/btf.h>
+#include "bpf-event.h"
+#include "debug.h"
+#include "symbol.h"
+#include "machine.h"
+
+#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))
+
+static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len)
+{
+ int ret = 0;
+ size_t i;
+
+ for (i = 0; i < len; i++)
+ ret += snprintf(buf + ret, size - ret, "%02x", data[i]);
+ return ret;
+}
+
+int machine__process_bpf_event(struct machine *machine __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused)
+{
+ if (dump_trace)
+ perf_event__fprintf_bpf_event(event, stdout);
+ return 0;
+}
+
+/*
+ * Synthesize PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for one bpf
+ * program. One PERF_RECORD_BPF_EVENT is generated for the program. And
+ * one PERF_RECORD_KSYMBOL is generated for each sub program.
+ *
+ * Returns:
+ * 0 for success;
+ * -1 for failures;
+ * -2 for lack of kernel support.
+ */
+static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine,
+ int fd,
+ union perf_event *event,
+ struct record_opts *opts)
+{
+ struct ksymbol_event *ksymbol_event = &event->ksymbol_event;
+ struct bpf_event *bpf_event = &event->bpf_event;
+ u32 sub_prog_cnt, i, func_info_rec_size = 0;
+ u8 (*prog_tags)[BPF_TAG_SIZE] = NULL;
+ struct bpf_prog_info info = { .type = 0, };
+ u32 info_len = sizeof(info);
+ void *func_infos = NULL;
+ u64 *prog_addrs = NULL;
+ struct btf *btf = NULL;
+ u32 *prog_lens = NULL;
+ bool has_btf = false;
+ char errbuf[512];
+ int err = 0;
+
+ /* Call bpf_obj_get_info_by_fd() to get sizes of arrays */
+ err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
+
+ if (err) {
+ pr_debug("%s: failed to get BPF program info: %s, aborting\n",
+ __func__, str_error_r(errno, errbuf, sizeof(errbuf)));
+ return -1;
+ }
+ if (info_len < offsetof(struct bpf_prog_info, prog_tags)) {
+ pr_debug("%s: the kernel is too old, aborting\n", __func__);
+ return -2;
+ }
+
+ /* number of ksyms, func_lengths, and tags should match */
+ sub_prog_cnt = info.nr_jited_ksyms;
+ if (sub_prog_cnt != info.nr_prog_tags ||
+ sub_prog_cnt != info.nr_jited_func_lens)
+ return -1;
+
+ /* check BTF func info support */
+ if (info.btf_id && info.nr_func_info && info.func_info_rec_size) {
+ /* btf func info number should be same as sub_prog_cnt */
+ if (sub_prog_cnt != info.nr_func_info) {
+ pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__);
+ return -1;
+ }
+ if (btf__get_from_id(info.btf_id, &btf)) {
+ pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info.btf_id);
+ return -1;
+ }
+ func_info_rec_size = info.func_info_rec_size;
+ func_infos = calloc(sub_prog_cnt, func_info_rec_size);
+ if (!func_infos) {
+ pr_debug("%s: failed to allocate memory for func_infos, aborting\n", __func__);
+ return -1;
+ }
+ has_btf = true;
+ }
+
+ /*
+ * We need address, length, and tag for each sub program.
+ * Allocate memory and call bpf_obj_get_info_by_fd() again
+ */
+ prog_addrs = calloc(sub_prog_cnt, sizeof(u64));
+ if (!prog_addrs) {
+ pr_debug("%s: failed to allocate memory for prog_addrs, aborting\n", __func__);
+ goto out;
+ }
+ prog_lens = calloc(sub_prog_cnt, sizeof(u32));
+ if (!prog_lens) {
+ pr_debug("%s: failed to allocate memory for prog_lens, aborting\n", __func__);
+ goto out;
+ }
+ prog_tags = calloc(sub_prog_cnt, BPF_TAG_SIZE);
+ if (!prog_tags) {
+ pr_debug("%s: failed to allocate memory for prog_tags, aborting\n", __func__);
+ goto out;
+ }
+
+ memset(&info, 0, sizeof(info));
+ info.nr_jited_ksyms = sub_prog_cnt;
+ info.nr_jited_func_lens = sub_prog_cnt;
+ info.nr_prog_tags = sub_prog_cnt;
+ info.jited_ksyms = ptr_to_u64(prog_addrs);
+ info.jited_func_lens = ptr_to_u64(prog_lens);
+ info.prog_tags = ptr_to_u64(prog_tags);
+ info_len = sizeof(info);
+ if (has_btf) {
+ info.nr_func_info = sub_prog_cnt;
+ info.func_info_rec_size = func_info_rec_size;
+ info.func_info = ptr_to_u64(func_infos);
+ }
+
+ err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
+ if (err) {
+ pr_debug("%s: failed to get BPF program info, aborting\n", __func__);
+ goto out;
+ }
+
+ /* Synthesize PERF_RECORD_KSYMBOL */
+ for (i = 0; i < sub_prog_cnt; i++) {
+ const struct bpf_func_info *finfo;
+ const char *short_name = NULL;
+ const struct btf_type *t;
+ int name_len;
+
+ *ksymbol_event = (struct ksymbol_event){
+ .header = {
+ .type = PERF_RECORD_KSYMBOL,
+ .size = offsetof(struct ksymbol_event, name),
+ },
+ .addr = prog_addrs[i],
+ .len = prog_lens[i],
+ .ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
+ .flags = 0,
+ };
+ name_len = snprintf(ksymbol_event->name, KSYM_NAME_LEN,
+ "bpf_prog_");
+ name_len += snprintf_hex(ksymbol_event->name + name_len,
+ KSYM_NAME_LEN - name_len,
+ prog_tags[i], BPF_TAG_SIZE);
+ if (has_btf) {
+ finfo = func_infos + i * info.func_info_rec_size;
+ t = btf__type_by_id(btf, finfo->type_id);
+ short_name = btf__name_by_offset(btf, t->name_off);
+ } else if (i == 0 && sub_prog_cnt == 1) {
+ /* no subprog */
+ if (info.name[0])
+ short_name = info.name;
+ } else
+ short_name = "F";
+ if (short_name)
+ name_len += snprintf(ksymbol_event->name + name_len,
+ KSYM_NAME_LEN - name_len,
+ "_%s", short_name);
+
+ ksymbol_event->header.size += PERF_ALIGN(name_len + 1,
+ sizeof(u64));
+
+ memset((void *)event + event->header.size, 0, machine->id_hdr_size);
+ event->header.size += machine->id_hdr_size;
+ err = perf_tool__process_synth_event(tool, event,
+ machine, process);
+ }
+
+ /* Synthesize PERF_RECORD_BPF_EVENT */
+ if (opts->bpf_event) {
+ *bpf_event = (struct bpf_event){
+ .header = {
+ .type = PERF_RECORD_BPF_EVENT,
+ .size = sizeof(struct bpf_event),
+ },
+ .type = PERF_BPF_EVENT_PROG_LOAD,
+ .flags = 0,
+ .id = info.id,
+ };
+ memcpy(bpf_event->tag, prog_tags[i], BPF_TAG_SIZE);
+ memset((void *)event + event->header.size, 0, machine->id_hdr_size);
+ event->header.size += machine->id_hdr_size;
+ err = perf_tool__process_synth_event(tool, event,
+ machine, process);
+ }
+
+out:
+ free(prog_tags);
+ free(prog_lens);
+ free(prog_addrs);
+ free(func_infos);
+ free(btf);
+ return err ? -1 : 0;
+}
+
+int perf_event__synthesize_bpf_events(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine,
+ struct record_opts *opts)
+{
+ union perf_event *event;
+ __u32 id = 0;
+ int err;
+ int fd;
+
+ event = malloc(sizeof(event->bpf_event) + KSYM_NAME_LEN + machine->id_hdr_size);
+ if (!event)
+ return -1;
+ while (true) {
+ err = bpf_prog_get_next_id(id, &id);
+ if (err) {
+ if (errno == ENOENT) {
+ err = 0;
+ break;
+ }
+ pr_debug("%s: can't get next program: %s%s\n",
+ __func__, strerror(errno),
+ errno == EINVAL ? " -- kernel too old?" : "");
+ /* don't report error on old kernel or EPERM */
+ err = (errno == EINVAL || errno == EPERM) ? 0 : -1;
+ break;
+ }
+ fd = bpf_prog_get_fd_by_id(id);
+ if (fd < 0) {
+ pr_debug("%s: failed to get fd for prog_id %u\n",
+ __func__, id);
+ continue;
+ }
+
+ err = perf_event__synthesize_one_bpf_prog(tool, process,
+ machine, fd,
+ event, opts);
+ close(fd);
+ if (err) {
+ /* do not return error for old kernel */
+ if (err == -2)
+ err = 0;
+ break;
+ }
+ }
+ free(event);
+ return err;
+}
diff --git a/tools/perf/util/bpf-event.h b/tools/perf/util/bpf-event.h
new file mode 100644
index 000000000000..7890067e1a37
--- /dev/null
+++ b/tools/perf/util/bpf-event.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_BPF_EVENT_H
+#define __PERF_BPF_EVENT_H
+
+#include <linux/compiler.h>
+#include "event.h"
+
+struct machine;
+union perf_event;
+struct perf_sample;
+struct perf_tool;
+struct record_opts;
+
+#ifdef HAVE_LIBBPF_SUPPORT
+int machine__process_bpf_event(struct machine *machine, union perf_event *event,
+ struct perf_sample *sample);
+
+int perf_event__synthesize_bpf_events(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine,
+ struct record_opts *opts);
+#else
+static inline int machine__process_bpf_event(struct machine *machine __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_sample *sample __maybe_unused)
+{
+ return 0;
+}
+
+static inline int perf_event__synthesize_bpf_events(struct perf_tool *tool __maybe_unused,
+ perf_event__handler_t process __maybe_unused,
+ struct machine *machine __maybe_unused,
+ struct record_opts *opts __maybe_unused)
+{
+ return 0;
+}
+#endif // HAVE_LIBBPF_SUPPORT
+#endif
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index 2f3eb6d293ee..251d9ea6252f 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -15,6 +15,7 @@
#include <errno.h>
#include "perf.h"
#include "debug.h"
+#include "evlist.h"
#include "bpf-loader.h"
#include "bpf-prologue.h"
#include "probe-event.h"
@@ -24,22 +25,12 @@
#include "llvm-utils.h"
#include "c++/clang-c.h"
-#define DEFINE_PRINT_FN(name, level) \
-static int libbpf_##name(const char *fmt, ...) \
-{ \
- va_list args; \
- int ret; \
- \
- va_start(args, fmt); \
- ret = veprintf(level, verbose, pr_fmt(fmt), args);\
- va_end(args); \
- return ret; \
+static int libbpf_perf_print(enum libbpf_print_level level __attribute__((unused)),
+ const char *fmt, va_list args)
+{
+ return veprintf(1, verbose, pr_fmt(fmt), args);
}
-DEFINE_PRINT_FN(warning, 1)
-DEFINE_PRINT_FN(info, 1)
-DEFINE_PRINT_FN(debug, 1)
-
struct bpf_prog_priv {
bool is_tp;
char *sys_name;
@@ -59,9 +50,7 @@ bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
struct bpf_object *obj;
if (!libbpf_initialized) {
- libbpf_set_print(libbpf_warning,
- libbpf_info,
- libbpf_debug);
+ libbpf_set_print(libbpf_perf_print);
libbpf_initialized = true;
}
@@ -79,9 +68,7 @@ struct bpf_object *bpf__prepare_load(const char *filename, bool source)
struct bpf_object *obj;
if (!libbpf_initialized) {
- libbpf_set_print(libbpf_warning,
- libbpf_info,
- libbpf_debug);
+ libbpf_set_print(libbpf_perf_print);
libbpf_initialized = true;
}
@@ -1503,7 +1490,7 @@ apply_obj_config_object(struct bpf_object *obj)
struct bpf_map *map;
int err;
- bpf_map__for_each(map, obj) {
+ bpf_object__for_each_map(map, obj) {
err = apply_obj_config_map(map);
if (err)
return err;
@@ -1527,7 +1514,7 @@ int bpf__apply_obj_config(void)
#define bpf__for_each_map(pos, obj, objtmp) \
bpf_object__for_each_safe(obj, objtmp) \
- bpf_map__for_each(pos, obj)
+ bpf_object__for_each_map(pos, obj)
#define bpf__for_each_map_named(pos, obj, objtmp, name) \
bpf__for_each_map(pos, obj, objtmp) \
diff --git a/tools/perf/util/bpf-loader.h b/tools/perf/util/bpf-loader.h
index 62d245a90e1d..3f46856e3330 100644
--- a/tools/perf/util/bpf-loader.h
+++ b/tools/perf/util/bpf-loader.h
@@ -8,11 +8,7 @@
#include <linux/compiler.h>
#include <linux/err.h>
-#include <string.h>
#include <bpf/libbpf.h>
-#include "probe-event.h"
-#include "evlist.h"
-#include "debug.h"
enum bpf_loader_errno {
__BPF_LOADER_ERRNO__START = __LIBBPF_ERRNO__START - 100,
@@ -44,6 +40,7 @@ enum bpf_loader_errno {
};
struct perf_evsel;
+struct perf_evlist;
struct bpf_object;
struct parse_events_term;
#define PERF_BPF_PROBE_GROUP "perf_bpf_probe"
@@ -87,6 +84,8 @@ struct perf_evsel *bpf__setup_output_event(struct perf_evlist *evlist, const cha
int bpf__strerror_setup_output_event(struct perf_evlist *evlist, int err, char *buf, size_t size);
#else
#include <errno.h>
+#include <string.h>
+#include "debug.h"
static inline struct bpf_object *
bpf__prepare_load(const char *filename __maybe_unused,
diff --git a/tools/perf/util/bpf_map.c b/tools/perf/util/bpf_map.c
new file mode 100644
index 000000000000..eb853ca67cf4
--- /dev/null
+++ b/tools/perf/util/bpf_map.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+#include "util/bpf_map.h"
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+static bool bpf_map_def__is_per_cpu(const struct bpf_map_def *def)
+{
+ return def->type == BPF_MAP_TYPE_PERCPU_HASH ||
+ def->type == BPF_MAP_TYPE_PERCPU_ARRAY ||
+ def->type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
+ def->type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE;
+}
+
+static void *bpf_map_def__alloc_value(const struct bpf_map_def *def)
+{
+ if (bpf_map_def__is_per_cpu(def))
+ return malloc(round_up(def->value_size, 8) * sysconf(_SC_NPROCESSORS_CONF));
+
+ return malloc(def->value_size);
+}
+
+int bpf_map__fprintf(struct bpf_map *map, FILE *fp)
+{
+ const struct bpf_map_def *def = bpf_map__def(map);
+ void *prev_key = NULL, *key, *value;
+ int fd = bpf_map__fd(map), err;
+ int printed = 0;
+
+ if (fd < 0)
+ return fd;
+
+ if (IS_ERR(def))
+ return PTR_ERR(def);
+
+ err = -ENOMEM;
+ key = malloc(def->key_size);
+ if (key == NULL)
+ goto out;
+
+ value = bpf_map_def__alloc_value(def);
+ if (value == NULL)
+ goto out_free_key;
+
+ while ((err = bpf_map_get_next_key(fd, prev_key, key) == 0)) {
+ int intkey = *(int *)key;
+
+ if (!bpf_map_lookup_elem(fd, key, value)) {
+ bool boolval = *(bool *)value;
+ if (boolval)
+ printed += fprintf(fp, "[%d] = %d,\n", intkey, boolval);
+ } else {
+ printed += fprintf(fp, "[%d] = ERROR,\n", intkey);
+ }
+
+ prev_key = key;
+ }
+
+ if (err == ENOENT)
+ err = printed;
+
+ free(value);
+out_free_key:
+ free(key);
+out:
+ return err;
+}
diff --git a/tools/perf/util/bpf_map.h b/tools/perf/util/bpf_map.h
new file mode 100644
index 000000000000..d6abd5e47af8
--- /dev/null
+++ b/tools/perf/util/bpf_map.h
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+#ifndef __PERF_BPF_MAP_H
+#define __PERF_BPF_MAP_H 1
+
+#include <stdio.h>
+#include <linux/compiler.h>
+struct bpf_map;
+
+#ifdef HAVE_LIBBPF_SUPPORT
+
+int bpf_map__fprintf(struct bpf_map *map, FILE *fp);
+
+#else
+
+static inline int bpf_map__fprintf(struct bpf_map *map __maybe_unused, FILE *fp __maybe_unused)
+{
+ return 0;
+}
+
+#endif // HAVE_LIBBPF_SUPPORT
+
+#endif // __PERF_BPF_MAP_H
diff --git a/tools/perf/util/branch.h b/tools/perf/util/branch.h
index 1e3c7c5cdc63..64f96b79f1d7 100644
--- a/tools/perf/util/branch.h
+++ b/tools/perf/util/branch.h
@@ -1,8 +1,31 @@
#ifndef _PERF_BRANCH_H
#define _PERF_BRANCH_H 1
+#include <stdio.h>
#include <stdint.h>
-#include "../perf.h"
+#include <linux/perf_event.h>
+#include <linux/types.h>
+
+struct branch_flags {
+ u64 mispred:1;
+ u64 predicted:1;
+ u64 in_tx:1;
+ u64 abort:1;
+ u64 cycles:16;
+ u64 type:4;
+ u64 reserved:40;
+};
+
+struct branch_entry {
+ u64 from;
+ u64 to;
+ struct branch_flags flags;
+};
+
+struct branch_stack {
+ u64 nr;
+ struct branch_entry entries[0];
+};
struct branch_type_stat {
bool branch_to;
@@ -13,8 +36,6 @@ struct branch_type_stat {
u64 cross_2m;
};
-struct branch_flags;
-
void branch_type_count(struct branch_type_stat *st, struct branch_flags *flags,
u64 from, u64 to);
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 04b1d53e4bf9..bff0d17920ed 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -15,6 +15,8 @@
#include <sys/types.h>
#include "build-id.h"
#include "event.h"
+#include "namespaces.h"
+#include "map.h"
#include "symbol.h"
#include "thread.h"
#include <linux/kernel.h>
@@ -363,7 +365,8 @@ int perf_session__write_buildid_table(struct perf_session *session,
if (err)
return err;
- for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&session->machines.guests); nd;
+ nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
err = machine__write_buildid_table(pos, fd);
if (err)
@@ -396,7 +399,8 @@ int dsos__hit_all(struct perf_session *session)
if (err)
return err;
- for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&session->machines.guests); nd;
+ nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
err = machine__hit_all_dsos(pos);
@@ -849,7 +853,8 @@ int perf_session__cache_build_ids(struct perf_session *session)
ret = machine__cache_build_ids(&session->machines.host);
- for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&session->machines.guests); nd;
+ nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret |= machine__cache_build_ids(pos);
}
@@ -866,7 +871,8 @@ bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
struct rb_node *nd;
bool ret = machine__read_build_ids(&session->machines.host, with_hits);
- for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&session->machines.guests); nd;
+ nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret |= machine__read_build_ids(pos, with_hits);
}
diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h
index f0c565164a97..93668f38f1ed 100644
--- a/tools/perf/util/build-id.h
+++ b/tools/perf/util/build-id.h
@@ -6,9 +6,10 @@
#define SBUILD_ID_SIZE (BUILD_ID_SIZE * 2 + 1)
#include "tool.h"
-#include "namespaces.h"
#include <linux/types.h>
+struct nsinfo;
+
extern struct perf_tool build_id__mark_dso_hit_ops;
struct dso;
struct feat_fd;
diff --git a/tools/perf/util/c++/Build b/tools/perf/util/c++/Build
index 988fef1b11d7..613ecfd76527 100644
--- a/tools/perf/util/c++/Build
+++ b/tools/perf/util/c++/Build
@@ -1,2 +1,2 @@
-libperf-$(CONFIG_CLANGLLVM) += clang.o
-libperf-$(CONFIG_CLANGLLVM) += clang-test.o
+perf-$(CONFIG_CLANGLLVM) += clang.o
+perf-$(CONFIG_CLANGLLVM) += clang-test.o
diff --git a/tools/perf/util/c++/clang.cpp b/tools/perf/util/c++/clang.cpp
index 89512504551b..fc361c3f8570 100644
--- a/tools/perf/util/c++/clang.cpp
+++ b/tools/perf/util/c++/clang.cpp
@@ -156,11 +156,11 @@ getBPFObjectFromModule(llvm::Module *Module)
#endif
if (NotAdded) {
llvm::errs() << "TargetMachine can't emit a file of this type\n";
- return std::unique_ptr<llvm::SmallVectorImpl<char>>(nullptr);;
+ return std::unique_ptr<llvm::SmallVectorImpl<char>>(nullptr);
}
PM.run(*Module);
- return std::move(Buffer);
+ return Buffer;
}
}
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index dc2212e12184..abb608b09269 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -23,8 +23,10 @@
#include "util.h"
#include "sort.h"
#include "machine.h"
+#include "map.h"
#include "callchain.h"
#include "branch.h"
+#include "symbol.h"
#define CALLCHAIN_PARAM_DEFAULT \
.mode = CHAIN_GRAPH_ABS, \
@@ -1577,3 +1579,18 @@ int callchain_cursor__copy(struct callchain_cursor *dst,
return rc;
}
+
+/*
+ * Initialize a cursor before adding entries inside, but keep
+ * the previously allocated entries as a cache.
+ */
+void callchain_cursor_reset(struct callchain_cursor *cursor)
+{
+ struct callchain_cursor_node *node;
+
+ cursor->nr = 0;
+ cursor->last = &cursor->first;
+
+ for (node = cursor->first; node != NULL; node = node->next)
+ map__zput(node->map);
+}
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 99d38ac019b8..80e056a3d882 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -2,14 +2,14 @@
#ifndef __PERF_CALLCHAIN_H
#define __PERF_CALLCHAIN_H
-#include "../perf.h"
#include <linux/list.h>
#include <linux/rbtree.h>
#include "event.h"
-#include "map.h"
-#include "symbol.h"
+#include "map_symbol.h"
#include "branch.h"
+struct map;
+
#define HELP_PAD "\t\t\t\t"
#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace):\n\n"
@@ -188,20 +188,7 @@ int callchain_append(struct callchain_root *root,
int callchain_merge(struct callchain_cursor *cursor,
struct callchain_root *dst, struct callchain_root *src);
-/*
- * Initialize a cursor before adding entries inside, but keep
- * the previously allocated entries as a cache.
- */
-static inline void callchain_cursor_reset(struct callchain_cursor *cursor)
-{
- struct callchain_cursor_node *node;
-
- cursor->nr = 0;
- cursor->last = &cursor->first;
-
- for (node = cursor->first; node != NULL; node = node->next)
- map__zput(node->map);
-}
+void callchain_cursor_reset(struct callchain_cursor *cursor);
int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip,
struct map *map, struct symbol *sym,
diff --git a/tools/perf/util/color.c b/tools/perf/util/color.c
index 39e628b8938e..39b8c4ec4e2e 100644
--- a/tools/perf/util/color.c
+++ b/tools/perf/util/color.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include "cache.h"
-#include "config.h"
#include <stdlib.h>
#include <stdio.h>
#include "color.h"
@@ -10,44 +9,6 @@
int perf_use_color_default = -1;
-int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty)
-{
- if (value) {
- if (!strcasecmp(value, "never"))
- return 0;
- if (!strcasecmp(value, "always"))
- return 1;
- if (!strcasecmp(value, "auto"))
- goto auto_color;
- }
-
- /* Missing or explicit false to turn off colorization */
- if (!perf_config_bool(var, value))
- return 0;
-
- /* any normal truth value defaults to 'auto' */
- auto_color:
- if (stdout_is_tty < 0)
- stdout_is_tty = isatty(1);
- if (stdout_is_tty || pager_in_use()) {
- char *term = getenv("TERM");
- if (term && strcmp(term, "dumb"))
- return 1;
- }
- return 0;
-}
-
-int perf_color_default_config(const char *var, const char *value,
- void *cb __maybe_unused)
-{
- if (!strcmp(var, "color.ui")) {
- perf_use_color_default = perf_config_colorbool(var, value, -1);
- return 0;
- }
-
- return 0;
-}
-
static int __color_vsnprintf(char *bf, size_t size, const char *color,
const char *fmt, va_list args, const char *trail)
{
diff --git a/tools/perf/util/color.h b/tools/perf/util/color.h
index 22777b1812ee..01f7bed21c9b 100644
--- a/tools/perf/util/color.h
+++ b/tools/perf/util/color.h
@@ -3,6 +3,7 @@
#define __PERF_COLOR_H
#include <stdio.h>
+#include <stdarg.h>
/* "\033[1;38;5;2xx;48;5;2xxm\0" is 23 bytes */
#define COLOR_MAXLEN 24
diff --git a/tools/perf/util/color_config.c b/tools/perf/util/color_config.c
new file mode 100644
index 000000000000..817dc56e7e95
--- /dev/null
+++ b/tools/perf/util/color_config.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include "cache.h"
+#include "config.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include "color.h"
+#include <math.h>
+#include <unistd.h>
+
+int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty)
+{
+ if (value) {
+ if (!strcasecmp(value, "never"))
+ return 0;
+ if (!strcasecmp(value, "always"))
+ return 1;
+ if (!strcasecmp(value, "auto"))
+ goto auto_color;
+ }
+
+ /* Missing or explicit false to turn off colorization */
+ if (!perf_config_bool(var, value))
+ return 0;
+
+ /* any normal truth value defaults to 'auto' */
+ auto_color:
+ if (stdout_is_tty < 0)
+ stdout_is_tty = isatty(1);
+ if (stdout_is_tty || pager_in_use()) {
+ char *term = getenv("TERM");
+ if (term && strcmp(term, "dumb"))
+ return 1;
+ }
+ return 0;
+}
+
+int perf_color_default_config(const char *var, const char *value,
+ void *cb __maybe_unused)
+{
+ if (!strcmp(var, "color.ui")) {
+ perf_use_color_default = perf_config_colorbool(var, value, -1);
+ return 0;
+ }
+
+ return 0;
+}
diff --git a/tools/perf/util/comm.c b/tools/perf/util/comm.c
index 31279a7bd919..1066de92af12 100644
--- a/tools/perf/util/comm.c
+++ b/tools/perf/util/comm.c
@@ -6,6 +6,7 @@
#include <stdio.h>
#include <string.h>
#include <linux/refcount.h>
+#include <linux/rbtree.h>
#include "rwsem.h"
struct comm_str {
diff --git a/tools/perf/util/comm.h b/tools/perf/util/comm.h
index 3e5c438fe85e..f35d8fbfa2dd 100644
--- a/tools/perf/util/comm.h
+++ b/tools/perf/util/comm.h
@@ -2,9 +2,9 @@
#ifndef __PERF_COMM_H
#define __PERF_COMM_H
-#include "../perf.h"
-#include <linux/rbtree.h>
#include <linux/list.h>
+#include <linux/types.h>
+#include <stdbool.h>
struct comm_str;
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 1ea8f898f1a1..fa092511c52b 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -13,6 +13,7 @@
#include <sys/param.h>
#include "util.h"
#include "cache.h"
+#include "callchain.h"
#include <subcmd/exec-cmd.h>
#include "util/event.h" /* proc_map_timeout */
#include "util/hist.h" /* perf_hist_config */
diff --git a/tools/perf/util/cpu-set-sched.h b/tools/perf/util/cpu-set-sched.h
new file mode 100644
index 000000000000..8cf4e40d322a
--- /dev/null
+++ b/tools/perf/util/cpu-set-sched.h
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: LGPL-2.1
+// Definitions taken from glibc for use with older systems, same licensing.
+#ifndef _CPU_SET_SCHED_PERF_H
+#define _CPU_SET_SCHED_PERF_H
+
+#include <features.h>
+#include <sched.h>
+
+#ifndef CPU_EQUAL
+#ifndef __CPU_EQUAL_S
+#if __GNUC_PREREQ (2, 91)
+# define __CPU_EQUAL_S(setsize, cpusetp1, cpusetp2) \
+ (__builtin_memcmp (cpusetp1, cpusetp2, setsize) == 0)
+#else
+# define __CPU_EQUAL_S(setsize, cpusetp1, cpusetp2) \
+ (__extension__ \
+ ({ const __cpu_mask *__arr1 = (cpusetp1)->__bits; \
+ const __cpu_mask *__arr2 = (cpusetp2)->__bits; \
+ size_t __imax = (setsize) / sizeof (__cpu_mask); \
+ size_t __i; \
+ for (__i = 0; __i < __imax; ++__i) \
+ if (__arr1[__i] != __arr2[__i]) \
+ break; \
+ __i == __imax; }))
+#endif
+#endif // __CPU_EQUAL_S
+
+#define CPU_EQUAL(cpusetp1, cpusetp2) \
+ __CPU_EQUAL_S (sizeof (cpu_set_t), cpusetp1, cpusetp2)
+#endif // CPU_EQUAL
+
+#ifndef CPU_OR
+#ifndef __CPU_OP_S
+#define __CPU_OP_S(setsize, destset, srcset1, srcset2, op) \
+ (__extension__ \
+ ({ cpu_set_t *__dest = (destset); \
+ const __cpu_mask *__arr1 = (srcset1)->__bits; \
+ const __cpu_mask *__arr2 = (srcset2)->__bits; \
+ size_t __imax = (setsize) / sizeof (__cpu_mask); \
+ size_t __i; \
+ for (__i = 0; __i < __imax; ++__i) \
+ ((__cpu_mask *) __dest->__bits)[__i] = __arr1[__i] op __arr2[__i]; \
+ __dest; }))
+#endif // __CPU_OP_S
+
+#define CPU_OR(destset, srcset1, srcset2) \
+ __CPU_OP_S (sizeof (cpu_set_t), destset, srcset1, srcset2, |)
+#endif // CPU_OR
+
+#endif // _CPU_SET_SCHED_PERF_H
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 383674f448fc..0b599229bc7e 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -681,7 +681,7 @@ size_t cpu_map__snprint(struct cpu_map *map, char *buf, size_t size)
#undef COMMA
- pr_debug("cpumask list: %s\n", buf);
+ pr_debug2("cpumask list: %s\n", buf);
return ret;
}
@@ -730,3 +730,13 @@ size_t cpu_map__snprint_mask(struct cpu_map *map, char *buf, size_t size)
buf[size - 1] = '\0';
return ptr - buf;
}
+
+const struct cpu_map *cpu_map__online(void) /* thread unsafe */
+{
+ static const struct cpu_map *online = NULL;
+
+ if (!online)
+ online = cpu_map__new(NULL); /* from /sys/devices/system/cpu/online */
+
+ return online;
+}
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index ed8999d1a640..f00ce624b9f7 100644
--- a/tools/perf/util/cpumap.h
+++ b/tools/perf/util/cpumap.h
@@ -29,6 +29,7 @@ int cpu_map__get_core_id(int cpu);
int cpu_map__get_core(struct cpu_map *map, int idx, void *data);
int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp);
int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep);
+const struct cpu_map *cpu_map__online(void); /* thread unsafe */
struct cpu_map *cpu_map__get(struct cpu_map *map);
void cpu_map__put(struct cpu_map *map);
diff --git a/tools/perf/util/cputopo.c b/tools/perf/util/cputopo.c
new file mode 100644
index 000000000000..ece0710249d4
--- /dev/null
+++ b/tools/perf/util/cputopo.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <sys/param.h>
+#include <inttypes.h>
+#include <api/fs/fs.h>
+
+#include "cputopo.h"
+#include "cpumap.h"
+#include "util.h"
+#include "env.h"
+
+
+#define CORE_SIB_FMT \
+ "%s/devices/system/cpu/cpu%d/topology/core_siblings_list"
+#define THRD_SIB_FMT \
+ "%s/devices/system/cpu/cpu%d/topology/thread_siblings_list"
+#define NODE_ONLINE_FMT \
+ "%s/devices/system/node/online"
+#define NODE_MEMINFO_FMT \
+ "%s/devices/system/node/node%d/meminfo"
+#define NODE_CPULIST_FMT \
+ "%s/devices/system/node/node%d/cpulist"
+
+static int build_cpu_topology(struct cpu_topology *tp, int cpu)
+{
+ FILE *fp;
+ char filename[MAXPATHLEN];
+ char *buf = NULL, *p;
+ size_t len = 0;
+ ssize_t sret;
+ u32 i = 0;
+ int ret = -1;
+
+ scnprintf(filename, MAXPATHLEN, CORE_SIB_FMT,
+ sysfs__mountpoint(), cpu);
+ fp = fopen(filename, "r");
+ if (!fp)
+ goto try_threads;
+
+ sret = getline(&buf, &len, fp);
+ fclose(fp);
+ if (sret <= 0)
+ goto try_threads;
+
+ p = strchr(buf, '\n');
+ if (p)
+ *p = '\0';
+
+ for (i = 0; i < tp->core_sib; i++) {
+ if (!strcmp(buf, tp->core_siblings[i]))
+ break;
+ }
+ if (i == tp->core_sib) {
+ tp->core_siblings[i] = buf;
+ tp->core_sib++;
+ buf = NULL;
+ len = 0;
+ }
+ ret = 0;
+
+try_threads:
+ scnprintf(filename, MAXPATHLEN, THRD_SIB_FMT,
+ sysfs__mountpoint(), cpu);
+ fp = fopen(filename, "r");
+ if (!fp)
+ goto done;
+
+ if (getline(&buf, &len, fp) <= 0)
+ goto done;
+
+ p = strchr(buf, '\n');
+ if (p)
+ *p = '\0';
+
+ for (i = 0; i < tp->thread_sib; i++) {
+ if (!strcmp(buf, tp->thread_siblings[i]))
+ break;
+ }
+ if (i == tp->thread_sib) {
+ tp->thread_siblings[i] = buf;
+ tp->thread_sib++;
+ buf = NULL;
+ }
+ ret = 0;
+done:
+ if (fp)
+ fclose(fp);
+ free(buf);
+ return ret;
+}
+
+void cpu_topology__delete(struct cpu_topology *tp)
+{
+ u32 i;
+
+ if (!tp)
+ return;
+
+ for (i = 0 ; i < tp->core_sib; i++)
+ zfree(&tp->core_siblings[i]);
+
+ for (i = 0 ; i < tp->thread_sib; i++)
+ zfree(&tp->thread_siblings[i]);
+
+ free(tp);
+}
+
+struct cpu_topology *cpu_topology__new(void)
+{
+ struct cpu_topology *tp = NULL;
+ void *addr;
+ u32 nr, i;
+ size_t sz;
+ long ncpus;
+ int ret = -1;
+ struct cpu_map *map;
+
+ ncpus = cpu__max_present_cpu();
+
+ /* build online CPU map */
+ map = cpu_map__new(NULL);
+ if (map == NULL) {
+ pr_debug("failed to get system cpumap\n");
+ return NULL;
+ }
+
+ nr = (u32)(ncpus & UINT_MAX);
+
+ sz = nr * sizeof(char *);
+ addr = calloc(1, sizeof(*tp) + 2 * sz);
+ if (!addr)
+ goto out_free;
+
+ tp = addr;
+ addr += sizeof(*tp);
+ tp->core_siblings = addr;
+ addr += sz;
+ tp->thread_siblings = addr;
+
+ for (i = 0; i < nr; i++) {
+ if (!cpu_map__has(map, i))
+ continue;
+
+ ret = build_cpu_topology(tp, i);
+ if (ret < 0)
+ break;
+ }
+
+out_free:
+ cpu_map__put(map);
+ if (ret) {
+ cpu_topology__delete(tp);
+ tp = NULL;
+ }
+ return tp;
+}
+
+static int load_numa_node(struct numa_topology_node *node, int nr)
+{
+ char str[MAXPATHLEN];
+ char field[32];
+ char *buf = NULL, *p;
+ size_t len = 0;
+ int ret = -1;
+ FILE *fp;
+ u64 mem;
+
+ node->node = (u32) nr;
+
+ scnprintf(str, MAXPATHLEN, NODE_MEMINFO_FMT,
+ sysfs__mountpoint(), nr);
+ fp = fopen(str, "r");
+ if (!fp)
+ return -1;
+
+ while (getline(&buf, &len, fp) > 0) {
+ /* skip over invalid lines */
+ if (!strchr(buf, ':'))
+ continue;
+ if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
+ goto err;
+ if (!strcmp(field, "MemTotal:"))
+ node->mem_total = mem;
+ if (!strcmp(field, "MemFree:"))
+ node->mem_free = mem;
+ if (node->mem_total && node->mem_free)
+ break;
+ }
+
+ fclose(fp);
+ fp = NULL;
+
+ scnprintf(str, MAXPATHLEN, NODE_CPULIST_FMT,
+ sysfs__mountpoint(), nr);
+
+ fp = fopen(str, "r");
+ if (!fp)
+ return -1;
+
+ if (getline(&buf, &len, fp) <= 0)
+ goto err;
+
+ p = strchr(buf, '\n');
+ if (p)
+ *p = '\0';
+
+ node->cpus = buf;
+ fclose(fp);
+ return 0;
+
+err:
+ free(buf);
+ if (fp)
+ fclose(fp);
+ return ret;
+}
+
+struct numa_topology *numa_topology__new(void)
+{
+ struct cpu_map *node_map = NULL;
+ struct numa_topology *tp = NULL;
+ char path[MAXPATHLEN];
+ char *buf = NULL;
+ size_t len = 0;
+ u32 nr, i;
+ FILE *fp;
+ char *c;
+
+ scnprintf(path, MAXPATHLEN, NODE_ONLINE_FMT,
+ sysfs__mountpoint());
+
+ fp = fopen(path, "r");
+ if (!fp)
+ return NULL;
+
+ if (getline(&buf, &len, fp) <= 0)
+ goto out;
+
+ c = strchr(buf, '\n');
+ if (c)
+ *c = '\0';
+
+ node_map = cpu_map__new(buf);
+ if (!node_map)
+ goto out;
+
+ nr = (u32) node_map->nr;
+
+ tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0])*nr);
+ if (!tp)
+ goto out;
+
+ tp->nr = nr;
+
+ for (i = 0; i < nr; i++) {
+ if (load_numa_node(&tp->nodes[i], node_map->map[i])) {
+ numa_topology__delete(tp);
+ tp = NULL;
+ break;
+ }
+ }
+
+out:
+ free(buf);
+ fclose(fp);
+ cpu_map__put(node_map);
+ return tp;
+}
+
+void numa_topology__delete(struct numa_topology *tp)
+{
+ u32 i;
+
+ for (i = 0; i < tp->nr; i++)
+ free(tp->nodes[i].cpus);
+
+ free(tp);
+}
diff --git a/tools/perf/util/cputopo.h b/tools/perf/util/cputopo.h
new file mode 100644
index 000000000000..47a97e71acdf
--- /dev/null
+++ b/tools/perf/util/cputopo.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_CPUTOPO_H
+#define __PERF_CPUTOPO_H
+
+#include <linux/types.h>
+#include "env.h"
+
+struct cpu_topology {
+ u32 core_sib;
+ u32 thread_sib;
+ char **core_siblings;
+ char **thread_siblings;
+};
+
+struct numa_topology_node {
+ char *cpus;
+ u32 node;
+ u64 mem_total;
+ u64 mem_free;
+};
+
+struct numa_topology {
+ u32 nr;
+ struct numa_topology_node nodes[0];
+};
+
+struct cpu_topology *cpu_topology__new(void);
+void cpu_topology__delete(struct cpu_topology *tp);
+
+struct numa_topology *numa_topology__new(void);
+void numa_topology__delete(struct numa_topology *tp);
+
+#endif /* __PERF_CPUTOPO_H */
diff --git a/tools/perf/util/cs-etm-decoder/Build b/tools/perf/util/cs-etm-decoder/Build
index bc22c39c727f..216cb17a3322 100644
--- a/tools/perf/util/cs-etm-decoder/Build
+++ b/tools/perf/util/cs-etm-decoder/Build
@@ -1 +1 @@
-libperf-$(CONFIG_AUXTRACE) += cs-etm-decoder.o
+perf-$(CONFIG_AUXTRACE) += cs-etm-decoder.o
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
index 8c155575c6c5..ba4c623cd8de 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -290,6 +290,12 @@ static void cs_etm_decoder__clear_buffer(struct cs_etm_decoder *decoder)
decoder->packet_buffer[i].instr_count = 0;
decoder->packet_buffer[i].last_instr_taken_branch = false;
decoder->packet_buffer[i].last_instr_size = 0;
+ decoder->packet_buffer[i].last_instr_type = 0;
+ decoder->packet_buffer[i].last_instr_subtype = 0;
+ decoder->packet_buffer[i].last_instr_cond = 0;
+ decoder->packet_buffer[i].flags = 0;
+ decoder->packet_buffer[i].exception_number = UINT32_MAX;
+ decoder->packet_buffer[i].trace_chan_id = UINT8_MAX;
decoder->packet_buffer[i].cpu = INT_MIN;
}
}
@@ -300,14 +306,12 @@ cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder,
enum cs_etm_sample_type sample_type)
{
u32 et = 0;
- struct int_node *inode = NULL;
+ int cpu;
if (decoder->packet_count >= MAX_BUFFER - 1)
return OCSD_RESP_FATAL_SYS_ERR;
- /* Search the RB tree for the cpu associated with this traceID */
- inode = intlist__find(traceid_list, trace_chan_id);
- if (!inode)
+ if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
return OCSD_RESP_FATAL_SYS_ERR;
et = decoder->tail;
@@ -317,12 +321,18 @@ cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder,
decoder->packet_buffer[et].sample_type = sample_type;
decoder->packet_buffer[et].isa = CS_ETM_ISA_UNKNOWN;
- decoder->packet_buffer[et].cpu = *((int *)inode->priv);
+ decoder->packet_buffer[et].cpu = cpu;
decoder->packet_buffer[et].start_addr = CS_ETM_INVAL_ADDR;
decoder->packet_buffer[et].end_addr = CS_ETM_INVAL_ADDR;
decoder->packet_buffer[et].instr_count = 0;
decoder->packet_buffer[et].last_instr_taken_branch = false;
decoder->packet_buffer[et].last_instr_size = 0;
+ decoder->packet_buffer[et].last_instr_type = 0;
+ decoder->packet_buffer[et].last_instr_subtype = 0;
+ decoder->packet_buffer[et].last_instr_cond = 0;
+ decoder->packet_buffer[et].flags = 0;
+ decoder->packet_buffer[et].exception_number = UINT32_MAX;
+ decoder->packet_buffer[et].trace_chan_id = trace_chan_id;
if (decoder->packet_count == MAX_BUFFER - 1)
return OCSD_RESP_WAIT;
@@ -366,6 +376,9 @@ cs_etm_decoder__buffer_range(struct cs_etm_decoder *decoder,
packet->start_addr = elem->st_addr;
packet->end_addr = elem->en_addr;
packet->instr_count = elem->num_instr_range;
+ packet->last_instr_type = elem->last_i_type;
+ packet->last_instr_subtype = elem->last_i_subtype;
+ packet->last_instr_cond = elem->last_instr_cond;
switch (elem->last_i_type) {
case OCSD_INSTR_BR:
@@ -395,10 +408,20 @@ cs_etm_decoder__buffer_discontinuity(struct cs_etm_decoder *decoder,
static ocsd_datapath_resp_t
cs_etm_decoder__buffer_exception(struct cs_etm_decoder *decoder,
+ const ocsd_generic_trace_elem *elem,
const uint8_t trace_chan_id)
-{
- return cs_etm_decoder__buffer_packet(decoder, trace_chan_id,
- CS_ETM_EXCEPTION);
+{ int ret = 0;
+ struct cs_etm_packet *packet;
+
+ ret = cs_etm_decoder__buffer_packet(decoder, trace_chan_id,
+ CS_ETM_EXCEPTION);
+ if (ret != OCSD_RESP_CONT && ret != OCSD_RESP_WAIT)
+ return ret;
+
+ packet = &decoder->packet_buffer[decoder->tail];
+ packet->exception_number = elem->exception_number;
+
+ return ret;
}
static ocsd_datapath_resp_t
@@ -432,7 +455,7 @@ static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
trace_chan_id);
break;
case OCSD_GEN_TRC_ELEM_EXCEPTION:
- resp = cs_etm_decoder__buffer_exception(decoder,
+ resp = cs_etm_decoder__buffer_exception(decoder, elem,
trace_chan_id);
break;
case OCSD_GEN_TRC_ELEM_EXCEPTION_RET:
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
index a6407d41598f..3ab11dfa92ae 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
@@ -15,13 +15,6 @@
struct cs_etm_decoder;
-struct cs_etm_buffer {
- const unsigned char *buf;
- size_t len;
- u64 offset;
- u64 ref_timestamp;
-};
-
enum cs_etm_sample_type {
CS_ETM_EMPTY,
CS_ETM_RANGE,
@@ -43,8 +36,14 @@ struct cs_etm_packet {
u64 start_addr;
u64 end_addr;
u32 instr_count;
+ u32 last_instr_type;
+ u32 last_instr_subtype;
+ u32 flags;
+ u32 exception_number;
+ u8 last_instr_cond;
u8 last_instr_taken_branch;
u8 last_instr_size;
+ u8 trace_chan_id;
int cpu;
};
@@ -99,9 +98,10 @@ enum {
CS_ETM_PROTO_PTM,
};
-enum {
+enum cs_etm_decoder_operation {
CS_ETM_OPERATION_PRINT = 1,
CS_ETM_OPERATION_DECODE,
+ CS_ETM_OPERATION_MAX,
};
int cs_etm_decoder__process_data_block(struct cs_etm_decoder *decoder,
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 27a374ddf661..110804936fc3 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -12,6 +12,7 @@
#include <linux/log2.h>
#include <linux/types.h>
+#include <opencsd/ocsd_if_types.h>
#include <stdlib.h>
#include "auxtrace.h"
@@ -24,6 +25,7 @@
#include "machine.h"
#include "map.h"
#include "perf.h"
+#include "symbol.h"
#include "thread.h"
#include "thread_map.h"
#include "thread-stack.h"
@@ -63,13 +65,10 @@ struct cs_etm_queue {
struct thread *thread;
struct cs_etm_decoder *decoder;
struct auxtrace_buffer *buffer;
- const struct cs_etm_state *state;
union perf_event *event_buf;
unsigned int queue_nr;
pid_t pid, tid;
int cpu;
- u64 time;
- u64 timestamp;
u64 offset;
u64 period_instructions;
struct branch_stack *last_branch;
@@ -77,11 +76,13 @@ struct cs_etm_queue {
size_t last_branch_pos;
struct cs_etm_packet *prev_packet;
struct cs_etm_packet *packet;
+ const unsigned char *buf;
+ size_t buf_len, buf_used;
};
static int cs_etm__update_queues(struct cs_etm_auxtrace *etm);
static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
- pid_t tid, u64 time_);
+ pid_t tid);
/* PTMs ETMIDR [11:8] set to b0011 */
#define ETMIDR_PTM_VERSION 0x00000300
@@ -96,6 +97,34 @@ static u32 cs_etm__get_v7_protocol_version(u32 etmidr)
return CS_ETM_PROTO_ETMV3;
}
+static int cs_etm__get_magic(u8 trace_chan_id, u64 *magic)
+{
+ struct int_node *inode;
+ u64 *metadata;
+
+ inode = intlist__find(traceid_list, trace_chan_id);
+ if (!inode)
+ return -EINVAL;
+
+ metadata = inode->priv;
+ *magic = metadata[CS_ETM_MAGIC];
+ return 0;
+}
+
+int cs_etm__get_cpu(u8 trace_chan_id, int *cpu)
+{
+ struct int_node *inode;
+ u64 *metadata;
+
+ inode = intlist__find(traceid_list, trace_chan_id);
+ if (!inode)
+ return -EINVAL;
+
+ metadata = inode->priv;
+ *cpu = (int)metadata[CS_ETM_CPU];
+ return 0;
+}
+
static void cs_etm__packet_dump(const char *pkt_string)
{
const char *color = PERF_COLOR_BLUE;
@@ -109,10 +138,83 @@ static void cs_etm__packet_dump(const char *pkt_string)
fflush(stdout);
}
+static void cs_etm__set_trace_param_etmv3(struct cs_etm_trace_params *t_params,
+ struct cs_etm_auxtrace *etm, int idx,
+ u32 etmidr)
+{
+ u64 **metadata = etm->metadata;
+
+ t_params[idx].protocol = cs_etm__get_v7_protocol_version(etmidr);
+ t_params[idx].etmv3.reg_ctrl = metadata[idx][CS_ETM_ETMCR];
+ t_params[idx].etmv3.reg_trc_id = metadata[idx][CS_ETM_ETMTRACEIDR];
+}
+
+static void cs_etm__set_trace_param_etmv4(struct cs_etm_trace_params *t_params,
+ struct cs_etm_auxtrace *etm, int idx)
+{
+ u64 **metadata = etm->metadata;
+
+ t_params[idx].protocol = CS_ETM_PROTO_ETMV4i;
+ t_params[idx].etmv4.reg_idr0 = metadata[idx][CS_ETMV4_TRCIDR0];
+ t_params[idx].etmv4.reg_idr1 = metadata[idx][CS_ETMV4_TRCIDR1];
+ t_params[idx].etmv4.reg_idr2 = metadata[idx][CS_ETMV4_TRCIDR2];
+ t_params[idx].etmv4.reg_idr8 = metadata[idx][CS_ETMV4_TRCIDR8];
+ t_params[idx].etmv4.reg_configr = metadata[idx][CS_ETMV4_TRCCONFIGR];
+ t_params[idx].etmv4.reg_traceidr = metadata[idx][CS_ETMV4_TRCTRACEIDR];
+}
+
+static int cs_etm__init_trace_params(struct cs_etm_trace_params *t_params,
+ struct cs_etm_auxtrace *etm)
+{
+ int i;
+ u32 etmidr;
+ u64 architecture;
+
+ for (i = 0; i < etm->num_cpu; i++) {
+ architecture = etm->metadata[i][CS_ETM_MAGIC];
+
+ switch (architecture) {
+ case __perf_cs_etmv3_magic:
+ etmidr = etm->metadata[i][CS_ETM_ETMIDR];
+ cs_etm__set_trace_param_etmv3(t_params, etm, i, etmidr);
+ break;
+ case __perf_cs_etmv4_magic:
+ cs_etm__set_trace_param_etmv4(t_params, etm, i);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int cs_etm__init_decoder_params(struct cs_etm_decoder_params *d_params,
+ struct cs_etm_queue *etmq,
+ enum cs_etm_decoder_operation mode)
+{
+ int ret = -EINVAL;
+
+ if (!(mode < CS_ETM_OPERATION_MAX))
+ goto out;
+
+ d_params->packet_printer = cs_etm__packet_dump;
+ d_params->operation = mode;
+ d_params->data = etmq;
+ d_params->formatted = true;
+ d_params->fsyncs = false;
+ d_params->hsyncs = false;
+ d_params->frame_aligned = true;
+
+ ret = 0;
+out:
+ return ret;
+}
+
static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
struct auxtrace_buffer *buffer)
{
- int i, ret;
+ int ret;
const char *color = PERF_COLOR_BLUE;
struct cs_etm_decoder_params d_params;
struct cs_etm_trace_params *t_params;
@@ -126,48 +228,22 @@ static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
/* Use metadata to fill in trace parameters for trace decoder */
t_params = zalloc(sizeof(*t_params) * etm->num_cpu);
- for (i = 0; i < etm->num_cpu; i++) {
- if (etm->metadata[i][CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
- u32 etmidr = etm->metadata[i][CS_ETM_ETMIDR];
-
- t_params[i].protocol =
- cs_etm__get_v7_protocol_version(etmidr);
- t_params[i].etmv3.reg_ctrl =
- etm->metadata[i][CS_ETM_ETMCR];
- t_params[i].etmv3.reg_trc_id =
- etm->metadata[i][CS_ETM_ETMTRACEIDR];
- } else if (etm->metadata[i][CS_ETM_MAGIC] ==
- __perf_cs_etmv4_magic) {
- t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
- t_params[i].etmv4.reg_idr0 =
- etm->metadata[i][CS_ETMV4_TRCIDR0];
- t_params[i].etmv4.reg_idr1 =
- etm->metadata[i][CS_ETMV4_TRCIDR1];
- t_params[i].etmv4.reg_idr2 =
- etm->metadata[i][CS_ETMV4_TRCIDR2];
- t_params[i].etmv4.reg_idr8 =
- etm->metadata[i][CS_ETMV4_TRCIDR8];
- t_params[i].etmv4.reg_configr =
- etm->metadata[i][CS_ETMV4_TRCCONFIGR];
- t_params[i].etmv4.reg_traceidr =
- etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
- }
- }
+
+ if (!t_params)
+ return;
+
+ if (cs_etm__init_trace_params(t_params, etm))
+ goto out_free;
/* Set decoder parameters to simply print the trace packets */
- d_params.packet_printer = cs_etm__packet_dump;
- d_params.operation = CS_ETM_OPERATION_PRINT;
- d_params.formatted = true;
- d_params.fsyncs = false;
- d_params.hsyncs = false;
- d_params.frame_aligned = true;
+ if (cs_etm__init_decoder_params(&d_params, NULL,
+ CS_ETM_OPERATION_PRINT))
+ goto out_free;
decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);
- zfree(&t_params);
-
if (!decoder)
- return;
+ goto out_free;
do {
size_t consumed;
@@ -182,6 +258,9 @@ static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
} while (buffer_used < buffer->size);
cs_etm_decoder__free(decoder);
+
+out_free:
+ zfree(&t_params);
}
static int cs_etm__flush_events(struct perf_session *session,
@@ -205,7 +284,7 @@ static int cs_etm__flush_events(struct perf_session *session,
if (ret < 0)
return ret;
- return cs_etm__process_timeless_queues(etm, -1, MAX_TIMESTAMP - 1);
+ return cs_etm__process_timeless_queues(etm, -1);
}
static void cs_etm__free_queue(void *priv)
@@ -251,7 +330,7 @@ static void cs_etm__free(struct perf_session *session)
cs_etm__free_events(session);
session->auxtrace = NULL;
- /* First remove all traceID/CPU# nodes for the RB tree */
+ /* First remove all traceID/metadata nodes from the RB tree */
intlist__for_each_entry_safe(inode, tmp, traceid_list)
intlist__remove(traceid_list, inode);
/* Then the RB tree itself */
@@ -297,7 +376,7 @@ static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address,
struct addr_location al;
if (!etmq)
- return -1;
+ return 0;
machine = etmq->etm->machine;
cpumode = cs_etm__cpu_mode(etmq, address);
@@ -305,7 +384,7 @@ static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address,
thread = etmq->thread;
if (!thread) {
if (cpumode != PERF_RECORD_MISC_KERNEL)
- return -EINVAL;
+ return 0;
thread = etmq->etm->unknown_thread;
}
@@ -328,12 +407,10 @@ static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address,
return len;
}
-static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
- unsigned int queue_nr)
+static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm)
{
- int i;
struct cs_etm_decoder_params d_params;
- struct cs_etm_trace_params *t_params;
+ struct cs_etm_trace_params *t_params = NULL;
struct cs_etm_queue *etmq;
size_t szp = sizeof(struct cs_etm_packet);
@@ -368,59 +445,22 @@ static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
if (!etmq->event_buf)
goto out_free;
- etmq->etm = etm;
- etmq->queue_nr = queue_nr;
- etmq->pid = -1;
- etmq->tid = -1;
- etmq->cpu = -1;
-
/* Use metadata to fill in trace parameters for trace decoder */
t_params = zalloc(sizeof(*t_params) * etm->num_cpu);
if (!t_params)
goto out_free;
- for (i = 0; i < etm->num_cpu; i++) {
- if (etm->metadata[i][CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
- u32 etmidr = etm->metadata[i][CS_ETM_ETMIDR];
-
- t_params[i].protocol =
- cs_etm__get_v7_protocol_version(etmidr);
- t_params[i].etmv3.reg_ctrl =
- etm->metadata[i][CS_ETM_ETMCR];
- t_params[i].etmv3.reg_trc_id =
- etm->metadata[i][CS_ETM_ETMTRACEIDR];
- } else if (etm->metadata[i][CS_ETM_MAGIC] ==
- __perf_cs_etmv4_magic) {
- t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
- t_params[i].etmv4.reg_idr0 =
- etm->metadata[i][CS_ETMV4_TRCIDR0];
- t_params[i].etmv4.reg_idr1 =
- etm->metadata[i][CS_ETMV4_TRCIDR1];
- t_params[i].etmv4.reg_idr2 =
- etm->metadata[i][CS_ETMV4_TRCIDR2];
- t_params[i].etmv4.reg_idr8 =
- etm->metadata[i][CS_ETMV4_TRCIDR8];
- t_params[i].etmv4.reg_configr =
- etm->metadata[i][CS_ETMV4_TRCCONFIGR];
- t_params[i].etmv4.reg_traceidr =
- etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
- }
- }
+ if (cs_etm__init_trace_params(t_params, etm))
+ goto out_free;
- /* Set decoder parameters to simply print the trace packets */
- d_params.packet_printer = cs_etm__packet_dump;
- d_params.operation = CS_ETM_OPERATION_DECODE;
- d_params.formatted = true;
- d_params.fsyncs = false;
- d_params.hsyncs = false;
- d_params.frame_aligned = true;
- d_params.data = etmq;
+ /* Set decoder parameters to decode trace packets */
+ if (cs_etm__init_decoder_params(&d_params, etmq,
+ CS_ETM_OPERATION_DECODE))
+ goto out_free;
etmq->decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);
- zfree(&t_params);
-
if (!etmq->decoder)
goto out_free;
@@ -433,14 +473,13 @@ static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
cs_etm__mem_access))
goto out_free_decoder;
- etmq->offset = 0;
- etmq->period_instructions = 0;
-
+ zfree(&t_params);
return etmq;
out_free_decoder:
cs_etm_decoder__free(etmq->decoder);
out_free:
+ zfree(&t_params);
zfree(&etmq->event_buf);
zfree(&etmq->last_branch);
zfree(&etmq->last_branch_rb);
@@ -455,24 +494,30 @@ static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
struct auxtrace_queue *queue,
unsigned int queue_nr)
{
+ int ret = 0;
struct cs_etm_queue *etmq = queue->priv;
if (list_empty(&queue->head) || etmq)
- return 0;
+ goto out;
- etmq = cs_etm__alloc_queue(etm, queue_nr);
+ etmq = cs_etm__alloc_queue(etm);
- if (!etmq)
- return -ENOMEM;
+ if (!etmq) {
+ ret = -ENOMEM;
+ goto out;
+ }
queue->priv = etmq;
-
- if (queue->cpu != -1)
- etmq->cpu = queue->cpu;
-
+ etmq->etm = etm;
+ etmq->queue_nr = queue_nr;
+ etmq->cpu = queue->cpu;
etmq->tid = queue->tid;
+ etmq->pid = -1;
+ etmq->offset = 0;
+ etmq->period_instructions = 0;
- return 0;
+out:
+ return ret;
}
static int cs_etm__setup_queues(struct cs_etm_auxtrace *etm)
@@ -480,6 +525,9 @@ static int cs_etm__setup_queues(struct cs_etm_auxtrace *etm)
unsigned int i;
int ret;
+ if (!etm->kernel_start)
+ etm->kernel_start = machine__kernel_start(etm->machine);
+
for (i = 0; i < etm->queues.nr_queues; i++) {
ret = cs_etm__setup_queue(etm, &etm->queues.queue_array[i], i);
if (ret)
@@ -637,7 +685,7 @@ static int cs_etm__inject_event(union perf_event *event,
static int
-cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq)
+cs_etm__get_trace(struct cs_etm_queue *etmq)
{
struct auxtrace_buffer *aux_buffer = etmq->buffer;
struct auxtrace_buffer *old_buffer = aux_buffer;
@@ -651,7 +699,7 @@ cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq)
if (!aux_buffer) {
if (old_buffer)
auxtrace_buffer__drop_data(old_buffer);
- buff->len = 0;
+ etmq->buf_len = 0;
return 0;
}
@@ -671,13 +719,11 @@ cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq)
if (old_buffer)
auxtrace_buffer__drop_data(old_buffer);
- buff->offset = aux_buffer->offset;
- buff->len = aux_buffer->size;
- buff->buf = aux_buffer->data;
-
- buff->ref_timestamp = aux_buffer->reference;
+ etmq->buf_used = 0;
+ etmq->buf_len = aux_buffer->size;
+ etmq->buf = aux_buffer->data;
- return buff->len;
+ return etmq->buf_len;
}
static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
@@ -719,7 +765,7 @@ static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
sample.stream_id = etmq->etm->instructions_id;
sample.period = period;
sample.cpu = etmq->packet->cpu;
- sample.flags = 0;
+ sample.flags = etmq->prev_packet->flags;
sample.insn_len = 1;
sample.cpumode = event->sample.header.misc;
@@ -778,7 +824,7 @@ static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq)
sample.stream_id = etmq->etm->branches_id;
sample.period = 1;
sample.cpu = etmq->packet->cpu;
- sample.flags = 0;
+ sample.flags = etmq->prev_packet->flags;
sample.cpumode = event->sample.header.misc;
/*
@@ -1106,95 +1152,489 @@ static int cs_etm__end_block(struct cs_etm_queue *etmq)
return 0;
}
+/*
+ * cs_etm__get_data_block: Fetch a block from the auxtrace_buffer queue
+ * if need be.
+ * Returns: < 0 if error
+ * = 0 if no more auxtrace_buffer to read
+ * > 0 if the current buffer isn't empty yet
+ */
+static int cs_etm__get_data_block(struct cs_etm_queue *etmq)
+{
+ int ret;
+
+ if (!etmq->buf_len) {
+ ret = cs_etm__get_trace(etmq);
+ if (ret <= 0)
+ return ret;
+ /*
+ * We cannot assume consecutive blocks in the data file
+ * are contiguous, reset the decoder to force re-sync.
+ */
+ ret = cs_etm_decoder__reset(etmq->decoder);
+ if (ret)
+ return ret;
+ }
+
+ return etmq->buf_len;
+}
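
The three return cases above are meant to be consumed as follows; a minimal caller sketch (illustrative only, process_block() is a hypothetical consumer and not part of this patch):

    for (;;) {
            int ret = cs_etm__get_data_block(etmq);

            if (ret < 0)            /* decoder or buffer error */
                    return ret;
            if (!ret)               /* no more auxtrace buffers to read */
                    break;
            /* ret > 0: etmq->buf_len bytes are still waiting to be decoded */
            process_block(etmq);
    }
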
+
+static bool cs_etm__is_svc_instr(struct cs_etm_queue *etmq,
+ struct cs_etm_packet *packet,
+ u64 end_addr)
+{
+ u16 instr16;
+ u32 instr32;
+ u64 addr;
+
+ switch (packet->isa) {
+ case CS_ETM_ISA_T32:
+ /*
+ * The SVC of T32 is defined in ARM DDI 0487D.a, F5.1.247:
+ *
+ * b'15 b'8
+ * +-----------------+--------+
+ * | 1 1 0 1 1 1 1 1 | imm8 |
+ * +-----------------+--------+
+ *
+ * According to the specification, SVC is only defined for T32
+ * as a 16-bit instruction and has no 32-bit encoding, so read
+ * only 2 bytes as the instruction size for T32.
+ */
+ addr = end_addr - 2;
+ cs_etm__mem_access(etmq, addr, sizeof(instr16), (u8 *)&instr16);
+ if ((instr16 & 0xFF00) == 0xDF00)
+ return true;
+
+ break;
+ case CS_ETM_ISA_A32:
+ /*
+ * The SVC of A32 is defined in ARM DDI 0487D.a, F5.1.247:
+ *
+ * b'31 b'28 b'27 b'24
+ * +---------+---------+-------------------------+
+ * | !1111 | 1 1 1 1 | imm24 |
+ * +---------+---------+-------------------------+
+ */
+ addr = end_addr - 4;
+ cs_etm__mem_access(etmq, addr, sizeof(instr32), (u8 *)&instr32);
+ if ((instr32 & 0x0F000000) == 0x0F000000 &&
+ (instr32 & 0xF0000000) != 0xF0000000)
+ return true;
+
+ break;
+ case CS_ETM_ISA_A64:
+ /*
+ * The SVC of A64 is defined in ARM DDI 0487D.a, C6.2.294:
+ *
+ * b'31 b'21 b'4 b'0
+ * +-----------------------+---------+-----------+
+ * | 1 1 0 1 0 1 0 0 0 0 0 | imm16 | 0 0 0 0 1 |
+ * +-----------------------+---------+-----------+
+ */
+ addr = end_addr - 4;
+ cs_etm__mem_access(etmq, addr, sizeof(instr32), (u8 *)&instr32);
+ if ((instr32 & 0xFFE0001F) == 0xd4000001)
+ return true;
+
+ break;
+ case CS_ETM_ISA_UNKNOWN:
+ default:
+ break;
+ }
+
+ return false;
+}
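
The mask/value checks above can be sanity-tested against the canonical SVC encodings quoted from the Arm ARM; a small standalone sketch (not part of the patch, values are the SVC #1/#0 encodings):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint16_t t32_svc = 0xDF01;      /* T32: SVC #1 */
            uint32_t a32_svc = 0xEF000000;  /* A32: SVC #0, cond = AL */
            uint32_t a64_svc = 0xD4000001;  /* A64: SVC #0 */

            /* Same checks as cs_etm__is_svc_instr() above */
            assert((t32_svc & 0xFF00) == 0xDF00);
            assert((a32_svc & 0x0F000000) == 0x0F000000 &&
                   (a32_svc & 0xF0000000) != 0xF0000000);
            assert((a64_svc & 0xFFE0001F) == 0xD4000001);
            return 0;
    }
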
+
+static bool cs_etm__is_syscall(struct cs_etm_queue *etmq, u64 magic)
+{
+ struct cs_etm_packet *packet = etmq->packet;
+ struct cs_etm_packet *prev_packet = etmq->prev_packet;
+
+ if (magic == __perf_cs_etmv3_magic)
+ if (packet->exception_number == CS_ETMV3_EXC_SVC)
+ return true;
+
+ /*
+ * The ETMv4 exception type CS_ETMV4_EXC_CALL covers SVC, SMC
+ * and HVC; check whether it is an SVC instruction based on
+ * the packet address.
+ */
+ if (magic == __perf_cs_etmv4_magic) {
+ if (packet->exception_number == CS_ETMV4_EXC_CALL &&
+ cs_etm__is_svc_instr(etmq, prev_packet,
+ prev_packet->end_addr))
+ return true;
+ }
+
+ return false;
+}
+
+static bool cs_etm__is_async_exception(struct cs_etm_queue *etmq, u64 magic)
+{
+ struct cs_etm_packet *packet = etmq->packet;
+
+ if (magic == __perf_cs_etmv3_magic)
+ if (packet->exception_number == CS_ETMV3_EXC_DEBUG_HALT ||
+ packet->exception_number == CS_ETMV3_EXC_ASYNC_DATA_ABORT ||
+ packet->exception_number == CS_ETMV3_EXC_PE_RESET ||
+ packet->exception_number == CS_ETMV3_EXC_IRQ ||
+ packet->exception_number == CS_ETMV3_EXC_FIQ)
+ return true;
+
+ if (magic == __perf_cs_etmv4_magic)
+ if (packet->exception_number == CS_ETMV4_EXC_RESET ||
+ packet->exception_number == CS_ETMV4_EXC_DEBUG_HALT ||
+ packet->exception_number == CS_ETMV4_EXC_SYSTEM_ERROR ||
+ packet->exception_number == CS_ETMV4_EXC_INST_DEBUG ||
+ packet->exception_number == CS_ETMV4_EXC_DATA_DEBUG ||
+ packet->exception_number == CS_ETMV4_EXC_IRQ ||
+ packet->exception_number == CS_ETMV4_EXC_FIQ)
+ return true;
+
+ return false;
+}
+
+static bool cs_etm__is_sync_exception(struct cs_etm_queue *etmq, u64 magic)
+{
+ struct cs_etm_packet *packet = etmq->packet;
+ struct cs_etm_packet *prev_packet = etmq->prev_packet;
+
+ if (magic == __perf_cs_etmv3_magic)
+ if (packet->exception_number == CS_ETMV3_EXC_SMC ||
+ packet->exception_number == CS_ETMV3_EXC_HYP ||
+ packet->exception_number == CS_ETMV3_EXC_JAZELLE_THUMBEE ||
+ packet->exception_number == CS_ETMV3_EXC_UNDEFINED_INSTR ||
+ packet->exception_number == CS_ETMV3_EXC_PREFETCH_ABORT ||
+ packet->exception_number == CS_ETMV3_EXC_DATA_FAULT ||
+ packet->exception_number == CS_ETMV3_EXC_GENERIC)
+ return true;
+
+ if (magic == __perf_cs_etmv4_magic) {
+ if (packet->exception_number == CS_ETMV4_EXC_TRAP ||
+ packet->exception_number == CS_ETMV4_EXC_ALIGNMENT ||
+ packet->exception_number == CS_ETMV4_EXC_INST_FAULT ||
+ packet->exception_number == CS_ETMV4_EXC_DATA_FAULT)
+ return true;
+
+ /*
+ * For CS_ETMV4_EXC_CALL, instructions other than SVC
+ * (i.e. SMC, HVC) are taken as sync exceptions.
+ */
+ if (packet->exception_number == CS_ETMV4_EXC_CALL &&
+ !cs_etm__is_svc_instr(etmq, prev_packet,
+ prev_packet->end_addr))
+ return true;
+
+ /*
+ * ETMv4 has 5 bits for the exception number; numbers in
+ * the range (CS_ETMV4_EXC_FIQ, CS_ETMV4_EXC_END] are
+ * implementation defined exceptions.
+ *
+ * In that case, simply treat it as a sync exception.
+ */
+ if (packet->exception_number > CS_ETMV4_EXC_FIQ &&
+ packet->exception_number <= CS_ETMV4_EXC_END)
+ return true;
+ }
+
+ return false;
+}
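
Taken together with cs_etm__is_syscall() and cs_etm__is_async_exception(), the ETMv4 path buckets an exception number into three classes; a condensed standalone sketch of that mapping (illustrative only, using the numeric values of the CS_ETMV4_EXC_* enum added to cs-etm.h further below, and taking the SVC-vs-SMC/HVC disambiguation as an input flag since it needs the instruction bytes):

    enum exc_class { EXC_NONE, EXC_SYSCALL, EXC_ASYNC, EXC_SYNC };

    static enum exc_class classify_etmv4(unsigned int nr, int is_svc)
    {
            switch (nr) {
            case 2:                                 /* CALL: SVC, SMC or HVC */
                    return is_svc ? EXC_SYSCALL : EXC_SYNC;
            case 0: case 1: case 4: case 6:
            case 7: case 14: case 15:               /* reset, debug, SError, IRQ, FIQ */
                    return EXC_ASYNC;
            case 3: case 10: case 11: case 12:      /* trap, alignment, faults */
                    return EXC_SYNC;
            default:
                    /* 16..31 are implementation defined, treated as sync */
                    return (nr > 15 && nr <= 31) ? EXC_SYNC : EXC_NONE;
            }
    }
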
+
+static int cs_etm__set_sample_flags(struct cs_etm_queue *etmq)
+{
+ struct cs_etm_packet *packet = etmq->packet;
+ struct cs_etm_packet *prev_packet = etmq->prev_packet;
+ u64 magic;
+ int ret;
+
+ switch (packet->sample_type) {
+ case CS_ETM_RANGE:
+ /*
+ * An immediate branch instruction with neither link nor
+ * return flag is a normal branch instruction within
+ * the function.
+ */
+ if (packet->last_instr_type == OCSD_INSTR_BR &&
+ packet->last_instr_subtype == OCSD_S_INSTR_NONE) {
+ packet->flags = PERF_IP_FLAG_BRANCH;
+
+ if (packet->last_instr_cond)
+ packet->flags |= PERF_IP_FLAG_CONDITIONAL;
+ }
+
+ /*
+ * An immediate branch instruction with link (e.g. BL) is a
+ * branch instruction for a function call.
+ */
+ if (packet->last_instr_type == OCSD_INSTR_BR &&
+ packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_CALL;
+
+ /*
+ * An indirect branch instruction with link (e.g. BLR) is a
+ * branch instruction for a function call.
+ */
+ if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
+ packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_CALL;
+
+ /*
+ * An indirect branch instruction with subtype
+ * OCSD_S_INSTR_V7_IMPLIED_RET is an explicit hint of a
+ * function return for A32/T32.
+ */
+ if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
+ packet->last_instr_subtype == OCSD_S_INSTR_V7_IMPLIED_RET)
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_RETURN;
+
+ /*
+ * An indirect branch instruction without link (e.g. BR) is
+ * usually a function return, especially for functions in
+ * dynamically linked libraries.
+ */
+ if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
+ packet->last_instr_subtype == OCSD_S_INSTR_NONE)
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_RETURN;
+
+ /* Return instruction for function return. */
+ if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
+ packet->last_instr_subtype == OCSD_S_INSTR_V8_RET)
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_RETURN;
+
+ /*
+ * The decoder might insert a discontinuity in the middle of
+ * instruction packets; fix up prev_packet with the flag
+ * PERF_IP_FLAG_TRACE_BEGIN to indicate the trace is restarting.
+ */
+ if (prev_packet->sample_type == CS_ETM_DISCONTINUITY)
+ prev_packet->flags |= PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_TRACE_BEGIN;
+
+ /*
+ * If the previous packet is an exception return packet
+ * and the return address immediately follows an SVC
+ * instruction, calibrate the previous packet's sample
+ * flags to PERF_IP_FLAG_SYSCALLRET.
+ */
+ if (prev_packet->flags == (PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_RETURN |
+ PERF_IP_FLAG_INTERRUPT) &&
+ cs_etm__is_svc_instr(etmq, packet, packet->start_addr))
+ prev_packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_RETURN |
+ PERF_IP_FLAG_SYSCALLRET;
+ break;
+ case CS_ETM_DISCONTINUITY:
+ /*
+ * The trace is discontinuous; if the previous packet is an
+ * instruction packet, set the flag PERF_IP_FLAG_TRACE_END
+ * on the previous packet.
+ */
+ if (prev_packet->sample_type == CS_ETM_RANGE)
+ prev_packet->flags |= PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_TRACE_END;
+ break;
+ case CS_ETM_EXCEPTION:
+ ret = cs_etm__get_magic(packet->trace_chan_id, &magic);
+ if (ret)
+ return ret;
+
+ /* The exception is for system call. */
+ if (cs_etm__is_syscall(etmq, magic))
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_CALL |
+ PERF_IP_FLAG_SYSCALLRET;
+ /*
+ * The exceptions are triggered by external signals from bus,
+ * interrupt controller, debug module, PE reset or halt.
+ */
+ else if (cs_etm__is_async_exception(etmq, magic))
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_CALL |
+ PERF_IP_FLAG_ASYNC |
+ PERF_IP_FLAG_INTERRUPT;
+ /*
+ * Otherwise, the exception is caused by a trap, an
+ * instruction or data fault, or an alignment error.
+ */
+ else if (cs_etm__is_sync_exception(etmq, magic))
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_CALL |
+ PERF_IP_FLAG_INTERRUPT;
+
+ /*
+ * An exception packet is not used standalone for generating
+ * samples; it is affiliated with the previous instruction
+ * range packet. So when the exception packet is inserted,
+ * set the previous range packet's flags to tell perf it is
+ * an exception-taken branch.
+ */
+ if (prev_packet->sample_type == CS_ETM_RANGE)
+ prev_packet->flags = packet->flags;
+ break;
+ case CS_ETM_EXCEPTION_RET:
+ /*
+ * An exception return packet is not used standalone for
+ * generating samples; it is affiliated with the previous
+ * instruction range packet. So when the exception return
+ * packet is inserted, set the previous range packet's flags
+ * to tell perf it is an exception return branch.
+ *
+ * The exception return can be for either a system call or
+ * another exception type; unfortunately the packet doesn't
+ * contain exception type information, so we cannot decide
+ * the exception type purely from the exception return packet.
+ * Recording the exception number from the exception packet
+ * and reusing it for the exception return packet is not
+ * reliable either, because the trace can be discontinuous or
+ * the interrupt can be nested; in those two cases the
+ * recorded exception number cannot be used for the exception
+ * return packet.
+ *
+ * For the exception return packet we only need to distinguish
+ * whether it is for a system call or for another type. That
+ * decision can therefore be deferred until the next packet,
+ * which contains the return address; based on that address we
+ * can read the previous instruction, check whether it is a
+ * system call instruction, and calibrate the sample flags as
+ * needed.
+ */
+ if (prev_packet->sample_type == CS_ETM_RANGE)
+ prev_packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_RETURN |
+ PERF_IP_FLAG_INTERRUPT;
+ break;
+ case CS_ETM_EMPTY:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int cs_etm__decode_data_block(struct cs_etm_queue *etmq)
+{
+ int ret = 0;
+ size_t processed = 0;
+
+ /*
+ * Packets are decoded and added to the decoder's packet queue
+ * until the decoder packet processing callback has requested that
+ * processing stops or there is nothing left in the buffer. Normal
+ * operations that stop processing are a timestamp packet or a full
+ * decoder buffer queue.
+ */
+ ret = cs_etm_decoder__process_data_block(etmq->decoder,
+ etmq->offset,
+ &etmq->buf[etmq->buf_used],
+ etmq->buf_len,
+ &processed);
+ if (ret)
+ goto out;
+
+ etmq->offset += processed;
+ etmq->buf_used += processed;
+ etmq->buf_len -= processed;
+
+out:
+ return ret;
+}
+
+static int cs_etm__process_decoder_queue(struct cs_etm_queue *etmq)
+{
+ int ret;
+
+ /* Process each packet in this chunk */
+ while (1) {
+ ret = cs_etm_decoder__get_packet(etmq->decoder,
+ etmq->packet);
+ if (ret <= 0)
+ /*
+ * Stop processing this chunk on
+ * end of data or error
+ */
+ break;
+
+ /*
+ * Packet addresses are swapped during packet
+ * handling in the switch() statement below, so
+ * the sample flags must be set before the
+ * switch() statement in order to use the address
+ * information before the swap.
+ */
+ ret = cs_etm__set_sample_flags(etmq);
+ if (ret < 0)
+ break;
+
+ switch (etmq->packet->sample_type) {
+ case CS_ETM_RANGE:
+ /*
+ * If the packet contains an instruction
+ * range, generate instruction sequence
+ * events.
+ */
+ cs_etm__sample(etmq);
+ break;
+ case CS_ETM_EXCEPTION:
+ case CS_ETM_EXCEPTION_RET:
+ /*
+ * If the exception packet is coming,
+ * make sure the previous instruction
+ * range packet to be handled properly.
+ */
+ cs_etm__exception(etmq);
+ break;
+ case CS_ETM_DISCONTINUITY:
+ /*
+ * Discontinuity in trace, flush
+ * previous branch stack
+ */
+ cs_etm__flush(etmq);
+ break;
+ case CS_ETM_EMPTY:
+ /*
+ * Should not receive empty packet,
+ * report error.
+ */
+ pr_err("CS ETM Trace: empty packet\n");
+ return -EINVAL;
+ default:
+ break;
+ }
+ }
+
+ return ret;
+}
static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
{
- struct cs_etm_auxtrace *etm = etmq->etm;
- struct cs_etm_buffer buffer;
- size_t buffer_used, processed;
int err = 0;
- if (!etm->kernel_start)
- etm->kernel_start = machine__kernel_start(etm->machine);
-
/* Go through each buffer in the queue and decode them one by one */
while (1) {
- buffer_used = 0;
- memset(&buffer, 0, sizeof(buffer));
- err = cs_etm__get_trace(&buffer, etmq);
+ err = cs_etm__get_data_block(etmq);
if (err <= 0)
return err;
- /*
- * We cannot assume consecutive blocks in the data file are
- * contiguous, reset the decoder to force re-sync.
- */
- err = cs_etm_decoder__reset(etmq->decoder);
- if (err != 0)
- return err;
/* Run trace decoder until buffer consumed or end of trace */
do {
- processed = 0;
- err = cs_etm_decoder__process_data_block(
- etmq->decoder,
- etmq->offset,
- &buffer.buf[buffer_used],
- buffer.len - buffer_used,
- &processed);
+ err = cs_etm__decode_data_block(etmq);
if (err)
return err;
- etmq->offset += processed;
- buffer_used += processed;
-
- /* Process each packet in this chunk */
- while (1) {
- err = cs_etm_decoder__get_packet(etmq->decoder,
- etmq->packet);
- if (err <= 0)
- /*
- * Stop processing this chunk on
- * end of data or error
- */
- break;
-
- switch (etmq->packet->sample_type) {
- case CS_ETM_RANGE:
- /*
- * If the packet contains an instruction
- * range, generate instruction sequence
- * events.
- */
- cs_etm__sample(etmq);
- break;
- case CS_ETM_EXCEPTION:
- case CS_ETM_EXCEPTION_RET:
- /*
- * If the exception packet is coming,
- * make sure the previous instruction
- * range packet to be handled properly.
- */
- cs_etm__exception(etmq);
- break;
- case CS_ETM_DISCONTINUITY:
- /*
- * Discontinuity in trace, flush
- * previous branch stack
- */
- cs_etm__flush(etmq);
- break;
- case CS_ETM_EMPTY:
- /*
- * Should not receive empty packet,
- * report error.
- */
- pr_err("CS ETM Trace: empty packet\n");
- return -EINVAL;
- default:
- break;
- }
- }
- } while (buffer.len > buffer_used);
+ /*
+ * Process each packet in this chunk, nothing to do if
+ * an error occurs other than hoping the next one will
+ * be better.
+ */
+ err = cs_etm__process_decoder_queue(etmq);
+
+ } while (etmq->buf_len);
if (err == 0)
/* Flush any remaining branch stack entries */
@@ -1205,7 +1645,7 @@ static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
}
static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
- pid_t tid, u64 time_)
+ pid_t tid)
{
unsigned int i;
struct auxtrace_queues *queues = &etm->queues;
@@ -1215,7 +1655,6 @@ static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
struct cs_etm_queue *etmq = queue->priv;
if (etmq && ((tid == -1) || (etmq->tid == tid))) {
- etmq->time = time_;
cs_etm__set_pid_tid_cpu(etm, queue);
cs_etm__run_decoder(etmq);
}
@@ -1259,8 +1698,7 @@ static int cs_etm__process_event(struct perf_session *session,
if (event->header.type == PERF_RECORD_EXIT)
return cs_etm__process_timeless_queues(etm,
- event->fork.tid,
- sample->time);
+ event->fork.tid);
return 0;
}
@@ -1414,9 +1852,9 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
0xffffffff);
/*
- * Create an RB tree for traceID-CPU# tuple. Since the conversion has
- * to be made for each packet that gets decoded, optimizing access in
- * anything other than a sequential array is worth doing.
+ * Create an RB tree for traceID-metadata tuple. Since the conversion
+ * has to be made for each packet that gets decoded, optimizing access
+ * in anything other than a sequential array is worth doing.
*/
traceid_list = intlist__new(NULL);
if (!traceid_list) {
@@ -1482,8 +1920,8 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
err = -EINVAL;
goto err_free_metadata;
}
- /* All good, associate the traceID with the CPU# */
- inode->priv = &metadata[j][CS_ETM_CPU];
+ /* All good, associate the traceID with the metadata pointer */
+ inode->priv = metadata[j];
}
/*
diff --git a/tools/perf/util/cs-etm.h b/tools/perf/util/cs-etm.h
index 37f8d48179ca..0e97c196147a 100644
--- a/tools/perf/util/cs-etm.h
+++ b/tools/perf/util/cs-etm.h
@@ -53,7 +53,51 @@ enum {
CS_ETMV4_PRIV_MAX,
};
-/* RB tree for quick conversion between traceID and CPUs */
+/*
+ * ETMv3 exception encoding number:
+ * See the Embedded Trace Macrocell specification (ARM IHI 0014Q),
+ * table 7-12 Encoding of Exception[3:0] for non-ARMv7-M processors.
+ */
+enum {
+ CS_ETMV3_EXC_NONE = 0,
+ CS_ETMV3_EXC_DEBUG_HALT = 1,
+ CS_ETMV3_EXC_SMC = 2,
+ CS_ETMV3_EXC_HYP = 3,
+ CS_ETMV3_EXC_ASYNC_DATA_ABORT = 4,
+ CS_ETMV3_EXC_JAZELLE_THUMBEE = 5,
+ CS_ETMV3_EXC_PE_RESET = 8,
+ CS_ETMV3_EXC_UNDEFINED_INSTR = 9,
+ CS_ETMV3_EXC_SVC = 10,
+ CS_ETMV3_EXC_PREFETCH_ABORT = 11,
+ CS_ETMV3_EXC_DATA_FAULT = 12,
+ CS_ETMV3_EXC_GENERIC = 13,
+ CS_ETMV3_EXC_IRQ = 14,
+ CS_ETMV3_EXC_FIQ = 15,
+};
+
+/*
+ * ETMv4 exception encoding number:
+ * See ARM Embedded Trace Macrocell Architecture Specification (ARM IHI 0064D)
+ * table 6-12 Possible values for the TYPE field in an Exception instruction
+ * trace packet, for ARMv7-A/R and ARMv8-A/R PEs.
+ */
+enum {
+ CS_ETMV4_EXC_RESET = 0,
+ CS_ETMV4_EXC_DEBUG_HALT = 1,
+ CS_ETMV4_EXC_CALL = 2,
+ CS_ETMV4_EXC_TRAP = 3,
+ CS_ETMV4_EXC_SYSTEM_ERROR = 4,
+ CS_ETMV4_EXC_INST_DEBUG = 6,
+ CS_ETMV4_EXC_DATA_DEBUG = 7,
+ CS_ETMV4_EXC_ALIGNMENT = 10,
+ CS_ETMV4_EXC_INST_FAULT = 11,
+ CS_ETMV4_EXC_DATA_FAULT = 12,
+ CS_ETMV4_EXC_IRQ = 14,
+ CS_ETMV4_EXC_FIQ = 15,
+ CS_ETMV4_EXC_END = 31,
+};
+
+/* RB tree for quick conversion between traceID and metadata pointers */
struct intlist *traceid_list;
#define KiB(x) ((x) * 1024)
@@ -61,14 +105,15 @@ struct intlist *traceid_list;
#define CS_ETM_HEADER_SIZE (CS_HEADER_VERSION_0_MAX * sizeof(u64))
-static const u64 __perf_cs_etmv3_magic = 0x3030303030303030ULL;
-static const u64 __perf_cs_etmv4_magic = 0x4040404040404040ULL;
+#define __perf_cs_etmv3_magic 0x3030303030303030ULL
+#define __perf_cs_etmv4_magic 0x4040404040404040ULL
#define CS_ETMV3_PRIV_SIZE (CS_ETM_PRIV_MAX * sizeof(u64))
#define CS_ETMV4_PRIV_SIZE (CS_ETMV4_PRIV_MAX * sizeof(u64))
#ifdef HAVE_CSTRACE_SUPPORT
int cs_etm__process_auxtrace_info(union perf_event *event,
struct perf_session *session);
+int cs_etm__get_cpu(u8 trace_chan_id, int *cpu);
#else
static inline int
cs_etm__process_auxtrace_info(union perf_event *event __maybe_unused,
@@ -76,6 +121,12 @@ cs_etm__process_auxtrace_info(union perf_event *event __maybe_unused,
{
return -1;
}
+
+static inline int cs_etm__get_cpu(u8 trace_chan_id __maybe_unused,
+ int *cpu __maybe_unused)
+{
+ return -1;
+}
#endif
#endif
diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
index 2a36fab76994..26af43ad9ddd 100644
--- a/tools/perf/util/data-convert-bt.c
+++ b/tools/perf/util/data-convert-bt.c
@@ -1578,7 +1578,7 @@ int bt_convert__perf2ctf(const char *input, const char *path,
{
struct perf_session *session;
struct perf_data data = {
- .file = { .path = input, .fd = -1 },
+ .path = input,
.mode = PERF_DATA_MODE_READ,
.force = opts->force,
};
@@ -1650,7 +1650,7 @@ int bt_convert__perf2ctf(const char *input, const char *path,
fprintf(stderr,
"[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
- data.file.path, path);
+ data.path, path);
fprintf(stderr,
"[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples",
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index d8cfc19ddb10..e098e189f93e 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -7,11 +7,117 @@
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
+#include <asm/bug.h>
+#include <sys/types.h>
+#include <dirent.h>
#include "data.h"
#include "util.h"
#include "debug.h"
+static void close_dir(struct perf_data_file *files, int nr)
+{
+ while (--nr >= 1) {
+ close(files[nr].fd);
+ free(files[nr].path);
+ }
+ free(files);
+}
+
+void perf_data__close_dir(struct perf_data *data)
+{
+ close_dir(data->dir.files, data->dir.nr);
+}
+
+int perf_data__create_dir(struct perf_data *data, int nr)
+{
+ struct perf_data_file *files = NULL;
+ int i, ret = -1;
+
+ files = zalloc(nr * sizeof(*files));
+ if (!files)
+ return -ENOMEM;
+
+ data->dir.files = files;
+ data->dir.nr = nr;
+
+ for (i = 0; i < nr; i++) {
+ struct perf_data_file *file = &files[i];
+
+ if (asprintf(&file->path, "%s/data.%d", data->path, i) < 0)
+ goto out_err;
+
+ ret = open(file->path, O_RDWR|O_CREAT|O_TRUNC, S_IRUSR|S_IWUSR);
+ if (ret < 0)
+ goto out_err;
+
+ file->fd = ret;
+ }
+
+ return 0;
+
+out_err:
+ close_dir(files, i);
+ return ret;
+}
+
+int perf_data__open_dir(struct perf_data *data)
+{
+ struct perf_data_file *files = NULL;
+ struct dirent *dent;
+ int ret = -1;
+ DIR *dir;
+ int nr = 0;
+
+ dir = opendir(data->path);
+ if (!dir)
+ return -EINVAL;
+
+ while ((dent = readdir(dir)) != NULL) {
+ struct perf_data_file *file;
+ char path[PATH_MAX];
+ struct stat st;
+
+ snprintf(path, sizeof(path), "%s/%s", data->path, dent->d_name);
+ if (stat(path, &st))
+ continue;
+
+ if (!S_ISREG(st.st_mode) || strncmp(dent->d_name, "data", 4))
+ continue;
+
+ ret = -ENOMEM;
+
+ file = realloc(files, (nr + 1) * sizeof(*files));
+ if (!file)
+ goto out_err;
+
+ files = file;
+ file = &files[nr++];
+
+ file->path = strdup(path);
+ if (!file->path)
+ goto out_err;
+
+ ret = open(file->path, O_RDONLY);
+ if (ret < 0)
+ goto out_err;
+
+ file->fd = ret;
+ file->size = st.st_size;
+ }
+
+ if (!files)
+ return -EINVAL;
+
+ data->dir.files = files;
+ data->dir.nr = nr;
+ return 0;
+
+out_err:
+ close_dir(files, nr);
+ return ret;
+}
+
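
A hedged usage sketch for the new directory API (the function name and nr_mmaps are hypothetical; error handling is trimmed, and data->path must already name an existing directory, which perf_data__create_dir() does not create itself):

    static int write_per_mmap(int nr_mmaps)
    {
            struct perf_data data = {
                    .path = "perf.data",
                    .mode = PERF_DATA_MODE_WRITE,
            };
            int i;

            if (perf_data__create_dir(&data, nr_mmaps))
                    return -1;

            for (i = 0; i < nr_mmaps; i++) {
                    /* each ring buffer writes to its own data.<i> file */
                    /* write(data.dir.files[i].fd, buf, size); */
            }

            perf_data__close_dir(&data);
            return 0;
    }
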
static bool check_pipe(struct perf_data *data)
{
struct stat st;
@@ -19,11 +125,11 @@ static bool check_pipe(struct perf_data *data)
int fd = perf_data__is_read(data) ?
STDIN_FILENO : STDOUT_FILENO;
- if (!data->file.path) {
+ if (!data->path) {
if (!fstat(fd, &st) && S_ISFIFO(st.st_mode))
is_pipe = true;
} else {
- if (!strcmp(data->file.path, "-"))
+ if (!strcmp(data->path, "-"))
is_pipe = true;
}
@@ -37,13 +143,31 @@ static int check_backup(struct perf_data *data)
{
struct stat st;
- if (!stat(data->file.path, &st) && st.st_size) {
- /* TODO check errors properly */
+ if (perf_data__is_read(data))
+ return 0;
+
+ if (!stat(data->path, &st) && st.st_size) {
char oldname[PATH_MAX];
+ int ret;
+
snprintf(oldname, sizeof(oldname), "%s.old",
- data->file.path);
- unlink(oldname);
- rename(data->file.path, oldname);
+ data->path);
+
+ ret = rm_rf_perf_data(oldname);
+ if (ret) {
+ pr_err("Can't remove old data: %s (%s)\n",
+ ret == -2 ?
+ "Unknown file found" : strerror(errno),
+ oldname);
+ return -1;
+ }
+
+ if (rename(data->path, oldname)) {
+ pr_err("Can't move data: %s (%s to %s)\n",
+ strerror(errno),
+ data->path, oldname);
+ return -1;
+ }
}
return 0;
@@ -82,7 +206,7 @@ static int open_file_read(struct perf_data *data)
goto out_close;
}
- data->size = st.st_size;
+ data->file.size = st.st_size;
return fd;
out_close:
@@ -95,9 +219,6 @@ static int open_file_write(struct perf_data *data)
int fd;
char sbuf[STRERR_BUFSIZE];
- if (check_backup(data))
- return -1;
-
fd = open(data->file.path, O_CREAT|O_RDWR|O_TRUNC|O_CLOEXEC,
S_IRUSR|S_IWUSR);
@@ -115,8 +236,22 @@ static int open_file(struct perf_data *data)
fd = perf_data__is_read(data) ?
open_file_read(data) : open_file_write(data);
+ if (fd < 0) {
+ zfree(&data->file.path);
+ return -1;
+ }
+
data->file.fd = fd;
- return fd < 0 ? -1 : 0;
+ return 0;
+}
+
+static int open_file_dup(struct perf_data *data)
+{
+ data->file.path = strdup(data->path);
+ if (!data->file.path)
+ return -ENOMEM;
+
+ return open_file(data);
}
int perf_data__open(struct perf_data *data)
@@ -124,14 +259,18 @@ int perf_data__open(struct perf_data *data)
if (check_pipe(data))
return 0;
- if (!data->file.path)
- data->file.path = "perf.data";
+ if (!data->path)
+ data->path = "perf.data";
- return open_file(data);
+ if (check_backup(data))
+ return -1;
+
+ return open_file_dup(data);
}
void perf_data__close(struct perf_data *data)
{
+ zfree(&data->file.path);
close(data->file.fd);
}
@@ -159,15 +298,15 @@ int perf_data__switch(struct perf_data *data,
if (perf_data__is_read(data))
return -EINVAL;
- if (asprintf(&new_filepath, "%s.%s", data->file.path, postfix) < 0)
+ if (asprintf(&new_filepath, "%s.%s", data->path, postfix) < 0)
return -ENOMEM;
/*
* Only fire a warning, don't return error, continue fill
* original file.
*/
- if (rename(data->file.path, new_filepath))
- pr_warning("Failed to rename %s to %s\n", data->file.path, new_filepath);
+ if (rename(data->path, new_filepath))
+ pr_warning("Failed to rename %s to %s\n", data->path, new_filepath);
if (!at_exit) {
close(data->file.fd);
diff --git a/tools/perf/util/data.h b/tools/perf/util/data.h
index 4828f7feea89..14b47be2bd69 100644
--- a/tools/perf/util/data.h
+++ b/tools/perf/util/data.h
@@ -10,16 +10,22 @@ enum perf_data_mode {
};
struct perf_data_file {
- const char *path;
+ char *path;
int fd;
+ unsigned long size;
};
struct perf_data {
+ const char *path;
struct perf_data_file file;
bool is_pipe;
bool force;
- unsigned long size;
enum perf_data_mode mode;
+
+ struct {
+ struct perf_data_file *files;
+ int nr;
+ } dir;
};
static inline bool perf_data__is_read(struct perf_data *data)
@@ -44,7 +50,7 @@ static inline int perf_data__fd(struct perf_data *data)
static inline unsigned long perf_data__size(struct perf_data *data)
{
- return data->size;
+ return data->file.size;
}
int perf_data__open(struct perf_data *data);
@@ -63,4 +69,8 @@ ssize_t perf_data_file__write(struct perf_data_file *file,
int perf_data__switch(struct perf_data *data,
const char *postfix,
size_t pos, bool at_exit);
+
+int perf_data__create_dir(struct perf_data *data, int nr);
+int perf_data__open_dir(struct perf_data *data);
+void perf_data__close_dir(struct perf_data *data);
#endif /* __PERF_DATA_H */
diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c
index 69fbb0a72d0c..d7315a00c731 100644
--- a/tools/perf/util/db-export.c
+++ b/tools/perf/util/db-export.c
@@ -20,6 +20,7 @@
#include "thread.h"
#include "comm.h"
#include "symbol.h"
+#include "map.h"
#include "event.h"
#include "util.h"
#include "thread-stack.h"
@@ -509,18 +510,23 @@ int db_export__call_path(struct db_export *dbe, struct call_path *cp)
return 0;
}
-int db_export__call_return(struct db_export *dbe, struct call_return *cr)
+int db_export__call_return(struct db_export *dbe, struct call_return *cr,
+ u64 *parent_db_id)
{
int err;
- if (cr->db_id)
- return 0;
-
err = db_export__call_path(dbe, cr->cp);
if (err)
return err;
- cr->db_id = ++dbe->call_return_last_db_id;
+ if (!cr->db_id)
+ cr->db_id = ++dbe->call_return_last_db_id;
+
+ if (parent_db_id) {
+ if (!*parent_db_id)
+ *parent_db_id = ++dbe->call_return_last_db_id;
+ cr->parent_db_id = *parent_db_id;
+ }
if (dbe->export_call_return)
return dbe->export_call_return(dbe, cr);
diff --git a/tools/perf/util/db-export.h b/tools/perf/util/db-export.h
index 67bc6b8ad2d6..4e2424c89df9 100644
--- a/tools/perf/util/db-export.h
+++ b/tools/perf/util/db-export.h
@@ -104,6 +104,7 @@ int db_export__sample(struct db_export *dbe, union perf_event *event,
int db_export__branch_types(struct db_export *dbe);
int db_export__call_path(struct db_export *dbe, struct call_path *cp);
-int db_export__call_return(struct db_export *dbe, struct call_return *cr);
+int db_export__call_return(struct db_export *dbe, struct call_return *cr,
+ u64 *parent_db_id);
#endif
diff --git a/tools/perf/util/drv_configs.c b/tools/perf/util/drv_configs.c
deleted file mode 100644
index eec754243f4d..000000000000
--- a/tools/perf/util/drv_configs.c
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * drv_configs.h: Interface to apply PMU specific configuration
- * Copyright (c) 2016-2018, Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
-
-#include "drv_configs.h"
-#include "evlist.h"
-#include "evsel.h"
-#include "pmu.h"
-#include <errno.h>
-
-static int
-perf_evsel__apply_drv_configs(struct perf_evsel *evsel,
- struct perf_evsel_config_term **err_term)
-{
- bool found = false;
- int err = 0;
- struct perf_evsel_config_term *term;
- struct perf_pmu *pmu = NULL;
-
- while ((pmu = perf_pmu__scan(pmu)) != NULL)
- if (pmu->type == evsel->attr.type) {
- found = true;
- break;
- }
-
- list_for_each_entry(term, &evsel->config_terms, list) {
- if (term->type != PERF_EVSEL__CONFIG_TERM_DRV_CFG)
- continue;
-
- /*
- * We have a configuration term, report an error if we
- * can't find the PMU or if the PMU driver doesn't support
- * cmd line driver configuration.
- */
- if (!found || !pmu->set_drv_config) {
- err = -EINVAL;
- *err_term = term;
- break;
- }
-
- err = pmu->set_drv_config(term);
- if (err) {
- *err_term = term;
- break;
- }
- }
-
- return err;
-}
-
-int perf_evlist__apply_drv_configs(struct perf_evlist *evlist,
- struct perf_evsel **err_evsel,
- struct perf_evsel_config_term **err_term)
-{
- struct perf_evsel *evsel;
- int err = 0;
-
- evlist__for_each_entry(evlist, evsel) {
- err = perf_evsel__apply_drv_configs(evsel, err_term);
- if (err) {
- *err_evsel = evsel;
- break;
- }
- }
-
- return err;
-}
diff --git a/tools/perf/util/drv_configs.h b/tools/perf/util/drv_configs.h
deleted file mode 100644
index 32bc9babc2e0..000000000000
--- a/tools/perf/util/drv_configs.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * drv_configs.h: Interface to apply PMU specific configuration
- * Copyright (c) 2016-2018, Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
-
-#ifndef __PERF_DRV_CONFIGS_H
-#define __PERF_DRV_CONFIGS_H
-
-#include "drv_configs.h"
-#include "evlist.h"
-#include "evsel.h"
-
-int perf_evlist__apply_drv_configs(struct perf_evlist *evlist,
- struct perf_evsel **err_evsel,
- struct perf_evsel_config_term **term);
-#endif
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 62c8cf622607..ba58ba603b69 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -8,8 +8,11 @@
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
+#include <libgen.h>
#include "compress.h"
+#include "namespaces.h"
#include "path.h"
+#include "map.h"
#include "symbol.h"
#include "srcline.h"
#include "dso.h"
@@ -1195,10 +1198,10 @@ struct dso *dso__new(const char *name)
strcpy(dso->name, name);
dso__set_long_name(dso, dso->name, false);
dso__set_short_name(dso, dso->name, false);
- dso->symbols = dso->symbol_names = RB_ROOT;
+ dso->symbols = dso->symbol_names = RB_ROOT_CACHED;
dso->data.cache = RB_ROOT;
- dso->inlined_nodes = RB_ROOT;
- dso->srclines = RB_ROOT;
+ dso->inlined_nodes = RB_ROOT_CACHED;
+ dso->srclines = RB_ROOT_CACHED;
dso->data.fd = -1;
dso->data.status = DSO_DATA_STATUS_UNKNOWN;
dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
@@ -1467,7 +1470,7 @@ size_t dso__fprintf(struct dso *dso, FILE *fp)
ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT ");
ret += dso__fprintf_buildid(dso, fp);
ret += fprintf(fp, ")\n");
- for (nd = rb_first(&dso->symbols); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&dso->symbols); nd; nd = rb_next(nd)) {
struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
ret += symbol__fprintf(pos, fp);
}
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index 8c8a7abe809d..bb417c54c25a 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -7,13 +7,14 @@
#include <linux/rbtree.h>
#include <sys/types.h>
#include <stdbool.h>
+#include <stdio.h>
#include "rwsem.h"
-#include <linux/types.h>
#include <linux/bitops.h>
-#include "map.h"
-#include "namespaces.h"
#include "build-id.h"
+struct machine;
+struct map;
+
enum dso_binary_type {
DSO_BINARY_TYPE__KALLSYMS = 0,
DSO_BINARY_TYPE__GUEST_KALLSYMS,
@@ -140,10 +141,10 @@ struct dso {
struct list_head node;
struct rb_node rb_node; /* rbtree node sorted by long name */
struct rb_root *root; /* root of rbtree that rb_node is in */
- struct rb_root symbols;
- struct rb_root symbol_names;
- struct rb_root inlined_nodes;
- struct rb_root srclines;
+ struct rb_root_cached symbols;
+ struct rb_root_cached symbol_names;
+ struct rb_root_cached inlined_nodes;
+ struct rb_root_cached srclines;
struct {
u64 addr;
struct symbol *symbol;
@@ -235,7 +236,7 @@ bool dso__loaded(const struct dso *dso);
static inline bool dso__has_symbols(const struct dso *dso)
{
- return !RB_EMPTY_ROOT(&dso->symbols);
+ return !RB_EMPTY_ROOT(&dso->symbols.rb_root);
}
bool dso__sorted_by_name(const struct dso *dso);
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 937a5a4f71cc..ba7be74fad6e 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -21,9 +21,13 @@
#include "thread.h"
#include "thread_map.h"
#include "sane_ctype.h"
+#include "map.h"
+#include "symbol.h"
#include "symbol/kallsyms.h"
#include "asm/bug.h"
#include "stat.h"
+#include "session.h"
+#include "bpf-event.h"
#define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500
@@ -45,6 +49,8 @@ static const char *perf_event__names[] = {
[PERF_RECORD_SWITCH] = "SWITCH",
[PERF_RECORD_SWITCH_CPU_WIDE] = "SWITCH_CPU_WIDE",
[PERF_RECORD_NAMESPACES] = "NAMESPACES",
+ [PERF_RECORD_KSYMBOL] = "KSYMBOL",
+ [PERF_RECORD_BPF_EVENT] = "BPF_EVENT",
[PERF_RECORD_HEADER_ATTR] = "ATTR",
[PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
[PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
@@ -1329,6 +1335,22 @@ int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
return machine__process_switch_event(machine, event);
}
+int perf_event__process_ksymbol(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine)
+{
+ return machine__process_ksymbol(machine, event, sample);
+}
+
+int perf_event__process_bpf_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine)
+{
+ return machine__process_bpf_event(machine, event, sample);
+}
+
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
@@ -1461,6 +1483,21 @@ static size_t perf_event__fprintf_lost(union perf_event *event, FILE *fp)
return fprintf(fp, " lost %" PRIu64 "\n", event->lost.lost);
}
+size_t perf_event__fprintf_ksymbol(union perf_event *event, FILE *fp)
+{
+ return fprintf(fp, " ksymbol event with addr %" PRIx64 " len %u type %u flags 0x%x name %s\n",
+ event->ksymbol_event.addr, event->ksymbol_event.len,
+ event->ksymbol_event.ksym_type,
+ event->ksymbol_event.flags, event->ksymbol_event.name);
+}
+
+size_t perf_event__fprintf_bpf_event(union perf_event *event, FILE *fp)
+{
+ return fprintf(fp, " bpf event with type %u, flags %u, id %u\n",
+ event->bpf_event.type, event->bpf_event.flags,
+ event->bpf_event.id);
+}
+
size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
size_t ret = fprintf(fp, "PERF_RECORD_%s",
@@ -1496,6 +1533,12 @@ size_t perf_event__fprintf(union perf_event *event, FILE *fp)
case PERF_RECORD_LOST:
ret += perf_event__fprintf_lost(event, fp);
break;
+ case PERF_RECORD_KSYMBOL:
+ ret += perf_event__fprintf_ksymbol(event, fp);
+ break;
+ case PERF_RECORD_BPF_EVENT:
+ ret += perf_event__fprintf_bpf_event(event, fp);
+ break;
default:
ret += fprintf(fp, "\n");
}
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index eb95f3384958..36ae7e92dab1 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -5,6 +5,7 @@
#include <limits.h>
#include <stdio.h>
#include <linux/kernel.h>
+#include <linux/bpf.h>
#include "../perf.h"
#include "build-id.h"
@@ -84,6 +85,29 @@ struct throttle_event {
u64 stream_id;
};
+#ifndef KSYM_NAME_LEN
+#define KSYM_NAME_LEN 256
+#endif
+
+struct ksymbol_event {
+ struct perf_event_header header;
+ u64 addr;
+ u32 len;
+ u16 ksym_type;
+ u16 flags;
+ char name[KSYM_NAME_LEN];
+};
+
+struct bpf_event {
+ struct perf_event_header header;
+ u16 type;
+ u16 flags;
+ u32 id;
+
+ /* for bpf_prog types */
+ u8 tag[BPF_TAG_SIZE]; // prog tag
+};
+
#define PERF_SAMPLE_MASK \
(PERF_SAMPLE_IP | PERF_SAMPLE_TID | \
PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR | \
@@ -137,26 +161,7 @@ struct ip_callchain {
u64 ips[0];
};
-struct branch_flags {
- u64 mispred:1;
- u64 predicted:1;
- u64 in_tx:1;
- u64 abort:1;
- u64 cycles:16;
- u64 type:4;
- u64 reserved:40;
-};
-
-struct branch_entry {
- u64 from;
- u64 to;
- struct branch_flags flags;
-};
-
-struct branch_stack {
- u64 nr;
- struct branch_entry entries[0];
-};
+struct branch_stack;
enum {
PERF_IP_FLAG_BRANCH = 1ULL << 0,
@@ -527,8 +532,9 @@ struct auxtrace_error_event {
u32 cpu;
u32 pid;
u32 tid;
- u32 reserved__; /* For alignment */
+ u32 fmt;
u64 ip;
+ u64 time;
char msg[MAX_AUXTRACE_ERROR_MSG];
};
@@ -651,6 +657,8 @@ union perf_event {
struct stat_round_event stat_round;
struct time_conv_event time_conv;
struct feature_event feat;
+ struct ksymbol_event ksymbol_event;
+ struct bpf_event bpf_event;
};
void perf_event__print_totals(void);
@@ -748,6 +756,14 @@ int perf_event__process_exit(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
+int perf_event__process_ksymbol(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+int perf_event__process_bpf_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
int perf_tool__process_synth_event(struct perf_tool *tool,
union perf_event *event,
struct machine *machine,
@@ -811,6 +827,8 @@ size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_ksymbol(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_bpf_event(union perf_event *event, FILE *fp);
size_t perf_event__fprintf(union perf_event *event, FILE *fp);
int kallsyms__get_function_start(const char *kallsyms_filename,
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 8c902276d4b4..ed20f4379956 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -230,18 +230,33 @@ void perf_evlist__set_leader(struct perf_evlist *evlist)
}
}
-void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
+void perf_event_attr__set_max_precise_ip(struct perf_event_attr *pattr)
{
- attr->precise_ip = 3;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ .exclude_kernel = 1,
+ .precise_ip = 3,
+ };
- while (attr->precise_ip != 0) {
- int fd = sys_perf_event_open(attr, 0, -1, -1, 0);
+ event_attr_init(&attr);
+
+ /*
+ * Unnamed union member, not supported as struct member named
+ * initializer in older compilers such as gcc 4.4.7
+ */
+ attr.sample_period = 1;
+
+ while (attr.precise_ip != 0) {
+ int fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
if (fd != -1) {
close(fd);
break;
}
- --attr->precise_ip;
+ --attr.precise_ip;
}
+
+ pattr->precise_ip = attr.precise_ip;
}
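
The same probing idea can be expressed as a standalone user-space sketch using the raw syscall (illustrative only, not part of the patch):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    int main(void)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_CPU_CYCLES;
            attr.exclude_kernel = 1;
            attr.sample_period = 1;
            attr.precise_ip = 3;

            /* Walk precise_ip down until the kernel/PMU accepts the event */
            while (attr.precise_ip != 0) {
                    int fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);

                    if (fd != -1) {
                            close(fd);
                            break;
                    }
                    attr.precise_ip--;
            }

            printf("max usable precise_ip: %u\n", (unsigned int)attr.precise_ip);
            return 0;
    }
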
int __perf_evlist__add_default(struct perf_evlist *evlist, bool precise)
@@ -1022,7 +1037,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
*/
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages,
- bool auxtrace_overwrite, int nr_cblocks)
+ bool auxtrace_overwrite, int nr_cblocks, int affinity)
{
struct perf_evsel *evsel;
const struct cpu_map *cpus = evlist->cpus;
@@ -1032,7 +1047,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
* Its value is decided by evsel's write_backward.
* So &mp should not be passed through const pointer.
*/
- struct mmap_params mp = { .nr_cblocks = nr_cblocks };
+ struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = affinity };
if (!evlist->mmap)
evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
@@ -1064,7 +1079,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages)
{
- return perf_evlist__mmap_ex(evlist, pages, 0, false, 0);
+ return perf_evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS);
}
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 868294491194..744906dd4887 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -49,6 +49,9 @@ struct perf_evlist {
struct perf_evsel *selected;
struct events_stats stats;
struct perf_env *env;
+ void (*trace_event_sample_raw)(struct perf_evlist *evlist,
+ union perf_event *event,
+ struct perf_sample *sample);
u64 first_sample_time;
u64 last_sample_time;
};
@@ -162,7 +165,7 @@ unsigned long perf_event_mlock_kb_in_pages(void);
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages,
- bool auxtrace_overwrite, int nr_cblocks);
+ bool auxtrace_overwrite, int nr_cblocks, int affinity);
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages);
void perf_evlist__munmap(struct perf_evlist *evlist);
@@ -314,5 +317,4 @@ void perf_evlist__force_leader(struct perf_evlist *evlist);
struct perf_evsel *perf_evlist__reset_weak_group(struct perf_evlist *evlist,
struct perf_evsel *evsel);
-
#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index dbc0466db368..3bbf73e979c0 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -294,20 +294,12 @@ struct perf_evsel *perf_evsel__new_cycles(bool precise)
if (!precise)
goto new_event;
- /*
- * Unnamed union member, not supported as struct member named
- * initializer in older compilers such as gcc 4.4.7
- *
- * Just for probing the precise_ip:
- */
- attr.sample_period = 1;
perf_event_attr__set_max_precise_ip(&attr);
/*
* Now let the usual logic to set up the perf_event_attr defaults
* to kick in when we return and before perf_evsel__open() is called.
*/
- attr.sample_period = 0;
new_event:
evsel = perf_evsel__new(&attr);
if (evsel == NULL)
@@ -956,6 +948,14 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
attr->sample_freq = 0;
attr->sample_period = 0;
attr->write_backward = 0;
+
+ /*
+ * We don't get a sample for slave events; we synthesize them
+ * when delivering the group leader sample. Set the slave
+ * event to follow the master sample_type to simplify
+ * reporting.
+ */
+ attr->sample_type = leader->attr.sample_type;
}
if (opts->no_samples)
@@ -1035,6 +1035,9 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
attr->mmap = track;
attr->mmap2 = track && !perf_missing_features.mmap2;
attr->comm = track;
+ attr->ksymbol = track && !perf_missing_features.ksymbol;
+ attr->bpf_event = track && opts->bpf_event &&
+ !perf_missing_features.bpf_event;
if (opts->record_namespaces)
attr->namespaces = track;
@@ -1652,6 +1655,8 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
PRINT_ATTRf(context_switch, p_unsigned);
PRINT_ATTRf(write_backward, p_unsigned);
PRINT_ATTRf(namespaces, p_unsigned);
+ PRINT_ATTRf(ksymbol, p_unsigned);
+ PRINT_ATTRf(bpf_event, p_unsigned);
PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
PRINT_ATTRf(bp_type, p_unsigned);
@@ -1811,6 +1816,10 @@ fallback_missing_features:
PERF_SAMPLE_BRANCH_NO_CYCLES);
if (perf_missing_features.group_read && evsel->attr.inherit)
evsel->attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID);
+ if (perf_missing_features.ksymbol)
+ evsel->attr.ksymbol = 0;
+ if (perf_missing_features.bpf_event)
+ evsel->attr.bpf_event = 0;
retry_sample_id:
if (perf_missing_features.sample_id_all)
evsel->attr.sample_id_all = 0;
@@ -1930,7 +1939,15 @@ try_fallback:
* Must probe features in the order they were added to the
* perf_event_attr interface.
*/
- if (!perf_missing_features.write_backward && evsel->attr.write_backward) {
+ if (!perf_missing_features.bpf_event && evsel->attr.bpf_event) {
+ perf_missing_features.bpf_event = true;
+ pr_debug2("switching off bpf_event\n");
+ goto fallback_missing_features;
+ } else if (!perf_missing_features.ksymbol && evsel->attr.ksymbol) {
+ perf_missing_features.ksymbol = true;
+ pr_debug2("switching off ksymbol\n");
+ goto fallback_missing_features;
+ } else if (!perf_missing_features.write_backward && evsel->attr.write_backward) {
perf_missing_features.write_backward = true;
pr_debug2("switching off write_backward\n");
goto out_close;
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 82a289ce8b0c..cc578e02e08f 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -8,7 +8,7 @@
#include <linux/perf_event.h>
#include <linux/types.h>
#include "xyarray.h"
-#include "symbol.h"
+#include "symbol_conf.h"
#include "cpumap.h"
#include "counts.h"
@@ -168,6 +168,8 @@ struct perf_missing_features {
bool lbr_flags;
bool write_backward;
bool group_read;
+ bool ksymbol;
+ bool bpf_event;
};
extern struct perf_missing_features perf_missing_features;
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index dec6d218c31c..01b324c275b9 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -39,6 +39,7 @@
#include "tool.h"
#include "time-utils.h"
#include "units.h"
+#include "cputopo.h"
#include "sane_ctype.h"
@@ -526,17 +527,11 @@ static int write_event_desc(struct feat_fd *ff,
static int write_cmdline(struct feat_fd *ff,
struct perf_evlist *evlist __maybe_unused)
{
- char buf[MAXPATHLEN];
- u32 n;
- int i, ret;
+ char pbuf[MAXPATHLEN], *buf;
+ int i, ret, n;
/* actual path to perf binary */
- ret = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
- if (ret <= 0)
- return -1;
-
- /* readlink() does not add null termination */
- buf[ret] = '\0';
+ buf = perf_exe(pbuf, MAXPATHLEN);
/* account for binary path */
n = perf_env.nr_cmdline + 1;
@@ -557,160 +552,15 @@ static int write_cmdline(struct feat_fd *ff,
return 0;
}
-#define CORE_SIB_FMT \
- "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
-#define THRD_SIB_FMT \
- "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"
-
-struct cpu_topo {
- u32 cpu_nr;
- u32 core_sib;
- u32 thread_sib;
- char **core_siblings;
- char **thread_siblings;
-};
-
-static int build_cpu_topo(struct cpu_topo *tp, int cpu)
-{
- FILE *fp;
- char filename[MAXPATHLEN];
- char *buf = NULL, *p;
- size_t len = 0;
- ssize_t sret;
- u32 i = 0;
- int ret = -1;
-
- sprintf(filename, CORE_SIB_FMT, cpu);
- fp = fopen(filename, "r");
- if (!fp)
- goto try_threads;
-
- sret = getline(&buf, &len, fp);
- fclose(fp);
- if (sret <= 0)
- goto try_threads;
-
- p = strchr(buf, '\n');
- if (p)
- *p = '\0';
-
- for (i = 0; i < tp->core_sib; i++) {
- if (!strcmp(buf, tp->core_siblings[i]))
- break;
- }
- if (i == tp->core_sib) {
- tp->core_siblings[i] = buf;
- tp->core_sib++;
- buf = NULL;
- len = 0;
- }
- ret = 0;
-
-try_threads:
- sprintf(filename, THRD_SIB_FMT, cpu);
- fp = fopen(filename, "r");
- if (!fp)
- goto done;
-
- if (getline(&buf, &len, fp) <= 0)
- goto done;
-
- p = strchr(buf, '\n');
- if (p)
- *p = '\0';
-
- for (i = 0; i < tp->thread_sib; i++) {
- if (!strcmp(buf, tp->thread_siblings[i]))
- break;
- }
- if (i == tp->thread_sib) {
- tp->thread_siblings[i] = buf;
- tp->thread_sib++;
- buf = NULL;
- }
- ret = 0;
-done:
- if(fp)
- fclose(fp);
- free(buf);
- return ret;
-}
-
-static void free_cpu_topo(struct cpu_topo *tp)
-{
- u32 i;
-
- if (!tp)
- return;
-
- for (i = 0 ; i < tp->core_sib; i++)
- zfree(&tp->core_siblings[i]);
-
- for (i = 0 ; i < tp->thread_sib; i++)
- zfree(&tp->thread_siblings[i]);
-
- free(tp);
-}
-
-static struct cpu_topo *build_cpu_topology(void)
-{
- struct cpu_topo *tp = NULL;
- void *addr;
- u32 nr, i;
- size_t sz;
- long ncpus;
- int ret = -1;
- struct cpu_map *map;
-
- ncpus = cpu__max_present_cpu();
-
- /* build online CPU map */
- map = cpu_map__new(NULL);
- if (map == NULL) {
- pr_debug("failed to get system cpumap\n");
- return NULL;
- }
-
- nr = (u32)(ncpus & UINT_MAX);
-
- sz = nr * sizeof(char *);
- addr = calloc(1, sizeof(*tp) + 2 * sz);
- if (!addr)
- goto out_free;
-
- tp = addr;
- tp->cpu_nr = nr;
- addr += sizeof(*tp);
- tp->core_siblings = addr;
- addr += sz;
- tp->thread_siblings = addr;
-
- for (i = 0; i < nr; i++) {
- if (!cpu_map__has(map, i))
- continue;
-
- ret = build_cpu_topo(tp, i);
- if (ret < 0)
- break;
- }
-
-out_free:
- cpu_map__put(map);
- if (ret) {
- free_cpu_topo(tp);
- tp = NULL;
- }
- return tp;
-}
static int write_cpu_topology(struct feat_fd *ff,
struct perf_evlist *evlist __maybe_unused)
{
- struct cpu_topo *tp;
+ struct cpu_topology *tp;
u32 i;
int ret, j;
- tp = build_cpu_topology();
+ tp = cpu_topology__new();
if (!tp)
return -1;
@@ -748,7 +598,7 @@ static int write_cpu_topology(struct feat_fd *ff,
return ret;
}
done:
- free_cpu_topo(tp);
+ cpu_topology__delete(tp);
return ret;
}
@@ -783,112 +633,45 @@ static int write_total_mem(struct feat_fd *ff,
return ret;
}
-static int write_topo_node(struct feat_fd *ff, int node)
-{
- char str[MAXPATHLEN];
- char field[32];
- char *buf = NULL, *p;
- size_t len = 0;
- FILE *fp;
- u64 mem_total, mem_free, mem;
- int ret = -1;
-
- sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
- fp = fopen(str, "r");
- if (!fp)
- return -1;
-
- while (getline(&buf, &len, fp) > 0) {
- /* skip over invalid lines */
- if (!strchr(buf, ':'))
- continue;
- if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
- goto done;
- if (!strcmp(field, "MemTotal:"))
- mem_total = mem;
- if (!strcmp(field, "MemFree:"))
- mem_free = mem;
- }
-
- fclose(fp);
- fp = NULL;
-
- ret = do_write(ff, &mem_total, sizeof(u64));
- if (ret)
- goto done;
-
- ret = do_write(ff, &mem_free, sizeof(u64));
- if (ret)
- goto done;
-
- ret = -1;
- sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
-
- fp = fopen(str, "r");
- if (!fp)
- goto done;
-
- if (getline(&buf, &len, fp) <= 0)
- goto done;
-
- p = strchr(buf, '\n');
- if (p)
- *p = '\0';
-
- ret = do_write_string(ff, buf);
-done:
- free(buf);
- if (fp)
- fclose(fp);
- return ret;
-}
-
static int write_numa_topology(struct feat_fd *ff,
struct perf_evlist *evlist __maybe_unused)
{
- char *buf = NULL;
- size_t len = 0;
- FILE *fp;
- struct cpu_map *node_map = NULL;
- char *c;
- u32 nr, i, j;
+ struct numa_topology *tp;
int ret = -1;
+ u32 i;
- fp = fopen("/sys/devices/system/node/online", "r");
- if (!fp)
- return -1;
-
- if (getline(&buf, &len, fp) <= 0)
- goto done;
+ tp = numa_topology__new();
+ if (!tp)
+ return -ENOMEM;
- c = strchr(buf, '\n');
- if (c)
- *c = '\0';
+ ret = do_write(ff, &tp->nr, sizeof(u32));
+ if (ret < 0)
+ goto err;
- node_map = cpu_map__new(buf);
- if (!node_map)
- goto done;
+ for (i = 0; i < tp->nr; i++) {
+ struct numa_topology_node *n = &tp->nodes[i];
- nr = (u32)node_map->nr;
+ ret = do_write(ff, &n->node, sizeof(u32));
+ if (ret < 0)
+ goto err;
- ret = do_write(ff, &nr, sizeof(nr));
- if (ret < 0)
- goto done;
+ ret = do_write(ff, &n->mem_total, sizeof(u64));
+ if (ret)
+ goto err;
- for (i = 0; i < nr; i++) {
- j = (u32)node_map->map[i];
- ret = do_write(ff, &j, sizeof(j));
- if (ret < 0)
- break;
+ ret = do_write(ff, &n->mem_free, sizeof(u64));
+ if (ret)
+ goto err;
- ret = write_topo_node(ff, i);
+ ret = do_write_string(ff, n->cpus);
if (ret < 0)
- break;
+ goto err;
}
-done:
- free(buf);
- fclose(fp);
- cpu_map__put(node_map);
+
+ ret = 0;
+
+err:
+ numa_topology__delete(tp);
return ret;
}
@@ -1042,11 +825,9 @@ static int write_cpuid(struct feat_fd *ff,
int ret;
ret = get_cpuid(buffer, sizeof(buffer));
- if (!ret)
- goto write_it;
+ if (ret)
+ return -1;
- return -1;
-write_it:
return do_write_string(ff, buffer);
}
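For context, a standalone sketch of what the removed write_topo_node() was doing, and what the new numa_topology__new() helper is assumed to do now: read MemTotal/MemFree and the CPU list for one NUMA node from sysfs. Illustrative only; error handling is trimmed and the helper name read_node is made up for this example.

#define _GNU_SOURCE		/* getline() */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>

static int read_node(int node, uint64_t *mem_total, uint64_t *mem_free,
		     char *cpus, size_t cpus_len)
{
	char path[256], field[32];
	char *line = NULL;
	size_t len = 0;
	uint64_t mem;
	FILE *fp;

	*mem_total = *mem_free = 0;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/node/node%d/meminfo", node);
	fp = fopen(path, "r");
	if (!fp)
		return -1;

	while (getline(&line, &len, fp) > 0) {
		/* lines look like "Node 0 MemTotal:  32768 kB" */
		if (sscanf(line, "%*s %*d %31s %" SCNu64, field, &mem) != 2)
			continue;
		if (!strcmp(field, "MemTotal:"))
			*mem_total = mem;
		else if (!strcmp(field, "MemFree:"))
			*mem_free = mem;
	}
	fclose(fp);

	snprintf(path, sizeof(path),
		 "/sys/devices/system/node/node%d/cpulist", node);
	fp = fopen(path, "r");
	if (!fp) {
		free(line);
		return -1;
	}
	if (fgets(cpus, (int)cpus_len, fp))
		cpus[strcspn(cpus, "\n")] = '\0';
	fclose(fp);
	free(line);
	return 0;
}

The rewritten write_numa_topology() then only serializes tp->nr and, per node, the node id, mem_total, mem_free and the cpu list string, as the hunk above shows.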
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 8aad8330e392..f9eb95bf3938 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include "callchain.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
@@ -11,6 +12,7 @@
#include "evsel.h"
#include "annotate.h"
#include "srcline.h"
+#include "symbol.h"
#include "thread.h"
#include "ui/progress.h"
#include <errno.h>
@@ -209,7 +211,7 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
- struct rb_node *next = rb_first(&hists->entries);
+ struct rb_node *next = rb_first_cached(&hists->entries);
struct hist_entry *n;
int row = 0;
@@ -296,7 +298,7 @@ static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
if (!he->leaf) {
struct hist_entry *child;
- struct rb_node *node = rb_first(&he->hroot_out);
+ struct rb_node *node = rb_first_cached(&he->hroot_out);
while (node) {
child = rb_entry(node, struct hist_entry, rb_node);
node = rb_next(node);
@@ -311,8 +313,8 @@ static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
- struct rb_root *root_in;
- struct rb_root *root_out;
+ struct rb_root_cached *root_in;
+ struct rb_root_cached *root_out;
if (he->parent_he) {
root_in = &he->parent_he->hroot_in;
@@ -325,8 +327,8 @@ static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
root_out = &hists->entries;
}
- rb_erase(&he->rb_node_in, root_in);
- rb_erase(&he->rb_node, root_out);
+ rb_erase_cached(&he->rb_node_in, root_in);
+ rb_erase_cached(&he->rb_node, root_out);
--hists->nr_entries;
if (!he->filtered)
@@ -337,7 +339,7 @@ static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
- struct rb_node *next = rb_first(&hists->entries);
+ struct rb_node *next = rb_first_cached(&hists->entries);
struct hist_entry *n;
while (next) {
@@ -353,7 +355,7 @@ void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
void hists__delete_entries(struct hists *hists)
{
- struct rb_node *next = rb_first(&hists->entries);
+ struct rb_node *next = rb_first_cached(&hists->entries);
struct hist_entry *n;
while (next) {
@@ -394,11 +396,8 @@ static int hist_entry__init(struct hist_entry *he,
* adding new entries. So we need to save a copy.
*/
he->branch_info = malloc(sizeof(*he->branch_info));
- if (he->branch_info == NULL) {
- map__zput(he->ms.map);
- free(he->stat_acc);
- return -ENOMEM;
- }
+ if (he->branch_info == NULL)
+ goto err;
memcpy(he->branch_info, template->branch_info,
sizeof(*he->branch_info));
@@ -417,31 +416,43 @@ static int hist_entry__init(struct hist_entry *he,
if (he->raw_data) {
he->raw_data = memdup(he->raw_data, he->raw_size);
+ if (he->raw_data == NULL)
+ goto err_infos;
+ }
- if (he->raw_data == NULL) {
- map__put(he->ms.map);
- if (he->branch_info) {
- map__put(he->branch_info->from.map);
- map__put(he->branch_info->to.map);
- free(he->branch_info);
- }
- if (he->mem_info) {
- map__put(he->mem_info->iaddr.map);
- map__put(he->mem_info->daddr.map);
- }
- free(he->stat_acc);
- return -ENOMEM;
- }
+ if (he->srcline) {
+ he->srcline = strdup(he->srcline);
+ if (he->srcline == NULL)
+ goto err_rawdata;
}
+
INIT_LIST_HEAD(&he->pairs.node);
thread__get(he->thread);
- he->hroot_in = RB_ROOT;
- he->hroot_out = RB_ROOT;
+ he->hroot_in = RB_ROOT_CACHED;
+ he->hroot_out = RB_ROOT_CACHED;
if (!symbol_conf.report_hierarchy)
he->leaf = true;
return 0;
+
+err_rawdata:
+ free(he->raw_data);
+
+err_infos:
+ if (he->branch_info) {
+ map__put(he->branch_info->from.map);
+ map__put(he->branch_info->to.map);
+ free(he->branch_info);
+ }
+ if (he->mem_info) {
+ map__put(he->mem_info->iaddr.map);
+ map__put(he->mem_info->daddr.map);
+ }
+err:
+ map__zput(he->ms.map);
+ free(he->stat_acc);
+ return -ENOMEM;
}
static void *hist_entry__zalloc(size_t size)
@@ -513,8 +524,9 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
int64_t cmp;
u64 period = entry->stat.period;
u64 weight = entry->stat.weight;
+ bool leftmost = true;
- p = &hists->entries_in->rb_node;
+ p = &hists->entries_in->rb_root.rb_node;
while (*p != NULL) {
parent = *p;
@@ -557,8 +569,10 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
if (cmp < 0)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
he = hist_entry__new(entry, sample_self);
@@ -570,7 +584,7 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
hists->nr_entries++;
rb_link_node(&he->rb_node_in, parent, p);
- rb_insert_color(&he->rb_node_in, hists->entries_in);
+ rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
out:
if (sample_self)
he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
@@ -601,7 +615,7 @@ __hists__add_entry(struct hists *hists,
.map = al->map,
.sym = al->sym,
},
- .srcline = al->srcline ? strdup(al->srcline) : NULL,
+ .srcline = (char *) al->srcline,
.socket = al->socket,
.cpu = al->cpu,
.cpumode = al->cpumode,
@@ -958,7 +972,7 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
.map = al->map,
.sym = al->sym,
},
- .srcline = al->srcline ? strdup(al->srcline) : NULL,
+ .srcline = (char *) al->srcline,
.parent = iter->parent,
.raw_data = sample->raw_data,
.raw_size = sample->raw_size,
@@ -1279,16 +1293,17 @@ static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
}
static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
- struct rb_root *root,
+ struct rb_root_cached *root,
struct hist_entry *he,
struct hist_entry *parent_he,
struct perf_hpp_list *hpp_list)
{
- struct rb_node **p = &root->rb_node;
+ struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter, *new;
struct perf_hpp_fmt *fmt;
int64_t cmp;
+ bool leftmost = true;
while (*p != NULL) {
parent = *p;
@@ -1308,8 +1323,10 @@ static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
if (cmp < 0)
p = &parent->rb_left;
- else
+ else {
p = &parent->rb_right;
+ leftmost = false;
+ }
}
new = hist_entry__new(he, true);
@@ -1343,12 +1360,12 @@ static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
}
rb_link_node(&new->rb_node_in, parent, p);
- rb_insert_color(&new->rb_node_in, root);
+ rb_insert_color_cached(&new->rb_node_in, root, leftmost);
return new;
}
static int hists__hierarchy_insert_entry(struct hists *hists,
- struct rb_root *root,
+ struct rb_root_cached *root,
struct hist_entry *he)
{
struct perf_hpp_list_node *node;
@@ -1395,13 +1412,14 @@ static int hists__hierarchy_insert_entry(struct hists *hists,
}
static int hists__collapse_insert_entry(struct hists *hists,
- struct rb_root *root,
+ struct rb_root_cached *root,
struct hist_entry *he)
{
- struct rb_node **p = &root->rb_node;
+ struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
int64_t cmp;
+ bool leftmost = true;
if (symbol_conf.report_hierarchy)
return hists__hierarchy_insert_entry(hists, root, he);
@@ -1432,19 +1450,21 @@ static int hists__collapse_insert_entry(struct hists *hists,
if (cmp < 0)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
hists->nr_entries++;
rb_link_node(&he->rb_node_in, parent, p);
- rb_insert_color(&he->rb_node_in, root);
+ rb_insert_color_cached(&he->rb_node_in, root, leftmost);
return 1;
}
-struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
+struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
pthread_mutex_lock(&hists->lock);
@@ -1467,7 +1487,7 @@ static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *next;
struct hist_entry *n;
int ret;
@@ -1479,7 +1499,7 @@ int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
root = hists__get_rotate_entries_in(hists);
- next = rb_first(root);
+ next = rb_first_cached(root);
while (next) {
if (session_done())
@@ -1487,7 +1507,7 @@ int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
n = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&n->rb_node_in);
- rb_erase(&n->rb_node_in, root);
+ rb_erase_cached(&n->rb_node_in, root);
ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
if (ret < 0)
return -1;
@@ -1558,7 +1578,7 @@ static void hierarchy_recalc_total_periods(struct hists *hists)
struct rb_node *node;
struct hist_entry *he;
- node = rb_first(&hists->entries);
+ node = rb_first_cached(&hists->entries);
hists->stats.total_period = 0;
hists->stats.total_non_filtered_period = 0;
@@ -1578,13 +1598,14 @@ static void hierarchy_recalc_total_periods(struct hists *hists)
}
}
-static void hierarchy_insert_output_entry(struct rb_root *root,
+static void hierarchy_insert_output_entry(struct rb_root_cached *root,
struct hist_entry *he)
{
- struct rb_node **p = &root->rb_node;
+ struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
struct perf_hpp_fmt *fmt;
+ bool leftmost = true;
while (*p != NULL) {
parent = *p;
@@ -1592,12 +1613,14 @@ static void hierarchy_insert_output_entry(struct rb_root *root,
if (hist_entry__sort(he, iter) > 0)
p = &parent->rb_left;
- else
+ else {
p = &parent->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&he->rb_node, parent, p);
- rb_insert_color(&he->rb_node, root);
+ rb_insert_color_cached(&he->rb_node, root, leftmost);
/* update column width of dynamic entry */
perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
@@ -1608,16 +1631,16 @@ static void hierarchy_insert_output_entry(struct rb_root *root,
static void hists__hierarchy_output_resort(struct hists *hists,
struct ui_progress *prog,
- struct rb_root *root_in,
- struct rb_root *root_out,
+ struct rb_root_cached *root_in,
+ struct rb_root_cached *root_out,
u64 min_callchain_hits,
bool use_callchain)
{
struct rb_node *node;
struct hist_entry *he;
- *root_out = RB_ROOT;
- node = rb_first(root_in);
+ *root_out = RB_ROOT_CACHED;
+ node = rb_first_cached(root_in);
while (node) {
he = rb_entry(node, struct hist_entry, rb_node_in);
@@ -1660,15 +1683,16 @@ static void hists__hierarchy_output_resort(struct hists *hists,
}
}
-static void __hists__insert_output_entry(struct rb_root *entries,
+static void __hists__insert_output_entry(struct rb_root_cached *entries,
struct hist_entry *he,
u64 min_callchain_hits,
bool use_callchain)
{
- struct rb_node **p = &entries->rb_node;
+ struct rb_node **p = &entries->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
struct perf_hpp_fmt *fmt;
+ bool leftmost = true;
if (use_callchain) {
if (callchain_param.mode == CHAIN_GRAPH_REL) {
@@ -1689,12 +1713,14 @@ static void __hists__insert_output_entry(struct rb_root *entries,
if (hist_entry__sort(he, iter) > 0)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&he->rb_node, parent, p);
- rb_insert_color(&he->rb_node, entries);
+ rb_insert_color_cached(&he->rb_node, entries, leftmost);
perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
if (perf_hpp__is_dynamic_entry(fmt) &&
@@ -1704,9 +1730,10 @@ static void __hists__insert_output_entry(struct rb_root *entries,
}
static void output_resort(struct hists *hists, struct ui_progress *prog,
- bool use_callchain, hists__resort_cb_t cb)
+ bool use_callchain, hists__resort_cb_t cb,
+ void *cb_arg)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *next;
struct hist_entry *n;
u64 callchain_total;
@@ -1736,14 +1763,14 @@ static void output_resort(struct hists *hists, struct ui_progress *prog,
else
root = hists->entries_in;
- next = rb_first(root);
- hists->entries = RB_ROOT;
+ next = rb_first_cached(root);
+ hists->entries = RB_ROOT_CACHED;
while (next) {
n = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&n->rb_node_in);
- if (cb && cb(n))
+ if (cb && cb(n, cb_arg))
continue;
__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
@@ -1757,7 +1784,8 @@ static void output_resort(struct hists *hists, struct ui_progress *prog,
}
}
-void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
+void perf_evsel__output_resort_cb(struct perf_evsel *evsel, struct ui_progress *prog,
+ hists__resort_cb_t cb, void *cb_arg)
{
bool use_callchain;
@@ -1768,18 +1796,23 @@ void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *pro
use_callchain |= symbol_conf.show_branchflag_count;
- output_resort(evsel__hists(evsel), prog, use_callchain, NULL);
+ output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
+}
+
+void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
+{
+ return perf_evsel__output_resort_cb(evsel, prog, NULL, NULL);
}
void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
- output_resort(hists, prog, symbol_conf.use_callchain, NULL);
+ output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
}
void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
hists__resort_cb_t cb)
{
- output_resort(hists, prog, symbol_conf.use_callchain, cb);
+ output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
}
static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
@@ -1798,7 +1831,7 @@ struct rb_node *rb_hierarchy_last(struct rb_node *node)
struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
while (can_goto_child(he, HMD_NORMAL)) {
- node = rb_last(&he->hroot_out);
+ node = rb_last(&he->hroot_out.rb_root);
he = rb_entry(node, struct hist_entry, rb_node);
}
return node;
@@ -1809,7 +1842,7 @@ struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_di
struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
if (can_goto_child(he, hmd))
- node = rb_first(&he->hroot_out);
+ node = rb_first_cached(&he->hroot_out);
else
node = rb_next(node);
@@ -1847,7 +1880,7 @@ bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
if (he->leaf)
return false;
- node = rb_first(&he->hroot_out);
+ node = rb_first_cached(&he->hroot_out);
child = rb_entry(node, struct hist_entry, rb_node);
while (node && child->filtered) {
@@ -1965,7 +1998,7 @@ static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t fil
hists__reset_filter_stats(hists);
hists__reset_col_len(hists);
- for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
if (filter(hists, h))
@@ -1975,13 +2008,15 @@ static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t fil
}
}
-static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
+static void resort_filtered_entry(struct rb_root_cached *root,
+ struct hist_entry *he)
{
- struct rb_node **p = &root->rb_node;
+ struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
- struct rb_root new_root = RB_ROOT;
+ struct rb_root_cached new_root = RB_ROOT_CACHED;
struct rb_node *nd;
+ bool leftmost = true;
while (*p != NULL) {
parent = *p;
@@ -1989,22 +2024,24 @@ static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
if (hist_entry__sort(he, iter) > 0)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&he->rb_node, parent, p);
- rb_insert_color(&he->rb_node, root);
+ rb_insert_color_cached(&he->rb_node, root, leftmost);
if (he->leaf || he->filtered)
return;
- nd = rb_first(&he->hroot_out);
+ nd = rb_first_cached(&he->hroot_out);
while (nd) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
nd = rb_next(nd);
- rb_erase(&h->rb_node, &he->hroot_out);
+ rb_erase_cached(&h->rb_node, &he->hroot_out);
resort_filtered_entry(&new_root, h);
}
@@ -2015,14 +2052,14 @@ static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
struct rb_node *nd;
- struct rb_root new_root = RB_ROOT;
+ struct rb_root_cached new_root = RB_ROOT_CACHED;
hists->stats.nr_non_filtered_samples = 0;
hists__reset_filter_stats(hists);
hists__reset_col_len(hists);
- nd = rb_first(&hists->entries);
+ nd = rb_first_cached(&hists->entries);
while (nd) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
int ret;
@@ -2066,12 +2103,12 @@ static void hists__filter_hierarchy(struct hists *hists, int type, const void *a
* resort output after applying a new filter since filter in a lower
* hierarchy can change periods in an upper hierarchy.
*/
- nd = rb_first(&hists->entries);
+ nd = rb_first_cached(&hists->entries);
while (nd) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
nd = rb_next(nd);
- rb_erase(&h->rb_node, &hists->entries);
+ rb_erase_cached(&h->rb_node, &hists->entries);
resort_filtered_entry(&new_root, h);
}
@@ -2140,18 +2177,19 @@ void hists__inc_nr_samples(struct hists *hists, bool filtered)
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
struct hist_entry *pair)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node **p;
struct rb_node *parent = NULL;
struct hist_entry *he;
int64_t cmp;
+ bool leftmost = true;
if (hists__has(hists, need_collapse))
root = &hists->entries_collapsed;
else
root = hists->entries_in;
- p = &root->rb_node;
+ p = &root->rb_root.rb_node;
while (*p != NULL) {
parent = *p;
@@ -2164,8 +2202,10 @@ static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
if (cmp < 0)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
he = hist_entry__new(pair, true);
@@ -2175,7 +2215,7 @@ static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
if (symbol_conf.cumulate_callchain)
memset(he->stat_acc, 0, sizeof(he->stat));
rb_link_node(&he->rb_node_in, parent, p);
- rb_insert_color(&he->rb_node_in, root);
+ rb_insert_color_cached(&he->rb_node_in, root, leftmost);
hists__inc_stats(hists, he);
he->dummy = true;
}
@@ -2184,15 +2224,16 @@ out:
}
static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
- struct rb_root *root,
+ struct rb_root_cached *root,
struct hist_entry *pair)
{
struct rb_node **p;
struct rb_node *parent = NULL;
struct hist_entry *he;
struct perf_hpp_fmt *fmt;
+ bool leftmost = true;
- p = &root->rb_node;
+ p = &root->rb_root.rb_node;
while (*p != NULL) {
int64_t cmp = 0;
@@ -2209,14 +2250,16 @@ static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
if (cmp < 0)
p = &parent->rb_left;
- else
+ else {
p = &parent->rb_right;
+ leftmost = false;
+ }
}
he = hist_entry__new(pair, true);
if (he) {
rb_link_node(&he->rb_node_in, parent, p);
- rb_insert_color(&he->rb_node_in, root);
+ rb_insert_color_cached(&he->rb_node_in, root, leftmost);
he->dummy = true;
he->hists = hists;
@@ -2233,9 +2276,9 @@ static struct hist_entry *hists__find_entry(struct hists *hists,
struct rb_node *n;
if (hists__has(hists, need_collapse))
- n = hists->entries_collapsed.rb_node;
+ n = hists->entries_collapsed.rb_root.rb_node;
else
- n = hists->entries_in->rb_node;
+ n = hists->entries_in->rb_root.rb_node;
while (n) {
struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
@@ -2252,10 +2295,10 @@ static struct hist_entry *hists__find_entry(struct hists *hists,
return NULL;
}
-static struct hist_entry *hists__find_hierarchy_entry(struct rb_root *root,
+static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
struct hist_entry *he)
{
- struct rb_node *n = root->rb_node;
+ struct rb_node *n = root->rb_root.rb_node;
while (n) {
struct hist_entry *iter;
@@ -2280,13 +2323,13 @@ static struct hist_entry *hists__find_hierarchy_entry(struct rb_root *root,
return NULL;
}
-static void hists__match_hierarchy(struct rb_root *leader_root,
- struct rb_root *other_root)
+static void hists__match_hierarchy(struct rb_root_cached *leader_root,
+ struct rb_root_cached *other_root)
{
struct rb_node *nd;
struct hist_entry *pos, *pair;
- for (nd = rb_first(leader_root); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct hist_entry, rb_node_in);
pair = hists__find_hierarchy_entry(other_root, pos);
@@ -2302,7 +2345,7 @@ static void hists__match_hierarchy(struct rb_root *leader_root,
*/
void hists__match(struct hists *leader, struct hists *other)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *nd;
struct hist_entry *pos, *pair;
@@ -2317,7 +2360,7 @@ void hists__match(struct hists *leader, struct hists *other)
else
root = leader->entries_in;
- for (nd = rb_first(root); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct hist_entry, rb_node_in);
pair = hists__find_entry(other, pos);
@@ -2328,13 +2371,13 @@ void hists__match(struct hists *leader, struct hists *other)
static int hists__link_hierarchy(struct hists *leader_hists,
struct hist_entry *parent,
- struct rb_root *leader_root,
- struct rb_root *other_root)
+ struct rb_root_cached *leader_root,
+ struct rb_root_cached *other_root)
{
struct rb_node *nd;
struct hist_entry *pos, *leader;
- for (nd = rb_first(other_root); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct hist_entry, rb_node_in);
if (hist_entry__has_pairs(pos)) {
@@ -2377,7 +2420,7 @@ static int hists__link_hierarchy(struct hists *leader_hists,
*/
int hists__link(struct hists *leader, struct hists *other)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *nd;
struct hist_entry *pos, *pair;
@@ -2393,7 +2436,7 @@ int hists__link(struct hists *leader, struct hists *other)
else
root = other->entries_in;
- for (nd = rb_first(root); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct hist_entry, rb_node_in);
if (!hist_entry__has_pairs(pos)) {
@@ -2566,10 +2609,10 @@ int perf_hist_config(const char *var, const char *value)
int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
memset(hists, 0, sizeof(*hists));
- hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
+ hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
hists->entries_in = &hists->entries_in_array[0];
- hists->entries_collapsed = RB_ROOT;
- hists->entries = RB_ROOT;
+ hists->entries_collapsed = RB_ROOT_CACHED;
+ hists->entries = RB_ROOT_CACHED;
pthread_mutex_init(&hists->lock, NULL);
hists->socket_filter = -1;
hists->hpp_list = hpp_list;
@@ -2577,14 +2620,14 @@ int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
return 0;
}
-static void hists__delete_remaining_entries(struct rb_root *root)
+static void hists__delete_remaining_entries(struct rb_root_cached *root)
{
struct rb_node *node;
struct hist_entry *he;
- while (!RB_EMPTY_ROOT(root)) {
- node = rb_first(root);
- rb_erase(node, root);
+ while (!RB_EMPTY_ROOT(&root->rb_root)) {
+ node = rb_first_cached(root);
+ rb_erase_cached(node, root);
he = rb_entry(node, struct hist_entry, rb_node_in);
hist_entry__delete(he);
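The bulk of this hist.c change is mechanical: every rb_root becomes an rb_root_cached and every inserter tracks whether the new node landed leftmost. A minimal sketch of that pattern, assuming the rb_root_cached helpers from the tools/include/linux/rbtree.h copy this series brings in:

#include <stdbool.h>
#include <stddef.h>
#include <linux/rbtree.h>	/* tools/include copy with rb_root_cached */

struct item {
	struct rb_node rb_node;
	unsigned long long key;
};

static void item_insert(struct rb_root_cached *root, struct item *new)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	bool leftmost = true;

	while (*p != NULL) {
		struct item *it = rb_entry(*p, struct item, rb_node);

		parent = *p;
		if (new->key < it->key) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			leftmost = false;	/* went right at least once */
		}
	}

	rb_link_node(&new->rb_node, parent, p);
	/* tell the cached root whether its leftmost pointer must be updated */
	rb_insert_color_cached(&new->rb_node, root, leftmost);
}

static struct item *item_pop_first(struct rb_root_cached *root)
{
	struct rb_node *nd = rb_first_cached(root);	/* O(1), no tree walk */

	if (!nd)
		return NULL;
	rb_erase_cached(nd, root);
	return rb_entry(nd, struct item, rb_node);
}

Deletion likewise has to go through rb_erase_cached() so the cached leftmost pointer stays valid, which is why the patch converts the rb_erase()/rb_erase_init() call sites as well.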
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 664b5eda8d51..4af27fbab24f 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -2,9 +2,9 @@
#ifndef __PERF_HIST_H
#define __PERF_HIST_H
+#include <linux/rbtree.h>
#include <linux/types.h>
#include <pthread.h>
-#include "callchain.h"
#include "evsel.h"
#include "header.h"
#include "color.h"
@@ -13,6 +13,9 @@
struct hist_entry;
struct hist_entry_ops;
struct addr_location;
+struct map_symbol;
+struct mem_info;
+struct branch_info;
struct symbol;
enum hist_filter {
@@ -70,10 +73,10 @@ struct thread;
struct dso;
struct hists {
- struct rb_root entries_in_array[2];
- struct rb_root *entries_in;
- struct rb_root entries;
- struct rb_root entries_collapsed;
+ struct rb_root_cached entries_in_array[2];
+ struct rb_root_cached *entries_in;
+ struct rb_root_cached entries;
+ struct rb_root_cached entries_collapsed;
u64 nr_entries;
u64 nr_non_filtered_entries;
u64 callchain_period;
@@ -160,8 +163,10 @@ int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
struct perf_hpp_fmt *fmt, int printed);
void hist_entry__delete(struct hist_entry *he);
-typedef int (*hists__resort_cb_t)(struct hist_entry *he);
+typedef int (*hists__resort_cb_t)(struct hist_entry *he, void *arg);
+void perf_evsel__output_resort_cb(struct perf_evsel *evsel, struct ui_progress *prog,
+ hists__resort_cb_t cb, void *cb_arg);
void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog);
void hists__output_resort(struct hists *hists, struct ui_progress *prog);
void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
@@ -230,7 +235,7 @@ static __pure inline bool hists__has_callchains(struct hists *hists)
int hists__init(void);
int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list);
-struct rb_root *hists__get_rotate_entries_in(struct hists *hists);
+struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists);
struct perf_hpp {
char *buf;
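With the resort callback now taking an opaque argument, a caller can filter entries against its own state instead of globals. A hypothetical sketch (the filter struct, threshold and function names are made up; it assumes perf's internal hist.h/sort.h definitions):

#include "hist.h"	/* perf_evsel__output_resort_cb(), hists__resort_cb_t */
#include "sort.h"	/* struct hist_entry */

struct min_period_filter {
	u64 min_period;
};

static int skip_small_entries(struct hist_entry *he, void *arg)
{
	struct min_period_filter *f = arg;

	/* a non-zero return makes output_resort() skip this entry */
	return he->stat.period < f->min_period;
}

static void resort_with_filter(struct perf_evsel *evsel, struct ui_progress *prog)
{
	struct min_period_filter f = { .min_period = 1000 };

	perf_evsel__output_resort_cb(evsel, prog, skip_small_entries, &f);
}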
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
index ee6ca65f81f4..47025bc727e1 100644
--- a/tools/perf/util/intel-bts.c
+++ b/tools/perf/util/intel-bts.c
@@ -27,6 +27,8 @@
#include "evsel.h"
#include "evlist.h"
#include "machine.h"
+#include "map.h"
+#include "symbol.h"
#include "session.h"
#include "util.h"
#include "thread.h"
@@ -142,7 +144,7 @@ static int intel_bts_lost(struct intel_bts *bts, struct perf_sample *sample)
auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
INTEL_BTS_ERR_LOST, sample->cpu, sample->pid,
- sample->tid, 0, "Lost trace data");
+ sample->tid, 0, "Lost trace data", sample->time);
err = perf_session__deliver_synth_event(bts->session, &event, NULL);
if (err)
@@ -326,35 +328,19 @@ static int intel_bts_get_next_insn(struct intel_bts_queue *btsq, u64 ip)
{
struct machine *machine = btsq->bts->machine;
struct thread *thread;
- struct addr_location al;
unsigned char buf[INTEL_PT_INSN_BUF_SZ];
ssize_t len;
- int x86_64;
- uint8_t cpumode;
+ bool x86_64;
int err = -1;
- if (machine__kernel_ip(machine, ip))
- cpumode = PERF_RECORD_MISC_KERNEL;
- else
- cpumode = PERF_RECORD_MISC_USER;
-
thread = machine__find_thread(machine, -1, btsq->tid);
if (!thread)
return -1;
- if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso)
- goto out_put;
-
- len = dso__data_read_addr(al.map->dso, al.map, machine, ip, buf,
- INTEL_PT_INSN_BUF_SZ);
+ len = thread__memcpy(thread, machine, buf, ip, INTEL_PT_INSN_BUF_SZ, &x86_64);
if (len <= 0)
goto out_put;
- /* Load maps to ensure dso->is_64_bit has been updated */
- map__load(al.map);
-
- x86_64 = al.map->dso->is_64_bit;
-
if (intel_pt_get_insn(buf, len, x86_64, &btsq->intel_pt_insn))
goto out_put;
@@ -372,7 +358,7 @@ static int intel_bts_synth_error(struct intel_bts *bts, int cpu, pid_t pid,
auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
INTEL_BTS_ERR_NOINSN, cpu, pid, tid, ip,
- "Failed to get instruction");
+ "Failed to get instruction", 0);
err = perf_session__deliver_synth_event(bts->session, &event, NULL);
if (err)
diff --git a/tools/perf/util/intel-pt-decoder/Build b/tools/perf/util/intel-pt-decoder/Build
index 1b704fbea9de..23bf788f84b9 100644
--- a/tools/perf/util/intel-pt-decoder/Build
+++ b/tools/perf/util/intel-pt-decoder/Build
@@ -1,4 +1,4 @@
-libperf-$(CONFIG_AUXTRACE) += intel-pt-pkt-decoder.o intel-pt-insn-decoder.o intel-pt-log.o intel-pt-decoder.o
+perf-$(CONFIG_AUXTRACE) += intel-pt-pkt-decoder.o intel-pt-insn-decoder.o intel-pt-log.o intel-pt-decoder.o
inat_tables_script = util/intel-pt-decoder/gen-insn-attr-x86.awk
inat_tables_maps = util/intel-pt-decoder/x86-opcode-map.txt
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 4503f3ca45ab..6e03db142091 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -26,6 +26,7 @@
#include "../cache.h"
#include "../util.h"
+#include "../auxtrace.h"
#include "intel-pt-insn-decoder.h"
#include "intel-pt-pkt-decoder.h"
@@ -867,7 +868,7 @@ static int intel_pt_get_next_packet(struct intel_pt_decoder *decoder)
ret = intel_pt_get_packet(decoder->buf, decoder->len,
&decoder->packet);
- if (ret == INTEL_PT_NEED_MORE_BYTES &&
+ if (ret == INTEL_PT_NEED_MORE_BYTES && BITS_PER_LONG == 32 &&
decoder->len < INTEL_PT_PKT_MAX_SZ && !decoder->next_buf) {
ret = intel_pt_get_split_packet(decoder);
if (ret < 0)
@@ -1394,7 +1395,6 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
{
intel_pt_log("ERROR: Buffer overflow\n");
intel_pt_clear_tx_flags(decoder);
- decoder->cbr = 0;
decoder->timestamp_insn_cnt = 0;
decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
decoder->overflow = true;
@@ -2575,6 +2575,34 @@ static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
}
}
+#define MAX_PADDING (PERF_AUXTRACE_RECORD_ALIGNMENT - 1)
+
+/**
+ * adj_for_padding - adjust overlap to account for padding.
+ * @buf_b: second buffer
+ * @buf_a: first buffer
+ * @len_a: size of first buffer
+ *
+ * @buf_a might have up to 7 bytes of padding appended. Adjust the overlap
+ * accordingly.
+ *
+ * Return: A pointer into @buf_b from where non-overlapped data starts
+ */
+static unsigned char *adj_for_padding(unsigned char *buf_b,
+ unsigned char *buf_a, size_t len_a)
+{
+ unsigned char *p = buf_b - MAX_PADDING;
+ unsigned char *q = buf_a + len_a - MAX_PADDING;
+ int i;
+
+ for (i = MAX_PADDING; i; i--, p++, q++) {
+ if (*p != *q)
+ break;
+ }
+
+ return p;
+}
+
/**
* intel_pt_find_overlap_tsc - determine start of non-overlapped trace data
* using TSC.
@@ -2625,8 +2653,11 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
/* Same TSC, so buffers are consecutive */
if (!cmp && rem_b >= rem_a) {
+ unsigned char *start;
+
*consecutive = true;
- return buf_b + len_b - (rem_b - rem_a);
+ start = buf_b + len_b - (rem_b - rem_a);
+ return adj_for_padding(start, buf_a, len_a);
}
if (cmp < 0)
return buf_b; /* tsc_a < tsc_b => no overlap */
@@ -2689,7 +2720,7 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
found = memmem(buf_a, len_a, buf_b, len_a);
if (found) {
*consecutive = true;
- return buf_b + len_a;
+ return adj_for_padding(buf_b + len_a, buf_a, len_a);
}
/* Try again at next PSB in buffer 'a' */
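A standalone rendition of the new adj_for_padding() with a small made-up demonstration of the adjustment it performs. The byte values are arbitrary and only illustrate the mechanics: AUX data is padded to PERF_AUXTRACE_RECORD_ALIGNMENT (8 bytes), so up to 7 trailing bytes of buffer 'a' may be padding that buffer 'b' does not repeat, and the computed start of non-overlapped data is walked back to the first byte where 'b' stops matching 'a's tail.

#include <stdio.h>
#include <stddef.h>

#define MAX_PADDING 7	/* PERF_AUXTRACE_RECORD_ALIGNMENT - 1 */

static unsigned char *adj_for_padding(unsigned char *buf_b,
				      unsigned char *buf_a, size_t len_a)
{
	unsigned char *p = buf_b - MAX_PADDING;
	unsigned char *q = buf_a + len_a - MAX_PADDING;
	int i;

	for (i = MAX_PADDING; i; i--, p++, q++) {
		if (*p != *q)
			break;
	}
	return p;
}

int main(void)
{
	/* buffer 'a': trace data whose last three bytes are alignment padding */
	unsigned char buf_a[] = { 0x01, 0x02, 0x10, 0x20, 0x30, 0x40, 0x50,
				  0x00, 0x00, 0x00 };
	/* buffer 'b': repeats a's real tail, then carries new data (0x99...) */
	unsigned char buf_b[] = { 0x10, 0x20, 0x30, 0x40, 0x50, 0x99, 0xaa,
				  0xbb, 0xcc, 0xdd, 0xee, 0xff };
	/* suppose overlap detection assumed the padding was repeated too */
	unsigned char *start = buf_b + 8;
	unsigned char *adj = adj_for_padding(start, buf_a, sizeof(buf_a));

	/* prints 3: the new-data start moves back over the padding bytes */
	printf("start moved back by %td bytes\n", start - adj);
	return 0;
}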
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 2e72373ec6df..6d288237887b 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -1411,7 +1411,7 @@ static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
}
static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
- pid_t pid, pid_t tid, u64 ip)
+ pid_t pid, pid_t tid, u64 ip, u64 timestamp)
{
union perf_event event;
char msg[MAX_AUXTRACE_ERROR_MSG];
@@ -1420,7 +1420,7 @@ static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);
auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
- code, cpu, pid, tid, ip, msg);
+ code, cpu, pid, tid, ip, msg, timestamp);
err = perf_session__deliver_synth_event(pt->session, &event, NULL);
if (err)
@@ -1430,6 +1430,18 @@ static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
return err;
}
+static int intel_ptq_synth_error(struct intel_pt_queue *ptq,
+ const struct intel_pt_state *state)
+{
+ struct intel_pt *pt = ptq->pt;
+ u64 tm = ptq->timestamp;
+
+ tm = pt->timeless_decoding ? 0 : tsc_to_perf_time(tm, &pt->tc);
+
+ return intel_pt_synth_error(pt, state->err, ptq->cpu, ptq->pid,
+ ptq->tid, state->from_ip, tm);
+}
+
static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
{
struct auxtrace_queue *queue;
@@ -1676,10 +1688,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
intel_pt_next_tid(pt, ptq);
}
if (pt->synth_opts.errors) {
- err = intel_pt_synth_error(pt, state->err,
- ptq->cpu, ptq->pid,
- ptq->tid,
- state->from_ip);
+ err = intel_ptq_synth_error(ptq, state);
if (err)
return err;
}
@@ -1804,7 +1813,7 @@ static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
{
return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
- sample->pid, sample->tid, 0);
+ sample->pid, sample->tid, 0, sample->time);
}
static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
@@ -2522,6 +2531,8 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
}
pt->timeless_decoding = intel_pt_timeless_decoding(pt);
+ if (pt->timeless_decoding && !pt->tc.time_mult)
+ pt->tc.time_mult = 1;
pt->have_tsc = intel_pt_have_tsc(pt);
pt->sampling_mode = false;
pt->est_tsc = !pt->timeless_decoding;
diff --git a/tools/perf/util/intlist.h b/tools/perf/util/intlist.h
index 85bab8735fa9..5c19ee001299 100644
--- a/tools/perf/util/intlist.h
+++ b/tools/perf/util/intlist.h
@@ -45,7 +45,7 @@ static inline unsigned int intlist__nr_entries(const struct intlist *ilist)
/* For intlist iteration */
static inline struct int_node *intlist__first(struct intlist *ilist)
{
- struct rb_node *rn = rb_first(&ilist->rblist.entries);
+ struct rb_node *rn = rb_first_cached(&ilist->rblist.entries);
return rn ? rb_entry(rn, struct int_node, rb_node) : NULL;
}
static inline struct int_node *intlist__next(struct int_node *in)
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index bf249552a9b0..eda28d3570bc 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -2,6 +2,7 @@
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <errno.h>
+#include <libgen.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
diff --git a/tools/perf/util/kvm-stat.h b/tools/perf/util/kvm-stat.h
index 7b1f06567521..1403dec189b4 100644
--- a/tools/perf/util/kvm-stat.h
+++ b/tools/perf/util/kvm-stat.h
@@ -3,12 +3,13 @@
#define __PERF_KVM_STAT_H
#include "../perf.h"
-#include "evsel.h"
-#include "evlist.h"
-#include "session.h"
#include "tool.h"
#include "stat.h"
+struct perf_evsel;
+struct perf_evlist;
+struct perf_session;
+
struct event_key {
#define INVALID_KEY (~0ULL)
u64 key;
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 143f7057d581..61959aba7e27 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -10,6 +10,7 @@
#include "hist.h"
#include "machine.h"
#include "map.h"
+#include "symbol.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
@@ -21,6 +22,7 @@
#include "unwind.h"
#include "linux/hash.h"
#include "asm/bug.h"
+#include "bpf-event.h"
#include "sane_ctype.h"
#include <symbol/kallsyms.h>
@@ -41,7 +43,7 @@ static void machine__threads_init(struct machine *machine)
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads *threads = &machine->threads[i];
- threads->entries = RB_ROOT;
+ threads->entries = RB_ROOT_CACHED;
init_rwsem(&threads->lock);
threads->nr = 0;
INIT_LIST_HEAD(&threads->dead);
@@ -179,7 +181,7 @@ void machine__delete_threads(struct machine *machine)
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads *threads = &machine->threads[i];
down_write(&threads->lock);
- nd = rb_first(&threads->entries);
+ nd = rb_first_cached(&threads->entries);
while (nd) {
struct thread *t = rb_entry(nd, struct thread, rb_node);
@@ -222,7 +224,7 @@ void machine__delete(struct machine *machine)
void machines__init(struct machines *machines)
{
machine__init(&machines->host, "", HOST_KERNEL_ID);
- machines->guests = RB_ROOT;
+ machines->guests = RB_ROOT_CACHED;
}
void machines__exit(struct machines *machines)
@@ -234,9 +236,10 @@ void machines__exit(struct machines *machines)
struct machine *machines__add(struct machines *machines, pid_t pid,
const char *root_dir)
{
- struct rb_node **p = &machines->guests.rb_node;
+ struct rb_node **p = &machines->guests.rb_root.rb_node;
struct rb_node *parent = NULL;
struct machine *pos, *machine = malloc(sizeof(*machine));
+ bool leftmost = true;
if (machine == NULL)
return NULL;
@@ -251,12 +254,14 @@ struct machine *machines__add(struct machines *machines, pid_t pid,
pos = rb_entry(parent, struct machine, rb_node);
if (pid < pos->pid)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&machine->rb_node, parent, p);
- rb_insert_color(&machine->rb_node, &machines->guests);
+ rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);
return machine;
}
@@ -267,7 +272,7 @@ void machines__set_comm_exec(struct machines *machines, bool comm_exec)
machines->host.comm_exec = comm_exec;
- for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *machine = rb_entry(nd, struct machine, rb_node);
machine->comm_exec = comm_exec;
@@ -276,7 +281,7 @@ void machines__set_comm_exec(struct machines *machines, bool comm_exec)
struct machine *machines__find(struct machines *machines, pid_t pid)
{
- struct rb_node **p = &machines->guests.rb_node;
+ struct rb_node **p = &machines->guests.rb_root.rb_node;
struct rb_node *parent = NULL;
struct machine *machine;
struct machine *default_machine = NULL;
@@ -339,7 +344,7 @@ void machines__process_guests(struct machines *machines,
{
struct rb_node *nd;
- for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
process(pos, data);
}
@@ -352,7 +357,8 @@ void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
machines->host.id_hdr_size = id_hdr_size;
- for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
+ for (node = rb_first_cached(&machines->guests); node;
+ node = rb_next(node)) {
machine = rb_entry(node, struct machine, rb_node);
machine->id_hdr_size = id_hdr_size;
}
@@ -465,9 +471,10 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
pid_t pid, pid_t tid,
bool create)
{
- struct rb_node **p = &threads->entries.rb_node;
+ struct rb_node **p = &threads->entries.rb_root.rb_node;
struct rb_node *parent = NULL;
struct thread *th;
+ bool leftmost = true;
th = threads__get_last_match(threads, machine, pid, tid);
if (th)
@@ -485,8 +492,10 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
if (tid < th->tid)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
if (!create)
@@ -495,7 +504,7 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
th = thread__new(pid, tid);
if (th != NULL) {
rb_link_node(&th->rb_node, parent, p);
- rb_insert_color(&th->rb_node, &threads->entries);
+ rb_insert_color_cached(&th->rb_node, &threads->entries, leftmost);
/*
* We have to initialize map_groups separately
@@ -506,7 +515,7 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
* leader and that would screw up the rb tree.
*/
if (thread__init_map_groups(th, machine)) {
- rb_erase_init(&th->rb_node, &threads->entries);
+ rb_erase_cached(&th->rb_node, &threads->entries);
RB_CLEAR_NODE(&th->rb_node);
thread__put(th);
return NULL;
@@ -681,6 +690,59 @@ int machine__process_switch_event(struct machine *machine __maybe_unused,
return 0;
}
+static int machine__process_ksymbol_register(struct machine *machine,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused)
+{
+ struct symbol *sym;
+ struct map *map;
+
+ map = map_groups__find(&machine->kmaps, event->ksymbol_event.addr);
+ if (!map) {
+ map = dso__new_map(event->ksymbol_event.name);
+ if (!map)
+ return -ENOMEM;
+
+ map->start = event->ksymbol_event.addr;
+ map->pgoff = map->start;
+ map->end = map->start + event->ksymbol_event.len;
+ map_groups__insert(&machine->kmaps, map);
+ }
+
+ sym = symbol__new(event->ksymbol_event.addr, event->ksymbol_event.len,
+ 0, 0, event->ksymbol_event.name);
+ if (!sym)
+ return -ENOMEM;
+ dso__insert_symbol(map->dso, sym);
+ return 0;
+}
+
+static int machine__process_ksymbol_unregister(struct machine *machine,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused)
+{
+ struct map *map;
+
+ map = map_groups__find(&machine->kmaps, event->ksymbol_event.addr);
+ if (map)
+ map_groups__remove(&machine->kmaps, map);
+
+ return 0;
+}
+
+int machine__process_ksymbol(struct machine *machine __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample)
+{
+ if (dump_trace)
+ perf_event__fprintf_ksymbol(event, stdout);
+
+ if (event->ksymbol_event.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
+ return machine__process_ksymbol_unregister(machine, event,
+ sample);
+ return machine__process_ksymbol_register(machine, event, sample);
+}
+
static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename)
{
const char *dup_filename;
@@ -744,7 +806,7 @@ size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
struct rb_node *nd;
size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);
- for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret += __dsos__fprintf(&pos->dsos.head, fp);
}
@@ -764,7 +826,7 @@ size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
struct rb_node *nd;
size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);
- for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
}
@@ -804,7 +866,8 @@ size_t machine__fprintf(struct machine *machine, FILE *fp)
ret = fprintf(fp, "Threads: %u\n", threads->nr);
- for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&threads->entries); nd;
+ nd = rb_next(nd)) {
struct thread *pos = rb_entry(nd, struct thread, rb_node);
ret += thread__fprintf(pos, fp);
@@ -1107,7 +1170,7 @@ failure:
void machines__destroy_kernel_maps(struct machines *machines)
{
- struct rb_node *next = rb_first(&machines->guests);
+ struct rb_node *next = rb_first_cached(&machines->guests);
machine__destroy_kernel_maps(&machines->host);
@@ -1115,7 +1178,7 @@ void machines__destroy_kernel_maps(struct machines *machines)
struct machine *pos = rb_entry(next, struct machine, rb_node);
next = rb_next(&pos->rb_node);
- rb_erase(&pos->rb_node, &machines->guests);
+ rb_erase_cached(&pos->rb_node, &machines->guests);
machine__delete(pos);
}
}
@@ -1680,7 +1743,7 @@ static void __machine__remove_thread(struct machine *machine, struct thread *th,
BUG_ON(refcount_read(&th->refcnt) == 0);
if (lock)
down_write(&threads->lock);
- rb_erase_init(&th->rb_node, &threads->entries);
+ rb_erase_cached(&th->rb_node, &threads->entries);
RB_CLEAR_NODE(&th->rb_node);
--threads->nr;
/*
@@ -1812,6 +1875,10 @@ int machine__process_event(struct machine *machine, union perf_event *event,
case PERF_RECORD_SWITCH:
case PERF_RECORD_SWITCH_CPU_WIDE:
ret = machine__process_switch_event(machine, event); break;
+ case PERF_RECORD_KSYMBOL:
+ ret = machine__process_ksymbol(machine, event, sample); break;
+ case PERF_RECORD_BPF_EVENT:
+ ret = machine__process_bpf_event(machine, event, sample); break;
default:
ret = -1;
break;
@@ -2453,7 +2520,8 @@ int machine__for_each_thread(struct machine *machine,
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
threads = &machine->threads[i];
- for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&threads->entries); nd;
+ nd = rb_next(nd)) {
thread = rb_entry(nd, struct thread, rb_node);
rc = fn(thread, priv);
if (rc != 0)
@@ -2480,7 +2548,7 @@ int machines__for_each_thread(struct machines *machines,
if (rc != 0)
return rc;
- for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *machine = rb_entry(nd, struct machine, rb_node);
rc = machine__for_each_thread(machine, fn, priv);
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index a5d1da60f751..f70ab98a7bde 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -4,7 +4,7 @@
#include <sys/types.h>
#include <linux/rbtree.h>
-#include "map.h"
+#include "map_groups.h"
#include "dso.h"
#include "event.h"
#include "rwsem.h"
@@ -29,11 +29,11 @@ struct vdso_info;
#define THREADS__TABLE_SIZE (1 << THREADS__TABLE_BITS)
struct threads {
- struct rb_root entries;
- struct rw_semaphore lock;
- unsigned int nr;
- struct list_head dead;
- struct thread *last_match;
+ struct rb_root_cached entries;
+ struct rw_semaphore lock;
+ unsigned int nr;
+ struct list_head dead;
+ struct thread *last_match;
};
struct machine {
@@ -130,6 +130,9 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
struct perf_sample *sample);
int machine__process_mmap2_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample);
+int machine__process_ksymbol(struct machine *machine,
+ union perf_event *event,
+ struct perf_sample *sample);
int machine__process_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample);
@@ -137,7 +140,7 @@ typedef void (*machine__process_t)(struct machine *machine, void *data);
struct machines {
struct machine host;
- struct rb_root guests;
+ struct rb_root_cached guests;
};
void machines__init(struct machines *machines);
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 6751301a755c..fbeb0c6efaa6 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -286,8 +286,8 @@ void map__put(struct map *map)
void map__fixup_start(struct map *map)
{
- struct rb_root *symbols = &map->dso->symbols;
- struct rb_node *nd = rb_first(symbols);
+ struct rb_root_cached *symbols = &map->dso->symbols;
+ struct rb_node *nd = rb_first_cached(symbols);
if (nd != NULL) {
struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
map->start = sym->start;
@@ -296,8 +296,8 @@ void map__fixup_start(struct map *map)
void map__fixup_end(struct map *map)
{
- struct rb_root *symbols = &map->dso->symbols;
- struct rb_node *nd = rb_last(symbols);
+ struct rb_root_cached *symbols = &map->dso->symbols;
+ struct rb_node *nd = rb_last(&symbols->rb_root);
if (nd != NULL) {
struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
map->end = sym->end;
@@ -557,6 +557,12 @@ void map_groups__init(struct map_groups *mg, struct machine *machine)
refcount_set(&mg->refcnt, 1);
}
+void map_groups__insert(struct map_groups *mg, struct map *map)
+{
+ maps__insert(&mg->maps, map);
+ map->groups = mg;
+}
+
static void __maps__purge(struct maps *maps)
{
struct rb_root *root = &maps->entries;
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 09282aa45c80..0e20749f2c55 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -6,12 +6,10 @@
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/rbtree.h>
-#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <linux/types.h>
-#include "rwsem.h"
struct dso;
struct ip_callchain;
@@ -48,38 +46,7 @@ struct map {
refcount_t refcnt;
};
-#define KMAP_NAME_LEN 256
-
-struct kmap {
- struct ref_reloc_sym *ref_reloc_sym;
- struct map_groups *kmaps;
- char name[KMAP_NAME_LEN];
-};
-
-struct maps {
- struct rb_root entries;
- struct rb_root names;
- struct rw_semaphore lock;
-};
-
-struct map_groups {
- struct maps maps;
- struct machine *machine;
- refcount_t refcnt;
-};
-
-struct map_groups *map_groups__new(struct machine *machine);
-void map_groups__delete(struct map_groups *mg);
-bool map_groups__empty(struct map_groups *mg);
-
-static inline struct map_groups *map_groups__get(struct map_groups *mg)
-{
- if (mg)
- refcount_inc(&mg->refcnt);
- return mg;
-}
-
-void map_groups__put(struct map_groups *mg);
+struct kmap;
struct kmap *__map__kmap(struct map *map);
struct kmap *map__kmap(struct map *map);
@@ -174,18 +141,7 @@ char *map__srcline(struct map *map, u64 addr, struct symbol *sym);
int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
FILE *fp);
-struct srccode_state {
- char *srcfile;
- unsigned line;
-};
-
-static inline void srccode_state_init(struct srccode_state *state)
-{
- state->srcfile = NULL;
- state->line = 0;
-}
-
-void srccode_state_free(struct srccode_state *state);
+struct srccode_state;
int map__fprintf_srccode(struct map *map, u64 addr,
FILE *fp, struct srccode_state *state);
@@ -198,61 +154,9 @@ void map__fixup_end(struct map *map);
void map__reloc_vmlinux(struct map *map);
-void maps__insert(struct maps *maps, struct map *map);
-void maps__remove(struct maps *maps, struct map *map);
-struct map *maps__find(struct maps *maps, u64 addr);
-struct map *maps__first(struct maps *maps);
-struct map *map__next(struct map *map);
-struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
- struct map **mapp);
-void map_groups__init(struct map_groups *mg, struct machine *machine);
-void map_groups__exit(struct map_groups *mg);
-int map_groups__clone(struct thread *thread,
- struct map_groups *parent);
-size_t map_groups__fprintf(struct map_groups *mg, FILE *fp);
-
int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name,
u64 addr);
-static inline void map_groups__insert(struct map_groups *mg, struct map *map)
-{
- maps__insert(&mg->maps, map);
- map->groups = mg;
-}
-
-static inline void map_groups__remove(struct map_groups *mg, struct map *map)
-{
- maps__remove(&mg->maps, map);
-}
-
-static inline struct map *map_groups__find(struct map_groups *mg, u64 addr)
-{
- return maps__find(&mg->maps, addr);
-}
-
-struct map *map_groups__first(struct map_groups *mg);
-
-static inline struct map *map_groups__next(struct map *map)
-{
- return map__next(map);
-}
-
-struct symbol *map_groups__find_symbol(struct map_groups *mg,
- u64 addr, struct map **mapp);
-
-struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
- const char *name,
- struct map **mapp);
-
-struct addr_map_symbol;
-
-int map_groups__find_ams(struct addr_map_symbol *ams);
-
-int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
- FILE *fp);
-
-struct map *map_groups__find_by_name(struct map_groups *mg, const char *name);
-
bool __map__is_kernel(const struct map *map);
bool __map__is_extra_kernel_map(const struct map *map);
diff --git a/tools/perf/util/map_groups.h b/tools/perf/util/map_groups.h
new file mode 100644
index 000000000000..4dcda33e0fdf
--- /dev/null
+++ b/tools/perf/util/map_groups.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_MAP_GROUPS_H
+#define __PERF_MAP_GROUPS_H
+
+#include <linux/refcount.h>
+#include <linux/rbtree.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <linux/types.h>
+#include "rwsem.h"
+
+struct ref_reloc_sym;
+struct machine;
+struct map;
+struct thread;
+
+struct maps {
+ struct rb_root entries;
+ struct rb_root names;
+ struct rw_semaphore lock;
+};
+
+void maps__insert(struct maps *maps, struct map *map);
+void maps__remove(struct maps *maps, struct map *map);
+struct map *maps__find(struct maps *maps, u64 addr);
+struct map *maps__first(struct maps *maps);
+struct map *map__next(struct map *map);
+struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp);
+
+struct map_groups {
+ struct maps maps;
+ struct machine *machine;
+ refcount_t refcnt;
+};
+
+#define KMAP_NAME_LEN 256
+
+struct kmap {
+ struct ref_reloc_sym *ref_reloc_sym;
+ struct map_groups *kmaps;
+ char name[KMAP_NAME_LEN];
+};
+
+struct map_groups *map_groups__new(struct machine *machine);
+void map_groups__delete(struct map_groups *mg);
+bool map_groups__empty(struct map_groups *mg);
+
+static inline struct map_groups *map_groups__get(struct map_groups *mg)
+{
+ if (mg)
+ refcount_inc(&mg->refcnt);
+ return mg;
+}
+
+void map_groups__put(struct map_groups *mg);
+void map_groups__init(struct map_groups *mg, struct machine *machine);
+void map_groups__exit(struct map_groups *mg);
+int map_groups__clone(struct thread *thread, struct map_groups *parent);
+size_t map_groups__fprintf(struct map_groups *mg, FILE *fp);
+
+void map_groups__insert(struct map_groups *mg, struct map *map);
+
+static inline void map_groups__remove(struct map_groups *mg, struct map *map)
+{
+ maps__remove(&mg->maps, map);
+}
+
+static inline struct map *map_groups__find(struct map_groups *mg, u64 addr)
+{
+ return maps__find(&mg->maps, addr);
+}
+
+struct map *map_groups__first(struct map_groups *mg);
+
+static inline struct map *map_groups__next(struct map *map)
+{
+ return map__next(map);
+}
+
+struct symbol *map_groups__find_symbol(struct map_groups *mg, u64 addr, struct map **mapp);
+struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, const char *name, struct map **mapp);
+
+struct addr_map_symbol;
+
+int map_groups__find_ams(struct addr_map_symbol *ams);
+
+int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, FILE *fp);
+
+struct map *map_groups__find_by_name(struct map_groups *mg, const char *name);
+
+#endif // __PERF_MAP_GROUPS_H
diff --git a/tools/perf/util/map_symbol.h b/tools/perf/util/map_symbol.h
new file mode 100644
index 000000000000..5a1aed9f6bb4
--- /dev/null
+++ b/tools/perf/util/map_symbol.h
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef __PERF_MAP_SYMBOL
+#define __PERF_MAP_SYMBOL 1
+
+#include <linux/types.h>
+
+struct map;
+struct symbol;
+
+struct map_symbol {
+ struct map *map;
+ struct symbol *sym;
+};
+
+struct addr_map_symbol {
+ struct map *map;
+ struct symbol *sym;
+ u64 addr;
+ u64 al_addr;
+ u64 phys_addr;
+};
+#endif // __PERF_MAP_SYMBOL
diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
index 93f74d8d3cdd..42c3e5a229d2 100644
--- a/tools/perf/util/mem-events.c
+++ b/tools/perf/util/mem-events.c
@@ -28,7 +28,7 @@ struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
static char mem_loads_name[100];
static bool mem_loads_name__init;
-char *perf_mem_events__name(int i)
+char * __weak perf_mem_events__name(int i)
{
if (i == PERF_MEM_EVENTS__LOAD) {
if (!mem_loads_name__init) {
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index a28f9b5cc4ff..b8d864ed4afe 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -270,7 +270,7 @@ static void metricgroup__print_strlist(struct strlist *metrics, bool raw)
}
void metricgroup__print(bool metrics, bool metricgroups, char *filter,
- bool raw)
+ bool raw, bool details)
{
struct pmu_events_map *map = perf_pmu__find_map(NULL);
struct pmu_event *pe;
@@ -329,6 +329,12 @@ void metricgroup__print(bool metrics, bool metricgroups, char *filter,
if (asprintf(&s, "%s\n%*s%s]",
pe->metric_name, 8, "[", pe->desc) < 0)
return;
+
+ if (details) {
+ if (asprintf(&s, "%s\n%*s%s]",
+ s, 8, "[", pe->metric_expr) < 0)
+ return;
+ }
}
if (!s)
@@ -352,7 +358,7 @@ void metricgroup__print(bool metrics, bool metricgroups, char *filter,
else if (metrics && !raw)
printf("\nMetrics:\n\n");
- for (node = rb_first(&groups.entries); node; node = next) {
+ for (node = rb_first_cached(&groups.entries); node; node = next) {
struct mep *me = container_of(node, struct mep, nd);
if (metricgroups)
diff --git a/tools/perf/util/metricgroup.h b/tools/perf/util/metricgroup.h
index 8a155dba0581..5c52097a5c63 100644
--- a/tools/perf/util/metricgroup.h
+++ b/tools/perf/util/metricgroup.h
@@ -27,6 +27,7 @@ int metricgroup__parse_groups(const struct option *opt,
const char *str,
struct rblist *metric_events);
-void metricgroup__print(bool metrics, bool groups, char *filter, bool raw);
+void metricgroup__print(bool metrics, bool groups, char *filter,
+ bool raw, bool details);
bool metricgroup__has_metric(const char *metric);
#endif
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 8fc39311a30d..cdc7740fc181 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -10,6 +10,9 @@
#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
+#ifdef HAVE_LIBNUMA_SUPPORT
+#include <numaif.h>
+#endif
#include "debug.h"
#include "event.h"
#include "mmap.h"
@@ -154,9 +157,72 @@ void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __mayb
}
#ifdef HAVE_AIO_SUPPORT
+
+#ifdef HAVE_LIBNUMA_SUPPORT
+static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
+{
+ map->aio.data[idx] = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
+ if (map->aio.data[idx] == MAP_FAILED) {
+ map->aio.data[idx] = NULL;
+ return -1;
+ }
+
+ return 0;
+}
+
+static void perf_mmap__aio_free(struct perf_mmap *map, int idx)
+{
+ if (map->aio.data[idx]) {
+ munmap(map->aio.data[idx], perf_mmap__mmap_len(map));
+ map->aio.data[idx] = NULL;
+ }
+}
+
+static int perf_mmap__aio_bind(struct perf_mmap *map, int idx, int cpu, int affinity)
+{
+ void *data;
+ size_t mmap_len;
+ unsigned long node_mask;
+
+ if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
+ data = map->aio.data[idx];
+ mmap_len = perf_mmap__mmap_len(map);
+ node_mask = 1UL << cpu__get_node(cpu);
+ if (mbind(data, mmap_len, MPOL_BIND, &node_mask, 1, 0)) {
+ pr_err("Failed to bind [%p-%p] AIO buffer to node %d: error %m\n",
+ data, data + mmap_len, cpu__get_node(cpu));
+ return -1;
+ }
+ }
+
+ return 0;
+}
+#else
+static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
+{
+ map->aio.data[idx] = malloc(perf_mmap__mmap_len(map));
+ if (map->aio.data[idx] == NULL)
+ return -1;
+
+ return 0;
+}
+
+static void perf_mmap__aio_free(struct perf_mmap *map, int idx)
+{
+ zfree(&(map->aio.data[idx]));
+}
+
+static int perf_mmap__aio_bind(struct perf_mmap *map __maybe_unused, int idx __maybe_unused,
+ int cpu __maybe_unused, int affinity __maybe_unused)
+{
+ return 0;
+}
+#endif
+
static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
{
- int delta_max, i, prio;
+ int delta_max, i, prio, ret;
map->aio.nr_cblocks = mp->nr_cblocks;
if (map->aio.nr_cblocks) {
@@ -177,11 +243,14 @@ static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
}
delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
for (i = 0; i < map->aio.nr_cblocks; ++i) {
- map->aio.data[i] = malloc(perf_mmap__mmap_len(map));
- if (!map->aio.data[i]) {
+ ret = perf_mmap__aio_alloc(map, i);
+ if (ret == -1) {
pr_debug2("failed to allocate data buffer area, error %m");
return -1;
}
+ ret = perf_mmap__aio_bind(map, i, map->cpu, mp->affinity);
+ if (ret == -1)
+ return -1;
/*
* Use cblock.aio_fildes value different from -1
* to denote started aio write operation on the
@@ -210,7 +279,7 @@ static void perf_mmap__aio_munmap(struct perf_mmap *map)
int i;
for (i = 0; i < map->aio.nr_cblocks; ++i)
- zfree(&map->aio.data[i]);
+ perf_mmap__aio_free(map, i);
if (map->aio.data)
zfree(&map->aio.data);
zfree(&map->aio.cblocks);
@@ -314,6 +383,32 @@ void perf_mmap__munmap(struct perf_mmap *map)
auxtrace_mmap__munmap(&map->auxtrace_mmap);
}
+static void build_node_mask(int node, cpu_set_t *mask)
+{
+ int c, cpu, nr_cpus;
+ const struct cpu_map *cpu_map = NULL;
+
+ cpu_map = cpu_map__online();
+ if (!cpu_map)
+ return;
+
+ nr_cpus = cpu_map__nr(cpu_map);
+ for (c = 0; c < nr_cpus; c++) {
+ cpu = cpu_map->map[c]; /* map c index to online cpu index */
+ if (cpu__get_node(cpu) == node)
+ CPU_SET(cpu, mask);
+ }
+}
+
+static void perf_mmap__setup_affinity_mask(struct perf_mmap *map, struct mmap_params *mp)
+{
+ CPU_ZERO(&map->affinity_mask);
+ if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
+ build_node_mask(cpu__get_node(map->cpu), &map->affinity_mask);
+ else if (mp->affinity == PERF_AFFINITY_CPU)
+ CPU_SET(map->cpu, &map->affinity_mask);
+}
+
int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu)
{
/*
@@ -343,6 +438,8 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c
map->fd = fd;
map->cpu = cpu;
+ perf_mmap__setup_affinity_mask(map, mp);
+
if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
&mp->auxtrace_mp, map->base, fd))
return -1;
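
The perf_mmap__aio_alloc()/perf_mmap__aio_bind() pair above allocates each AIO data buffer with mmap() and, when libnuma support is compiled in, binds it to the NUMA node of the map's CPU with mbind(). Below is a minimal standalone sketch of that allocate-and-bind pattern, not the perf code itself; alloc_on_node() is a made-up helper, it assumes the node number fits in a single unsigned long mask, and it needs the libnuma development headers and -lnuma at link time.

  /*
   * Minimal sketch of allocating a buffer and binding it to one NUMA
   * node, roughly what the HAVE_LIBNUMA_SUPPORT variants of
   * perf_mmap__aio_alloc()/perf_mmap__aio_bind() do above.
   * alloc_on_node() is illustrative only; build with -lnuma.
   */
  #include <stddef.h>
  #include <sys/mman.h>
  #include <numaif.h>

  static void *alloc_on_node(size_t len, int node)
  {
  	unsigned long node_mask = 1UL << node;	/* assumes node < 64 */
  	void *buf;

  	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
  		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  	if (buf == MAP_FAILED)
  		return NULL;

  	/* MPOL_BIND: back the range only with pages from 'node' */
  	if (mbind(buf, len, MPOL_BIND, &node_mask,
  		  sizeof(node_mask) * 8, 0)) {
  		munmap(buf, len);
  		return NULL;
  	}
  	return buf;
  }
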
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index aeb6942fdb00..e566c19b242b 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -38,6 +38,7 @@ struct perf_mmap {
int nr_cblocks;
} aio;
#endif
+ cpu_set_t affinity_mask;
};
/*
@@ -69,7 +70,7 @@ enum bkw_mmap_state {
};
struct mmap_params {
- int prot, mask, nr_cblocks;
+ int prot, mask, nr_cblocks, affinity;
struct auxtrace_mmap_params auxtrace_mp;
};
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 920e1e6551dd..4dcc01b2532c 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -2540,7 +2540,7 @@ void print_events(const char *event_glob, bool name_only, bool quiet_flag,
print_sdt_events(NULL, NULL, name_only);
- metricgroup__print(true, true, NULL, name_only);
+ metricgroup__print(true, true, NULL, name_only, details_flag);
}
int parse_events__is_hardcoded_term(struct parse_events_term *term)
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index da8fe57691b8..44819bdb037d 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -311,7 +311,7 @@ value_sym '/' event_config '/'
$$ = list;
}
|
-value_sym sep_slash_dc
+value_sym sep_slash_slash_dc
{
struct list_head *list;
int type = $1 >> 16;
@@ -702,7 +702,7 @@ PE_VALUE PE_ARRAY_RANGE PE_VALUE
sep_dc: ':' |
-sep_slash_dc: '/' | ':' |
+sep_slash_slash_dc: '/' '/' | ':' |
%%
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 11a234740632..6199a3174ab9 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -29,8 +29,6 @@ struct perf_pmu_format {
struct list_head list;
};
-#define EVENT_SOURCE_DEVICE_PATH "/bus/event_source/devices/"
-
int perf_pmu_parse(struct list_head *list, char *name);
extern FILE *perf_pmu_in;
@@ -754,6 +752,19 @@ perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
return NULL;
}
+static int pmu_max_precise(const char *name)
+{
+ char path[PATH_MAX];
+ int max_precise = -1;
+
+ scnprintf(path, PATH_MAX,
+ "bus/event_source/devices/%s/caps/max_precise",
+ name);
+
+ sysfs__read_int(path, &max_precise);
+ return max_precise;
+}
+
static struct perf_pmu *pmu_lookup(const char *name)
{
struct perf_pmu *pmu;
@@ -786,6 +797,7 @@ static struct perf_pmu *pmu_lookup(const char *name)
pmu->name = strdup(name);
pmu->type = type;
pmu->is_uncore = pmu_is_uncore(name);
+ pmu->max_precise = pmu_max_precise(name);
pmu_add_cpu_aliases(&aliases, pmu);
INIT_LIST_HEAD(&pmu->format);
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 76fecec7b3f9..bd9ec2704a57 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -6,9 +6,10 @@
#include <linux/compiler.h>
#include <linux/perf_event.h>
#include <stdbool.h>
-#include "evsel.h"
#include "parse-events.h"
+struct perf_evsel_config_term;
+
enum {
PERF_PMU_FORMAT_VALUE_CONFIG,
PERF_PMU_FORMAT_VALUE_CONFIG1,
@@ -16,6 +17,7 @@ enum {
};
#define PERF_PMU_FORMAT_BITS 64
+#define EVENT_SOURCE_DEVICE_PATH "/bus/event_source/devices/"
struct perf_event_attr;
@@ -24,12 +26,12 @@ struct perf_pmu {
__u32 type;
bool selectable;
bool is_uncore;
+ int max_precise;
struct perf_event_attr *default_config;
struct cpu_map *cpus;
struct list_head format; /* HEAD struct perf_pmu_format -> list */
struct list_head aliases; /* HEAD struct perf_pmu_alias -> list */
struct list_head list; /* ELEM */
- int (*set_drv_config) (struct perf_evsel_config_term *term);
};
struct perf_pmu_info {
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 18a59fba97ff..a1b8d9649ca7 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -35,11 +35,14 @@
#include "util.h"
#include "event.h"
+#include "namespaces.h"
#include "strlist.h"
#include "strfilter.h"
#include "debug.h"
#include "cache.h"
#include "color.h"
+#include "map.h"
+#include "map_groups.h"
#include "symbol.h"
#include "thread.h"
#include <api/fs/fs.h>
@@ -469,9 +472,12 @@ static struct debuginfo *open_debuginfo(const char *module, struct nsinfo *nsi,
strcpy(reason, "(unknown)");
} else
dso__strerror_load(dso, reason, STRERR_BUFSIZE);
- if (!silent)
- pr_err("Failed to find the path for %s: %s\n",
- module ?: "kernel", reason);
+ if (!silent) {
+ if (module)
+ pr_err("Module %s is not loaded, please specify its full path name.\n", module);
+ else
+ pr_err("Failed to find the path for the kernel: %s\n", reason);
+ }
return NULL;
}
path = dso->long_name;
@@ -3528,7 +3534,8 @@ int show_available_funcs(const char *target, struct nsinfo *nsi,
/* Show all (filtered) symbols */
setup_pager();
- for (nd = rb_first(&map->dso->symbol_names); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&map->dso->symbol_names); nd;
+ nd = rb_next(nd)) {
struct symbol_name_rb_node *pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
if (strfilter__compare(_filter, pos->sym.name))
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index 15a98c3a2a2f..05c8d571a901 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -4,8 +4,9 @@
#include <linux/compiler.h>
#include <stdbool.h>
-#include "intlist.h"
-#include "namespaces.h"
+
+struct intlist;
+struct nsinfo;
/* Probe related configurations */
struct probe_conf {
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
index 0b1195cad0e5..4062bc4412a9 100644
--- a/tools/perf/util/probe-file.c
+++ b/tools/perf/util/probe-file.c
@@ -20,6 +20,7 @@
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>
+#include "namespaces.h"
#include "util.h"
#include "event.h"
#include "strlist.h"
diff --git a/tools/perf/util/rb_resort.h b/tools/perf/util/rb_resort.h
index a920f702a74d..376e86cb4c3c 100644
--- a/tools/perf/util/rb_resort.h
+++ b/tools/perf/util/rb_resort.h
@@ -140,12 +140,12 @@ struct __name##_sorted *__name = __name##_sorted__new
/* For 'struct intlist' */
#define DECLARE_RESORT_RB_INTLIST(__name, __ilist) \
- DECLARE_RESORT_RB(__name)(&__ilist->rblist.entries, \
+ DECLARE_RESORT_RB(__name)(&__ilist->rblist.entries.rb_root, \
__ilist->rblist.nr_entries)
/* For 'struct machine->threads' */
-#define DECLARE_RESORT_RB_MACHINE_THREADS(__name, __machine, hash_bucket) \
- DECLARE_RESORT_RB(__name)(&__machine->threads[hash_bucket].entries, \
- __machine->threads[hash_bucket].nr)
+#define DECLARE_RESORT_RB_MACHINE_THREADS(__name, __machine, hash_bucket) \
+ DECLARE_RESORT_RB(__name)(&__machine->threads[hash_bucket].entries.rb_root, \
+ __machine->threads[hash_bucket].nr)
#endif /* _PERF_RESORT_RB_H_ */
diff --git a/tools/perf/util/rblist.c b/tools/perf/util/rblist.c
index 0efc3258c648..11e07fab20dc 100644
--- a/tools/perf/util/rblist.c
+++ b/tools/perf/util/rblist.c
@@ -13,8 +13,9 @@
int rblist__add_node(struct rblist *rblist, const void *new_entry)
{
- struct rb_node **p = &rblist->entries.rb_node;
+ struct rb_node **p = &rblist->entries.rb_root.rb_node;
struct rb_node *parent = NULL, *new_node;
+ bool leftmost = true;
while (*p != NULL) {
int rc;
@@ -24,8 +25,10 @@ int rblist__add_node(struct rblist *rblist, const void *new_entry)
rc = rblist->node_cmp(parent, new_entry);
if (rc > 0)
p = &(*p)->rb_left;
- else if (rc < 0)
+ else if (rc < 0) {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
else
return -EEXIST;
}
@@ -35,7 +38,7 @@ int rblist__add_node(struct rblist *rblist, const void *new_entry)
return -ENOMEM;
rb_link_node(new_node, parent, p);
- rb_insert_color(new_node, &rblist->entries);
+ rb_insert_color_cached(new_node, &rblist->entries, leftmost);
++rblist->nr_entries;
return 0;
@@ -43,7 +46,7 @@ int rblist__add_node(struct rblist *rblist, const void *new_entry)
void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node)
{
- rb_erase(rb_node, &rblist->entries);
+ rb_erase_cached(rb_node, &rblist->entries);
--rblist->nr_entries;
rblist->node_delete(rblist, rb_node);
}
@@ -52,8 +55,9 @@ static struct rb_node *__rblist__findnew(struct rblist *rblist,
const void *entry,
bool create)
{
- struct rb_node **p = &rblist->entries.rb_node;
+ struct rb_node **p = &rblist->entries.rb_root.rb_node;
struct rb_node *parent = NULL, *new_node = NULL;
+ bool leftmost = true;
while (*p != NULL) {
int rc;
@@ -63,8 +67,10 @@ static struct rb_node *__rblist__findnew(struct rblist *rblist,
rc = rblist->node_cmp(parent, entry);
if (rc > 0)
p = &(*p)->rb_left;
- else if (rc < 0)
+ else if (rc < 0) {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
else
return parent;
}
@@ -73,7 +79,8 @@ static struct rb_node *__rblist__findnew(struct rblist *rblist,
new_node = rblist->node_new(rblist, entry);
if (new_node) {
rb_link_node(new_node, parent, p);
- rb_insert_color(new_node, &rblist->entries);
+ rb_insert_color_cached(new_node,
+ &rblist->entries, leftmost);
++rblist->nr_entries;
}
}
@@ -94,7 +101,7 @@ struct rb_node *rblist__findnew(struct rblist *rblist, const void *entry)
void rblist__init(struct rblist *rblist)
{
if (rblist != NULL) {
- rblist->entries = RB_ROOT;
+ rblist->entries = RB_ROOT_CACHED;
rblist->nr_entries = 0;
}
@@ -103,7 +110,7 @@ void rblist__init(struct rblist *rblist)
void rblist__exit(struct rblist *rblist)
{
- struct rb_node *pos, *next = rb_first(&rblist->entries);
+ struct rb_node *pos, *next = rb_first_cached(&rblist->entries);
while (next) {
pos = next;
@@ -124,7 +131,8 @@ struct rb_node *rblist__entry(const struct rblist *rblist, unsigned int idx)
{
struct rb_node *node;
- for (node = rb_first(&rblist->entries); node; node = rb_next(node)) {
+ for (node = rb_first_cached(&rblist->entries); node;
+ node = rb_next(node)) {
if (!idx--)
return node;
}
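
The rblist conversion above switches 'struct rblist' to an rb_root_cached and tracks, while descending, whether the node being inserted ever goes right; only a node that always went left is the new leftmost entry and may update the cached pointer. The same leftmost bookkeeping recurs in the srcline and machine changes. A minimal sketch of that insert pattern against the kernel-style rbtree API in tools/include follows; 'struct item' and item_insert() are made up for illustration.

  /*
   * Minimal sketch of inserting into an rb_root_cached while tracking
   * whether the new node becomes the leftmost entry, mirroring the
   * bookkeeping added to rblist__add_node() above.
   * 'struct item' and item_insert() are illustrative only.
   */
  #include <stdbool.h>
  #include <linux/rbtree.h>

  struct item {
  	struct rb_node rb_node;
  	long key;
  };

  static void item_insert(struct rb_root_cached *root, struct item *new)
  {
  	struct rb_node **p = &root->rb_root.rb_node;
  	struct rb_node *parent = NULL;
  	bool leftmost = true;

  	while (*p) {
  		struct item *i = rb_entry(*p, struct item, rb_node);

  		parent = *p;
  		if (new->key < i->key) {
  			p = &(*p)->rb_left;
  		} else {
  			p = &(*p)->rb_right;
  			leftmost = false;	/* went right at least once */
  		}
  	}
  	rb_link_node(&new->rb_node, parent, p);
  	rb_insert_color_cached(&new->rb_node, root, leftmost);
  }
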
diff --git a/tools/perf/util/rblist.h b/tools/perf/util/rblist.h
index 76df15c27f5f..14b232a4d0b6 100644
--- a/tools/perf/util/rblist.h
+++ b/tools/perf/util/rblist.h
@@ -20,7 +20,7 @@
*/
struct rblist {
- struct rb_root entries;
+ struct rb_root_cached entries;
unsigned int nr_entries;
int (*node_cmp)(struct rb_node *rbn, const void *entry);
diff --git a/tools/perf/util/s390-cpumcf-kernel.h b/tools/perf/util/s390-cpumcf-kernel.h
new file mode 100644
index 000000000000..d4356030b504
--- /dev/null
+++ b/tools/perf/util/s390-cpumcf-kernel.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for s390 CPU measurement counter set diagnostic facility
+ *
+ * Copyright IBM Corp. 2019
+ * Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
+ * Thomas Richter <tmricht@linux.ibm.com>
+ */
+#ifndef S390_CPUMCF_KERNEL_H
+#define S390_CPUMCF_KERNEL_H
+
+#define S390_CPUMCF_DIAG_DEF 0xfeef /* Counter diagnostic entry ID */
+#define PERF_EVENT_CPUM_CF_DIAG 0xBC000 /* Event: Counter sets */
+
+struct cf_ctrset_entry { /* CPU-M CF counter set entry (8 byte) */
+ unsigned int def:16; /* 0-15 Data Entry Format */
+ unsigned int set:16; /* 16-31 Counter set identifier */
+ unsigned int ctr:16; /* 32-47 Number of stored counters */
+ unsigned int res1:16; /* 48-63 Reserved */
+};
+
+struct cf_trailer_entry { /* CPU-M CF trailer for raw traces (64 byte) */
+ /* 0 - 7 */
+ union {
+ struct {
+ unsigned int clock_base:1; /* TOD clock base */
+ unsigned int speed:1; /* CPU speed */
+ /* Measurement alerts */
+ unsigned int mtda:1; /* Loss of MT ctr. data alert */
+ unsigned int caca:1; /* Counter auth. change alert */
+ unsigned int lcda:1; /* Loss of counter data alert */
+ };
+ unsigned long flags; /* 0-63 All indicators */
+ };
+ /* 8 - 15 */
+ unsigned int cfvn:16; /* 64-79 Ctr First Version */
+ unsigned int csvn:16; /* 80-95 Ctr Second Version */
+ unsigned int cpu_speed:32; /* 96-127 CPU speed */
+ /* 16 - 23 */
+ unsigned long timestamp; /* 128-191 Timestamp (TOD) */
+ /* 24 - 55 */
+ union {
+ struct {
+ unsigned long progusage1;
+ unsigned long progusage2;
+ unsigned long progusage3;
+ unsigned long tod_base;
+ };
+ unsigned long progusage[4];
+ };
+ /* 56 - 63 */
+ unsigned int mach_type:16; /* Machine type */
+ unsigned int res1:16; /* Reserved */
+ unsigned int res2:32; /* Reserved */
+};
+
+#define CPUMF_CTR_SET_BASIC 0 /* Basic Counter Set */
+#define CPUMF_CTR_SET_USER 1 /* Problem-State Counter Set */
+#define CPUMF_CTR_SET_CRYPTO 2 /* Crypto-Activity Counter Set */
+#define CPUMF_CTR_SET_EXT 3 /* Extended Counter Set */
+#define CPUMF_CTR_SET_MT_DIAG 4 /* MT-diagnostic Counter Set */
+#endif
diff --git a/tools/perf/util/s390-cpumsf.c b/tools/perf/util/s390-cpumsf.c
index 68b2570304ec..c215704931dc 100644
--- a/tools/perf/util/s390-cpumsf.c
+++ b/tools/perf/util/s390-cpumsf.c
@@ -162,6 +162,7 @@
#include "auxtrace.h"
#include "s390-cpumsf.h"
#include "s390-cpumsf-kernel.h"
+#include "s390-cpumcf-kernel.h"
#include "config.h"
struct s390_cpumsf {
@@ -184,8 +185,58 @@ struct s390_cpumsf_queue {
struct auxtrace_buffer *buffer;
int cpu;
FILE *logfile;
+ FILE *logfile_ctr;
};
+/* Check if the raw data should be dumped to file. If this is the case and
+ * the file to dump to has not been opened for writing, do so.
+ *
+ * Return 0 on success and greater than zero on error so processing continues.
+ */
+static int s390_cpumcf_dumpctr(struct s390_cpumsf *sf,
+ struct perf_sample *sample)
+{
+ struct s390_cpumsf_queue *sfq;
+ struct auxtrace_queue *q;
+ int rc = 0;
+
+ if (!sf->use_logfile || sf->queues.nr_queues <= sample->cpu)
+ return rc;
+
+ q = &sf->queues.queue_array[sample->cpu];
+ sfq = q->priv;
+ if (!sfq) /* Queue not yet allocated */
+ return rc;
+
+ if (!sfq->logfile_ctr) {
+ char *name;
+
+ rc = (sf->logdir)
+ ? asprintf(&name, "%s/aux.ctr.%02x",
+ sf->logdir, sample->cpu)
+ : asprintf(&name, "aux.ctr.%02x", sample->cpu);
+ if (rc > 0)
+ sfq->logfile_ctr = fopen(name, "w");
+ if (sfq->logfile_ctr == NULL) {
+ pr_err("Failed to open counter set log file %s, "
+ "continue...\n", name);
+ rc = 1;
+ }
+ free(name);
+ }
+
+ if (sfq->logfile_ctr) {
+ /* See comment above for -4 */
+ size_t n = fwrite(sample->raw_data, sample->raw_size - 4, 1,
+ sfq->logfile_ctr);
+ if (n != 1) {
+ pr_err("Failed to write counter set data\n");
+ rc = 1;
+ }
+ }
+ return rc;
+}
+
/* Display s390 CPU measurement facility basic-sampling data entry */
static bool s390_cpumsf_basic_show(const char *color, size_t pos,
struct hws_basic_entry *basic)
@@ -301,6 +352,11 @@ static bool s390_cpumsf_validate(int machine_type,
*dsdes = 85;
*bsdes = 32;
break;
+ case 2964:
+ case 2965:
+ *dsdes = 112;
+ *bsdes = 32;
+ break;
default:
/* Illegal trailer entry */
return false;
@@ -768,7 +824,7 @@ static int s390_cpumsf_process_queues(struct s390_cpumsf *sf, u64 timestamp)
}
static int s390_cpumsf_synth_error(struct s390_cpumsf *sf, int code, int cpu,
- pid_t pid, pid_t tid, u64 ip)
+ pid_t pid, pid_t tid, u64 ip, u64 timestamp)
{
char msg[MAX_AUXTRACE_ERROR_MSG];
union perf_event event;
@@ -776,7 +832,7 @@ static int s390_cpumsf_synth_error(struct s390_cpumsf *sf, int code, int cpu,
strncpy(msg, "Lost Auxiliary Trace Buffer", sizeof(msg) - 1);
auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
- code, cpu, pid, tid, ip, msg);
+ code, cpu, pid, tid, ip, msg, timestamp);
err = perf_session__deliver_synth_event(sf->session, &event, NULL);
if (err)
@@ -788,11 +844,12 @@ static int s390_cpumsf_synth_error(struct s390_cpumsf *sf, int code, int cpu,
static int s390_cpumsf_lost(struct s390_cpumsf *sf, struct perf_sample *sample)
{
return s390_cpumsf_synth_error(sf, 1, sample->cpu,
- sample->pid, sample->tid, 0);
+ sample->pid, sample->tid, 0,
+ sample->time);
}
static int
-s390_cpumsf_process_event(struct perf_session *session __maybe_unused,
+s390_cpumsf_process_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
struct perf_tool *tool)
@@ -801,6 +858,8 @@ s390_cpumsf_process_event(struct perf_session *session __maybe_unused,
struct s390_cpumsf,
auxtrace);
u64 timestamp = sample->time;
+ struct perf_evsel *ev_bc000;
+
int err = 0;
if (dump_trace)
@@ -811,6 +870,16 @@ s390_cpumsf_process_event(struct perf_session *session __maybe_unused,
return -EINVAL;
}
+ if (event->header.type == PERF_RECORD_SAMPLE &&
+ sample->raw_size) {
+ /* Handle event with raw data */
+ ev_bc000 = perf_evlist__event2evsel(session->evlist, event);
+ if (ev_bc000 &&
+ ev_bc000->attr.config == PERF_EVENT_CPUM_CF_DIAG)
+ err = s390_cpumcf_dumpctr(sf, sample);
+ return err;
+ }
+
if (event->header.type == PERF_RECORD_AUX &&
event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
return s390_cpumsf_lost(sf, sample);
@@ -891,9 +960,15 @@ static void s390_cpumsf_free_queues(struct perf_session *session)
struct s390_cpumsf_queue *sfq = (struct s390_cpumsf_queue *)
queues->queue_array[i].priv;
- if (sfq != NULL && sfq->logfile) {
- fclose(sfq->logfile);
- sfq->logfile = NULL;
+ if (sfq != NULL) {
+ if (sfq->logfile) {
+ fclose(sfq->logfile);
+ sfq->logfile = NULL;
+ }
+ if (sfq->logfile_ctr) {
+ fclose(sfq->logfile_ctr);
+ sfq->logfile_ctr = NULL;
+ }
}
zfree(&queues->queue_array[i].priv);
}
diff --git a/tools/perf/util/s390-sample-raw.c b/tools/perf/util/s390-sample-raw.c
new file mode 100644
index 000000000000..6650f599ed9c
--- /dev/null
+++ b/tools/perf/util/s390-sample-raw.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2019
+ * Author(s): Thomas Richter <tmricht@linux.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Architecture specific trace_event function. Save event's bc000 raw data
+ * to file. File name is aux.ctr.## where ## stands for the CPU number the
+ * sample was taken from.
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include <sys/stat.h>
+#include <linux/compiler.h>
+#include <asm/byteorder.h>
+
+#include "debug.h"
+#include "util.h"
+#include "auxtrace.h"
+#include "session.h"
+#include "evlist.h"
+#include "config.h"
+#include "color.h"
+#include "sample-raw.h"
+#include "s390-cpumcf-kernel.h"
+#include "pmu-events/pmu-events.h"
+
+static size_t ctrset_size(struct cf_ctrset_entry *set)
+{
+ return sizeof(*set) + set->ctr * sizeof(u64);
+}
+
+static bool ctrset_valid(struct cf_ctrset_entry *set)
+{
+ return set->def == S390_CPUMCF_DIAG_DEF;
+}
+
+/* CPU Measurement Counter Facility raw data is a byte stream. It is 8 byte
+ * aligned and might have trailing padding bytes.
+ * Display the raw data on screen.
+ */
+static bool s390_cpumcfdg_testctr(struct perf_sample *sample)
+{
+ size_t len = sample->raw_size, offset = 0;
+ unsigned char *buf = sample->raw_data;
+ struct cf_trailer_entry *te;
+ struct cf_ctrset_entry *cep, ce;
+
+ if (!len)
+ return false;
+ while (offset < len) {
+ cep = (struct cf_ctrset_entry *)(buf + offset);
+ ce.def = be16_to_cpu(cep->def);
+ ce.set = be16_to_cpu(cep->set);
+ ce.ctr = be16_to_cpu(cep->ctr);
+ ce.res1 = be16_to_cpu(cep->res1);
+
+ if (!ctrset_valid(&ce) || offset + ctrset_size(&ce) > len) {
+ /* Raw data for counter sets is always a multiple of 8
+ * bytes. Prepending a 4-byte size field to the
+ * raw data block in the sample causes the perf tool
+ * to append 4 padding bytes to make the raw data part
+ * of the sample a multiple of eight bytes again.
+ *
+ * If the last entry (trailer) is 4 bytes off the raw
+ * area data end, all is good.
+ */
+ if (len - offset - sizeof(*te) == 4)
+ break;
+ pr_err("Invalid counter set entry at %zd\n", offset);
+ return false;
+ }
+ offset += ctrset_size(&ce);
+ }
+ return true;
+}
+
+/* Dump event bc000 on screen, already tested on correctness. */
+static void s390_cpumcfdg_dumptrail(const char *color, size_t offset,
+ struct cf_trailer_entry *tep)
+{
+ struct cf_trailer_entry te;
+
+ te.flags = be64_to_cpu(tep->flags);
+ te.cfvn = be16_to_cpu(tep->cfvn);
+ te.csvn = be16_to_cpu(tep->csvn);
+ te.cpu_speed = be32_to_cpu(tep->cpu_speed);
+ te.timestamp = be64_to_cpu(tep->timestamp);
+ te.progusage1 = be64_to_cpu(tep->progusage1);
+ te.progusage2 = be64_to_cpu(tep->progusage2);
+ te.progusage3 = be64_to_cpu(tep->progusage3);
+ te.tod_base = be64_to_cpu(tep->tod_base);
+ te.mach_type = be16_to_cpu(tep->mach_type);
+ te.res1 = be16_to_cpu(tep->res1);
+ te.res2 = be32_to_cpu(tep->res2);
+
+ color_fprintf(stdout, color, " [%#08zx] Trailer:%c%c%c%c%c"
+ " Cfvn:%d Csvn:%d Speed:%d TOD:%#llx\n",
+ offset, te.clock_base ? 'T' : ' ',
+ te.speed ? 'S' : ' ', te.mtda ? 'M' : ' ',
+ te.caca ? 'C' : ' ', te.lcda ? 'L' : ' ',
+ te.cfvn, te.csvn, te.cpu_speed, te.timestamp);
+ color_fprintf(stdout, color, "\t\t1:%lx 2:%lx 3:%lx TOD-Base:%#llx"
+ " Type:%x\n\n",
+ te.progusage1, te.progusage2, te.progusage3,
+ te.tod_base, te.mach_type);
+}
+
+/* Return starting number of a counter set */
+static int get_counterset_start(int setnr)
+{
+ switch (setnr) {
+ case CPUMF_CTR_SET_BASIC: /* Basic counter set */
+ return 0;
+ case CPUMF_CTR_SET_USER: /* Problem state counter set */
+ return 32;
+ case CPUMF_CTR_SET_CRYPTO: /* Crypto counter set */
+ return 64;
+ case CPUMF_CTR_SET_EXT: /* Extended counter set */
+ return 128;
+ case CPUMF_CTR_SET_MT_DIAG: /* Diagnostic counter set */
+ return 448;
+ default:
+ return -1;
+ }
+}
+
+/* Scan the PMU table and extract the logical name of a counter from the
+ * PMU events table. Input is the counter set and counter number within the
+ * set. Construct the event number and use this as key. If they match return
+ * the name of this counter.
+ * If no match is found a NULL pointer is returned.
+ */
+static const char *get_counter_name(int set, int nr, struct pmu_events_map *map)
+{
+ int rc, event_nr, wanted = get_counterset_start(set) + nr;
+
+ if (map) {
+ struct pmu_event *evp = map->table;
+
+ for (; evp->name || evp->event || evp->desc; ++evp) {
+ if (evp->name == NULL || evp->event == NULL)
+ continue;
+ rc = sscanf(evp->event, "event=%x", &event_nr);
+ if (rc == 1 && event_nr == wanted)
+ return evp->name;
+ }
+ }
+ return NULL;
+}
+
+static void s390_cpumcfdg_dump(struct perf_sample *sample)
+{
+ size_t i, len = sample->raw_size, offset = 0;
+ unsigned char *buf = sample->raw_data;
+ const char *color = PERF_COLOR_BLUE;
+ struct cf_ctrset_entry *cep, ce;
+ struct pmu_events_map *map;
+ struct perf_pmu pmu;
+ u64 *p;
+
+ memset(&pmu, 0, sizeof(pmu));
+ map = perf_pmu__find_map(&pmu);
+ while (offset < len) {
+ cep = (struct cf_ctrset_entry *)(buf + offset);
+
+ ce.def = be16_to_cpu(cep->def);
+ ce.set = be16_to_cpu(cep->set);
+ ce.ctr = be16_to_cpu(cep->ctr);
+ ce.res1 = be16_to_cpu(cep->res1);
+
+ if (!ctrset_valid(&ce)) { /* Print trailer */
+ s390_cpumcfdg_dumptrail(color, offset,
+ (struct cf_trailer_entry *)cep);
+ return;
+ }
+
+ color_fprintf(stdout, color, " [%#08zx] Counterset:%d"
+ " Counters:%d\n", offset, ce.set, ce.ctr);
+ for (i = 0, p = (u64 *)(cep + 1); i < ce.ctr; ++i, ++p) {
+ const char *ev_name = get_counter_name(ce.set, i, map);
+
+ color_fprintf(stdout, color,
+ "\tCounter:%03d %s Value:%#018lx\n", i,
+ ev_name ?: "<unknown>", be64_to_cpu(*p));
+ }
+ offset += ctrset_size(&ce);
+ }
+}
+
+/* S390 specific trace event function. Check for PERF_RECORD_SAMPLE events
+ * and, if the event was triggered by a counter set diagnostic event, display
+ * its raw data.
+ * The function is only invoked when the dump flag -D is set.
+ */
+void perf_evlist__s390_sample_raw(struct perf_evlist *evlist, union perf_event *event,
+ struct perf_sample *sample)
+{
+ struct perf_evsel *ev_bc000;
+
+ if (event->header.type != PERF_RECORD_SAMPLE)
+ return;
+
+ ev_bc000 = perf_evlist__event2evsel(evlist, event);
+ if (ev_bc000 == NULL ||
+ ev_bc000->attr.config != PERF_EVENT_CPUM_CF_DIAG)
+ return;
+
+ /* Display raw data on screen */
+ if (!s390_cpumcfdg_testctr(sample)) {
+ pr_err("Invalid counter set data encountered\n");
+ return;
+ }
+ s390_cpumcfdg_dump(sample);
+}
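
s390_cpumcfdg_testctr() and s390_cpumcfdg_dump() above treat the raw sample as a sequence of counter sets: an 8-byte cf_ctrset_entry header followed by 'ctr' 64-bit big-endian counter values, repeated until the trailer (or the 4 padding bytes) is reached. A minimal sketch of just that offset arithmetic follows; walk_ctrsets() is a hypothetical helper that reuses the definitions from s390-cpumcf-kernel.h, while in perf the buffer and length come from sample->raw_data/raw_size.

  /*
   * Minimal sketch of walking a CPU-M CF raw-data block: each counter
   * set is an 8-byte cf_ctrset_entry header followed by 'ctr' 64-bit
   * big-endian counters.  walk_ctrsets() is illustrative only.
   */
  #include <stdio.h>
  #include <linux/types.h>
  #include <asm/byteorder.h>
  #include "s390-cpumcf-kernel.h"

  static void walk_ctrsets(unsigned char *buf, size_t len)
  {
  	size_t offset = 0;

  	while (offset < len) {
  		struct cf_ctrset_entry *cep =
  			(struct cf_ctrset_entry *)(buf + offset);
  		u16 ctr = be16_to_cpu(cep->ctr);

  		if (be16_to_cpu(cep->def) != S390_CPUMCF_DIAG_DEF)
  			break;	/* trailer (or padding) reached */

  		printf("set %u: %u counters\n",
  		       be16_to_cpu(cep->set), ctr);
  		offset += sizeof(*cep) + ctr * sizeof(u64);
  	}
  }
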
diff --git a/tools/perf/util/sample-raw.c b/tools/perf/util/sample-raw.c
new file mode 100644
index 000000000000..c21e1311fb0f
--- /dev/null
+++ b/tools/perf/util/sample-raw.c
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <string.h>
+#include "evlist.h"
+#include "env.h"
+#include "sample-raw.h"
+
+/*
+ * Check platform the perf data file was created on and perform platform
+ * specific interpretation.
+ */
+void perf_evlist__init_trace_event_sample_raw(struct perf_evlist *evlist)
+{
+ const char *arch_pf = perf_env__arch(evlist->env);
+
+ if (arch_pf && !strcmp("s390", arch_pf))
+ evlist->trace_event_sample_raw = perf_evlist__s390_sample_raw;
+}
diff --git a/tools/perf/util/sample-raw.h b/tools/perf/util/sample-raw.h
new file mode 100644
index 000000000000..95d445c87e93
--- /dev/null
+++ b/tools/perf/util/sample-raw.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SAMPLE_RAW_H
+#define __SAMPLE_RAW_H 1
+
+struct perf_evlist;
+union perf_event;
+struct perf_sample;
+
+void perf_evlist__s390_sample_raw(struct perf_evlist *evlist,
+ union perf_event *event,
+ struct perf_sample *sample);
+
+void perf_evlist__init_trace_event_sample_raw(struct perf_evlist *evlist);
+#endif /* __SAMPLE_RAW_H */
diff --git a/tools/perf/util/scripting-engines/Build b/tools/perf/util/scripting-engines/Build
index 82d28c67e0f3..7b342ce38d99 100644
--- a/tools/perf/util/scripting-engines/Build
+++ b/tools/perf/util/scripting-engines/Build
@@ -1,5 +1,5 @@
-libperf-$(CONFIG_LIBPERL) += trace-event-perl.o
-libperf-$(CONFIG_LIBPYTHON) += trace-event-python.o
+perf-$(CONFIG_LIBPERL) += trace-event-perl.o
+perf-$(CONFIG_LIBPYTHON) += trace-event-python.o
CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-nested-externs -Wno-undef -Wno-switch-default
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index b93f36b887b5..5f06378a482b 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -37,6 +37,8 @@
#include "../../perf.h"
#include "../callchain.h"
#include "../machine.h"
+#include "../map.h"
+#include "../symbol.h"
#include "../thread.h"
#include "../event.h"
#include "../trace-event.h"
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 87ef16a1b17e..09604c6508f0 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -44,6 +44,8 @@
#include "../thread-stack.h"
#include "../trace-event.h"
#include "../call-path.h"
+#include "map.h"
+#include "symbol.h"
#include "thread_map.h"
#include "cpumap.h"
#include "print_binary.h"
@@ -733,8 +735,7 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
Py_FatalError("couldn't create Python dictionary");
pydict_set_item_string_decref(dict, "ev_name", _PyUnicode_FromString(perf_evsel__name(evsel)));
- pydict_set_item_string_decref(dict, "attr", _PyUnicode_FromStringAndSize(
- (const char *)&evsel->attr, sizeof(evsel->attr)));
+ pydict_set_item_string_decref(dict, "attr", _PyBytes_FromStringAndSize((const char *)&evsel->attr, sizeof(evsel->attr)));
pydict_set_item_string_decref(dict_sample, "pid",
_PyLong_FromLong(sample->pid));
@@ -1172,7 +1173,7 @@ static int python_export_call_return(struct db_export *dbe,
u64 comm_db_id = cr->comm ? cr->comm->db_id : 0;
PyObject *t;
- t = tuple_new(11);
+ t = tuple_new(12);
tuple_set_u64(t, 0, cr->db_id);
tuple_set_u64(t, 1, cr->thread->db_id);
@@ -1185,6 +1186,7 @@ static int python_export_call_return(struct db_export *dbe,
tuple_set_u64(t, 8, cr->return_ref);
tuple_set_u64(t, 9, cr->cp->parent->db_id);
tuple_set_s32(t, 10, cr->flags);
+ tuple_set_u64(t, 11, cr->parent_db_id);
call_object(tables->call_return_handler, t, "call_return_table");
@@ -1193,11 +1195,12 @@ static int python_export_call_return(struct db_export *dbe,
return 0;
}
-static int python_process_call_return(struct call_return *cr, void *data)
+static int python_process_call_return(struct call_return *cr, u64 *parent_db_id,
+ void *data)
{
struct db_export *dbe = data;
- return db_export__call_return(dbe, cr);
+ return db_export__call_return(dbe, cr, parent_db_id);
}
static void python_process_general_event(struct perf_sample *sample,
@@ -1494,34 +1497,40 @@ static void _free_command_line(wchar_t **command_line, int num)
static int python_start_script(const char *script, int argc, const char **argv)
{
struct tables *tables = &tables_global;
+ PyMODINIT_FUNC (*initfunc)(void);
#if PY_MAJOR_VERSION < 3
const char **command_line;
#else
wchar_t **command_line;
#endif
- char buf[PATH_MAX];
+ /*
+ * Use a non-const name variable to cope with python 2.6's
+ * PyImport_AppendInittab prototype
+ */
+ char buf[PATH_MAX], name[19] = "perf_trace_context";
int i, err = 0;
FILE *fp;
#if PY_MAJOR_VERSION < 3
+ initfunc = initperf_trace_context;
command_line = malloc((argc + 1) * sizeof(const char *));
command_line[0] = script;
for (i = 1; i < argc + 1; i++)
command_line[i] = argv[i - 1];
#else
+ initfunc = PyInit_perf_trace_context;
command_line = malloc((argc + 1) * sizeof(wchar_t *));
command_line[0] = Py_DecodeLocale(script, NULL);
for (i = 1; i < argc + 1; i++)
command_line[i] = Py_DecodeLocale(argv[i - 1], NULL);
#endif
+ PyImport_AppendInittab(name, initfunc);
Py_Initialize();
#if PY_MAJOR_VERSION < 3
- initperf_trace_context();
PySys_SetArgv(argc + 1, (char **)command_line);
#else
- PyInit_perf_trace_context();
PySys_SetArgv(argc + 1, command_line);
#endif
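
The trace-event-python.c change above registers the perf_trace_context module with PyImport_AppendInittab() before Py_Initialize(); on Python 3 that is the supported way to make an embedded built-in module importable, whereas calling the module's init function after Py_Initialize(), as the old code did, is not. A minimal sketch of the pattern follows; the "demo" module and its init function are made up for illustration (Python 3 C API, built against the Python development headers).

  /*
   * Minimal sketch of registering a built-in extension module with an
   * embedded interpreter before Py_Initialize(), as done above for
   * "perf_trace_context".  The "demo" module is illustrative only.
   */
  #include <Python.h>

  static struct PyModuleDef demo_module = {
  	PyModuleDef_HEAD_INIT, "demo", NULL, -1, NULL,
  };

  static PyObject *PyInit_demo(void)
  {
  	return PyModule_Create(&demo_module);
  }

  int main(void)
  {
  	/* Must happen before Py_Initialize() so "import demo" works. */
  	PyImport_AppendInittab("demo", PyInit_demo);
  	Py_Initialize();
  	PyRun_SimpleString("import demo; print(demo)");
  	Py_Finalize();
  	return 0;
  }
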
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 5456c84c7dd1..db643f3c2b95 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -13,6 +13,8 @@
#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
+#include "map.h"
+#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
@@ -23,6 +25,7 @@
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
+#include "sample-raw.h"
#include "stat.h"
#include "arch/common.h"
@@ -137,7 +140,7 @@ struct perf_session *perf_session__new(struct perf_data *data,
if (perf_data__is_read(data)) {
if (perf_session__open(session) < 0)
- goto out_close;
+ goto out_delete;
/*
* set session attributes that are present in perf.data
@@ -147,6 +150,8 @@ struct perf_session *perf_session__new(struct perf_data *data,
perf_session__set_id_hdr_size(session);
perf_session__set_comm_exec(session);
}
+
+ perf_evlist__init_trace_event_sample_raw(session->evlist);
}
} else {
session->machines.host.env = &perf_env;
@@ -176,8 +181,6 @@ struct perf_session *perf_session__new(struct perf_data *data,
return session;
- out_close:
- perf_data__close(data);
out_delete:
perf_session__delete(session);
out:
@@ -376,6 +379,10 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
tool->itrace_start = perf_event__process_itrace_start;
if (tool->context_switch == NULL)
tool->context_switch = perf_event__process_switch;
+ if (tool->ksymbol == NULL)
+ tool->ksymbol = perf_event__process_ksymbol;
+ if (tool->bpf_event == NULL)
+ tool->bpf_event = perf_event__process_bpf_event;
if (tool->read == NULL)
tool->read = process_event_sample_stub;
if (tool->throttle == NULL)
@@ -694,7 +701,10 @@ static void perf_event__auxtrace_error_swap(union perf_event *event,
event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
+ event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt);
event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
+ if (event->auxtrace_error.fmt)
+ event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
}
static void perf_event__thread_map_swap(union perf_event *event,
@@ -1065,6 +1075,8 @@ static void dump_event(struct perf_evlist *evlist, union perf_event *event,
file_offset, event->header.size, event->header.type);
trace_event(event);
+ if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
+ evlist->trace_event_sample_raw(evlist, event, sample);
if (sample)
perf_evlist__print_tstamp(evlist, event, sample);
@@ -1188,6 +1200,13 @@ static int deliver_sample_value(struct perf_evlist *evlist,
return 0;
}
+ /*
+ * There's no reason to deliver a sample
+ * for a zero period, bail out.
+ */
+ if (!sample->period)
+ return 0;
+
return tool->sample(tool, event, sample, sid->evsel, machine);
}
@@ -1305,6 +1324,10 @@ static int machines__deliver_event(struct machines *machines,
case PERF_RECORD_SWITCH:
case PERF_RECORD_SWITCH_CPU_WIDE:
return tool->context_switch(tool, event, sample, machine);
+ case PERF_RECORD_KSYMBOL:
+ return tool->ksymbol(tool, event, sample, machine);
+ case PERF_RECORD_BPF_EVENT:
+ return tool->bpf_event(tool, event, sample, machine);
default:
++evlist->stats.nr_unknown_events;
return -1;
@@ -1820,38 +1843,35 @@ fetch_mmaped_event(struct perf_session *session,
#define NUM_MMAPS 128
#endif
-static int __perf_session__process_events(struct perf_session *session,
- u64 data_offset, u64 data_size,
- u64 file_size)
+struct reader {
+ int fd;
+ u64 data_size;
+ u64 data_offset;
+};
+
+static int
+reader__process_events(struct reader *rd, struct perf_session *session,
+ struct ui_progress *prog)
{
- struct ordered_events *oe = &session->ordered_events;
- struct perf_tool *tool = session->tool;
- int fd = perf_data__fd(session->data);
+ u64 data_size = rd->data_size;
u64 head, page_offset, file_offset, file_pos, size;
- int err, mmap_prot, mmap_flags, map_idx = 0;
+ int err = 0, mmap_prot, mmap_flags, map_idx = 0;
size_t mmap_size;
char *buf, *mmaps[NUM_MMAPS];
union perf_event *event;
- struct ui_progress prog;
s64 skip;
- perf_tool__fill_defaults(tool);
-
- page_offset = page_size * (data_offset / page_size);
+ page_offset = page_size * (rd->data_offset / page_size);
file_offset = page_offset;
- head = data_offset - page_offset;
-
- if (data_size == 0)
- goto out;
+ head = rd->data_offset - page_offset;
- if (data_offset + data_size < file_size)
- file_size = data_offset + data_size;
+ ui_progress__init_size(prog, data_size, "Processing events...");
- ui_progress__init_size(&prog, file_size, "Processing events...");
+ data_size += rd->data_offset;
mmap_size = MMAP_SIZE;
- if (mmap_size > file_size) {
- mmap_size = file_size;
+ if (mmap_size > data_size) {
+ mmap_size = data_size;
session->one_mmap = true;
}
@@ -1865,12 +1885,12 @@ static int __perf_session__process_events(struct perf_session *session,
mmap_flags = MAP_PRIVATE;
}
remap:
- buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
+ buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
file_offset);
if (buf == MAP_FAILED) {
pr_err("failed to mmap file\n");
err = -errno;
- goto out_err;
+ goto out;
}
mmaps[map_idx] = buf;
map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
@@ -1902,7 +1922,7 @@ more:
file_offset + head, event->header.size,
event->header.type);
err = -EINVAL;
- goto out_err;
+ goto out;
}
if (skip)
@@ -1911,15 +1931,40 @@ more:
head += size;
file_pos += size;
- ui_progress__update(&prog, size);
+ ui_progress__update(prog, size);
if (session_done())
goto out;
- if (file_pos < file_size)
+ if (file_pos < data_size)
goto more;
out:
+ return err;
+}
+
+static int __perf_session__process_events(struct perf_session *session)
+{
+ struct reader rd = {
+ .fd = perf_data__fd(session->data),
+ .data_size = session->header.data_size,
+ .data_offset = session->header.data_offset,
+ };
+ struct ordered_events *oe = &session->ordered_events;
+ struct perf_tool *tool = session->tool;
+ struct ui_progress prog;
+ int err;
+
+ perf_tool__fill_defaults(tool);
+
+ if (rd.data_size == 0)
+ return -1;
+
+ ui_progress__init_size(&prog, rd.data_size, "Processing events...");
+
+ err = reader__process_events(&rd, session, &prog);
+ if (err)
+ goto out_err;
/* do the final flush for ordered samples */
err = ordered_events__flush(oe, OE_FLUSH__FINAL);
if (err)
@@ -1944,20 +1989,13 @@ out_err:
int perf_session__process_events(struct perf_session *session)
{
- u64 size = perf_data__size(session->data);
- int err;
-
if (perf_session__register_idle_thread(session) < 0)
return -ENOMEM;
- if (!perf_data__is_pipe(session->data))
- err = __perf_session__process_events(session,
- session->header.data_offset,
- session->header.data_size, size);
- else
- err = __perf_session__process_pipe_events(session);
+ if (perf_data__is_pipe(session->data))
+ return __perf_session__process_pipe_events(session);
- return err;
+ return __perf_session__process_events(session);
}
bool perf_session__has_traces(struct perf_session *session, const char *msg)
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
index 64d1f36dee99..5b5a167b43ce 100644
--- a/tools/perf/util/setup.py
+++ b/tools/perf/util/setup.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-
from os import getenv
from subprocess import Popen, PIPE
from re import sub
@@ -55,9 +53,14 @@ ext_sources = [f.strip() for f in open('util/python-ext-sources')
# use full paths with source files
ext_sources = list(map(lambda x: '%s/%s' % (src_perf, x) , ext_sources))
+extra_libraries = []
+if '-DHAVE_LIBNUMA_SUPPORT' in cflags:
+ extra_libraries = [ 'numa' ]
+
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
+ libraries = extra_libraries,
extra_compile_args = cflags,
extra_objects = [libtraceevent, libapikfs],
)
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 6c1a83768eb0..d2299e912e59 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -6,6 +6,7 @@
#include "sort.h"
#include "hist.h"
#include "comm.h"
+#include "map.h"
#include "symbol.h"
#include "thread.h"
#include "evsel.h"
@@ -230,8 +231,14 @@ static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
if (sym_l == sym_r)
return 0;
- if (sym_l->inlined || sym_r->inlined)
- return strcmp(sym_l->name, sym_r->name);
+ if (sym_l->inlined || sym_r->inlined) {
+ int ret = strcmp(sym_l->name, sym_r->name);
+
+ if (ret)
+ return ret;
+ if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
+ return 0;
+ }
if (sym_l->start != sym_r->start)
return (int64_t)(sym_r->start - sym_l->start);
@@ -428,8 +435,6 @@ static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
{
struct symbol *sym = he->ms.sym;
- struct map *map = he->ms.map;
- struct perf_evsel *evsel = hists_to_evsel(he->hists);
struct annotation *notes;
double ipc = 0.0, coverage = 0.0;
char tmp[64];
@@ -437,11 +442,6 @@ static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
if (!sym)
return repsep_snprintf(bf, size, "%-*s", width, "-");
- if (!sym->annotate2 && symbol__annotate2(sym, map, evsel,
- &annotation__default_options, NULL) < 0) {
- return 0;
- }
-
notes = symbol__annotation(sym);
if (notes->hit_cycles)
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 130fe37fe2df..2fbee0b1011c 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -9,7 +9,8 @@
#include <linux/list.h>
#include "cache.h"
#include <linux/rbtree.h>
-#include "symbol.h"
+#include "map_symbol.h"
+#include "symbol_conf.h"
#include "string.h"
#include "callchain.h"
#include "values.h"
@@ -145,8 +146,8 @@ struct hist_entry {
union {
/* this is for hierarchical entry structure */
struct {
- struct rb_root hroot_in;
- struct rb_root hroot_out;
+ struct rb_root_cached hroot_in;
+ struct rb_root_cached hroot_out;
}; /* non-leaf entries */
struct rb_root sorted_chain; /* leaf entry has callchains */
};
diff --git a/tools/perf/util/srccode.h b/tools/perf/util/srccode.h
index e500a746d5f1..1b5ed769779c 100644
--- a/tools/perf/util/srccode.h
+++ b/tools/perf/util/srccode.h
@@ -1,6 +1,19 @@
#ifndef SRCCODE_H
#define SRCCODE_H 1
+struct srccode_state {
+ char *srcfile;
+ unsigned line;
+};
+
+static inline void srccode_state_init(struct srccode_state *state)
+{
+ state->srcfile = NULL;
+ state->line = 0;
+}
+
+void srccode_state_free(struct srccode_state *state);
+
/* Result is not 0 terminated */
char *find_sourceline(char *fn, unsigned line, int *lenp);
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index dc86597d0cc4..10ca1533937e 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -104,7 +104,7 @@ static struct symbol *new_inline_sym(struct dso *dso,
} else {
/* create a fake symbol for the inline frame */
inline_sym = symbol__new(base_sym ? base_sym->start : 0,
- base_sym ? base_sym->end : 0,
+ base_sym ? (base_sym->end - base_sym->start) : 0,
base_sym ? base_sym->binding : 0,
base_sym ? base_sym->type : 0,
funcname);
@@ -594,11 +594,12 @@ struct srcline_node {
struct rb_node rb_node;
};
-void srcline__tree_insert(struct rb_root *tree, u64 addr, char *srcline)
+void srcline__tree_insert(struct rb_root_cached *tree, u64 addr, char *srcline)
{
- struct rb_node **p = &tree->rb_node;
+ struct rb_node **p = &tree->rb_root.rb_node;
struct rb_node *parent = NULL;
struct srcline_node *i, *node;
+ bool leftmost = true;
node = zalloc(sizeof(struct srcline_node));
if (!node) {
@@ -614,16 +615,18 @@ void srcline__tree_insert(struct rb_root *tree, u64 addr, char *srcline)
i = rb_entry(parent, struct srcline_node, rb_node);
if (addr < i->addr)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&node->rb_node, parent, p);
- rb_insert_color(&node->rb_node, tree);
+ rb_insert_color_cached(&node->rb_node, tree, leftmost);
}
-char *srcline__tree_find(struct rb_root *tree, u64 addr)
+char *srcline__tree_find(struct rb_root_cached *tree, u64 addr)
{
- struct rb_node *n = tree->rb_node;
+ struct rb_node *n = tree->rb_root.rb_node;
while (n) {
struct srcline_node *i = rb_entry(n, struct srcline_node,
@@ -640,15 +643,15 @@ char *srcline__tree_find(struct rb_root *tree, u64 addr)
return NULL;
}
-void srcline__tree_delete(struct rb_root *tree)
+void srcline__tree_delete(struct rb_root_cached *tree)
{
struct srcline_node *pos;
- struct rb_node *next = rb_first(tree);
+ struct rb_node *next = rb_first_cached(tree);
while (next) {
pos = rb_entry(next, struct srcline_node, rb_node);
next = rb_next(&pos->rb_node);
- rb_erase(&pos->rb_node, tree);
+ rb_erase_cached(&pos->rb_node, tree);
free_srcline(pos->srcline);
zfree(&pos);
}
@@ -682,28 +685,32 @@ void inline_node__delete(struct inline_node *node)
free(node);
}
-void inlines__tree_insert(struct rb_root *tree, struct inline_node *inlines)
+void inlines__tree_insert(struct rb_root_cached *tree,
+ struct inline_node *inlines)
{
- struct rb_node **p = &tree->rb_node;
+ struct rb_node **p = &tree->rb_root.rb_node;
struct rb_node *parent = NULL;
const u64 addr = inlines->addr;
struct inline_node *i;
+ bool leftmost = true;
while (*p != NULL) {
parent = *p;
i = rb_entry(parent, struct inline_node, rb_node);
if (addr < i->addr)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&inlines->rb_node, parent, p);
- rb_insert_color(&inlines->rb_node, tree);
+ rb_insert_color_cached(&inlines->rb_node, tree, leftmost);
}
-struct inline_node *inlines__tree_find(struct rb_root *tree, u64 addr)
+struct inline_node *inlines__tree_find(struct rb_root_cached *tree, u64 addr)
{
- struct rb_node *n = tree->rb_node;
+ struct rb_node *n = tree->rb_root.rb_node;
while (n) {
struct inline_node *i = rb_entry(n, struct inline_node,
@@ -720,15 +727,15 @@ struct inline_node *inlines__tree_find(struct rb_root *tree, u64 addr)
return NULL;
}
-void inlines__tree_delete(struct rb_root *tree)
+void inlines__tree_delete(struct rb_root_cached *tree)
{
struct inline_node *pos;
- struct rb_node *next = rb_first(tree);
+ struct rb_node *next = rb_first_cached(tree);
while (next) {
pos = rb_entry(next, struct inline_node, rb_node);
next = rb_next(&pos->rb_node);
- rb_erase(&pos->rb_node, tree);
+ rb_erase_cached(&pos->rb_node, tree);
inline_node__delete(pos);
}
}
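
For illustration (not part of the patch): the srcline and inlines trees above
move from rb_root to rb_root_cached, which also caches the leftmost node so
rb_first_cached() is O(1). The sketch below isolates the insertion pattern
repeated throughout these conversions; it assumes the tools/include rbtree API
(rb_root_cached, rb_link_node, rb_insert_color_cached) when built in-tree.

	#include <stdbool.h>
	#include <linux/rbtree.h>

	struct demo_node {
		struct rb_node rb_node;
		unsigned long long key;
	};

	static void demo_tree_insert(struct rb_root_cached *tree,
				     struct demo_node *node)
	{
		struct rb_node **p = &tree->rb_root.rb_node;
		struct rb_node *parent = NULL;
		bool leftmost = true;	/* true only while we never go right */

		while (*p != NULL) {
			struct demo_node *i;

			parent = *p;
			i = rb_entry(parent, struct demo_node, rb_node);
			if (node->key < i->key) {
				p = &(*p)->rb_left;
			} else {
				p = &(*p)->rb_right;
				leftmost = false;
			}
		}
		rb_link_node(&node->rb_node, parent, p);
		/* The leftmost hint lets the cached root track the minimum
		 * node, so rb_first_cached() needs no tree walk. */
		rb_insert_color_cached(&node->rb_node, tree, leftmost);
	}
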
diff --git a/tools/perf/util/srcline.h b/tools/perf/util/srcline.h
index 5762212dc342..b11a0aaaa676 100644
--- a/tools/perf/util/srcline.h
+++ b/tools/perf/util/srcline.h
@@ -19,11 +19,11 @@ void free_srcline(char *srcline);
char *get_srcline_split(struct dso *dso, u64 addr, unsigned *line);
/* insert the srcline into the DSO, which will take ownership */
-void srcline__tree_insert(struct rb_root *tree, u64 addr, char *srcline);
+void srcline__tree_insert(struct rb_root_cached *tree, u64 addr, char *srcline);
/* find previously inserted srcline */
-char *srcline__tree_find(struct rb_root *tree, u64 addr);
+char *srcline__tree_find(struct rb_root_cached *tree, u64 addr);
/* delete all srclines within the tree */
-void srcline__tree_delete(struct rb_root *tree);
+void srcline__tree_delete(struct rb_root_cached *tree);
#define SRCLINE_UNKNOWN ((char *) "??:0")
@@ -46,10 +46,11 @@ struct inline_node *dso__parse_addr_inlines(struct dso *dso, u64 addr,
void inline_node__delete(struct inline_node *node);
/* insert the inline node list into the DSO, which will take ownership */
-void inlines__tree_insert(struct rb_root *tree, struct inline_node *inlines);
+void inlines__tree_insert(struct rb_root_cached *tree,
+ struct inline_node *inlines);
/* find previously inserted inline node list */
-struct inline_node *inlines__tree_find(struct rb_root *tree, u64 addr);
+struct inline_node *inlines__tree_find(struct rb_root_cached *tree, u64 addr);
/* delete all nodes within the tree of inline_node s */
-void inlines__tree_delete(struct rb_root *tree);
+void inlines__tree_delete(struct rb_root_cached *tree);
#endif /* PERF_SRCLINE_H */
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index 665ee374fc01..6d043c78f3c2 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -2,6 +2,7 @@
#include <inttypes.h>
#include <linux/time64.h>
#include <math.h>
+#include "color.h"
#include "evlist.h"
#include "evsel.h"
#include "stat.h"
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 3c22c58b3e90..83d8094be4fe 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -168,7 +168,7 @@ static void reset_stat(struct runtime_stat *st)
struct rb_node *pos, *next;
rblist = &st->value_list;
- next = rb_first(&rblist->entries);
+ next = rb_first_cached(&rblist->entries);
while (next) {
pos = next;
next = rb_next(pos);
diff --git a/tools/perf/util/strlist.h b/tools/perf/util/strlist.h
index d58f1e08b170..7e82c71dcc42 100644
--- a/tools/perf/util/strlist.h
+++ b/tools/perf/util/strlist.h
@@ -57,7 +57,7 @@ static inline unsigned int strlist__nr_entries(const struct strlist *slist)
/* For strlist iteration */
static inline struct str_node *strlist__first(struct strlist *slist)
{
- struct rb_node *rn = rb_first(&slist->rblist.entries);
+ struct rb_node *rn = rb_first_cached(&slist->rblist.entries);
return rn ? rb_entry(rn, struct str_node, rb_node) : NULL;
}
static inline struct str_node *strlist__next(struct str_node *sn)
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 66a84d5846c8..4ad106a5f2c0 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -6,6 +6,8 @@
#include <unistd.h>
#include <inttypes.h>
+#include "map.h"
+#include "map_groups.h"
#include "symbol.h"
#include "demangle-java.h"
#include "demangle-rust.h"
@@ -19,6 +21,20 @@
#define EM_AARCH64 183 /* ARM 64 bit */
#endif
+#ifndef ELF32_ST_VISIBILITY
+#define ELF32_ST_VISIBILITY(o) ((o) & 0x03)
+#endif
+
+/* For ELF64 the definitions are the same. */
+#ifndef ELF64_ST_VISIBILITY
+#define ELF64_ST_VISIBILITY(o) ELF32_ST_VISIBILITY (o)
+#endif
+
+/* How to extract information held in the st_other field. */
+#ifndef GELF_ST_VISIBILITY
+#define GELF_ST_VISIBILITY(val) ELF64_ST_VISIBILITY (val)
+#endif
+
typedef Elf64_Nhdr GElf_Nhdr;
#ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
@@ -87,6 +103,11 @@ static inline uint8_t elf_sym__type(const GElf_Sym *sym)
return GELF_ST_TYPE(sym->st_info);
}
+static inline uint8_t elf_sym__visibility(const GElf_Sym *sym)
+{
+ return GELF_ST_VISIBILITY(sym->st_other);
+}
+
#ifndef STT_GNU_IFUNC
#define STT_GNU_IFUNC 10
#endif
@@ -111,7 +132,9 @@ static inline int elf_sym__is_label(const GElf_Sym *sym)
return elf_sym__type(sym) == STT_NOTYPE &&
sym->st_name != 0 &&
sym->st_shndx != SHN_UNDEF &&
- sym->st_shndx != SHN_ABS;
+ sym->st_shndx != SHN_ABS &&
+ elf_sym__visibility(sym) != STV_HIDDEN &&
+ elf_sym__visibility(sym) != STV_INTERNAL;
}
static bool elf_sym__filter(GElf_Sym *sym)
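
For illustration (not part of the patch): the change above stops treating
STV_HIDDEN and STV_INTERNAL symbols as labels, using GELF_ST_VISIBILITY() with
fallback definitions for older libelf headers. A standalone sketch of the
check, assuming only <gelf.h>:

	#include <gelf.h>
	#include <stdbool.h>

	#ifndef GELF_ST_VISIBILITY
	#define GELF_ST_VISIBILITY(o)	((o) & 0x03)	/* same fallback as above */
	#endif

	static bool sym_visibility_ok(const GElf_Sym *sym)
	{
		unsigned char vis = GELF_ST_VISIBILITY(sym->st_other);

		/* Hidden/internal symbols are local-only; skip them. */
		return vis != STV_HIDDEN && vis != STV_INTERNAL;
	}
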
diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c
index 7119df77dc0b..17edbd4f6f85 100644
--- a/tools/perf/util/symbol-minimal.c
+++ b/tools/perf/util/symbol-minimal.c
@@ -3,6 +3,7 @@
#include "util.h"
#include <errno.h>
+#include <unistd.h>
#include <stdio.h>
#include <fcntl.h>
#include <string.h>
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 48efad6d0f90..758bf5f74e6e 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -17,6 +17,7 @@
#include "util.h"
#include "debug.h"
#include "machine.h"
+#include "map.h"
#include "symbol.h"
#include "strlist.h"
#include "intlist.h"
@@ -163,7 +164,7 @@ static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
return arch__choose_best_symbol(syma, symb);
}
-void symbols__fixup_duplicate(struct rb_root *symbols)
+void symbols__fixup_duplicate(struct rb_root_cached *symbols)
{
struct rb_node *nd;
struct symbol *curr, *next;
@@ -171,7 +172,7 @@ void symbols__fixup_duplicate(struct rb_root *symbols)
if (symbol_conf.allow_aliases)
return;
- nd = rb_first(symbols);
+ nd = rb_first_cached(symbols);
while (nd) {
curr = rb_entry(nd, struct symbol, rb_node);
@@ -186,20 +187,20 @@ again:
continue;
if (choose_best_symbol(curr, next) == SYMBOL_A) {
- rb_erase(&next->rb_node, symbols);
+ rb_erase_cached(&next->rb_node, symbols);
symbol__delete(next);
goto again;
} else {
nd = rb_next(&curr->rb_node);
- rb_erase(&curr->rb_node, symbols);
+ rb_erase_cached(&curr->rb_node, symbols);
symbol__delete(curr);
}
}
}
-void symbols__fixup_end(struct rb_root *symbols)
+void symbols__fixup_end(struct rb_root_cached *symbols)
{
- struct rb_node *nd, *prevnd = rb_first(symbols);
+ struct rb_node *nd, *prevnd = rb_first_cached(symbols);
struct symbol *curr, *prev;
if (prevnd == NULL)
@@ -282,25 +283,27 @@ void symbol__delete(struct symbol *sym)
free(((void *)sym) - symbol_conf.priv_size);
}
-void symbols__delete(struct rb_root *symbols)
+void symbols__delete(struct rb_root_cached *symbols)
{
struct symbol *pos;
- struct rb_node *next = rb_first(symbols);
+ struct rb_node *next = rb_first_cached(symbols);
while (next) {
pos = rb_entry(next, struct symbol, rb_node);
next = rb_next(&pos->rb_node);
- rb_erase(&pos->rb_node, symbols);
+ rb_erase_cached(&pos->rb_node, symbols);
symbol__delete(pos);
}
}
-void __symbols__insert(struct rb_root *symbols, struct symbol *sym, bool kernel)
+void __symbols__insert(struct rb_root_cached *symbols,
+ struct symbol *sym, bool kernel)
{
- struct rb_node **p = &symbols->rb_node;
+ struct rb_node **p = &symbols->rb_root.rb_node;
struct rb_node *parent = NULL;
const u64 ip = sym->start;
struct symbol *s;
+ bool leftmost = true;
if (kernel) {
const char *name = sym->name;
@@ -318,26 +321,28 @@ void __symbols__insert(struct rb_root *symbols, struct symbol *sym, bool kernel)
s = rb_entry(parent, struct symbol, rb_node);
if (ip < s->start)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&sym->rb_node, parent, p);
- rb_insert_color(&sym->rb_node, symbols);
+ rb_insert_color_cached(&sym->rb_node, symbols, leftmost);
}
-void symbols__insert(struct rb_root *symbols, struct symbol *sym)
+void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym)
{
__symbols__insert(symbols, sym, false);
}
-static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
+static struct symbol *symbols__find(struct rb_root_cached *symbols, u64 ip)
{
struct rb_node *n;
if (symbols == NULL)
return NULL;
- n = symbols->rb_node;
+ n = symbols->rb_root.rb_node;
while (n) {
struct symbol *s = rb_entry(n, struct symbol, rb_node);
@@ -353,9 +358,9 @@ static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
return NULL;
}
-static struct symbol *symbols__first(struct rb_root *symbols)
+static struct symbol *symbols__first(struct rb_root_cached *symbols)
{
- struct rb_node *n = rb_first(symbols);
+ struct rb_node *n = rb_first_cached(symbols);
if (n)
return rb_entry(n, struct symbol, rb_node);
@@ -363,9 +368,9 @@ static struct symbol *symbols__first(struct rb_root *symbols)
return NULL;
}
-static struct symbol *symbols__last(struct rb_root *symbols)
+static struct symbol *symbols__last(struct rb_root_cached *symbols)
{
- struct rb_node *n = rb_last(symbols);
+ struct rb_node *n = rb_last(&symbols->rb_root);
if (n)
return rb_entry(n, struct symbol, rb_node);
@@ -383,11 +388,12 @@ static struct symbol *symbols__next(struct symbol *sym)
return NULL;
}
-static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym)
+static void symbols__insert_by_name(struct rb_root_cached *symbols, struct symbol *sym)
{
- struct rb_node **p = &symbols->rb_node;
+ struct rb_node **p = &symbols->rb_root.rb_node;
struct rb_node *parent = NULL;
struct symbol_name_rb_node *symn, *s;
+ bool leftmost = true;
symn = container_of(sym, struct symbol_name_rb_node, sym);
@@ -396,19 +402,21 @@ static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym)
s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
if (strcmp(sym->name, s->sym.name) < 0)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&symn->rb_node, parent, p);
- rb_insert_color(&symn->rb_node, symbols);
+ rb_insert_color_cached(&symn->rb_node, symbols, leftmost);
}
-static void symbols__sort_by_name(struct rb_root *symbols,
- struct rb_root *source)
+static void symbols__sort_by_name(struct rb_root_cached *symbols,
+ struct rb_root_cached *source)
{
struct rb_node *nd;
- for (nd = rb_first(source); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) {
struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
symbols__insert_by_name(symbols, pos);
}
@@ -431,7 +439,7 @@ int symbol__match_symbol_name(const char *name, const char *str,
return arch__compare_symbol_names(name, str);
}
-static struct symbol *symbols__find_by_name(struct rb_root *symbols,
+static struct symbol *symbols__find_by_name(struct rb_root_cached *symbols,
const char *name,
enum symbol_tag_include includes)
{
@@ -441,7 +449,7 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
if (symbols == NULL)
return NULL;
- n = symbols->rb_node;
+ n = symbols->rb_root.rb_node;
while (n) {
int cmp;
@@ -644,7 +652,7 @@ static int map__process_kallsym_symbol(void *arg, const char *name,
{
struct symbol *sym;
struct dso *dso = arg;
- struct rb_root *root = &dso->symbols;
+ struct rb_root_cached *root = &dso->symbols;
if (!symbol_type__filter(type))
return 0;
@@ -681,14 +689,14 @@ static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct
struct map *curr_map;
struct symbol *pos;
int count = 0;
- struct rb_root old_root = dso->symbols;
- struct rb_root *root = &dso->symbols;
- struct rb_node *next = rb_first(root);
+ struct rb_root_cached old_root = dso->symbols;
+ struct rb_root_cached *root = &dso->symbols;
+ struct rb_node *next = rb_first_cached(root);
if (!kmaps)
return -1;
- *root = RB_ROOT;
+ *root = RB_ROOT_CACHED;
while (next) {
char *module;
@@ -696,8 +704,8 @@ static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct
pos = rb_entry(next, struct symbol, rb_node);
next = rb_next(&pos->rb_node);
- rb_erase_init(&pos->rb_node, &old_root);
-
+ rb_erase_cached(&pos->rb_node, &old_root);
+ RB_CLEAR_NODE(&pos->rb_node);
module = strchr(pos->name, '\t');
if (module)
*module = '\0';
@@ -710,6 +718,8 @@ static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct
}
pos->start -= curr_map->start - curr_map->pgoff;
+ if (pos->end > curr_map->end)
+ pos->end = curr_map->end;
if (pos->end)
pos->end -= curr_map->start - curr_map->pgoff;
symbols__insert(&curr_map->dso->symbols, pos);
@@ -734,8 +744,8 @@ static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso,
struct map *curr_map = initial_map;
struct symbol *pos;
int count = 0, moved = 0;
- struct rb_root *root = &dso->symbols;
- struct rb_node *next = rb_first(root);
+ struct rb_root_cached *root = &dso->symbols;
+ struct rb_node *next = rb_first_cached(root);
int kernel_range = 0;
bool x86_64;
@@ -849,7 +859,7 @@ static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso,
}
add_symbol:
if (curr_map != initial_map) {
- rb_erase(&pos->rb_node, root);
+ rb_erase_cached(&pos->rb_node, root);
symbols__insert(&curr_map->dso->symbols, pos);
++moved;
} else
@@ -857,7 +867,7 @@ add_symbol:
continue;
discard_symbol:
- rb_erase(&pos->rb_node, root);
+ rb_erase_cached(&pos->rb_node, root);
symbol__delete(pos);
}
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 14d9d438e7e2..9a8fe012910a 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -5,16 +5,13 @@
#include <linux/types.h>
#include <stdbool.h>
#include <stdint.h>
-#include "map.h"
-#include "../perf.h"
#include <linux/list.h>
#include <linux/rbtree.h>
#include <stdio.h>
-#include <byteswap.h>
-#include <libgen.h>
-#include "build-id.h"
-#include "event.h"
+#include "map_symbol.h"
+#include "branch.h"
#include "path.h"
+#include "symbol_conf.h"
#ifdef HAVE_LIBELF_SUPPORT
#include <libelf.h>
@@ -24,6 +21,10 @@
#include "dso.h"
+struct map;
+struct map_groups;
+struct option;
+
/*
* libelf 0.8.x and earlier do not support ELF_C_READ_MMAP;
* for newer versions we can use mmap to reduce memory usage:
@@ -68,7 +69,7 @@ struct symbol {
};
void symbol__delete(struct symbol *sym);
-void symbols__delete(struct rb_root *symbols);
+void symbols__delete(struct rb_root_cached *symbols);
/* symbols__for_each_entry - iterate over symbols (rb_root)
*
@@ -77,7 +78,7 @@ void symbols__delete(struct rb_root *symbols);
* @nd: the 'struct rb_node *' to use as a temporary storage
*/
#define symbols__for_each_entry(symbols, pos, nd) \
- for (nd = rb_first(symbols); \
+ for (nd = rb_first_cached(symbols); \
nd && (pos = rb_entry(nd, struct symbol, rb_node)); \
nd = rb_next(nd))
@@ -89,69 +90,6 @@ static inline size_t symbol__size(const struct symbol *sym)
struct strlist;
struct intlist;
-struct symbol_conf {
- unsigned short priv_size;
- bool try_vmlinux_path,
- init_annotation,
- force,
- ignore_vmlinux,
- ignore_vmlinux_buildid,
- show_kernel_path,
- use_modules,
- allow_aliases,
- sort_by_name,
- show_nr_samples,
- show_total_period,
- use_callchain,
- cumulate_callchain,
- show_branchflag_count,
- exclude_other,
- show_cpu_utilization,
- initialized,
- kptr_restrict,
- event_group,
- demangle,
- demangle_kernel,
- filter_relative,
- show_hist_headers,
- branch_callstack,
- has_filter,
- show_ref_callgraph,
- hide_unresolved,
- raw_trace,
- report_hierarchy,
- inline_name;
- const char *vmlinux_name,
- *kallsyms_name,
- *source_prefix,
- *field_sep,
- *graph_function;
- const char *default_guest_vmlinux_name,
- *default_guest_kallsyms,
- *default_guest_modules;
- const char *guestmount;
- const char *dso_list_str,
- *comm_list_str,
- *pid_list_str,
- *tid_list_str,
- *sym_list_str,
- *col_width_list_str,
- *bt_stop_list_str;
- struct strlist *dso_list,
- *comm_list,
- *sym_list,
- *dso_from_list,
- *dso_to_list,
- *sym_from_list,
- *sym_to_list,
- *bt_stop_list;
- struct intlist *pid_list,
- *tid_list;
- const char *symfs;
-};
-
-extern struct symbol_conf symbol_conf;
-
struct symbol_name_rb_node {
struct rb_node rb_node;
struct symbol sym;
@@ -178,19 +116,6 @@ struct ref_reloc_sym {
u64 unrelocated_addr;
};
-struct map_symbol {
- struct map *map;
- struct symbol *sym;
-};
-
-struct addr_map_symbol {
- struct map *map;
- struct symbol *sym;
- u64 addr;
- u64 al_addr;
- u64 phys_addr;
-};
-
struct branch_info {
struct addr_map_symbol from;
struct addr_map_symbol to;
@@ -310,10 +235,11 @@ int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss);
char *dso__demangle_sym(struct dso *dso, int kmodule, const char *elf_name);
-void __symbols__insert(struct rb_root *symbols, struct symbol *sym, bool kernel);
-void symbols__insert(struct rb_root *symbols, struct symbol *sym);
-void symbols__fixup_duplicate(struct rb_root *symbols);
-void symbols__fixup_end(struct rb_root *symbols);
+void __symbols__insert(struct rb_root_cached *symbols, struct symbol *sym,
+ bool kernel);
+void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym);
+void symbols__fixup_duplicate(struct rb_root_cached *symbols);
+void symbols__fixup_end(struct rb_root_cached *symbols);
void map_groups__fixup_end(struct map_groups *mg);
typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data);
diff --git a/tools/perf/util/symbol_conf.h b/tools/perf/util/symbol_conf.h
new file mode 100644
index 000000000000..fffea68c1203
--- /dev/null
+++ b/tools/perf/util/symbol_conf.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_SYMBOL_CONF
+#define __PERF_SYMBOL_CONF 1
+
+#include <stdbool.h>
+
+struct strlist;
+struct intlist;
+
+struct symbol_conf {
+ unsigned short priv_size;
+ bool try_vmlinux_path,
+ init_annotation,
+ force,
+ ignore_vmlinux,
+ ignore_vmlinux_buildid,
+ show_kernel_path,
+ use_modules,
+ allow_aliases,
+ sort_by_name,
+ show_nr_samples,
+ show_total_period,
+ use_callchain,
+ cumulate_callchain,
+ show_branchflag_count,
+ exclude_other,
+ show_cpu_utilization,
+ initialized,
+ kptr_restrict,
+ event_group,
+ demangle,
+ demangle_kernel,
+ filter_relative,
+ show_hist_headers,
+ branch_callstack,
+ has_filter,
+ show_ref_callgraph,
+ hide_unresolved,
+ raw_trace,
+ report_hierarchy,
+ inline_name;
+ const char *vmlinux_name,
+ *kallsyms_name,
+ *source_prefix,
+ *field_sep,
+ *graph_function;
+ const char *default_guest_vmlinux_name,
+ *default_guest_kallsyms,
+ *default_guest_modules;
+ const char *guestmount;
+ const char *dso_list_str,
+ *comm_list_str,
+ *pid_list_str,
+ *tid_list_str,
+ *sym_list_str,
+ *col_width_list_str,
+ *bt_stop_list_str;
+ struct strlist *dso_list,
+ *comm_list,
+ *sym_list,
+ *dso_from_list,
+ *dso_to_list,
+ *sym_from_list,
+ *sym_to_list,
+ *bt_stop_list;
+ struct intlist *pid_list,
+ *tid_list;
+ const char *symfs;
+};
+
+extern struct symbol_conf symbol_conf;
+
+#endif // __PERF_SYMBOL_CONF
diff --git a/tools/perf/util/symbol_fprintf.c b/tools/perf/util/symbol_fprintf.c
index ed0205cc7942..02e89b02c2ce 100644
--- a/tools/perf/util/symbol_fprintf.c
+++ b/tools/perf/util/symbol_fprintf.c
@@ -3,6 +3,7 @@
#include <inttypes.h>
#include <stdio.h>
+#include "map.h"
#include "symbol.h"
size_t symbol__fprintf(struct symbol *sym, FILE *fp)
@@ -64,7 +65,7 @@ size_t dso__fprintf_symbols_by_name(struct dso *dso,
struct rb_node *nd;
struct symbol_name_rb_node *pos;
- for (nd = rb_first(&dso->symbol_names); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&dso->symbol_names); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
fprintf(fp, "%s\n", pos->sym.name);
}
diff --git a/tools/perf/util/thread-stack.c b/tools/perf/util/thread-stack.c
index d52f27f373ce..41942c2aaa18 100644
--- a/tools/perf/util/thread-stack.c
+++ b/tools/perf/util/thread-stack.c
@@ -20,6 +20,7 @@
#include "thread.h"
#include "event.h"
#include "machine.h"
+#include "env.h"
#include "util.h"
#include "debug.h"
#include "symbol.h"
@@ -29,24 +30,41 @@
#define STACK_GROWTH 2048
+/*
+ * State of retpoline detection.
+ *
+ * RETPOLINE_NONE: no retpoline detection
+ * X86_RETPOLINE_POSSIBLE: x86 retpoline possible
+ * X86_RETPOLINE_DETECTED: x86 retpoline detected
+ */
+enum retpoline_state_t {
+ RETPOLINE_NONE,
+ X86_RETPOLINE_POSSIBLE,
+ X86_RETPOLINE_DETECTED,
+};
+
/**
* struct thread_stack_entry - thread stack entry.
* @ret_addr: return address
* @timestamp: timestamp (if known)
* @ref: external reference (e.g. db_id of sample)
* @branch_count: the branch count when the entry was created
+ * @db_id: id used for db-export
* @cp: call path
* @no_call: a 'call' was not seen
* @trace_end: a 'call' but trace ended
+ * @non_call: a branch but not a 'call' to the start of a different symbol
*/
struct thread_stack_entry {
u64 ret_addr;
u64 timestamp;
u64 ref;
u64 branch_count;
+ u64 db_id;
struct call_path *cp;
bool no_call;
bool trace_end;
+ bool non_call;
};
/**
@@ -62,6 +80,7 @@ struct thread_stack_entry {
* @crp: call/return processor
* @comm: current comm
* @arr_sz: size of array if this is the first element of an array
+ * @rstate: used to detect retpolines
*/
struct thread_stack {
struct thread_stack_entry *stack;
@@ -74,6 +93,7 @@ struct thread_stack {
struct call_return_processor *crp;
struct comm *comm;
unsigned int arr_sz;
+ enum retpoline_state_t rstate;
};
/*
@@ -113,10 +133,16 @@ static int thread_stack__init(struct thread_stack *ts, struct thread *thread,
if (err)
return err;
- if (thread->mg && thread->mg->machine)
- ts->kernel_start = machine__kernel_start(thread->mg->machine);
- else
+ if (thread->mg && thread->mg->machine) {
+ struct machine *machine = thread->mg->machine;
+ const char *arch = perf_env__arch(machine->env);
+
+ ts->kernel_start = machine__kernel_start(machine);
+ if (!strcmp(arch, "x86"))
+ ts->rstate = X86_RETPOLINE_POSSIBLE;
+ } else {
ts->kernel_start = 1ULL << 63;
+ }
ts->crp = crp;
return 0;
@@ -256,20 +282,31 @@ static int thread_stack__call_return(struct thread *thread,
.comm = ts->comm,
.db_id = 0,
};
+ u64 *parent_db_id;
tse = &ts->stack[idx];
cr.cp = tse->cp;
cr.call_time = tse->timestamp;
cr.return_time = timestamp;
cr.branch_count = ts->branch_count - tse->branch_count;
+ cr.db_id = tse->db_id;
cr.call_ref = tse->ref;
cr.return_ref = ref;
if (tse->no_call)
cr.flags |= CALL_RETURN_NO_CALL;
if (no_return)
cr.flags |= CALL_RETURN_NO_RETURN;
+ if (tse->non_call)
+ cr.flags |= CALL_RETURN_NON_CALL;
+
+ /*
+ * The parent db_id must be assigned before exporting the child. Note
+ * it is not possible to export the parent first because its information
+	 * is not complete until its 'return' has been processed.
+ */
+ parent_db_id = idx ? &(tse - 1)->db_id : NULL;
- return crp->process(&cr, crp->data);
+ return crp->process(&cr, parent_db_id, crp->data);
}
static int __thread_stack__flush(struct thread *thread, struct thread_stack *ts)
@@ -458,7 +495,7 @@ void thread_stack__sample(struct thread *thread, int cpu,
}
struct call_return_processor *
-call_return_processor__new(int (*process)(struct call_return *cr, void *data),
+call_return_processor__new(int (*process)(struct call_return *cr, u64 *parent_db_id, void *data),
void *data)
{
struct call_return_processor *crp;
@@ -493,6 +530,9 @@ static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
struct thread_stack_entry *tse;
int err;
+ if (!cp)
+ return -ENOMEM;
+
if (ts->cnt == ts->sz) {
err = thread_stack__grow(ts);
if (err)
@@ -507,6 +547,8 @@ static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
tse->cp = cp;
tse->no_call = no_call;
tse->trace_end = trace_end;
+ tse->non_call = false;
+ tse->db_id = 0;
return 0;
}
@@ -528,14 +570,16 @@ static int thread_stack__pop_cp(struct thread *thread, struct thread_stack *ts,
timestamp, ref, false);
}
- if (ts->stack[ts->cnt - 1].ret_addr == ret_addr) {
+ if (ts->stack[ts->cnt - 1].ret_addr == ret_addr &&
+ !ts->stack[ts->cnt - 1].non_call) {
return thread_stack__call_return(thread, ts, --ts->cnt,
timestamp, ref, false);
} else {
size_t i = ts->cnt - 1;
while (i--) {
- if (ts->stack[i].ret_addr != ret_addr)
+ if (ts->stack[i].ret_addr != ret_addr ||
+ ts->stack[i].non_call)
continue;
i += 1;
while (ts->cnt > i) {
@@ -576,8 +620,6 @@ static int thread_stack__bottom(struct thread_stack *ts,
cp = call_path__findnew(cpr, &cpr->call_path, sym, ip,
ts->kernel_start);
- if (!cp)
- return -ENOMEM;
return thread_stack__push_cp(ts, ip, sample->time, ref, cp,
true, false);
@@ -590,36 +632,36 @@ static int thread_stack__no_call_return(struct thread *thread,
struct addr_location *to_al, u64 ref)
{
struct call_path_root *cpr = ts->crp->cpr;
+ struct call_path *root = &cpr->call_path;
+ struct symbol *fsym = from_al->sym;
+ struct symbol *tsym = to_al->sym;
struct call_path *cp, *parent;
u64 ks = ts->kernel_start;
+ u64 addr = sample->addr;
+ u64 tm = sample->time;
+ u64 ip = sample->ip;
int err;
- if (sample->ip >= ks && sample->addr < ks) {
+ if (ip >= ks && addr < ks) {
/* Return to userspace, so pop all kernel addresses */
while (thread_stack__in_kernel(ts)) {
err = thread_stack__call_return(thread, ts, --ts->cnt,
- sample->time, ref,
- true);
+ tm, ref, true);
if (err)
return err;
}
/* If the stack is empty, push the userspace address */
if (!ts->cnt) {
- cp = call_path__findnew(cpr, &cpr->call_path,
- to_al->sym, sample->addr,
- ts->kernel_start);
- if (!cp)
- return -ENOMEM;
- return thread_stack__push_cp(ts, 0, sample->time, ref,
- cp, true, false);
+ cp = call_path__findnew(cpr, root, tsym, addr, ks);
+ return thread_stack__push_cp(ts, 0, tm, ref, cp, true,
+ false);
}
- } else if (thread_stack__in_kernel(ts) && sample->ip < ks) {
+ } else if (thread_stack__in_kernel(ts) && ip < ks) {
/* Return to userspace, so pop all kernel addresses */
while (thread_stack__in_kernel(ts)) {
err = thread_stack__call_return(thread, ts, --ts->cnt,
- sample->time, ref,
- true);
+ tm, ref, true);
if (err)
return err;
}
@@ -628,21 +670,59 @@ static int thread_stack__no_call_return(struct thread *thread,
if (ts->cnt)
parent = ts->stack[ts->cnt - 1].cp;
else
- parent = &cpr->call_path;
+ parent = root;
- /* This 'return' had no 'call', so push and pop top of stack */
- cp = call_path__findnew(cpr, parent, from_al->sym, sample->ip,
- ts->kernel_start);
- if (!cp)
- return -ENOMEM;
+ if (parent->sym == from_al->sym) {
+ /*
+ * At the bottom of the stack, assume the missing 'call' was
+ * before the trace started. So, pop the current symbol and push
+ * the 'to' symbol.
+ */
+ if (ts->cnt == 1) {
+ err = thread_stack__call_return(thread, ts, --ts->cnt,
+ tm, ref, false);
+ if (err)
+ return err;
+ }
+
+ if (!ts->cnt) {
+ cp = call_path__findnew(cpr, root, tsym, addr, ks);
+
+ return thread_stack__push_cp(ts, addr, tm, ref, cp,
+ true, false);
+ }
+
+ /*
+ * Otherwise assume the 'return' is being used as a jump (e.g.
+ * retpoline) and just push the 'to' symbol.
+ */
+ cp = call_path__findnew(cpr, parent, tsym, addr, ks);
+
+ err = thread_stack__push_cp(ts, 0, tm, ref, cp, true, false);
+ if (!err)
+ ts->stack[ts->cnt - 1].non_call = true;
+
+ return err;
+ }
+
+ /*
+ * Assume 'parent' has not yet returned, so push 'to', and then push and
+ * pop 'from'.
+ */
+
+ cp = call_path__findnew(cpr, parent, tsym, addr, ks);
+
+ err = thread_stack__push_cp(ts, addr, tm, ref, cp, true, false);
+ if (err)
+ return err;
- err = thread_stack__push_cp(ts, sample->addr, sample->time, ref, cp,
- true, false);
+ cp = call_path__findnew(cpr, cp, fsym, ip, ks);
+
+ err = thread_stack__push_cp(ts, ip, tm, ref, cp, true, false);
if (err)
return err;
- return thread_stack__pop_cp(thread, ts, sample->addr, sample->time, ref,
- to_al->sym);
+ return thread_stack__call_return(thread, ts, --ts->cnt, tm, ref, false);
}
static int thread_stack__trace_begin(struct thread *thread,
@@ -680,8 +760,6 @@ static int thread_stack__trace_end(struct thread_stack *ts,
cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp, NULL, 0,
ts->kernel_start);
- if (!cp)
- return -ENOMEM;
ret_addr = sample->ip + sample->insn_len;
@@ -689,6 +767,70 @@ static int thread_stack__trace_end(struct thread_stack *ts,
false, true);
}
+static bool is_x86_retpoline(const char *name)
+{
+ const char *p = strstr(name, "__x86_indirect_thunk_");
+
+ return p == name || !strcmp(name, "__indirect_thunk_start");
+}
+
+/*
+ * x86 retpoline functions pollute the call graph. This function removes them.
+ * This does not handle function return thunks, nor is there any improvement
+ * for the handling of inline thunks or extern thunks.
+ */
+static int thread_stack__x86_retpoline(struct thread_stack *ts,
+ struct perf_sample *sample,
+ struct addr_location *to_al)
+{
+ struct thread_stack_entry *tse = &ts->stack[ts->cnt - 1];
+ struct call_path_root *cpr = ts->crp->cpr;
+ struct symbol *sym = tse->cp->sym;
+ struct symbol *tsym = to_al->sym;
+ struct call_path *cp;
+
+ if (sym && is_x86_retpoline(sym->name)) {
+ /*
+		 * This is an x86 retpoline fn. It pollutes the call graph by
+ * showing up everywhere there is an indirect branch, but does
+ * not itself mean anything. Here the top-of-stack is removed,
+ * by decrementing the stack count, and then further down, the
+ * resulting top-of-stack is replaced with the actual target.
+ * The result is that the retpoline functions will no longer
+ * appear in the call graph. Note this only affects the call
+ * graph, since all the original branches are left unchanged.
+ */
+ ts->cnt -= 1;
+ sym = ts->stack[ts->cnt - 2].cp->sym;
+ if (sym && sym == tsym && to_al->addr != tsym->start) {
+ /*
+ * Target is back to the middle of the symbol we came
+ * from so assume it is an indirect jmp and forget it
+ * altogether.
+ */
+ ts->cnt -= 1;
+ return 0;
+ }
+ } else if (sym && sym == tsym) {
+ /*
+ * Target is back to the symbol we came from so assume it is an
+ * indirect jmp and forget it altogether.
+ */
+ ts->cnt -= 1;
+ return 0;
+ }
+
+ cp = call_path__findnew(cpr, ts->stack[ts->cnt - 2].cp, tsym,
+ sample->addr, ts->kernel_start);
+ if (!cp)
+ return -ENOMEM;
+
+ /* Replace the top-of-stack with the actual target */
+ ts->stack[ts->cnt - 1].cp = cp;
+
+ return 0;
+}
+
int thread_stack__process(struct thread *thread, struct comm *comm,
struct perf_sample *sample,
struct addr_location *from_al,
@@ -696,6 +838,7 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
struct call_return_processor *crp)
{
struct thread_stack *ts = thread__stack(thread, sample->cpu);
+ enum retpoline_state_t rstate;
int err = 0;
if (ts && !ts->crp) {
@@ -711,6 +854,10 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
ts->comm = comm;
}
+ rstate = ts->rstate;
+ if (rstate == X86_RETPOLINE_DETECTED)
+ ts->rstate = X86_RETPOLINE_POSSIBLE;
+
/* Flush stack on exec */
if (ts->comm != comm && thread->pid_ == thread->tid) {
err = __thread_stack__flush(thread, ts);
@@ -745,14 +892,27 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
to_al->sym, sample->addr,
ts->kernel_start);
- if (!cp)
- return -ENOMEM;
err = thread_stack__push_cp(ts, ret_addr, sample->time, ref,
cp, false, trace_end);
+
+ /*
+		 * A call to the same symbol, but not to the start of the symbol,
+		 * may be the start of an x86 retpoline.
+ */
+ if (!err && rstate == X86_RETPOLINE_POSSIBLE && to_al->sym &&
+ from_al->sym == to_al->sym &&
+ to_al->addr != to_al->sym->start)
+ ts->rstate = X86_RETPOLINE_DETECTED;
+
} else if (sample->flags & PERF_IP_FLAG_RETURN) {
if (!sample->ip || !sample->addr)
return 0;
+ /* x86 retpoline 'return' doesn't match the stack */
+ if (rstate == X86_RETPOLINE_DETECTED && ts->cnt > 2 &&
+ ts->stack[ts->cnt - 1].ret_addr != sample->addr)
+ return thread_stack__x86_retpoline(ts, sample, to_al);
+
err = thread_stack__pop_cp(thread, ts, sample->addr,
sample->time, ref, from_al->sym);
if (err) {
@@ -765,6 +925,25 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
err = thread_stack__trace_begin(thread, ts, sample->time, ref);
} else if (sample->flags & PERF_IP_FLAG_TRACE_END) {
err = thread_stack__trace_end(ts, sample, ref);
+ } else if (sample->flags & PERF_IP_FLAG_BRANCH &&
+ from_al->sym != to_al->sym && to_al->sym &&
+ to_al->addr == to_al->sym->start) {
+ struct call_path_root *cpr = ts->crp->cpr;
+ struct call_path *cp;
+
+ /*
+ * The compiler might optimize a call/ret combination by making
+ * it a jmp. Make that visible by recording on the stack a
+		 * branch to the start of a different symbol. Note that this means
+ * when a ret pops the stack, all jmps must be popped off first.
+ */
+ cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
+ to_al->sym, sample->addr,
+ ts->kernel_start);
+ err = thread_stack__push_cp(ts, 0, sample->time, ref, cp, false,
+ false);
+ if (!err)
+ ts->stack[ts->cnt - 1].non_call = true;
}
return err;
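
For illustration (not part of the patch): the thread-stack changes above add a
small per-thread state machine for x86 retpolines. Detection is armed
(X86_RETPOLINE_POSSIBLE) only on x86, a call that targets the calling symbol
somewhere other than its first instruction flags X86_RETPOLINE_DETECTED, and
the flag is consumed at the next sample, where the mismatching 'return' is
repaired. A simplified standalone model of just those transitions:

	#include <stdbool.h>

	enum retpoline_state_t {
		RETPOLINE_NONE,
		X86_RETPOLINE_POSSIBLE,
		X86_RETPOLINE_DETECTED,
	};

	/* On a 'call' sample: a call back into the same symbol, but not to
	 * its start, looks like entry into a retpoline thunk. */
	static enum retpoline_state_t rstate_on_call(enum retpoline_state_t rstate,
						     bool same_sym, bool to_sym_start)
	{
		if (rstate == X86_RETPOLINE_POSSIBLE && same_sym && !to_sym_start)
			return X86_RETPOLINE_DETECTED;
		return rstate;
	}

	/* At the start of each sample: a detection only survives one sample. */
	static enum retpoline_state_t rstate_next_sample(enum retpoline_state_t rstate)
	{
		return rstate == X86_RETPOLINE_DETECTED ? X86_RETPOLINE_POSSIBLE
							: rstate;
	}
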
diff --git a/tools/perf/util/thread-stack.h b/tools/perf/util/thread-stack.h
index 1f626f4a1c40..9c45f947f5a9 100644
--- a/tools/perf/util/thread-stack.h
+++ b/tools/perf/util/thread-stack.h
@@ -35,10 +35,13 @@ struct call_path;
*
* CALL_RETURN_NO_CALL: 'return' but no matching 'call'
* CALL_RETURN_NO_RETURN: 'call' but no matching 'return'
+ * CALL_RETURN_NON_CALL: a branch but not a 'call' to the start of a different
+ * symbol
*/
enum {
CALL_RETURN_NO_CALL = 1 << 0,
CALL_RETURN_NO_RETURN = 1 << 1,
+ CALL_RETURN_NON_CALL = 1 << 2,
};
/**
@@ -52,6 +55,7 @@ enum {
* @call_ref: external reference to 'call' sample (e.g. db_id)
* @return_ref: external reference to 'return' sample (e.g. db_id)
* @db_id: id used for db-export
+ * @parent_db_id: id of parent call used for db-export
* @flags: Call/Return flags
*/
struct call_return {
@@ -64,6 +68,7 @@ struct call_return {
u64 call_ref;
u64 return_ref;
u64 db_id;
+ u64 parent_db_id;
u32 flags;
};
@@ -76,7 +81,7 @@ struct call_return {
*/
struct call_return_processor {
struct call_path_root *cpr;
- int (*process)(struct call_return *cr, void *data);
+ int (*process)(struct call_return *cr, u64 *parent_db_id, void *data);
void *data;
};
@@ -90,7 +95,7 @@ void thread_stack__free(struct thread *thread);
size_t thread_stack__depth(struct thread *thread, int cpu);
struct call_return_processor *
-call_return_processor__new(int (*process)(struct call_return *cr, void *data),
+call_return_processor__new(int (*process)(struct call_return *cr, u64 *parent_db_id, void *data),
void *data);
void call_return_processor__free(struct call_return_processor *crp);
int thread_stack__process(struct thread *thread, struct comm *comm,
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index c83372329f89..50678d318185 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -12,6 +12,8 @@
#include "debug.h"
#include "namespaces.h"
#include "comm.h"
+#include "map.h"
+#include "symbol.h"
#include "unwind.h"
#include <api/fs/fs.h>
@@ -392,3 +394,25 @@ struct thread *thread__main_thread(struct machine *machine, struct thread *threa
return machine__find_thread(machine, thread->pid_, thread->pid_);
}
+
+int thread__memcpy(struct thread *thread, struct machine *machine,
+ void *buf, u64 ip, int len, bool *is64bit)
+{
+ u8 cpumode = PERF_RECORD_MISC_USER;
+ struct addr_location al;
+ long offset;
+
+ if (machine__kernel_ip(machine, ip))
+ cpumode = PERF_RECORD_MISC_KERNEL;
+
+ if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso ||
+ al.map->dso->data.status == DSO_DATA_STATUS_ERROR ||
+ map__load(al.map) < 0)
+ return -1;
+
+ offset = al.map->map_ip(al.map, ip);
+ if (is64bit)
+ *is64bit = al.map->dso->is_64_bit;
+
+ return dso__data_read_offset(al.map->dso, machine, offset, buf, len);
+}
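
For illustration (not part of the patch): thread__memcpy() above resolves 'ip'
to a map/DSO and copies up to 'len' bytes of the mapped object into 'buf'. A
hedged caller sketch, assuming the usual perf sample-processing context (the
helper name read_insn_bytes is hypothetical):

	/* Read up to 16 bytes of machine code at 'ip' from the thread's DSO. */
	static int read_insn_bytes(struct thread *thread, struct machine *machine,
				   u64 ip)
	{
		bool is64bit;
		u8 buf[16];
		int len;

		len = thread__memcpy(thread, machine, buf, ip, sizeof(buf),
				     &is64bit);
		if (len <= 0)
			return -1;	/* unmapped ip or unreadable DSO */

		/* 'len' bytes of 'buf' now hold the object code at 'ip';
		 * 'is64bit' reports whether the containing DSO is 64-bit. */
		return len;
	}
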
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 712dd48cc0ca..cf8375c017a0 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -5,14 +5,18 @@
#include <linux/refcount.h>
#include <linux/rbtree.h>
#include <linux/list.h>
+#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
-#include "symbol.h"
-#include "map.h"
+#include "srccode.h"
+#include "symbol_conf.h"
#include <strlist.h>
#include <intlist.h>
#include "rwsem.h"
+struct addr_location;
+struct map;
+struct namespaces_event;
struct thread_stack;
struct unwind_libunwind_ops;
@@ -109,6 +113,9 @@ struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
struct addr_location *al);
+int thread__memcpy(struct thread *thread, struct machine *machine,
+ void *buf, u64 ip, int len, bool *is64bit);
+
static inline void *thread__priv(struct thread *thread)
{
return thread->priv;
diff --git a/tools/perf/util/time-utils.c b/tools/perf/util/time-utils.c
index 6193b46050a5..0f53baec660e 100644
--- a/tools/perf/util/time-utils.c
+++ b/tools/perf/util/time-utils.c
@@ -11,6 +11,8 @@
#include "perf.h"
#include "debug.h"
#include "time-utils.h"
+#include "session.h"
+#include "evlist.h"
int parse_nsec_time(const char *str, u64 *ptime)
{
@@ -374,7 +376,7 @@ bool perf_time__ranges_skip_sample(struct perf_time_interval *ptime_buf,
struct perf_time_interval *ptime;
int i;
- if ((timestamp == 0) || (num == 0))
+ if ((!ptime_buf) || (timestamp == 0) || (num == 0))
return false;
if (num == 1)
@@ -396,6 +398,53 @@ bool perf_time__ranges_skip_sample(struct perf_time_interval *ptime_buf,
return (i == num) ? true : false;
}
+int perf_time__parse_for_ranges(const char *time_str,
+ struct perf_session *session,
+ struct perf_time_interval **ranges,
+ int *range_size, int *range_num)
+{
+ struct perf_time_interval *ptime_range;
+ int size, num, ret;
+
+ ptime_range = perf_time__range_alloc(time_str, &size);
+ if (!ptime_range)
+ return -ENOMEM;
+
+ if (perf_time__parse_str(ptime_range, time_str) != 0) {
+ if (session->evlist->first_sample_time == 0 &&
+ session->evlist->last_sample_time == 0) {
+ pr_err("HINT: no first/last sample time found in perf data.\n"
+ "Please use latest perf binary to execute 'perf record'\n"
+ "(if '--buildid-all' is enabled, please set '--timestamp-boundary').\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ num = perf_time__percent_parse_str(
+ ptime_range, size,
+ time_str,
+ session->evlist->first_sample_time,
+ session->evlist->last_sample_time);
+
+ if (num < 0) {
+ pr_err("Invalid time string\n");
+ ret = -EINVAL;
+ goto error;
+ }
+ } else {
+ num = 1;
+ }
+
+ *range_size = size;
+ *range_num = num;
+ *ranges = ptime_range;
+ return 0;
+
+error:
+ free(ptime_range);
+ return ret;
+}
+
int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz)
{
u64 sec = timestamp / NSEC_PER_SEC;
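
For illustration (not part of the patch): perf_time__parse_for_ranges() above
folds allocation, plain parsing and percent-style parsing into one call. A
hedged caller sketch, assuming an existing struct perf_session *session and a
struct perf_sample *sample from the surrounding tool code:

	struct perf_time_interval *ranges = NULL;
	int range_size = 0, range_num = 0;

	/* Parse a --time style string (e.g. "10%/1", the first 10% slice). */
	if (perf_time__parse_for_ranges("10%/1", session, &ranges,
					&range_size, &range_num) < 0)
		return -1;

	/* Per sample: drop samples outside the requested window(s). */
	if (perf_time__ranges_skip_sample(ranges, range_num, sample->time))
		return 0;
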
diff --git a/tools/perf/util/time-utils.h b/tools/perf/util/time-utils.h
index 70b177d2b98c..b923de44e36f 100644
--- a/tools/perf/util/time-utils.h
+++ b/tools/perf/util/time-utils.h
@@ -23,6 +23,12 @@ bool perf_time__skip_sample(struct perf_time_interval *ptime, u64 timestamp);
bool perf_time__ranges_skip_sample(struct perf_time_interval *ptime_buf,
int num, u64 timestamp);
+struct perf_session;
+
+int perf_time__parse_for_ranges(const char *str, struct perf_session *session,
+ struct perf_time_interval **ranges,
+ int *range_size, int *range_num);
+
int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz);
int fetch_current_timestamp(char *buf, size_t sz);
diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h
index 56e4ca54020a..250391672f9f 100644
--- a/tools/perf/util/tool.h
+++ b/tools/perf/util/tool.h
@@ -53,7 +53,10 @@ struct perf_tool {
itrace_start,
context_switch,
throttle,
- unthrottle;
+ unthrottle,
+ ksymbol,
+ bpf_event;
+
event_attr_op attr;
event_attr_op event_update;
event_op2 tracing_data;
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 5eff9bfc5758..407d0167b942 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -8,6 +8,8 @@
#include "unwind.h"
#include "unwind-libdw.h"
#include "machine.h"
+#include "map.h"
+#include "symbol.h"
#include "thread.h"
#include <linux/types.h>
#include "event.h"
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index 79f521a552cf..f3c666a84e4d 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -34,6 +34,7 @@
#include "session.h"
#include "perf_regs.h"
#include "unwind.h"
+#include "map.h"
#include "symbol.h"
#include "util.h"
#include "debug.h"
diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
index b029a5e9ae49..9778b3133b77 100644
--- a/tools/perf/util/unwind-libunwind.c
+++ b/tools/perf/util/unwind-libunwind.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include "unwind.h"
+#include "map.h"
#include "thread.h"
#include "session.h"
#include "debug.h"
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 093352e93d50..d388f80d8703 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -2,6 +2,7 @@
#include "../perf.h"
#include "util.h"
#include "debug.h"
+#include "namespaces.h"
#include <api/fs/fs.h>
#include <sys/mman.h>
#include <sys/stat.h>
@@ -20,6 +21,7 @@
#include <linux/time64.h>
#include <unistd.h>
#include "strlist.h"
+#include "string2.h"
/*
* XXX We need to find a better place for these things...
@@ -116,23 +118,67 @@ int mkdir_p(char *path, mode_t mode)
return (stat(path, &st) && mkdir(path, mode)) ? -1 : 0;
}
-int rm_rf(const char *path)
+static bool match_pat(char *file, const char **pat)
+{
+ int i = 0;
+
+ if (!pat)
+ return true;
+
+ while (pat[i]) {
+ if (strglobmatch(file, pat[i]))
+ return true;
+
+ i++;
+ }
+
+ return false;
+}
+
+/*
+ * The depth specifies how deep the removal will go.
+ * 0 - will remove only files under the 'path' directory
+ * 1 .. x - will dive x levels deep under the 'path' directory
+ *
+ * If specified, pat is a NULL-terminated array of string patterns,
+ * which are checked against every file/directory found. Only matching
+ * ones are removed.
+ *
+ * The function returns:
+ * 0 on success
+ * -1 on removal failure with errno set
+ * -2 on pattern failure
+ */
+static int rm_rf_depth_pat(const char *path, int depth, const char **pat)
{
DIR *dir;
- int ret = 0;
+ int ret;
struct dirent *d;
char namebuf[PATH_MAX];
+ struct stat statbuf;
+ /* Do not fail if there's no file. */
+ ret = lstat(path, &statbuf);
+ if (ret)
+ return 0;
+
+ /* Try to remove any file we get. */
+ if (!(statbuf.st_mode & S_IFDIR))
+ return unlink(path);
+
+ /* We have directory in path. */
dir = opendir(path);
if (dir == NULL)
- return 0;
+ return -1;
while ((d = readdir(dir)) != NULL && !ret) {
- struct stat statbuf;
if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, ".."))
continue;
+ if (!match_pat(d->d_name, pat))
+ return -2;
+
scnprintf(namebuf, sizeof(namebuf), "%s/%s",
path, d->d_name);
@@ -144,7 +190,7 @@ int rm_rf(const char *path)
}
if (S_ISDIR(statbuf.st_mode))
- ret = rm_rf(namebuf);
+ ret = depth ? rm_rf_depth_pat(namebuf, depth - 1, pat) : 0;
else
ret = unlink(namebuf);
}
@@ -156,6 +202,22 @@ int rm_rf(const char *path)
return rmdir(path);
}
+int rm_rf_perf_data(const char *path)
+{
+ const char *pat[] = {
+ "header",
+ "data.*",
+ NULL,
+ };
+
+ return rm_rf_depth_pat(path, 0, pat);
+}
+
+int rm_rf(const char *path)
+{
+ return rm_rf_depth_pat(path, INT_MAX, NULL);
+}
+
/* A filter which removes dot files */
bool lsdir_no_dot_filter(const char *name __maybe_unused, struct dirent *d)
{
@@ -506,3 +568,13 @@ out:
return tip;
}
+
+char *perf_exe(char *buf, int len)
+{
+ int n = readlink("/proc/self/exe", buf, len);
+ if (n > 0) {
+ buf[n] = 0;
+ return buf;
+ }
+ return strcpy(buf, "perf");
+}
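
For illustration (not part of the patch): rm_rf() is now a thin wrapper around
rm_rf_depth_pat(), and rm_rf_perf_data() restricts removal to what a
directory-mode perf.data is expected to contain. A short usage sketch with
hypothetical paths:

	/* Only "header" and "data.*" entries match, and the walk stays at
	 * depth 0, so an unexpected file makes this return -2 rather than
	 * deleting it. */
	if (rm_rf_perf_data("perf.data"))
		pr_err("could not remove perf.data directory\n");

	/* Existing callers keep fully recursive removal. */
	rm_rf("/tmp/perf-test-dir");
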
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index ece040b799f6..09c1b0f91f65 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -31,6 +31,7 @@ struct strlist;
int mkdir_p(char *path, mode_t mode);
int rm_rf(const char *path);
+int rm_rf_perf_data(const char *path);
struct strlist *lsdir(const char *name, bool (*filter)(const char *, struct dirent *));
bool lsdir_no_dot_filter(const char *name, struct dirent *d);
int copyfile(const char *from, const char *to);
@@ -76,6 +77,8 @@ extern bool perf_singlethreaded;
void perf_set_singlethreaded(void);
void perf_set_multithreaded(void);
+char *perf_exe(char *buf, int len);
+
#ifndef O_CLOEXEC
#ifdef __sparc__
#define O_CLOEXEC 0x400000
diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c
index 3702cba11d7d..5031b7b22bbd 100644
--- a/tools/perf/util/vdso.c
+++ b/tools/perf/util/vdso.c
@@ -11,6 +11,7 @@
#include "vdso.h"
#include "util.h"
+#include "map.h"
#include "symbol.h"
#include "machine.h"
#include "thread.h"
diff --git a/tools/perf/util/zlib.c b/tools/perf/util/zlib.c
index 902ce6384f57..512ad7c09b13 100644
--- a/tools/perf/util/zlib.c
+++ b/tools/perf/util/zlib.c
@@ -6,7 +6,6 @@
#include <sys/mman.h>
#include <zlib.h>
#include <linux/compiler.h>
-#include <unistd.h>
#include "util/compress.h"
#include "util/util.h"
diff --git a/tools/power/acpi/common/cmfsize.c b/tools/power/acpi/common/cmfsize.c
index ff8025de8237..28c11c6b4d06 100644
--- a/tools/power/acpi/common/cmfsize.c
+++ b/tools/power/acpi/common/cmfsize.c
@@ -3,7 +3,7 @@
*
* Module Name: cfsize - Common get file size function
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/common/getopt.c b/tools/power/acpi/common/getopt.c
index 7be89b873661..6b41d8b64a00 100644
--- a/tools/power/acpi/common/getopt.c
+++ b/tools/power/acpi/common/getopt.c
@@ -3,7 +3,7 @@
*
* Module Name: getopt
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
index a20c703f8b7d..2a1fd9182f94 100644
--- a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
+++ b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
@@ -3,7 +3,7 @@
*
* Module Name: oslinuxtbl - Linux OSL for obtaining ACPI tables
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/os_specific/service_layers/osunixdir.c b/tools/power/acpi/os_specific/service_layers/osunixdir.c
index 4fd44e218a83..30913f124dd5 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixdir.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixdir.c
@@ -3,7 +3,7 @@
*
* Module Name: osunixdir - Unix directory access interfaces
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/os_specific/service_layers/osunixmap.c b/tools/power/acpi/os_specific/service_layers/osunixmap.c
index 93d8359309b6..29dfb47adfeb 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixmap.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixmap.c
@@ -3,7 +3,7 @@
*
* Module Name: osunixmap - Unix OSL for file mappings
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/os_specific/service_layers/osunixxf.c b/tools/power/acpi/os_specific/service_layers/osunixxf.c
index 8a88e87778bd..83d3b3b829b8 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixxf.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixxf.c
@@ -3,7 +3,7 @@
*
* Module Name: osunixxf - UNIX OSL interfaces
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/tools/acpidump/acpidump.h b/tools/power/acpi/tools/acpidump/acpidump.h
index f69f1f559743..2eb0aaa4f462 100644
--- a/tools/power/acpi/tools/acpidump/acpidump.h
+++ b/tools/power/acpi/tools/acpidump/acpidump.h
@@ -3,7 +3,7 @@
*
* Module Name: acpidump.h - Include file for acpi_dump utility
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/tools/acpidump/apdump.c b/tools/power/acpi/tools/acpidump/apdump.c
index a9848de32d8e..e256c2ac5ddc 100644
--- a/tools/power/acpi/tools/acpidump/apdump.c
+++ b/tools/power/acpi/tools/acpidump/apdump.c
@@ -3,7 +3,7 @@
*
* Module Name: apdump - Dump routines for ACPI tables (acpidump)
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/tools/acpidump/apfiles.c b/tools/power/acpi/tools/acpidump/apfiles.c
index e86207e5afcf..49972bc78bc5 100644
--- a/tools/power/acpi/tools/acpidump/apfiles.c
+++ b/tools/power/acpi/tools/acpidump/apfiles.c
@@ -3,7 +3,7 @@
*
* Module Name: apfiles - File-related functions for acpidump utility
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/tools/acpidump/apmain.c b/tools/power/acpi/tools/acpidump/apmain.c
index 2d9b94b631cb..d8f1b57537d3 100644
--- a/tools/power/acpi/tools/acpidump/apmain.c
+++ b/tools/power/acpi/tools/acpidump/apmain.c
@@ -3,7 +3,7 @@
*
* Module Name: apmain - Main module for the acpidump utility
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 1a2bd15c5b6e..fb5758ac469e 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -10,6 +10,7 @@ TARGETS += drivers/dma-buf
TARGETS += efivarfs
TARGETS += exec
TARGETS += filesystems
+TARGETS += filesystems/binderfs
TARGETS += firmware
TARGETS += ftrace
TARGETS += futex
@@ -21,6 +22,7 @@ TARGETS += ir
TARGETS += kcmp
TARGETS += kvm
TARGETS += lib
+TARGETS += livepatch
TARGETS += membarrier
TARGETS += memfd
TARGETS += memory-hotplug
@@ -47,6 +49,8 @@ TARGETS += sysctl
ifneq (1, $(quicktest))
TARGETS += timers
endif
+TARGETS += tmpfs
+TARGETS += tpm2
TARGETS += user
TARGETS += vm
TARGETS += x86
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index dd093bd91aa9..3b74d23fffab 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -14,6 +14,7 @@ feature
test_libbpf_open
test_sock
test_sock_addr
+test_sock_fields
urandom_read
test_btf
test_sockmap
@@ -29,3 +30,4 @@ test_netcnt
test_section_names
test_tcpnotify_user
test_libbpf
+alu32
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 41ab7a3668b3..2aed37ea61a4 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -10,36 +10,33 @@ ifneq ($(wildcard $(GENHDR)),)
GENFLAGS := -DHAVE_GENHDR
endif
+CLANG ?= clang
+LLC ?= llc
+LLVM_OBJCOPY ?= llvm-objcopy
+LLVM_READELF ?= llvm-readelf
+BTF_PAHOLE ?= pahole
CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(BPFDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
LDLIBS += -lcap -lelf -lrt -lpthread
-TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read
-all: $(TEST_CUSTOM_PROGS)
-
-$(TEST_CUSTOM_PROGS): $(OUTPUT)/%: %.c
- $(CC) -o $(TEST_CUSTOM_PROGS) -static $< -Wl,--build-id
-
# Order correspond to 'make run_tests' order
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \
test_sock test_btf test_sockmap test_lirc_mode2_user get_cgroup_id_user \
test_socket_cookie test_cgroup_storage test_select_reuseport test_section_names \
- test_netcnt test_tcpnotify_user
-
-TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \
- test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \
- sockmap_verdict_prog.o dev_cgroup.o sample_ret0.o test_tracepoint.o \
- test_l4lb_noinline.o test_xdp_noinline.o test_stacktrace_map.o \
- test_tcpnotify_kern.o \
- sample_map_ret0.o test_tcpbpf_kern.o test_stacktrace_build_id.o \
- sockmap_tcp_msg_prog.o connect4_prog.o connect6_prog.o test_adjust_tail.o \
- test_btf_haskv.o test_btf_nokv.o test_sockmap_kern.o test_tunnel_kern.o \
- test_get_stack_rawtp.o test_sockmap_kern.o test_sockhash_kern.o \
- test_lwt_seg6local.o sendmsg4_prog.o sendmsg6_prog.o test_lirc_mode2_kern.o \
- get_cgroup_id_kern.o socket_cookie_prog.o test_select_reuseport_kern.o \
- test_skb_cgroup_id_kern.o bpf_flow.o netcnt_prog.o \
- test_sk_lookup_kern.o test_xdp_vlan.o test_queue_map.o test_stack_map.o \
- xdp_dummy.o test_map_in_map.o
+ test_netcnt test_tcpnotify_user test_sock_fields
+
+BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c)))
+TEST_GEN_FILES = $(BPF_OBJ_FILES)
+
+# Also test sub-register code-gen if LLVM has eBPF v3 processor support which
+# contains both ALU32 and JMP32 instructions.
+SUBREG_CODEGEN := $(shell echo "int cal(int a) { return a > 0; }" | \
+ $(CLANG) -target bpf -O2 -emit-llvm -S -x c - -o - | \
+ $(LLC) -mattr=+alu32 -mcpu=v3 2>&1 | \
+ grep 'if w')
+ifneq ($(SUBREG_CODEGEN),)
+TEST_GEN_FILES += $(patsubst %.o,alu32/%.o, $(BPF_OBJ_FILES))
+endif
# Order correspond to 'make run_tests' order
TEST_PROGS := test_kmod.sh \
@@ -53,7 +50,8 @@ TEST_PROGS := test_kmod.sh \
test_lirc_mode2.sh \
test_skb_cgroup_id.sh \
test_flow_dissector.sh \
- test_xdp_vlan.sh
+ test_xdp_vlan.sh \
+ test_lwt_ip_encap.sh
TEST_PROGS_EXTENDED := with_addr.sh \
with_tunnels.sh \
@@ -66,6 +64,13 @@ TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_use
include ../lib.mk
+# NOTE: $(OUTPUT) won't get default value if used before lib.mk
+TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read
+all: $(TEST_CUSTOM_PROGS)
+
+$(OUTPUT)/urandom_read: $(OUTPUT)/%: %.c
+ $(CC) -o $@ -static $< -Wl,--build-id
+
BPFOBJ := $(OUTPUT)/libbpf.a
$(TEST_GEN_PROGS): $(BPFOBJ)
@@ -84,6 +89,7 @@ $(OUTPUT)/test_progs: trace_helpers.c
$(OUTPUT)/get_cgroup_id_user: cgroup_helpers.c
$(OUTPUT)/test_cgroup_storage: cgroup_helpers.c
$(OUTPUT)/test_netcnt: cgroup_helpers.c
+$(OUTPUT)/test_sock_fields: cgroup_helpers.c
.PHONY: force
@@ -93,11 +99,6 @@ force:
$(BPFOBJ): force
$(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/
-CLANG ?= clang
-LLC ?= llc
-LLVM_OBJCOPY ?= llvm-objcopy
-BTF_PAHOLE ?= pahole
-
PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
# Let newer LLVM versions transparently probe the kernel for availability
@@ -127,12 +128,15 @@ $(OUTPUT)/test_xdp_noinline.o: CLANG_FLAGS += -fno-inline
$(OUTPUT)/test_queue_map.o: test_queue_stack_map.h
$(OUTPUT)/test_stack_map.o: test_queue_stack_map.h
+$(OUTPUT)/flow_dissector_load.o: flow_dissector_load.h
+$(OUTPUT)/test_progs.o: flow_dissector_load.h
+
BTF_LLC_PROBE := $(shell $(LLC) -march=bpf -mattr=help 2>&1 | grep dwarfris)
BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF)
BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm')
BTF_LLVM_PROBE := $(shell echo "int main() { return 0; }" | \
$(CLANG) -target bpf -O2 -g -c -x c - -o ./llvm_btf_verify.o; \
- readelf -S ./llvm_btf_verify.o | grep BTF; \
+ $(LLVM_READELF) -S ./llvm_btf_verify.o | grep BTF; \
/bin/rm -f ./llvm_btf_verify.o)
ifneq ($(BTF_LLVM_PROBE),)
@@ -149,9 +153,43 @@ endif
endif
endif
+TEST_PROGS_CFLAGS := -I. -I$(OUTPUT)
+TEST_VERIFIER_CFLAGS := -I. -I$(OUTPUT) -Iverifier
+
+ifneq ($(SUBREG_CODEGEN),)
+ALU32_BUILD_DIR = $(OUTPUT)/alu32
+TEST_CUSTOM_PROGS += $(ALU32_BUILD_DIR)/test_progs_32
+$(ALU32_BUILD_DIR):
+ mkdir -p $@
+
+$(ALU32_BUILD_DIR)/urandom_read: $(OUTPUT)/urandom_read
+ cp $< $@
+
+$(ALU32_BUILD_DIR)/test_progs_32: test_progs.c $(OUTPUT)/libbpf.a\
+ $(ALU32_BUILD_DIR) \
+ $(ALU32_BUILD_DIR)/urandom_read
+ $(CC) $(TEST_PROGS_CFLAGS) $(CFLAGS) \
+ -o $(ALU32_BUILD_DIR)/test_progs_32 \
+ test_progs.c trace_helpers.c prog_tests/*.c \
+ $(OUTPUT)/libbpf.a $(LDLIBS)
+
+$(ALU32_BUILD_DIR)/test_progs_32: $(PROG_TESTS_H)
+$(ALU32_BUILD_DIR)/test_progs_32: prog_tests/*.c
+
+$(ALU32_BUILD_DIR)/%.o: progs/%.c $(ALU32_BUILD_DIR) \
+ $(ALU32_BUILD_DIR)/test_progs_32
+ $(CLANG) $(CLANG_FLAGS) \
+ -O2 -target bpf -emit-llvm -c $< -o - | \
+ $(LLC) -march=bpf -mattr=+alu32 -mcpu=$(CPU) $(LLC_FLAGS) \
+ -filetype=obj -o $@
+ifeq ($(DWARF2BTF),y)
+ $(BTF_PAHOLE) -J $@
+endif
+endif
+
# Have one program compiled without "-target bpf" to test whether libbpf loads
# it successfully
-$(OUTPUT)/test_xdp.o: test_xdp.c
+$(OUTPUT)/test_xdp.o: progs/test_xdp.c
$(CLANG) $(CLANG_FLAGS) \
-O2 -emit-llvm -c $< -o - | \
$(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
@@ -159,7 +197,7 @@ ifeq ($(DWARF2BTF),y)
$(BTF_PAHOLE) -J $@
endif
-$(OUTPUT)/%.o: %.c
+$(OUTPUT)/%.o: progs/%.c
$(CLANG) $(CLANG_FLAGS) \
-O2 -target bpf -emit-llvm -c $< -o - | \
$(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
@@ -167,4 +205,46 @@ ifeq ($(DWARF2BTF),y)
$(BTF_PAHOLE) -J $@
endif
-EXTRA_CLEAN := $(TEST_CUSTOM_PROGS)
+PROG_TESTS_H := $(OUTPUT)/prog_tests/tests.h
+$(OUTPUT)/test_progs: $(PROG_TESTS_H)
+$(OUTPUT)/test_progs: CFLAGS += $(TEST_PROGS_CFLAGS)
+$(OUTPUT)/test_progs: prog_tests/*.c
+
+PROG_TESTS_DIR = $(OUTPUT)/prog_tests
+$(PROG_TESTS_DIR):
+ mkdir -p $@
+
+PROG_TESTS_FILES := $(wildcard prog_tests/*.c)
+$(PROG_TESTS_H): $(PROG_TESTS_DIR) $(PROG_TESTS_FILES)
+ $(shell ( cd prog_tests/; \
+ echo '/* Generated header, do not edit */'; \
+ echo '#ifdef DECLARE'; \
+ ls *.c 2> /dev/null | \
+ sed -e 's@\([^\.]*\)\.c@extern void test_\1(void);@'; \
+ echo '#endif'; \
+ echo '#ifdef CALL'; \
+ ls *.c 2> /dev/null | \
+ sed -e 's@\([^\.]*\)\.c@test_\1();@'; \
+ echo '#endif' \
+ ) > $(PROG_TESTS_H))
+
+VERIFIER_TESTS_H := $(OUTPUT)/verifier/tests.h
+$(OUTPUT)/test_verifier: $(VERIFIER_TESTS_H)
+$(OUTPUT)/test_verifier: CFLAGS += $(TEST_VERIFIER_CFLAGS)
+
+VERIFIER_TESTS_DIR = $(OUTPUT)/verifier
+$(VERIFIER_TESTS_DIR):
+ mkdir -p $@
+
+VERIFIER_TEST_FILES := $(wildcard verifier/*.c)
+$(OUTPUT)/verifier/tests.h: $(VERIFIER_TESTS_DIR) $(VERIFIER_TEST_FILES)
+ $(shell ( cd verifier/; \
+ echo '/* Generated header, do not edit */'; \
+ echo '#ifdef FILL_ARRAY'; \
+ ls *.c 2> /dev/null | \
+ sed -e 's@\(.*\)@#include \"\1\"@'; \
+ echo '#endif' \
+ ) > $(VERIFIER_TESTS_H))
+
+EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(ALU32_BUILD_DIR) \
+ $(VERIFIER_TESTS_H) $(PROG_TESTS_H)
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h
index 6c77cf7bedce..c9433a496d54 100644
--- a/tools/testing/selftests/bpf/bpf_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_helpers.h
@@ -172,6 +172,16 @@ static int (*bpf_skb_vlan_pop)(void *ctx) =
(void *) BPF_FUNC_skb_vlan_pop;
static int (*bpf_rc_pointer_rel)(void *ctx, int rel_x, int rel_y) =
(void *) BPF_FUNC_rc_pointer_rel;
+static void (*bpf_spin_lock)(struct bpf_spin_lock *lock) =
+ (void *) BPF_FUNC_spin_lock;
+static void (*bpf_spin_unlock)(struct bpf_spin_lock *lock) =
+ (void *) BPF_FUNC_spin_unlock;
+static struct bpf_sock *(*bpf_sk_fullsock)(struct bpf_sock *sk) =
+ (void *) BPF_FUNC_sk_fullsock;
+static struct bpf_tcp_sock *(*bpf_tcp_sock)(struct bpf_sock *sk) =
+ (void *) BPF_FUNC_tcp_sock;
+static int (*bpf_skb_ecn_set_ce)(void *ctx) =
+ (void *) BPF_FUNC_skb_ecn_set_ce;
/* llvm builtin functions that eBPF C program may use to
* emit BPF_LD_ABS and BPF_LD_IND instructions
@@ -224,6 +234,36 @@ static int (*bpf_skb_change_head)(void *, int len, int flags) =
(void *) BPF_FUNC_skb_change_head;
static int (*bpf_skb_pull_data)(void *, int len) =
(void *) BPF_FUNC_skb_pull_data;
+static unsigned int (*bpf_get_cgroup_classid)(void *ctx) =
+ (void *) BPF_FUNC_get_cgroup_classid;
+static unsigned int (*bpf_get_route_realm)(void *ctx) =
+ (void *) BPF_FUNC_get_route_realm;
+static int (*bpf_skb_change_proto)(void *ctx, __be16 proto, __u64 flags) =
+ (void *) BPF_FUNC_skb_change_proto;
+static int (*bpf_skb_change_type)(void *ctx, __u32 type) =
+ (void *) BPF_FUNC_skb_change_type;
+static unsigned int (*bpf_get_hash_recalc)(void *ctx) =
+ (void *) BPF_FUNC_get_hash_recalc;
+static unsigned long long (*bpf_get_current_task)(void *ctx) =
+ (void *) BPF_FUNC_get_current_task;
+static int (*bpf_skb_change_tail)(void *ctx, __u32 len, __u64 flags) =
+ (void *) BPF_FUNC_skb_change_tail;
+static long long (*bpf_csum_update)(void *ctx, __u32 csum) =
+ (void *) BPF_FUNC_csum_update;
+static void (*bpf_set_hash_invalid)(void *ctx) =
+ (void *) BPF_FUNC_set_hash_invalid;
+static int (*bpf_get_numa_node_id)(void) =
+ (void *) BPF_FUNC_get_numa_node_id;
+static int (*bpf_probe_read_str)(void *ctx, __u32 size,
+ const void *unsafe_ptr) =
+ (void *) BPF_FUNC_probe_read_str;
+static unsigned int (*bpf_get_socket_uid)(void *ctx) =
+ (void *) BPF_FUNC_get_socket_uid;
+static unsigned int (*bpf_set_hash)(void *ctx, __u32 hash) =
+ (void *) BPF_FUNC_set_hash;
+static int (*bpf_skb_adjust_room)(void *ctx, __s32 len_diff, __u32 mode,
+ unsigned long long flags) =
+ (void *) BPF_FUNC_skb_adjust_room;
/* Scan the ARCH passed in from ARCH env variable (see Makefile) */
#if defined(__TARGET_ARCH_x86)
diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h
index 315a44fa32af..a29206ebbd13 100644
--- a/tools/testing/selftests/bpf/bpf_util.h
+++ b/tools/testing/selftests/bpf/bpf_util.h
@@ -13,7 +13,7 @@ static inline unsigned int bpf_num_possible_cpus(void)
unsigned int start, end, possible_cpus = 0;
char buff[128];
FILE *fp;
- int n;
+ int len, n, i, j = 0;
fp = fopen(fcpu, "r");
if (!fp) {
@@ -21,17 +21,27 @@ static inline unsigned int bpf_num_possible_cpus(void)
exit(1);
}
- while (fgets(buff, sizeof(buff), fp)) {
- n = sscanf(buff, "%u-%u", &start, &end);
- if (n == 0) {
- printf("Failed to retrieve # possible CPUs!\n");
- exit(1);
- } else if (n == 1) {
- end = start;
+ if (!fgets(buff, sizeof(buff), fp)) {
+ printf("Failed to read %s!\n", fcpu);
+ exit(1);
+ }
+
+ len = strlen(buff);
+ for (i = 0; i <= len; i++) {
+ if (buff[i] == ',' || buff[i] == '\0') {
+ buff[i] = '\0';
+ n = sscanf(&buff[j], "%u-%u", &start, &end);
+ if (n <= 0) {
+ printf("Failed to retrieve # possible CPUs!\n");
+ exit(1);
+ } else if (n == 1) {
+ end = start;
+ }
+ possible_cpus += end - start + 1;
+ j = i + 1;
}
- possible_cpus = start == 0 ? end + 1 : 0;
- break;
}
+
fclose(fp);
return possible_cpus;
@@ -48,4 +58,13 @@ static inline unsigned int bpf_num_possible_cpus(void)
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
+#ifndef sizeof_field
+#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
+#endif
+
+#ifndef offsetofend
+#define offsetofend(TYPE, MEMBER) \
+ (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
+#endif
+
#endif /* __BPF_UTIL__ */
diff --git a/tools/testing/selftests/bpf/flow_dissector_load.c b/tools/testing/selftests/bpf/flow_dissector_load.c
index ae8180b11d5f..77cafa66d048 100644
--- a/tools/testing/selftests/bpf/flow_dissector_load.c
+++ b/tools/testing/selftests/bpf/flow_dissector_load.c
@@ -12,6 +12,7 @@
#include <bpf/libbpf.h>
#include "bpf_rlimit.h"
+#include "flow_dissector_load.h"
const char *cfg_pin_path = "/sys/fs/bpf/flow_dissector";
const char *cfg_map_name = "jmp_table";
@@ -21,46 +22,13 @@ char *cfg_path_name;
static void load_and_attach_program(void)
{
- struct bpf_program *prog, *main_prog;
- struct bpf_map *prog_array;
- int i, fd, prog_fd, ret;
+ int prog_fd, ret;
struct bpf_object *obj;
- int prog_array_fd;
- ret = bpf_prog_load(cfg_path_name, BPF_PROG_TYPE_FLOW_DISSECTOR, &obj,
- &prog_fd);
+ ret = bpf_flow_load(&obj, cfg_path_name, cfg_section_name,
+ cfg_map_name, &prog_fd);
if (ret)
- error(1, 0, "bpf_prog_load %s", cfg_path_name);
-
- main_prog = bpf_object__find_program_by_title(obj, cfg_section_name);
- if (!main_prog)
- error(1, 0, "bpf_object__find_program_by_title %s",
- cfg_section_name);
-
- prog_fd = bpf_program__fd(main_prog);
- if (prog_fd < 0)
- error(1, 0, "bpf_program__fd");
-
- prog_array = bpf_object__find_map_by_name(obj, cfg_map_name);
- if (!prog_array)
- error(1, 0, "bpf_object__find_map_by_name %s", cfg_map_name);
-
- prog_array_fd = bpf_map__fd(prog_array);
- if (prog_array_fd < 0)
- error(1, 0, "bpf_map__fd %s", cfg_map_name);
-
- i = 0;
- bpf_object__for_each_program(prog, obj) {
- fd = bpf_program__fd(prog);
- if (fd < 0)
- error(1, 0, "bpf_program__fd");
-
- if (fd != prog_fd) {
- printf("%d: %s\n", i, bpf_program__title(prog, false));
- bpf_map_update_elem(prog_array_fd, &i, &fd, BPF_ANY);
- ++i;
- }
- }
+ error(1, 0, "bpf_flow_load %s", cfg_path_name);
ret = bpf_prog_attach(prog_fd, 0 /* Ignore */, BPF_FLOW_DISSECTOR, 0);
if (ret)
@@ -69,7 +37,6 @@ static void load_and_attach_program(void)
ret = bpf_object__pin(obj, cfg_pin_path);
if (ret)
error(1, 0, "bpf_object__pin %s", cfg_pin_path);
-
}
static void detach_program(void)
diff --git a/tools/testing/selftests/bpf/flow_dissector_load.h b/tools/testing/selftests/bpf/flow_dissector_load.h
new file mode 100644
index 000000000000..41dd6959feb0
--- /dev/null
+++ b/tools/testing/selftests/bpf/flow_dissector_load.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+#ifndef FLOW_DISSECTOR_LOAD
+#define FLOW_DISSECTOR_LOAD
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+static inline int bpf_flow_load(struct bpf_object **obj,
+ const char *path,
+ const char *section_name,
+ const char *map_name,
+ int *prog_fd)
+{
+ struct bpf_program *prog, *main_prog;
+ struct bpf_map *prog_array;
+ int prog_array_fd;
+ int ret, fd, i;
+
+ ret = bpf_prog_load(path, BPF_PROG_TYPE_FLOW_DISSECTOR, obj,
+ prog_fd);
+ if (ret)
+ return ret;
+
+ main_prog = bpf_object__find_program_by_title(*obj, section_name);
+ if (!main_prog)
+ return ret;
+
+ *prog_fd = bpf_program__fd(main_prog);
+ if (*prog_fd < 0)
+ return ret;
+
+ prog_array = bpf_object__find_map_by_name(*obj, map_name);
+ if (!prog_array)
+ return ret;
+
+ prog_array_fd = bpf_map__fd(prog_array);
+ if (prog_array_fd < 0)
+ return ret;
+
+ i = 0;
+ bpf_object__for_each_program(prog, *obj) {
+ fd = bpf_program__fd(prog);
+ if (fd < 0)
+ return fd;
+
+ if (fd != *prog_fd) {
+ bpf_map_update_elem(prog_array_fd, &i, &fd, BPF_ANY);
+ ++i;
+ }
+ }
+
+ return 0;
+}
+
+#endif /* FLOW_DISSECTOR_LOAD */
diff --git a/tools/testing/selftests/bpf/prog_tests/.gitignore b/tools/testing/selftests/bpf/prog_tests/.gitignore
new file mode 100644
index 000000000000..45984a364647
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/.gitignore
@@ -0,0 +1 @@
+tests.h
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c
new file mode 100644
index 000000000000..a64f7a02139c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_bpf_obj_id(void)
+{
+ const __u64 array_magic_value = 0xfaceb00c;
+ const __u32 array_key = 0;
+ const int nr_iters = 2;
+ const char *file = "./test_obj_id.o";
+ const char *expected_prog_name = "test_obj_id";
+ const char *expected_map_name = "test_map_id";
+ const __u64 nsec_per_sec = 1000000000;
+
+ struct bpf_object *objs[nr_iters];
+ int prog_fds[nr_iters], map_fds[nr_iters];
+ /* +1 to test for the info_len returned by kernel */
+ struct bpf_prog_info prog_infos[nr_iters + 1];
+ struct bpf_map_info map_infos[nr_iters + 1];
+ /* Each prog only uses one map. +1 to test nr_map_ids
+ * returned by kernel.
+ */
+ __u32 map_ids[nr_iters + 1];
+ char jited_insns[128], xlated_insns[128], zeros[128];
+ __u32 i, next_id, info_len, nr_id_found, duration = 0;
+ struct timespec real_time_ts, boot_time_ts;
+ int err = 0;
+ __u64 array_value;
+ uid_t my_uid = getuid();
+ time_t now, load_time;
+
+ err = bpf_prog_get_fd_by_id(0);
+ CHECK(err >= 0 || errno != ENOENT,
+ "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);
+
+ err = bpf_map_get_fd_by_id(0);
+ CHECK(err >= 0 || errno != ENOENT,
+ "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);
+
+ for (i = 0; i < nr_iters; i++)
+ objs[i] = NULL;
+
+ /* Check bpf_obj_get_info_by_fd() */
+ bzero(zeros, sizeof(zeros));
+ for (i = 0; i < nr_iters; i++) {
+ now = time(NULL);
+ err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER,
+ &objs[i], &prog_fds[i]);
+ /* test_obj_id.o is a dumb prog. It should never fail
+ * to load.
+ */
+ if (err)
+ error_cnt++;
+ assert(!err);
+
+ /* Insert a magic value to the map */
+ map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
+ assert(map_fds[i] >= 0);
+ err = bpf_map_update_elem(map_fds[i], &array_key,
+ &array_magic_value, 0);
+ assert(!err);
+
+ /* Check getting map info */
+ info_len = sizeof(struct bpf_map_info) * 2;
+ bzero(&map_infos[i], info_len);
+ err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
+ &info_len);
+ if (CHECK(err ||
+ map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
+ map_infos[i].key_size != sizeof(__u32) ||
+ map_infos[i].value_size != sizeof(__u64) ||
+ map_infos[i].max_entries != 1 ||
+ map_infos[i].map_flags != 0 ||
+ info_len != sizeof(struct bpf_map_info) ||
+ strcmp((char *)map_infos[i].name, expected_map_name),
+ "get-map-info(fd)",
+ "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
+ err, errno,
+ map_infos[i].type, BPF_MAP_TYPE_ARRAY,
+ info_len, sizeof(struct bpf_map_info),
+ map_infos[i].key_size,
+ map_infos[i].value_size,
+ map_infos[i].max_entries,
+ map_infos[i].map_flags,
+ map_infos[i].name, expected_map_name))
+ goto done;
+
+ /* Check getting prog info */
+ info_len = sizeof(struct bpf_prog_info) * 2;
+ bzero(&prog_infos[i], info_len);
+ bzero(jited_insns, sizeof(jited_insns));
+ bzero(xlated_insns, sizeof(xlated_insns));
+ prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
+ prog_infos[i].jited_prog_len = sizeof(jited_insns);
+ prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
+ prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
+ prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
+ prog_infos[i].nr_map_ids = 2;
+ err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
+ assert(!err);
+ err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
+ assert(!err);
+ err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
+ &info_len);
+ load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
+ + (prog_infos[i].load_time / nsec_per_sec);
+ if (CHECK(err ||
+ prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER ||
+ info_len != sizeof(struct bpf_prog_info) ||
+ (jit_enabled && !prog_infos[i].jited_prog_len) ||
+ (jit_enabled &&
+ !memcmp(jited_insns, zeros, sizeof(zeros))) ||
+ !prog_infos[i].xlated_prog_len ||
+ !memcmp(xlated_insns, zeros, sizeof(zeros)) ||
+ load_time < now - 60 || load_time > now + 60 ||
+ prog_infos[i].created_by_uid != my_uid ||
+ prog_infos[i].nr_map_ids != 1 ||
+ *(int *)(long)prog_infos[i].map_ids != map_infos[i].id ||
+ strcmp((char *)prog_infos[i].name, expected_prog_name),
+ "get-prog-info(fd)",
+ "err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
+ err, errno, i,
+ prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
+ info_len, sizeof(struct bpf_prog_info),
+ jit_enabled,
+ prog_infos[i].jited_prog_len,
+ prog_infos[i].xlated_prog_len,
+ !!memcmp(jited_insns, zeros, sizeof(zeros)),
+ !!memcmp(xlated_insns, zeros, sizeof(zeros)),
+ load_time, now,
+ prog_infos[i].created_by_uid, my_uid,
+ prog_infos[i].nr_map_ids, 1,
+ *(int *)(long)prog_infos[i].map_ids, map_infos[i].id,
+ prog_infos[i].name, expected_prog_name))
+ goto done;
+ }
+
+ /* Check bpf_prog_get_next_id() */
+ nr_id_found = 0;
+ next_id = 0;
+ while (!bpf_prog_get_next_id(next_id, &next_id)) {
+ struct bpf_prog_info prog_info = {};
+ __u32 saved_map_id;
+ int prog_fd;
+
+ info_len = sizeof(prog_info);
+
+ prog_fd = bpf_prog_get_fd_by_id(next_id);
+ if (prog_fd < 0 && errno == ENOENT)
+ /* The bpf_prog is in the dead row */
+ continue;
+ if (CHECK(prog_fd < 0, "get-prog-fd(next_id)",
+ "prog_fd %d next_id %d errno %d\n",
+ prog_fd, next_id, errno))
+ break;
+
+ for (i = 0; i < nr_iters; i++)
+ if (prog_infos[i].id == next_id)
+ break;
+
+ if (i == nr_iters)
+ continue;
+
+ nr_id_found++;
+
+ /* Negative test:
+ * prog_info.nr_map_ids = 1
+ * prog_info.map_ids = NULL
+ */
+ prog_info.nr_map_ids = 1;
+ err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
+ if (CHECK(!err || errno != EFAULT,
+ "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
+ err, errno, EFAULT))
+ break;
+ bzero(&prog_info, sizeof(prog_info));
+ info_len = sizeof(prog_info);
+
+ saved_map_id = *(int *)((long)prog_infos[i].map_ids);
+ prog_info.map_ids = prog_infos[i].map_ids;
+ prog_info.nr_map_ids = 2;
+ err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
+ prog_infos[i].jited_prog_insns = 0;
+ prog_infos[i].xlated_prog_insns = 0;
+ CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
+ memcmp(&prog_info, &prog_infos[i], info_len) ||
+ *(int *)(long)prog_info.map_ids != saved_map_id,
+ "get-prog-info(next_id->fd)",
+ "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n",
+ err, errno, info_len, sizeof(struct bpf_prog_info),
+ memcmp(&prog_info, &prog_infos[i], info_len),
+ *(int *)(long)prog_info.map_ids, saved_map_id);
+ close(prog_fd);
+ }
+ CHECK(nr_id_found != nr_iters,
+ "check total prog id found by get_next_id",
+ "nr_id_found %u(%u)\n",
+ nr_id_found, nr_iters);
+
+ /* Check bpf_map_get_next_id() */
+ nr_id_found = 0;
+ next_id = 0;
+ while (!bpf_map_get_next_id(next_id, &next_id)) {
+ struct bpf_map_info map_info = {};
+ int map_fd;
+
+ info_len = sizeof(map_info);
+
+ map_fd = bpf_map_get_fd_by_id(next_id);
+ if (map_fd < 0 && errno == ENOENT)
+ /* The bpf_map is in the dead row */
+ continue;
+ if (CHECK(map_fd < 0, "get-map-fd(next_id)",
+ "map_fd %d next_id %u errno %d\n",
+ map_fd, next_id, errno))
+ break;
+
+ for (i = 0; i < nr_iters; i++)
+ if (map_infos[i].id == next_id)
+ break;
+
+ if (i == nr_iters)
+ continue;
+
+ nr_id_found++;
+
+ err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);
+ assert(!err);
+
+ err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
+ CHECK(err || info_len != sizeof(struct bpf_map_info) ||
+ memcmp(&map_info, &map_infos[i], info_len) ||
+ array_value != array_magic_value,
+ "check get-map-info(next_id->fd)",
+ "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n",
+ err, errno, info_len, sizeof(struct bpf_map_info),
+ memcmp(&map_info, &map_infos[i], info_len),
+ array_value, array_magic_value);
+
+ close(map_fd);
+ }
+ CHECK(nr_id_found != nr_iters,
+ "check total map id found by get_next_id",
+ "nr_id_found %u(%u)\n",
+ nr_id_found, nr_iters);
+
+done:
+ for (i = 0; i < nr_iters; i++)
+ bpf_object__close(objs[i]);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
new file mode 100644
index 000000000000..bcbd928c96ab
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+#define CHECK_FLOW_KEYS(desc, got, expected) \
+ CHECK(memcmp(&got, &expected, sizeof(got)) != 0, \
+ desc, \
+ "nhoff=%u/%u " \
+ "thoff=%u/%u " \
+ "addr_proto=0x%x/0x%x " \
+ "is_frag=%u/%u " \
+ "is_first_frag=%u/%u " \
+ "is_encap=%u/%u " \
+ "n_proto=0x%x/0x%x " \
+ "sport=%u/%u " \
+ "dport=%u/%u\n", \
+ got.nhoff, expected.nhoff, \
+ got.thoff, expected.thoff, \
+ got.addr_proto, expected.addr_proto, \
+ got.is_frag, expected.is_frag, \
+ got.is_first_frag, expected.is_first_frag, \
+ got.is_encap, expected.is_encap, \
+ got.n_proto, expected.n_proto, \
+ got.sport, expected.sport, \
+ got.dport, expected.dport)
+
+static struct bpf_flow_keys pkt_v4_flow_keys = {
+ .nhoff = 0,
+ .thoff = sizeof(struct iphdr),
+ .addr_proto = ETH_P_IP,
+ .ip_proto = IPPROTO_TCP,
+ .n_proto = __bpf_constant_htons(ETH_P_IP),
+};
+
+static struct bpf_flow_keys pkt_v6_flow_keys = {
+ .nhoff = 0,
+ .thoff = sizeof(struct ipv6hdr),
+ .addr_proto = ETH_P_IPV6,
+ .ip_proto = IPPROTO_TCP,
+ .n_proto = __bpf_constant_htons(ETH_P_IPV6),
+};
+
+void test_flow_dissector(void)
+{
+ struct bpf_flow_keys flow_keys;
+ struct bpf_object *obj;
+ __u32 duration, retval;
+ int err, prog_fd;
+ __u32 size;
+
+ err = bpf_flow_load(&obj, "./bpf_flow.o", "flow_dissector",
+ "jmp_table", &prog_fd);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
+ &flow_keys, &size, &retval, &duration);
+ CHECK(size != sizeof(flow_keys) || err || retval != 1, "ipv4",
+ "err %d errno %d retval %d duration %d size %u/%lu\n",
+ err, errno, retval, duration, size, sizeof(flow_keys));
+ CHECK_FLOW_KEYS("ipv4_flow_keys", flow_keys, pkt_v4_flow_keys);
+
+ err = bpf_prog_test_run(prog_fd, 10, &pkt_v6, sizeof(pkt_v6),
+ &flow_keys, &size, &retval, &duration);
+ CHECK(size != sizeof(flow_keys) || err || retval != 1, "ipv6",
+ "err %d errno %d retval %d duration %d size %u/%lu\n",
+ err, errno, retval, duration, size, sizeof(flow_keys));
+ CHECK_FLOW_KEYS("ipv6_flow_keys", flow_keys, pkt_v6_flow_keys);
+
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
new file mode 100644
index 000000000000..d7bb5beb1c57
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+#define MAX_CNT_RAWTP 10ull
+#define MAX_STACK_RAWTP 100
+struct get_stack_trace_t {
+ int pid;
+ int kern_stack_size;
+ int user_stack_size;
+ int user_stack_buildid_size;
+ __u64 kern_stack[MAX_STACK_RAWTP];
+ __u64 user_stack[MAX_STACK_RAWTP];
+ struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
+};
+
+static int get_stack_print_output(void *data, int size)
+{
+ bool good_kern_stack = false, good_user_stack = false;
+ const char *nonjit_func = "___bpf_prog_run";
+ struct get_stack_trace_t *e = data;
+ int i, num_stack;
+ static __u64 cnt;
+ struct ksym *ks;
+
+ cnt++;
+
+ if (size < sizeof(struct get_stack_trace_t)) {
+ __u64 *raw_data = data;
+ bool found = false;
+
+ num_stack = size / sizeof(__u64);
+ /* If jit is enabled, we do not have a good way to
+ * verify the sanity of the kernel stack. So we
+ * just assume it is good if the stack is not empty.
+ * This could be improved in the future.
+ */
+ if (jit_enabled) {
+ found = num_stack > 0;
+ } else {
+ for (i = 0; i < num_stack; i++) {
+ ks = ksym_search(raw_data[i]);
+ if (strcmp(ks->name, nonjit_func) == 0) {
+ found = true;
+ break;
+ }
+ }
+ }
+ if (found) {
+ good_kern_stack = true;
+ good_user_stack = true;
+ }
+ } else {
+ num_stack = e->kern_stack_size / sizeof(__u64);
+ if (jit_enabled) {
+ good_kern_stack = num_stack > 0;
+ } else {
+ for (i = 0; i < num_stack; i++) {
+ ks = ksym_search(e->kern_stack[i]);
+ if (strcmp(ks->name, nonjit_func) == 0) {
+ good_kern_stack = true;
+ break;
+ }
+ }
+ }
+ if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0)
+ good_user_stack = true;
+ }
+ if (!good_kern_stack || !good_user_stack)
+ return LIBBPF_PERF_EVENT_ERROR;
+
+ if (cnt == MAX_CNT_RAWTP)
+ return LIBBPF_PERF_EVENT_DONE;
+
+ return LIBBPF_PERF_EVENT_CONT;
+}
+
+void test_get_stack_raw_tp(void)
+{
+ const char *file = "./test_get_stack_rawtp.o";
+ int i, efd, err, prog_fd, pmu_fd, perfmap_fd;
+ struct perf_event_attr attr = {};
+ struct timespec tv = {0, 10};
+ __u32 key = 0, duration = 0;
+ struct bpf_object *obj;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
+ if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
+ return;
+
+ efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
+ if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
+ goto close_prog;
+
+ perfmap_fd = bpf_find_map(__func__, obj, "perfmap");
+ if (CHECK(perfmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
+ perfmap_fd, errno))
+ goto close_prog;
+
+ err = load_kallsyms();
+ if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))
+ goto close_prog;
+
+ attr.sample_type = PERF_SAMPLE_RAW;
+ attr.type = PERF_TYPE_SOFTWARE;
+ attr.config = PERF_COUNT_SW_BPF_OUTPUT;
+ pmu_fd = syscall(__NR_perf_event_open, &attr, getpid()/*pid*/, -1/*cpu*/,
+ -1/*group_fd*/, 0);
+ if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
+ errno))
+ goto close_prog;
+
+ err = bpf_map_update_elem(perfmap_fd, &key, &pmu_fd, BPF_ANY);
+ if (CHECK(err < 0, "bpf_map_update_elem", "err %d errno %d\n", err,
+ errno))
+ goto close_prog;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+ if (CHECK(err < 0, "ioctl PERF_EVENT_IOC_ENABLE", "err %d errno %d\n",
+ err, errno))
+ goto close_prog;
+
+ err = perf_event_mmap(pmu_fd);
+ if (CHECK(err < 0, "perf_event_mmap", "err %d errno %d\n", err, errno))
+ goto close_prog;
+
+ /* trigger some syscall action */
+ for (i = 0; i < MAX_CNT_RAWTP; i++)
+ nanosleep(&tv, NULL);
+
+ err = perf_event_poller(pmu_fd, get_stack_print_output);
+ if (CHECK(err < 0, "perf_event_poller", "err %d errno %d\n", err, errno))
+ goto close_prog;
+
+ goto close_prog_noerr;
+close_prog:
+ error_cnt++;
+close_prog_noerr:
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/l4lb_all.c b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c
new file mode 100644
index 000000000000..20ddca830e68
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+static void test_l4lb(const char *file)
+{
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ struct vip key = {.protocol = 6};
+ struct vip_meta {
+ __u32 flags;
+ __u32 vip_num;
+ } value = {.vip_num = VIP_NUM};
+ __u32 stats_key = VIP_NUM;
+ struct vip_stats {
+ __u64 bytes;
+ __u64 pkts;
+ } stats[nr_cpus];
+ struct real_definition {
+ union {
+ __be32 dst;
+ __be32 dstv6[4];
+ };
+ __u8 flags;
+ } real_def = {.dst = MAGIC_VAL};
+ __u32 ch_key = 11, real_num = 3;
+ __u32 duration, retval, size;
+ int err, i, prog_fd, map_fd;
+ __u64 bytes = 0, pkts = 0;
+ struct bpf_object *obj;
+ char buf[128];
+ u32 *magic = (u32 *)buf;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ map_fd = bpf_find_map(__func__, obj, "vip_map");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_update_elem(map_fd, &key, &value, 0);
+
+ map_fd = bpf_find_map(__func__, obj, "ch_rings");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
+
+ map_fd = bpf_find_map(__func__, obj, "reals");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
+
+ err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
+ buf, &size, &retval, &duration);
+ CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 ||
+ *magic != MAGIC_VAL, "ipv4",
+ "err %d errno %d retval %d size %d magic %x\n",
+ err, errno, retval, size, *magic);
+
+ err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
+ buf, &size, &retval, &duration);
+ CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 ||
+ *magic != MAGIC_VAL, "ipv6",
+ "err %d errno %d retval %d size %d magic %x\n",
+ err, errno, retval, size, *magic);
+
+ map_fd = bpf_find_map(__func__, obj, "stats");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_lookup_elem(map_fd, &stats_key, stats);
+ for (i = 0; i < nr_cpus; i++) {
+ bytes += stats[i].bytes;
+ pkts += stats[i].pkts;
+ }
+ if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
+ error_cnt++;
+ printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts);
+ }
+out:
+ bpf_object__close(obj);
+}
+
+void test_l4lb_all(void)
+{
+ const char *file1 = "./test_l4lb.o";
+ const char *file2 = "./test_l4lb_noinline.o";
+
+ test_l4lb(file1);
+ test_l4lb(file2);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/map_lock.c b/tools/testing/selftests/bpf/prog_tests/map_lock.c
new file mode 100644
index 000000000000..90f8a206340a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/map_lock.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+static void *parallel_map_access(void *arg)
+{
+ int err, map_fd = *(u32 *) arg;
+ int vars[17], i, j, rnd, key = 0;
+
+ for (i = 0; i < 10000; i++) {
+ err = bpf_map_lookup_elem_flags(map_fd, &key, vars, BPF_F_LOCK);
+ if (err) {
+ printf("lookup failed\n");
+ error_cnt++;
+ goto out;
+ }
+ if (vars[0] != 0) {
+ printf("lookup #%d var[0]=%d\n", i, vars[0]);
+ error_cnt++;
+ goto out;
+ }
+ rnd = vars[1];
+ for (j = 2; j < 17; j++) {
+ if (vars[j] == rnd)
+ continue;
+ printf("lookup #%d var[1]=%d var[%d]=%d\n",
+ i, rnd, j, vars[j]);
+ error_cnt++;
+ goto out;
+ }
+ }
+out:
+ pthread_exit(arg);
+}
+
+void test_map_lock(void)
+{
+ const char *file = "./test_map_lock.o";
+ int prog_fd, map_fd[2], vars[17] = {};
+ pthread_t thread_id[6];
+ struct bpf_object *obj;
+ int err = 0, key = 0, i;
+ void *ret;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd);
+ if (err) {
+ printf("test_map_lock:bpf_prog_load errno %d\n", errno);
+ goto close_prog;
+ }
+ map_fd[0] = bpf_find_map(__func__, obj, "hash_map");
+ if (map_fd[0] < 0)
+ goto close_prog;
+ map_fd[1] = bpf_find_map(__func__, obj, "array_map");
+ if (map_fd[1] < 0)
+ goto close_prog;
+
+ bpf_map_update_elem(map_fd[0], &key, vars, BPF_F_LOCK);
+
+ for (i = 0; i < 4; i++)
+ assert(pthread_create(&thread_id[i], NULL,
+ &spin_lock_thread, &prog_fd) == 0);
+ for (i = 4; i < 6; i++)
+ assert(pthread_create(&thread_id[i], NULL,
+ &parallel_map_access, &map_fd[i - 4]) == 0);
+ for (i = 0; i < 4; i++)
+ assert(pthread_join(thread_id[i], &ret) == 0 &&
+ ret == (void *)&prog_fd);
+ for (i = 4; i < 6; i++)
+ assert(pthread_join(thread_id[i], &ret) == 0 &&
+ ret == (void *)&map_fd[i - 4]);
+ goto close_prog_noerr;
+close_prog:
+ error_cnt++;
+close_prog_noerr:
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/obj_name.c b/tools/testing/selftests/bpf/prog_tests/obj_name.c
new file mode 100644
index 000000000000..e178416bddad
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/obj_name.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_obj_name(void)
+{
+ struct {
+ const char *name;
+ int success;
+ int expected_errno;
+ } tests[] = {
+ { "", 1, 0 },
+ { "_123456789ABCDE", 1, 0 },
+ { "_123456789ABCDEF", 0, EINVAL },
+ { "_123456789ABCD\n", 0, EINVAL },
+ };
+ struct bpf_insn prog[] = {
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ __u32 duration = 0;
+ int i;
+
+ for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
+ size_t name_len = strlen(tests[i].name) + 1;
+ union bpf_attr attr;
+ size_t ncopy;
+ int fd;
+
+ /* test different attr.prog_name during BPF_PROG_LOAD */
+ ncopy = name_len < sizeof(attr.prog_name) ?
+ name_len : sizeof(attr.prog_name);
+ bzero(&attr, sizeof(attr));
+ attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
+ attr.insn_cnt = 2;
+ attr.insns = ptr_to_u64(prog);
+ attr.license = ptr_to_u64("");
+ memcpy(attr.prog_name, tests[i].name, ncopy);
+
+ fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
+ CHECK((tests[i].success && fd < 0) ||
+ (!tests[i].success && fd != -1) ||
+ (!tests[i].success && errno != tests[i].expected_errno),
+ "check-bpf-prog-name",
+ "fd %d(%d) errno %d(%d)\n",
+ fd, tests[i].success, errno, tests[i].expected_errno);
+
+ if (fd != -1)
+ close(fd);
+
+ /* test different attr.map_name during BPF_MAP_CREATE */
+ ncopy = name_len < sizeof(attr.map_name) ?
+ name_len : sizeof(attr.map_name);
+ bzero(&attr, sizeof(attr));
+ attr.map_type = BPF_MAP_TYPE_ARRAY;
+ attr.key_size = 4;
+ attr.value_size = 4;
+ attr.max_entries = 1;
+ attr.map_flags = 0;
+ memcpy(attr.map_name, tests[i].name, ncopy);
+ fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
+ CHECK((tests[i].success && fd < 0) ||
+ (!tests[i].success && fd != -1) ||
+ (!tests[i].success && errno != tests[i].expected_errno),
+ "check-bpf-map-name",
+ "fd %d(%d) errno %d(%d)\n",
+ fd, tests[i].success, errno, tests[i].expected_errno);
+
+ if (fd != -1)
+ close(fd);
+ }
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/pkt_access.c b/tools/testing/selftests/bpf/prog_tests/pkt_access.c
new file mode 100644
index 000000000000..4ecfd721a044
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/pkt_access.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_pkt_access(void)
+{
+ const char *file = "./test_pkt_access.o";
+ struct bpf_object *obj;
+ __u32 duration, retval;
+ int err, prog_fd;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
+ NULL, NULL, &retval, &duration);
+ CHECK(err || retval, "ipv4",
+ "err %d errno %d retval %d duration %d\n",
+ err, errno, retval, duration);
+
+ err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6),
+ NULL, NULL, &retval, &duration);
+ CHECK(err || retval, "ipv6",
+ "err %d errno %d retval %d duration %d\n",
+ err, errno, retval, duration);
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c b/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c
new file mode 100644
index 000000000000..ac0d43435806
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_pkt_md_access(void)
+{
+ const char *file = "./test_pkt_md_access.o";
+ struct bpf_object *obj;
+ __u32 duration, retval;
+ int err, prog_fd;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
+ NULL, NULL, &retval, &duration);
+ CHECK(err || retval, "",
+ "err %d errno %d retval %d duration %d\n",
+ err, errno, retval, duration);
+
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c b/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c
new file mode 100644
index 000000000000..5dd89b941f53
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_prog_run_xattr(void)
+{
+ const char *file = "./test_pkt_access.o";
+ struct bpf_object *obj;
+ char buf[10];
+ int err;
+ struct bpf_prog_test_run_attr tattr = {
+ .repeat = 1,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .data_out = buf,
+ .data_size_out = 5,
+ };
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj,
+ &tattr.prog_fd);
+ if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno))
+ return;
+
+ memset(buf, 0, sizeof(buf));
+
+ err = bpf_prog_test_run_xattr(&tattr);
+ CHECK_ATTR(err != -1 || errno != ENOSPC || tattr.retval, "run",
+ "err %d errno %d retval %d\n", err, errno, tattr.retval);
+
+ CHECK_ATTR(tattr.data_size_out != sizeof(pkt_v4), "data_size_out",
+ "incorrect output size, want %lu have %u\n",
+ sizeof(pkt_v4), tattr.data_size_out);
+
+ CHECK_ATTR(buf[5] != 0, "overflow",
+ "BPF_PROG_TEST_RUN ignored size hint\n");
+
+ tattr.data_out = NULL;
+ tattr.data_size_out = 0;
+ errno = 0;
+
+ err = bpf_prog_test_run_xattr(&tattr);
+ CHECK_ATTR(err || errno || tattr.retval, "run_no_output",
+ "err %d errno %d retval %d\n", err, errno, tattr.retval);
+
+ tattr.data_size_out = 1;
+ err = bpf_prog_test_run_xattr(&tattr);
+ CHECK_ATTR(err != -EINVAL, "run_wrong_size_out", "err %d\n", err);
+
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c
new file mode 100644
index 000000000000..e60cd5ff1f55
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+enum {
+ QUEUE,
+ STACK,
+};
+
+static void test_queue_stack_map_by_type(int type)
+{
+ const int MAP_SIZE = 32;
+ __u32 vals[MAP_SIZE], duration, retval, size, val;
+ int i, err, prog_fd, map_in_fd, map_out_fd;
+ char file[32], buf[128];
+ struct bpf_object *obj;
+ struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
+
+ /* Fill test values to be used */
+ for (i = 0; i < MAP_SIZE; i++)
+ vals[i] = rand();
+
+ if (type == QUEUE)
+ strncpy(file, "./test_queue_map.o", sizeof(file));
+ else if (type == STACK)
+ strncpy(file, "./test_stack_map.o", sizeof(file));
+ else
+ return;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ map_in_fd = bpf_find_map(__func__, obj, "map_in");
+ if (map_in_fd < 0)
+ goto out;
+
+ map_out_fd = bpf_find_map(__func__, obj, "map_out");
+ if (map_out_fd < 0)
+ goto out;
+
+ /* Push 32 elements to the input map */
+ for (i = 0; i < MAP_SIZE; i++) {
+ err = bpf_map_update_elem(map_in_fd, NULL, &vals[i], 0);
+ if (err) {
+ error_cnt++;
+ goto out;
+ }
+ }
+
+ /* The eBPF program pushes iph.saddr in the output map,
+ * pops the input map and saves this value in iph.daddr
+ */
+ for (i = 0; i < MAP_SIZE; i++) {
+ if (type == QUEUE) {
+ val = vals[i];
+ pkt_v4.iph.saddr = vals[i] * 5;
+ } else if (type == STACK) {
+ val = vals[MAP_SIZE - 1 - i];
+ pkt_v4.iph.saddr = vals[MAP_SIZE - 1 - i] * 5;
+ }
+
+ err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
+ buf, &size, &retval, &duration);
+ if (err || retval || size != sizeof(pkt_v4) ||
+ iph->daddr != val)
+ break;
+ }
+
+ CHECK(err || retval || size != sizeof(pkt_v4) || iph->daddr != val,
+ "bpf_map_pop_elem",
+ "err %d errno %d retval %d size %d iph->daddr %u\n",
+ err, errno, retval, size, iph->daddr);
+
+ /* Queue is empty, program should return TC_ACT_SHOT */
+ err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
+ buf, &size, &retval, &duration);
+ CHECK(err || retval != 2 /* TC_ACT_SHOT */|| size != sizeof(pkt_v4),
+ "check-queue-stack-map-empty",
+ "err %d errno %d retval %d size %d\n",
+ err, errno, retval, size);
+
+ /* Check that the program pushed elements correctly */
+ for (i = 0; i < MAP_SIZE; i++) {
+ err = bpf_map_lookup_and_delete_elem(map_out_fd, NULL, &val);
+ if (err || val != vals[i] * 5)
+ break;
+ }
+
+ CHECK(i != MAP_SIZE && (err || val != vals[i] * 5),
+ "bpf_map_push_elem", "err %d value %u\n", err, val);
+
+out:
+ pkt_v4.iph.saddr = 0;
+ bpf_object__close(obj);
+}
+
+void test_queue_stack_map(void)
+{
+ test_queue_stack_map_by_type(QUEUE);
+ test_queue_stack_map_by_type(STACK);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
new file mode 100644
index 000000000000..5633be43828f
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+static int libbpf_debug_print(enum libbpf_print_level level,
+ const char *format, va_list args)
+{
+ if (level == LIBBPF_DEBUG)
+ return 0;
+
+ return vfprintf(stderr, format, args);
+}
+
+void test_reference_tracking(void)
+{
+ const char *file = "./test_sk_lookup_kern.o";
+ struct bpf_object *obj;
+ struct bpf_program *prog;
+ __u32 duration = 0;
+ int err = 0;
+
+ obj = bpf_object__open(file);
+ if (IS_ERR(obj)) {
+ error_cnt++;
+ return;
+ }
+
+ bpf_object__for_each_program(prog, obj) {
+ const char *title;
+
+ /* Ignore .text sections */
+ title = bpf_program__title(prog, false);
+ if (strstr(title, ".text") != NULL)
+ continue;
+
+ bpf_program__set_type(prog, BPF_PROG_TYPE_SCHED_CLS);
+
+ /* Expect verifier failure if test name has 'fail' */
+ if (strstr(title, "fail") != NULL) {
+ libbpf_set_print(NULL);
+ err = !bpf_program__load(prog, "GPL", 0);
+ libbpf_set_print(libbpf_debug_print);
+ } else {
+ err = bpf_program__load(prog, "GPL", 0);
+ }
+ CHECK(err, title, "\n");
+ }
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/signal_pending.c b/tools/testing/selftests/bpf/prog_tests/signal_pending.c
new file mode 100644
index 000000000000..996e808f43a2
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/signal_pending.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+static void sigalrm_handler(int s) {}
+static struct sigaction sigalrm_action = {
+ .sa_handler = sigalrm_handler,
+};
+
+static void test_signal_pending_by_type(enum bpf_prog_type prog_type)
+{
+ struct bpf_insn prog[4096];
+ struct itimerval timeo = {
+ .it_value.tv_usec = 100000, /* 100ms */
+ };
+ __u32 duration = 0, retval;
+ int prog_fd;
+ int err;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(prog); i++)
+ prog[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0);
+ prog[ARRAY_SIZE(prog) - 1] = BPF_EXIT_INSN();
+
+ prog_fd = bpf_load_program(prog_type, prog, ARRAY_SIZE(prog),
+ "GPL", 0, NULL, 0);
+ CHECK(prog_fd < 0, "test-run", "errno %d\n", errno);
+
+ err = sigaction(SIGALRM, &sigalrm_action, NULL);
+ CHECK(err, "test-run-signal-sigaction", "errno %d\n", errno);
+
+ err = setitimer(ITIMER_REAL, &timeo, NULL);
+ CHECK(err, "test-run-signal-timer", "errno %d\n", errno);
+
+ err = bpf_prog_test_run(prog_fd, 0xffffffff, &pkt_v4, sizeof(pkt_v4),
+ NULL, NULL, &retval, &duration);
+ CHECK(duration > 500000000, /* 500ms */
+ "test-run-signal-duration",
+ "duration %dns > 500ms\n",
+ duration);
+
+ signal(SIGALRM, SIG_DFL);
+}
+
+void test_signal_pending(enum bpf_prog_type prog_type)
+{
+ test_signal_pending_by_type(BPF_PROG_TYPE_SOCKET_FILTER);
+ test_signal_pending_by_type(BPF_PROG_TYPE_FLOW_DISSECTOR);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/spinlock.c b/tools/testing/selftests/bpf/prog_tests/spinlock.c
new file mode 100644
index 000000000000..9a573a9675d7
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/spinlock.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_spinlock(void)
+{
+ const char *file = "./test_spin_lock.o";
+ pthread_t thread_id[4];
+ struct bpf_object *obj;
+ int prog_fd;
+ int err = 0, i;
+ void *ret;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd);
+ if (err) {
+ printf("test_spin_lock:bpf_prog_load errno %d\n", errno);
+ goto close_prog;
+ }
+ for (i = 0; i < 4; i++)
+ assert(pthread_create(&thread_id[i], NULL,
+ &spin_lock_thread, &prog_fd) == 0);
+ for (i = 0; i < 4; i++)
+ assert(pthread_join(thread_id[i], &ret) == 0 &&
+ ret == (void *)&prog_fd);
+ goto close_prog_noerr;
+close_prog:
+ error_cnt++;
+close_prog_noerr:
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c
new file mode 100644
index 000000000000..3aab2b083c71
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_stacktrace_build_id(void)
+{
+ int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
+ const char *file = "./test_stacktrace_build_id.o";
+ int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
+ struct perf_event_attr attr = {};
+ __u32 key, previous_key, val, duration = 0;
+ struct bpf_object *obj;
+ char buf[256];
+ int i, j;
+ struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
+ int build_id_matches = 0;
+ int retry = 1;
+
+retry:
+ err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
+ if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* Get the ID for the random/urandom_read tracepoint */
+ snprintf(buf, sizeof(buf),
+ "/sys/kernel/debug/tracing/events/random/urandom_read/id");
+ efd = open(buf, O_RDONLY, 0);
+ if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
+ goto close_prog;
+
+ bytes = read(efd, buf, sizeof(buf));
+ close(efd);
+ if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
+ "read", "bytes %d errno %d\n", bytes, errno))
+ goto close_prog;
+
+ /* Open the perf event and attach bpf program */
+ attr.config = strtol(buf, NULL, 0);
+ attr.type = PERF_TYPE_TRACEPOINT;
+ attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
+ attr.sample_period = 1;
+ attr.wakeup_events = 1;
+ pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+ 0 /* cpu 0 */, -1 /* group id */,
+ 0 /* flags */);
+ if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
+ pmu_fd, errno))
+ goto close_prog;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+ if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
+ err, errno))
+ goto close_pmu;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
+ if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
+ err, errno))
+ goto disable_pmu;
+
+ /* find map fds */
+ control_map_fd = bpf_find_map(__func__, obj, "control_map");
+ if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
+ if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
+ if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
+ err, errno))
+ goto disable_pmu;
+
+ stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
+ if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
+ == 0);
+ assert(system("./urandom_read") == 0);
+ /* disable stack trace collection */
+ key = 0;
+ val = 1;
+ bpf_map_update_elem(control_map_fd, &key, &val, 0);
+
+ /* for every element in stackid_hmap, we can find a corresponding one
+ * in stackmap, and vice versa.
+ */
+ err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
+ if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
+ if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ err = extract_build_id(buf, 256);
+
+ if (CHECK(err, "get build_id with readelf",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
+ if (CHECK(err, "get_next_key from stackmap",
+ "err %d, errno %d\n", err, errno))
+ goto disable_pmu;
+
+ do {
+ char build_id[64];
+
+ err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
+ if (CHECK(err, "lookup_elem from stackmap",
+ "err %d, errno %d\n", err, errno))
+ goto disable_pmu;
+ for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
+ if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
+ id_offs[i].offset != 0) {
+ for (j = 0; j < 20; ++j)
+ sprintf(build_id + 2 * j, "%02x",
+ id_offs[i].build_id[j] & 0xff);
+ if (strstr(buf, build_id) != NULL)
+ build_id_matches = 1;
+ }
+ previous_key = key;
+ } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
+
+ /* stack_map_get_build_id_offset() is racy and sometimes can return
+ * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
+ * try it one more time.
+ */
+ if (build_id_matches < 1 && retry--) {
+ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+ close(pmu_fd);
+ bpf_object__close(obj);
+ printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
+ __func__);
+ goto retry;
+ }
+
+ if (CHECK(build_id_matches < 1, "build id match",
+ "Didn't find expected build ID from the map\n"))
+ goto disable_pmu;
+
+ stack_trace_len = PERF_MAX_STACK_DEPTH
+ * sizeof(struct bpf_stack_build_id);
+ err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
+ CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
+ "err %d errno %d\n", err, errno);
+
+disable_pmu:
+ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+
+close_pmu:
+ close(pmu_fd);
+
+close_prog:
+ bpf_object__close(obj);
+
+out:
+ return;
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
new file mode 100644
index 000000000000..8a114bb1c379
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_stacktrace_build_id_nmi(void)
+{
+ int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
+ const char *file = "./test_stacktrace_build_id.o";
+ int err, pmu_fd, prog_fd;
+ struct perf_event_attr attr = {
+ .sample_freq = 5000,
+ .freq = 1,
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ };
+ __u32 key, previous_key, val, duration = 0;
+ struct bpf_object *obj;
+ char buf[256];
+ int i, j;
+ struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
+ int build_id_matches = 0;
+ int retry = 1;
+
+retry:
+ err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
+ if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+ return;
+
+ pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+ 0 /* cpu 0 */, -1 /* group id */,
+ 0 /* flags */);
+ if (CHECK(pmu_fd < 0, "perf_event_open",
+ "err %d errno %d. Does the test host support PERF_COUNT_HW_CPU_CYCLES?\n",
+ pmu_fd, errno))
+ goto close_prog;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+ if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
+ err, errno))
+ goto close_pmu;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
+ if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
+ err, errno))
+ goto disable_pmu;
+
+ /* find map fds */
+ control_map_fd = bpf_find_map(__func__, obj, "control_map");
+ if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
+ if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
+ if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
+ err, errno))
+ goto disable_pmu;
+
+ stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
+ if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
+ == 0);
+ assert(system("taskset 0x1 ./urandom_read 100000") == 0);
+ /* disable stack trace collection */
+ key = 0;
+ val = 1;
+ bpf_map_update_elem(control_map_fd, &key, &val, 0);
+
+ /* for every element in stackid_hmap, we can find a corresponding one
+ * in stackmap, and vice versa.
+ */
+ err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
+ if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
+ if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ err = extract_build_id(buf, 256);
+
+ if (CHECK(err, "get build_id with readelf",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
+ if (CHECK(err, "get_next_key from stackmap",
+ "err %d, errno %d\n", err, errno))
+ goto disable_pmu;
+
+ do {
+ char build_id[64];
+
+ err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
+ if (CHECK(err, "lookup_elem from stackmap",
+ "err %d, errno %d\n", err, errno))
+ goto disable_pmu;
+ for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
+ if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
+ id_offs[i].offset != 0) {
+ for (j = 0; j < 20; ++j)
+ sprintf(build_id + 2 * j, "%02x",
+ id_offs[i].build_id[j] & 0xff);
+ if (strstr(buf, build_id) != NULL)
+ build_id_matches = 1;
+ }
+ previous_key = key;
+ } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
+
+ /* stack_map_get_build_id_offset() is racy and sometimes can return
+ * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
+ * try it one more time.
+ */
+ if (build_id_matches < 1 && retry--) {
+ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+ close(pmu_fd);
+ bpf_object__close(obj);
+ printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
+ __func__);
+ goto retry;
+ }
+
+ if (CHECK(build_id_matches < 1, "build id match",
+ "Didn't find expected build ID from the map\n"))
+ goto disable_pmu;
+
+ /*
+ * We intentionally skip compare_stack_ips(). This is because we
+ * only support one in_nmi() ips-to-build_id translation per cpu
+ * at any time, thus stack_amap here will always fall back to
+ * BPF_STACK_BUILD_ID_IP;
+ */
+
+disable_pmu:
+ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+
+close_pmu:
+ close(pmu_fd);
+
+close_prog:
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c
new file mode 100644
index 000000000000..2bfd50a0d6d1
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_stacktrace_map(void)
+{
+ int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
+ const char *file = "./test_stacktrace_map.o";
+ int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
+ struct perf_event_attr attr = {};
+ __u32 key, val, duration = 0;
+ struct bpf_object *obj;
+ char buf[256];
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
+ if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+ return;
+
+ /* Get the ID for the sched/sched_switch tracepoint */
+ snprintf(buf, sizeof(buf),
+ "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
+ efd = open(buf, O_RDONLY, 0);
+ if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
+ goto close_prog;
+
+ bytes = read(efd, buf, sizeof(buf));
+ close(efd);
+ if (bytes <= 0 || bytes >= sizeof(buf))
+ goto close_prog;
+
+ /* Open the perf event and attach bpf program */
+ attr.config = strtol(buf, NULL, 0);
+ attr.type = PERF_TYPE_TRACEPOINT;
+ attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
+ attr.sample_period = 1;
+ attr.wakeup_events = 1;
+ pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+ 0 /* cpu 0 */, -1 /* group id */,
+ 0 /* flags */);
+ if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
+ pmu_fd, errno))
+ goto close_prog;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+ if (err)
+ goto disable_pmu;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
+ if (err)
+ goto disable_pmu;
+
+ /* find map fds */
+ control_map_fd = bpf_find_map(__func__, obj, "control_map");
+ if (control_map_fd < 0)
+ goto disable_pmu;
+
+ stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
+ if (stackid_hmap_fd < 0)
+ goto disable_pmu;
+
+ stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
+ if (stackmap_fd < 0)
+ goto disable_pmu;
+
+ stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
+ if (stack_amap_fd < 0)
+ goto disable_pmu;
+
+ /* give some time for the bpf program to run */
+ sleep(1);
+
+ /* disable stack trace collection */
+ key = 0;
+ val = 1;
+ bpf_map_update_elem(control_map_fd, &key, &val, 0);
+
+ /* for every element in stackid_hmap, we can find a corresponding one
+ * in stackmap, and vice versa.
+ */
+ err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
+ if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu_noerr;
+
+ err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
+ if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu_noerr;
+
+ stack_trace_len = PERF_MAX_STACK_DEPTH * sizeof(__u64);
+ err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
+ if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu_noerr;
+
+ goto disable_pmu_noerr;
+disable_pmu:
+ error_cnt++;
+disable_pmu_noerr:
+ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+ close(pmu_fd);
+close_prog:
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c
new file mode 100644
index 000000000000..1f8387d80fd7
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_stacktrace_map_raw_tp(void)
+{
+ int control_map_fd, stackid_hmap_fd, stackmap_fd;
+ const char *file = "./test_stacktrace_map.o";
+ int efd, err, prog_fd;
+ __u32 key, val, duration = 0;
+ struct bpf_object *obj;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
+ if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
+ return;
+
+ efd = bpf_raw_tracepoint_open("sched_switch", prog_fd);
+ if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
+ goto close_prog;
+
+ /* find map fds */
+ control_map_fd = bpf_find_map(__func__, obj, "control_map");
+ if (control_map_fd < 0)
+ goto close_prog;
+
+ stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
+ if (stackid_hmap_fd < 0)
+ goto close_prog;
+
+ stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
+ if (stackmap_fd < 0)
+ goto close_prog;
+
+ /* give the bpf program some time to run */
+ sleep(1);
+
+ /* disable stack trace collection */
+ key = 0;
+ val = 1;
+ bpf_map_update_elem(control_map_fd, &key, &val, 0);
+
+ /* for every element in stackid_hmap, we can find a corresponding one
+ * in stackmap, and vice versa.
+ */
+ err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
+ if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
+ "err %d errno %d\n", err, errno))
+ goto close_prog;
+
+ err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
+ if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
+ "err %d errno %d\n", err, errno))
+ goto close_prog;
+
+ goto close_prog_noerr;
+close_prog:
+ error_cnt++;
+close_prog_noerr:
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c b/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c
new file mode 100644
index 000000000000..958a3d88de99
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_task_fd_query_rawtp(void)
+{
+ const char *file = "./test_get_stack_rawtp.o";
+ __u64 probe_offset, probe_addr;
+ __u32 len, prog_id, fd_type;
+ struct bpf_object *obj;
+ int efd, err, prog_fd;
+ __u32 duration = 0;
+ char buf[256];
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
+ if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
+ return;
+
+ efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
+ if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
+ goto close_prog;
+
+ /* query (getpid(), efd) */
+ len = sizeof(buf);
+ err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
+ &fd_type, &probe_offset, &probe_addr);
+ if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
+ errno))
+ goto close_prog;
+
+ err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
+ strcmp(buf, "sys_enter") == 0;
+ if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
+ fd_type, buf))
+ goto close_prog;
+
+ /* test zero len */
+ len = 0;
+ err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
+ &fd_type, &probe_offset, &probe_addr);
+ if (CHECK(err < 0, "bpf_task_fd_query (len = 0)", "err %d errno %d\n",
+ err, errno))
+ goto close_prog;
+ err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
+ len == strlen("sys_enter");
+ if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
+ goto close_prog;
+
+ /* test empty buffer */
+ len = sizeof(buf);
+ err = bpf_task_fd_query(getpid(), efd, 0, 0, &len, &prog_id,
+ &fd_type, &probe_offset, &probe_addr);
+ if (CHECK(err < 0, "bpf_task_fd_query (buf = 0)", "err %d errno %d\n",
+ err, errno))
+ goto close_prog;
+ err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
+ len == strlen("sys_enter");
+ if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
+ goto close_prog;
+
+ /* test smaller buffer */
+ len = 3;
+ err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
+ &fd_type, &probe_offset, &probe_addr);
+ if (CHECK(err >= 0 || errno != ENOSPC, "bpf_task_fd_query (len = 3)",
+ "err %d errno %d\n", err, errno))
+ goto close_prog;
+ err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
+ len == strlen("sys_enter") &&
+ strcmp(buf, "sy") == 0;
+ if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
+ goto close_prog;
+
+ goto close_prog_noerr;
+close_prog:
+ error_cnt++;
+close_prog_noerr:
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c b/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c
new file mode 100644
index 000000000000..d636a4f39476
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+static void test_task_fd_query_tp_core(const char *probe_name,
+ const char *tp_name)
+{
+ const char *file = "./test_tracepoint.o";
+ int err, bytes, efd, prog_fd, pmu_fd;
+ struct perf_event_attr attr = {};
+ __u64 probe_offset, probe_addr;
+ __u32 len, prog_id, fd_type;
+ struct bpf_object *obj;
+ __u32 duration = 0;
+ char buf[256];
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
+ if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno))
+ goto close_prog;
+
+ snprintf(buf, sizeof(buf),
+ "/sys/kernel/debug/tracing/events/%s/id", probe_name);
+ efd = open(buf, O_RDONLY, 0);
+ if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
+ goto close_prog;
+ bytes = read(efd, buf, sizeof(buf));
+ close(efd);
+ if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read",
+ "bytes %d errno %d\n", bytes, errno))
+ goto close_prog;
+
+ attr.config = strtol(buf, NULL, 0);
+ attr.type = PERF_TYPE_TRACEPOINT;
+ attr.sample_type = PERF_SAMPLE_RAW;
+ attr.sample_period = 1;
+ attr.wakeup_events = 1;
+ pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+ 0 /* cpu 0 */, -1 /* group id */,
+ 0 /* flags */);
+ if (CHECK(err, "perf_event_open", "err %d errno %d\n", err, errno))
+ goto close_pmu;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+ if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err,
+ errno))
+ goto close_pmu;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
+ if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err,
+ errno))
+ goto close_pmu;
+
+ /* query (getpid(), pmu_fd) */
+ len = sizeof(buf);
+ err = bpf_task_fd_query(getpid(), pmu_fd, 0, buf, &len, &prog_id,
+ &fd_type, &probe_offset, &probe_addr);
+ if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
+ errno))
+ goto close_pmu;
+
+ err = (fd_type == BPF_FD_TYPE_TRACEPOINT) && !strcmp(buf, tp_name);
+ if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
+ fd_type, buf))
+ goto close_pmu;
+
+ close(pmu_fd);
+ goto close_prog_noerr;
+
+close_pmu:
+ close(pmu_fd);
+close_prog:
+ error_cnt++;
+close_prog_noerr:
+ bpf_object__close(obj);
+}
+
+void test_task_fd_query_tp(void)
+{
+ test_task_fd_query_tp_core("sched/sched_switch",
+ "sched_switch");
+ test_task_fd_query_tp_core("syscalls/sys_enter_read",
+ "sys_enter_read");
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_estats.c b/tools/testing/selftests/bpf/prog_tests/tcp_estats.c
new file mode 100644
index 000000000000..bb8759d69099
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tcp_estats.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_tcp_estats(void)
+{
+ const char *file = "./test_tcp_estats.o";
+ int err, prog_fd;
+ struct bpf_object *obj;
+ __u32 duration = 0;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
+ CHECK(err, "", "err %d errno %d\n", err, errno);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c
new file mode 100644
index 000000000000..a2f476f91637
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_tp_attach_query(void)
+{
+ const int num_progs = 3;
+ int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
+ __u32 duration = 0, info_len, saved_prog_ids[num_progs];
+ const char *file = "./test_tracepoint.o";
+ struct perf_event_query_bpf *query;
+ struct perf_event_attr attr = {};
+ struct bpf_object *obj[num_progs];
+ struct bpf_prog_info prog_info;
+ char buf[256];
+
+ snprintf(buf, sizeof(buf),
+ "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
+ efd = open(buf, O_RDONLY, 0);
+ if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
+ return;
+ bytes = read(efd, buf, sizeof(buf));
+ close(efd);
+ if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
+ "read", "bytes %d errno %d\n", bytes, errno))
+ return;
+
+ attr.config = strtol(buf, NULL, 0);
+ attr.type = PERF_TYPE_TRACEPOINT;
+ attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
+ attr.sample_period = 1;
+ attr.wakeup_events = 1;
+
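+ /* the query struct is followed by room for num_progs returned prog ids */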
+ query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
+ for (i = 0; i < num_progs; i++) {
+ err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
+ &prog_fd[i]);
+ if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+ goto cleanup1;
+
+ bzero(&prog_info, sizeof(prog_info));
+ prog_info.jited_prog_len = 0;
+ prog_info.xlated_prog_len = 0;
+ prog_info.nr_map_ids = 0;
+ info_len = sizeof(prog_info);
+ err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
+ if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
+ err, errno))
+ goto cleanup1;
+ saved_prog_ids[i] = prog_info.id;
+
+ pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+ 0 /* cpu 0 */, -1 /* group id */,
+ 0 /* flags */);
+ if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
+ pmu_fd[i], errno))
+ goto cleanup2;
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
+ if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
+ err, errno))
+ goto cleanup3;
+
+ if (i == 0) {
+ /* check NULL prog array query */
+ query->ids_len = num_progs;
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+ if (CHECK(err || query->prog_cnt != 0,
+ "perf_event_ioc_query_bpf",
+ "err %d errno %d query->prog_cnt %u\n",
+ err, errno, query->prog_cnt))
+ goto cleanup3;
+ }
+
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
+ if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
+ err, errno))
+ goto cleanup3;
+
+ if (i == 1) {
+ /* try to get # of programs only */
+ query->ids_len = 0;
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+ if (CHECK(err || query->prog_cnt != 2,
+ "perf_event_ioc_query_bpf",
+ "err %d errno %d query->prog_cnt %u\n",
+ err, errno, query->prog_cnt))
+ goto cleanup3;
+
+ /* try a few negative tests */
+ /* invalid query pointer */
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
+ (struct perf_event_query_bpf *)0x1);
+ if (CHECK(!err || errno != EFAULT,
+ "perf_event_ioc_query_bpf",
+ "err %d errno %d\n", err, errno))
+ goto cleanup3;
+
+ /* not enough space */
+ query->ids_len = 1;
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+ if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
+ "perf_event_ioc_query_bpf",
+ "err %d errno %d query->prog_cnt %u\n",
+ err, errno, query->prog_cnt))
+ goto cleanup3;
+ }
+
+ query->ids_len = num_progs;
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+ if (CHECK(err || query->prog_cnt != (i + 1),
+ "perf_event_ioc_query_bpf",
+ "err %d errno %d query->prog_cnt %u\n",
+ err, errno, query->prog_cnt))
+ goto cleanup3;
+ for (j = 0; j < i + 1; j++)
+ if (CHECK(saved_prog_ids[j] != query->ids[j],
+ "perf_event_ioc_query_bpf",
+ "#%d saved_prog_id %x query prog_id %x\n",
+ j, saved_prog_ids[j], query->ids[j]))
+ goto cleanup3;
+ }
+
+ i = num_progs - 1;
+ for (; i >= 0; i--) {
+ cleanup3:
+ ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
+ cleanup2:
+ close(pmu_fd[i]);
+ cleanup1:
+ bpf_object__close(obj[i]);
+ }
+ free(query);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp.c b/tools/testing/selftests/bpf/prog_tests/xdp.c
new file mode 100644
index 000000000000..a74167289545
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_xdp(void)
+{
+ struct vip key4 = {.protocol = 6, .family = AF_INET};
+ struct vip key6 = {.protocol = 6, .family = AF_INET6};
+ struct iptnl_info value4 = {.family = AF_INET};
+ struct iptnl_info value6 = {.family = AF_INET6};
+ const char *file = "./test_xdp.o";
+ struct bpf_object *obj;
+ char buf[128];
+ struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr);
+ struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
+ __u32 duration, retval, size;
+ int err, prog_fd, map_fd;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ map_fd = bpf_find_map(__func__, obj, "vip2tnl");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_update_elem(map_fd, &key4, &value4, 0);
+ bpf_map_update_elem(map_fd, &key6, &value6, 0);
+
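+ /* the 54-byte IPv4 packet should come back IPIP-encapsulated as 74 bytes */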
+ err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
+ buf, &size, &retval, &duration);
+
+ CHECK(err || retval != XDP_TX || size != 74 ||
+ iph->protocol != IPPROTO_IPIP, "ipv4",
+ "err %d errno %d retval %d size %d\n",
+ err, errno, retval, size);
+
+ err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
+ buf, &size, &retval, &duration);
+ CHECK(err || retval != XDP_TX || size != 114 ||
+ iph6->nexthdr != IPPROTO_IPV6, "ipv6",
+ "err %d errno %d retval %d size %d\n",
+ err, errno, retval, size);
+out:
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
new file mode 100644
index 000000000000..922aa0a19764
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_xdp_adjust_tail(void)
+{
+ const char *file = "./test_adjust_tail.o";
+ struct bpf_object *obj;
+ char buf[128];
+ __u32 duration, retval, size;
+ int err, prog_fd;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
+ buf, &size, &retval, &duration);
+
+ CHECK(err || retval != XDP_DROP,
+ "ipv4", "err %d errno %d retval %d size %d\n",
+ err, errno, retval, size);
+
+ err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
+ buf, &size, &retval, &duration);
+ CHECK(err || retval != XDP_TX || size != 54,
+ "ipv6", "err %d errno %d retval %d size %d\n",
+ err, errno, retval, size);
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c b/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c
new file mode 100644
index 000000000000..09e6b46f5515
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_xdp_noinline(void)
+{
+ const char *file = "./test_xdp_noinline.o";
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ struct vip key = {.protocol = 6};
+ struct vip_meta {
+ __u32 flags;
+ __u32 vip_num;
+ } value = {.vip_num = VIP_NUM};
+ __u32 stats_key = VIP_NUM;
+ struct vip_stats {
+ __u64 bytes;
+ __u64 pkts;
+ } stats[nr_cpus];
+ struct real_definition {
+ union {
+ __be32 dst;
+ __be32 dstv6[4];
+ };
+ __u8 flags;
+ } real_def = {.dst = MAGIC_VAL};
+ __u32 ch_key = 11, real_num = 3;
+ __u32 duration, retval, size;
+ int err, i, prog_fd, map_fd;
+ __u64 bytes = 0, pkts = 0;
+ struct bpf_object *obj;
+ char buf[128];
+ u32 *magic = (u32 *)buf;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
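+ /* populate the LB maps: VIP -> metadata, hash ring slot -> real index, real index -> definition */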
+ map_fd = bpf_find_map(__func__, obj, "vip_map");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_update_elem(map_fd, &key, &value, 0);
+
+ map_fd = bpf_find_map(__func__, obj, "ch_rings");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
+
+ map_fd = bpf_find_map(__func__, obj, "reals");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
+
+ err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
+ buf, &size, &retval, &duration);
+ CHECK(err || retval != 1 || size != 54 ||
+ *magic != MAGIC_VAL, "ipv4",
+ "err %d errno %d retval %d size %d magic %x\n",
+ err, errno, retval, size, *magic);
+
+ err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
+ buf, &size, &retval, &duration);
+ CHECK(err || retval != 1 || size != 74 ||
+ *magic != MAGIC_VAL, "ipv6",
+ "err %d errno %d retval %d size %d magic %x\n",
+ err, errno, retval, size, *magic);
+
+ map_fd = bpf_find_map(__func__, obj, "stats");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_lookup_elem(map_fd, &stats_key, stats);
+ for (i = 0; i < nr_cpus; i++) {
+ bytes += stats[i].bytes;
+ pkts += stats[i].pkts;
+ }
+ if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
+ error_cnt++;
+ printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts);
+ }
+out:
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/bpf_flow.c b/tools/testing/selftests/bpf/progs/bpf_flow.c
index 284660f5aa95..284660f5aa95 100644
--- a/tools/testing/selftests/bpf/bpf_flow.c
+++ b/tools/testing/selftests/bpf/progs/bpf_flow.c
diff --git a/tools/testing/selftests/bpf/connect4_prog.c b/tools/testing/selftests/bpf/progs/connect4_prog.c
index 1fd244d35ba9..1fd244d35ba9 100644
--- a/tools/testing/selftests/bpf/connect4_prog.c
+++ b/tools/testing/selftests/bpf/progs/connect4_prog.c
diff --git a/tools/testing/selftests/bpf/connect6_prog.c b/tools/testing/selftests/bpf/progs/connect6_prog.c
index 26397ab7b3c7..26397ab7b3c7 100644
--- a/tools/testing/selftests/bpf/connect6_prog.c
+++ b/tools/testing/selftests/bpf/progs/connect6_prog.c
diff --git a/tools/testing/selftests/bpf/dev_cgroup.c b/tools/testing/selftests/bpf/progs/dev_cgroup.c
index ce41a3475f27..ce41a3475f27 100644
--- a/tools/testing/selftests/bpf/dev_cgroup.c
+++ b/tools/testing/selftests/bpf/progs/dev_cgroup.c
diff --git a/tools/testing/selftests/bpf/get_cgroup_id_kern.c b/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c
index 014dba10b8a5..014dba10b8a5 100644
--- a/tools/testing/selftests/bpf/get_cgroup_id_kern.c
+++ b/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c
diff --git a/tools/testing/selftests/bpf/netcnt_prog.c b/tools/testing/selftests/bpf/progs/netcnt_prog.c
index 9f741e69cebe..9f741e69cebe 100644
--- a/tools/testing/selftests/bpf/netcnt_prog.c
+++ b/tools/testing/selftests/bpf/progs/netcnt_prog.c
diff --git a/tools/testing/selftests/bpf/sample_map_ret0.c b/tools/testing/selftests/bpf/progs/sample_map_ret0.c
index 0756303676ac..0756303676ac 100644
--- a/tools/testing/selftests/bpf/sample_map_ret0.c
+++ b/tools/testing/selftests/bpf/progs/sample_map_ret0.c
diff --git a/tools/testing/selftests/bpf/sample_ret0.c b/tools/testing/selftests/bpf/progs/sample_ret0.c
index fec99750d6ea..fec99750d6ea 100644
--- a/tools/testing/selftests/bpf/sample_ret0.c
+++ b/tools/testing/selftests/bpf/progs/sample_ret0.c
diff --git a/tools/testing/selftests/bpf/sendmsg4_prog.c b/tools/testing/selftests/bpf/progs/sendmsg4_prog.c
index a91536b1c47e..a91536b1c47e 100644
--- a/tools/testing/selftests/bpf/sendmsg4_prog.c
+++ b/tools/testing/selftests/bpf/progs/sendmsg4_prog.c
diff --git a/tools/testing/selftests/bpf/sendmsg6_prog.c b/tools/testing/selftests/bpf/progs/sendmsg6_prog.c
index 5aeaa284fc47..5aeaa284fc47 100644
--- a/tools/testing/selftests/bpf/sendmsg6_prog.c
+++ b/tools/testing/selftests/bpf/progs/sendmsg6_prog.c
diff --git a/tools/testing/selftests/bpf/socket_cookie_prog.c b/tools/testing/selftests/bpf/progs/socket_cookie_prog.c
index 9ff8ac4b0bf6..9ff8ac4b0bf6 100644
--- a/tools/testing/selftests/bpf/socket_cookie_prog.c
+++ b/tools/testing/selftests/bpf/progs/socket_cookie_prog.c
diff --git a/tools/testing/selftests/bpf/sockmap_parse_prog.c b/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c
index 0f92858f6226..0f92858f6226 100644
--- a/tools/testing/selftests/bpf/sockmap_parse_prog.c
+++ b/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c
diff --git a/tools/testing/selftests/bpf/sockmap_tcp_msg_prog.c b/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c
index 12a7b5c82ed6..12a7b5c82ed6 100644
--- a/tools/testing/selftests/bpf/sockmap_tcp_msg_prog.c
+++ b/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c
diff --git a/tools/testing/selftests/bpf/sockmap_verdict_prog.c b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c
index 2ce7634a4012..2ce7634a4012 100644
--- a/tools/testing/selftests/bpf/sockmap_verdict_prog.c
+++ b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c
diff --git a/tools/testing/selftests/bpf/test_adjust_tail.c b/tools/testing/selftests/bpf/progs/test_adjust_tail.c
index 4cd5e860c903..4cd5e860c903 100644
--- a/tools/testing/selftests/bpf/test_adjust_tail.c
+++ b/tools/testing/selftests/bpf/progs/test_adjust_tail.c
diff --git a/tools/testing/selftests/bpf/test_btf_haskv.c b/tools/testing/selftests/bpf/progs/test_btf_haskv.c
index e5c79fe0ffdb..e5c79fe0ffdb 100644
--- a/tools/testing/selftests/bpf/test_btf_haskv.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_haskv.c
diff --git a/tools/testing/selftests/bpf/test_btf_nokv.c b/tools/testing/selftests/bpf/progs/test_btf_nokv.c
index 434188c37774..434188c37774 100644
--- a/tools/testing/selftests/bpf/test_btf_nokv.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_nokv.c
diff --git a/tools/testing/selftests/bpf/test_get_stack_rawtp.c b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c
index f6d9f238e00a..f6d9f238e00a 100644
--- a/tools/testing/selftests/bpf/test_get_stack_rawtp.c
+++ b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c
diff --git a/tools/testing/selftests/bpf/test_l4lb.c b/tools/testing/selftests/bpf/progs/test_l4lb.c
index 1e10c9590991..1e10c9590991 100644
--- a/tools/testing/selftests/bpf/test_l4lb.c
+++ b/tools/testing/selftests/bpf/progs/test_l4lb.c
diff --git a/tools/testing/selftests/bpf/test_l4lb_noinline.c b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
index ba44a14e6dc4..ba44a14e6dc4 100644
--- a/tools/testing/selftests/bpf/test_l4lb_noinline.c
+++ b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
diff --git a/tools/testing/selftests/bpf/test_lirc_mode2_kern.c b/tools/testing/selftests/bpf/progs/test_lirc_mode2_kern.c
index 4147130cc3b7..4147130cc3b7 100644
--- a/tools/testing/selftests/bpf/test_lirc_mode2_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_lirc_mode2_kern.c
diff --git a/tools/testing/selftests/bpf/progs/test_lwt_ip_encap.c b/tools/testing/selftests/bpf/progs/test_lwt_ip_encap.c
new file mode 100644
index 000000000000..c957d6dfe6d7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_lwt_ip_encap.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stddef.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include "bpf_helpers.h"
+#include "bpf_endian.h"
+
+struct grehdr {
+ __be16 flags;
+ __be16 protocol;
+};
+
+SEC("encap_gre")
+int bpf_lwt_encap_gre(struct __sk_buff *skb)
+{
+ struct encap_hdr {
+ struct iphdr iph;
+ struct grehdr greh;
+ } hdr;
+ int err;
+
+ memset(&hdr, 0, sizeof(struct encap_hdr));
+
+ hdr.iph.ihl = 5;
+ hdr.iph.version = 4;
+ hdr.iph.ttl = 0x40;
+ hdr.iph.protocol = 47; /* IPPROTO_GRE */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ hdr.iph.saddr = 0x640110ac; /* 172.16.1.100 */
+ hdr.iph.daddr = 0x641010ac; /* 172.16.16.100 */
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ hdr.iph.saddr = 0xac100164; /* 172.16.1.100 */
+ hdr.iph.daddr = 0xac101064; /* 172.16.16.100 */
+#else
+#error "Fix your compiler's __BYTE_ORDER__?!"
+#endif
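+ /* outer total length covers the original packet plus the new IP + GRE header */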
+ hdr.iph.tot_len = bpf_htons(skb->len + sizeof(struct encap_hdr));
+
+ hdr.greh.protocol = skb->protocol;
+
+ err = bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_IP, &hdr,
+ sizeof(struct encap_hdr));
+ if (err)
+ return BPF_DROP;
+
+ return BPF_LWT_REROUTE;
+}
+
+SEC("encap_gre6")
+int bpf_lwt_encap_gre6(struct __sk_buff *skb)
+{
+ struct encap_hdr {
+ struct ipv6hdr ip6hdr;
+ struct grehdr greh;
+ } hdr;
+ int err;
+
+ memset(&hdr, 0, sizeof(struct encap_hdr));
+
+ hdr.ip6hdr.version = 6;
+ hdr.ip6hdr.payload_len = bpf_htons(skb->len + sizeof(struct grehdr));
+ hdr.ip6hdr.nexthdr = 47; /* IPPROTO_GRE */
+ hdr.ip6hdr.hop_limit = 0x40;
+ /* fb01::1 */
+ hdr.ip6hdr.saddr.s6_addr[0] = 0xfb;
+ hdr.ip6hdr.saddr.s6_addr[1] = 1;
+ hdr.ip6hdr.saddr.s6_addr[15] = 1;
+ /* fb10::1 */
+ hdr.ip6hdr.daddr.s6_addr[0] = 0xfb;
+ hdr.ip6hdr.daddr.s6_addr[1] = 0x10;
+ hdr.ip6hdr.daddr.s6_addr[15] = 1;
+
+ hdr.greh.protocol = skb->protocol;
+
+ err = bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_IP, &hdr,
+ sizeof(struct encap_hdr));
+ if (err)
+ return BPF_DROP;
+
+ return BPF_LWT_REROUTE;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_lwt_seg6local.c b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
index 0575751bc1bc..0575751bc1bc 100644
--- a/tools/testing/selftests/bpf/test_lwt_seg6local.c
+++ b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
diff --git a/tools/testing/selftests/bpf/test_map_in_map.c b/tools/testing/selftests/bpf/progs/test_map_in_map.c
index ce923e67e08e..2985f262846e 100644
--- a/tools/testing/selftests/bpf/test_map_in_map.c
+++ b/tools/testing/selftests/bpf/progs/test_map_in_map.c
@@ -27,6 +27,7 @@ SEC("xdp_mimtest")
int xdp_mimtest0(struct xdp_md *ctx)
{
int value = 123;
+ int *value_p;
int key = 0;
void *map;
@@ -35,6 +36,9 @@ int xdp_mimtest0(struct xdp_md *ctx)
return XDP_DROP;
bpf_map_update_elem(map, &key, &value, 0);
+ value_p = bpf_map_lookup_elem(map, &key);
+ if (!value_p || *value_p != 123)
+ return XDP_DROP;
map = bpf_map_lookup_elem(&mim_hash, &key);
if (!map)
diff --git a/tools/testing/selftests/bpf/progs/test_map_lock.c b/tools/testing/selftests/bpf/progs/test_map_lock.c
new file mode 100644
index 000000000000..af8cc68ed2f9
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_map_lock.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+#include <linux/bpf.h>
+#include <linux/version.h>
+#include "bpf_helpers.h"
+
+#define VAR_NUM 16
+
+struct hmap_elem {
+ struct bpf_spin_lock lock;
+ int var[VAR_NUM];
+};
+
+struct bpf_map_def SEC("maps") hash_map = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(int),
+ .value_size = sizeof(struct hmap_elem),
+ .max_entries = 1,
+};
+
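+/* BTF key/value annotation lets the verifier locate the bpf_spin_lock inside the value */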
+BPF_ANNOTATE_KV_PAIR(hash_map, int, struct hmap_elem);
+
+struct array_elem {
+ struct bpf_spin_lock lock;
+ int var[VAR_NUM];
+};
+
+struct bpf_map_def SEC("maps") array_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(int),
+ .value_size = sizeof(struct array_elem),
+ .max_entries = 1,
+};
+
+BPF_ANNOTATE_KV_PAIR(array_map, int, struct array_elem);
+
+SEC("map_lock_demo")
+int bpf_map_lock_test(struct __sk_buff *skb)
+{
+ struct hmap_elem zero = {}, *val;
+ int rnd = bpf_get_prandom_u32();
+ int key = 0, err = 1, i;
+ struct array_elem *q;
+
+ val = bpf_map_lookup_elem(&hash_map, &key);
+ if (!val)
+ goto err;
+ /* spin_lock in hash map */
+ bpf_spin_lock(&val->lock);
+ for (i = 0; i < VAR_NUM; i++)
+ val->var[i] = rnd;
+ bpf_spin_unlock(&val->lock);
+
+ /* spin_lock in array */
+ q = bpf_map_lookup_elem(&array_map, &key);
+ if (!q)
+ goto err;
+ bpf_spin_lock(&q->lock);
+ for (i = 0; i < VAR_NUM; i++)
+ q->var[i] = rnd;
+ bpf_spin_unlock(&q->lock);
+ err = 0;
+err:
+ return err;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_obj_id.c b/tools/testing/selftests/bpf/progs/test_obj_id.c
index 880d2963b472..880d2963b472 100644
--- a/tools/testing/selftests/bpf/test_obj_id.c
+++ b/tools/testing/selftests/bpf/progs/test_obj_id.c
diff --git a/tools/testing/selftests/bpf/test_pkt_access.c b/tools/testing/selftests/bpf/progs/test_pkt_access.c
index 6e11ba11709e..6e11ba11709e 100644
--- a/tools/testing/selftests/bpf/test_pkt_access.c
+++ b/tools/testing/selftests/bpf/progs/test_pkt_access.c
diff --git a/tools/testing/selftests/bpf/test_pkt_md_access.c b/tools/testing/selftests/bpf/progs/test_pkt_md_access.c
index 7956302ecdf2..7956302ecdf2 100644
--- a/tools/testing/selftests/bpf/test_pkt_md_access.c
+++ b/tools/testing/selftests/bpf/progs/test_pkt_md_access.c
diff --git a/tools/testing/selftests/bpf/test_queue_map.c b/tools/testing/selftests/bpf/progs/test_queue_map.c
index 87db1f9da33d..87db1f9da33d 100644
--- a/tools/testing/selftests/bpf/test_queue_map.c
+++ b/tools/testing/selftests/bpf/progs/test_queue_map.c
diff --git a/tools/testing/selftests/bpf/test_select_reuseport_kern.c b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c
index 5b54ec637ada..5b54ec637ada 100644
--- a/tools/testing/selftests/bpf/test_select_reuseport_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c
diff --git a/tools/testing/selftests/bpf/test_sk_lookup_kern.c b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
index e21cd736c196..e21cd736c196 100644
--- a/tools/testing/selftests/bpf/test_sk_lookup_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id_kern.c b/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c
index 68cf9829f5a7..68cf9829f5a7 100644
--- a/tools/testing/selftests/bpf/test_skb_cgroup_id_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c
diff --git a/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c b/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c
new file mode 100644
index 000000000000..de1a43e8f610
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+
+#include <linux/bpf.h>
+#include <netinet/in.h>
+#include <stdbool.h>
+
+#include "bpf_helpers.h"
+#include "bpf_endian.h"
+
+enum bpf_array_idx {
+ SRV_IDX,
+ CLI_IDX,
+ __NR_BPF_ARRAY_IDX,
+};
+
+struct bpf_map_def SEC("maps") addr_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct sockaddr_in6),
+ .max_entries = __NR_BPF_ARRAY_IDX,
+};
+
+struct bpf_map_def SEC("maps") sock_result_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct bpf_sock),
+ .max_entries = __NR_BPF_ARRAY_IDX,
+};
+
+struct bpf_map_def SEC("maps") tcp_sock_result_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct bpf_tcp_sock),
+ .max_entries = __NR_BPF_ARRAY_IDX,
+};
+
+struct bpf_map_def SEC("maps") linum_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(__u32),
+ .max_entries = 1,
+};
+
+static bool is_loopback6(__u32 *a6)
+{
+ return !a6[0] && !a6[1] && !a6[2] && a6[3] == bpf_htonl(1);
+}
+
+static void skcpy(struct bpf_sock *dst,
+ const struct bpf_sock *src)
+{
+ dst->bound_dev_if = src->bound_dev_if;
+ dst->family = src->family;
+ dst->type = src->type;
+ dst->protocol = src->protocol;
+ dst->mark = src->mark;
+ dst->priority = src->priority;
+ dst->src_ip4 = src->src_ip4;
+ dst->src_ip6[0] = src->src_ip6[0];
+ dst->src_ip6[1] = src->src_ip6[1];
+ dst->src_ip6[2] = src->src_ip6[2];
+ dst->src_ip6[3] = src->src_ip6[3];
+ dst->src_port = src->src_port;
+ dst->dst_ip4 = src->dst_ip4;
+ dst->dst_ip6[0] = src->dst_ip6[0];
+ dst->dst_ip6[1] = src->dst_ip6[1];
+ dst->dst_ip6[2] = src->dst_ip6[2];
+ dst->dst_ip6[3] = src->dst_ip6[3];
+ dst->dst_port = src->dst_port;
+ dst->state = src->state;
+}
+
+static void tpcpy(struct bpf_tcp_sock *dst,
+ const struct bpf_tcp_sock *src)
+{
+ dst->snd_cwnd = src->snd_cwnd;
+ dst->srtt_us = src->srtt_us;
+ dst->rtt_min = src->rtt_min;
+ dst->snd_ssthresh = src->snd_ssthresh;
+ dst->rcv_nxt = src->rcv_nxt;
+ dst->snd_nxt = src->snd_nxt;
+ dst->snd_una = src->snd_una;
+ dst->mss_cache = src->mss_cache;
+ dst->ecn_flags = src->ecn_flags;
+ dst->rate_delivered = src->rate_delivered;
+ dst->rate_interval_us = src->rate_interval_us;
+ dst->packets_out = src->packets_out;
+ dst->retrans_out = src->retrans_out;
+ dst->total_retrans = src->total_retrans;
+ dst->segs_in = src->segs_in;
+ dst->data_segs_in = src->data_segs_in;
+ dst->segs_out = src->segs_out;
+ dst->data_segs_out = src->data_segs_out;
+ dst->lost_out = src->lost_out;
+ dst->sacked_out = src->sacked_out;
+ dst->bytes_received = src->bytes_received;
+ dst->bytes_acked = src->bytes_acked;
+}
+
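+/* on bailout, record the current source line in linum_map so userspace can see where */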
+#define RETURN { \
+ linum = __LINE__; \
+ bpf_map_update_elem(&linum_map, &idx0, &linum, 0); \
+ return 1; \
+}
+
+SEC("cgroup_skb/egress")
+int read_sock_fields(struct __sk_buff *skb)
+{
+ __u32 srv_idx = SRV_IDX, cli_idx = CLI_IDX, idx;
+ struct sockaddr_in6 *srv_sa6, *cli_sa6;
+ struct bpf_tcp_sock *tp, *tp_ret;
+ struct bpf_sock *sk, *sk_ret;
+ __u32 linum, idx0 = 0;
+
+ sk = skb->sk;
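+ /* state 10 is BPF_TCP_LISTEN; nothing to check on a listening socket */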
+ if (!sk || sk->state == 10)
+ RETURN;
+
+ sk = bpf_sk_fullsock(sk);
+ if (!sk || sk->family != AF_INET6 || sk->protocol != IPPROTO_TCP ||
+ !is_loopback6(sk->src_ip6))
+ RETURN;
+
+ tp = bpf_tcp_sock(sk);
+ if (!tp)
+ RETURN;
+
+ srv_sa6 = bpf_map_lookup_elem(&addr_map, &srv_idx);
+ cli_sa6 = bpf_map_lookup_elem(&addr_map, &cli_idx);
+ if (!srv_sa6 || !cli_sa6)
+ RETURN;
+
+ if (sk->src_port == bpf_ntohs(srv_sa6->sin6_port))
+ idx = srv_idx;
+ else if (sk->src_port == bpf_ntohs(cli_sa6->sin6_port))
+ idx = cli_idx;
+ else
+ RETURN;
+
+ sk_ret = bpf_map_lookup_elem(&sock_result_map, &idx);
+ tp_ret = bpf_map_lookup_elem(&tcp_sock_result_map, &idx);
+ if (!sk_ret || !tp_ret)
+ RETURN;
+
+ skcpy(sk_ret, sk);
+ tpcpy(tp_ret, tp);
+
+ RETURN;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_sockhash_kern.c b/tools/testing/selftests/bpf/progs/test_sockhash_kern.c
index e6755916442a..e6755916442a 100644
--- a/tools/testing/selftests/bpf/test_sockhash_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_sockhash_kern.c
diff --git a/tools/testing/selftests/bpf/test_sockmap_kern.c b/tools/testing/selftests/bpf/progs/test_sockmap_kern.c
index 677b2ed1cc1e..677b2ed1cc1e 100644
--- a/tools/testing/selftests/bpf/test_sockmap_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_kern.c
diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock.c b/tools/testing/selftests/bpf/progs/test_spin_lock.c
new file mode 100644
index 000000000000..40f904312090
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_spin_lock.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+#include <linux/bpf.h>
+#include <linux/version.h>
+#include "bpf_helpers.h"
+
+struct hmap_elem {
+ volatile int cnt;
+ struct bpf_spin_lock lock;
+ int test_padding;
+};
+
+struct bpf_map_def SEC("maps") hmap = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(int),
+ .value_size = sizeof(struct hmap_elem),
+ .max_entries = 1,
+};
+
+BPF_ANNOTATE_KV_PAIR(hmap, int, struct hmap_elem);
+
+
+struct cls_elem {
+ struct bpf_spin_lock lock;
+ volatile int cnt;
+};
+
+struct bpf_map_def SEC("maps") cls_map = {
+ .type = BPF_MAP_TYPE_CGROUP_STORAGE,
+ .key_size = sizeof(struct bpf_cgroup_storage_key),
+ .value_size = sizeof(struct cls_elem),
+};
+
+BPF_ANNOTATE_KV_PAIR(cls_map, struct bpf_cgroup_storage_key,
+ struct cls_elem);
+
+struct bpf_vqueue {
+ struct bpf_spin_lock lock;
+ /* 4 byte hole */
+ unsigned long long lasttime;
+ int credit;
+ unsigned int rate;
+};
+
+struct bpf_map_def SEC("maps") vqueue = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(int),
+ .value_size = sizeof(struct bpf_vqueue),
+ .max_entries = 1,
+};
+
+BPF_ANNOTATE_KV_PAIR(vqueue, int, struct bpf_vqueue);
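+/* credit gained over delta ns, with rate expressed in bytes per 2^20 ns (~1 ms) */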
+#define CREDIT_PER_NS(delta, rate) (((delta) * rate) >> 20)
+
+SEC("spin_lock_demo")
+int bpf_sping_lock_test(struct __sk_buff *skb)
+{
+ volatile int credit = 0, max_credit = 100, pkt_len = 64;
+ struct hmap_elem zero = {}, *val;
+ unsigned long long curtime;
+ struct bpf_vqueue *q;
+ struct cls_elem *cls;
+ int key = 0;
+ int err = 0;
+
+ val = bpf_map_lookup_elem(&hmap, &key);
+ if (!val) {
+ bpf_map_update_elem(&hmap, &key, &zero, 0);
+ val = bpf_map_lookup_elem(&hmap, &key);
+ if (!val) {
+ err = 1;
+ goto err;
+ }
+ }
+ /* spin_lock in hash map run time test */
+ bpf_spin_lock(&val->lock);
+ if (val->cnt)
+ val->cnt--;
+ else
+ val->cnt++;
+ if (val->cnt != 0 && val->cnt != 1)
+ err = 1;
+ bpf_spin_unlock(&val->lock);
+
+ /* spin_lock in array. virtual queue demo */
+ q = bpf_map_lookup_elem(&vqueue, &key);
+ if (!q)
+ goto err;
+ curtime = bpf_ktime_get_ns();
+ bpf_spin_lock(&q->lock);
+ q->credit += CREDIT_PER_NS(curtime - q->lasttime, q->rate);
+ q->lasttime = curtime;
+ if (q->credit > max_credit)
+ q->credit = max_credit;
+ q->credit -= pkt_len;
+ credit = q->credit;
+ bpf_spin_unlock(&q->lock);
+
+ /* spin_lock in cgroup local storage */
+ cls = bpf_get_local_storage(&cls_map, 0);
+ bpf_spin_lock(&cls->lock);
+ cls->cnt++;
+ bpf_spin_unlock(&cls->lock);
+
+err:
+ return err;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_stack_map.c b/tools/testing/selftests/bpf/progs/test_stack_map.c
index 31c3880e6da0..31c3880e6da0 100644
--- a/tools/testing/selftests/bpf/test_stack_map.c
+++ b/tools/testing/selftests/bpf/progs/test_stack_map.c
diff --git a/tools/testing/selftests/bpf/test_stacktrace_build_id.c b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
index d86c281e957f..d86c281e957f 100644
--- a/tools/testing/selftests/bpf/test_stacktrace_build_id.c
+++ b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
diff --git a/tools/testing/selftests/bpf/test_stacktrace_map.c b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
index af111af7ca1a..af111af7ca1a 100644
--- a/tools/testing/selftests/bpf/test_stacktrace_map.c
+++ b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
diff --git a/tools/testing/selftests/bpf/test_tcp_estats.c b/tools/testing/selftests/bpf/progs/test_tcp_estats.c
index bee3bbecc0c4..bee3bbecc0c4 100644
--- a/tools/testing/selftests/bpf/test_tcp_estats.c
+++ b/tools/testing/selftests/bpf/progs/test_tcp_estats.c
diff --git a/tools/testing/selftests/bpf/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
index 74f73b33a7b0..74f73b33a7b0 100644
--- a/tools/testing/selftests/bpf/test_tcpbpf_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
diff --git a/tools/testing/selftests/bpf/test_tcpnotify_kern.c b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
index edbca203ce2d..edbca203ce2d 100644
--- a/tools/testing/selftests/bpf/test_tcpnotify_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
diff --git a/tools/testing/selftests/bpf/test_tracepoint.c b/tools/testing/selftests/bpf/progs/test_tracepoint.c
index 04bf084517e0..04bf084517e0 100644
--- a/tools/testing/selftests/bpf/test_tracepoint.c
+++ b/tools/testing/selftests/bpf/progs/test_tracepoint.c
diff --git a/tools/testing/selftests/bpf/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
index 504df69c83df..504df69c83df 100644
--- a/tools/testing/selftests/bpf/test_tunnel_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
diff --git a/tools/testing/selftests/bpf/test_xdp.c b/tools/testing/selftests/bpf/progs/test_xdp.c
index 5e7df8bb5b5d..5e7df8bb5b5d 100644
--- a/tools/testing/selftests/bpf/test_xdp.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp.c
diff --git a/tools/testing/selftests/bpf/test_xdp_meta.c b/tools/testing/selftests/bpf/progs/test_xdp_meta.c
index 8d0182650653..8d0182650653 100644
--- a/tools/testing/selftests/bpf/test_xdp_meta.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_meta.c
diff --git a/tools/testing/selftests/bpf/test_xdp_noinline.c b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
index 5e4aac74f9d0..5e4aac74f9d0 100644
--- a/tools/testing/selftests/bpf/test_xdp_noinline.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
diff --git a/tools/testing/selftests/bpf/test_xdp_redirect.c b/tools/testing/selftests/bpf/progs/test_xdp_redirect.c
index ef9e704be140..ef9e704be140 100644
--- a/tools/testing/selftests/bpf/test_xdp_redirect.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_redirect.c
diff --git a/tools/testing/selftests/bpf/test_xdp_vlan.c b/tools/testing/selftests/bpf/progs/test_xdp_vlan.c
index 365a7d2d9f5c..365a7d2d9f5c 100644
--- a/tools/testing/selftests/bpf/test_xdp_vlan.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_vlan.c
diff --git a/tools/testing/selftests/bpf/xdp_dummy.c b/tools/testing/selftests/bpf/progs/xdp_dummy.c
index 43b0ef1001ed..43b0ef1001ed 100644
--- a/tools/testing/selftests/bpf/xdp_dummy.c
+++ b/tools/testing/selftests/bpf/progs/xdp_dummy.c
diff --git a/tools/testing/selftests/bpf/tcp_client.py b/tools/testing/selftests/bpf/tcp_client.py
index 7f8200a8702b..a53ed58528d6 100755
--- a/tools/testing/selftests/bpf/tcp_client.py
+++ b/tools/testing/selftests/bpf/tcp_client.py
@@ -30,12 +30,11 @@ def send(sock, s):
serverPort = int(sys.argv[1])
-HostName = socket.gethostname()
# create active socket
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
try:
- sock.connect((HostName, serverPort))
+ sock.connect(('localhost', serverPort))
except socket.error as e:
sys.exit(1)
diff --git a/tools/testing/selftests/bpf/tcp_server.py b/tools/testing/selftests/bpf/tcp_server.py
index b39903fca4c8..0ca60d193bed 100755
--- a/tools/testing/selftests/bpf/tcp_server.py
+++ b/tools/testing/selftests/bpf/tcp_server.py
@@ -35,13 +35,10 @@ MAX_PORTS = 2
serverPort = SERVER_PORT
serverSocket = None
-HostName = socket.gethostname()
-
# create passive socket
serverSocket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
-host = socket.gethostname()
-try: serverSocket.bind((host, 0))
+try: serverSocket.bind(('localhost', 0))
except socket.error as msg:
print('bind fails: ' + str(msg))
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c
index a0bd04befe87..38797aa627a7 100644
--- a/tools/testing/selftests/bpf/test_btf.c
+++ b/tools/testing/selftests/bpf/test_btf.c
@@ -18,6 +18,7 @@
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
+#include <assert.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>
@@ -51,18 +52,10 @@ static int count_result(int err)
return err;
}
-#define __printf(a, b) __attribute__((format(printf, a, b)))
-
-__printf(1, 2)
-static int __base_pr(const char *format, ...)
+static int __base_pr(enum libbpf_print_level level __attribute__((unused)),
+ const char *format, va_list args)
{
- va_list args;
- int err;
-
- va_start(args, format);
- err = vfprintf(stderr, format, args);
- va_end(args);
- return err;
+ return vfprintf(stderr, format, args);
}
#define BTF_INFO_ENC(kind, kind_flag, vlen) \
@@ -77,12 +70,21 @@ static int __base_pr(const char *format, ...)
BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
BTF_INT_ENC(encoding, bits_offset, bits)
+#define BTF_FWD_ENC(name, kind_flag) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FWD, kind_flag, 0), 0)
+
#define BTF_ARRAY_ENC(type, index_type, nr_elems) \
(type), (index_type), (nr_elems)
#define BTF_TYPE_ARRAY_ENC(type, index_type, nr_elems) \
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ARRAY, 0, 0), 0), \
BTF_ARRAY_ENC(type, index_type, nr_elems)
+#define BTF_STRUCT_ENC(name, nr_elems, sz) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, nr_elems), sz)
+
+#define BTF_UNION_ENC(name, nr_elems, sz) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_UNION, 0, nr_elems), sz)
+
#define BTF_MEMBER_ENC(name, type, bits_offset) \
(name), (type), (bits_offset)
#define BTF_ENUM_ENC(name, val) (name), (val)
@@ -98,6 +100,12 @@ static int __base_pr(const char *format, ...)
#define BTF_CONST_ENC(type) \
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), type)
+#define BTF_VOLATILE_ENC(type) \
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), type)
+
+#define BTF_RESTRICT_ENC(type) \
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_RESTRICT, 0, 0), type)
+
#define BTF_FUNC_PROTO_ENC(ret_type, nargs) \
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, nargs), ret_type)
@@ -110,6 +118,10 @@ static int __base_pr(const char *format, ...)
#define BTF_END_RAW 0xdeadbeef
#define NAME_TBD 0xdeadb33f
+#define NAME_NTH(N) (0xffff0000 | N)
+#define IS_NAME_NTH(X) ((X & 0xffff0000) == 0xffff0000)
+#define GET_NAME_NTH_IDX(X) (X & 0x0000ffff)
+
#define MAX_NR_RAW_U32 1024
#define BTF_LOG_BUF_SIZE 65535
@@ -118,12 +130,14 @@ static struct args {
unsigned int file_test_num;
unsigned int get_info_test_num;
unsigned int info_raw_test_num;
+ unsigned int dedup_test_num;
bool raw_test;
bool file_test;
bool get_info_test;
bool pprint_test;
bool always_log;
bool info_raw_test;
+ bool dedup_test;
} args;
static char btf_log_buf[BTF_LOG_BUF_SIZE];
@@ -134,6 +148,12 @@ static struct btf_header hdr_tmpl = {
.hdr_len = sizeof(struct btf_header),
};
+/* several different mapv kinds(types) supported by pprint */
+enum pprint_mapv_kind_t {
+ PPRINT_MAPV_KIND_BASIC = 0,
+ PPRINT_MAPV_KIND_INT128,
+};
+
struct btf_raw_test {
const char *descr;
const char *str_sec;
@@ -156,6 +176,7 @@ struct btf_raw_test {
int type_off_delta;
int str_off_delta;
int str_len_delta;
+ enum pprint_mapv_kind_t mapv_kind;
};
#define BTF_STR_SEC(str) \
@@ -1881,13 +1902,12 @@ static struct btf_raw_test raw_tests[] = {
},
{
- .descr = "func proto (CONST=>TYPEDEF=>FUNC_PROTO)",
+ .descr = "func proto (TYPEDEF=>FUNC_PROTO)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
- BTF_CONST_ENC(4), /* [3] */
- BTF_TYPEDEF_ENC(NAME_TBD, 5), /* [4] */
- BTF_FUNC_PROTO_ENC(0, 2), /* [5] */
+ BTF_TYPEDEF_ENC(NAME_TBD, 4), /* [3] */
+ BTF_FUNC_PROTO_ENC(0, 2), /* [4] */
BTF_FUNC_PROTO_ARG_ENC(0, 1),
BTF_FUNC_PROTO_ARG_ENC(0, 2),
BTF_END_RAW,
@@ -1901,8 +1921,6 @@ static struct btf_raw_test raw_tests[] = {
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
- .btf_load_err = true,
- .err_str = "Invalid type_id",
},
{
@@ -1957,7 +1975,7 @@ static struct btf_raw_test raw_tests[] = {
/* void (*)(int a, unsigned int <bad_name_off>) */
BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
- BTF_FUNC_PROTO_ARG_ENC(0xffffffff, 2),
+ BTF_FUNC_PROTO_ARG_ENC(0x0fffffff, 2),
BTF_END_RAW,
},
.str_sec = "\0a",
@@ -2707,6 +2725,99 @@ static struct btf_raw_test raw_tests[] = {
.err_str = "Invalid member offset",
},
+{
+ .descr = "128-bit int",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 128, 16), /* [2] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "int_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "struct, 128-bit int member",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 128, 16), /* [2] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16), /* [3] */
+ BTF_MEMBER_ENC(NAME_TBD, 2, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "struct, 120-bit int member bitfield",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 120, 16), /* [2] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16), /* [3] */
+ BTF_MEMBER_ENC(NAME_TBD, 2, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "struct, kind_flag, 128-bit int member",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 128, 16), /* [2] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 1), 16), /* [3] */
+ BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 0)),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "struct, kind_flag, 120-bit int member bitfield",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 128, 16), /* [2] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 1), 16), /* [3] */
+ BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(120, 0)),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
}; /* struct btf_raw_test raw_tests[] */
static const char *get_next_str(const char *start, const char *end)
@@ -2734,11 +2845,13 @@ static void *btf_raw_create(const struct btf_header *hdr,
const char **ret_next_str)
{
const char *next_str = str, *end_str = str + str_sec_size;
+ const char **strs_idx = NULL, **tmp_strs_idx;
+ int strs_cap = 0, strs_cnt = 0, next_str_idx = 0;
unsigned int size_needed, offset;
struct btf_header *ret_hdr;
- int i, type_sec_size;
+ int i, type_sec_size, err = 0;
uint32_t *ret_types;
- void *raw_btf;
+ void *raw_btf = NULL;
type_sec_size = get_raw_sec_size(raw_types);
if (CHECK(type_sec_size < 0, "Cannot get nr_raw_types"))
@@ -2753,17 +2866,44 @@ static void *btf_raw_create(const struct btf_header *hdr,
memcpy(raw_btf, hdr, sizeof(*hdr));
offset = sizeof(*hdr);
+ /* Index strings */
+ while ((next_str = get_next_str(next_str, end_str))) {
+ if (strs_cnt == strs_cap) {
+ strs_cap += max(16, strs_cap / 2);
+ tmp_strs_idx = realloc(strs_idx,
+ sizeof(*strs_idx) * strs_cap);
+ if (CHECK(!tmp_strs_idx,
+ "Cannot allocate memory for strs_idx")) {
+ err = -1;
+ goto done;
+ }
+ strs_idx = tmp_strs_idx;
+ }
+ strs_idx[strs_cnt++] = next_str;
+ next_str += strlen(next_str);
+ }
+
/* Copy type section */
ret_types = raw_btf + offset;
for (i = 0; i < type_sec_size / sizeof(raw_types[0]); i++) {
if (raw_types[i] == NAME_TBD) {
- next_str = get_next_str(next_str, end_str);
- if (CHECK(!next_str, "Error in getting next_str")) {
- free(raw_btf);
- return NULL;
+ if (CHECK(next_str_idx == strs_cnt,
+ "Error in getting next_str #%d",
+ next_str_idx)) {
+ err = -1;
+ goto done;
}
- ret_types[i] = next_str - str;
- next_str += strlen(next_str);
+ ret_types[i] = strs_idx[next_str_idx++] - str;
+ } else if (IS_NAME_NTH(raw_types[i])) {
+ int idx = GET_NAME_NTH_IDX(raw_types[i]);
+
+ if (CHECK(idx <= 0 || idx > strs_cnt,
+ "Error getting string #%d, strs_cnt:%d",
+ idx, strs_cnt)) {
+ err = -1;
+ goto done;
+ }
+ ret_types[i] = strs_idx[idx-1] - str;
} else {
ret_types[i] = raw_types[i];
}
@@ -2780,8 +2920,17 @@ static void *btf_raw_create(const struct btf_header *hdr,
*btf_size = size_needed;
if (ret_next_str)
- *ret_next_str = next_str;
+ *ret_next_str =
+ next_str_idx < strs_cnt ? strs_idx[next_str_idx] : NULL;
+done:
+ if (err) {
+ if (raw_btf)
+ free(raw_btf);
+ if (strs_idx)
+ free(strs_idx);
+ return NULL;
+ }
return raw_btf;
}
@@ -3530,6 +3679,16 @@ struct pprint_mapv {
uint32_t bits2c:2;
};
+#ifdef __SIZEOF_INT128__
+struct pprint_mapv_int128 {
+ __int128 si128a;
+ __int128 si128b;
+ unsigned __int128 bits3:3;
+ unsigned __int128 bits80:80;
+ unsigned __int128 ui128;
+};
+#endif
+
static struct btf_raw_test pprint_test_template[] = {
{
.raw_types = {
@@ -3721,6 +3880,35 @@ static struct btf_raw_test pprint_test_template[] = {
.max_entries = 128 * 1024,
},
+#ifdef __SIZEOF_INT128__
+{
+ /* test int128 */
+ .raw_types = {
+ /* unsigned int */ /* [1] */
+ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4),
+ /* __int128 */ /* [2] */
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 128, 16),
+ /* unsigned __int128 */ /* [3] */
+ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 128, 16),
+ /* struct pprint_mapv_int128 */ /* [4] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 5), 64),
+ BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 0)), /* si128a */
+ BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 128)), /* si128b */
+ BTF_MEMBER_ENC(NAME_TBD, 3, BTF_MEMBER_OFFSET(3, 256)), /* bits3 */
+ BTF_MEMBER_ENC(NAME_TBD, 3, BTF_MEMBER_OFFSET(80, 259)), /* bits80 */
+ BTF_MEMBER_ENC(NAME_TBD, 3, BTF_MEMBER_OFFSET(0, 384)), /* ui128 */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0unsigned int\0__int128\0unsigned __int128\0pprint_mapv_int128\0si128a\0si128b\0bits3\0bits80\0ui128"),
+ .key_size = sizeof(unsigned int),
+ .value_size = sizeof(struct pprint_mapv_int128),
+ .key_type_id = 1,
+ .value_type_id = 4,
+ .max_entries = 128 * 1024,
+ .mapv_kind = PPRINT_MAPV_KIND_INT128,
+},
+#endif
+
};
static struct btf_pprint_test_meta {
@@ -3787,24 +3975,108 @@ static struct btf_pprint_test_meta {
};
+static size_t get_pprint_mapv_size(enum pprint_mapv_kind_t mapv_kind)
+{
+ if (mapv_kind == PPRINT_MAPV_KIND_BASIC)
+ return sizeof(struct pprint_mapv);
+
+#ifdef __SIZEOF_INT128__
+ if (mapv_kind == PPRINT_MAPV_KIND_INT128)
+ return sizeof(struct pprint_mapv_int128);
+#endif
+
+ assert(0);
+}
-static void set_pprint_mapv(struct pprint_mapv *v, uint32_t i,
+static void set_pprint_mapv(enum pprint_mapv_kind_t mapv_kind,
+ void *mapv, uint32_t i,
int num_cpus, int rounded_value_size)
{
int cpu;
- for (cpu = 0; cpu < num_cpus; cpu++) {
- v->ui32 = i + cpu;
- v->si32 = -i;
- v->unused_bits2a = 3;
- v->bits28 = i;
- v->unused_bits2b = 3;
- v->ui64 = i;
- v->aenum = i & 0x03;
- v->ui32b = 4;
- v->bits2c = 1;
- v = (void *)v + rounded_value_size;
+ if (mapv_kind == PPRINT_MAPV_KIND_BASIC) {
+ struct pprint_mapv *v = mapv;
+
+ for (cpu = 0; cpu < num_cpus; cpu++) {
+ v->ui32 = i + cpu;
+ v->si32 = -i;
+ v->unused_bits2a = 3;
+ v->bits28 = i;
+ v->unused_bits2b = 3;
+ v->ui64 = i;
+ v->aenum = i & 0x03;
+ v->ui32b = 4;
+ v->bits2c = 1;
+ v = (void *)v + rounded_value_size;
+ }
+ }
+
+#ifdef __SIZEOF_INT128__
+ if (mapv_kind == PPRINT_MAPV_KIND_INT128) {
+ struct pprint_mapv_int128 *v = mapv;
+
+ for (cpu = 0; cpu < num_cpus; cpu++) {
+ v->si128a = i;
+ v->si128b = -i;
+ v->bits3 = i & 0x07;
+ v->bits80 = (((unsigned __int128)1) << 64) + i;
+ v->ui128 = (((unsigned __int128)2) << 64) + i;
+ v = (void *)v + rounded_value_size;
+ }
+ }
+#endif
+}
+
+ssize_t get_pprint_expected_line(enum pprint_mapv_kind_t mapv_kind,
+ char *expected_line, ssize_t line_size,
+ bool percpu_map, unsigned int next_key,
+ int cpu, void *mapv)
+{
+ ssize_t nexpected_line = -1;
+
+ if (mapv_kind == PPRINT_MAPV_KIND_BASIC) {
+ struct pprint_mapv *v = mapv;
+
+ nexpected_line = snprintf(expected_line, line_size,
+ "%s%u: {%u,0,%d,0x%x,0x%x,0x%x,"
+ "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s,"
+ "%u,0x%x}\n",
+ percpu_map ? "\tcpu" : "",
+ percpu_map ? cpu : next_key,
+ v->ui32, v->si32,
+ v->unused_bits2a,
+ v->bits28,
+ v->unused_bits2b,
+ v->ui64,
+ v->ui8a[0], v->ui8a[1],
+ v->ui8a[2], v->ui8a[3],
+ v->ui8a[4], v->ui8a[5],
+ v->ui8a[6], v->ui8a[7],
+ pprint_enum_str[v->aenum],
+ v->ui32b,
+ v->bits2c);
+ }
+
+#ifdef __SIZEOF_INT128__
+ if (mapv_kind == PPRINT_MAPV_KIND_INT128) {
+ struct pprint_mapv_int128 *v = mapv;
+
+ nexpected_line = snprintf(expected_line, line_size,
+ "%s%u: {0x%lx,0x%lx,0x%lx,"
+ "0x%lx%016lx,0x%lx%016lx}\n",
+ percpu_map ? "\tcpu" : "",
+ percpu_map ? cpu : next_key,
+ (uint64_t)v->si128a,
+ (uint64_t)v->si128b,
+ (uint64_t)v->bits3,
+ (uint64_t)(v->bits80 >> 64),
+ (uint64_t)v->bits80,
+ (uint64_t)(v->ui128 >> 64),
+ (uint64_t)v->ui128);
}
+#endif
+
+ return nexpected_line;
}
static int check_line(const char *expected_line, int nexpected_line,
@@ -3828,10 +4100,10 @@ static int check_line(const char *expected_line, int nexpected_line,
static int do_test_pprint(int test_num)
{
const struct btf_raw_test *test = &pprint_test_template[test_num];
+ enum pprint_mapv_kind_t mapv_kind = test->mapv_kind;
struct bpf_create_map_attr create_attr = {};
bool ordered_map, lossless_map, percpu_map;
int err, ret, num_cpus, rounded_value_size;
- struct pprint_mapv *mapv = NULL;
unsigned int key, nr_read_elems;
int map_fd = -1, btf_fd = -1;
unsigned int raw_btf_size;
@@ -3840,6 +4112,7 @@ static int do_test_pprint(int test_num)
char pin_path[255];
size_t line_len = 0;
char *line = NULL;
+ void *mapv = NULL;
uint8_t *raw_btf;
ssize_t nread;
@@ -3892,7 +4165,7 @@ static int do_test_pprint(int test_num)
percpu_map = test->percpu_map;
num_cpus = percpu_map ? bpf_num_possible_cpus() : 1;
- rounded_value_size = round_up(sizeof(struct pprint_mapv), 8);
+ rounded_value_size = round_up(get_pprint_mapv_size(mapv_kind), 8);
mapv = calloc(num_cpus, rounded_value_size);
if (CHECK(!mapv, "mapv allocation failure")) {
err = -1;
@@ -3900,7 +4173,7 @@ static int do_test_pprint(int test_num)
}
for (key = 0; key < test->max_entries; key++) {
- set_pprint_mapv(mapv, key, num_cpus, rounded_value_size);
+ set_pprint_mapv(mapv_kind, mapv, key, num_cpus, rounded_value_size);
bpf_map_update_elem(map_fd, &key, mapv, 0);
}
@@ -3924,13 +4197,13 @@ static int do_test_pprint(int test_num)
ordered_map = test->ordered_map;
lossless_map = test->lossless_map;
do {
- struct pprint_mapv *cmapv;
ssize_t nexpected_line;
unsigned int next_key;
+ void *cmapv;
int cpu;
next_key = ordered_map ? nr_read_elems : atoi(line);
- set_pprint_mapv(mapv, next_key, num_cpus, rounded_value_size);
+ set_pprint_mapv(mapv_kind, mapv, next_key, num_cpus, rounded_value_size);
cmapv = mapv;
for (cpu = 0; cpu < num_cpus; cpu++) {
@@ -3963,31 +4236,16 @@ static int do_test_pprint(int test_num)
break;
}
- nexpected_line = snprintf(expected_line, sizeof(expected_line),
- "%s%u: {%u,0,%d,0x%x,0x%x,0x%x,"
- "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s,"
- "%u,0x%x}\n",
- percpu_map ? "\tcpu" : "",
- percpu_map ? cpu : next_key,
- cmapv->ui32, cmapv->si32,
- cmapv->unused_bits2a,
- cmapv->bits28,
- cmapv->unused_bits2b,
- cmapv->ui64,
- cmapv->ui8a[0], cmapv->ui8a[1],
- cmapv->ui8a[2], cmapv->ui8a[3],
- cmapv->ui8a[4], cmapv->ui8a[5],
- cmapv->ui8a[6], cmapv->ui8a[7],
- pprint_enum_str[cmapv->aenum],
- cmapv->ui32b,
- cmapv->bits2c);
-
+ nexpected_line = get_pprint_expected_line(mapv_kind, expected_line,
+ sizeof(expected_line),
+ percpu_map, next_key,
+ cpu, cmapv);
err = check_line(expected_line, nexpected_line,
sizeof(expected_line), line);
if (err == -1)
goto done;
- cmapv = (void *)cmapv + rounded_value_size;
+ cmapv = cmapv + rounded_value_size;
}
if (percpu_map) {
@@ -4083,6 +4341,10 @@ static struct prog_info_raw_test {
__u32 line_info_rec_size;
__u32 nr_jited_ksyms;
bool expected_prog_load_failure;
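+ /* Counters and bitmasks (bit i == entry i) of the line_info and
+ * func_info entries that the verifier's dead code elimination is
+ * expected to drop, as checked by test_get_finfo()/test_get_linfo().
+ */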
+ __u32 dead_code_cnt;
+ __u32 dead_code_mask;
+ __u32 dead_func_cnt;
+ __u32 dead_func_mask;
} info_raw_tests[] = {
{
.descr = "func_type (main func + one sub)",
@@ -4509,6 +4771,369 @@ static struct prog_info_raw_test {
.expected_prog_load_failure = true,
},
+{
+ .descr = "line_info (dead start)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0/* dead jmp */\0int a=1;\0int b=2;\0return a + b;\0return a + b;"),
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 0,
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 9),
+ BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 7),
+ BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 5, 6),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 1,
+ .dead_code_cnt = 1,
+ .dead_code_mask = 0x01,
+},
+
+{
+ .descr = "line_info (dead end)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0int a=1;\0int b=2;\0return a + b;\0/* dead jmp */\0return a + b;\0/* dead exit */"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 0,
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 12),
+ BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 11),
+ BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 10),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 9),
+ BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 5, 8),
+ BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 6, 7),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 1,
+ .dead_code_cnt = 2,
+ .dead_code_mask = 0x28,
+},
+
+{
+ .descr = "line_info (dead code + subprog + func_info)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0x\0sub\0main\0int a=1+1;\0/* dead jmp */"
+ "\0/* dead */\0/* dead */\0/* dead */\0/* dead */"
+ "\0/* dead */\0/* dead */\0/* dead */\0/* dead */"
+ "\0return func(a);\0b+=1;\0return b;"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 8),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_CALL_REL(1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 2,
+ .func_info_rec_size = 8,
+ .func_info = { {0, 4}, {14, 3} },
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(10, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 2, 9),
+ BPF_LINE_INFO_ENC(12, 0, NAME_TBD, 2, 9),
+ BPF_LINE_INFO_ENC(14, 0, NAME_TBD, 3, 8),
+ BPF_LINE_INFO_ENC(16, 0, NAME_TBD, 4, 7),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 2,
+ .dead_code_cnt = 9,
+ .dead_code_mask = 0x3fe,
+},
+
+{
+ .descr = "line_info (dead subprog)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [5] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0x\0dead\0main\0func\0int a=1+1;\0/* live call */"
+ "\0return 0;\0return 0;\0/* dead */\0/* dead */"
+ "\0/* dead */\0return bla + 1;\0return bla + 1;"
+ "\0return bla + 1;\0return func(a);\0b+=1;\0return b;"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_CALL_REL(3),
+ BPF_CALL_REL(5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_CALL_REL(1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, 2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 3,
+ .func_info_rec_size = 8,
+ .func_info = { {0, 4}, {6, 3}, {9, 5} },
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(10, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 2, 9),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 2,
+ .dead_code_cnt = 3,
+ .dead_code_mask = 0x70,
+ .dead_func_cnt = 1,
+ .dead_func_mask = 0x2,
+},
+
+{
+ .descr = "line_info (dead last subprog)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0x\0dead\0main\0int a=1+1;\0/* live call */"
+ "\0return 0;\0/* dead */\0/* dead */"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_CALL_REL(2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 2,
+ .func_info_rec_size = 8,
+ .func_info = { {0, 4}, {5, 3} },
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 1,
+ .dead_code_cnt = 2,
+ .dead_code_mask = 0x18,
+ .dead_func_cnt = 1,
+ .dead_func_mask = 0x2,
+},
+
+{
+ .descr = "line_info (dead subprog + dead start)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [5] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0x\0dead\0main\0func\0int a=1+1;\0/* dead */"
+ "\0return 0;\0return 0;\0return 0;"
+ "\0/* dead */\0/* dead */\0/* dead */\0/* dead */"
+ "\0return b + 1;\0return b + 1;\0return b + 1;"),
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_CALL_REL(3),
+ BPF_CALL_REL(5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_CALL_REL(1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, 2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 3,
+ .func_info_rec_size = 8,
+ .func_info = { {0, 4}, {7, 3}, {10, 5} },
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(10, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 2, 9),
+ BPF_LINE_INFO_ENC(12, 0, NAME_TBD, 2, 9),
+ BPF_LINE_INFO_ENC(13, 0, NAME_TBD, 2, 9),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 2,
+ .dead_code_cnt = 5,
+ .dead_code_mask = 0x1e2,
+ .dead_func_cnt = 1,
+ .dead_func_mask = 0x2,
+},
+
+{
+ .descr = "line_info (dead subprog + dead start w/ move)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [5] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0x\0dead\0main\0func\0int a=1+1;\0/* live call */"
+ "\0return 0;\0return 0;\0/* dead */\0/* dead */"
+ "\0/* dead */\0return bla + 1;\0return bla + 1;"
+ "\0return bla + 1;\0return func(a);\0b+=1;\0return b;"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_CALL_REL(3),
+ BPF_CALL_REL(5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_CALL_REL(1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, 2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 3,
+ .func_info_rec_size = 8,
+ .func_info = { {0, 4}, {6, 3}, {9, 5} },
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(12, 0, NAME_TBD, 2, 9),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 2,
+ .dead_code_cnt = 3,
+ .dead_code_mask = 0x70,
+ .dead_func_cnt = 1,
+ .dead_func_mask = 0x2,
+},
+
+{
+ .descr = "line_info (dead end + subprog start w/ no linfo)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0x\0main\0func\0/* main linfo */\0/* func linfo */"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 1, 3),
+ BPF_CALL_REL(3),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 2,
+ .func_info_rec_size = 8,
+ .func_info = { {0, 3}, {6, 4}, },
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 2,
+},
+
};
static size_t probe_prog_length(const struct bpf_insn *fp)
@@ -4568,6 +5193,7 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
struct bpf_func_info *finfo;
__u32 info_len, rec_size, i;
void *func_info = NULL;
+ __u32 nr_func_info;
int err;
/* get necessary lens */
@@ -4577,7 +5203,8 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
fprintf(stderr, "%s\n", btf_log_buf);
return -1;
}
- if (CHECK(info.nr_func_info != test->func_info_cnt,
+ nr_func_info = test->func_info_cnt - test->dead_func_cnt;
+ if (CHECK(info.nr_func_info != nr_func_info,
"incorrect info.nr_func_info (1st) %d",
info.nr_func_info)) {
return -1;
@@ -4598,7 +5225,7 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
/* reset info to only retrieve func_info related data */
memset(&info, 0, sizeof(info));
- info.nr_func_info = test->func_info_cnt;
+ info.nr_func_info = nr_func_info;
info.func_info_rec_size = rec_size;
info.func_info = ptr_to_u64(func_info);
err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
@@ -4607,7 +5234,7 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
err = -1;
goto done;
}
- if (CHECK(info.nr_func_info != test->func_info_cnt,
+ if (CHECK(info.nr_func_info != nr_func_info,
"incorrect info.nr_func_info (2nd) %d",
info.nr_func_info)) {
err = -1;
@@ -4621,7 +5248,9 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
}
finfo = func_info;
- for (i = 0; i < test->func_info_cnt; i++) {
+ for (i = 0; i < nr_func_info; i++) {
+ if (test->dead_func_mask & (1 << i))
+ continue;
if (CHECK(finfo->type_id != test->func_info[i][1],
"incorrect func_type %u expected %u",
finfo->type_id, test->func_info[i][1])) {
@@ -4650,6 +5279,7 @@ static int test_get_linfo(const struct prog_info_raw_test *test,
struct bpf_prog_info info = {};
__u32 *jited_func_lens = NULL;
__u64 cur_func_ksyms;
+ __u32 dead_insns;
int err;
jited_cnt = cnt;
@@ -4658,7 +5288,7 @@ static int test_get_linfo(const struct prog_info_raw_test *test,
if (test->nr_jited_ksyms)
nr_jited_ksyms = test->nr_jited_ksyms;
else
- nr_jited_ksyms = test->func_info_cnt;
+ nr_jited_ksyms = test->func_info_cnt - test->dead_func_cnt;
nr_jited_func_lens = nr_jited_ksyms;
info_len = sizeof(struct bpf_prog_info);
@@ -4760,12 +5390,20 @@ static int test_get_linfo(const struct prog_info_raw_test *test,
goto done;
}
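+ /* Entries eliminated as dead code never show up in the returned
+ * line_info; skip over them in the original (patched) line_info when
+ * picking the entry to compare against.
+ */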
+ dead_insns = 0;
+ while (test->dead_code_mask & (1 << dead_insns))
+ dead_insns++;
+
CHECK(linfo[0].insn_off, "linfo[0].insn_off:%u",
linfo[0].insn_off);
for (i = 1; i < cnt; i++) {
const struct bpf_line_info *expected_linfo;
- expected_linfo = patched_linfo + (i * test->line_info_rec_size);
+ while (test->dead_code_mask & (1 << (i + dead_insns)))
+ dead_insns++;
+
+ expected_linfo = patched_linfo +
+ ((i + dead_insns) * test->line_info_rec_size);
if (CHECK(linfo[i].insn_off <= linfo[i - 1].insn_off,
"linfo[%u].insn_off:%u <= linfo[%u].insn_off:%u",
i, linfo[i].insn_off,
@@ -4923,7 +5561,9 @@ static int do_test_info_raw(unsigned int test_num)
if (err)
goto done;
- err = test_get_linfo(test, patched_linfo, attr.line_info_cnt, prog_fd);
+ err = test_get_linfo(test, patched_linfo,
+ attr.line_info_cnt - test->dead_code_cnt,
+ prog_fd);
if (err)
goto done;
@@ -4959,20 +5599,508 @@ static int test_info_raw(void)
return err;
}
+struct btf_raw_data {
+ __u32 raw_types[MAX_NR_RAW_U32];
+ const char *str_sec;
+ __u32 str_sec_size;
+};
+
+struct btf_dedup_test {
+ const char *descr;
+ struct btf_raw_data input;
+ struct btf_raw_data expect;
+ struct btf_dedup_opts opts;
+};
+
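+/* Each dedup test runs btf__dedup() on 'input' with 'opts' and expects the
+ * resulting raw BTF (type records and string section) to match 'expect'
+ * exactly.
+ */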
+const struct btf_dedup_test dedup_tests[] = {
+
+{
+ .descr = "dedup: unused strings filtering",
+ .input = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 32, 4),
+ BTF_TYPE_INT_ENC(NAME_NTH(5), BTF_INT_SIGNED, 0, 64, 8),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0unused\0int\0foo\0bar\0long"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4),
+ BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 64, 8),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0long"),
+ },
+ .opts = {
+ .dont_resolve_fwds = false,
+ },
+},
+{
+ .descr = "dedup: strings deduplication",
+ .input = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4),
+ BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 64, 8),
+ BTF_TYPE_INT_ENC(NAME_NTH(3), BTF_INT_SIGNED, 0, 32, 4),
+ BTF_TYPE_INT_ENC(NAME_NTH(4), BTF_INT_SIGNED, 0, 64, 8),
+ BTF_TYPE_INT_ENC(NAME_NTH(5), BTF_INT_SIGNED, 0, 32, 4),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0long int\0int\0long int\0int"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4),
+ BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 64, 8),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0long int"),
+ },
+ .opts = {
+ .dont_resolve_fwds = false,
+ },
+},
+{
+ .descr = "dedup: struct example #1",
+ /*
+ * struct s {
+ * struct s *next;
+ * const int *a;
+ * int b[16];
+ * int c;
+ * }
+ */
+ .input = {
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* int[16] */
+ BTF_TYPE_ARRAY_ENC(1, 1, 16), /* [2] */
+ /* struct s { */
+ BTF_STRUCT_ENC(NAME_NTH(2), 4, 84), /* [3] */
+ BTF_MEMBER_ENC(NAME_NTH(3), 4, 0), /* struct s *next; */
+ BTF_MEMBER_ENC(NAME_NTH(4), 5, 64), /* const int *a; */
+ BTF_MEMBER_ENC(NAME_NTH(5), 2, 128), /* int b[16]; */
+ BTF_MEMBER_ENC(NAME_NTH(6), 1, 640), /* int c; */
+ /* ptr -> [3] struct s */
+ BTF_PTR_ENC(3), /* [4] */
+ /* ptr -> [6] const int */
+ BTF_PTR_ENC(6), /* [5] */
+ /* const -> [1] int */
+ BTF_CONST_ENC(1), /* [6] */
+
+ /* full copy of the above */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), /* [7] */
+ BTF_TYPE_ARRAY_ENC(7, 7, 16), /* [8] */
+ BTF_STRUCT_ENC(NAME_NTH(2), 4, 84), /* [9] */
+ BTF_MEMBER_ENC(NAME_NTH(3), 10, 0),
+ BTF_MEMBER_ENC(NAME_NTH(4), 11, 64),
+ BTF_MEMBER_ENC(NAME_NTH(5), 8, 128),
+ BTF_MEMBER_ENC(NAME_NTH(6), 7, 640),
+ BTF_PTR_ENC(9), /* [10] */
+ BTF_PTR_ENC(12), /* [11] */
+ BTF_CONST_ENC(7), /* [12] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0s\0next\0a\0b\0c\0"),
+ },
+ .expect = {
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(NAME_NTH(4), BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* int[16] */
+ BTF_TYPE_ARRAY_ENC(1, 1, 16), /* [2] */
+ /* struct s { */
+ BTF_STRUCT_ENC(NAME_NTH(6), 4, 84), /* [3] */
+ BTF_MEMBER_ENC(NAME_NTH(5), 4, 0), /* struct s *next; */
+ BTF_MEMBER_ENC(NAME_NTH(1), 5, 64), /* const int *a; */
+ BTF_MEMBER_ENC(NAME_NTH(2), 2, 128), /* int b[16]; */
+ BTF_MEMBER_ENC(NAME_NTH(3), 1, 640), /* int c; */
+ /* ptr -> [3] struct s */
+ BTF_PTR_ENC(3), /* [4] */
+ /* ptr -> [6] const int */
+ BTF_PTR_ENC(6), /* [5] */
+ /* const -> [1] int */
+ BTF_CONST_ENC(1), /* [6] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0a\0b\0c\0int\0next\0s"),
+ },
+ .opts = {
+ .dont_resolve_fwds = false,
+ },
+},
+{
+ .descr = "dedup: struct <-> fwd resolution w/ hash collision",
+ /*
+ * // CU 1:
+ * struct x;
+ * struct s {
+ * struct x *x;
+ * };
+ * // CU 2:
+ * struct x {};
+ * struct s {
+ * struct x *x;
+ * };
+ */
+ .input = {
+ .raw_types = {
+ /* CU 1 */
+ BTF_FWD_ENC(NAME_TBD, 0 /* struct fwd */), /* [1] fwd x */
+ BTF_PTR_ENC(1), /* [2] ptr -> [1] */
+ BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [3] struct s */
+ BTF_MEMBER_ENC(NAME_TBD, 2, 0),
+ /* CU 2 */
+ BTF_STRUCT_ENC(NAME_TBD, 0, 0), /* [4] struct x */
+ BTF_PTR_ENC(4), /* [5] ptr -> [4] */
+ BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [6] struct s */
+ BTF_MEMBER_ENC(NAME_TBD, 5, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0x\0s\0x\0x\0s\0x\0"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_PTR_ENC(3), /* [1] ptr -> [3] */
+ BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [2] struct s */
+ BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+ BTF_STRUCT_ENC(NAME_NTH(2), 0, 0), /* [3] struct x */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0s\0x"),
+ },
+ .opts = {
+ .dont_resolve_fwds = false,
+ .dedup_table_size = 1, /* force hash collisions */
+ },
+},
+{
+ .descr = "dedup: all possible kinds (no duplicates)",
+ .input = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 8), /* [1] int */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 2), 4), /* [2] enum */
+ BTF_ENUM_ENC(NAME_TBD, 0),
+ BTF_ENUM_ENC(NAME_TBD, 1),
+ BTF_FWD_ENC(NAME_TBD, 1 /* union kind_flag */), /* [3] fwd */
+ BTF_TYPE_ARRAY_ENC(2, 1, 7), /* [4] array */
+ BTF_STRUCT_ENC(NAME_TBD, 1, 4), /* [5] struct */
+ BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+ BTF_UNION_ENC(NAME_TBD, 1, 4), /* [6] union */
+ BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+ BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [7] typedef */
+ BTF_PTR_ENC(0), /* [8] ptr */
+ BTF_CONST_ENC(8), /* [9] const */
+ BTF_VOLATILE_ENC(8), /* [10] volatile */
+ BTF_RESTRICT_ENC(8), /* [11] restrict */
+ BTF_FUNC_PROTO_ENC(1, 2), /* [12] func_proto */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8),
+ BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 8), /* [1] int */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 2), 4), /* [2] enum */
+ BTF_ENUM_ENC(NAME_TBD, 0),
+ BTF_ENUM_ENC(NAME_TBD, 1),
+ BTF_FWD_ENC(NAME_TBD, 1 /* union kind_flag */), /* [3] fwd */
+ BTF_TYPE_ARRAY_ENC(2, 1, 7), /* [4] array */
+ BTF_STRUCT_ENC(NAME_TBD, 1, 4), /* [5] struct */
+ BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+ BTF_UNION_ENC(NAME_TBD, 1, 4), /* [6] union */
+ BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+ BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [7] typedef */
+ BTF_PTR_ENC(0), /* [8] ptr */
+ BTF_CONST_ENC(8), /* [9] const */
+ BTF_VOLATILE_ENC(8), /* [10] volatile */
+ BTF_RESTRICT_ENC(8), /* [11] restrict */
+ BTF_FUNC_PROTO_ENC(1, 2), /* [12] func_proto */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8),
+ BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M"),
+ },
+ .opts = {
+ .dont_resolve_fwds = false,
+ },
+},
+{
+ .descr = "dedup: no int duplicates",
+ .input = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 8),
+ /* different name */
+ BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 32, 8),
+ /* different encoding */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_CHAR, 0, 32, 8),
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_BOOL, 0, 32, 8),
+ /* different bit offset */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 8, 32, 8),
+ /* different bit size */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 27, 8),
+ /* different byte size */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0some other int"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 8),
+ /* different name */
+ BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 32, 8),
+ /* different encoding */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_CHAR, 0, 32, 8),
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_BOOL, 0, 32, 8),
+ /* different bit offset */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 8, 32, 8),
+ /* different bit size */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 27, 8),
+ /* different byte size */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0some other int"),
+ },
+ .opts = {
+ .dont_resolve_fwds = false,
+ },
+},
+
+};
+
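+/* Byte size of one raw BTF type record, including the kind-specific trailing
+ * data (int encoding word, array info, enum/member/param entries).
+ */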
+static int btf_type_size(const struct btf_type *t)
+{
+ int base_size = sizeof(struct btf_type);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+ __u16 kind = BTF_INFO_KIND(t->info);
+
+ switch (kind) {
+ case BTF_KIND_FWD:
+ case BTF_KIND_CONST:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_PTR:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_FUNC:
+ return base_size;
+ case BTF_KIND_INT:
+ return base_size + sizeof(__u32);
+ case BTF_KIND_ENUM:
+ return base_size + vlen * sizeof(struct btf_enum);
+ case BTF_KIND_ARRAY:
+ return base_size + sizeof(struct btf_array);
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ return base_size + vlen * sizeof(struct btf_member);
+ case BTF_KIND_FUNC_PROTO:
+ return base_size + vlen * sizeof(struct btf_param);
+ default:
+ fprintf(stderr, "Unsupported BTF_KIND:%u\n", kind);
+ return -EINVAL;
+ }
+}
+
+static void dump_btf_strings(const char *strs, __u32 len)
+{
+ const char *cur = strs;
+ int i = 0;
+
+ while (cur < strs + len) {
+ fprintf(stderr, "string #%d: '%s'\n", i, cur);
+ cur += strlen(cur) + 1;
+ i++;
+ }
+}
+
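+/* Build BTF objects from the test's input and expected raw data, dedup the
+ * input, then compare the string sections and every type record
+ * byte-for-byte against the expected BTF.
+ */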
+static int do_test_dedup(unsigned int test_num)
+{
+ const struct btf_dedup_test *test = &dedup_tests[test_num - 1];
+ __u32 test_nr_types, expect_nr_types, test_btf_size, expect_btf_size;
+ const struct btf_header *test_hdr, *expect_hdr;
+ struct btf *test_btf = NULL, *expect_btf = NULL;
+ const void *test_btf_data, *expect_btf_data;
+ const char *ret_test_next_str, *ret_expect_next_str;
+ const char *test_strs, *expect_strs;
+ const char *test_str_cur, *test_str_end;
+ const char *expect_str_cur, *expect_str_end;
+ unsigned int raw_btf_size;
+ void *raw_btf;
+ int err = 0, i;
+
+ fprintf(stderr, "BTF dedup test[%u] (%s):", test_num, test->descr);
+
+ raw_btf = btf_raw_create(&hdr_tmpl, test->input.raw_types,
+ test->input.str_sec, test->input.str_sec_size,
+ &raw_btf_size, &ret_test_next_str);
+ if (!raw_btf)
+ return -1;
+ test_btf = btf__new((__u8 *)raw_btf, raw_btf_size);
+ free(raw_btf);
+ if (CHECK(IS_ERR(test_btf), "invalid test_btf errno:%ld",
+ PTR_ERR(test_btf))) {
+ err = -1;
+ goto done;
+ }
+
+ raw_btf = btf_raw_create(&hdr_tmpl, test->expect.raw_types,
+ test->expect.str_sec,
+ test->expect.str_sec_size,
+ &raw_btf_size, &ret_expect_next_str);
+ if (!raw_btf)
+ return -1;
+ expect_btf = btf__new((__u8 *)raw_btf, raw_btf_size);
+ free(raw_btf);
+ if (CHECK(IS_ERR(expect_btf), "invalid expect_btf errno:%ld",
+ PTR_ERR(expect_btf))) {
+ err = -1;
+ goto done;
+ }
+
+ err = btf__dedup(test_btf, NULL, &test->opts);
+ if (CHECK(err, "btf_dedup failed errno:%d", err)) {
+ err = -1;
+ goto done;
+ }
+
+ test_btf_data = btf__get_raw_data(test_btf, &test_btf_size);
+ expect_btf_data = btf__get_raw_data(expect_btf, &expect_btf_size);
+ if (CHECK(test_btf_size != expect_btf_size,
+ "test_btf_size:%u != expect_btf_size:%u",
+ test_btf_size, expect_btf_size)) {
+ err = -1;
+ goto done;
+ }
+
+ test_hdr = test_btf_data;
+ test_strs = test_btf_data + sizeof(*test_hdr) + test_hdr->str_off;
+ expect_hdr = expect_btf_data;
+ expect_strs = expect_btf_data + sizeof(*test_hdr) + expect_hdr->str_off;
+ if (CHECK(test_hdr->str_len != expect_hdr->str_len,
+ "test_hdr->str_len:%u != expect_hdr->str_len:%u",
+ test_hdr->str_len, expect_hdr->str_len)) {
+ fprintf(stderr, "\ntest strings:\n");
+ dump_btf_strings(test_strs, test_hdr->str_len);
+ fprintf(stderr, "\nexpected strings:\n");
+ dump_btf_strings(expect_strs, expect_hdr->str_len);
+ err = -1;
+ goto done;
+ }
+
+ test_str_cur = test_strs;
+ test_str_end = test_strs + test_hdr->str_len;
+ expect_str_cur = expect_strs;
+ expect_str_end = expect_strs + expect_hdr->str_len;
+ while (test_str_cur < test_str_end && expect_str_cur < expect_str_end) {
+ size_t test_len, expect_len;
+
+ test_len = strlen(test_str_cur);
+ expect_len = strlen(expect_str_cur);
+ if (CHECK(test_len != expect_len,
+ "test_len:%zu != expect_len:%zu "
+ "(test_str:%s, expect_str:%s)",
+ test_len, expect_len, test_str_cur, expect_str_cur)) {
+ err = -1;
+ goto done;
+ }
+ if (CHECK(strcmp(test_str_cur, expect_str_cur),
+ "test_str:%s != expect_str:%s",
+ test_str_cur, expect_str_cur)) {
+ err = -1;
+ goto done;
+ }
+ test_str_cur += test_len + 1;
+ expect_str_cur += expect_len + 1;
+ }
+ if (CHECK(test_str_cur != test_str_end,
+ "test_str_cur:%p != test_str_end:%p",
+ test_str_cur, test_str_end)) {
+ err = -1;
+ goto done;
+ }
+
+ test_nr_types = btf__get_nr_types(test_btf);
+ expect_nr_types = btf__get_nr_types(expect_btf);
+ if (CHECK(test_nr_types != expect_nr_types,
+ "test_nr_types:%u != expect_nr_types:%u",
+ test_nr_types, expect_nr_types)) {
+ err = -1;
+ goto done;
+ }
+
+ for (i = 1; i <= test_nr_types; i++) {
+ const struct btf_type *test_type, *expect_type;
+ int test_size, expect_size;
+
+ test_type = btf__type_by_id(test_btf, i);
+ expect_type = btf__type_by_id(expect_btf, i);
+ test_size = btf_type_size(test_type);
+ expect_size = btf_type_size(expect_type);
+
+ if (CHECK(test_size != expect_size,
+ "type #%d: test_size:%d != expect_size:%u",
+ i, test_size, expect_size)) {
+ err = -1;
+ goto done;
+ }
+ if (CHECK(memcmp((void *)test_type,
+ (void *)expect_type,
+ test_size),
+ "type #%d: contents differ", i)) {
+ err = -1;
+ goto done;
+ }
+ }
+
+done:
+ if (!err)
+ fprintf(stderr, "OK");
+ if (!IS_ERR(test_btf))
+ btf__free(test_btf);
+ if (!IS_ERR(expect_btf))
+ btf__free(expect_btf);
+
+ return err;
+}
+
+static int test_dedup(void)
+{
+ unsigned int i;
+ int err = 0;
+
+ if (args.dedup_test_num)
+ return count_result(do_test_dedup(args.dedup_test_num));
+
+ for (i = 1; i <= ARRAY_SIZE(dedup_tests); i++)
+ err |= count_result(do_test_dedup(i));
+
+ return err;
+}
+
static void usage(const char *cmd)
{
fprintf(stderr, "Usage: %s [-l] [[-r btf_raw_test_num (1 - %zu)] |\n"
"\t[-g btf_get_info_test_num (1 - %zu)] |\n"
"\t[-f btf_file_test_num (1 - %zu)] |\n"
"\t[-k btf_prog_info_raw_test_num (1 - %zu)] |\n"
- "\t[-p (pretty print test)]]\n",
+ "\t[-p (pretty print test)] |\n"
+ "\t[-d btf_dedup_test_num (1 - %zu)]]\n",
cmd, ARRAY_SIZE(raw_tests), ARRAY_SIZE(get_info_tests),
- ARRAY_SIZE(file_tests), ARRAY_SIZE(info_raw_tests));
+ ARRAY_SIZE(file_tests), ARRAY_SIZE(info_raw_tests),
+ ARRAY_SIZE(dedup_tests));
}
static int parse_args(int argc, char **argv)
{
- const char *optstr = "lpk:f:r:g:";
+ const char *optstr = "hlpk:f:r:g:d:";
int opt;
while ((opt = getopt(argc, argv, optstr)) != -1) {
@@ -4999,12 +6127,16 @@ static int parse_args(int argc, char **argv)
args.info_raw_test_num = atoi(optarg);
args.info_raw_test = true;
break;
+ case 'd':
+ args.dedup_test_num = atoi(optarg);
+ args.dedup_test = true;
+ break;
case 'h':
usage(argv[0]);
exit(0);
default:
- usage(argv[0]);
- return -1;
+ usage(argv[0]);
+ return -1;
}
}
@@ -5040,6 +6172,14 @@ static int parse_args(int argc, char **argv)
return -1;
}
+ if (args.dedup_test_num &&
+ (args.dedup_test_num < 1 ||
+ args.dedup_test_num > ARRAY_SIZE(dedup_tests))) {
+ fprintf(stderr, "BTF dedup test number must be [1 - %zu]\n",
+ ARRAY_SIZE(dedup_tests));
+ return -1;
+ }
+
return 0;
}
@@ -5058,7 +6198,7 @@ int main(int argc, char **argv)
return err;
if (args.always_log)
- libbpf_set_print(__base_pr, __base_pr, __base_pr);
+ libbpf_set_print(__base_pr);
if (args.raw_test)
err |= test_raw();
@@ -5075,14 +6215,18 @@ int main(int argc, char **argv)
if (args.info_raw_test)
err |= test_info_raw();
+ if (args.dedup_test)
+ err |= test_dedup();
+
if (args.raw_test || args.get_info_test || args.file_test ||
- args.pprint_test || args.info_raw_test)
+ args.pprint_test || args.info_raw_test || args.dedup_test)
goto done;
err |= test_raw();
err |= test_get_info();
err |= test_file();
err |= test_info_raw();
+ err |= test_dedup();
done:
print_summary();
diff --git a/tools/testing/selftests/bpf/test_flow_dissector.c b/tools/testing/selftests/bpf/test_flow_dissector.c
index 12b784afba31..01f0c634d548 100644
--- a/tools/testing/selftests/bpf/test_flow_dissector.c
+++ b/tools/testing/selftests/bpf/test_flow_dissector.c
@@ -16,7 +16,6 @@
#include <errno.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
-#include <linux/if_packet.h>
#include <linux/ipv6.h>
#include <netinet/ip.h>
#include <netinet/in.h>
@@ -25,7 +24,6 @@
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
-#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
diff --git a/tools/testing/selftests/bpf/test_libbpf_open.c b/tools/testing/selftests/bpf/test_libbpf_open.c
index 8fcd1c076add..65cbd30704b5 100644
--- a/tools/testing/selftests/bpf/test_libbpf_open.c
+++ b/tools/testing/selftests/bpf/test_libbpf_open.c
@@ -34,23 +34,16 @@ static void usage(char *argv[])
printf("\n");
}
-#define DEFINE_PRINT_FN(name, enabled) \
-static int libbpf_##name(const char *fmt, ...) \
-{ \
- va_list args; \
- int ret; \
- \
- va_start(args, fmt); \
- if (enabled) { \
- fprintf(stderr, "[" #name "] "); \
- ret = vfprintf(stderr, fmt, args); \
- } \
- va_end(args); \
- return ret; \
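+/* Single print callback for the one-function libbpf_set_print() API: it
+ * receives the message level and suppresses LIBBPF_DEBUG output unless -D
+ * was given.
+ */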
+static bool debug = false;
+static int libbpf_debug_print(enum libbpf_print_level level,
+ const char *fmt, va_list args)
+{
+ if (level == LIBBPF_DEBUG && !debug)
+ return 0;
+
+ fprintf(stderr, "[%d] ", level);
+ return vfprintf(stderr, fmt, args);
}
-DEFINE_PRINT_FN(warning, 1)
-DEFINE_PRINT_FN(info, 1)
-DEFINE_PRINT_FN(debug, 1)
#define EXIT_FAIL_LIBBPF EXIT_FAILURE
#define EXIT_FAIL_OPTION 2
@@ -74,7 +67,7 @@ int test_walk_maps(struct bpf_object *obj, bool verbose)
struct bpf_map *map;
int cnt = 0;
- bpf_map__for_each(map, obj) {
+ bpf_object__for_each_map(map, obj) {
cnt++;
if (verbose)
printf("Map (count:%d) name: %s\n", cnt,
@@ -120,15 +113,14 @@ int main(int argc, char **argv)
int longindex = 0;
int opt;
- libbpf_set_print(libbpf_warning, libbpf_info, NULL);
+ libbpf_set_print(libbpf_debug_print);
/* Parse commands line args */
while ((opt = getopt_long(argc, argv, "hDq",
long_options, &longindex)) != -1) {
switch (opt) {
case 'D':
- libbpf_set_print(libbpf_warning, libbpf_info,
- libbpf_debug);
+ debug = 1;
break;
case 'q': /* Use in scripting mode */
verbose = 0;
diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c
index 147e34cfceb7..02d7c871862a 100644
--- a/tools/testing/selftests/bpf/test_lpm_map.c
+++ b/tools/testing/selftests/bpf/test_lpm_map.c
@@ -474,6 +474,16 @@ static void test_lpm_delete(void)
assert(bpf_map_lookup_elem(map_fd, key, &value) == -1 &&
errno == ENOENT);
+ key->prefixlen = 30; // unused prefix so far
+ inet_pton(AF_INET, "192.255.0.0", key->data);
+ assert(bpf_map_delete_elem(map_fd, key) == -1 &&
+ errno == ENOENT);
+
+ key->prefixlen = 16; // same prefix as the root node
+ inet_pton(AF_INET, "192.255.0.0", key->data);
+ assert(bpf_map_delete_elem(map_fd, key) == -1 &&
+ errno == ENOENT);
+
/* assert initial lookup */
key->prefixlen = 32;
inet_pton(AF_INET, "192.168.0.1", key->data);
diff --git a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
new file mode 100755
index 000000000000..d4d3391cc13a
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
@@ -0,0 +1,426 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Setup/topology:
+#
+# NS1 NS2 NS3
+# veth1 <---> veth2 veth3 <---> veth4 (the top route)
+# veth5 <---> veth6 veth7 <---> veth8 (the bottom route)
+#
+# each vethN gets IPv[4|6]_N address
+#
+# IPv*_SRC = IPv*_1
+# IPv*_DST = IPv*_4
+#
+# all tests test pings from IPv*_SRC to IPv*_DST
+#
+# by default, routes are configured to allow packets to go
+# IP*_1 <=> IP*_2 <=> IP*_3 <=> IP*_4 (the top route)
+#
+# a GRE device is installed in NS3 with IPv*_GRE, and
+# NS1/NS2 are configured to route packets to IPv*_GRE via IP*_8
+# (the bottom route)
+#
+# Tests:
+#
+# 1. routes NS2->IPv*_DST are brought down, so the only way a ping
+# from IP*_SRC to IP*_DST can work is via IPv*_GRE
+#
+# 2a. in an egress test, a bpf LWT_XMIT program is installed on veth1
+# that encaps the packets with an IP/GRE header to route to IPv*_GRE
+#
+# ping: SRC->[encap at veth1:egress]->GRE:decap->DST
+# ping replies go DST->SRC directly
+#
+# 2b. in an ingress test, a bpf LWT_IN program is installed on veth2
+# that encaps the packets with an IP/GRE header to route to IPv*_GRE
+#
+# ping: SRC->[encap at veth2:ingress]->GRE:decap->DST
+# ping replies go DST->SRC directly
+
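+# The replacement routes installed by the tests below look like this
+# (egress, IPv4 GRE encap shown; ingress tests use "encap bpf in ... dev veth2"
+# in ${NS2}, and IPv6 encap uses the encap_gre6 section):
+#
+#   ip -netns ${NS1} route add ${IPv4_DST} \
+#       encap bpf xmit obj test_lwt_ip_encap.o sec encap_gre dev veth1
+#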
+if [[ $EUID -ne 0 ]]; then
+ echo "This script must be run as root"
+ echo "FAIL"
+ exit 1
+fi
+
+readonly NS1="ns1-$(mktemp -u XXXXXX)"
+readonly NS2="ns2-$(mktemp -u XXXXXX)"
+readonly NS3="ns3-$(mktemp -u XXXXXX)"
+
+readonly IPv4_1="172.16.1.100"
+readonly IPv4_2="172.16.2.100"
+readonly IPv4_3="172.16.3.100"
+readonly IPv4_4="172.16.4.100"
+readonly IPv4_5="172.16.5.100"
+readonly IPv4_6="172.16.6.100"
+readonly IPv4_7="172.16.7.100"
+readonly IPv4_8="172.16.8.100"
+readonly IPv4_GRE="172.16.16.100"
+
+readonly IPv4_SRC=$IPv4_1
+readonly IPv4_DST=$IPv4_4
+
+readonly IPv6_1="fb01::1"
+readonly IPv6_2="fb02::1"
+readonly IPv6_3="fb03::1"
+readonly IPv6_4="fb04::1"
+readonly IPv6_5="fb05::1"
+readonly IPv6_6="fb06::1"
+readonly IPv6_7="fb07::1"
+readonly IPv6_8="fb08::1"
+readonly IPv6_GRE="fb10::1"
+
+readonly IPv6_SRC=$IPv6_1
+readonly IPv6_DST=$IPv6_4
+
+TEST_STATUS=0
+TESTS_SUCCEEDED=0
+TESTS_FAILED=0
+
+TMPFILE=""
+
+process_test_results()
+{
+ if [[ "${TEST_STATUS}" -eq 0 ]] ; then
+ echo "PASS"
+ TESTS_SUCCEEDED=$((TESTS_SUCCEEDED+1))
+ else
+ echo "FAIL"
+ TESTS_FAILED=$((TESTS_FAILED+1))
+ fi
+}
+
+print_test_summary_and_exit()
+{
+ echo "passed tests: ${TESTS_SUCCEEDED}"
+ echo "failed tests: ${TESTS_FAILED}"
+ if [ "${TESTS_FAILED}" -eq "0" ] ; then
+ exit 0
+ else
+ exit 1
+ fi
+}
+
+setup()
+{
+ set -e # exit on error
+ TEST_STATUS=0
+
+ # create devices and namespaces
+ ip netns add "${NS1}"
+ ip netns add "${NS2}"
+ ip netns add "${NS3}"
+
+ ip link add veth1 type veth peer name veth2
+ ip link add veth3 type veth peer name veth4
+ ip link add veth5 type veth peer name veth6
+ ip link add veth7 type veth peer name veth8
+
+ ip netns exec ${NS2} sysctl -wq net.ipv4.ip_forward=1
+ ip netns exec ${NS2} sysctl -wq net.ipv6.conf.all.forwarding=1
+
+ ip link set veth1 netns ${NS1}
+ ip link set veth2 netns ${NS2}
+ ip link set veth3 netns ${NS2}
+ ip link set veth4 netns ${NS3}
+ ip link set veth5 netns ${NS1}
+ ip link set veth6 netns ${NS2}
+ ip link set veth7 netns ${NS2}
+ ip link set veth8 netns ${NS3}
+
+ # configure addresses: the top route (1-2-3-4)
+ ip -netns ${NS1} addr add ${IPv4_1}/24 dev veth1
+ ip -netns ${NS2} addr add ${IPv4_2}/24 dev veth2
+ ip -netns ${NS2} addr add ${IPv4_3}/24 dev veth3
+ ip -netns ${NS3} addr add ${IPv4_4}/24 dev veth4
+ ip -netns ${NS1} -6 addr add ${IPv6_1}/128 nodad dev veth1
+ ip -netns ${NS2} -6 addr add ${IPv6_2}/128 nodad dev veth2
+ ip -netns ${NS2} -6 addr add ${IPv6_3}/128 nodad dev veth3
+ ip -netns ${NS3} -6 addr add ${IPv6_4}/128 nodad dev veth4
+
+ # configure addresses: the bottom route (5-6-7-8)
+ ip -netns ${NS1} addr add ${IPv4_5}/24 dev veth5
+ ip -netns ${NS2} addr add ${IPv4_6}/24 dev veth6
+ ip -netns ${NS2} addr add ${IPv4_7}/24 dev veth7
+ ip -netns ${NS3} addr add ${IPv4_8}/24 dev veth8
+ ip -netns ${NS1} -6 addr add ${IPv6_5}/128 nodad dev veth5
+ ip -netns ${NS2} -6 addr add ${IPv6_6}/128 nodad dev veth6
+ ip -netns ${NS2} -6 addr add ${IPv6_7}/128 nodad dev veth7
+ ip -netns ${NS3} -6 addr add ${IPv6_8}/128 nodad dev veth8
+
+ ip -netns ${NS1} link set dev veth1 up
+ ip -netns ${NS2} link set dev veth2 up
+ ip -netns ${NS2} link set dev veth3 up
+ ip -netns ${NS3} link set dev veth4 up
+ ip -netns ${NS1} link set dev veth5 up
+ ip -netns ${NS2} link set dev veth6 up
+ ip -netns ${NS2} link set dev veth7 up
+ ip -netns ${NS3} link set dev veth8 up
+
+ # configure routes: IP*_SRC -> veth1/IP*_2 (= top route) default;
+ # the bottom route to specific bottom addresses
+
+ # NS1
+ # top route
+ ip -netns ${NS1} route add ${IPv4_2}/32 dev veth1
+ ip -netns ${NS1} route add default dev veth1 via ${IPv4_2} # go top by default
+ ip -netns ${NS1} -6 route add ${IPv6_2}/128 dev veth1
+ ip -netns ${NS1} -6 route add default dev veth1 via ${IPv6_2} # go top by default
+ # bottom route
+ ip -netns ${NS1} route add ${IPv4_6}/32 dev veth5
+ ip -netns ${NS1} route add ${IPv4_7}/32 dev veth5 via ${IPv4_6}
+ ip -netns ${NS1} route add ${IPv4_8}/32 dev veth5 via ${IPv4_6}
+ ip -netns ${NS1} -6 route add ${IPv6_6}/128 dev veth5
+ ip -netns ${NS1} -6 route add ${IPv6_7}/128 dev veth5 via ${IPv6_6}
+ ip -netns ${NS1} -6 route add ${IPv6_8}/128 dev veth5 via ${IPv6_6}
+
+ # NS2
+ # top route
+ ip -netns ${NS2} route add ${IPv4_1}/32 dev veth2
+ ip -netns ${NS2} route add ${IPv4_4}/32 dev veth3
+ ip -netns ${NS2} -6 route add ${IPv6_1}/128 dev veth2
+ ip -netns ${NS2} -6 route add ${IPv6_4}/128 dev veth3
+ # bottom route
+ ip -netns ${NS2} route add ${IPv4_5}/32 dev veth6
+ ip -netns ${NS2} route add ${IPv4_8}/32 dev veth7
+ ip -netns ${NS2} -6 route add ${IPv6_5}/128 dev veth6
+ ip -netns ${NS2} -6 route add ${IPv6_8}/128 dev veth7
+
+ # NS3
+ # top route
+ ip -netns ${NS3} route add ${IPv4_3}/32 dev veth4
+ ip -netns ${NS3} route add ${IPv4_1}/32 dev veth4 via ${IPv4_3}
+ ip -netns ${NS3} route add ${IPv4_2}/32 dev veth4 via ${IPv4_3}
+ ip -netns ${NS3} -6 route add ${IPv6_3}/128 dev veth4
+ ip -netns ${NS3} -6 route add ${IPv6_1}/128 dev veth4 via ${IPv6_3}
+ ip -netns ${NS3} -6 route add ${IPv6_2}/128 dev veth4 via ${IPv6_3}
+ # bottom route
+ ip -netns ${NS3} route add ${IPv4_7}/32 dev veth8
+ ip -netns ${NS3} route add ${IPv4_5}/32 dev veth8 via ${IPv4_7}
+ ip -netns ${NS3} route add ${IPv4_6}/32 dev veth8 via ${IPv4_7}
+ ip -netns ${NS3} -6 route add ${IPv6_7}/128 dev veth8
+ ip -netns ${NS3} -6 route add ${IPv6_5}/128 dev veth8 via ${IPv6_7}
+ ip -netns ${NS3} -6 route add ${IPv6_6}/128 dev veth8 via ${IPv6_7}
+
+ # configure IPv4 GRE device in NS3, and a route to it via the "bottom" route
+ ip -netns ${NS3} tunnel add gre_dev mode gre remote ${IPv4_1} local ${IPv4_GRE} ttl 255
+ ip -netns ${NS3} link set gre_dev up
+ ip -netns ${NS3} addr add ${IPv4_GRE} dev gre_dev
+ ip -netns ${NS1} route add ${IPv4_GRE}/32 dev veth5 via ${IPv4_6}
+ ip -netns ${NS2} route add ${IPv4_GRE}/32 dev veth7 via ${IPv4_8}
+
+
+ # configure IPv6 GRE device in NS3, and a route to it via the "bottom" route
+ ip -netns ${NS3} -6 tunnel add name gre6_dev mode ip6gre remote ${IPv6_1} local ${IPv6_GRE} ttl 255
+ ip -netns ${NS3} link set gre6_dev up
+ ip -netns ${NS3} -6 addr add ${IPv6_GRE} nodad dev gre6_dev
+ ip -netns ${NS1} -6 route add ${IPv6_GRE}/128 dev veth5 via ${IPv6_6}
+ ip -netns ${NS2} -6 route add ${IPv6_GRE}/128 dev veth7 via ${IPv6_8}
+
+ # rp_filter gets confused by what these tests are doing, so disable it
+ ip netns exec ${NS1} sysctl -wq net.ipv4.conf.all.rp_filter=0
+ ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0
+ ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0
+
+ TMPFILE=$(mktemp /tmp/test_lwt_ip_encap.XXXXXX)
+
+ sleep 1 # reduce flakiness
+ set +e
+}
+
+cleanup()
+{
+ if [ -f ${TMPFILE} ] ; then
+ rm ${TMPFILE}
+ fi
+
+ ip netns del ${NS1} 2> /dev/null
+ ip netns del ${NS2} 2> /dev/null
+ ip netns del ${NS3} 2> /dev/null
+}
+
+trap cleanup EXIT
+
+remove_routes_to_gredev()
+{
+ ip -netns ${NS1} route del ${IPv4_GRE} dev veth5
+ ip -netns ${NS2} route del ${IPv4_GRE} dev veth7
+ ip -netns ${NS1} -6 route del ${IPv6_GRE}/128 dev veth5
+ ip -netns ${NS2} -6 route del ${IPv6_GRE}/128 dev veth7
+}
+
+add_unreachable_routes_to_gredev()
+{
+ ip -netns ${NS1} route add unreachable ${IPv4_GRE}/32
+ ip -netns ${NS2} route add unreachable ${IPv4_GRE}/32
+ ip -netns ${NS1} -6 route add unreachable ${IPv6_GRE}/128
+ ip -netns ${NS2} -6 route add unreachable ${IPv6_GRE}/128
+}
+
+test_ping()
+{
+ local readonly PROTO=$1
+ local readonly EXPECTED=$2
+ local RET=0
+
+ if [ "${PROTO}" == "IPv4" ] ; then
+ ip netns exec ${NS1} ping -c 1 -W 1 -I ${IPv4_SRC} ${IPv4_DST} > /dev/null 2>&1
+ RET=$?
+ elif [ "${PROTO}" == "IPv6" ] ; then
+ ip netns exec ${NS1} ping6 -c 1 -W 6 -I ${IPv6_SRC} ${IPv6_DST} > /dev/null 2>&1
+ RET=$?
+ else
+ echo " test_ping: unknown PROTO: ${PROTO}"
+ TEST_STATUS=1
+ fi
+
+ if [ "0" != "${RET}" ]; then
+ RET=1
+ fi
+
+ if [ "${EXPECTED}" != "${RET}" ] ; then
+ echo " test_ping failed: expected: ${EXPECTED}; got ${RET}"
+ TEST_STATUS=1
+ fi
+}
+
+test_gso()
+{
+ local readonly PROTO=$1
+ local readonly PKT_SZ=5000
+ local IP_DST=""
+ : > ${TMPFILE} # trim the capture file
+
+ # check that nc is present
+ command -v nc >/dev/null 2>&1 || \
+ { echo >&2 "nc is not available: skipping TSO tests"; return; }
+
+ # listen on IPv*_DST, capture TCP into $TMPFILE
+ if [ "${PROTO}" == "IPv4" ] ; then
+ IP_DST=${IPv4_DST}
+ ip netns exec ${NS3} bash -c \
+ "nc -4 -l -s ${IPv4_DST} -p 9000 > ${TMPFILE} &"
+ elif [ "${PROTO}" == "IPv6" ] ; then
+ IP_DST=${IPv6_DST}
+ ip netns exec ${NS3} bash -c \
+ "nc -6 -l -s ${IPv6_DST} -p 9000 > ${TMPFILE} &"
+ RET=$?
+ else
+ echo " test_gso: unknown PROTO: ${PROTO}"
+ TEST_STATUS=1
+ fi
+ sleep 1 # let nc start listening
+
+ # send a packet larger than MTU
+ ip netns exec ${NS1} bash -c \
+ "dd if=/dev/zero bs=$PKT_SZ count=1 > /dev/tcp/${IP_DST}/9000 2>/dev/null"
+ sleep 2 # let the packet get delivered
+
+ # verify we received all expected bytes
+ SZ=$(stat -c %s ${TMPFILE})
+ if [ "$SZ" != "$PKT_SZ" ] ; then
+ echo " test_gso failed: ${PROTO}"
+ TEST_STATUS=1
+ fi
+}
+
+test_egress()
+{
+ local readonly ENCAP=$1
+ echo "starting egress ${ENCAP} encap test"
+ setup
+
+ # by default, pings work
+ test_ping IPv4 0
+ test_ping IPv6 0
+
+ # remove NS2->DST routes, ping fails
+ ip -netns ${NS2} route del ${IPv4_DST}/32 dev veth3
+ ip -netns ${NS2} -6 route del ${IPv6_DST}/128 dev veth3
+ test_ping IPv4 1
+ test_ping IPv6 1
+
+ # install replacement routes (LWT/eBPF), pings succeed
+ if [ "${ENCAP}" == "IPv4" ] ; then
+ ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj test_lwt_ip_encap.o sec encap_gre dev veth1
+ ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj test_lwt_ip_encap.o sec encap_gre dev veth1
+ elif [ "${ENCAP}" == "IPv6" ] ; then
+ ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj test_lwt_ip_encap.o sec encap_gre6 dev veth1
+ ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj test_lwt_ip_encap.o sec encap_gre6 dev veth1
+ else
+ echo " unknown encap ${ENCAP}"
+ TEST_STATUS=1
+ fi
+ test_ping IPv4 0
+ test_ping IPv6 0
+ test_gso IPv4
+ test_gso IPv6
+
+ # a negative test: remove routes to GRE devices: ping fails
+ remove_routes_to_gredev
+ test_ping IPv4 1
+ test_ping IPv6 1
+
+ # another negative test
+ add_unreachable_routes_to_gredev
+ test_ping IPv4 1
+ test_ping IPv6 1
+
+ cleanup
+ process_test_results
+}
+
+test_ingress()
+{
+ local readonly ENCAP=$1
+ echo "starting ingress ${ENCAP} encap test"
+ setup
+
+ # need to wait a bit for IPv6 to autoconf, otherwise
+ # ping6 sometimes fails with "unable to bind to address"
+
+ # by default, pings work
+ test_ping IPv4 0
+ test_ping IPv6 0
+
+ # remove NS2->DST routes, pings fail
+ ip -netns ${NS2} route del ${IPv4_DST}/32 dev veth3
+ ip -netns ${NS2} -6 route del ${IPv6_DST}/128 dev veth3
+ test_ping IPv4 1
+ test_ping IPv6 1
+
+ # install replacement routes (LWT/eBPF), pings succeed
+ if [ "${ENCAP}" == "IPv4" ] ; then
+ ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj test_lwt_ip_encap.o sec encap_gre dev veth2
+ ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj test_lwt_ip_encap.o sec encap_gre dev veth2
+ elif [ "${ENCAP}" == "IPv6" ] ; then
+ ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj test_lwt_ip_encap.o sec encap_gre6 dev veth2
+ ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj test_lwt_ip_encap.o sec encap_gre6 dev veth2
+ else
+ echo "FAIL: unknown encap ${ENCAP}"
+ TEST_STATUS=1
+ fi
+ test_ping IPv4 0
+ test_ping IPv6 0
+
+ # a negative test: remove routes to GRE devices: ping fails
+ remove_routes_to_gredev
+ test_ping IPv4 1
+ test_ping IPv6 1
+
+ # another negative test
+ add_unreachable_routes_to_gredev
+ test_ping IPv4 1
+ test_ping IPv6 1
+
+ cleanup
+ process_test_results
+}
+
+test_egress IPv4
+test_egress IPv6
+test_ingress IPv4
+test_ingress IPv6
+
+print_test_summary_and_exit
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index e2b9eee37187..3c627771f965 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -32,6 +32,8 @@
#define ENOTSUPP 524
#endif
+static int skips;
+
static int map_flags;
#define CHECK(condition, tag, format...) ({ \
@@ -43,7 +45,7 @@ static int map_flags;
} \
})
-static void test_hashmap(int task, void *data)
+static void test_hashmap(unsigned int task, void *data)
{
long long key, next_key, first_key, value;
int fd;
@@ -133,7 +135,7 @@ static void test_hashmap(int task, void *data)
close(fd);
}
-static void test_hashmap_sizes(int task, void *data)
+static void test_hashmap_sizes(unsigned int task, void *data)
{
int fd, i, j;
@@ -153,7 +155,7 @@ static void test_hashmap_sizes(int task, void *data)
}
}
-static void test_hashmap_percpu(int task, void *data)
+static void test_hashmap_percpu(unsigned int task, void *data)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
BPF_DECLARE_PERCPU(long, value);
@@ -280,7 +282,7 @@ static int helper_fill_hashmap(int max_entries)
return fd;
}
-static void test_hashmap_walk(int task, void *data)
+static void test_hashmap_walk(unsigned int task, void *data)
{
int fd, i, max_entries = 1000;
long long key, value, next_key;
@@ -351,7 +353,7 @@ static void test_hashmap_zero_seed(void)
close(second);
}
-static void test_arraymap(int task, void *data)
+static void test_arraymap(unsigned int task, void *data)
{
int key, next_key, fd;
long long value;
@@ -406,7 +408,7 @@ static void test_arraymap(int task, void *data)
close(fd);
}
-static void test_arraymap_percpu(int task, void *data)
+static void test_arraymap_percpu(unsigned int task, void *data)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
BPF_DECLARE_PERCPU(long, values);
@@ -502,7 +504,7 @@ static void test_arraymap_percpu_many_keys(void)
close(fd);
}
-static void test_devmap(int task, void *data)
+static void test_devmap(unsigned int task, void *data)
{
int fd;
__u32 key, value;
@@ -517,7 +519,7 @@ static void test_devmap(int task, void *data)
close(fd);
}
-static void test_queuemap(int task, void *data)
+static void test_queuemap(unsigned int task, void *data)
{
const int MAP_SIZE = 32;
__u32 vals[MAP_SIZE + MAP_SIZE/2], val;
@@ -575,7 +577,7 @@ static void test_queuemap(int task, void *data)
close(fd);
}
-static void test_stackmap(int task, void *data)
+static void test_stackmap(unsigned int task, void *data)
{
const int MAP_SIZE = 32;
__u32 vals[MAP_SIZE + MAP_SIZE/2], val;
@@ -633,7 +635,6 @@ static void test_stackmap(int task, void *data)
close(fd);
}
-#include <sys/socket.h>
#include <sys/ioctl.h>
#include <arpa/inet.h>
#include <sys/select.h>
@@ -641,7 +642,7 @@ static void test_stackmap(int task, void *data)
#define SOCKMAP_PARSE_PROG "./sockmap_parse_prog.o"
#define SOCKMAP_VERDICT_PROG "./sockmap_verdict_prog.o"
#define SOCKMAP_TCP_MSG_PROG "./sockmap_tcp_msg_prog.o"
-static void test_sockmap(int tasks, void *data)
+static void test_sockmap(unsigned int tasks, void *data)
{
struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_msg, *bpf_map_break;
int map_fd_msg = 0, map_fd_rx = 0, map_fd_tx = 0, map_fd_break;
@@ -725,6 +726,15 @@ static void test_sockmap(int tasks, void *data)
sizeof(key), sizeof(value),
6, 0);
if (fd < 0) {
+ if (!bpf_probe_map_type(BPF_MAP_TYPE_SOCKMAP, 0)) {
+ printf("%s SKIP (unsupported map type BPF_MAP_TYPE_SOCKMAP)\n",
+ __func__);
+ skips++;
+ for (i = 0; i < 6; i++)
+ close(sfd[i]);
+ return;
+ }
+
printf("Failed to create sockmap %i\n", fd);
goto out_sockmap;
}
@@ -1258,10 +1268,11 @@ static void test_map_large(void)
}
#define run_parallel(N, FN, DATA) \
- printf("Fork %d tasks to '" #FN "'\n", N); \
+ printf("Fork %u tasks to '" #FN "'\n", N); \
__run_parallel(N, FN, DATA)
-static void __run_parallel(int tasks, void (*fn)(int task, void *data),
+static void __run_parallel(unsigned int tasks,
+ void (*fn)(unsigned int task, void *data),
void *data)
{
pid_t pid[tasks];
@@ -1302,7 +1313,7 @@ static void test_map_stress(void)
#define DO_UPDATE 1
#define DO_DELETE 0
-static void test_update_delete(int fn, void *data)
+static void test_update_delete(unsigned int fn, void *data)
{
int do_update = ((int *)data)[1];
int fd = ((int *)data)[0];
@@ -1702,6 +1713,6 @@ int main(void)
map_flags = BPF_F_NO_PREALLOC;
run_all_tests();
- printf("test_maps: OK\n");
+ printf("test_maps: OK, %d SKIPPED\n", skips);
return 0;
}
diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py
index d59642e70f56..84bea3985d64 100755
--- a/tools/testing/selftests/bpf/test_offload.py
+++ b/tools/testing/selftests/bpf/test_offload.py
@@ -23,6 +23,7 @@ import string
import struct
import subprocess
import time
+import traceback
logfile = None
log_level = 1
@@ -78,7 +79,9 @@ def fail(cond, msg):
if not cond:
return
print("FAIL: " + msg)
- log("FAIL: " + msg, "", level=1)
+ tb = "".join(traceback.extract_stack().format())
+ print(tb)
+ log("FAIL: " + msg, tb, level=1)
os.sys.exit(1)
def start_test(msg):
@@ -589,6 +592,15 @@ def check_verifier_log(output, reference):
return
fail(True, "Missing or incorrect message from netdevsim in verifier log")
+def check_multi_basic(two_xdps):
+ fail(two_xdps["mode"] != 4, "Bad mode reported with multiple programs")
+ fail("prog" in two_xdps, "Base program reported in multi program mode")
+ fail(len(two_xdps["attached"]) != 2,
+ "Wrong attached program count with two programs")
+ fail(two_xdps["attached"][0]["prog"]["id"] ==
+ two_xdps["attached"][1]["prog"]["id"],
+ "Offloaded and other programs have the same id")
+
def test_spurios_extack(sim, obj, skip_hw, needle):
res = sim.cls_bpf_add_filter(obj, prio=1, handle=1, skip_hw=skip_hw,
include_stderr=True)
@@ -600,6 +612,67 @@ def test_spurios_extack(sim, obj, skip_hw, needle):
include_stderr=True)
check_no_extack(res, needle)
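+
+# Attach an offloaded program plus a second program in the given mode, check
+# that both attachments are reported, exercise the replace/detach error
+# paths, then reattach and remove the device (returns a fresh NetdevSim).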
+def test_multi_prog(sim, obj, modename, modeid):
+ start_test("Test multi-attachment XDP - %s + offload..." %
+ (modename or "default", ))
+ sim.set_xdp(obj, "offload")
+ xdp = sim.ip_link_show(xdp=True)["xdp"]
+ offloaded = sim.dfs_read("bpf_offloaded_id")
+ fail("prog" not in xdp, "Base program not reported in single program mode")
+ fail(len(xdp["attached"]) != 1,
+ "Wrong attached program count with one program")
+
+ sim.set_xdp(obj, modename)
+ two_xdps = sim.ip_link_show(xdp=True)["xdp"]
+
+ fail(xdp["attached"][0] not in two_xdps["attached"],
+ "Offload program not reported after other activated")
+ check_multi_basic(two_xdps)
+
+ offloaded2 = sim.dfs_read("bpf_offloaded_id")
+ fail(offloaded != offloaded2,
+ "Offload ID changed after loading other program")
+
+ start_test("Test multi-attachment XDP - replace...")
+ ret, _, err = sim.set_xdp(obj, "offload", fail=False, include_stderr=True)
+ fail(ret == 0, "Replaced one of programs without -force")
+ check_extack(err, "XDP program already attached.", args)
+
+ if modename == "" or modename == "drv":
+ othermode = "" if modename == "drv" else "drv"
+ start_test("Test multi-attachment XDP - detach...")
+ ret, _, err = sim.unset_xdp(othermode, force=True,
+ fail=False, include_stderr=True)
+ fail(ret == 0, "Removed program with a bad mode")
+ check_extack(err, "program loaded with different flags.", args)
+
+ sim.unset_xdp("offload")
+ xdp = sim.ip_link_show(xdp=True)["xdp"]
+ offloaded = sim.dfs_read("bpf_offloaded_id")
+
+ fail(xdp["mode"] != modeid, "Bad mode reported after multiple programs")
+ fail("prog" not in xdp,
+ "Base program not reported after multi program mode")
+ fail(xdp["attached"][0] not in two_xdps["attached"],
+ "Offload program not reported after other activated")
+ fail(len(xdp["attached"]) != 1,
+ "Wrong attached program count with remaining programs")
+ fail(offloaded != "0", "Offload ID reported with only other program left")
+
+ start_test("Test multi-attachment XDP - reattach...")
+ sim.set_xdp(obj, "offload")
+ two_xdps = sim.ip_link_show(xdp=True)["xdp"]
+
+ fail(xdp["attached"][0] not in two_xdps["attached"],
+ "Other program not reported after offload activated")
+ check_multi_basic(two_xdps)
+
+ start_test("Test multi-attachment XDP - device remove...")
+ sim.remove()
+
+ sim = NetdevSim()
+ sim.set_ethtool_tc_offloads(True)
+ return sim
# Parse command line
parser = argparse.ArgumentParser()
@@ -842,7 +915,9 @@ try:
ret, _, err = sim.set_xdp(obj, "generic", force=True,
fail=False, include_stderr=True)
fail(ret == 0, "Replaced XDP program with a program in different mode")
- fail(err.count("File exists") != 1, "Replaced driver XDP with generic")
+ check_extack(err,
+ "native and generic XDP can't be active at the same time.",
+ args)
ret, _, err = sim.set_xdp(obj, "", force=True,
fail=False, include_stderr=True)
fail(ret == 0, "Replaced XDP program with a program in different mode")
@@ -931,59 +1006,9 @@ try:
rm(pin_file)
bpftool_prog_list_wait(expected=0)
- start_test("Test multi-attachment XDP - attach...")
- sim.set_xdp(obj, "offload")
- xdp = sim.ip_link_show(xdp=True)["xdp"]
- offloaded = sim.dfs_read("bpf_offloaded_id")
- fail("prog" not in xdp, "Base program not reported in single program mode")
- fail(len(ipl["xdp"]["attached"]) != 1,
- "Wrong attached program count with one program")
-
- sim.set_xdp(obj, "")
- two_xdps = sim.ip_link_show(xdp=True)["xdp"]
- offloaded2 = sim.dfs_read("bpf_offloaded_id")
-
- fail(two_xdps["mode"] != 4, "Bad mode reported with multiple programs")
- fail("prog" in two_xdps, "Base program reported in multi program mode")
- fail(xdp["attached"][0] not in two_xdps["attached"],
- "Offload program not reported after driver activated")
- fail(len(two_xdps["attached"]) != 2,
- "Wrong attached program count with two programs")
- fail(two_xdps["attached"][0]["prog"]["id"] ==
- two_xdps["attached"][1]["prog"]["id"],
- "offloaded and drv programs have the same id")
- fail(offloaded != offloaded2,
- "offload ID changed after loading driver program")
-
- start_test("Test multi-attachment XDP - replace...")
- ret, _, err = sim.set_xdp(obj, "offload", fail=False, include_stderr=True)
- fail(err.count("busy") != 1, "Replaced one of programs without -force")
-
- start_test("Test multi-attachment XDP - detach...")
- ret, _, err = sim.unset_xdp("drv", force=True,
- fail=False, include_stderr=True)
- fail(ret == 0, "Removed program with a bad mode")
- check_extack(err, "program loaded with different flags.", args)
-
- sim.unset_xdp("offload")
- xdp = sim.ip_link_show(xdp=True)["xdp"]
- offloaded = sim.dfs_read("bpf_offloaded_id")
-
- fail(xdp["mode"] != 1, "Bad mode reported after multiple programs")
- fail("prog" not in xdp,
- "Base program not reported after multi program mode")
- fail(xdp["attached"][0] not in two_xdps["attached"],
- "Offload program not reported after driver activated")
- fail(len(ipl["xdp"]["attached"]) != 1,
- "Wrong attached program count with remaining programs")
- fail(offloaded != "0", "offload ID reported with only driver program left")
-
- start_test("Test multi-attachment XDP - device remove...")
- sim.set_xdp(obj, "offload")
- sim.remove()
-
- sim = NetdevSim()
- sim.set_ethtool_tc_offloads(True)
+ sim = test_multi_prog(sim, obj, "", 1)
+ sim = test_multi_prog(sim, obj, "drv", 1)
+ sim = test_multi_prog(sim, obj, "generic", 2)
start_test("Test mixing of TC and XDP...")
sim.tc_add_ingress()
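
For reference, the numeric attachment modes asserted by check_multi_basic() and passed as modeid to test_multi_prog() above come from the XDP_ATTACHED_* enum in the if_link UAPI header; a minimal excerpt (values as defined in include/uapi/linux/if_link.h):

enum {
	XDP_ATTACHED_NONE  = 0,
	XDP_ATTACHED_DRV   = 1,	/* native driver mode: modeid 1 for "" and "drv" */
	XDP_ATTACHED_SKB   = 2,	/* generic mode: modeid 2 for "generic" */
	XDP_ATTACHED_HW    = 3,	/* offloaded to the device */
	XDP_ATTACHED_MULTI = 4,	/* mode reported once two programs are attached */
};
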
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 25f0083a9b2e..5d10aee9e277 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -4,92 +4,30 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
-#include <stdio.h>
-#include <unistd.h>
-#include <errno.h>
-#include <string.h>
-#include <assert.h>
-#include <stdlib.h>
-#include <time.h>
-
-#include <linux/types.h>
-typedef __u16 __sum16;
-#include <arpa/inet.h>
-#include <linux/if_ether.h>
-#include <linux/if_packet.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/tcp.h>
-#include <linux/filter.h>
-#include <linux/perf_event.h>
-#include <linux/unistd.h>
-
-#include <sys/ioctl.h>
-#include <sys/wait.h>
-#include <sys/types.h>
-#include <fcntl.h>
-
-#include <linux/bpf.h>
-#include <linux/err.h>
-#include <bpf/bpf.h>
-#include <bpf/libbpf.h>
-
-#include "test_iptunnel_common.h"
-#include "bpf_util.h"
-#include "bpf_endian.h"
+#include "test_progs.h"
#include "bpf_rlimit.h"
-#include "trace_helpers.h"
-
-static int error_cnt, pass_cnt;
-static bool jit_enabled;
-#define MAGIC_BYTES 123
+int error_cnt, pass_cnt;
+bool jit_enabled;
-/* ipv4 test vector */
-static struct {
- struct ethhdr eth;
- struct iphdr iph;
- struct tcphdr tcp;
-} __packed pkt_v4 = {
+struct ipv4_packet pkt_v4 = {
.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
.iph.ihl = 5,
- .iph.protocol = 6,
+ .iph.protocol = IPPROTO_TCP,
.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
.tcp.urg_ptr = 123,
+ .tcp.doff = 5,
};
-/* ipv6 test vector */
-static struct {
- struct ethhdr eth;
- struct ipv6hdr iph;
- struct tcphdr tcp;
-} __packed pkt_v6 = {
+struct ipv6_packet pkt_v6 = {
.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
- .iph.nexthdr = 6,
+ .iph.nexthdr = IPPROTO_TCP,
.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
.tcp.urg_ptr = 123,
+ .tcp.doff = 5,
};
-#define _CHECK(condition, tag, duration, format...) ({ \
- int __ret = !!(condition); \
- if (__ret) { \
- error_cnt++; \
- printf("%s:FAIL:%s ", __func__, tag); \
- printf(format); \
- } else { \
- pass_cnt++; \
- printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\
- } \
- __ret; \
-})
-
-#define CHECK(condition, tag, format...) \
- _CHECK(condition, tag, duration, format)
-#define CHECK_ATTR(condition, tag, format...) \
- _CHECK(condition, tag, tattr.duration, format)
-
-static int bpf_find_map(const char *test, struct bpf_object *obj,
- const char *name)
+int bpf_find_map(const char *test, struct bpf_object *obj, const char *name)
{
struct bpf_map *map;
@@ -102,349 +40,6 @@ static int bpf_find_map(const char *test, struct bpf_object *obj,
return bpf_map__fd(map);
}
-static void test_pkt_access(void)
-{
- const char *file = "./test_pkt_access.o";
- struct bpf_object *obj;
- __u32 duration, retval;
- int err, prog_fd;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
- if (err) {
- error_cnt++;
- return;
- }
-
- err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
- NULL, NULL, &retval, &duration);
- CHECK(err || retval, "ipv4",
- "err %d errno %d retval %d duration %d\n",
- err, errno, retval, duration);
-
- err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6),
- NULL, NULL, &retval, &duration);
- CHECK(err || retval, "ipv6",
- "err %d errno %d retval %d duration %d\n",
- err, errno, retval, duration);
- bpf_object__close(obj);
-}
-
-static void test_prog_run_xattr(void)
-{
- const char *file = "./test_pkt_access.o";
- struct bpf_object *obj;
- char buf[10];
- int err;
- struct bpf_prog_test_run_attr tattr = {
- .repeat = 1,
- .data_in = &pkt_v4,
- .data_size_in = sizeof(pkt_v4),
- .data_out = buf,
- .data_size_out = 5,
- };
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj,
- &tattr.prog_fd);
- if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno))
- return;
-
- memset(buf, 0, sizeof(buf));
-
- err = bpf_prog_test_run_xattr(&tattr);
- CHECK_ATTR(err != -1 || errno != ENOSPC || tattr.retval, "run",
- "err %d errno %d retval %d\n", err, errno, tattr.retval);
-
- CHECK_ATTR(tattr.data_size_out != sizeof(pkt_v4), "data_size_out",
- "incorrect output size, want %lu have %u\n",
- sizeof(pkt_v4), tattr.data_size_out);
-
- CHECK_ATTR(buf[5] != 0, "overflow",
- "BPF_PROG_TEST_RUN ignored size hint\n");
-
- tattr.data_out = NULL;
- tattr.data_size_out = 0;
- errno = 0;
-
- err = bpf_prog_test_run_xattr(&tattr);
- CHECK_ATTR(err || errno || tattr.retval, "run_no_output",
- "err %d errno %d retval %d\n", err, errno, tattr.retval);
-
- tattr.data_size_out = 1;
- err = bpf_prog_test_run_xattr(&tattr);
- CHECK_ATTR(err != -EINVAL, "run_wrong_size_out", "err %d\n", err);
-
- bpf_object__close(obj);
-}
-
-static void test_xdp(void)
-{
- struct vip key4 = {.protocol = 6, .family = AF_INET};
- struct vip key6 = {.protocol = 6, .family = AF_INET6};
- struct iptnl_info value4 = {.family = AF_INET};
- struct iptnl_info value6 = {.family = AF_INET6};
- const char *file = "./test_xdp.o";
- struct bpf_object *obj;
- char buf[128];
- struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr);
- struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
- __u32 duration, retval, size;
- int err, prog_fd, map_fd;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
- if (err) {
- error_cnt++;
- return;
- }
-
- map_fd = bpf_find_map(__func__, obj, "vip2tnl");
- if (map_fd < 0)
- goto out;
- bpf_map_update_elem(map_fd, &key4, &value4, 0);
- bpf_map_update_elem(map_fd, &key6, &value6, 0);
-
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
- buf, &size, &retval, &duration);
-
- CHECK(err || retval != XDP_TX || size != 74 ||
- iph->protocol != IPPROTO_IPIP, "ipv4",
- "err %d errno %d retval %d size %d\n",
- err, errno, retval, size);
-
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
- buf, &size, &retval, &duration);
- CHECK(err || retval != XDP_TX || size != 114 ||
- iph6->nexthdr != IPPROTO_IPV6, "ipv6",
- "err %d errno %d retval %d size %d\n",
- err, errno, retval, size);
-out:
- bpf_object__close(obj);
-}
-
-static void test_xdp_adjust_tail(void)
-{
- const char *file = "./test_adjust_tail.o";
- struct bpf_object *obj;
- char buf[128];
- __u32 duration, retval, size;
- int err, prog_fd;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
- if (err) {
- error_cnt++;
- return;
- }
-
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
- buf, &size, &retval, &duration);
-
- CHECK(err || retval != XDP_DROP,
- "ipv4", "err %d errno %d retval %d size %d\n",
- err, errno, retval, size);
-
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
- buf, &size, &retval, &duration);
- CHECK(err || retval != XDP_TX || size != 54,
- "ipv6", "err %d errno %d retval %d size %d\n",
- err, errno, retval, size);
- bpf_object__close(obj);
-}
-
-
-
-#define MAGIC_VAL 0x1234
-#define NUM_ITER 100000
-#define VIP_NUM 5
-
-static void test_l4lb(const char *file)
-{
- unsigned int nr_cpus = bpf_num_possible_cpus();
- struct vip key = {.protocol = 6};
- struct vip_meta {
- __u32 flags;
- __u32 vip_num;
- } value = {.vip_num = VIP_NUM};
- __u32 stats_key = VIP_NUM;
- struct vip_stats {
- __u64 bytes;
- __u64 pkts;
- } stats[nr_cpus];
- struct real_definition {
- union {
- __be32 dst;
- __be32 dstv6[4];
- };
- __u8 flags;
- } real_def = {.dst = MAGIC_VAL};
- __u32 ch_key = 11, real_num = 3;
- __u32 duration, retval, size;
- int err, i, prog_fd, map_fd;
- __u64 bytes = 0, pkts = 0;
- struct bpf_object *obj;
- char buf[128];
- u32 *magic = (u32 *)buf;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
- if (err) {
- error_cnt++;
- return;
- }
-
- map_fd = bpf_find_map(__func__, obj, "vip_map");
- if (map_fd < 0)
- goto out;
- bpf_map_update_elem(map_fd, &key, &value, 0);
-
- map_fd = bpf_find_map(__func__, obj, "ch_rings");
- if (map_fd < 0)
- goto out;
- bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
-
- map_fd = bpf_find_map(__func__, obj, "reals");
- if (map_fd < 0)
- goto out;
- bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
-
- err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
- buf, &size, &retval, &duration);
- CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 ||
- *magic != MAGIC_VAL, "ipv4",
- "err %d errno %d retval %d size %d magic %x\n",
- err, errno, retval, size, *magic);
-
- err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
- buf, &size, &retval, &duration);
- CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 ||
- *magic != MAGIC_VAL, "ipv6",
- "err %d errno %d retval %d size %d magic %x\n",
- err, errno, retval, size, *magic);
-
- map_fd = bpf_find_map(__func__, obj, "stats");
- if (map_fd < 0)
- goto out;
- bpf_map_lookup_elem(map_fd, &stats_key, stats);
- for (i = 0; i < nr_cpus; i++) {
- bytes += stats[i].bytes;
- pkts += stats[i].pkts;
- }
- if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
- error_cnt++;
- printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts);
- }
-out:
- bpf_object__close(obj);
-}
-
-static void test_l4lb_all(void)
-{
- const char *file1 = "./test_l4lb.o";
- const char *file2 = "./test_l4lb_noinline.o";
-
- test_l4lb(file1);
- test_l4lb(file2);
-}
-
-static void test_xdp_noinline(void)
-{
- const char *file = "./test_xdp_noinline.o";
- unsigned int nr_cpus = bpf_num_possible_cpus();
- struct vip key = {.protocol = 6};
- struct vip_meta {
- __u32 flags;
- __u32 vip_num;
- } value = {.vip_num = VIP_NUM};
- __u32 stats_key = VIP_NUM;
- struct vip_stats {
- __u64 bytes;
- __u64 pkts;
- } stats[nr_cpus];
- struct real_definition {
- union {
- __be32 dst;
- __be32 dstv6[4];
- };
- __u8 flags;
- } real_def = {.dst = MAGIC_VAL};
- __u32 ch_key = 11, real_num = 3;
- __u32 duration, retval, size;
- int err, i, prog_fd, map_fd;
- __u64 bytes = 0, pkts = 0;
- struct bpf_object *obj;
- char buf[128];
- u32 *magic = (u32 *)buf;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
- if (err) {
- error_cnt++;
- return;
- }
-
- map_fd = bpf_find_map(__func__, obj, "vip_map");
- if (map_fd < 0)
- goto out;
- bpf_map_update_elem(map_fd, &key, &value, 0);
-
- map_fd = bpf_find_map(__func__, obj, "ch_rings");
- if (map_fd < 0)
- goto out;
- bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
-
- map_fd = bpf_find_map(__func__, obj, "reals");
- if (map_fd < 0)
- goto out;
- bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
-
- err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
- buf, &size, &retval, &duration);
- CHECK(err || retval != 1 || size != 54 ||
- *magic != MAGIC_VAL, "ipv4",
- "err %d errno %d retval %d size %d magic %x\n",
- err, errno, retval, size, *magic);
-
- err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
- buf, &size, &retval, &duration);
- CHECK(err || retval != 1 || size != 74 ||
- *magic != MAGIC_VAL, "ipv6",
- "err %d errno %d retval %d size %d magic %x\n",
- err, errno, retval, size, *magic);
-
- map_fd = bpf_find_map(__func__, obj, "stats");
- if (map_fd < 0)
- goto out;
- bpf_map_lookup_elem(map_fd, &stats_key, stats);
- for (i = 0; i < nr_cpus; i++) {
- bytes += stats[i].bytes;
- pkts += stats[i].pkts;
- }
- if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
- error_cnt++;
- printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts);
- }
-out:
- bpf_object__close(obj);
-}
-
-static void test_tcp_estats(void)
-{
- const char *file = "./test_tcp_estats.o";
- int err, prog_fd;
- struct bpf_object *obj;
- __u32 duration = 0;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
- CHECK(err, "", "err %d errno %d\n", err, errno);
- if (err) {
- error_cnt++;
- return;
- }
-
- bpf_object__close(obj);
-}
-
-static inline __u64 ptr_to_u64(const void *ptr)
-{
- return (__u64) (unsigned long) ptr;
-}
-
static bool is_jit_enabled(void)
{
const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
@@ -463,475 +58,7 @@ static bool is_jit_enabled(void)
return enabled;
}
-static void test_bpf_obj_id(void)
-{
- const __u64 array_magic_value = 0xfaceb00c;
- const __u32 array_key = 0;
- const int nr_iters = 2;
- const char *file = "./test_obj_id.o";
- const char *expected_prog_name = "test_obj_id";
- const char *expected_map_name = "test_map_id";
- const __u64 nsec_per_sec = 1000000000;
-
- struct bpf_object *objs[nr_iters];
- int prog_fds[nr_iters], map_fds[nr_iters];
- /* +1 to test for the info_len returned by kernel */
- struct bpf_prog_info prog_infos[nr_iters + 1];
- struct bpf_map_info map_infos[nr_iters + 1];
- /* Each prog only uses one map. +1 to test nr_map_ids
- * returned by kernel.
- */
- __u32 map_ids[nr_iters + 1];
- char jited_insns[128], xlated_insns[128], zeros[128];
- __u32 i, next_id, info_len, nr_id_found, duration = 0;
- struct timespec real_time_ts, boot_time_ts;
- int err = 0;
- __u64 array_value;
- uid_t my_uid = getuid();
- time_t now, load_time;
-
- err = bpf_prog_get_fd_by_id(0);
- CHECK(err >= 0 || errno != ENOENT,
- "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);
-
- err = bpf_map_get_fd_by_id(0);
- CHECK(err >= 0 || errno != ENOENT,
- "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);
-
- for (i = 0; i < nr_iters; i++)
- objs[i] = NULL;
-
- /* Check bpf_obj_get_info_by_fd() */
- bzero(zeros, sizeof(zeros));
- for (i = 0; i < nr_iters; i++) {
- now = time(NULL);
- err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER,
- &objs[i], &prog_fds[i]);
- /* test_obj_id.o is a dumb prog. It should never fail
- * to load.
- */
- if (err)
- error_cnt++;
- assert(!err);
-
- /* Insert a magic value to the map */
- map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
- assert(map_fds[i] >= 0);
- err = bpf_map_update_elem(map_fds[i], &array_key,
- &array_magic_value, 0);
- assert(!err);
-
- /* Check getting map info */
- info_len = sizeof(struct bpf_map_info) * 2;
- bzero(&map_infos[i], info_len);
- err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
- &info_len);
- if (CHECK(err ||
- map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
- map_infos[i].key_size != sizeof(__u32) ||
- map_infos[i].value_size != sizeof(__u64) ||
- map_infos[i].max_entries != 1 ||
- map_infos[i].map_flags != 0 ||
- info_len != sizeof(struct bpf_map_info) ||
- strcmp((char *)map_infos[i].name, expected_map_name),
- "get-map-info(fd)",
- "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
- err, errno,
- map_infos[i].type, BPF_MAP_TYPE_ARRAY,
- info_len, sizeof(struct bpf_map_info),
- map_infos[i].key_size,
- map_infos[i].value_size,
- map_infos[i].max_entries,
- map_infos[i].map_flags,
- map_infos[i].name, expected_map_name))
- goto done;
-
- /* Check getting prog info */
- info_len = sizeof(struct bpf_prog_info) * 2;
- bzero(&prog_infos[i], info_len);
- bzero(jited_insns, sizeof(jited_insns));
- bzero(xlated_insns, sizeof(xlated_insns));
- prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
- prog_infos[i].jited_prog_len = sizeof(jited_insns);
- prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
- prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
- prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
- prog_infos[i].nr_map_ids = 2;
- err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
- assert(!err);
- err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
- assert(!err);
- err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
- &info_len);
- load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
- + (prog_infos[i].load_time / nsec_per_sec);
- if (CHECK(err ||
- prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER ||
- info_len != sizeof(struct bpf_prog_info) ||
- (jit_enabled && !prog_infos[i].jited_prog_len) ||
- (jit_enabled &&
- !memcmp(jited_insns, zeros, sizeof(zeros))) ||
- !prog_infos[i].xlated_prog_len ||
- !memcmp(xlated_insns, zeros, sizeof(zeros)) ||
- load_time < now - 60 || load_time > now + 60 ||
- prog_infos[i].created_by_uid != my_uid ||
- prog_infos[i].nr_map_ids != 1 ||
- *(int *)(long)prog_infos[i].map_ids != map_infos[i].id ||
- strcmp((char *)prog_infos[i].name, expected_prog_name),
- "get-prog-info(fd)",
- "err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
- err, errno, i,
- prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
- info_len, sizeof(struct bpf_prog_info),
- jit_enabled,
- prog_infos[i].jited_prog_len,
- prog_infos[i].xlated_prog_len,
- !!memcmp(jited_insns, zeros, sizeof(zeros)),
- !!memcmp(xlated_insns, zeros, sizeof(zeros)),
- load_time, now,
- prog_infos[i].created_by_uid, my_uid,
- prog_infos[i].nr_map_ids, 1,
- *(int *)(long)prog_infos[i].map_ids, map_infos[i].id,
- prog_infos[i].name, expected_prog_name))
- goto done;
- }
-
- /* Check bpf_prog_get_next_id() */
- nr_id_found = 0;
- next_id = 0;
- while (!bpf_prog_get_next_id(next_id, &next_id)) {
- struct bpf_prog_info prog_info = {};
- __u32 saved_map_id;
- int prog_fd;
-
- info_len = sizeof(prog_info);
-
- prog_fd = bpf_prog_get_fd_by_id(next_id);
- if (prog_fd < 0 && errno == ENOENT)
- /* The bpf_prog is in the dead row */
- continue;
- if (CHECK(prog_fd < 0, "get-prog-fd(next_id)",
- "prog_fd %d next_id %d errno %d\n",
- prog_fd, next_id, errno))
- break;
-
- for (i = 0; i < nr_iters; i++)
- if (prog_infos[i].id == next_id)
- break;
-
- if (i == nr_iters)
- continue;
-
- nr_id_found++;
-
- /* Negative test:
- * prog_info.nr_map_ids = 1
- * prog_info.map_ids = NULL
- */
- prog_info.nr_map_ids = 1;
- err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
- if (CHECK(!err || errno != EFAULT,
- "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
- err, errno, EFAULT))
- break;
- bzero(&prog_info, sizeof(prog_info));
- info_len = sizeof(prog_info);
-
- saved_map_id = *(int *)((long)prog_infos[i].map_ids);
- prog_info.map_ids = prog_infos[i].map_ids;
- prog_info.nr_map_ids = 2;
- err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
- prog_infos[i].jited_prog_insns = 0;
- prog_infos[i].xlated_prog_insns = 0;
- CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
- memcmp(&prog_info, &prog_infos[i], info_len) ||
- *(int *)(long)prog_info.map_ids != saved_map_id,
- "get-prog-info(next_id->fd)",
- "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n",
- err, errno, info_len, sizeof(struct bpf_prog_info),
- memcmp(&prog_info, &prog_infos[i], info_len),
- *(int *)(long)prog_info.map_ids, saved_map_id);
- close(prog_fd);
- }
- CHECK(nr_id_found != nr_iters,
- "check total prog id found by get_next_id",
- "nr_id_found %u(%u)\n",
- nr_id_found, nr_iters);
-
- /* Check bpf_map_get_next_id() */
- nr_id_found = 0;
- next_id = 0;
- while (!bpf_map_get_next_id(next_id, &next_id)) {
- struct bpf_map_info map_info = {};
- int map_fd;
-
- info_len = sizeof(map_info);
-
- map_fd = bpf_map_get_fd_by_id(next_id);
- if (map_fd < 0 && errno == ENOENT)
- /* The bpf_map is in the dead row */
- continue;
- if (CHECK(map_fd < 0, "get-map-fd(next_id)",
- "map_fd %d next_id %u errno %d\n",
- map_fd, next_id, errno))
- break;
-
- for (i = 0; i < nr_iters; i++)
- if (map_infos[i].id == next_id)
- break;
-
- if (i == nr_iters)
- continue;
-
- nr_id_found++;
-
- err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);
- assert(!err);
-
- err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
- CHECK(err || info_len != sizeof(struct bpf_map_info) ||
- memcmp(&map_info, &map_infos[i], info_len) ||
- array_value != array_magic_value,
- "check get-map-info(next_id->fd)",
- "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n",
- err, errno, info_len, sizeof(struct bpf_map_info),
- memcmp(&map_info, &map_infos[i], info_len),
- array_value, array_magic_value);
-
- close(map_fd);
- }
- CHECK(nr_id_found != nr_iters,
- "check total map id found by get_next_id",
- "nr_id_found %u(%u)\n",
- nr_id_found, nr_iters);
-
-done:
- for (i = 0; i < nr_iters; i++)
- bpf_object__close(objs[i]);
-}
-
-static void test_pkt_md_access(void)
-{
- const char *file = "./test_pkt_md_access.o";
- struct bpf_object *obj;
- __u32 duration, retval;
- int err, prog_fd;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
- if (err) {
- error_cnt++;
- return;
- }
-
- err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
- NULL, NULL, &retval, &duration);
- CHECK(err || retval, "",
- "err %d errno %d retval %d duration %d\n",
- err, errno, retval, duration);
-
- bpf_object__close(obj);
-}
-
-static void test_obj_name(void)
-{
- struct {
- const char *name;
- int success;
- int expected_errno;
- } tests[] = {
- { "", 1, 0 },
- { "_123456789ABCDE", 1, 0 },
- { "_123456789ABCDEF", 0, EINVAL },
- { "_123456789ABCD\n", 0, EINVAL },
- };
- struct bpf_insn prog[] = {
- BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- };
- __u32 duration = 0;
- int i;
-
- for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
- size_t name_len = strlen(tests[i].name) + 1;
- union bpf_attr attr;
- size_t ncopy;
- int fd;
-
- /* test different attr.prog_name during BPF_PROG_LOAD */
- ncopy = name_len < sizeof(attr.prog_name) ?
- name_len : sizeof(attr.prog_name);
- bzero(&attr, sizeof(attr));
- attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
- attr.insn_cnt = 2;
- attr.insns = ptr_to_u64(prog);
- attr.license = ptr_to_u64("");
- memcpy(attr.prog_name, tests[i].name, ncopy);
-
- fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
- CHECK((tests[i].success && fd < 0) ||
- (!tests[i].success && fd != -1) ||
- (!tests[i].success && errno != tests[i].expected_errno),
- "check-bpf-prog-name",
- "fd %d(%d) errno %d(%d)\n",
- fd, tests[i].success, errno, tests[i].expected_errno);
-
- if (fd != -1)
- close(fd);
-
- /* test different attr.map_name during BPF_MAP_CREATE */
- ncopy = name_len < sizeof(attr.map_name) ?
- name_len : sizeof(attr.map_name);
- bzero(&attr, sizeof(attr));
- attr.map_type = BPF_MAP_TYPE_ARRAY;
- attr.key_size = 4;
- attr.value_size = 4;
- attr.max_entries = 1;
- attr.map_flags = 0;
- memcpy(attr.map_name, tests[i].name, ncopy);
- fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
- CHECK((tests[i].success && fd < 0) ||
- (!tests[i].success && fd != -1) ||
- (!tests[i].success && errno != tests[i].expected_errno),
- "check-bpf-map-name",
- "fd %d(%d) errno %d(%d)\n",
- fd, tests[i].success, errno, tests[i].expected_errno);
-
- if (fd != -1)
- close(fd);
- }
-}
-
-static void test_tp_attach_query(void)
-{
- const int num_progs = 3;
- int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
- __u32 duration = 0, info_len, saved_prog_ids[num_progs];
- const char *file = "./test_tracepoint.o";
- struct perf_event_query_bpf *query;
- struct perf_event_attr attr = {};
- struct bpf_object *obj[num_progs];
- struct bpf_prog_info prog_info;
- char buf[256];
-
- snprintf(buf, sizeof(buf),
- "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
- efd = open(buf, O_RDONLY, 0);
- if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
- return;
- bytes = read(efd, buf, sizeof(buf));
- close(efd);
- if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
- "read", "bytes %d errno %d\n", bytes, errno))
- return;
-
- attr.config = strtol(buf, NULL, 0);
- attr.type = PERF_TYPE_TRACEPOINT;
- attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
- attr.sample_period = 1;
- attr.wakeup_events = 1;
-
- query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
- for (i = 0; i < num_progs; i++) {
- err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
- &prog_fd[i]);
- if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
- goto cleanup1;
-
- bzero(&prog_info, sizeof(prog_info));
- prog_info.jited_prog_len = 0;
- prog_info.xlated_prog_len = 0;
- prog_info.nr_map_ids = 0;
- info_len = sizeof(prog_info);
- err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
- if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
- err, errno))
- goto cleanup1;
- saved_prog_ids[i] = prog_info.id;
-
- pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
- 0 /* cpu 0 */, -1 /* group id */,
- 0 /* flags */);
- if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
- pmu_fd[i], errno))
- goto cleanup2;
- err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
- if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
- err, errno))
- goto cleanup3;
-
- if (i == 0) {
- /* check NULL prog array query */
- query->ids_len = num_progs;
- err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
- if (CHECK(err || query->prog_cnt != 0,
- "perf_event_ioc_query_bpf",
- "err %d errno %d query->prog_cnt %u\n",
- err, errno, query->prog_cnt))
- goto cleanup3;
- }
-
- err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
- if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
- err, errno))
- goto cleanup3;
-
- if (i == 1) {
- /* try to get # of programs only */
- query->ids_len = 0;
- err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
- if (CHECK(err || query->prog_cnt != 2,
- "perf_event_ioc_query_bpf",
- "err %d errno %d query->prog_cnt %u\n",
- err, errno, query->prog_cnt))
- goto cleanup3;
-
- /* try a few negative tests */
- /* invalid query pointer */
- err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
- (struct perf_event_query_bpf *)0x1);
- if (CHECK(!err || errno != EFAULT,
- "perf_event_ioc_query_bpf",
- "err %d errno %d\n", err, errno))
- goto cleanup3;
-
- /* no enough space */
- query->ids_len = 1;
- err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
- if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
- "perf_event_ioc_query_bpf",
- "err %d errno %d query->prog_cnt %u\n",
- err, errno, query->prog_cnt))
- goto cleanup3;
- }
-
- query->ids_len = num_progs;
- err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
- if (CHECK(err || query->prog_cnt != (i + 1),
- "perf_event_ioc_query_bpf",
- "err %d errno %d query->prog_cnt %u\n",
- err, errno, query->prog_cnt))
- goto cleanup3;
- for (j = 0; j < i + 1; j++)
- if (CHECK(saved_prog_ids[j] != query->ids[j],
- "perf_event_ioc_query_bpf",
- "#%d saved_prog_id %x query prog_id %x\n",
- j, saved_prog_ids[j], query->ids[j]))
- goto cleanup3;
- }
-
- i = num_progs - 1;
- for (; i >= 0; i--) {
- cleanup3:
- ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
- cleanup2:
- close(pmu_fd[i]);
- cleanup1:
- bpf_object__close(obj[i]);
- }
- free(query);
-}
-
-static int compare_map_keys(int map1_fd, int map2_fd)
+int compare_map_keys(int map1_fd, int map2_fd)
{
__u32 key, next_key;
char val_buf[PERF_MAX_STACK_DEPTH *
@@ -958,7 +85,7 @@ static int compare_map_keys(int map1_fd, int map2_fd)
return 0;
}
-static int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
+int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
{
__u32 key, next_key, *cur_key_p, *next_key_p;
char *val_buf1, *val_buf2;
@@ -994,165 +121,7 @@ out:
return err;
}
-static void test_stacktrace_map()
-{
- int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
- const char *file = "./test_stacktrace_map.o";
- int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
- struct perf_event_attr attr = {};
- __u32 key, val, duration = 0;
- struct bpf_object *obj;
- char buf[256];
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
- if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
- return;
-
- /* Get the ID for the sched/sched_switch tracepoint */
- snprintf(buf, sizeof(buf),
- "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
- efd = open(buf, O_RDONLY, 0);
- if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
- goto close_prog;
-
- bytes = read(efd, buf, sizeof(buf));
- close(efd);
- if (bytes <= 0 || bytes >= sizeof(buf))
- goto close_prog;
-
- /* Open the perf event and attach bpf progrram */
- attr.config = strtol(buf, NULL, 0);
- attr.type = PERF_TYPE_TRACEPOINT;
- attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
- attr.sample_period = 1;
- attr.wakeup_events = 1;
- pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
- 0 /* cpu 0 */, -1 /* group id */,
- 0 /* flags */);
- if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
- pmu_fd, errno))
- goto close_prog;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
- if (err)
- goto disable_pmu;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
- if (err)
- goto disable_pmu;
-
- /* find map fds */
- control_map_fd = bpf_find_map(__func__, obj, "control_map");
- if (control_map_fd < 0)
- goto disable_pmu;
-
- stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
- if (stackid_hmap_fd < 0)
- goto disable_pmu;
-
- stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
- if (stackmap_fd < 0)
- goto disable_pmu;
-
- stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
- if (stack_amap_fd < 0)
- goto disable_pmu;
-
- /* give some time for bpf program run */
- sleep(1);
-
- /* disable stack trace collection */
- key = 0;
- val = 1;
- bpf_map_update_elem(control_map_fd, &key, &val, 0);
-
- /* for every element in stackid_hmap, we can find a corresponding one
- * in stackmap, and vise versa.
- */
- err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
- if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu_noerr;
-
- err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
- if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu_noerr;
-
- stack_trace_len = PERF_MAX_STACK_DEPTH * sizeof(__u64);
- err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
- if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu_noerr;
-
- goto disable_pmu_noerr;
-disable_pmu:
- error_cnt++;
-disable_pmu_noerr:
- ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
- close(pmu_fd);
-close_prog:
- bpf_object__close(obj);
-}
-
-static void test_stacktrace_map_raw_tp()
-{
- int control_map_fd, stackid_hmap_fd, stackmap_fd;
- const char *file = "./test_stacktrace_map.o";
- int efd, err, prog_fd;
- __u32 key, val, duration = 0;
- struct bpf_object *obj;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
- if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
- return;
-
- efd = bpf_raw_tracepoint_open("sched_switch", prog_fd);
- if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
- goto close_prog;
-
- /* find map fds */
- control_map_fd = bpf_find_map(__func__, obj, "control_map");
- if (control_map_fd < 0)
- goto close_prog;
-
- stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
- if (stackid_hmap_fd < 0)
- goto close_prog;
-
- stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
- if (stackmap_fd < 0)
- goto close_prog;
-
- /* give some time for bpf program run */
- sleep(1);
-
- /* disable stack trace collection */
- key = 0;
- val = 1;
- bpf_map_update_elem(control_map_fd, &key, &val, 0);
-
- /* for every element in stackid_hmap, we can find a corresponding one
- * in stackmap, and vise versa.
- */
- err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
- if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
- "err %d errno %d\n", err, errno))
- goto close_prog;
-
- err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
- if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
- "err %d errno %d\n", err, errno))
- goto close_prog;
-
- goto close_prog_noerr;
-close_prog:
- error_cnt++;
-close_prog_noerr:
- bpf_object__close(obj);
-}
-
-static int extract_build_id(char *build_id, size_t size)
+int extract_build_id(char *build_id, size_t size)
{
FILE *fp;
char *line = NULL;
@@ -1176,741 +145,22 @@ err:
return -1;
}
-static void test_stacktrace_build_id(void)
-{
- int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
- const char *file = "./test_stacktrace_build_id.o";
- int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
- struct perf_event_attr attr = {};
- __u32 key, previous_key, val, duration = 0;
- struct bpf_object *obj;
- char buf[256];
- int i, j;
- struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
- int build_id_matches = 0;
- int retry = 1;
-
-retry:
- err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
- if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
- goto out;
-
- /* Get the ID for the sched/sched_switch tracepoint */
- snprintf(buf, sizeof(buf),
- "/sys/kernel/debug/tracing/events/random/urandom_read/id");
- efd = open(buf, O_RDONLY, 0);
- if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
- goto close_prog;
-
- bytes = read(efd, buf, sizeof(buf));
- close(efd);
- if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
- "read", "bytes %d errno %d\n", bytes, errno))
- goto close_prog;
-
- /* Open the perf event and attach bpf progrram */
- attr.config = strtol(buf, NULL, 0);
- attr.type = PERF_TYPE_TRACEPOINT;
- attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
- attr.sample_period = 1;
- attr.wakeup_events = 1;
- pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
- 0 /* cpu 0 */, -1 /* group id */,
- 0 /* flags */);
- if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
- pmu_fd, errno))
- goto close_prog;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
- if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
- err, errno))
- goto close_pmu;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
- if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
- err, errno))
- goto disable_pmu;
-
- /* find map fds */
- control_map_fd = bpf_find_map(__func__, obj, "control_map");
- if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
- if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
- if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
- err, errno))
- goto disable_pmu;
-
- stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
- if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
- == 0);
- assert(system("./urandom_read") == 0);
- /* disable stack trace collection */
- key = 0;
- val = 1;
- bpf_map_update_elem(control_map_fd, &key, &val, 0);
-
- /* for every element in stackid_hmap, we can find a corresponding one
- * in stackmap, and vise versa.
- */
- err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
- if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
- if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- err = extract_build_id(buf, 256);
-
- if (CHECK(err, "get build_id with readelf",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
- if (CHECK(err, "get_next_key from stackmap",
- "err %d, errno %d\n", err, errno))
- goto disable_pmu;
-
- do {
- char build_id[64];
-
- err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
- if (CHECK(err, "lookup_elem from stackmap",
- "err %d, errno %d\n", err, errno))
- goto disable_pmu;
- for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
- if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
- id_offs[i].offset != 0) {
- for (j = 0; j < 20; ++j)
- sprintf(build_id + 2 * j, "%02x",
- id_offs[i].build_id[j] & 0xff);
- if (strstr(buf, build_id) != NULL)
- build_id_matches = 1;
- }
- previous_key = key;
- } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
-
- /* stack_map_get_build_id_offset() is racy and sometimes can return
- * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
- * try it one more time.
- */
- if (build_id_matches < 1 && retry--) {
- ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
- close(pmu_fd);
- bpf_object__close(obj);
- printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
- __func__);
- goto retry;
- }
-
- if (CHECK(build_id_matches < 1, "build id match",
- "Didn't find expected build ID from the map\n"))
- goto disable_pmu;
-
- stack_trace_len = PERF_MAX_STACK_DEPTH
- * sizeof(struct bpf_stack_build_id);
- err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
- CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
- "err %d errno %d\n", err, errno);
-
-disable_pmu:
- ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
-
-close_pmu:
- close(pmu_fd);
-
-close_prog:
- bpf_object__close(obj);
-
-out:
- return;
-}
-
-static void test_stacktrace_build_id_nmi(void)
-{
- int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
- const char *file = "./test_stacktrace_build_id.o";
- int err, pmu_fd, prog_fd;
- struct perf_event_attr attr = {
- .sample_freq = 5000,
- .freq = 1,
- .type = PERF_TYPE_HARDWARE,
- .config = PERF_COUNT_HW_CPU_CYCLES,
- };
- __u32 key, previous_key, val, duration = 0;
- struct bpf_object *obj;
- char buf[256];
- int i, j;
- struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
- int build_id_matches = 0;
- int retry = 1;
-
-retry:
- err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
- if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
- return;
-
- pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
- 0 /* cpu 0 */, -1 /* group id */,
- 0 /* flags */);
- if (CHECK(pmu_fd < 0, "perf_event_open",
- "err %d errno %d. Does the test host support PERF_COUNT_HW_CPU_CYCLES?\n",
- pmu_fd, errno))
- goto close_prog;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
- if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
- err, errno))
- goto close_pmu;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
- if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
- err, errno))
- goto disable_pmu;
-
- /* find map fds */
- control_map_fd = bpf_find_map(__func__, obj, "control_map");
- if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
- if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
- if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
- err, errno))
- goto disable_pmu;
-
- stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
- if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
- == 0);
- assert(system("taskset 0x1 ./urandom_read 100000") == 0);
- /* disable stack trace collection */
- key = 0;
- val = 1;
- bpf_map_update_elem(control_map_fd, &key, &val, 0);
-
- /* for every element in stackid_hmap, we can find a corresponding one
- * in stackmap, and vise versa.
- */
- err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
- if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
- if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- err = extract_build_id(buf, 256);
-
- if (CHECK(err, "get build_id with readelf",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
- if (CHECK(err, "get_next_key from stackmap",
- "err %d, errno %d\n", err, errno))
- goto disable_pmu;
-
- do {
- char build_id[64];
-
- err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
- if (CHECK(err, "lookup_elem from stackmap",
- "err %d, errno %d\n", err, errno))
- goto disable_pmu;
- for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
- if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
- id_offs[i].offset != 0) {
- for (j = 0; j < 20; ++j)
- sprintf(build_id + 2 * j, "%02x",
- id_offs[i].build_id[j] & 0xff);
- if (strstr(buf, build_id) != NULL)
- build_id_matches = 1;
- }
- previous_key = key;
- } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
-
- /* stack_map_get_build_id_offset() is racy and sometimes can return
- * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
- * try it one more time.
- */
- if (build_id_matches < 1 && retry--) {
- ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
- close(pmu_fd);
- bpf_object__close(obj);
- printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
- __func__);
- goto retry;
- }
-
- if (CHECK(build_id_matches < 1, "build id match",
- "Didn't find expected build ID from the map\n"))
- goto disable_pmu;
-
- /*
- * We intentionally skip compare_stack_ips(). This is because we
- * only support one in_nmi() ips-to-build_id translation per cpu
- * at any time, thus stack_amap here will always fallback to
- * BPF_STACK_BUILD_ID_IP;
- */
-
-disable_pmu:
- ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
-
-close_pmu:
- close(pmu_fd);
-
-close_prog:
- bpf_object__close(obj);
-}
-
-#define MAX_CNT_RAWTP 10ull
-#define MAX_STACK_RAWTP 100
-struct get_stack_trace_t {
- int pid;
- int kern_stack_size;
- int user_stack_size;
- int user_stack_buildid_size;
- __u64 kern_stack[MAX_STACK_RAWTP];
- __u64 user_stack[MAX_STACK_RAWTP];
- struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
-};
-
-static int get_stack_print_output(void *data, int size)
+void *spin_lock_thread(void *arg)
{
- bool good_kern_stack = false, good_user_stack = false;
- const char *nonjit_func = "___bpf_prog_run";
- struct get_stack_trace_t *e = data;
- int i, num_stack;
- static __u64 cnt;
- struct ksym *ks;
-
- cnt++;
-
- if (size < sizeof(struct get_stack_trace_t)) {
- __u64 *raw_data = data;
- bool found = false;
-
- num_stack = size / sizeof(__u64);
- /* If jit is enabled, we do not have a good way to
- * verify the sanity of the kernel stack. So we
- * just assume it is good if the stack is not empty.
- * This could be improved in the future.
- */
- if (jit_enabled) {
- found = num_stack > 0;
- } else {
- for (i = 0; i < num_stack; i++) {
- ks = ksym_search(raw_data[i]);
- if (strcmp(ks->name, nonjit_func) == 0) {
- found = true;
- break;
- }
- }
- }
- if (found) {
- good_kern_stack = true;
- good_user_stack = true;
- }
- } else {
- num_stack = e->kern_stack_size / sizeof(__u64);
- if (jit_enabled) {
- good_kern_stack = num_stack > 0;
- } else {
- for (i = 0; i < num_stack; i++) {
- ks = ksym_search(e->kern_stack[i]);
- if (strcmp(ks->name, nonjit_func) == 0) {
- good_kern_stack = true;
- break;
- }
- }
- }
- if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0)
- good_user_stack = true;
- }
- if (!good_kern_stack || !good_user_stack)
- return LIBBPF_PERF_EVENT_ERROR;
-
- if (cnt == MAX_CNT_RAWTP)
- return LIBBPF_PERF_EVENT_DONE;
-
- return LIBBPF_PERF_EVENT_CONT;
-}
-
-static void test_get_stack_raw_tp(void)
-{
- const char *file = "./test_get_stack_rawtp.o";
- int i, efd, err, prog_fd, pmu_fd, perfmap_fd;
- struct perf_event_attr attr = {};
- struct timespec tv = {0, 10};
- __u32 key = 0, duration = 0;
- struct bpf_object *obj;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
- if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
- return;
-
- efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
- if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
- goto close_prog;
-
- perfmap_fd = bpf_find_map(__func__, obj, "perfmap");
- if (CHECK(perfmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
- perfmap_fd, errno))
- goto close_prog;
-
- err = load_kallsyms();
- if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))
- goto close_prog;
-
- attr.sample_type = PERF_SAMPLE_RAW;
- attr.type = PERF_TYPE_SOFTWARE;
- attr.config = PERF_COUNT_SW_BPF_OUTPUT;
- pmu_fd = syscall(__NR_perf_event_open, &attr, getpid()/*pid*/, -1/*cpu*/,
- -1/*group_fd*/, 0);
- if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
- errno))
- goto close_prog;
-
- err = bpf_map_update_elem(perfmap_fd, &key, &pmu_fd, BPF_ANY);
- if (CHECK(err < 0, "bpf_map_update_elem", "err %d errno %d\n", err,
- errno))
- goto close_prog;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
- if (CHECK(err < 0, "ioctl PERF_EVENT_IOC_ENABLE", "err %d errno %d\n",
- err, errno))
- goto close_prog;
-
- err = perf_event_mmap(pmu_fd);
- if (CHECK(err < 0, "perf_event_mmap", "err %d errno %d\n", err, errno))
- goto close_prog;
-
- /* trigger some syscall action */
- for (i = 0; i < MAX_CNT_RAWTP; i++)
- nanosleep(&tv, NULL);
-
- err = perf_event_poller(pmu_fd, get_stack_print_output);
- if (CHECK(err < 0, "perf_event_poller", "err %d errno %d\n", err, errno))
- goto close_prog;
-
- goto close_prog_noerr;
-close_prog:
- error_cnt++;
-close_prog_noerr:
- bpf_object__close(obj);
-}
-
-static void test_task_fd_query_rawtp(void)
-{
- const char *file = "./test_get_stack_rawtp.o";
- __u64 probe_offset, probe_addr;
- __u32 len, prog_id, fd_type;
- struct bpf_object *obj;
- int efd, err, prog_fd;
- __u32 duration = 0;
- char buf[256];
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
- if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
- return;
-
- efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
- if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
- goto close_prog;
-
- /* query (getpid(), efd) */
- len = sizeof(buf);
- err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
- &fd_type, &probe_offset, &probe_addr);
- if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
- errno))
- goto close_prog;
-
- err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
- strcmp(buf, "sys_enter") == 0;
- if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
- fd_type, buf))
- goto close_prog;
-
- /* test zero len */
- len = 0;
- err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
- &fd_type, &probe_offset, &probe_addr);
- if (CHECK(err < 0, "bpf_task_fd_query (len = 0)", "err %d errno %d\n",
- err, errno))
- goto close_prog;
- err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
- len == strlen("sys_enter");
- if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
- goto close_prog;
-
- /* test empty buffer */
- len = sizeof(buf);
- err = bpf_task_fd_query(getpid(), efd, 0, 0, &len, &prog_id,
- &fd_type, &probe_offset, &probe_addr);
- if (CHECK(err < 0, "bpf_task_fd_query (buf = 0)", "err %d errno %d\n",
- err, errno))
- goto close_prog;
- err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
- len == strlen("sys_enter");
- if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
- goto close_prog;
-
- /* test smaller buffer */
- len = 3;
- err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
- &fd_type, &probe_offset, &probe_addr);
- if (CHECK(err >= 0 || errno != ENOSPC, "bpf_task_fd_query (len = 3)",
- "err %d errno %d\n", err, errno))
- goto close_prog;
- err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
- len == strlen("sys_enter") &&
- strcmp(buf, "sy") == 0;
- if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
- goto close_prog;
-
- goto close_prog_noerr;
-close_prog:
- error_cnt++;
-close_prog_noerr:
- bpf_object__close(obj);
-}
-
-static void test_task_fd_query_tp_core(const char *probe_name,
- const char *tp_name)
-{
- const char *file = "./test_tracepoint.o";
- int err, bytes, efd, prog_fd, pmu_fd;
- struct perf_event_attr attr = {};
- __u64 probe_offset, probe_addr;
- __u32 len, prog_id, fd_type;
- struct bpf_object *obj;
- __u32 duration = 0;
- char buf[256];
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
- if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno))
- goto close_prog;
-
- snprintf(buf, sizeof(buf),
- "/sys/kernel/debug/tracing/events/%s/id", probe_name);
- efd = open(buf, O_RDONLY, 0);
- if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
- goto close_prog;
- bytes = read(efd, buf, sizeof(buf));
- close(efd);
- if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read",
- "bytes %d errno %d\n", bytes, errno))
- goto close_prog;
-
- attr.config = strtol(buf, NULL, 0);
- attr.type = PERF_TYPE_TRACEPOINT;
- attr.sample_type = PERF_SAMPLE_RAW;
- attr.sample_period = 1;
- attr.wakeup_events = 1;
- pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
- 0 /* cpu 0 */, -1 /* group id */,
- 0 /* flags */);
- if (CHECK(err, "perf_event_open", "err %d errno %d\n", err, errno))
- goto close_pmu;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
- if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err,
- errno))
- goto close_pmu;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
- if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err,
- errno))
- goto close_pmu;
-
- /* query (getpid(), pmu_fd) */
- len = sizeof(buf);
- err = bpf_task_fd_query(getpid(), pmu_fd, 0, buf, &len, &prog_id,
- &fd_type, &probe_offset, &probe_addr);
- if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
- errno))
- goto close_pmu;
-
- err = (fd_type == BPF_FD_TYPE_TRACEPOINT) && !strcmp(buf, tp_name);
- if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
- fd_type, buf))
- goto close_pmu;
-
- close(pmu_fd);
- goto close_prog_noerr;
-
-close_pmu:
- close(pmu_fd);
-close_prog:
- error_cnt++;
-close_prog_noerr:
- bpf_object__close(obj);
-}
-
-static void test_task_fd_query_tp(void)
-{
- test_task_fd_query_tp_core("sched/sched_switch",
- "sched_switch");
- test_task_fd_query_tp_core("syscalls/sys_enter_read",
- "sys_enter_read");
-}
-
-static void test_reference_tracking()
-{
- const char *file = "./test_sk_lookup_kern.o";
- struct bpf_object *obj;
- struct bpf_program *prog;
- __u32 duration = 0;
- int err = 0;
-
- obj = bpf_object__open(file);
- if (IS_ERR(obj)) {
- error_cnt++;
- return;
- }
-
- bpf_object__for_each_program(prog, obj) {
- const char *title;
-
- /* Ignore .text sections */
- title = bpf_program__title(prog, false);
- if (strstr(title, ".text") != NULL)
- continue;
-
- bpf_program__set_type(prog, BPF_PROG_TYPE_SCHED_CLS);
+ __u32 duration, retval;
+ int err, prog_fd = *(u32 *) arg;
- /* Expect verifier failure if test name has 'fail' */
- if (strstr(title, "fail") != NULL) {
- libbpf_set_print(NULL, NULL, NULL);
- err = !bpf_program__load(prog, "GPL", 0);
- libbpf_set_print(printf, printf, NULL);
- } else {
- err = bpf_program__load(prog, "GPL", 0);
- }
- CHECK(err, title, "\n");
- }
- bpf_object__close(obj);
+ err = bpf_prog_test_run(prog_fd, 10000, &pkt_v4, sizeof(pkt_v4),
+ NULL, NULL, &retval, &duration);
+ CHECK(err || retval, "",
+ "err %d errno %d retval %d duration %d\n",
+ err, errno, retval, duration);
+ pthread_exit(arg);
}
-enum {
- QUEUE,
- STACK,
-};
-
-static void test_queue_stack_map(int type)
-{
- const int MAP_SIZE = 32;
- __u32 vals[MAP_SIZE], duration, retval, size, val;
- int i, err, prog_fd, map_in_fd, map_out_fd;
- char file[32], buf[128];
- struct bpf_object *obj;
- struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
-
- /* Fill test values to be used */
- for (i = 0; i < MAP_SIZE; i++)
- vals[i] = rand();
-
- if (type == QUEUE)
- strncpy(file, "./test_queue_map.o", sizeof(file));
- else if (type == STACK)
- strncpy(file, "./test_stack_map.o", sizeof(file));
- else
- return;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
- if (err) {
- error_cnt++;
- return;
- }
-
- map_in_fd = bpf_find_map(__func__, obj, "map_in");
- if (map_in_fd < 0)
- goto out;
-
- map_out_fd = bpf_find_map(__func__, obj, "map_out");
- if (map_out_fd < 0)
- goto out;
-
- /* Push 32 elements to the input map */
- for (i = 0; i < MAP_SIZE; i++) {
- err = bpf_map_update_elem(map_in_fd, NULL, &vals[i], 0);
- if (err) {
- error_cnt++;
- goto out;
- }
- }
-
- /* The eBPF program pushes iph.saddr in the output map,
- * pops the input map and saves this value in iph.daddr
- */
- for (i = 0; i < MAP_SIZE; i++) {
- if (type == QUEUE) {
- val = vals[i];
- pkt_v4.iph.saddr = vals[i] * 5;
- } else if (type == STACK) {
- val = vals[MAP_SIZE - 1 - i];
- pkt_v4.iph.saddr = vals[MAP_SIZE - 1 - i] * 5;
- }
-
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
- buf, &size, &retval, &duration);
- if (err || retval || size != sizeof(pkt_v4) ||
- iph->daddr != val)
- break;
- }
-
- CHECK(err || retval || size != sizeof(pkt_v4) || iph->daddr != val,
- "bpf_map_pop_elem",
- "err %d errno %d retval %d size %d iph->daddr %u\n",
- err, errno, retval, size, iph->daddr);
-
- /* Queue is empty, program should return TC_ACT_SHOT */
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
- buf, &size, &retval, &duration);
- CHECK(err || retval != 2 /* TC_ACT_SHOT */|| size != sizeof(pkt_v4),
- "check-queue-stack-map-empty",
- "err %d errno %d retval %d size %d\n",
- err, errno, retval, size);
-
- /* Check that the program pushed elements correctly */
- for (i = 0; i < MAP_SIZE; i++) {
- err = bpf_map_lookup_and_delete_elem(map_out_fd, NULL, &val);
- if (err || val != vals[i] * 5)
- break;
- }
-
- CHECK(i != MAP_SIZE && (err || val != vals[i] * 5),
- "bpf_map_push_elem", "err %d value %u\n", err, val);
-
-out:
- pkt_v4.iph.saddr = 0;
- bpf_object__close(obj);
-}
+#define DECLARE
+#include <prog_tests/tests.h>
+#undef DECLARE
int main(void)
{
@@ -1918,27 +168,9 @@ int main(void)
jit_enabled = is_jit_enabled();
- test_pkt_access();
- test_prog_run_xattr();
- test_xdp();
- test_xdp_adjust_tail();
- test_l4lb_all();
- test_xdp_noinline();
- test_tcp_estats();
- test_bpf_obj_id();
- test_pkt_md_access();
- test_obj_name();
- test_tp_attach_query();
- test_stacktrace_map();
- test_stacktrace_build_id();
- test_stacktrace_build_id_nmi();
- test_stacktrace_map_raw_tp();
- test_get_stack_raw_tp();
- test_task_fd_query_rawtp();
- test_task_fd_query_tp();
- test_reference_tracking();
- test_queue_stack_map(QUEUE);
- test_queue_stack_map(STACK);
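+	/* Expand prog_tests/tests.h again to invoke each declared test. */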
+#define CALL
+#include <prog_tests/tests.h>
+#undef CALL
printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
new file mode 100644
index 000000000000..51a07367cd43
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <stdio.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <time.h>
+#include <signal.h>
+
+#include <linux/types.h>
+typedef __u16 __sum16;
+#include <arpa/inet.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/filter.h>
+#include <linux/perf_event.h>
+#include <linux/unistd.h>
+
+#include <sys/ioctl.h>
+#include <sys/wait.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <linux/bpf.h>
+#include <linux/err.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include "test_iptunnel_common.h"
+#include "bpf_util.h"
+#include "bpf_endian.h"
+#include "trace_helpers.h"
+#include "flow_dissector_load.h"
+
+extern int error_cnt, pass_cnt;
+extern bool jit_enabled;
+
+#define MAGIC_BYTES 123
+
+/* ipv4 test vector */
+struct ipv4_packet {
+ struct ethhdr eth;
+ struct iphdr iph;
+ struct tcphdr tcp;
+} __packed;
+extern struct ipv4_packet pkt_v4;
+
+/* ipv6 test vector */
+struct ipv6_packet {
+ struct ethhdr eth;
+ struct ipv6hdr iph;
+ struct tcphdr tcp;
+} __packed;
+extern struct ipv6_packet pkt_v6;
+
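+/* _CHECK() evaluates the condition, bumps the pass/error counters and prints
+ * a tagged PASS/FAIL line; it evaluates to non-zero when the check failed.
+ */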
+#define _CHECK(condition, tag, duration, format...) ({ \
+ int __ret = !!(condition); \
+ if (__ret) { \
+ error_cnt++; \
+ printf("%s:FAIL:%s ", __func__, tag); \
+ printf(format); \
+ } else { \
+ pass_cnt++; \
+ printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\
+ } \
+ __ret; \
+})
+
+#define CHECK(condition, tag, format...) \
+ _CHECK(condition, tag, duration, format)
+#define CHECK_ATTR(condition, tag, format...) \
+ _CHECK(condition, tag, tattr.duration, format)
+
+#define MAGIC_VAL 0x1234
+#define NUM_ITER 100000
+#define VIP_NUM 5
+
+static inline __u64 ptr_to_u64(const void *ptr)
+{
+ return (__u64) (unsigned long) ptr;
+}
+
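+/* Helpers shared between test_progs.c and the individual prog_tests. */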
+int bpf_find_map(const char *test, struct bpf_object *obj, const char *name);
+int compare_map_keys(int map1_fd, int map2_fd);
+int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len);
+int extract_build_id(char *build_id, size_t size);
+void *spin_lock_thread(void *arg);
diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c
index 561ffb6d6433..fb679ac3d4b0 100644
--- a/tools/testing/selftests/bpf/test_sock.c
+++ b/tools/testing/selftests/bpf/test_sock.c
@@ -20,6 +20,7 @@
#define MAX_INSNS 512
char bpf_log_buf[BPF_LOG_BUF_SIZE];
+static bool verbose = false;
struct sock_test {
const char *descr;
@@ -325,6 +326,7 @@ static int load_sock_prog(const struct bpf_insn *prog,
enum bpf_attach_type attach_type)
{
struct bpf_load_program_attr attr;
+ int ret;
memset(&attr, 0, sizeof(struct bpf_load_program_attr));
attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
@@ -332,8 +334,13 @@ static int load_sock_prog(const struct bpf_insn *prog,
attr.insns = prog;
attr.insns_cnt = probe_prog_length(attr.insns);
attr.license = "GPL";
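+	/* Ask the verifier for a verbose log so it can be dumped on failure. */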
+ attr.log_level = 2;
- return bpf_load_program_xattr(&attr, bpf_log_buf, BPF_LOG_BUF_SIZE);
+ ret = bpf_load_program_xattr(&attr, bpf_log_buf, BPF_LOG_BUF_SIZE);
+ if (verbose && ret < 0)
+ fprintf(stderr, "%s\n", bpf_log_buf);
+
+ return ret;
}
static int attach_sock_prog(int cgfd, int progfd,
diff --git a/tools/testing/selftests/bpf/test_sock_fields.c b/tools/testing/selftests/bpf/test_sock_fields.c
new file mode 100644
index 000000000000..bc8943938bf5
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_sock_fields.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+
+#include <sys/socket.h>
+#include <sys/epoll.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include "cgroup_helpers.h"
+#include "bpf_rlimit.h"
+
+enum bpf_array_idx {
+ SRV_IDX,
+ CLI_IDX,
+ __NR_BPF_ARRAY_IDX,
+};
+
+#define CHECK(condition, tag, format...) ({ \
+ int __ret = !!(condition); \
+ if (__ret) { \
+ printf("%s(%d):FAIL:%s ", __func__, __LINE__, tag); \
+ printf(format); \
+ printf("\n"); \
+ exit(-1); \
+ } \
+})
+
+#define TEST_CGROUP "/test-bpf-sock-fields"
+#define DATA "Hello BPF!"
+#define DATA_LEN sizeof(DATA)
+
+static struct sockaddr_in6 srv_sa6, cli_sa6;
+static int linum_map_fd;
+static int addr_map_fd;
+static int tp_map_fd;
+static int sk_map_fd;
+static __u32 srv_idx = SRV_IDX;
+static __u32 cli_idx = CLI_IDX;
+
+static void init_loopback6(struct sockaddr_in6 *sa6)
+{
+ memset(sa6, 0, sizeof(*sa6));
+ sa6->sin6_family = AF_INET6;
+ sa6->sin6_addr = in6addr_loopback;
+}
+
+static void print_sk(const struct bpf_sock *sk)
+{
+ char src_ip4[24], dst_ip4[24];
+ char src_ip6[64], dst_ip6[64];
+
+ inet_ntop(AF_INET, &sk->src_ip4, src_ip4, sizeof(src_ip4));
+ inet_ntop(AF_INET6, &sk->src_ip6, src_ip6, sizeof(src_ip6));
+ inet_ntop(AF_INET, &sk->dst_ip4, dst_ip4, sizeof(dst_ip4));
+ inet_ntop(AF_INET6, &sk->dst_ip6, dst_ip6, sizeof(dst_ip6));
+
+ printf("state:%u bound_dev_if:%u family:%u type:%u protocol:%u mark:%u priority:%u "
+ "src_ip4:%x(%s) src_ip6:%x:%x:%x:%x(%s) src_port:%u "
+ "dst_ip4:%x(%s) dst_ip6:%x:%x:%x:%x(%s) dst_port:%u\n",
+ sk->state, sk->bound_dev_if, sk->family, sk->type, sk->protocol,
+ sk->mark, sk->priority,
+ sk->src_ip4, src_ip4,
+ sk->src_ip6[0], sk->src_ip6[1], sk->src_ip6[2], sk->src_ip6[3],
+ src_ip6, sk->src_port,
+ sk->dst_ip4, dst_ip4,
+ sk->dst_ip6[0], sk->dst_ip6[1], sk->dst_ip6[2], sk->dst_ip6[3],
+ dst_ip6, ntohs(sk->dst_port));
+}
+
+static void print_tp(const struct bpf_tcp_sock *tp)
+{
+ printf("snd_cwnd:%u srtt_us:%u rtt_min:%u snd_ssthresh:%u rcv_nxt:%u "
+	       "snd_nxt:%u snd_una:%u mss_cache:%u ecn_flags:%u "
+ "rate_delivered:%u rate_interval_us:%u packets_out:%u "
+ "retrans_out:%u total_retrans:%u segs_in:%u data_segs_in:%u "
+ "segs_out:%u data_segs_out:%u lost_out:%u sacked_out:%u "
+ "bytes_received:%llu bytes_acked:%llu\n",
+ tp->snd_cwnd, tp->srtt_us, tp->rtt_min, tp->snd_ssthresh,
+ tp->rcv_nxt, tp->snd_nxt, tp->snd_una, tp->mss_cache,
+ tp->ecn_flags, tp->rate_delivered, tp->rate_interval_us,
+ tp->packets_out, tp->retrans_out, tp->total_retrans,
+ tp->segs_in, tp->data_segs_in, tp->segs_out,
+ tp->data_segs_out, tp->lost_out, tp->sacked_out,
+ tp->bytes_received, tp->bytes_acked);
+}
+
+static void check_result(void)
+{
+ struct bpf_tcp_sock srv_tp, cli_tp;
+ struct bpf_sock srv_sk, cli_sk;
+ __u32 linum, idx0 = 0;
+ int err;
+
+ err = bpf_map_lookup_elem(linum_map_fd, &idx0, &linum);
+ CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)",
+ "err:%d errno:%d", err, errno);
+
+ err = bpf_map_lookup_elem(sk_map_fd, &srv_idx, &srv_sk);
+ CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &srv_idx)",
+ "err:%d errno:%d", err, errno);
+ err = bpf_map_lookup_elem(tp_map_fd, &srv_idx, &srv_tp);
+ CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &srv_idx)",
+ "err:%d errno:%d", err, errno);
+
+ err = bpf_map_lookup_elem(sk_map_fd, &cli_idx, &cli_sk);
+ CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &cli_idx)",
+ "err:%d errno:%d", err, errno);
+ err = bpf_map_lookup_elem(tp_map_fd, &cli_idx, &cli_tp);
+ CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &cli_idx)",
+ "err:%d errno:%d", err, errno);
+
+ printf("srv_sk: ");
+ print_sk(&srv_sk);
+ printf("\n");
+
+ printf("cli_sk: ");
+ print_sk(&cli_sk);
+ printf("\n");
+
+ printf("srv_tp: ");
+ print_tp(&srv_tp);
+ printf("\n");
+
+ printf("cli_tp: ");
+ print_tp(&cli_tp);
+ printf("\n");
+
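+	/* Neither socket should be unset (0) or still in TCP_LISTEN (state 10). */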
+ CHECK(srv_sk.state == 10 ||
+ !srv_sk.state ||
+ srv_sk.family != AF_INET6 ||
+ srv_sk.protocol != IPPROTO_TCP ||
+ memcmp(srv_sk.src_ip6, &in6addr_loopback,
+ sizeof(srv_sk.src_ip6)) ||
+ memcmp(srv_sk.dst_ip6, &in6addr_loopback,
+ sizeof(srv_sk.dst_ip6)) ||
+ srv_sk.src_port != ntohs(srv_sa6.sin6_port) ||
+ srv_sk.dst_port != cli_sa6.sin6_port,
+ "Unexpected srv_sk", "Check srv_sk output. linum:%u", linum);
+
+ CHECK(cli_sk.state == 10 ||
+ !cli_sk.state ||
+ cli_sk.family != AF_INET6 ||
+ cli_sk.protocol != IPPROTO_TCP ||
+ memcmp(cli_sk.src_ip6, &in6addr_loopback,
+ sizeof(cli_sk.src_ip6)) ||
+ memcmp(cli_sk.dst_ip6, &in6addr_loopback,
+ sizeof(cli_sk.dst_ip6)) ||
+ cli_sk.src_port != ntohs(cli_sa6.sin6_port) ||
+ cli_sk.dst_port != srv_sa6.sin6_port,
+ "Unexpected cli_sk", "Check cli_sk output. linum:%u", linum);
+
+ CHECK(srv_tp.data_segs_out != 1 ||
+ srv_tp.data_segs_in ||
+ srv_tp.snd_cwnd != 10 ||
+ srv_tp.total_retrans ||
+ srv_tp.bytes_acked != DATA_LEN,
+ "Unexpected srv_tp", "Check srv_tp output. linum:%u", linum);
+
+ CHECK(cli_tp.data_segs_out ||
+ cli_tp.data_segs_in != 1 ||
+ cli_tp.snd_cwnd != 10 ||
+ cli_tp.total_retrans ||
+ cli_tp.bytes_received != DATA_LEN,
+ "Unexpected cli_tp", "Check cli_tp output. linum:%u", linum);
+}
+
+static void test(void)
+{
+ int listen_fd, cli_fd, accept_fd, epfd, err;
+ struct epoll_event ev;
+ socklen_t addrlen;
+
+ addrlen = sizeof(struct sockaddr_in6);
+ ev.events = EPOLLIN;
+
+ epfd = epoll_create(1);
+ CHECK(epfd == -1, "epoll_create()", "epfd:%d errno:%d", epfd, errno);
+
+ /* Prepare listen_fd */
+ listen_fd = socket(AF_INET6, SOCK_STREAM | SOCK_NONBLOCK, 0);
+ CHECK(listen_fd == -1, "socket()", "listen_fd:%d errno:%d",
+ listen_fd, errno);
+
+ init_loopback6(&srv_sa6);
+ err = bind(listen_fd, (struct sockaddr *)&srv_sa6, sizeof(srv_sa6));
+ CHECK(err, "bind(listen_fd)", "err:%d errno:%d", err, errno);
+
+ err = getsockname(listen_fd, (struct sockaddr *)&srv_sa6, &addrlen);
+ CHECK(err, "getsockname(listen_fd)", "err:%d errno:%d", err, errno);
+
+ err = listen(listen_fd, 1);
+ CHECK(err, "listen(listen_fd)", "err:%d errno:%d", err, errno);
+
+ /* Prepare cli_fd */
+ cli_fd = socket(AF_INET6, SOCK_STREAM | SOCK_NONBLOCK, 0);
+ CHECK(cli_fd == -1, "socket()", "cli_fd:%d errno:%d", cli_fd, errno);
+
+ init_loopback6(&cli_sa6);
+ err = bind(cli_fd, (struct sockaddr *)&cli_sa6, sizeof(cli_sa6));
+ CHECK(err, "bind(cli_fd)", "err:%d errno:%d", err, errno);
+
+ err = getsockname(cli_fd, (struct sockaddr *)&cli_sa6, &addrlen);
+ CHECK(err, "getsockname(cli_fd)", "err:%d errno:%d",
+ err, errno);
+
+ /* Update addr_map with srv_sa6 and cli_sa6 */
+ err = bpf_map_update_elem(addr_map_fd, &srv_idx, &srv_sa6, 0);
+ CHECK(err, "map_update", "err:%d errno:%d", err, errno);
+
+ err = bpf_map_update_elem(addr_map_fd, &cli_idx, &cli_sa6, 0);
+ CHECK(err, "map_update", "err:%d errno:%d", err, errno);
+
+ /* Connect from cli_sa6 to srv_sa6 */
+ err = connect(cli_fd, (struct sockaddr *)&srv_sa6, addrlen);
+ printf("srv_sa6.sin6_port:%u cli_sa6.sin6_port:%u\n\n",
+ ntohs(srv_sa6.sin6_port), ntohs(cli_sa6.sin6_port));
+ CHECK(err && errno != EINPROGRESS,
+ "connect(cli_fd)", "err:%d errno:%d", err, errno);
+
+ ev.data.fd = listen_fd;
+ err = epoll_ctl(epfd, EPOLL_CTL_ADD, listen_fd, &ev);
+ CHECK(err, "epoll_ctl(EPOLL_CTL_ADD, listen_fd)", "err:%d errno:%d",
+ err, errno);
+
+ /* Accept the connection */
+ /* Have some timeout in accept(listen_fd). Just in case. */
+ err = epoll_wait(epfd, &ev, 1, 1000);
+ CHECK(err != 1 || ev.data.fd != listen_fd,
+ "epoll_wait(listen_fd)",
+ "err:%d errno:%d ev.data.fd:%d listen_fd:%d",
+ err, errno, ev.data.fd, listen_fd);
+
+ accept_fd = accept(listen_fd, NULL, NULL);
+ CHECK(accept_fd == -1, "accept(listen_fd)", "accept_fd:%d errno:%d",
+ accept_fd, errno);
+ close(listen_fd);
+
+ /* Send some data from accept_fd to cli_fd */
+ err = send(accept_fd, DATA, DATA_LEN, 0);
+ CHECK(err != DATA_LEN, "send(accept_fd)", "err:%d errno:%d",
+ err, errno);
+
+ /* Have some timeout in recv(cli_fd). Just in case. */
+ ev.data.fd = cli_fd;
+ err = epoll_ctl(epfd, EPOLL_CTL_ADD, cli_fd, &ev);
+ CHECK(err, "epoll_ctl(EPOLL_CTL_ADD, cli_fd)", "err:%d errno:%d",
+ err, errno);
+
+ err = epoll_wait(epfd, &ev, 1, 1000);
+ CHECK(err != 1 || ev.data.fd != cli_fd,
+ "epoll_wait(cli_fd)", "err:%d errno:%d ev.data.fd:%d cli_fd:%d",
+ err, errno, ev.data.fd, cli_fd);
+
+ err = recv(cli_fd, NULL, 0, MSG_TRUNC);
+ CHECK(err, "recv(cli_fd)", "err:%d errno:%d", err, errno);
+
+ close(epfd);
+ close(accept_fd);
+ close(cli_fd);
+
+ check_result();
+}
+
+int main(int argc, char **argv)
+{
+ struct bpf_prog_load_attr attr = {
+ .file = "test_sock_fields_kern.o",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .expected_attach_type = BPF_CGROUP_INET_EGRESS,
+ };
+ int cgroup_fd, prog_fd, err;
+ struct bpf_object *obj;
+ struct bpf_map *map;
+
+ err = setup_cgroup_environment();
+ CHECK(err, "setup_cgroup_environment()", "err:%d errno:%d",
+ err, errno);
+
+ atexit(cleanup_cgroup_environment);
+
+ /* Create a cgroup, get fd, and join it */
+ cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
+ CHECK(cgroup_fd == -1, "create_and_get_cgroup()",
+ "cgroup_fd:%d errno:%d", cgroup_fd, errno);
+
+ err = join_cgroup(TEST_CGROUP);
+ CHECK(err, "join_cgroup", "err:%d errno:%d", err, errno);
+
+ err = bpf_prog_load_xattr(&attr, &obj, &prog_fd);
+ CHECK(err, "bpf_prog_load_xattr()", "err:%d", err);
+
+ err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0);
+	CHECK(err == -1, "bpf_prog_attach(BPF_CGROUP_INET_EGRESS)",
+	      "err:%d errno:%d", err, errno);
+ close(cgroup_fd);
+
+ map = bpf_object__find_map_by_name(obj, "addr_map");
+ CHECK(!map, "cannot find addr_map", "(null)");
+ addr_map_fd = bpf_map__fd(map);
+
+ map = bpf_object__find_map_by_name(obj, "sock_result_map");
+ CHECK(!map, "cannot find sock_result_map", "(null)");
+ sk_map_fd = bpf_map__fd(map);
+
+ map = bpf_object__find_map_by_name(obj, "tcp_sock_result_map");
+ CHECK(!map, "cannot find tcp_sock_result_map", "(null)");
+ tp_map_fd = bpf_map__fd(map);
+
+ map = bpf_object__find_map_by_name(obj, "linum_map");
+ CHECK(!map, "cannot find linum_map", "(null)");
+ linum_map_fd = bpf_map__fd(map);
+
+ test();
+
+ bpf_object__close(obj);
+ cleanup_cgroup_environment();
+
+ printf("PASS\n");
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/test_socket_cookie.c b/tools/testing/selftests/bpf/test_socket_cookie.c
index fc7832ee566b..e51d63786ff8 100644
--- a/tools/testing/selftests/bpf/test_socket_cookie.c
+++ b/tools/testing/selftests/bpf/test_socket_cookie.c
@@ -158,10 +158,8 @@ static int run_test(int cgfd)
bpf_object__for_each_program(prog, pobj) {
prog_name = bpf_program__title(prog, /*needs_copy*/ false);
- if (libbpf_attach_type_by_name(prog_name, &attach_type)) {
- log_err("Unexpected prog: %s", prog_name);
+ if (libbpf_attach_type_by_name(prog_name, &attach_type))
goto err;
- }
err = bpf_prog_attach(bpf_program__fd(prog), cgfd, attach_type,
BPF_F_ALLOW_OVERRIDE);
diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
index e85a771f607b..3845144e2c91 100644
--- a/tools/testing/selftests/bpf/test_sockmap.c
+++ b/tools/testing/selftests/bpf/test_sockmap.c
@@ -10,7 +10,6 @@
#include <unistd.h>
#include <string.h>
#include <errno.h>
-#include <sys/ioctl.h>
#include <stdbool.h>
#include <signal.h>
#include <fcntl.h>
diff --git a/tools/testing/selftests/bpf/test_tcpnotify_user.c b/tools/testing/selftests/bpf/test_tcpnotify_user.c
index 4e4353711a86..86152d9ae95b 100644
--- a/tools/testing/selftests/bpf/test_tcpnotify_user.c
+++ b/tools/testing/selftests/bpf/test_tcpnotify_user.c
@@ -148,17 +148,17 @@ int main(int argc, char **argv)
pthread_create(&tid, NULL, poller_thread, (void *)&pmu_fd);
sprintf(test_script,
- "/usr/sbin/iptables -A INPUT -p tcp --dport %d -j DROP",
+ "iptables -A INPUT -p tcp --dport %d -j DROP",
TESTPORT);
system(test_script);
sprintf(test_script,
- "/usr/bin/nc 127.0.0.1 %d < /etc/passwd > /dev/null 2>&1 ",
+ "nc 127.0.0.1 %d < /etc/passwd > /dev/null 2>&1 ",
TESTPORT);
system(test_script);
sprintf(test_script,
- "/usr/sbin/iptables -D INPUT -p tcp --dport %d -j DROP",
+ "iptables -D INPUT -p tcp --dport %d -j DROP",
TESTPORT);
system(test_script);
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 2fd90d456892..477a9dcf9fff 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -32,8 +32,10 @@
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
+#include <linux/btf.h>
#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
#ifdef HAVE_GENHDR
# include "autoconf.h"
@@ -49,7 +51,7 @@
#define MAX_INSNS BPF_MAXINSNS
#define MAX_FIXUPS 8
-#define MAX_NR_MAPS 13
+#define MAX_NR_MAPS 14
#define MAX_TEST_RUNS 8
#define POINTER_VALUE 0xcafe4all
#define TEST_DATA_LEN 64
@@ -59,6 +61,7 @@
#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
static bool unpriv_disabled = false;
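+/* Number of tests that were skipped instead of run. */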
+static int skips;
struct bpf_test {
const char *descr;
@@ -76,6 +79,7 @@ struct bpf_test {
int fixup_map_in_map[MAX_FIXUPS];
int fixup_cgroup_storage[MAX_FIXUPS];
int fixup_percpu_cgroup_storage[MAX_FIXUPS];
+ int fixup_map_spin_lock[MAX_FIXUPS];
const char *errstr;
const char *errstr_unpriv;
uint32_t retval, retval_unpriv, insn_processed;
@@ -211,15394 +215,46 @@ static void bpf_fill_rand_ld_dw(struct bpf_test *self)
BPF_MOV64_IMM(BPF_REG_5, 0), \
BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp)
-static struct bpf_test tests[] = {
- {
- "add+sub+mul",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
- BPF_MOV64_IMM(BPF_REG_2, 3),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = -3,
- },
- {
- "DIV32 by 0, zero check 1",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_2, 1),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "DIV32 by 0, zero check 2",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
- BPF_MOV32_IMM(BPF_REG_2, 1),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "DIV64 by 0, zero check",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_2, 1),
- BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "MOD32 by 0, zero check 1",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_2, 1),
- BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "MOD32 by 0, zero check 2",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
- BPF_MOV32_IMM(BPF_REG_2, 1),
- BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "MOD64 by 0, zero check",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_2, 1),
- BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "DIV32 by 0, zero check ok, cls",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_MOV32_IMM(BPF_REG_1, 2),
- BPF_MOV32_IMM(BPF_REG_2, 16),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 8,
- },
- {
- "DIV32 by 0, zero check 1, cls",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "DIV32 by 0, zero check 2, cls",
- .insns = {
- BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "DIV64 by 0, zero check, cls",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "MOD32 by 0, zero check ok, cls",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_MOV32_IMM(BPF_REG_1, 3),
- BPF_MOV32_IMM(BPF_REG_2, 5),
- BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "MOD32 by 0, zero check 1, cls",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "MOD32 by 0, zero check 2, cls",
- .insns = {
- BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "MOD64 by 0, zero check 1, cls",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_0, 2),
- BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "MOD64 by 0, zero check 2, cls",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_0, -1),
- BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = -1,
- },
- /* Just make sure that JITs used udiv/umod as otherwise we get
- * an exception from INT_MIN/-1 overflow similarly as with div
- * by zero.
- */
- {
- "DIV32 overflow, check 1",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, -1),
- BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "DIV32 overflow, check 2",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
- BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "DIV64 overflow, check 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, -1),
- BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
- BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "DIV64 overflow, check 2",
- .insns = {
- BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
- BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "MOD32 overflow, check 1",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, -1),
- BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
- BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = INT_MIN,
- },
- {
- "MOD32 overflow, check 2",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
- BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = INT_MIN,
- },
- {
- "MOD64 overflow, check 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, -1),
- BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
- BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
- BPF_MOV32_IMM(BPF_REG_0, 0),
- BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "MOD64 overflow, check 2",
- .insns = {
- BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
- BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
- BPF_MOV32_IMM(BPF_REG_0, 0),
- BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "xor32 zero extend check",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_2, -1),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
- BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
- BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
- BPF_MOV32_IMM(BPF_REG_0, 2),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "empty prog",
- .insns = {
- },
- .errstr = "unknown opcode 00",
- .result = REJECT,
- },
- {
- "only exit insn",
- .insns = {
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 !read_ok",
- .result = REJECT,
- },
- {
- "unreachable",
- .insns = {
- BPF_EXIT_INSN(),
- BPF_EXIT_INSN(),
- },
- .errstr = "unreachable",
- .result = REJECT,
- },
- {
- "unreachable2",
- .insns = {
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "unreachable",
- .result = REJECT,
- },
- {
- "out of range jump",
- .insns = {
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "jump out of range",
- .result = REJECT,
- },
- {
- "out of range jump2",
- .insns = {
- BPF_JMP_IMM(BPF_JA, 0, 0, -2),
- BPF_EXIT_INSN(),
- },
- .errstr = "jump out of range",
- .result = REJECT,
- },
- {
- "test1 ld_imm64",
- .insns = {
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_LD_IMM64(BPF_REG_0, 1),
- BPF_LD_IMM64(BPF_REG_0, 1),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid BPF_LD_IMM insn",
- .errstr_unpriv = "R1 pointer comparison",
- .result = REJECT,
- },
- {
- "test2 ld_imm64",
- .insns = {
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_LD_IMM64(BPF_REG_0, 1),
- BPF_LD_IMM64(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid BPF_LD_IMM insn",
- .errstr_unpriv = "R1 pointer comparison",
- .result = REJECT,
- },
- {
- "test3 ld_imm64",
- .insns = {
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_LD_IMM64(BPF_REG_0, 1),
- BPF_LD_IMM64(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_ld_imm64 insn",
- .result = REJECT,
- },
- {
- "test4 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_ld_imm64 insn",
- .result = REJECT,
- },
- {
- "test5 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
- },
- .errstr = "invalid bpf_ld_imm64 insn",
- .result = REJECT,
- },
- {
- "test6 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
- BPF_RAW_INSN(0, 0, 0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "test7 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
- BPF_RAW_INSN(0, 0, 0, 0, 1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "test8 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
- BPF_RAW_INSN(0, 0, 0, 0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "uses reserved fields",
- .result = REJECT,
- },
- {
- "test9 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
- BPF_RAW_INSN(0, 0, 0, 1, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_ld_imm64 insn",
- .result = REJECT,
- },
- {
- "test10 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
- BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_ld_imm64 insn",
- .result = REJECT,
- },
- {
- "test11 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
- BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_ld_imm64 insn",
- .result = REJECT,
- },
- {
- "test12 ld_imm64",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
- BPF_RAW_INSN(0, 0, 0, 0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "not pointing to valid bpf_map",
- .result = REJECT,
- },
- {
- "test13 ld_imm64",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
- BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_ld_imm64 insn",
- .result = REJECT,
- },
- {
- "arsh32 on imm",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "arsh32 on imm 2",
- .insns = {
- BPF_LD_IMM64(BPF_REG_0, 0x1122334485667788),
- BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 7),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = -16069393,
- },
- {
- "arsh32 on reg",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_MOV64_IMM(BPF_REG_1, 5),
- BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "arsh32 on reg 2",
- .insns = {
- BPF_LD_IMM64(BPF_REG_0, 0xffff55667788),
- BPF_MOV64_IMM(BPF_REG_1, 15),
- BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 43724,
- },
- {
- "arsh64 on imm",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "arsh64 on reg",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_MOV64_IMM(BPF_REG_1, 5),
- BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "no bpf_exit",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
- },
- .errstr = "not an exit",
- .result = REJECT,
- },
- {
- "loop (back-edge)",
- .insns = {
- BPF_JMP_IMM(BPF_JA, 0, 0, -1),
- BPF_EXIT_INSN(),
- },
- .errstr = "back-edge",
- .result = REJECT,
- },
- {
- "loop2 (back-edge)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
- BPF_JMP_IMM(BPF_JA, 0, 0, -4),
- BPF_EXIT_INSN(),
- },
- .errstr = "back-edge",
- .result = REJECT,
- },
- {
- "conditional loop",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
- BPF_EXIT_INSN(),
- },
- .errstr = "back-edge",
- .result = REJECT,
- },
- {
- "read uninitialized register",
- .insns = {
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .errstr = "R2 !read_ok",
- .result = REJECT,
- },
- {
- "read invalid register",
- .insns = {
- BPF_MOV64_REG(BPF_REG_0, -1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R15 is invalid",
- .result = REJECT,
- },
- {
- "program doesn't init R0 before exit",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 !read_ok",
- .result = REJECT,
- },
- {
- "program doesn't init R0 before exit in all branches",
- .insns = {
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 !read_ok",
- .errstr_unpriv = "R1 pointer comparison",
- .result = REJECT,
- },
- {
- "stack out of bounds",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid stack",
- .result = REJECT,
- },
- {
- "invalid call insn1",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "unknown opcode 8d",
- .result = REJECT,
- },
- {
- "invalid call insn2",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "BPF_CALL uses reserved",
- .result = REJECT,
- },
- {
- "invalid function call",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid func unknown#1234567",
- .result = REJECT,
- },
- {
- "uninitialized stack1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 2 },
- .errstr = "invalid indirect read from stack",
- .result = REJECT,
- },
- {
- "uninitialized stack2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid read from stack",
- .result = REJECT,
- },
- {
- "invalid fp arithmetic",
- /* If this gets ever changed, make sure JITs can deal with it. */
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 subtraction from stack pointer",
- .result = REJECT,
- },
- {
- "non-invalid fp arithmetic",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "invalid argument register",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_cgroup_classid),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_cgroup_classid),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 !read_ok",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "non-invalid argument register",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_cgroup_classid),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_cgroup_classid),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "check valid spill/fill",
- .insns = {
- /* spill R1(ctx) into stack */
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- /* fill it back into R2 */
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
- /* should be able to access R0 = *(R2 + 8) */
- /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R0 leaks addr",
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .retval = POINTER_VALUE,
- },
- {
- "check valid spill/fill, skb mark",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = ACCEPT,
- },
- {
- "check corrupted spill/fill",
- .insns = {
- /* spill R1(ctx) into stack */
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- /* mess up with R1 pointer on stack */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
- /* fill back into R0 is fine for priv.
- * R0 now becomes SCALAR_VALUE.
- */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- /* Load from R0 should fail. */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "attempt to corrupt spilled",
- .errstr = "R0 invalid mem access 'inv",
- .result = REJECT,
- },
- {
- "check corrupted spill/fill, LSB",
- .insns = {
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_ST_MEM(BPF_H, BPF_REG_10, -8, 0xcafe),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "attempt to corrupt spilled",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .retval = POINTER_VALUE,
- },
- {
- "check corrupted spill/fill, MSB",
- .insns = {
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x12345678),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "attempt to corrupt spilled",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .retval = POINTER_VALUE,
- },
- {
- "invalid src register in STX",
- .insns = {
- BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R15 is invalid",
- .result = REJECT,
- },
- {
- "invalid dst register in STX",
- .insns = {
- BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R14 is invalid",
- .result = REJECT,
- },
- {
- "invalid dst register in ST",
- .insns = {
- BPF_ST_MEM(BPF_B, 14, -1, -1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R14 is invalid",
- .result = REJECT,
- },
- {
- "invalid src register in LDX",
- .insns = {
- BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R12 is invalid",
- .result = REJECT,
- },
- {
- "invalid dst register in LDX",
- .insns = {
- BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R11 is invalid",
- .result = REJECT,
- },
- {
- "junk insn",
- .insns = {
- BPF_RAW_INSN(0, 0, 0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "unknown opcode 00",
- .result = REJECT,
- },
- {
- "junk insn2",
- .insns = {
- BPF_RAW_INSN(1, 0, 0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "BPF_LDX uses reserved fields",
- .result = REJECT,
- },
- {
- "junk insn3",
- .insns = {
- BPF_RAW_INSN(-1, 0, 0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "unknown opcode ff",
- .result = REJECT,
- },
- {
- "junk insn4",
- .insns = {
- BPF_RAW_INSN(-1, -1, -1, -1, -1),
- BPF_EXIT_INSN(),
- },
- .errstr = "unknown opcode ff",
- .result = REJECT,
- },
- {
- "junk insn5",
- .insns = {
- BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
- BPF_EXIT_INSN(),
- },
- .errstr = "BPF_ALU uses reserved fields",
- .result = REJECT,
- },
- {
- "misaligned read from stack",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned stack access",
- .result = REJECT,
- },
- {
- "invalid map_fd for function call",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_delete_elem),
- BPF_EXIT_INSN(),
- },
- .errstr = "fd 0 is not pointing to valid bpf_map",
- .result = REJECT,
- },
- {
- "don't check return value before access",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 invalid mem access 'map_value_or_null'",
- .result = REJECT,
- },
- {
- "access memory with incorrect alignment",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "misaligned value access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "sometimes access memory with incorrect alignment",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 invalid mem access",
- .errstr_unpriv = "R0 leaks addr",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "jump test 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "jump test 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 14),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 11),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 5),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "jump test 3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 19),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_JMP_IMM(BPF_JA, 0, 0, 15),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
- BPF_JMP_IMM(BPF_JA, 0, 0, 11),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
- BPF_JMP_IMM(BPF_JA, 0, 0, 7),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
- BPF_JMP_IMM(BPF_JA, 0, 0, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_delete_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 24 },
- .errstr_unpriv = "R1 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .retval = -ENOENT,
- },
- {
- "jump test 4",
- .insns = {
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "jump test 5",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "access skb fields ok",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, pkt_type)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, queue_mapping)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, protocol)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, vlan_present)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, vlan_tci)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, napi_id)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "access skb fields bad1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "access skb fields bad2",
- .insns = {
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, pkt_type)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr = "different pointers",
- .errstr_unpriv = "R1 pointer comparison",
- .result = REJECT,
- },
- {
- "access skb fields bad3",
- .insns = {
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, pkt_type)),
- BPF_EXIT_INSN(),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JA, 0, 0, -12),
- },
- .fixup_map_hash_8b = { 6 },
- .errstr = "different pointers",
- .errstr_unpriv = "R1 pointer comparison",
- .result = REJECT,
- },
- {
- "access skb fields bad4",
- .insns = {
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JA, 0, 0, -13),
- },
- .fixup_map_hash_8b = { 7 },
- .errstr = "different pointers",
- .errstr_unpriv = "R1 pointer comparison",
- .result = REJECT,
- },
- {
- "invalid access __sk_buff family",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, family)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "invalid access __sk_buff remote_ip4",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip4)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "invalid access __sk_buff local_ip4",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip4)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "invalid access __sk_buff remote_ip6",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "invalid access __sk_buff local_ip6",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "invalid access __sk_buff remote_port",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_port)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "invalid access __sk_buff remote_port",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_port)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "valid access __sk_buff family",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, family)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access __sk_buff remote_ip4",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip4)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access __sk_buff local_ip4",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip4)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access __sk_buff remote_ip6",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[3])),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access __sk_buff local_ip6",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[3])),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access __sk_buff remote_port",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_port)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access __sk_buff remote_port",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_port)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "invalid access of tc_classid for SK_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_classid)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- .errstr = "invalid bpf_context access",
- },
- {
- "invalid access of skb->mark for SK_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- .errstr = "invalid bpf_context access",
- },
- {
- "check skb->mark is not writeable by SK_SKB",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- .errstr = "invalid bpf_context access",
- },
- {
- "check skb->tc_index is writeable by SK_SKB",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, tc_index)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "check skb->priority is writeable by SK_SKB",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, priority)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "direct packet read for SK_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "direct packet write for SK_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "overlapping checks for direct packet access SK_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access family in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, family)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "valid access remote_ip4 in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, remote_ip4)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "valid access local_ip4 in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, local_ip4)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "valid access remote_port in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, remote_port)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "valid access local_port in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, local_port)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "valid access remote_ip6 in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, remote_ip6[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, remote_ip6[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, remote_ip6[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, remote_ip6[3])),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
-		.prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "valid access local_ip6 in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, local_ip6[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, local_ip6[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, local_ip6[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, local_ip6[3])),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
-		.prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "valid access size in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, size)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "invalid 64B read of size in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
- offsetof(struct sk_msg_md, size)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "invalid read past end of SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct sk_msg_md, size) + 4),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "invalid read offset in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct sk_msg_md, family) + 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet read for SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
- offsetof(struct sk_msg_md, data)),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
- offsetof(struct sk_msg_md, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "direct packet write for SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
- offsetof(struct sk_msg_md, data)),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
- offsetof(struct sk_msg_md, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "overlapping checks for direct packet access SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
- offsetof(struct sk_msg_md, data)),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
- offsetof(struct sk_msg_md, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "check skb->mark is not writeable by sockets",
- .insns = {
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .errstr_unpriv = "R1 leaks addr",
- .result = REJECT,
- },
- {
- "check skb->tc_index is not writeable by sockets",
- .insns = {
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, tc_index)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .errstr_unpriv = "R1 leaks addr",
- .result = REJECT,
- },
- {
- "check cb access: byte",
- .insns = {
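-			/* Store and then load back every byte of the 20-byte cb[0..4] area. */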
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0]) + 1),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0]) + 2),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0]) + 3),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1])),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1]) + 1),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1]) + 2),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1]) + 3),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2])),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2]) + 1),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2]) + 2),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2]) + 3),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3])),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3]) + 1),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3]) + 2),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3]) + 3),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4])),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 1),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 2),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0])),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0]) + 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0]) + 2),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0]) + 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1])),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1]) + 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1]) + 2),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1]) + 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2])),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2]) + 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2]) + 2),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2]) + 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3])),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3]) + 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3]) + 2),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3]) + 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4])),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4]) + 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4]) + 2),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4]) + 3),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "__sk_buff->hash, offset 0, byte store not permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, hash)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "__sk_buff->tc_index, offset 3, byte store not permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, tc_index) + 3),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check skb->hash byte load permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
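-			/* Load the same logical byte of hash regardless of host endianness. */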
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash)),
-#else
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 3),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check skb->hash byte load permitted 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check skb->hash byte load permitted 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check skb->hash byte load permitted 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 3),
-#else
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash)),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check cb access: byte, wrong type",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "check cb access: half",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0]) + 2),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1])),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1]) + 2),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2])),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2]) + 2),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3])),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3]) + 2),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4])),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 2),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0])),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0]) + 2),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1])),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1]) + 2),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2])),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2]) + 2),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3])),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3]) + 2),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4])),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4]) + 2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check cb access: half, unaligned",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
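-			/* Half-word store at cb[0] + 1 is misaligned; rejected under strict alignment. */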
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0]) + 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned context access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "check __sk_buff->hash, offset 0, half store not permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, hash)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check __sk_buff->tc_index, offset 2, half store not permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, tc_index) + 2),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check skb->hash half load permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash)),
-#else
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 2),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check skb->hash half load permitted 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 2),
-#else
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash)),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check skb->hash half load not permitted, unaligned 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 1),
-#else
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 3),
-#endif
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check skb->hash half load not permitted, unaligned 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 3),
-#else
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 1),
-#endif
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "check cb access: half, wrong type",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "check cb access: word",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4])),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check cb access: word, unaligned 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0]) + 2),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned context access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "check cb access: word, unaligned 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned context access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "check cb access: word, unaligned 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 2),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned context access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "check cb access: word, unaligned 4",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 3),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned context access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "check cb access: double",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2])),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0])),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2])),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check cb access: double, unaligned 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1])),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned context access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "check cb access: double, unaligned 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3])),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned context access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "check cb access: double, oob 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4])),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check cb access: double, oob 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4])),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check __sk_buff->ifindex dw store not permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, ifindex)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check __sk_buff->ifindex dw load not permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, ifindex)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check cb access: double, wrong type",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "check out of range skb->cb access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0]) + 256),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .errstr_unpriv = "",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_ACT,
- },
- {
- "write skb fields from socket prog",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4])),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_index)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2])),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .errstr_unpriv = "R1 leaks addr",
- .result_unpriv = REJECT,
- },
- {
- "write skb fields from tc_cls_act prog",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, mark)),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_index)),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, tc_index)),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3])),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tstamp)),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, tstamp)),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "PTR_TO_STACK store/load",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
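-			/* fp - 10 plus offset 2 lands at fp - 8, an aligned doubleword slot. */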
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0xfaceb00c,
- },
- {
- "PTR_TO_STACK store/load - bad alignment on off",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
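-			/* fp - 8 plus offset 2 lands at fp - 6, misaligned for a doubleword access. */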
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
- },
- {
- "PTR_TO_STACK store/load - bad alignment on reg",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
- },
- {
- "PTR_TO_STACK store/load - out of bounds low",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack off=-79992 size=8",
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- },
- {
- "PTR_TO_STACK store/load - out of bounds high",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack off=0 size=8",
- },
- {
- "unpriv: return pointer",
- .insns = {
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 leaks addr",
- .retval = POINTER_VALUE,
- },
- {
- "unpriv: add const to pointer",
- .insns = {
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "unpriv: add pointer to pointer",
- .insns = {
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R1 pointer += pointer",
- },
- {
- "unpriv: neg pointer",
- .insns = {
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R1 pointer arithmetic",
- },
- {
- "unpriv: cmp pointer with const",
- .insns = {
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R1 pointer comparison",
- },
- {
- "unpriv: cmp pointer with pointer",
- .insns = {
- BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R10 pointer comparison",
- },
- {
- "unpriv: check that printk is disallowed",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_2, 8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_trace_printk),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "unknown func bpf_trace_printk#6",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: pass pointer to helper function",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_update_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr_unpriv = "R4 leaks addr",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: indirectly pass pointer on stack to helper function",
- .insns = {
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "invalid indirect read from stack off -8+0 size 8",
- .result = REJECT,
- },
- {
- "unpriv: mangle pointer on stack 1",
- .insns = {
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
- BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "attempt to corrupt spilled",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: mangle pointer on stack 2",
- .insns = {
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
- BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "attempt to corrupt spilled",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: read pointer from stack in small chunks",
- .insns = {
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid size",
- .result = REJECT,
- },
- {
- "unpriv: write pointer into ctx",
- .insns = {
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 leaks addr",
- .result_unpriv = REJECT,
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "unpriv: spill/fill of ctx",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "unpriv: spill/fill of ctx 2",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_hash_recalc),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of ctx 3",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_hash_recalc),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R1 type=fp expected=ctx",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of ctx 4",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
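-			/* The atomic add below hits the spill slot, turning the saved
-			 * ctx pointer into a scalar.
-			 */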
- BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
- BPF_REG_0, -8, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_hash_recalc),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R1 type=inv expected=ctx",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of different pointers stx",
- .insns = {
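-			/* Depending on the branch taken, either a stack pointer or the
-			 * ctx pointer is spilled to the same stack slot.
-			 */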
- BPF_MOV64_IMM(BPF_REG_3, 42),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
- offsetof(struct __sk_buff, mark)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "same insn cannot be used with different pointers",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of different pointers stx - ctx and sock",
- .insns = {
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
- /* struct bpf_sock *sock = bpf_sock_lookup(...); */
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- /* u64 foo; */
- /* void *target = &foo; */
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
- /* if (skb == NULL) *target = sock; */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
- /* else *target = skb; */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- /* struct __sk_buff *skb = *target; */
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- /* skb->mark = 42; */
- BPF_MOV64_IMM(BPF_REG_3, 42),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
- offsetof(struct __sk_buff, mark)),
- /* if (sk) bpf_sk_release(sk) */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "type=ctx expected=sock",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of different pointers stx - leak sock",
- .insns = {
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
- /* struct bpf_sock *sock = bpf_sock_lookup(...); */
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- /* u64 foo; */
- /* void *target = &foo; */
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
- /* if (skb == NULL) *target = sock; */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
- /* else *target = skb; */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- /* struct __sk_buff *skb = *target; */
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- /* skb->mark = 42; */
- BPF_MOV64_IMM(BPF_REG_3, 42),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- //.errstr = "same insn cannot be used with different pointers",
- .errstr = "Unreleased reference",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of different pointers stx - sock and ctx (read)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
- /* struct bpf_sock *sock = bpf_sock_lookup(...); */
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- /* u64 foo; */
- /* void *target = &foo; */
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
- /* if (skb) *target = skb */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- /* else *target = sock */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
- /* struct bpf_sock *sk = *target; */
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- /* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct bpf_sock, mark)),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "same insn cannot be used with different pointers",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of different pointers stx - sock and ctx (write)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
- /* struct bpf_sock *sock = bpf_sock_lookup(...); */
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- /* u64 foo; */
- /* void *target = &foo; */
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
- /* if (skb) *target = skb */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- /* else *target = sock */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
- /* struct bpf_sock *sk = *target; */
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- /* if (sk) sk->mark = 42; bpf_sk_release(sk); */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
- BPF_MOV64_IMM(BPF_REG_3, 42),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
- offsetof(struct bpf_sock, mark)),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- //.errstr = "same insn cannot be used with different pointers",
- .errstr = "cannot write into socket",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of different pointers ldx",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
- -(__s32)offsetof(struct bpf_perf_event_data,
- sample_period) - 8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
- offsetof(struct bpf_perf_event_data,
- sample_period)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "same insn cannot be used with different pointers",
- .prog_type = BPF_PROG_TYPE_PERF_EVENT,
- },
- {
- "unpriv: write pointer into map elem value",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "alu32: mov u32 const",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_7, 0),
- BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1),
- BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
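-			/* R0 is known to be zero here, so the load from the null R7 below
-			 * is never reached.
-			 */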
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "unpriv: partial copy of pointer",
- .insns = {
- BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R10 partial copy",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: pass pointer to tail_call",
- .insns = {
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 1 },
- .errstr_unpriv = "R3 leaks addr into helper",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: cmp map pointer with zero",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 1 },
- .errstr_unpriv = "R1 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: write into frame pointer",
- .insns = {
- BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "frame pointer is read only",
- .result = REJECT,
- },
- {
- "unpriv: spill/fill frame pointer",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "frame pointer is read only",
- .result = REJECT,
- },
- {
- "unpriv: cmp of frame pointer",
- .insns = {
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R10 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: adding of fp",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: cmp of stack pointer",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R2 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "runtime/jit: tail_call within bounds, prog once",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 1 },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "runtime/jit: tail_call within bounds, prog loop",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, 1),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 1 },
- .result = ACCEPT,
- .retval = 41,
- },
- {
- "runtime/jit: tail_call within bounds, no prog",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, 2),
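-			/* Index 2 has no program installed, so the tail call falls through. */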
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 1 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "runtime/jit: tail_call out of bounds",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, 256),
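-			/* Index 256 is outside the prog array, so the tail call falls through. */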
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 1 },
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "runtime/jit: pass negative index to tail_call",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, -1),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 1 },
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "runtime/jit: pass > 32bit index to tail_call",
- .insns = {
- BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 2 },
- .result = ACCEPT,
- .retval = 42,
- /* Verifier rewrite for unpriv skips tail call here. */
- .retval_unpriv = 2,
- },
- {
- "PTR_TO_STACK check high 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "PTR_TO_STACK check high 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "PTR_TO_STACK check high 3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
- BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "PTR_TO_STACK check high 4",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .errstr = "invalid stack off=0 size=1",
- .result = REJECT,
- },
- {
- "PTR_TO_STACK check high 5",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack off",
- },
- {
- "PTR_TO_STACK check high 6",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
- BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack off",
- },
- {
- "PTR_TO_STACK check high 7",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
- BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .errstr = "fp pointer offset",
- },
- {
- "PTR_TO_STACK check low 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -512),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "PTR_TO_STACK check low 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 1, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1),
- BPF_EXIT_INSN(),
- },
- .result_unpriv = REJECT,
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "PTR_TO_STACK check low 3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .errstr = "invalid stack off=-513 size=1",
- .result = REJECT,
- },
- {
- "PTR_TO_STACK check low 4",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, INT_MIN),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "math between fp pointer",
- },
- {
- "PTR_TO_STACK check low 5",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack off",
- },
- {
- "PTR_TO_STACK check low 6",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
- BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack off",
- },
- {
- "PTR_TO_STACK check low 7",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
- BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .errstr = "fp pointer offset",
- },
- {
- "PTR_TO_STACK mixed reg/k, 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
- BPF_MOV64_IMM(BPF_REG_2, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "PTR_TO_STACK mixed reg/k, 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
- BPF_MOV64_IMM(BPF_REG_2, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_5, -6),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "PTR_TO_STACK mixed reg/k, 3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
- BPF_MOV64_IMM(BPF_REG_2, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = -3,
- },
- {
- "PTR_TO_STACK reg",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_MOV64_IMM(BPF_REG_2, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .result_unpriv = REJECT,
- .errstr_unpriv = "invalid stack off=0 size=1",
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "stack pointer arithmetic",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
-			BPF_ST_MEM(BPF_W, BPF_REG_2, 4, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
-			BPF_ST_MEM(BPF_W, BPF_REG_2, 4, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "raw_stack: no skb_load_bytes",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- /* Call to skb_load_bytes() omitted. */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid read from stack off -8+0 size 8",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, negative len",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R4 min value is negative",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, negative len 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, ~0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R4 min value is negative",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, zero len",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack type R3",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, no init",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
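-			/* The helper may write into uninitialized stack memory, so the
-			 * missing init is accepted.
-			 */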
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, init",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, spilled regs around bounds",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
- offsetof(struct __sk_buff, mark)),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
- offsetof(struct __sk_buff, priority)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, spilled regs corruption",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R0 invalid mem access 'inv'",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "raw_stack: skb_load_bytes, spilled regs corruption 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
- offsetof(struct __sk_buff, mark)),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
- offsetof(struct __sk_buff, priority)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
- offsetof(struct __sk_buff, pkt_type)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R3 invalid mem access 'inv'",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "raw_stack: skb_load_bytes, spilled regs + data",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
- offsetof(struct __sk_buff, mark)),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
- offsetof(struct __sk_buff, priority)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, invalid access 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack type R3 off=-513 access_size=8",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, invalid access 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack type R3 off=-1 access_size=8",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, invalid access 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R4 min value is negative",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, invalid access 4",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, invalid access 5",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, invalid access 6",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack type R3 off=-512 access_size=0",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, large access",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 512),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "context stores via ST",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "BPF_ST stores into R1 ctx is not allowed",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "context stores via XADD",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
- BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "BPF_XADD stores into R1 ctx is not allowed",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
- BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
- BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test3",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access off=76",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- },
- {
- "direct packet access: test4 (write)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test5 (pkt_end >= reg, good access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test6 (pkt_end >= reg, bad access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid access to packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test7 (pkt_end >= reg, both accesses)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid access to packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test8 (double test, variant 1)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test9 (double test, variant 2)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test10 (write invalid)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid access to packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test11 (shift, good access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
- BPF_MOV64_IMM(BPF_REG_3, 144),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 1,
- },
- {
- "direct packet access: test12 (and, good access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
- BPF_MOV64_IMM(BPF_REG_3, 144),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 1,
- },
- {
- "direct packet access: test13 (branches, good access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_MOV64_IMM(BPF_REG_4, 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
- BPF_MOV64_IMM(BPF_REG_3, 14),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_MOV64_IMM(BPF_REG_3, 24),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 1,
- },
- {
- "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
- BPF_MOV64_IMM(BPF_REG_5, 12),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 1,
- },
- {
- "direct packet access: test15 (spill with xadd)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
- BPF_MOV64_IMM(BPF_REG_5, 4096),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R2 invalid mem access 'inv'",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet access: test16 (arith on data_end)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R3 pointer arithmetic on pkt_end",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test17 (pruning, alignment)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
- BPF_JMP_A(-6),
- },
- .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "direct packet access: test18 (imm += pkt_ptr, 1)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_IMM(BPF_REG_0, 8),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test19 (imm += pkt_ptr, 2)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
- BPF_MOV64_IMM(BPF_REG_4, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
- BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test20 (x += pkt_ptr, 1)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet access: test21 (x += pkt_ptr, 2)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
- BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet access: test22 (x += pkt_ptr, 3)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
- BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet access: test23 (x += pkt_ptr, 4)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_0, 31),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = REJECT,
- .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet access: test24 (x += pkt_ptr, 5)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_0, 64),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet access: test25 (marking on <, good access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, -4),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test26 (marking on <, bad access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JA, 0, 0, -3),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test27 (marking on <=, good access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 1,
- },
- {
- "direct packet access: test28 (marking on <=, bad access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, -4),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test1, valid packet_ptr range",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_update_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 5 },
- .result_unpriv = ACCEPT,
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "helper access to packet: test2, unchecked packet_ptr",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 1 },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "helper access to packet: test3, variable add",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
- BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 11 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "helper access to packet: test4, packet_ptr with bad range",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 7 },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "helper access to packet: test5, packet_ptr with too short range",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 6 },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "helper access to packet: test6, cls valid packet_ptr range",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_update_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 5 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test7, cls unchecked packet_ptr",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 1 },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test8, cls variable add",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
- BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 11 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test9, cls packet_ptr with bad range",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 7 },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test10, cls packet_ptr with too short range",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 6 },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test11, cls unsuitable helper 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_4, 42),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_store_bytes),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "helper access to the packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test12, cls unsuitable helper 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_4, 4),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "helper access to the packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test13, cls helper ok",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test14, cls helper ok sub",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test15, cls helper fail sub",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test16, cls helper fail range 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_2, 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test17, cls helper fail range 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_2, -9),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R2 min value is negative",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test18, cls helper fail range 3",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_2, ~0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R2 min value is negative",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test19, cls helper range zero",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test20, pkt end as input",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R1 type=pkt_end expected=fp",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test21, wrong reg",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "prevent map lookup in sockmap",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_sockmap = { 3 },
- .result = REJECT,
- .errstr = "cannot pass map_type 15 into func bpf_map_lookup_elem",
- .prog_type = BPF_PROG_TYPE_SOCK_OPS,
- },
- {
- "prevent map lookup in sockhash",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_sockhash = { 3 },
- .result = REJECT,
- .errstr = "cannot pass map_type 18 into func bpf_map_lookup_elem",
- .prog_type = BPF_PROG_TYPE_SOCK_OPS,
- },
- {
- "prevent map lookup in xskmap",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_xskmap = { 3 },
- .result = REJECT,
- .errstr = "cannot pass map_type 17 into func bpf_map_lookup_elem",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "prevent map lookup in stack trace",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_stacktrace = { 3 },
- .result = REJECT,
- .errstr = "cannot pass map_type 7 into func bpf_map_lookup_elem",
- .prog_type = BPF_PROG_TYPE_PERF_EVENT,
- },
- {
- "prevent map lookup in prog array",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_prog2 = { 3 },
- .result = REJECT,
- .errstr = "cannot pass map_type 3 into func bpf_map_lookup_elem",
- },
- {
- "valid map access into an array with a constant",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "valid map access into an array with a register",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "valid map access into an array with a variable",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "valid map access into an array with a signed variable",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
- BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid map access into an array with a constant",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=48 size=8",
- .result = REJECT,
- },
- {
- "invalid map access into an array with a register",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 min value is outside of the array range",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid map access into an array with a variable",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid map access into an array with no floor check",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
- BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .errstr = "R0 unbounded memory access",
- .result_unpriv = REJECT,
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid map access into an array with a invalid max check",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .errstr = "invalid access to map value, value_size=48 off=44 size=8",
- .result_unpriv = REJECT,
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid map access into an array with a invalid max check",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3, 11 },
- .errstr = "R0 pointer += pointer",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet read test#1 for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
- offsetof(struct __sk_buff, pkt_type)),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
- offsetof(struct __sk_buff, mark)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, queue_mapping)),
- BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
- offsetof(struct __sk_buff, protocol)),
- BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
- offsetof(struct __sk_buff, vlan_present)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "invalid bpf_context access off=76 size=4",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "direct packet read test#2 for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct __sk_buff, vlan_tci)),
- BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
- offsetof(struct __sk_buff, vlan_proto)),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, priority)),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
- offsetof(struct __sk_buff, priority)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff,
- ingress_ifindex)),
- BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
- offsetof(struct __sk_buff, tc_index)),
- BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
- offsetof(struct __sk_buff, hash)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "direct packet read test#3 for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3])),
- BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4])),
- BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
- offsetof(struct __sk_buff, napi_id)),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_4,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_5,
- offsetof(struct __sk_buff, cb[1])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
- offsetof(struct __sk_buff, cb[2])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7,
- offsetof(struct __sk_buff, cb[3])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_8,
- offsetof(struct __sk_buff, cb[4])),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "direct packet read test#4 for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, family)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip4)),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip4)),
- BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[3])),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[3])),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, remote_port)),
- BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
- offsetof(struct __sk_buff, local_port)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid access of tc_classid for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_classid)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid access of data_meta for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, data_meta)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid access of flow_keys for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, flow_keys)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid write access to napi_id for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
- offsetof(struct __sk_buff, napi_id)),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_9,
- offsetof(struct __sk_buff, napi_id)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "valid cgroup storage access",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_cgroup_storage = { 1 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid cgroup storage access 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 1 },
- .result = REJECT,
- .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid cgroup storage access 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "fd 1 is not pointing to valid bpf_map",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid cgroup storage access 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=64 off=256 size=4",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid cgroup storage access 4",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid cgroup storage access 5",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 7),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "get_local_storage() doesn't support non-zero flags",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid cgroup storage access 6",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "get_local_storage() doesn't support non-zero flags",
- .errstr_unpriv = "R2 leaks addr into helper function",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "valid per-cpu cgroup storage access",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_percpu_cgroup_storage = { 1 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid per-cpu cgroup storage access 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 1 },
- .result = REJECT,
- .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid per-cpu cgroup storage access 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "fd 1 is not pointing to valid bpf_map",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid per-cpu cgroup storage access 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_percpu_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=64 off=256 size=4",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid per-cpu cgroup storage access 4",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid per-cpu cgroup storage access 5",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 7),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_percpu_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "get_local_storage() doesn't support non-zero flags",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid per-cpu cgroup storage access 6",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_percpu_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "get_local_storage() doesn't support non-zero flags",
- .errstr_unpriv = "R2 leaks addr into helper function",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "write tstamp from CGROUP_SKB",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, tstamp)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "invalid bpf_context access off=152 size=8",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "read tstamp from CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tstamp)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "multiple registers share map_lookup_elem result",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 10),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
- },
- {
- "alu ops on ptr_to_map_value_or_null, 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 10),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr = "R4 pointer arithmetic on map_value_or_null",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
- },
- {
- "alu ops on ptr_to_map_value_or_null, 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 10),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr = "R4 pointer arithmetic on map_value_or_null",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
- },
- {
- "alu ops on ptr_to_map_value_or_null, 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 10),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr = "R4 pointer arithmetic on map_value_or_null",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
- },
- {
- "invalid memory access with multiple map_lookup_elem calls",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 10),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .result = REJECT,
- .errstr = "R4 !read_ok",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
- },
- {
- "valid indirect map_lookup_elem access with 2nd lookup in branch",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 10),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_2, 10),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
- },
- {
- "invalid map access from else condition",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 unbounded memory access",
- .result = REJECT,
- .errstr_unpriv = "R0 leaks addr",
- .result_unpriv = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "constant register |= constant should keep constant type",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
- BPF_MOV64_IMM(BPF_REG_2, 34),
- BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "constant register |= constant should not bypass stack boundary checks",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
- BPF_MOV64_IMM(BPF_REG_2, 34),
- BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid stack type R1 off=-48 access_size=58",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "constant register |= constant register should keep constant type",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
- BPF_MOV64_IMM(BPF_REG_2, 34),
- BPF_MOV64_IMM(BPF_REG_4, 13),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "constant register |= constant register should not bypass stack boundary checks",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
- BPF_MOV64_IMM(BPF_REG_2, 34),
- BPF_MOV64_IMM(BPF_REG_4, 24),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid stack type R1 off=-48 access_size=58",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "invalid direct packet write for LWT_IN",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "cannot write into packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_LWT_IN,
- },
- {
- "invalid direct packet write for LWT_OUT",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "cannot write into packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_LWT_OUT,
- },
- {
- "direct packet write for LWT_XMIT",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_LWT_XMIT,
- },
- {
- "direct packet read for LWT_IN",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_LWT_IN,
- },
- {
- "direct packet read for LWT_OUT",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_LWT_OUT,
- },
- {
- "direct packet read for LWT_XMIT",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_LWT_XMIT,
- },
- {
- "overlapping checks for direct packet access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_LWT_XMIT,
- },
- {
- "make headroom for LWT_XMIT",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_2, 34),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
- /* split for s390 to succeed */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_2, 42),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_LWT_XMIT,
- },
- {
- "invalid access of tc_classid for LWT_IN",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_classid)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- },
- {
- "invalid access of tc_classid for LWT_OUT",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_classid)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- },
- {
- "invalid access of tc_classid for LWT_XMIT",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_classid)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- },
- {
- "leak pointer into ctx 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
- offsetof(struct __sk_buff, cb[0])),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 2 },
- .errstr_unpriv = "R2 leaks addr into mem",
- .result_unpriv = REJECT,
- .result = REJECT,
- .errstr = "BPF_XADD stores into R1 ctx is not allowed",
- },
- {
- "leak pointer into ctx 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
- offsetof(struct __sk_buff, cb[0])),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R10 leaks addr into mem",
- .result_unpriv = REJECT,
- .result = REJECT,
- .errstr = "BPF_XADD stores into R1 ctx is not allowed",
- },
- {
- "leak pointer into ctx 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
- offsetof(struct __sk_buff, cb[0])),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 1 },
- .errstr_unpriv = "R2 leaks addr into ctx",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "leak pointer into map val",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
- BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr_unpriv = "R6 leaks addr into mem",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "helper access to map: full range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: partial range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: empty range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_EMIT_CALL(BPF_FUNC_trace_printk),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=0 size=0",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: out-of-bound range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=0 size=56",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: negative range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, -8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R2 min value is negative",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const imm): full range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_2,
- sizeof(struct test_val) -
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const imm): partial range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_2, 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const imm): empty range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_EMIT_CALL(BPF_FUNC_trace_printk),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=4 size=0",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const imm): out-of-bound range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_2,
- sizeof(struct test_val) -
- offsetof(struct test_val, foo) + 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=4 size=52",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const imm): negative range (> adjustment)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_2, -8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R2 min value is negative",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const imm): negative range (< adjustment)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R2 min value is negative",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const reg): full range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3,
- offsetof(struct test_val, foo)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2,
- sizeof(struct test_val) -
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const reg): partial range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3,
- offsetof(struct test_val, foo)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const reg): empty range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_EMIT_CALL(BPF_FUNC_trace_printk),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R1 min value is outside of the array range",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const reg): out-of-bound range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3,
- offsetof(struct test_val, foo)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2,
- sizeof(struct test_val) -
- offsetof(struct test_val, foo) + 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=4 size=52",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const reg): negative range (> adjustment)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3,
- offsetof(struct test_val, foo)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, -8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R2 min value is negative",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const reg): negative range (< adjustment)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3,
- offsetof(struct test_val, foo)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R2 min value is negative",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via variable): full range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
- offsetof(struct test_val, foo), 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2,
- sizeof(struct test_val) -
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via variable): partial range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
- offsetof(struct test_val, foo), 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via variable): empty range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
- offsetof(struct test_val, foo), 3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_EMIT_CALL(BPF_FUNC_trace_printk),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R1 min value is outside of the array range",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via variable): no max check",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R1 unbounded memory access",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via variable): wrong max check",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
- offsetof(struct test_val, foo), 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2,
- sizeof(struct test_val) -
- offsetof(struct test_val, foo) + 1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=4 size=45",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using <, good access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using <, bad access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = REJECT,
- .errstr = "R1 unbounded memory access",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using <=, good access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using <=, bad access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = REJECT,
- .errstr = "R1 unbounded memory access",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using s<, good access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using s<, good access 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using s<, bad access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = REJECT,
- .errstr = "R1 min value is negative",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using s<=, good access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using s<=, good access 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using s<=, bad access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = REJECT,
- .errstr = "R1 min value is negative",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map access: known scalar += value_ptr from different maps",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 5 },
- .fixup_map_array_48b = { 8 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R1 tried to add from different maps",
- .retval = 1,
- },
- {
- "map access: value_ptr -= known scalar from different maps",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 5 },
- .fixup_map_array_48b = { 8 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 min value is outside of the array range",
- .retval = 1,
- },
- {
- "map access: known scalar += value_ptr from different maps, but same value properties",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 5 },
- .fixup_map_array_48b = { 8 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "map access: mixing value pointer and scalar, 1",
- .insns = {
- // load map value pointer into r0 and r2
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
- BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
- BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- // load some number from the map into r1
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- // depending on r1, branch:
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 3),
- // branch A
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_JMP_A(2),
- // branch B
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0x100000),
- // common instruction
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- // depending on r1, branch:
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- // branch A
- BPF_JMP_A(4),
- // branch B
- BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
- // verifier follows fall-through
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- // fake-dead code; targeted from branch A to
- // prevent dead code sanitization
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 1 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R2 tried to add from different pointers or scalars",
- .retval = 0,
- },
- {
- "map access: mixing value pointer and scalar, 2",
- .insns = {
- // load map value pointer into r0 and r2
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
- BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
- BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- // load some number from the map into r1
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- // depending on r1, branch:
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
- // branch A
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0x100000),
- BPF_JMP_A(2),
- // branch B
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- // common instruction
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- // depending on r1, branch:
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- // branch A
- BPF_JMP_A(4),
- // branch B
- BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
- // verifier follows fall-through
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- // fake-dead code; targeted from branch A to
- // prevent dead code sanitization
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 1 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R2 tried to add from different maps or paths",
- .retval = 0,
- },
- {
- "sanitation: alu with different scalars",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
- BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
- BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0x100000),
- BPF_JMP_A(2),
- BPF_MOV64_IMM(BPF_REG_2, 42),
- BPF_MOV64_IMM(BPF_REG_3, 0x100001),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 1 },
- .result = ACCEPT,
- .retval = 0x100000,
- },
- {
- "map access: value_ptr += known scalar, upper oob arith, test 1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_1, 48),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: value_ptr += known scalar, upper oob arith, test 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_1, 49),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: value_ptr += known scalar, upper oob arith, test 3",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_1, 47),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: value_ptr -= known scalar, lower oob arith, test 1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_IMM(BPF_REG_1, 47),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 48),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R0 min value is outside of the array range",
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- },
- {
- "map access: value_ptr -= known scalar, lower oob arith, test 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_IMM(BPF_REG_1, 47),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 48),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: value_ptr -= known scalar, lower oob arith, test 3",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_IMM(BPF_REG_1, 47),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 47),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: known scalar += value_ptr",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "map access: value_ptr += known scalar, 1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "map access: value_ptr += known scalar, 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, 49),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "invalid access to map value",
- },
- {
- "map access: value_ptr += known scalar, 3",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, -1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "invalid access to map value",
- },
- {
- "map access: value_ptr += known scalar, 4",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_IMM(BPF_REG_1, 5),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, -2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, -1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: value_ptr += known scalar, 5",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, (6 + 1) * sizeof(int)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 0xabcdef12,
- },
- {
- "map access: value_ptr += known scalar, 6",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_IMM(BPF_REG_1, (3 + 1) * sizeof(int)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 3 * sizeof(int)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 0xabcdef12,
- },
- {
- "map access: unknown scalar += value_ptr, 1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "map access: unknown scalar += value_ptr, 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 0xabcdef12,
- },
- {
- "map access: unknown scalar += value_ptr, 3",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_MOV64_IMM(BPF_REG_1, -1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 0xabcdef12,
- },
- {
- "map access: unknown scalar += value_ptr, 4",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_IMM(BPF_REG_1, 19),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R1 max value is outside of the array range",
- .errstr_unpriv = "R1 pointer arithmetic of map value goes out of range",
- },
- {
- "map access: value_ptr += unknown scalar, 1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "map access: value_ptr += unknown scalar, 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 0xabcdef12,
- },
- {
- "map access: value_ptr += unknown scalar, 3",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 16),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 1),
- BPF_ALU64_IMM(BPF_OR, BPF_REG_3, 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_JMP_IMM(BPF_JA, 0, 0, -3),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "map access: value_ptr += value_ptr",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R0 pointer += pointer prohibited",
- },
- {
- "map access: known scalar -= value_ptr",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R1 tried to subtract pointer from scalar",
- },
- {
- "map access: value_ptr -= known scalar",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R0 min value is outside of the array range",
- },
- {
- "map access: value_ptr -= known scalar, 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_IMM(BPF_REG_1, 6),
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: unknown scalar -= value_ptr",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R1 tried to subtract pointer from scalar",
- },
- {
- "map access: value_ptr -= unknown scalar",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R0 min value is negative",
- },
- {
- "map access: value_ptr -= unknown scalar, 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
- BPF_ALU64_IMM(BPF_OR, BPF_REG_1, 0x7),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: value_ptr -= value_ptr",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R0 invalid mem access 'inv'",
- .errstr_unpriv = "R0 pointer -= pointer prohibited",
- },
- {
- "map lookup helper access to map",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 8 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map update helper access to map",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 10 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map update helper access to map: wrong size",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .fixup_map_hash_16b = { 10 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=8 off=0 size=16",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via const imm)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
- offsetof(struct other_val, bar)),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 9 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via const imm): out-of-bound 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
- sizeof(struct other_val) - 4),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 9 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=16 off=12 size=8",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via const imm): out-of-bound 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 9 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via const reg)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3,
- offsetof(struct other_val, bar)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 10 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via const reg): out-of-bound 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3,
- sizeof(struct other_val) - 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 10 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=16 off=12 size=8",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via const reg): out-of-bound 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3, -4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 10 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via variable)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
- offsetof(struct other_val, bar), 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 11 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via variable): no max check",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 10 },
- .result = REJECT,
- .errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via variable): wrong max check",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
- offsetof(struct other_val, bar) + 1, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 11 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=16 off=9 size=8",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map element value is preserved across register spilling",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result = ACCEPT,
- .result_unpriv = REJECT,
- },
- {
- "map element value or null is marked on register spilling",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result = ACCEPT,
- .result_unpriv = REJECT,
- },
- {
- "map element value store of cleared call register",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R1 !read_ok",
- .errstr = "R1 !read_ok",
- .result = REJECT,
- .result_unpriv = REJECT,
- },
- {
- "map element value with unaligned store",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
- BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
- BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
- BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
- BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
- BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
- BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
- BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
- BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
- BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "map element value with unaligned load",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "map element value illegal alu op, 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 bitwise operator &= on pointer",
- .result = REJECT,
- },
- {
- "map element value illegal alu op, 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 32-bit pointer arithmetic prohibited",
- .result = REJECT,
- },
- {
- "map element value illegal alu op, 3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 pointer arithmetic with /= operator",
- .result = REJECT,
- },
- {
- "map element value illegal alu op, 4",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 pointer arithmetic prohibited",
- .errstr = "invalid mem access 'inv'",
- .result = REJECT,
- .result_unpriv = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "map element value illegal alu op, 5",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_IMM(BPF_REG_3, 4096),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 invalid mem access 'inv'",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "map element value is preserved across register spilling",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
- offsetof(struct test_val, foo)),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, bitwise AND, zero included",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid indirect read from stack off -64+0 size 64",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid stack type R1 off=-64 access_size=65",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, JMP, correct bounds",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, JMP (signed), correct bounds",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, JMP, bounds + offset",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid stack type R1 off=-64 access_size=65",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, JMP, wrong max",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid stack type R1 off=-64 access_size=65",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, JMP, no max check",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- /* because max wasn't checked, signed min is negative */
- .errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, JMP, no min check",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid indirect read from stack off -64+0 size 64",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, JMP (signed), no min check",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R2 min value is negative",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: map, JMP, correct bounds",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
- sizeof(struct test_val), 4),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: map, JMP, wrong max",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
- sizeof(struct test_val) + 1, 4),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=0 size=49",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: map adjusted, JMP, correct bounds",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
- sizeof(struct test_val) - 20, 4),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: map adjusted, JMP, wrong max",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
- sizeof(struct test_val) - 19, 4),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R1 min value is outside of the array range",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 type=inv expected=fp",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 0 /* csum_diff of 64-byte packet */,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 type=inv expected=fp",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 type=inv expected=fp",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: 8 bytes leak",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid indirect read from stack off -64+32 size 64",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: 8 bytes no leak (init memory)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "invalid and of negative number",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 max value is outside of the array range",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid range check",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_9, 1),
- BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
- BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
- BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
- BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
- BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
- BPF_MOV32_IMM(BPF_REG_3, 1),
- BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
- BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
- BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
- BPF_MOV64_REG(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 max value is outside of the array range",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "map in map access",
- .insns = {
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_in_map = { 3 },
- .result = ACCEPT,
- },
- {
- "invalid inner map pointer",
- .insns = {
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_in_map = { 3 },
- .errstr = "R1 pointer arithmetic on map_ptr prohibited",
- .result = REJECT,
- },
- {
- "forgot null checking on the inner map pointer",
- .insns = {
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_in_map = { 3 },
- .errstr = "R1 type=map_value_or_null expected=map_ptr",
- .result = REJECT,
- },
- {
- "ld_abs: check calling conv, r1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_LD_ABS(BPF_W, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 !read_ok",
- .result = REJECT,
- },
- {
- "ld_abs: check calling conv, r2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_ABS(BPF_W, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .errstr = "R2 !read_ok",
- .result = REJECT,
- },
- {
- "ld_abs: check calling conv, r3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_LD_ABS(BPF_W, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
- BPF_EXIT_INSN(),
- },
- .errstr = "R3 !read_ok",
- .result = REJECT,
- },
- {
- "ld_abs: check calling conv, r4",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_LD_ABS(BPF_W, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
- BPF_EXIT_INSN(),
- },
- .errstr = "R4 !read_ok",
- .result = REJECT,
- },
- {
- "ld_abs: check calling conv, r5",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_LD_ABS(BPF_W, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .errstr = "R5 !read_ok",
- .result = REJECT,
- },
- {
- "ld_abs: check calling conv, r7",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_7, 0),
- BPF_LD_ABS(BPF_W, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "ld_abs: tests on r6 and skb data reload helper",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_6, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_3, 2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_vlan_push),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 42 /* ultimate return value */,
- },
- {
- "ld_ind: check calling conv, r1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 !read_ok",
- .result = REJECT,
- },
- {
- "ld_ind: check calling conv, r2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .errstr = "R2 !read_ok",
- .result = REJECT,
- },
- {
- "ld_ind: check calling conv, r3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_3, 1),
- BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
- BPF_EXIT_INSN(),
- },
- .errstr = "R3 !read_ok",
- .result = REJECT,
- },
- {
- "ld_ind: check calling conv, r4",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_4, 1),
- BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
- BPF_EXIT_INSN(),
- },
- .errstr = "R4 !read_ok",
- .result = REJECT,
- },
- {
- "ld_ind: check calling conv, r5",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .errstr = "R5 !read_ok",
- .result = REJECT,
- },
- {
- "ld_ind: check calling conv, r7",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_7, 1),
- BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "check bpf_perf_event_data->sample_period byte load permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct bpf_perf_event_data, sample_period)),
-#else
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct bpf_perf_event_data, sample_period) + 7),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_PERF_EVENT,
- },
- {
- "check bpf_perf_event_data->sample_period half load permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct bpf_perf_event_data, sample_period)),
-#else
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct bpf_perf_event_data, sample_period) + 6),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_PERF_EVENT,
- },
- {
- "check bpf_perf_event_data->sample_period word load permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct bpf_perf_event_data, sample_period)),
-#else
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct bpf_perf_event_data, sample_period) + 4),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_PERF_EVENT,
- },
- {
- "check bpf_perf_event_data->sample_period dword load permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct bpf_perf_event_data, sample_period)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_PERF_EVENT,
- },
- {
- "check skb->data half load not permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
-#else
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, data) + 2),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- },
- {
- "check skb->tc_classid half load not permitted for lwt prog",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_classid)),
-#else
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_classid) + 2),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- .prog_type = BPF_PROG_TYPE_LWT_IN,
- },
- {
- "bounds checks mixing signed and unsigned, positive bounds",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, 2),
- BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
- BPF_MOV64_IMM(BPF_REG_8, 0),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
- BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 3",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
- BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 4",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 5",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 6",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_6, -1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R4 min value is negative, either use unsigned",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 7",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 8",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 9",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 10",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 11",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
- /* Dead branch. */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 12",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -6),
- BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 13",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, 2),
- BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
- BPF_MOV64_IMM(BPF_REG_7, 1),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R7 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 14",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_MOV64_IMM(BPF_REG_8, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
- BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
- BPF_JMP_IMM(BPF_JA, 0, 0, -7),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 15",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -6),
- BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- .result_unpriv = REJECT,
- },
- {
- "subtraction bounds (map value) variant 1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
- BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 max value is outside of the array range",
- .result = REJECT,
- },
- {
- "subtraction bounds (map value) variant 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
- BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "check subtraction on pointers for unpriv",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
- BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 9),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_9, BPF_REG_FP),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_0),
- BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
- BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 1, 9 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R9 pointer -= pointer prohibited",
- },
- {
- "bounds check based on zero-extended MOV",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- /* r2 = 0x0000'0000'ffff'ffff */
- BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
- /* r2 = 0 */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
- /* no-op */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- /* access at offset 0 */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT
- },
- {
- "bounds check based on sign-extended MOV. test1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- /* r2 = 0xffff'ffff'ffff'ffff */
- BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
- /* r2 = 0xffff'ffff */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
- /* r0 = <oob pointer> */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- /* access to OOB pointer */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "map_value pointer and 4294967295",
- .result = REJECT
- },
- {
- "bounds check based on sign-extended MOV. test2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- /* r2 = 0xffff'ffff'ffff'ffff */
- BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
- /* r2 = 0xfff'ffff */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
- /* r0 = <oob pointer> */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- /* access to OOB pointer */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 min value is outside of the array range",
- .result = REJECT
- },
- {
- "bounds check based on reg_off + var_off + insn_off. test1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr = "value_size=8 off=1073741825",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "bounds check based on reg_off + var_off + insn_off. test2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr = "value 1073741823",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "bounds check after truncation of non-boundary-crossing range",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- /* r1 = [0x00, 0xff] */
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- /* r2 = 0x10'0000'0000 */
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
- /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
- /* r1 = [0x00, 0xff] */
- BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
- /* r1 = 0 */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
- /* no-op */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* access at offset 0 */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT
- },
- {
- "bounds check after truncation of boundary-crossing range (1)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- /* r1 = [0x00, 0xff] */
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = [0xffff'ff80, 0x1'0000'007f] */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = [0xffff'ff80, 0xffff'ffff] or
- * [0x0000'0000, 0x0000'007f]
- */
- BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = [0x00, 0xff] or
- * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
- */
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = 0 or
- * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
- */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
- /* no-op or OOB pointer computation */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* potentially OOB access */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- /* not actually fully unbounded, but the bound is very high */
- .errstr = "R0 unbounded memory access",
- .result = REJECT
- },
- {
- "bounds check after truncation of boundary-crossing range (2)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- /* r1 = [0x00, 0xff] */
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = [0xffff'ff80, 0x1'0000'007f] */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = [0xffff'ff80, 0xffff'ffff] or
- * [0x0000'0000, 0x0000'007f]
- * difference to previous test: truncation via MOV32
- * instead of ALU32.
- */
- BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = [0x00, 0xff] or
- * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
- */
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = 0 or
- * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
- */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
- /* no-op or OOB pointer computation */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* potentially OOB access */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- /* not actually fully unbounded, but the bound is very high */
- .errstr = "R0 unbounded memory access",
- .result = REJECT
- },
- {
- "bounds check after wrapping 32-bit addition",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- /* r1 = 0x7fff'ffff */
- BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
- /* r1 = 0xffff'fffe */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
- /* r1 = 0 */
- BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
- /* no-op */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* access at offset 0 */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT
- },
- {
- "bounds check after shift with oversized count operand",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_IMM(BPF_REG_2, 32),
- BPF_MOV64_IMM(BPF_REG_1, 1),
- /* r1 = (u32)1 << (u32)32 = ? */
- BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
- /* r1 = [0x0000, 0xffff] */
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
- /* computes unknown pointer, potentially OOB */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* potentially OOB access */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 max value is outside of the array range",
- .result = REJECT
- },
- {
- "bounds check after right shift of maybe-negative number",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- /* r1 = [0x00, 0xff] */
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- /* r1 = [-0x01, 0xfe] */
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
- /* r1 = 0 or 0xff'ffff'ffff'ffff */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
- /* r1 = 0 or 0xffff'ffff'ffff */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
- /* computes unknown pointer, potentially OOB */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* potentially OOB access */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 unbounded memory access",
- .result = REJECT
- },
- {
- "bounds check after 32-bit right shift with 64-bit input",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- /* r1 = 2 */
- BPF_MOV64_IMM(BPF_REG_1, 2),
- /* r1 = 1<<32 */
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 31),
- /* r1 = 0 (NOT 2!) */
- BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 31),
- /* r1 = 0xffff'fffe (NOT 0!) */
- BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 2),
- /* computes OOB pointer */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* OOB access */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 invalid mem access",
- .result = REJECT,
- },
- {
- "bounds check map access with off+size signed 32bit overflow. test1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
- BPF_JMP_A(0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "map_value pointer and 2147483646",
- .result = REJECT
- },
- {
- "bounds check map access with off+size signed 32bit overflow. test2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
- BPF_JMP_A(0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "pointer offset 1073741822",
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .result = REJECT
- },
- {
- "bounds check map access with off+size signed 32bit overflow. test3",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
- BPF_JMP_A(0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "pointer offset -1073741822",
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .result = REJECT
- },
- {
- "bounds check map access with off+size signed 32bit overflow. test4",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_1, 1000000),
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
- BPF_JMP_A(0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "map_value pointer and 1000000000000",
- .result = REJECT
- },
- {
- "pointer/scalar confusion in state equality check (way 1)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
- BPF_JMP_A(1),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
- BPF_JMP_A(0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .retval = POINTER_VALUE,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 leaks addr as return value"
- },
- {
- "pointer/scalar confusion in state equality check (way 2)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
- BPF_JMP_A(1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .retval = POINTER_VALUE,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 leaks addr as return value"
- },
- {
- "variable-offset ctx access",
- .insns = {
- /* Get an unknown value */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
- /* Make it small and 4-byte aligned */
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
- /* add it to skb. We now have either &skb->len or
- * &skb->pkt_type, but we don't know which
- */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- /* dereference it */
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "variable ctx access var_off=(0x0; 0x4)",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_LWT_IN,
- },
- {
- "variable-offset stack access",
- .insns = {
- /* Fill the top 8 bytes of the stack */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- /* Get an unknown value */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
- /* Make it small and 4-byte aligned */
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
- /* add it to fp. We now have either fp-4 or fp-8, but
- * we don't know which
- */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
- /* dereference it */
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_LWT_IN,
- },
- {
- "indirect variable-offset stack access",
- .insns = {
- /* Fill the top 8 bytes of the stack */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- /* Get an unknown value */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
- /* Make it small and 4-byte aligned */
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
- /* add it to fp. We now have either fp-4 or fp-8, but
- * we don't know which
- */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
- /* dereference it indirectly */
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 5 },
- .errstr = "variable stack read R2",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_LWT_IN,
- },
- {
- "direct stack access with 32-bit wraparound. test1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
- BPF_MOV32_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_EXIT_INSN()
- },
- .errstr = "fp pointer and 2147483647",
- .result = REJECT
- },
- {
- "direct stack access with 32-bit wraparound. test2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
- BPF_MOV32_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_EXIT_INSN()
- },
- .errstr = "fp pointer and 1073741823",
- .result = REJECT
- },
- {
- "direct stack access with 32-bit wraparound. test3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
- BPF_MOV32_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_EXIT_INSN()
- },
- .errstr = "fp pointer offset 1073741822",
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .result = REJECT
- },
- {
- "liveness pruning and write screening",
- .insns = {
- /* Get an unknown value */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
- /* branch conditions teach us nothing about R2 */
- BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 !read_ok",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_LWT_IN,
- },
- {
- "varlen_map_value_access pruning",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
- BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .errstr = "R0 unbounded memory access",
- .result_unpriv = REJECT,
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid 64-bit BPF_END",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 0),
- {
- .code = BPF_ALU64 | BPF_END | BPF_TO_LE,
- .dst_reg = BPF_REG_0,
- .src_reg = 0,
- .off = 0,
- .imm = 32,
- },
- BPF_EXIT_INSN(),
- },
- .errstr = "unknown opcode d7",
- .result = REJECT,
- },
- {
- "XDP, using ifindex from netdev",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, ingress_ifindex)),
- BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .retval = 1,
- },
- {
- "meta access, test1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet, off=-8",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test3",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test4",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test5",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
- BPF_MOV64_IMM(BPF_REG_2, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_xdp_adjust_meta),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R3 !read_ok",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test6",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test7",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test8",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test9",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test10",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_IMM(BPF_REG_5, 42),
- BPF_MOV64_IMM(BPF_REG_6, 24),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test11",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_IMM(BPF_REG_5, 42),
- BPF_MOV64_IMM(BPF_REG_6, 24),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test12",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
- BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
- BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "arithmetic ops make PTR_TO_CTX unusable",
- .insns = {
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
- offsetof(struct __sk_buff, data) -
- offsetof(struct __sk_buff, mark)),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .errstr = "dereference of modified ctx ptr",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "pkt_end - pkt_start is allowed",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = TEST_DATA_LEN,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "XDP pkt read, pkt_end mangling, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R3 pointer arithmetic on pkt_end",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "XDP pkt read, pkt_end mangling, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R3 pointer arithmetic on pkt_end",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "XDP pkt read, pkt_data' > pkt_end, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' > pkt_end, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' > pkt_end, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end > pkt_data', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end > pkt_data', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end > pkt_data', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' < pkt_end, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' < pkt_end, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' < pkt_end, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end < pkt_data', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end < pkt_data', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end < pkt_data', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' >= pkt_end, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end >= pkt_data', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end >= pkt_data', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end >= pkt_data', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' <= pkt_end, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end <= pkt_data', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end <= pkt_data', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end <= pkt_data', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' > pkt_data, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' > pkt_data, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' > pkt_data, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data > pkt_meta', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data > pkt_meta', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data > pkt_meta', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' < pkt_data, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' < pkt_data, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data < pkt_meta', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data < pkt_meta', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data < pkt_meta', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' >= pkt_data, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data >= pkt_meta', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data >= pkt_meta', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data >= pkt_meta', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' <= pkt_data, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data <= pkt_meta', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data <= pkt_meta', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "check deducing bounds from const, 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R0 tried to subtract pointer from scalar",
- },
- {
- "check deducing bounds from const, 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "check deducing bounds from const, 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R0 tried to subtract pointer from scalar",
- },
- {
- "check deducing bounds from const, 4",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check deducing bounds from const, 5",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R0 tried to subtract pointer from scalar",
- },
- {
- "check deducing bounds from const, 6",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R0 tried to subtract pointer from scalar",
- },
- {
- "check deducing bounds from const, 7",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, ~0),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "dereference of modified ctx ptr",
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "check deducing bounds from const, 8",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, ~0),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "dereference of modified ctx ptr",
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "check deducing bounds from const, 9",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R0 tried to subtract pointer from scalar",
- },
- {
- "check deducing bounds from const, 10",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
- /* Marks reg as unknown. */
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
- },
- {
- "bpf_exit with invalid return code. test1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 has value (0x0; 0xffffffff)",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "bpf_exit with invalid return code. test2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "bpf_exit with invalid return code. test3",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 has value (0x0; 0x3)",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "bpf_exit with invalid return code. test4",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "bpf_exit with invalid return code. test5",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 has value (0x2; 0x0)",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "bpf_exit with invalid return code. test6",
- .insns = {
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 is not a known value (ctx)",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "bpf_exit with invalid return code. test7",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 has unknown scalar value",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "calls: basic sanity",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .result = ACCEPT,
- },
- {
- "calls: not on unpriviledged",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "calls: div by 0 in subprog",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV32_IMM(BPF_REG_2, 0),
- BPF_MOV32_IMM(BPF_REG_3, 1),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "calls: multiple ret types in subprog 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = REJECT,
- .errstr = "R0 invalid mem access 'inv'",
- },
- {
- "calls: multiple ret types in subprog 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
- offsetof(struct __sk_buff, data)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_hash_8b = { 16 },
- .result = REJECT,
- .errstr = "R0 min value is outside of the array range",
- },
- {
- "calls: overlapping caller/callee",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "last insn is not an exit or jmp",
- .result = REJECT,
- },
- {
- "calls: wrong recursive calls",
- .insns = {
- BPF_JMP_IMM(BPF_JA, 0, 0, 4),
- BPF_JMP_IMM(BPF_JA, 0, 0, 4),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "jump out of range",
- .result = REJECT,
- },
- {
- "calls: wrong src reg",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "BPF_CALL uses reserved fields",
- .result = REJECT,
- },
- {
- "calls: wrong off value",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "BPF_CALL uses reserved fields",
- .result = REJECT,
- },
- {
- "calls: jump back loop",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge from insn 0 to 0",
- .result = REJECT,
- },
- {
- "calls: conditional call",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "jump out of range",
- .result = REJECT,
- },
- {
- "calls: conditional call 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .result = ACCEPT,
- },
- {
- "calls: conditional call 3",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_JMP_IMM(BPF_JA, 0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, -6),
- BPF_MOV64_IMM(BPF_REG_0, 3),
- BPF_JMP_IMM(BPF_JA, 0, 0, -6),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge from insn",
- .result = REJECT,
- },
- {
- "calls: conditional call 4",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, -5),
- BPF_MOV64_IMM(BPF_REG_0, 3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .result = ACCEPT,
- },
- {
- "calls: conditional call 5",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, -6),
- BPF_MOV64_IMM(BPF_REG_0, 3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge from insn",
- .result = REJECT,
- },
- {
- "calls: conditional call 6",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge from insn",
- .result = REJECT,
- },
- {
- "calls: using r0 returned by callee",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .result = ACCEPT,
- },
- {
- "calls: using uninit r0 from callee",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "!read_ok",
- .result = REJECT,
- },
- {
- "calls: callee is using r1",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_ACT,
- .result = ACCEPT,
- .retval = TEST_DATA_LEN,
- },
- {
- "calls: callee using args1",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "allowed for root only",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .retval = POINTER_VALUE,
- },
- {
- "calls: callee using wrong args2",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "R2 !read_ok",
- .result = REJECT,
- },
- {
- "calls: callee using two args",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
- offsetof(struct __sk_buff, len)),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
- offsetof(struct __sk_buff, len)),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "allowed for root only",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
- },
- {
- "calls: callee changing pkt pointers",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- /* clear_all_pkt_pointers() has to walk all frames
- * to make sure that pkt pointers in the caller
- * are cleared when callee is calling a helper that
- * adjusts packet size
- */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_MOV32_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_xdp_adjust_head),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R6 invalid mem access 'inv'",
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: two calls with args",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = TEST_DATA_LEN + TEST_DATA_LEN,
- },
- {
- "calls: calls with stack arith",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "calls: calls with misaligned stack access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- .errstr = "misaligned stack access",
- .result = REJECT,
- },
- {
- "calls: calls control flow, jump test",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 43),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, -3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 43,
- },
- {
- "calls: calls control flow, jump test 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 43),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "jump out of range from insn 1 to 4",
- .result = REJECT,
- },
- {
- "calls: two calls with bad jump",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "jump out of range from insn 11 to 9",
- .result = REJECT,
- },
- {
- "calls: recursive call. test1",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge",
- .result = REJECT,
- },
- {
- "calls: recursive call. test2",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge",
- .result = REJECT,
- },
- {
- "calls: unreachable code",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "unreachable insn 6",
- .result = REJECT,
- },
- {
- "calls: invalid call",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "invalid destination",
- .result = REJECT,
- },
- {
- "calls: invalid call 2",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "invalid destination",
- .result = REJECT,
- },
- {
- "calls: jumping across function bodies. test1",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "jump out of range",
- .result = REJECT,
- },
- {
- "calls: jumping across function bodies. test2",
- .insns = {
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "jump out of range",
- .result = REJECT,
- },
- {
- "calls: call without exit",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "not an exit",
- .result = REJECT,
- },
- {
- "calls: call into middle of ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "last insn",
- .result = REJECT,
- },
- {
- "calls: call into middle of other call",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "last insn",
- .result = REJECT,
- },
- {
- "calls: ld_abs with changing ctx data in callee",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_3, 2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_vlan_push),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
- .result = REJECT,
- },
- {
- "calls: two calls with bad fallthrough",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "not an exit",
- .result = REJECT,
- },
- {
- "calls: two calls with stack read",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .result = ACCEPT,
- },
- {
- "calls: two calls with stack write",
- .insns = {
- /* main prog */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
- /* write into stack frame of main prog */
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- /* read from stack frame of main prog */
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .result = ACCEPT,
- },
- {
- "calls: stack overflow using two frames (pre-call access)",
- .insns = {
- /* prog 1 */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
-
- /* prog 2 */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .errstr = "combined stack size",
- .result = REJECT,
- },
- {
- "calls: stack overflow using two frames (post-call access)",
- .insns = {
- /* prog 1 */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
- BPF_EXIT_INSN(),
-
- /* prog 2 */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .errstr = "combined stack size",
- .result = REJECT,
- },
- {
- "calls: stack depth check using three frames. test1",
- .insns = {
- /* main */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- /* A */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
- BPF_EXIT_INSN(),
- /* B */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- /* stack_main=32, stack_A=256, stack_B=64
- * and max(main+A, main+A+B) < 512
- */
- .result = ACCEPT,
- },
- {
- "calls: stack depth check using three frames. test2",
- .insns = {
- /* main */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- /* A */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
- BPF_EXIT_INSN(),
- /* B */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- /* stack_main=32, stack_A=64, stack_B=256
- * and max(main+A, main+A+B) < 512
- */
- .result = ACCEPT,
- },
- {
- "calls: stack depth check using three frames. test3",
- .insns = {
- /* main */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
- BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
- BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- /* A */
- BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
- BPF_EXIT_INSN(),
- BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, -3),
- /* B */
- BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- /* stack_main=64, stack_A=224, stack_B=256
- * and max(main+A, main+A+B) > 512
- */
- .errstr = "combined stack",
- .result = REJECT,
- },
- {
- "calls: stack depth check using three frames. test4",
- /* void main(void) {
- * func1(0);
- * func1(1);
- * func2(1);
- * }
- * void func1(int alloc_or_recurse) {
- * if (alloc_or_recurse) {
- * frame_pointer[-300] = 1;
- * } else {
- * func2(alloc_or_recurse);
- * }
- * }
- * void func2(int alloc_or_recurse) {
- * if (alloc_or_recurse) {
- * frame_pointer[-300] = 1;
- * }
- * }
- */
- .insns = {
- /* main */
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- /* A */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
- BPF_EXIT_INSN(),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
- BPF_EXIT_INSN(),
- /* B */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .result = REJECT,
- .errstr = "combined stack",
- },
- {
- "calls: stack depth check using three frames. test5",
- .insns = {
- /* main */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
- BPF_EXIT_INSN(),
- /* A */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
- BPF_EXIT_INSN(),
- /* B */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
- BPF_EXIT_INSN(),
- /* C */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
- BPF_EXIT_INSN(),
- /* D */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
- BPF_EXIT_INSN(),
- /* E */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
- BPF_EXIT_INSN(),
- /* F */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
- BPF_EXIT_INSN(),
- /* G */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
- BPF_EXIT_INSN(),
- /* H */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .errstr = "call stack",
- .result = REJECT,
- },
- {
- "calls: spill into caller stack frame",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .errstr = "cannot spill",
- .result = REJECT,
- },
- {
- "calls: write into caller stack frame",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "calls: write into callee stack frame",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .errstr = "cannot return stack pointer",
- .result = REJECT,
- },
- {
- "calls: two calls with stack write and void return",
- .insns = {
- /* main prog */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- /* write into stack frame of main prog */
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
- BPF_EXIT_INSN(), /* void return */
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .result = ACCEPT,
- },
- {
- "calls: ambiguous return value",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "allowed for root only",
- .result_unpriv = REJECT,
- .errstr = "R0 !read_ok",
- .result = REJECT,
- },
- {
- "calls: two calls that return map_value",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
-
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
-			/* fetch second map_value_ptr from the stack */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- /* call 3rd function twice */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* first time with fp-8 */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- /* second time with fp-16 */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- /* lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- /* write map_value_ptr into stack frame of main prog */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(), /* return 0 */
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .fixup_map_hash_8b = { 23 },
- .result = ACCEPT,
- },
- {
- "calls: two calls that return map_value with bool condition",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- /* call 3rd function twice */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* first time with fp-8 */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- /* second time with fp-16 */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
- /* fetch second map_value_ptr from the stack */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- /* lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(), /* return 0 */
- /* write map_value_ptr into stack frame of main prog */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(), /* return 1 */
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .fixup_map_hash_8b = { 23 },
- .result = ACCEPT,
- },
- {
- "calls: two calls that return map_value with incorrect bool check",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- /* call 3rd function twice */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* first time with fp-8 */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- /* second time with fp-16 */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- /* fetch second map_value_ptr from the stack */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- /* lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(), /* return 0 */
- /* write map_value_ptr into stack frame of main prog */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(), /* return 1 */
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .fixup_map_hash_8b = { 23 },
- .result = REJECT,
- .errstr = "invalid read from stack off -16+0 size 8",
- },
- {
- "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* 1st lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_8, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- /* write map_value_ptr into stack frame of main prog at fp-8 */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_8, 1),
-
- /* 2nd lookup from map */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_9, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- /* write map_value_ptr into stack frame of main prog at fp-16 */
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_9, 1),
-
- /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- /* if arg2 == 1 do *arg1 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
-
- /* if arg4 == 1 do *arg3 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_hash_8b = { 12, 22 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=8 off=2 size=8",
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* 1st lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_8, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- /* write map_value_ptr into stack frame of main prog at fp-8 */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_8, 1),
-
- /* 2nd lookup from map */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_9, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- /* write map_value_ptr into stack frame of main prog at fp-16 */
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_9, 1),
-
- /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- /* if arg2 == 1 do *arg1 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
-
- /* if arg4 == 1 do *arg3 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_hash_8b = { 12, 22 },
- .result = ACCEPT,
- },
- {
- "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* 1st lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_8, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- /* write map_value_ptr into stack frame of main prog at fp-8 */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_8, 1),
-
- /* 2nd lookup from map */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_9, 0), // 26
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- /* write map_value_ptr into stack frame of main prog at fp-16 */
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_9, 1),
-
- /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
- BPF_JMP_IMM(BPF_JA, 0, 0, -30),
-
- /* subprog 2 */
- /* if arg2 == 1 do *arg1 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
-
- /* if arg4 == 1 do *arg3 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, -8),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_hash_8b = { 12, 22 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=8 off=2 size=8",
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: two calls that receive map_value_ptr_or_null via arg. test1",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* 1st lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_8, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_MOV64_IMM(BPF_REG_8, 1),
-
- /* 2nd lookup from map */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_9, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_MOV64_IMM(BPF_REG_9, 1),
-
- /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- /* if arg2 == 1 do *arg1 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
-
- /* if arg4 == 1 do *arg3 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_hash_8b = { 12, 22 },
- .result = ACCEPT,
- },
- {
- "calls: two calls that receive map_value_ptr_or_null via arg. test2",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* 1st lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_8, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_MOV64_IMM(BPF_REG_8, 1),
-
- /* 2nd lookup from map */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_9, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_MOV64_IMM(BPF_REG_9, 1),
-
- /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- /* if arg2 == 1 do *arg1 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
-
- /* if arg4 == 0 do *arg3 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_hash_8b = { 12, 22 },
- .result = REJECT,
- .errstr = "R0 invalid mem access 'inv'",
- },
- {
- "calls: pkt_ptr spill into caller stack",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- /* spill unchecked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
- /* now the pkt range is verified, read pkt_ptr from stack */
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = POINTER_VALUE,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- /* Marking is still kept, but not in all cases safe. */
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- /* spill unchecked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
- /* now the pkt range is verified, read pkt_ptr from stack */
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "invalid access to packet",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- /* Marking is still kept and safe here. */
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- /* spill unchecked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- /* now the pkt range is verified, read pkt_ptr from stack */
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 4",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- /* Check marking propagated. */
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- /* spill unchecked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- /* don't read back pkt_ptr from stack here */
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 5",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
- /* spill checked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- /* don't read back pkt_ptr from stack here */
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "same insn cannot be used with different",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 6",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
- /* spill checked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- /* don't read back pkt_ptr from stack here */
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "R4 invalid mem access",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 7",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
- /* spill checked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- /* don't read back pkt_ptr from stack here */
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "R4 invalid mem access",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 8",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
- /* spill checked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- /* don't read back pkt_ptr from stack here */
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 9",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- /* spill unchecked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- /* don't read back pkt_ptr from stack here */
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "invalid access to packet",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: caller stack init to zero or map_value_or_null",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- /* fetch map_value_or_null or const_zero from stack */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- /* store into map_value */
- BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- /* if (ctx == 0) return; */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
- /* else bpf_map_lookup() and *(fp - 8) = r0 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 13 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "calls: stack init to zero and pruning",
- .insns = {
- /* first make allocated_stack 16 bytes */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
- /* now fork the execution such that the false branch
- * of JGT insn will be verified second and it skips zero
- * init of fp-8 stack slot. If stack liveness marking
- * is missing live_read marks from call map_lookup
- * processing then pruning will incorrectly assume
- * that fp-8 stack slot was unused in the fall-through
- * branch and will accept the program incorrectly
- */
- BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 6 },
- .errstr = "invalid indirect read from stack off -8+0 size 8",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "calls: two calls returning different map pointers for lookup (hash, array)",
- .insns = {
- /* main prog */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
- BPF_CALL_REL(11),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_CALL_REL(12),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- /* subprog 1 */
- BPF_LD_MAP_FD(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- /* subprog 2 */
- BPF_LD_MAP_FD(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_hash_48b = { 13 },
- .fixup_map_array_48b = { 16 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "calls: two calls returning different map pointers for lookup (hash, map in map)",
- .insns = {
- /* main prog */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
- BPF_CALL_REL(11),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_CALL_REL(12),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- /* subprog 1 */
- BPF_LD_MAP_FD(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- /* subprog 2 */
- BPF_LD_MAP_FD(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_in_map = { 16 },
- .fixup_map_array_48b = { 13 },
- .result = REJECT,
- .errstr = "R0 invalid mem access 'map_ptr'",
- },
- {
- "cond: two branches returning different map pointers for lookup (tail, tail)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 5 },
- .fixup_prog2 = { 2 },
- .result_unpriv = REJECT,
- .errstr_unpriv = "tail_call abusing map_ptr",
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "cond: two branches returning same map pointers for lookup (tail, tail)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_prog2 = { 2, 5 },
- .result_unpriv = ACCEPT,
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "search pruning: all branches should be verified (nop operation)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_A(1),
- BPF_MOV64_IMM(BPF_REG_4, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
- BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
- BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
- BPF_MOV64_IMM(BPF_REG_6, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R6 invalid mem access 'inv'",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "search pruning: all branches should be verified (invalid stack access)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
- BPF_JMP_A(1),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
- BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
- BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "invalid read from stack off -16+0 size 8",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "jit: lsh, rsh, arsh by 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_MOV64_IMM(BPF_REG_1, 0xff),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
- BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
- BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "jit: mov32 for ldimm64, 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
- BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
- BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "jit: mov32 for ldimm64, 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
- BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
- BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "jit: various mul tests",
- .insns = {
- BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
- BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
- BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
- BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
- BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
- BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
- BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
- BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
- BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
- BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
- BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
- BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
- BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
- BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
- BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
- BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "xadd/w check unaligned stack",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "misaligned stack access off",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "xadd/w check unaligned map",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = REJECT,
- .errstr = "misaligned value access off",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "xadd/w check unaligned pkt",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_0, 99),
- BPF_JMP_IMM(BPF_JA, 0, 0, 6),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
- BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
- BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "BPF_XADD stores into R2 pkt is not allowed",
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "xadd/w check whether src/dst got mangled, 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
- BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 3,
- },
- {
- "xadd/w check whether src/dst got mangled, 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
- BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
- BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
- BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
- BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 3,
- },
- {
- "bpf_get_stack return R0 within range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
- BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)),
- BPF_MOV64_IMM(BPF_REG_4, 256),
- BPF_EMIT_CALL(BPF_FUNC_get_stack),
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
- BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_EMIT_CALL(BPF_FUNC_get_stack),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 4 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "ld_abs: invalid op 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_LD_ABS(BPF_DW, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = REJECT,
- .errstr = "unknown opcode",
- },
- {
- "ld_abs: invalid op 2",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 256),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_LD_IND(BPF_DW, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = REJECT,
- .errstr = "unknown opcode",
- },
- {
- "ld_abs: nmap reduced",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_LD_ABS(BPF_H, 12),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28),
- BPF_LD_ABS(BPF_H, 12),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26),
- BPF_MOV32_IMM(BPF_REG_0, 18),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64),
- BPF_LD_IND(BPF_W, BPF_REG_7, 14),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60),
- BPF_MOV32_IMM(BPF_REG_0, 280971478),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60),
- BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15),
- BPF_LD_ABS(BPF_H, 12),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13),
- BPF_MOV32_IMM(BPF_REG_0, 22),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
- BPF_LD_IND(BPF_H, BPF_REG_7, 14),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52),
- BPF_MOV32_IMM(BPF_REG_0, 17366),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52),
- BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV32_IMM(BPF_REG_0, 256),
- BPF_EXIT_INSN(),
- BPF_MOV32_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .data = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 256,
- },
- {
- "ld_abs: div + abs, test 1",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
- BPF_LD_ABS(BPF_B, 3),
- BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
- BPF_LD_ABS(BPF_B, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
- BPF_LD_IND(BPF_B, BPF_REG_8, -70),
- BPF_EXIT_INSN(),
- },
- .data = {
- 10, 20, 30, 40, 50,
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 10,
- },
- {
- "ld_abs: div + abs, test 2",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
- BPF_LD_ABS(BPF_B, 3),
- BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
- BPF_LD_ABS(BPF_B, 128),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
- BPF_LD_IND(BPF_B, BPF_REG_8, -70),
- BPF_EXIT_INSN(),
- },
- .data = {
- 10, 20, 30, 40, 50,
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "ld_abs: div + abs, test 3",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
- BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
- BPF_LD_ABS(BPF_B, 3),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- },
- .data = {
- 10, 20, 30, 40, 50,
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "ld_abs: div + abs, test 4",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
- BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
- BPF_LD_ABS(BPF_B, 256),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- },
- .data = {
- 10, 20, 30, 40, 50,
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "ld_abs: vlan + abs, test 1",
- .insns = { },
- .data = {
- 0x34,
- },
- .fill_helper = bpf_fill_ld_abs_vlan_push_pop,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0xbef,
- },
- {
- "ld_abs: vlan + abs, test 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_6, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_3, 2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_vlan_push),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_EXIT_INSN(),
- },
- .data = {
- 0x34,
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "ld_abs: jump around ld_abs",
- .insns = { },
- .data = {
- 10, 11,
- },
- .fill_helper = bpf_fill_jump_around_ld_abs,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 10,
- },
- {
- "ld_dw: xor semi-random 64 bit imms, test 1",
- .insns = { },
- .data = { },
- .fill_helper = bpf_fill_rand_ld_dw,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 4090,
- },
- {
- "ld_dw: xor semi-random 64 bit imms, test 2",
- .insns = { },
- .data = { },
- .fill_helper = bpf_fill_rand_ld_dw,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 2047,
- },
- {
- "ld_dw: xor semi-random 64 bit imms, test 3",
- .insns = { },
- .data = { },
- .fill_helper = bpf_fill_rand_ld_dw,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 511,
- },
- {
- "ld_dw: xor semi-random 64 bit imms, test 4",
- .insns = { },
- .data = { },
- .fill_helper = bpf_fill_rand_ld_dw,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 5,
- },
- {
- "pass unmodified ctx pointer to helper",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_update),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: leak potential reference",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- },
- {
- "reference tracking: leak potential reference on stack",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- },
- {
- "reference tracking: leak potential reference on stack 2",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- },
- {
- "reference tracking: zero potential reference",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- },
- {
- "reference tracking: copy and zero potential references",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- },
- {
- "reference tracking: release reference without check",
- .insns = {
- BPF_SK_LOOKUP,
- /* reference in r0 may be NULL */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "type=sock_or_null expected=sock",
- .result = REJECT,
- },
- {
- "reference tracking: release reference",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: release reference 2",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: release reference twice",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "type=inv expected=sock",
- .result = REJECT,
- },
- {
- "reference tracking: release reference twice inside branch",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "type=inv expected=sock",
- .result = REJECT,
- },
- {
- "reference tracking: alloc, check, free in one subbranch",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
- /* if (offsetof(skb, mark) > data_len) exit; */
- BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
- offsetof(struct __sk_buff, mark)),
- BPF_SK_LOOKUP,
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
- /* Leak reference in R0 */
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "reference tracking: alloc, check, free in both subbranches",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
- /* if (offsetof(skb, mark) > data_len) exit; */
- BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
- offsetof(struct __sk_buff, mark)),
- BPF_SK_LOOKUP,
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "reference tracking in call: free reference in subprog",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "pass modified ctx pointer to helper, 1",
- .insns = {
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_update),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = REJECT,
- .errstr = "dereference of modified ctx ptr",
- },
- {
- "pass modified ctx pointer to helper, 2",
- .insns = {
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_socket_cookie),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result_unpriv = REJECT,
- .result = REJECT,
- .errstr_unpriv = "dereference of modified ctx ptr",
- .errstr = "dereference of modified ctx ptr",
- },
- {
- "pass modified ctx pointer to helper, 3",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_update),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = REJECT,
- .errstr = "variable ctx access var_off=(0x0; 0x4)",
- },
- {
- "mov64 src == dst",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_2),
- // Check bounds are OK
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "mov64 src != dst",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
- // Check bounds are OK
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "allocated_stack",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_7, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, -8),
- BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_7, -9),
- BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_10, -9),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = ACCEPT,
- .insn_processed = 15,
- },
- {
- "masking, test out of bounds 1",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 5),
- BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 2",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 1),
- BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 3",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0xffffffff),
- BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 4",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0xffffffff),
- BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 5",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, -1),
- BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 6",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, -1),
- BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 7",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 5),
- BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 8",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 9",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0xffffffff),
- BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 10",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0xffffffff),
- BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 11",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, -1),
- BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 12",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, -1),
- BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test in bounds 1",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 4),
- BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 4,
- },
- {
- "masking, test in bounds 2",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test in bounds 3",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0xfffffffe),
- BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0xfffffffe,
- },
- {
- "masking, test in bounds 4",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0xabcde),
- BPF_MOV32_IMM(BPF_REG_2, 0xabcdef - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0xabcde,
- },
- {
- "masking, test in bounds 5",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test in bounds 6",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 46),
- BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 46,
- },
- {
- "masking, test in bounds 7",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, -46),
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1),
- BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 46,
- },
- {
- "masking, test in bounds 8",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, -47),
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1),
- BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
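For context, the twenty "masking" tests above all exercise the same branch-free bounds-masking idiom: compute (limit - 1) - index, OR the result with index, negate, arithmetic-shift right by 63, then AND the mask into the index. When index lies within [0, limit) the mask is all ones and the index passes through; otherwise the mask is zero and the index collapses to 0, matching the expected retvals. A minimal C sketch of the same arithmetic, separate from the patch (the helper name and plain C types are illustrative; it assumes two's-complement wrap-around and an arithmetic right shift of negative values, matching the BPF ALU semantics):

static unsigned long long mask_index(unsigned long long index,
				     unsigned long long limit)
{
	/* mirrors: r2 = limit - 1; r2 -= index; r2 |= index;
	 *          r2 = -r2; r2 s>>= 63; index &= r2;
	 */
	long long mask = (long long)((limit - 1) - index) | (long long)index;

	mask = -mask;	/* negative iff index was in bounds and nonzero */
	mask >>= 63;	/* propagate the sign: all ones or all zeros */
	return index & (unsigned long long)mask;
}

For example, mask_index(4, 5) returns 4 ("test in bounds 1") while mask_index(5, 5) returns 0 ("test out of bounds 1").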
- {
- "reference tracking in call: free reference in subprog and outside",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "type=inv expected=sock",
- .result = REJECT,
- },
- {
- "reference tracking in call: alloc & leak reference in subprog",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
- BPF_SK_LOOKUP,
- /* spill unchecked sk_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- },
- {
- "reference tracking in call: alloc in subprog, release outside",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_SK_LOOKUP,
- BPF_EXIT_INSN(), /* return sk */
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = POINTER_VALUE,
- .result = ACCEPT,
- },
- {
- "reference tracking in call: sk_ptr leak into caller stack",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
- /* spill unchecked sk_ptr into stack of caller */
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- BPF_SK_LOOKUP,
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- },
- {
- "reference tracking in call: sk_ptr spill into caller stack",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
- /* spill unchecked sk_ptr into stack of caller */
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- /* now the sk_ptr is verified, free the reference */
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- BPF_SK_LOOKUP,
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: allow LD_ABS",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: forbid LD_ABS while holding reference",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_SK_LOOKUP,
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
- .result = REJECT,
- },
- {
- "reference tracking: allow LD_IND",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_MOV64_IMM(BPF_REG_7, 1),
- BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "reference tracking: forbid LD_IND while holding reference",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_7, 1),
- BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
- .result = REJECT,
- },
- {
- "reference tracking: check reference or tail call",
- .insns = {
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
- BPF_SK_LOOKUP,
- /* if (sk) bpf_sk_release() */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
- /* bpf_tail_call() */
- BPF_MOV64_IMM(BPF_REG_3, 2),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 17 },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: release reference then tail call",
- .insns = {
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
- BPF_SK_LOOKUP,
- /* if (sk) bpf_sk_release() */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- /* bpf_tail_call() */
- BPF_MOV64_IMM(BPF_REG_3, 2),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 18 },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: leak possible reference over tail call",
- .insns = {
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
- /* Look up socket and store in REG_6 */
- BPF_SK_LOOKUP,
- /* bpf_tail_call() */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3, 2),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- /* if (sk) bpf_sk_release() */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 16 },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "tail_call would lead to reference leak",
- .result = REJECT,
- },
- {
- "reference tracking: leak checked reference over tail call",
- .insns = {
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
- /* Look up socket and store in REG_6 */
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- /* if (!sk) goto end */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- /* bpf_tail_call() */
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 17 },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "tail_call would lead to reference leak",
- .result = REJECT,
- },
- {
- "reference tracking: mangle and release sock_or_null",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "R1 pointer arithmetic on sock_or_null prohibited",
- .result = REJECT,
- },
- {
- "reference tracking: mangle and release sock",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "R1 pointer arithmetic on sock prohibited",
- .result = REJECT,
- },
- {
- "reference tracking: access member",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: write to member",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_LD_IMM64(BPF_REG_2, 42),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2,
- offsetof(struct bpf_sock, mark)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "cannot write into socket",
- .result = REJECT,
- },
- {
- "reference tracking: invalid 64-bit access of member",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "invalid bpf_sock access off=0 size=8",
- .result = REJECT,
- },
- {
- "reference tracking: access after release",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "!read_ok",
- .result = REJECT,
- },
- {
- "reference tracking: direct access for lookup",
- .insns = {
- /* Check that the packet is at least 64B long */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
- /* sk = sk_lookup_tcp(ctx, skb->data, ...) */
- BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "calls: ctx read at start of subprog",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
- BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "check wire_len is not readable by sockets",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, wire_len)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check wire_len is readable by tc classifier",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, wire_len)),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "check wire_len is not writable by tc classifier",
- .insns = {
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, wire_len)),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "invalid bpf_context access",
- .errstr_unpriv = "R1 leaks addr",
- .result = REJECT,
- },
- {
- "calls: cross frame pruning",
- .insns = {
- /* r8 = !!random();
- * call pruner()
- * if (r8)
- * do something bad;
- */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_prandom_u32),
- BPF_MOV64_IMM(BPF_REG_8, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_MOV64_IMM(BPF_REG_8, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
- .errstr = "!read_ok",
- .result = REJECT,
- },
- {
- "jset: functional",
- .insns = {
- /* r0 = 0 */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- /* prep for direct packet access via r2 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),
- BPF_EXIT_INSN(),
-
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
-
- /* reg, bit 63 or bit 0 set, taken */
- BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001),
- BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
- BPF_EXIT_INSN(),
-
- /* reg, bit 62, not taken */
- BPF_LD_IMM64(BPF_REG_8, 0x4000000000000000),
- BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_EXIT_INSN(),
-
- /* imm, any bit set, taken */
- BPF_JMP_IMM(BPF_JSET, BPF_REG_7, -1, 1),
- BPF_EXIT_INSN(),
-
- /* imm, bit 31 set, taken */
- BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
- BPF_EXIT_INSN(),
-
- /* all good - return r0 == 2 */
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .runs = 7,
- .retvals = {
- { .retval = 2,
- .data64 = { (1ULL << 63) | (1U << 31) | (1U << 0), }
- },
- { .retval = 2,
- .data64 = { (1ULL << 63) | (1U << 31), }
- },
- { .retval = 2,
- .data64 = { (1ULL << 31) | (1U << 0), }
- },
- { .retval = 2,
- .data64 = { (__u32)-1, }
- },
- { .retval = 2,
- .data64 = { ~0x4000000000000000ULL, }
- },
- { .retval = 0,
- .data64 = { 0, }
- },
- { .retval = 0,
- .data64 = { ~0ULL, }
- },
- },
- },
- {
- "jset: sign-extend",
- .insns = {
- /* r0 = 0 */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- /* prep for direct packet access via r2 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),
- BPF_EXIT_INSN(),
-
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
-
- BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
- BPF_EXIT_INSN(),
+/* BPF_DIRECT_PKT_R2 contains 7 instructions: it initializes the default
+ * return value to 0 and does the necessary preparation for direct packet
+ * access through r2. The allowed access range is 8 bytes.
+ */
+#define BPF_DIRECT_PKT_R2 \
+ BPF_MOV64_IMM(BPF_REG_0, 0), \
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
+ offsetof(struct __sk_buff, data)), \
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
+ offsetof(struct __sk_buff, data_end)), \
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), \
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), \
+ BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1), \
+ BPF_EXIT_INSN()
+
+/* BPF_RAND_UEXT_R7 contains 4 instructions: it initializes R7 to a random
+ * positive u32 and zero-extends it to 64 bits.
+ */
+#define BPF_RAND_UEXT_R7 \
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
+ BPF_FUNC_get_prandom_u32), \
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33), \
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33)
+
+/* BPF_RAND_SEXT_R7 contains 5 instructions: it initializes R7 to a random
+ * negative u32 and sign-extends it to 64 bits.
+ */
+#define BPF_RAND_SEXT_R7 \
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
+ BPF_FUNC_get_prandom_u32), \
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000), \
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32), \
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32)
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 2,
- .data = { 1, 0, 0, 0, 0, 0, 0, 1, },
- },
- {
- "jset: known const compare",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .retval_unpriv = 1,
- .result_unpriv = ACCEPT,
- .retval = 1,
- .result = ACCEPT,
- },
- {
- "jset: known const compare bad",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .errstr_unpriv = "!read_ok",
- .result_unpriv = REJECT,
- .errstr = "!read_ok",
- .result = REJECT,
- },
- {
- "jset: unknown const compare taken",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_prandom_u32),
- BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .errstr_unpriv = "!read_ok",
- .result_unpriv = REJECT,
- .errstr = "!read_ok",
- .result = REJECT,
- },
- {
- "jset: unknown const compare not taken",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_prandom_u32),
- BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .errstr_unpriv = "!read_ok",
- .result_unpriv = REJECT,
- .errstr = "!read_ok",
- .result = REJECT,
- },
- {
- "jset: half-known const compare",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_prandom_u32),
- BPF_ALU64_IMM(BPF_OR, BPF_REG_0, 2),
- BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .result_unpriv = ACCEPT,
- .result = ACCEPT,
- },
- {
- "jset: range",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_prandom_u32),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xff),
- BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0xf0, 3),
- BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 0x10, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0x10, 1),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0x10, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .result_unpriv = ACCEPT,
- .result = ACCEPT,
- },
+static struct bpf_test tests[] = {
+#define FILL_ARRAY
+#include <verifier/tests.h>
+#undef FILL_ARRAY
};
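The tests[] array is now stitched together from the per-group snippets added below under verifier/ (and.c, array_access.c, basic.c, and so on). Each snippet is a bare list of struct bpf_test initializers, and a generated, git-ignored verifier/tests.h (see the .gitignore hunk below) includes them so they expand inside the braces above. A rough sketch of how the pieces fit, assuming the header is generated as one #include line per snippet (the exact generation rule lives in the selftests Makefile and is not shown in this hunk):

/* verifier/tests.h -- generated, not checked in */
#include "verifier/and.c"
#include "verifier/array_access.c"
#include "verifier/basic.c"
/* ... one line per verifier/*.c file ... */

/* verifier/example.c -- hypothetical snippet, same shape as and.c below */
{
	"example: exit with r0 = 0",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},

The FILL_ARRAY define/undef pair is only in effect while the snippets are being expanded into the array, so a snippet can use it to guard content that should appear nowhere else.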
static int probe_filter_length(const struct bpf_insn *fp)
@@ -15611,6 +267,16 @@ static int probe_filter_length(const struct bpf_insn *fp)
return len + 1;
}
+static bool skip_unsupported_map(enum bpf_map_type map_type)
+{
+ if (!bpf_probe_map_type(map_type, 0)) {
+ printf("SKIP (unsupported map type %d)\n", map_type);
+ skips++;
+ return true;
+ }
+ return false;
+}
+
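The SKIP path above leans on libbpf's feature-probing helpers (bpf_probe_map_type() here and bpf_probe_prog_type() further down, from the libbpf_probes.c addition in this series): map creation is still attempted first, and only when it fails is the probe consulted to distinguish "kernel lacks the feature, count a skip" from a genuine error. A standalone sketch of that pattern, not part of the patch (the helper name and hash-map parameters are illustrative):

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static int skips;

static int create_hash_map_or_skip(void)
{
	int fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(int),
				sizeof(long long), 1, BPF_F_NO_PREALLOC);

	if (fd < 0) {
		if (!bpf_probe_map_type(BPF_MAP_TYPE_HASH, 0)) {
			printf("SKIP (unsupported map type %d)\n",
			       BPF_MAP_TYPE_HASH);
			skips++;
		} else {
			printf("Failed to create hash map '%s'!\n",
			       strerror(errno));
		}
	}
	return fd;
}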
static int create_map(uint32_t type, uint32_t size_key,
uint32_t size_value, uint32_t max_elem)
{
@@ -15618,8 +284,11 @@ static int create_map(uint32_t type, uint32_t size_key,
fd = bpf_create_map(type, size_key, size_value, max_elem,
type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0);
- if (fd < 0)
+ if (fd < 0) {
+ if (skip_unsupported_map(type))
+ return -1;
printf("Failed to create hash map '%s'!\n", strerror(errno));
+ }
return fd;
}
@@ -15669,6 +338,8 @@ static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
sizeof(int), max_elem, 0);
if (mfd < 0) {
+ if (skip_unsupported_map(BPF_MAP_TYPE_PROG_ARRAY))
+ return -1;
printf("Failed to create prog array '%s'!\n", strerror(errno));
return -1;
}
@@ -15699,15 +370,20 @@ static int create_map_in_map(void)
inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
sizeof(int), 1, 0);
if (inner_map_fd < 0) {
+ if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY))
+ return -1;
printf("Failed to create array '%s'!\n", strerror(errno));
return inner_map_fd;
}
outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
sizeof(int), inner_map_fd, 1, 0);
- if (outer_map_fd < 0)
+ if (outer_map_fd < 0) {
+ if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY_OF_MAPS))
+ return -1;
printf("Failed to create array of maps '%s'!\n",
strerror(errno));
+ }
close(inner_map_fd);
@@ -15722,10 +398,105 @@ static int create_cgroup_storage(bool percpu)
fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
TEST_DATA_LEN, 0, 0);
- if (fd < 0)
+ if (fd < 0) {
+ if (skip_unsupported_map(type))
+ return -1;
printf("Failed to create cgroup storage '%s'!\n",
strerror(errno));
+ }
+
+ return fd;
+}
+
+#define BTF_INFO_ENC(kind, kind_flag, vlen) \
+ ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
+#define BTF_TYPE_ENC(name, info, size_or_type) \
+ (name), (info), (size_or_type)
+#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
+ ((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
+#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
+ BTF_INT_ENC(encoding, bits_offset, bits)
+#define BTF_MEMBER_ENC(name, type, bits_offset) \
+ (name), (type), (bits_offset)
+
+struct btf_raw_data {
+ __u32 raw_types[64];
+ const char *str_sec;
+ __u32 str_sec_size;
+};
+
+/* struct bpf_spin_lock {
+ * int val;
+ * };
+ * struct val {
+ * int cnt;
+ * struct bpf_spin_lock l;
+ * };
+ */
+static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
+static __u32 btf_raw_types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* struct bpf_spin_lock */ /* [2] */
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
+ BTF_MEMBER_ENC(15, 1, 0), /* int val; */
+ /* struct val */ /* [3] */
+ BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
+ BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
+ BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
+};
+
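In the raw BTF above, every name field is a byte offset into btf_str_sec and type IDs are 1-based positions in btf_raw_types[]: [1] is the anonymous int, [2] is struct bpf_spin_lock (name offset 1) with member "val" (offset 15), and [3] is struct val (offset 15) with members "cnt" (offset 19) and "l" (offset 23, bit offset 32). A tiny standalone illustration of how those offsets resolve, not part of the patch:

#include <stdio.h>

int main(void)
{
	/* Same layout as btf_str_sec above: names are NUL-separated and
	 * addressed by byte offset; offset 0 is the empty (anonymous) name.
	 */
	static const char strs[] = "\0bpf_spin_lock\0val\0cnt\0l";
	unsigned int offs[] = { 1, 15, 19, 23 };

	for (unsigned int i = 0; i < sizeof(offs) / sizeof(offs[0]); i++)
		printf("offset %2u -> \"%s\"\n", offs[i], strs + offs[i]);
	return 0;
}

load_btf() below then glues header, type section and string section into one blob, and create_map_spin_lock() points btf_value_type_id at type [3] so the 8-byte array value carries the bpf_spin_lock member the verifier needs to see.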
+static int load_btf(void)
+{
+ struct btf_header hdr = {
+ .magic = BTF_MAGIC,
+ .version = BTF_VERSION,
+ .hdr_len = sizeof(struct btf_header),
+ .type_len = sizeof(btf_raw_types),
+ .str_off = sizeof(btf_raw_types),
+ .str_len = sizeof(btf_str_sec),
+ };
+ void *ptr, *raw_btf;
+ int btf_fd;
+
+ ptr = raw_btf = malloc(sizeof(hdr) + sizeof(btf_raw_types) +
+ sizeof(btf_str_sec));
+
+ memcpy(ptr, &hdr, sizeof(hdr));
+ ptr += sizeof(hdr);
+ memcpy(ptr, btf_raw_types, hdr.type_len);
+ ptr += hdr.type_len;
+ memcpy(ptr, btf_str_sec, hdr.str_len);
+ ptr += hdr.str_len;
+
+ btf_fd = bpf_load_btf(raw_btf, ptr - raw_btf, 0, 0, 0);
+ free(raw_btf);
+ if (btf_fd < 0)
+ return -1;
+ return btf_fd;
+}
+
+static int create_map_spin_lock(void)
+{
+ struct bpf_create_map_attr attr = {
+ .name = "test_map",
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .key_size = 4,
+ .value_size = 8,
+ .max_entries = 1,
+ .btf_key_type_id = 1,
+ .btf_value_type_id = 3,
+ };
+ int fd, btf_fd;
+ btf_fd = load_btf();
+ if (btf_fd < 0)
+ return -1;
+ attr.btf_fd = btf_fd;
+ fd = bpf_create_map_xattr(&attr);
+ if (fd < 0)
+ printf("Failed to create map with spin_lock\n");
return fd;
}
@@ -15747,6 +518,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
int *fixup_map_in_map = test->fixup_map_in_map;
int *fixup_cgroup_storage = test->fixup_cgroup_storage;
int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
+ int *fixup_map_spin_lock = test->fixup_map_spin_lock;
if (test->fill_helper)
test->fill_helper(test);
@@ -15863,6 +635,13 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
fixup_map_stacktrace++;
} while (*fixup_map_stacktrace);
}
+ if (*fixup_map_spin_lock) {
+ map_fds[13] = create_map_spin_lock();
+ do {
+ prog[*fixup_map_spin_lock].imm = map_fds[13];
+ fixup_map_spin_lock++;
+ } while (*fixup_map_spin_lock);
+ }
}
static int set_admin(bool admin)
@@ -15928,6 +707,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
int run_errs, run_successes;
int map_fds[MAX_NR_MAPS];
const char *expected_err;
+ int fixup_skips;
__u32 pflags;
int i, err;
@@ -15936,7 +716,13 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
if (!prog_type)
prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+ fixup_skips = skips;
do_test_fixup(test, prog_type, prog, map_fds);
+ /* If there were some map skips during fixup due to missing bpf
+ * features, skip this test.
+ */
+ if (fixup_skips != skips)
+ return;
prog_len = probe_filter_length(prog);
pflags = 0;
@@ -15946,6 +732,11 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
pflags |= BPF_F_ANY_ALIGNMENT;
fd_prog = bpf_verify_program(prog_type, prog, prog_len, pflags,
"GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
+ if (fd_prog < 0 && !bpf_probe_prog_type(prog_type, 0)) {
+ printf("SKIP (unsupported program type %d)\n", prog_type);
+ skips++;
+ goto close_fds;
+ }
expected_ret = unpriv && test->result_unpriv != UNDEF ?
test->result_unpriv : test->result;
@@ -16099,7 +890,7 @@ static bool test_as_unpriv(struct bpf_test *test)
static int do_test(bool unpriv, unsigned int from, unsigned int to)
{
- int i, passes = 0, errors = 0, skips = 0;
+ int i, passes = 0, errors = 0;
for (i = from; i < to; i++) {
struct bpf_test *test = &tests[i];
diff --git a/tools/testing/selftests/bpf/verifier/.gitignore b/tools/testing/selftests/bpf/verifier/.gitignore
new file mode 100644
index 000000000000..45984a364647
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/.gitignore
@@ -0,0 +1 @@
+tests.h
diff --git a/tools/testing/selftests/bpf/verifier/and.c b/tools/testing/selftests/bpf/verifier/and.c
new file mode 100644
index 000000000000..e0fad1548737
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/and.c
@@ -0,0 +1,50 @@
+{
+ "invalid and of negative number",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 max value is outside of the array range",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid range check",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+ BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
+ BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
+ BPF_MOV32_IMM(BPF_REG_3, 1),
+ BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
+ BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
+ BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
+ BPF_MOV64_REG(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 max value is outside of the array range",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/array_access.c b/tools/testing/selftests/bpf/verifier/array_access.c
new file mode 100644
index 000000000000..0dcecaf3ec6f
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/array_access.c
@@ -0,0 +1,219 @@
+{
+ "valid map access into an array with a constant",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "valid map access into an array with a register",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "valid map access into an array with a variable",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "valid map access into an array with a signed variable",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
+ BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid map access into an array with a constant",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
+ offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=48 size=8",
+ .result = REJECT,
+},
+{
+ "invalid map access into an array with a register",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 min value is outside of the array range",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid map access into an array with a variable",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid map access into an array with no floor check",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
+ BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .errstr = "R0 unbounded memory access",
+ .result_unpriv = REJECT,
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid map access into an array with a invalid max check",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .errstr = "invalid access to map value, value_size=48 off=44 size=8",
+ .result_unpriv = REJECT,
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid map access into an array with a invalid max check",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+ offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3, 11 },
+ .errstr = "R0 pointer += pointer",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/basic.c b/tools/testing/selftests/bpf/verifier/basic.c
new file mode 100644
index 000000000000..b8d18642653a
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/basic.c
@@ -0,0 +1,23 @@
+{
+ "empty prog",
+ .insns = {
+ },
+ .errstr = "unknown opcode 00",
+ .result = REJECT,
+},
+{
+ "only exit insn",
+ .insns = {
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 !read_ok",
+ .result = REJECT,
+},
+{
+ "no bpf_exit",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
+ },
+ .errstr = "not an exit",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/basic_call.c b/tools/testing/selftests/bpf/verifier/basic_call.c
new file mode 100644
index 000000000000..a8c6ab4c1622
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/basic_call.c
@@ -0,0 +1,50 @@
+{
+ "invalid call insn1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unknown opcode 8d",
+ .result = REJECT,
+},
+{
+ "invalid call insn2",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "BPF_CALL uses reserved",
+ .result = REJECT,
+},
+{
+ "invalid function call",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid func unknown#1234567",
+ .result = REJECT,
+},
+{
+ "invalid argument register",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 !read_ok",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "non-invalid argument register",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/basic_instr.c b/tools/testing/selftests/bpf/verifier/basic_instr.c
new file mode 100644
index 000000000000..ed91a7b9a456
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/basic_instr.c
@@ -0,0 +1,134 @@
+{
+ "add+sub+mul",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_2, 3),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
+ BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = -3,
+},
+{
+ "xor32 zero extend check",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_2, -1),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
+ BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
+ BPF_MOV32_IMM(BPF_REG_0, 2),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "arsh32 on imm",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "arsh32 on imm 2",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_0, 0x1122334485667788),
+ BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 7),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = -16069393,
+},
+{
+ "arsh32 on reg",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "arsh32 on reg 2",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_0, 0xffff55667788),
+ BPF_MOV64_IMM(BPF_REG_1, 15),
+ BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 43724,
+},
+{
+ "arsh64 on imm",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "arsh64 on reg",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "invalid 64-bit BPF_END",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ {
+ .code = BPF_ALU64 | BPF_END | BPF_TO_LE,
+ .dst_reg = BPF_REG_0,
+ .src_reg = 0,
+ .off = 0,
+ .imm = 32,
+ },
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unknown opcode d7",
+ .result = REJECT,
+},
+{
+ "mov64 src == dst",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_2),
+ // Check bounds are OK
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "mov64 src != dst",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
+ // Check bounds are OK
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/basic_stack.c b/tools/testing/selftests/bpf/verifier/basic_stack.c
new file mode 100644
index 000000000000..b56f8117c09d
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/basic_stack.c
@@ -0,0 +1,64 @@
+{
+ "stack out of bounds",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack",
+ .result = REJECT,
+},
+{
+ "uninitialized stack1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 2 },
+ .errstr = "invalid indirect read from stack",
+ .result = REJECT,
+},
+{
+ "uninitialized stack2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid read from stack",
+ .result = REJECT,
+},
+{
+ "invalid fp arithmetic",
+ /* If this gets ever changed, make sure JITs can deal with it. */
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 subtraction from stack pointer",
+ .result = REJECT,
+},
+{
+ "non-invalid fp arithmetic",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "misaligned read from stack",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned stack access",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/basic_stx_ldx.c b/tools/testing/selftests/bpf/verifier/basic_stx_ldx.c
new file mode 100644
index 000000000000..7a0aab3f2cd2
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/basic_stx_ldx.c
@@ -0,0 +1,45 @@
+{
+ "invalid src register in STX",
+ .insns = {
+ BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R15 is invalid",
+ .result = REJECT,
+},
+{
+ "invalid dst register in STX",
+ .insns = {
+ BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R14 is invalid",
+ .result = REJECT,
+},
+{
+ "invalid dst register in ST",
+ .insns = {
+ BPF_ST_MEM(BPF_B, 14, -1, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R14 is invalid",
+ .result = REJECT,
+},
+{
+ "invalid src register in LDX",
+ .insns = {
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R12 is invalid",
+ .result = REJECT,
+},
+{
+ "invalid dst register in LDX",
+ .insns = {
+ BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R11 is invalid",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/bounds.c b/tools/testing/selftests/bpf/verifier/bounds.c
new file mode 100644
index 000000000000..d55f476f2237
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/bounds.c
@@ -0,0 +1,508 @@
+{
+ "subtraction bounds (map value) variant 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
+ BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 max value is outside of the array range",
+ .result = REJECT,
+},
+{
+ "subtraction bounds (map value) variant 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
+ BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "check subtraction on pointers for unpriv",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+ BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 9),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_9, BPF_REG_FP),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_0),
+ BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+ BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1, 9 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R9 pointer -= pointer prohibited",
+},
+{
+ "bounds check based on zero-extended MOV",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ /* r2 = 0x0000'0000'ffff'ffff */
+ BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
+ /* r2 = 0 */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
+ /* no-op */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ /* access at offset 0 */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT
+},
+{
+ "bounds check based on sign-extended MOV. test1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ /* r2 = 0xffff'ffff'ffff'ffff */
+ BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
+ /* r2 = 0xffff'ffff */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
+ /* r0 = <oob pointer> */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ /* access to OOB pointer */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "map_value pointer and 4294967295",
+ .result = REJECT
+},
+{
+ "bounds check based on sign-extended MOV. test2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ /* r2 = 0xffff'ffff'ffff'ffff */
+ BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
+ /* r2 = 0xfff'ffff */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
+ /* r0 = <oob pointer> */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ /* access to OOB pointer */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 min value is outside of the array range",
+ .result = REJECT
+},
+{
+ "bounds check based on reg_off + var_off + insn_off. test1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "value_size=8 off=1073741825",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "bounds check based on reg_off + var_off + insn_off. test2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "value 1073741823",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "bounds check after truncation of non-boundary-crossing range",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ /* r1 = [0x00, 0xff] */
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ /* r2 = 0x10'0000'0000 */
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
+ /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+ /* r1 = [0x00, 0xff] */
+ BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
+ /* r1 = 0 */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+ /* no-op */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* access at offset 0 */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT
+},
+{
+ "bounds check after truncation of boundary-crossing range (1)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ /* r1 = [0x00, 0xff] */
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0xffff'ff80, 0x1'0000'007f] */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0xffff'ff80, 0xffff'ffff] or
+ * [0x0000'0000, 0x0000'007f]
+ */
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0x00, 0xff] or
+ * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
+ */
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = 0 or
+ * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
+ */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+ /* no-op or OOB pointer computation */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* potentially OOB access */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ /* not actually fully unbounded, but the bound is very high */
+ .errstr = "R0 unbounded memory access",
+ .result = REJECT
+},
+{
+ "bounds check after truncation of boundary-crossing range (2)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ /* r1 = [0x00, 0xff] */
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0xffff'ff80, 0x1'0000'007f] */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0xffff'ff80, 0xffff'ffff] or
+ * [0x0000'0000, 0x0000'007f]
+ * differs from the previous test: truncation via MOV32
+ * instead of ALU32.
+ */
+ BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0x00, 0xff] or
+ * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
+ */
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = 0 or
+ * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
+ */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+ /* no-op or OOB pointer computation */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* potentially OOB access */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ /* not actually fully unbounded, but the bound is very high */
+ .errstr = "R0 unbounded memory access",
+ .result = REJECT
+},
+{
+ "bounds check after wrapping 32-bit addition",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ /* r1 = 0x7fff'ffff */
+ BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
+ /* r1 = 0xffff'fffe */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+ /* r1 = 0 */
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
+ /* no-op */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* access at offset 0 */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT
+},
+{
+ "bounds check after shift with oversized count operand",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_IMM(BPF_REG_2, 32),
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ /* r1 = (u32)1 << (u32)32 = ? */
+ BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
+ /* r1 = [0x0000, 0xffff] */
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
+ /* computes unknown pointer, potentially OOB */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* potentially OOB access */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 max value is outside of the array range",
+ .result = REJECT
+},
+{
+ "bounds check after right shift of maybe-negative number",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ /* r1 = [0x00, 0xff] */
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ /* r1 = [-0x01, 0xfe] */
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
+ /* r1 = 0 or 0xff'ffff'ffff'ffff */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+ /* r1 = 0 or 0xffff'ffff'ffff */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+ /* computes unknown pointer, potentially OOB */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* potentially OOB access */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 unbounded memory access",
+ .result = REJECT
+},
+{
+ "bounds check after 32-bit right shift with 64-bit input",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ /* r1 = 2 */
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ /* r1 = 1<<32 */
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 31),
+ /* r1 = 0 (NOT 2!) */
+ BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 31),
+ /* r1 = 0xffff'fffe (NOT 0!) */
+ BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 2),
+ /* computes OOB pointer */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* OOB access */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 invalid mem access",
+ .result = REJECT,
+},
+{
+ "bounds check map access with off+size signed 32bit overflow. test1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_JMP_A(0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "map_value pointer and 2147483646",
+ .result = REJECT
+},
+{
+ "bounds check map access with off+size signed 32bit overflow. test2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_JMP_A(0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "pointer offset 1073741822",
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .result = REJECT
+},
+{
+ "bounds check map access with off+size signed 32bit overflow. test3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
+ BPF_JMP_A(0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "pointer offset -1073741822",
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .result = REJECT
+},
+{
+ "bounds check map access with off+size signed 32bit overflow. test4",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_1, 1000000),
+ BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
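+ /* r1 = 10^12, far beyond any valid map value offset */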
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
+ BPF_JMP_A(0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "map_value pointer and 1000000000000",
+ .result = REJECT
+},
diff --git a/tools/testing/selftests/bpf/verifier/bounds_deduction.c b/tools/testing/selftests/bpf/verifier/bounds_deduction.c
new file mode 100644
index 000000000000..1fd07a4f27ac
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/bounds_deduction.c
@@ -0,0 +1,124 @@
+{
+ "check deducing bounds from const, 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+},
+{
+ "check deducing bounds from const, 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
+ BPF_EXIT_INSN(),
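+ /* the two signed checks above pin r0 to exactly 1 */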
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "check deducing bounds from const, 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+},
+{
+ "check deducing bounds from const, 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check deducing bounds from const, 5",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+},
+{
+ "check deducing bounds from const, 6",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+},
+{
+ "check deducing bounds from const, 7",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, ~0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "dereference of modified ctx ptr",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "check deducing bounds from const, 8",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, ~0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "dereference of modified ctx ptr",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "check deducing bounds from const, 9",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+},
+{
+ "check deducing bounds from const, 10",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
+ /* Marks reg as unknown. */
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
+},
diff --git a/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c b/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c
new file mode 100644
index 000000000000..9baca7a75c42
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c
@@ -0,0 +1,406 @@
+{
+ "bounds checks mixing signed and unsigned, positive bounds",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
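+ /* r1 is an unknown scalar; passing the unsigned JGE check and the
+ * signed JSGT check below still leaves its minimum value unbounded
+ */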
+ BPF_MOV64_IMM(BPF_REG_2, 2),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
+ BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
+ BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 4",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 5",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 6",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_6, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R4 min value is negative, either use unsigned",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 7",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 8",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 9",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 10",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 11",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+ /* Dead branch. */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 12",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -6),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 13",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, 2),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_7, 1),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R7 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 14",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_MOV64_IMM(BPF_REG_8, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
+ BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -7),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 15",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -6),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/bpf_get_stack.c b/tools/testing/selftests/bpf/verifier/bpf_get_stack.c
new file mode 100644
index 000000000000..f24d50f09dbe
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/bpf_get_stack.c
@@ -0,0 +1,44 @@
+{
+ "bpf_get_stack return R0 within range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+ BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)),
+ BPF_MOV64_IMM(BPF_REG_4, 256),
+ BPF_EMIT_CALL(BPF_FUNC_get_stack),
+ BPF_MOV64_IMM(BPF_REG_1, 0),
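+ /* sign-extend the 32-bit return value of bpf_get_stack() into r8 */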
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
+ BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_EMIT_CALL(BPF_FUNC_get_stack),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 4 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
new file mode 100644
index 000000000000..4004891afa9c
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -0,0 +1,1942 @@
+{
+ "calls: basic sanity",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .result = ACCEPT,
+},
+{
+ "calls: not on unpriviledged",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "calls: div by 0 in subprog",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(BPF_REG_2, 0),
+ BPF_MOV32_IMM(BPF_REG_3, 1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "calls: multiple ret types in subprog 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'inv'",
+},
+{
+ "calls: multiple ret types in subprog 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
+ offsetof(struct __sk_buff, data)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_8b = { 16 },
+ .result = REJECT,
+ .errstr = "R0 min value is outside of the array range",
+},
+{
+ "calls: overlapping caller/callee",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "last insn is not an exit or jmp",
+ .result = REJECT,
+},
+{
+ "calls: wrong recursive calls",
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 4),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range",
+ .result = REJECT,
+},
+{
+ "calls: wrong src reg",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "BPF_CALL uses reserved fields",
+ .result = REJECT,
+},
+{
+ "calls: wrong off value",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "BPF_CALL uses reserved fields",
+ .result = REJECT,
+},
+{
+ "calls: jump back loop",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge from insn 0 to 0",
+ .result = REJECT,
+},
+{
+ "calls: conditional call",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range",
+ .result = REJECT,
+},
+{
+ "calls: conditional call 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .result = ACCEPT,
+},
+{
+ "calls: conditional call 3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -6),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -6),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge from insn",
+ .result = REJECT,
+},
+{
+ "calls: conditional call 4",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .result = ACCEPT,
+},
+{
+ "calls: conditional call 5",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -6),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge from insn",
+ .result = REJECT,
+},
+{
+ "calls: conditional call 6",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge from insn",
+ .result = REJECT,
+},
+{
+ "calls: using r0 returned by callee",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .result = ACCEPT,
+},
+{
+ "calls: using uninit r0 from callee",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "!read_ok",
+ .result = REJECT,
+},
+{
+ "calls: callee is using r1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_ACT,
+ .result = ACCEPT,
+ .retval = TEST_DATA_LEN,
+},
+{
+ "calls: callee using args1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = POINTER_VALUE,
+},
+{
+ "calls: callee using wrong args2",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "R2 !read_ok",
+ .result = REJECT,
+},
+{
+ "calls: callee using two args",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+ offsetof(struct __sk_buff, len)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
+ offsetof(struct __sk_buff, len)),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
+},
+{
+ "calls: callee changing pkt pointers",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ /* clear_all_pkt_pointers() has to walk all frames
+ * to make sure that pkt pointers in the caller
+ * are cleared when callee is calling a helper that
+ * adjusts packet size
+ */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_head),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R6 invalid mem access 'inv'",
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: two calls with args",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = TEST_DATA_LEN + TEST_DATA_LEN,
+},
+{
+ "calls: calls with stack arith",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "calls: calls with misaligned stack access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+ .errstr = "misaligned stack access",
+ .result = REJECT,
+},
+{
+ "calls: calls control flow, jump test",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 43),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 43,
+},
+{
+ "calls: calls control flow, jump test 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 43),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "jump out of range from insn 1 to 4",
+ .result = REJECT,
+},
+{
+ "calls: two calls with bad jump",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range from insn 11 to 9",
+ .result = REJECT,
+},
+{
+ "calls: recursive call. test1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge",
+ .result = REJECT,
+},
+{
+ "calls: recursive call. test2",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge",
+ .result = REJECT,
+},
+{
+ "calls: unreachable code",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "unreachable insn 6",
+ .result = REJECT,
+},
+{
+ "calls: invalid call",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "invalid destination",
+ .result = REJECT,
+},
+{
+ "calls: invalid call 2",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "invalid destination",
+ .result = REJECT,
+},
+{
+ "calls: jumping across function bodies. test1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range",
+ .result = REJECT,
+},
+{
+ "calls: jumping across function bodies. test2",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range",
+ .result = REJECT,
+},
+{
+ "calls: call without exit",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "not an exit",
+ .result = REJECT,
+},
+{
+ "calls: call into middle of ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "last insn",
+ .result = REJECT,
+},
+{
+ "calls: call into middle of other call",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "last insn",
+ .result = REJECT,
+},
+{
+ "calls: ld_abs with changing ctx data in callee",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
+ .result = REJECT,
+},
+{
+ "calls: two calls with bad fallthrough",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "not an exit",
+ .result = REJECT,
+},
+{
+ "calls: two calls with stack read",
+ .insns = {
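+ /* main prog */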
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
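+
+ /* subprog 1 */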
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
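+
+ /* subprog 2: read from the stack frame of main prog */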
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = ACCEPT,
+},
+{
+ "calls: two calls with stack write",
+ .insns = {
+ /* main prog */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
+ /* write into stack frame of main prog */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* read from stack frame of main prog */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = ACCEPT,
+},
+{
+ "calls: stack overflow using two frames (pre-call access)",
+ .insns = {
+ /* prog 1 */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* prog 2 */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .errstr = "combined stack size",
+ .result = REJECT,
+},
+{
+ "calls: stack overflow using two frames (post-call access)",
+ .insns = {
+ /* prog 1 */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_EXIT_INSN(),
+
+ /* prog 2 */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .errstr = "combined stack size",
+ .result = REJECT,
+},
+{
+ "calls: stack depth check using three frames. test1",
+ .insns = {
+ /* main */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ /* stack_main=32, stack_A=256, stack_B=64
+ * and max(main+A, main+A+B) < 512
+ */
+ .result = ACCEPT,
+},
+{
+ "calls: stack depth check using three frames. test2",
+ .insns = {
+ /* main */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ /* stack_main=32, stack_A=64, stack_B=256
+ * and max(main+A, main+A+B) < 512
+ */
+ .result = ACCEPT,
+},
+{
+ "calls: stack depth check using three frames. test3",
+ .insns = {
+ /* main */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+ /* B */
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ /* stack_main=64, stack_A=224, stack_B=256
+ * and max(main+A, main+A+B) > 512
+ */
+ .errstr = "combined stack",
+ .result = REJECT,
+},
+{
+ "calls: stack depth check using three frames. test4",
+ /* void main(void) {
+ * func1(0);
+ * func1(1);
+ * func2(1);
+ * }
+ * void func1(int alloc_or_recurse) {
+ * if (alloc_or_recurse) {
+ * frame_pointer[-300] = 1;
+ * } else {
+ * func2(alloc_or_recurse);
+ * }
+ * }
+ * void func2(int alloc_or_recurse) {
+ * if (alloc_or_recurse) {
+ * frame_pointer[-300] = 1;
+ * }
+ * }
+ */
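+	/* Both func1 and func2 touch fp-300 on some path, so the verifier
+	 * charges each of them roughly 300 bytes of stack. The statically
+	 * checked chain main -> func1 -> func2 therefore exceeds the
+	 * 512 byte combined stack limit, even though no single runtime
+	 * path allocates in both frames, hence the rejection below.
+	 */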
+ .insns = {
+ /* main */
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = REJECT,
+ .errstr = "combined stack",
+},
+{
+ "calls: stack depth check using three frames. test5",
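+	/* main plus subprogs A-H form a chain of 9 nested call frames,
+	 * which exceeds the verifier's call depth limit of 8 frames,
+	 * hence the "call stack" rejection below.
+	 */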
+ .insns = {
+ /* main */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
+ BPF_EXIT_INSN(),
+ /* C */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
+ BPF_EXIT_INSN(),
+ /* D */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
+ BPF_EXIT_INSN(),
+ /* E */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
+ BPF_EXIT_INSN(),
+ /* F */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
+ BPF_EXIT_INSN(),
+ /* G */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
+ BPF_EXIT_INSN(),
+ /* H */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .errstr = "call stack",
+ .result = REJECT,
+},
+{
+ "calls: spill into caller stack frame",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .errstr = "cannot spill",
+ .result = REJECT,
+},
+{
+ "calls: write into caller stack frame",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "calls: write into callee stack frame",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .errstr = "cannot return stack pointer",
+ .result = REJECT,
+},
+{
+ "calls: two calls with stack write and void return",
+ .insns = {
+ /* main prog */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* write into stack frame of main prog */
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
+ BPF_EXIT_INSN(), /* void return */
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = ACCEPT,
+},
+{
+ "calls: ambiguous return value",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "allowed for root only",
+ .result_unpriv = REJECT,
+ .errstr = "R0 !read_ok",
+ .result = REJECT,
+},
+{
+ "calls: two calls that return map_value",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+	/* fetch second map_value_ptr from the stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ /* call 3rd function twice */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* first time with fp-8 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ /* second time with fp-16 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ /* lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr into stack frame of main prog */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(), /* return 0 */
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .fixup_map_hash_8b = { 23 },
+ .result = ACCEPT,
+},
+{
+ "calls: two calls that return map_value with bool condition",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ /* call 3rd function twice */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* first time with fp-8 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ /* second time with fp-16 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+	/* fetch second map_value_ptr from the stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ /* lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(), /* return 0 */
+ /* write map_value_ptr into stack frame of main prog */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(), /* return 1 */
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .fixup_map_hash_8b = { 23 },
+ .result = ACCEPT,
+},
+{
+ "calls: two calls that return map_value with incorrect bool check",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ /* call 3rd function twice */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* first time with fp-8 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ /* second time with fp-16 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+	/* fetch second map_value_ptr from the stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ /* lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(), /* return 0 */
+ /* write map_value_ptr into stack frame of main prog */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(), /* return 1 */
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .fixup_map_hash_8b = { 23 },
+ .result = REJECT,
+ .errstr = "invalid read from stack off -16+0 size 8",
+},
+{
+ "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 1 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_8b = { 12, 22 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=8 off=2 size=8",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 1 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_8b = { 12, 22 },
+ .result = ACCEPT,
+},
+{
+ "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+	BPF_MOV64_IMM(BPF_REG_9, 0), /* 26 */
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), /* 34 */
+ BPF_JMP_IMM(BPF_JA, 0, 0, -30),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 1 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -8),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_8b = { 12, 22 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=8 off=2 size=8",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: two calls that receive map_value_ptr_or_null via arg. test1",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 1 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_8b = { 12, 22 },
+ .result = ACCEPT,
+},
+{
+ "calls: two calls that receive map_value_ptr_or_null via arg. test2",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 0 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_8b = { 12, 22 },
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'inv'",
+},
+{
+ "calls: pkt_ptr spill into caller stack",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ /* now the pkt range is verified, read pkt_ptr from stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = POINTER_VALUE,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+	/* Marking is still kept, but it is not safe in all cases. */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ /* now the pkt range is verified, read pkt_ptr from stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ /* Marking is still kept and safe here. */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* now the pkt range is verified, read pkt_ptr from stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 4",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ /* Check marking propagated. */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ /* spill checked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "same insn cannot be used with different",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 6",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ /* spill checked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "R4 invalid mem access",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 7",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ /* spill checked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "R4 invalid mem access",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 8",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ /* spill checked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 9",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: caller stack init to zero or map_value_or_null",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ /* fetch map_value_or_null or const_zero from stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ /* store into map_value */
+ BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ /* if (ctx == 0) return; */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
+ /* else bpf_map_lookup() and *(fp - 8) = r0 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 13 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "calls: stack init to zero and pruning",
+ .insns = {
+	/* first make allocated_stack 16 bytes */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+	/* now fork the execution such that the false branch
+	 * of the JGT insn will be verified second and it skips the
+	 * zero init of the fp-8 stack slot. If stack liveness marking
+	 * is missing live_read marks from the map_lookup call
+	 * processing, then pruning will incorrectly assume that
+	 * the fp-8 stack slot was unused in the fall-through branch
+	 * and will accept the program incorrectly.
+	 */
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 6 },
+ .errstr = "invalid indirect read from stack off -8+0 size 8",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "calls: ctx read at start of subprog",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+ BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "calls: cross frame pruning",
+ .insns = {
+ /* r8 = !!random();
+ * call pruner()
+ * if (r8)
+ * do something bad;
+ */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .errstr = "!read_ok",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/cfg.c b/tools/testing/selftests/bpf/verifier/cfg.c
new file mode 100644
index 000000000000..349c0862fb4c
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/cfg.c
@@ -0,0 +1,70 @@
+{
+ "unreachable",
+ .insns = {
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unreachable",
+ .result = REJECT,
+},
+{
+ "unreachable2",
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unreachable",
+ .result = REJECT,
+},
+{
+ "out of range jump",
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "jump out of range",
+ .result = REJECT,
+},
+{
+ "out of range jump2",
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, -2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "jump out of range",
+ .result = REJECT,
+},
+{
+ "loop (back-edge)",
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "back-edge",
+ .result = REJECT,
+},
+{
+ "loop2 (back-edge)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "back-edge",
+ .result = REJECT,
+},
+{
+ "conditional loop",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "back-edge",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c b/tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c
new file mode 100644
index 000000000000..6d65fe3e7321
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c
@@ -0,0 +1,72 @@
+{
+ "bpf_exit with invalid return code. test1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 has value (0x0; 0xffffffff)",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "bpf_exit with invalid return code. test2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "bpf_exit with invalid return code. test3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 has value (0x0; 0x3)",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "bpf_exit with invalid return code. test4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "bpf_exit with invalid return code. test5",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 has value (0x2; 0x0)",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "bpf_exit with invalid return code. test6",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 is not a known value (ctx)",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "bpf_exit with invalid return code. test7",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 has unknown scalar value",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
diff --git a/tools/testing/selftests/bpf/verifier/cgroup_skb.c b/tools/testing/selftests/bpf/verifier/cgroup_skb.c
new file mode 100644
index 000000000000..52e4c03b076b
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/cgroup_skb.c
@@ -0,0 +1,197 @@
+{
+ "direct packet read test#1 for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, pkt_type)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, queue_mapping)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
+ offsetof(struct __sk_buff, protocol)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ offsetof(struct __sk_buff, vlan_present)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "invalid bpf_context access off=76 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "direct packet read test#2 for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct __sk_buff, vlan_tci)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, vlan_proto)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, priority)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+ offsetof(struct __sk_buff, priority)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, ingress_ifindex)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ offsetof(struct __sk_buff, hash)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "direct packet read test#3 for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ offsetof(struct __sk_buff, napi_id)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_4,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_5,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_8,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "direct packet read test#4 for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, family)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip4)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip4)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[3])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[3])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_port)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
+ offsetof(struct __sk_buff, local_port)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid access of tc_classid for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid access of data_meta for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data_meta)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid access of flow_keys for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, flow_keys)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid write access to napi_id for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ offsetof(struct __sk_buff, napi_id)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_9,
+ offsetof(struct __sk_buff, napi_id)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "write tstamp from CGROUP_SKB",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, tstamp)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "invalid bpf_context access off=152 size=8",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "read tstamp from CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tstamp)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
diff --git a/tools/testing/selftests/bpf/verifier/cgroup_storage.c b/tools/testing/selftests/bpf/verifier/cgroup_storage.c
new file mode 100644
index 000000000000..97057c0a1b8a
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/cgroup_storage.c
@@ -0,0 +1,220 @@
+{
+ "valid cgroup storage access",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid cgroup storage access 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid cgroup storage access 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "fd 1 is not pointing to valid bpf_map",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid cgroup storage access 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=64 off=256 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid cgroup storage access 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid cgroup storage access 5",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 7),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "get_local_storage() doesn't support non-zero flags",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid cgroup storage access 6",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "get_local_storage() doesn't support non-zero flags",
+ .errstr_unpriv = "R2 leaks addr into helper function",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "valid per-cpu cgroup storage access",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_percpu_cgroup_storage = { 1 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid per-cpu cgroup storage access 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid per-cpu cgroup storage access 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "fd 1 is not pointing to valid bpf_map",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid per-cpu cgroup storage access 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_percpu_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=64 off=256 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid per-cpu cgroup storage access 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid per-cpu cgroup storage access 5",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 7),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_percpu_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "get_local_storage() doesn't support non-zero flags",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid per-cpu cgroup storage access 6",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_percpu_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "get_local_storage() doesn't support non-zero flags",
+ .errstr_unpriv = "R2 leaks addr into helper function",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
diff --git a/tools/testing/selftests/bpf/verifier/const_or.c b/tools/testing/selftests/bpf/verifier/const_or.c
new file mode 100644
index 000000000000..84446dfc7c1d
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/const_or.c
@@ -0,0 +1,60 @@
+{
+ "constant register |= constant should keep constant type",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
+ BPF_MOV64_IMM(BPF_REG_2, 34),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "constant register |= constant should not bypass stack boundary checks",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
+ BPF_MOV64_IMM(BPF_REG_2, 34),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack type R1 off=-48 access_size=58",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "constant register |= constant register should keep constant type",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
+ BPF_MOV64_IMM(BPF_REG_2, 34),
+ BPF_MOV64_IMM(BPF_REG_4, 13),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "constant register |= constant register should not bypass stack boundary checks",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
+ BPF_MOV64_IMM(BPF_REG_2, 34),
+ BPF_MOV64_IMM(BPF_REG_4, 24),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack type R1 off=-48 access_size=58",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/ctx.c b/tools/testing/selftests/bpf/verifier/ctx.c
new file mode 100644
index 000000000000..92762c08f5e3
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ctx.c
@@ -0,0 +1,93 @@
+{
+ "context stores via ST",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "BPF_ST stores into R1 ctx is not allowed",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "context stores via XADD",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
+ BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "BPF_XADD stores into R1 ctx is not allowed",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "arithmetic ops make PTR_TO_CTX unusable",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
+ offsetof(struct __sk_buff, data) -
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "dereference of modified ctx ptr",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "pass unmodified ctx pointer to helper",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_csum_update),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "pass modified ctx pointer to helper, 1",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_csum_update),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "dereference of modified ctx ptr",
+},
+{
+ "pass modified ctx pointer to helper, 2",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_socket_cookie),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result_unpriv = REJECT,
+ .result = REJECT,
+ .errstr_unpriv = "dereference of modified ctx ptr",
+ .errstr = "dereference of modified ctx ptr",
+},
+{
+ "pass modified ctx pointer to helper, 3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_csum_update),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "variable ctx access var_off=(0x0; 0x4)",
+},
diff --git a/tools/testing/selftests/bpf/verifier/ctx_sk_msg.c b/tools/testing/selftests/bpf/verifier/ctx_sk_msg.c
new file mode 100644
index 000000000000..c6c69220a569
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ctx_sk_msg.c
@@ -0,0 +1,181 @@
+{
+ "valid access family in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, family)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "valid access remote_ip4 in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, remote_ip4)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "valid access local_ip4 in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, local_ip4)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "valid access remote_port in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, remote_port)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "valid access local_port in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, local_port)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "valid access remote_ip6 in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, remote_ip6[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, remote_ip6[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, remote_ip6[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, remote_ip6[3])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "valid access local_ip6 in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, local_ip6[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, local_ip6[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, local_ip6[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, local_ip6[3])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "valid access size in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, size)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "invalid 64B read of size in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
+ offsetof(struct sk_msg_md, size)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid read past end of SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct sk_msg_md, size) + 4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "invalid read offset in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct sk_msg_md, family) + 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "direct packet read for SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
+ offsetof(struct sk_msg_md, data)),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
+ offsetof(struct sk_msg_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "direct packet write for SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
+ offsetof(struct sk_msg_md, data)),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
+ offsetof(struct sk_msg_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "overlapping checks for direct packet access SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
+ offsetof(struct sk_msg_md, data)),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
+ offsetof(struct sk_msg_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
diff --git a/tools/testing/selftests/bpf/verifier/ctx_skb.c b/tools/testing/selftests/bpf/verifier/ctx_skb.c
new file mode 100644
index 000000000000..c660deb582f1
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ctx_skb.c
@@ -0,0 +1,1034 @@
+{
+ "access skb fields ok",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, pkt_type)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, queue_mapping)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, protocol)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, vlan_present)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, vlan_tci)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, napi_id)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "access skb fields bad1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "access skb fields bad2",
+ .insns = {
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, pkt_type)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "different pointers",
+ .errstr_unpriv = "R1 pointer comparison",
+ .result = REJECT,
+},
+{
+ "access skb fields bad3",
+ .insns = {
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, pkt_type)),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -12),
+ },
+ .fixup_map_hash_8b = { 6 },
+ .errstr = "different pointers",
+ .errstr_unpriv = "R1 pointer comparison",
+ .result = REJECT,
+},
+{
+ "access skb fields bad4",
+ .insns = {
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -13),
+ },
+ .fixup_map_hash_8b = { 7 },
+ .errstr = "different pointers",
+ .errstr_unpriv = "R1 pointer comparison",
+ .result = REJECT,
+},
+{
+ "invalid access __sk_buff family",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, family)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "invalid access __sk_buff remote_ip4",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip4)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "invalid access __sk_buff local_ip4",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip4)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "invalid access __sk_buff remote_ip6",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "invalid access __sk_buff local_ip6",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "invalid access __sk_buff remote_port",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_port)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+	"invalid access __sk_buff local_port",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_port)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "valid access __sk_buff family",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, family)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "valid access __sk_buff remote_ip4",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip4)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "valid access __sk_buff local_ip4",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip4)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "valid access __sk_buff remote_ip6",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[3])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "valid access __sk_buff local_ip6",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[3])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "valid access __sk_buff remote_port",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_port)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+	"valid access __sk_buff local_port",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_port)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "invalid access of tc_classid for SK_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+ .errstr = "invalid bpf_context access",
+},
+{
+ "invalid access of skb->mark for SK_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+ .errstr = "invalid bpf_context access",
+},
+{
+ "check skb->mark is not writeable by SK_SKB",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+ .errstr = "invalid bpf_context access",
+},
+{
+ "check skb->tc_index is writeable by SK_SKB",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "check skb->priority is writeable by SK_SKB",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, priority)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "direct packet read for SK_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "direct packet write for SK_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "overlapping checks for direct packet access SK_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "check skb->mark is not writeable by sockets",
+ .insns = {
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .errstr_unpriv = "R1 leaks addr",
+ .result = REJECT,
+},
+{
+ "check skb->tc_index is not writeable by sockets",
+ .insns = {
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .errstr_unpriv = "R1 leaks addr",
+ .result = REJECT,
+},
+{
+ "check cb access: byte",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 3),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1]) + 3),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2]) + 3),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3]) + 3),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 3),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "__sk_buff->hash, offset 0, byte store not permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, hash)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "__sk_buff->tc_index, offset 3, byte store not permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, tc_index) + 3),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check skb->hash byte load permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash)),
+#else
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 3),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check skb->hash byte load permitted 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check skb->hash byte load permitted 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check skb->hash byte load permitted 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 3),
+#else
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash)),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check cb access: byte, wrong type",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "check cb access: half",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1]) + 2),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2]) + 2),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3]) + 2),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check cb access: half, unaligned",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned context access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "check __sk_buff->hash, offset 0, half store not permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, hash)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check __sk_buff->tc_index, offset 2, half store not permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, tc_index) + 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check skb->hash half load permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash)),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 2),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check skb->hash half load permitted 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 2),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash)),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check skb->hash half load not permitted, unaligned 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 1),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 3),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "check skb->hash half load not permitted, unaligned 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 3),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 1),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "check cb access: half, wrong type",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "check cb access: word",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check cb access: word, unaligned 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned context access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "check cb access: word, unaligned 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned context access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "check cb access: word, unaligned 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned context access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "check cb access: word, unaligned 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 3),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned context access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "check cb access: double",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check cb access: double, unaligned 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned context access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "check cb access: double, unaligned 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned context access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "check cb access: double, oob 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check cb access: double, oob 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check __sk_buff->ifindex dw store not permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, ifindex)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check __sk_buff->ifindex dw load not permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, ifindex)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check cb access: double, wrong type",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "check out of range skb->cb access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) + 256),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .errstr_unpriv = "",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_ACT,
+},
+{
+ "write skb fields from socket prog",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .errstr_unpriv = "R1 leaks addr",
+ .result_unpriv = REJECT,
+},
+{
+ "write skb fields from tc_cls_act prog",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tstamp)),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, tstamp)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "check skb->data half load not permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data) + 2),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+},
+{
+ "read gso_segs from CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, gso_segs)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "write gso_segs from CGROUP_SKB",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, gso_segs)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ .errstr = "invalid bpf_context access off=164 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "read gso_segs from CLS",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, gso_segs)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "check wire_len is not readable by sockets",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, wire_len)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check wire_len is readable by tc classifier",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, wire_len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "check wire_len is not writable by tc classifier",
+ .insns = {
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, wire_len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "invalid bpf_context access",
+ .errstr_unpriv = "R1 leaks addr",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/dead_code.c b/tools/testing/selftests/bpf/verifier/dead_code.c
new file mode 100644
index 000000000000..50a8a63be4ac
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/dead_code.c
@@ -0,0 +1,159 @@
+{
+ "dead code: start",
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, -4),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: mid 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: mid 2",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 4),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "dead code: end 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: end 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 12),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: end 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 12),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -5),
+ },
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: tail of main + func",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 12),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: tail of main + two functions",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 12),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: function in the middle and mid of another func",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 12),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 7, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -5),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: middle of main before call",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 2, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "dead code: start of a function",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 2,
+},
diff --git a/tools/testing/selftests/bpf/verifier/direct_packet_access.c b/tools/testing/selftests/bpf/verifier/direct_packet_access.c
new file mode 100644
index 000000000000..e3fc22e672c2
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/direct_packet_access.c
@@ -0,0 +1,633 @@
+{
+ "pkt_end - pkt_start is allowed",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = TEST_DATA_LEN,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
+ BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
+ BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access off=76",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+},
+{
+ "direct packet access: test4 (write)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test5 (pkt_end >= reg, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test6 (pkt_end >= reg, bad access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test7 (pkt_end >= reg, both accesses)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test8 (double test, variant 1)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test9 (double test, variant 2)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test10 (write invalid)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test11 (shift, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 144),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
+},
+{
+ "direct packet access: test12 (and, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 144),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
+},
+{
+ "direct packet access: test13 (branches, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_MOV64_IMM(BPF_REG_4, 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 14),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 24),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
+},
+{
+ "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
+ BPF_MOV64_IMM(BPF_REG_5, 12),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
+},
+{
+ "direct packet access: test15 (spill with xadd)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 4096),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R2 invalid mem access 'inv'",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "direct packet access: test16 (arith on data_end)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R3 pointer arithmetic on pkt_end",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test17 (pruning, alignment)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_JMP_A(-6),
+ },
+ .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "direct packet access: test18 (imm += pkt_ptr, 1)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_IMM(BPF_REG_0, 8),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test19 (imm += pkt_ptr, 2)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ BPF_MOV64_IMM(BPF_REG_4, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
+ BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test20 (x += pkt_ptr, 1)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "direct packet access: test21 (x += pkt_ptr, 2)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
+ BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "direct packet access: test22 (x += pkt_ptr, 3)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
+ BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
+ BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "direct packet access: test23 (x += pkt_ptr, 4)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_0, 31),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "direct packet access: test24 (x += pkt_ptr, 5)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_0, 64),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "direct packet access: test25 (marking on <, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -4),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test26 (marking on <, bad access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test27 (marking on <=, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
+},
+{
+ "direct packet access: test28 (marking on <=, bad access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -4),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c b/tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c
new file mode 100644
index 000000000000..698e3779fdd2
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c
@@ -0,0 +1,40 @@
+{
+ "direct stack access with 32-bit wraparound. test1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "fp pointer and 2147483647",
+ .result = REJECT
+},
+{
+ "direct stack access with 32-bit wraparound. test2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "fp pointer and 1073741823",
+ .result = REJECT
+},
+{
+ "direct stack access with 32-bit wraparound. test3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "fp pointer offset 1073741822",
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .result = REJECT
+},
diff --git a/tools/testing/selftests/bpf/verifier/div0.c b/tools/testing/selftests/bpf/verifier/div0.c
new file mode 100644
index 000000000000..7685edfbcf71
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/div0.c
@@ -0,0 +1,184 @@
+{
+ "DIV32 by 0, zero check 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "DIV32 by 0, zero check 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "DIV64 by 0, zero check",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "MOD32 by 0, zero check 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "MOD32 by 0, zero check 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "MOD64 by 0, zero check",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "DIV32 by 0, zero check ok, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 2),
+ BPF_MOV32_IMM(BPF_REG_2, 16),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 8,
+},
+{
+ "DIV32 by 0, zero check 1, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "DIV32 by 0, zero check 2, cls",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "DIV64 by 0, zero check, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "MOD32 by 0, zero check ok, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 3),
+ BPF_MOV32_IMM(BPF_REG_2, 5),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "MOD32 by 0, zero check 1, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "MOD32 by 0, zero check 2, cls",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "MOD64 by 0, zero check 1, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 2),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "MOD64 by 0, zero check 2, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, -1),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = -1,
+},
diff --git a/tools/testing/selftests/bpf/verifier/div_overflow.c b/tools/testing/selftests/bpf/verifier/div_overflow.c
new file mode 100644
index 000000000000..bd3f38dbe796
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/div_overflow.c
@@ -0,0 +1,104 @@
+/* Just make sure that JITs use udiv/umod, as otherwise we would get
+ * an exception from INT_MIN/-1 overflow, similar to div
+ * by zero.
+ */
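+/* A brief worked illustration of the expectation (assuming an x86-style
+ * JIT): idiv raises the same #DE exception for INT_MIN/-1 as it does for
+ * division by zero, because the quotient 2^31 does not fit in a signed
+ * 32-bit register. With unsigned udiv/umod the operands are simply
+ * reinterpreted, so 0x80000000 / 0xffffffff = 0 and
+ * 0x80000000 % 0xffffffff = 0x80000000, which is exactly what the
+ * expected retvals in the tests below check for.
+ */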
+{
+ "DIV32 overflow, check 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, -1),
+ BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "DIV32 overflow, check 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
+ BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "DIV64 overflow, check 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
+ BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "DIV64 overflow, check 2",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
+ BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "MOD32 overflow, check 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, -1),
+ BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = INT_MIN,
+},
+{
+ "MOD32 overflow, check 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
+ BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = INT_MIN,
+},
+{
+ "MOD64 overflow, check 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "MOD64 overflow, check 2",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
diff --git a/tools/testing/selftests/bpf/verifier/helper_access_var_len.c b/tools/testing/selftests/bpf/verifier/helper_access_var_len.c
new file mode 100644
index 000000000000..1f39d845c64f
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/helper_access_var_len.c
@@ -0,0 +1,614 @@
+{
+ "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, bitwise AND, zero included",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid indirect read from stack off -64+0 size 64",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack type R1 off=-64 access_size=65",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, JMP, correct bounds",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, JMP (signed), correct bounds",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, JMP, bounds + offset",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack type R1 off=-64 access_size=65",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, JMP, wrong max",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack type R1 off=-64 access_size=65",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, JMP, no max check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ /* because max wasn't checked, signed min is negative */
+ .errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, JMP, no min check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid indirect read from stack off -64+0 size 64",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, JMP (signed), no min check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R2 min value is negative",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: map, JMP, correct bounds",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val), 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: map, JMP, wrong max",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) + 1, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=0 size=49",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: map adjusted, JMP, correct bounds",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) - 20, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: map adjusted, JMP, wrong max",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) - 19, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R1 min value is outside of the array range",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 type=inv expected=fp",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 0 /* csum_diff of 64-byte packet */,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 type=inv expected=fp",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 type=inv expected=fp",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: 8 bytes leak",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid indirect read from stack off -64+32 size 64",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: 8 bytes no leak (init memory)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/helper_packet_access.c b/tools/testing/selftests/bpf/verifier/helper_packet_access.c
new file mode 100644
index 000000000000..ae54587e9829
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/helper_packet_access.c
@@ -0,0 +1,460 @@
+{
+ "helper access to packet: test1, valid packet_ptr range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 5 },
+ .result_unpriv = ACCEPT,
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "helper access to packet: test2, unchecked packet_ptr",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "helper access to packet: test3, variable add",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
+ BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 11 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "helper access to packet: test4, packet_ptr with bad range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 7 },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "helper access to packet: test5, packet_ptr with too short range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 6 },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "helper access to packet: test6, cls valid packet_ptr range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 5 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test7, cls unchecked packet_ptr",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test8, cls variable add",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
+ BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 11 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test9, cls packet_ptr with bad range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 7 },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test10, cls packet_ptr with too short range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 6 },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test11, cls unsuitable helper 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 42),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_store_bytes),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "helper access to the packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test12, cls unsuitable helper 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "helper access to the packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test13, cls helper ok",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test14, cls helper ok sub",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test15, cls helper fail sub",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test16, cls helper fail range 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test17, cls helper fail range 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, -9),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R2 min value is negative",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test18, cls helper fail range 3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, ~0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R2 min value is negative",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test19, cls helper range zero",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test20, pkt end as input",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R1 type=pkt_end expected=fp",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test21, wrong reg",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/helper_value_access.c b/tools/testing/selftests/bpf/verifier/helper_value_access.c
new file mode 100644
index 000000000000..7572e403ddb9
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/helper_value_access.c
@@ -0,0 +1,953 @@
+{
+ "helper access to map: full range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: partial range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: empty range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_trace_printk),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=0 size=0",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: out-of-bound range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=0 size=56",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: negative range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, -8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R2 min value is negative",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const imm): full range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) - offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const imm): partial range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const imm): empty range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_trace_printk),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=4 size=0",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const imm): out-of-bound range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) - offsetof(struct test_val, foo) + 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=4 size=52",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const imm): negative range (> adjustment)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2, -8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R2 min value is negative",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const imm): negative range (< adjustment)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R2 min value is negative",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const reg): full range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) - offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const reg): partial range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const reg): empty range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_trace_printk),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R1 min value is outside of the array range",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const reg): out-of-bound range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) -
+ offsetof(struct test_val, foo) + 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=4 size=52",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const reg): negative range (> adjustment)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, -8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R2 min value is negative",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const reg): negative range (< adjustment)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R2 min value is negative",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via variable): full range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct test_val, foo), 4),
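+	/* fall-through means r3 <= offsetof(foo), so r1 + r3 plus the remaining length stays inside the 48-byte value */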
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) - offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via variable): partial range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct test_val, foo), 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via variable): empty range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct test_val, foo), 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_trace_printk),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R1 min value is outside of the array range",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via variable): no max check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R1 unbounded memory access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via variable): wrong max check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct test_val, foo), 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) -
+ offsetof(struct test_val, foo) + 1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=4 size=45",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using <, good access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using <, bad access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 unbounded memory access",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using <=, good access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using <=, bad access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 unbounded memory access",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using s<, good access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
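+	/* reaching here means 0 <= r3 < 32 (signed), so the byte store below stays in bounds */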
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using s<, good access 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using s<, bad access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 min value is negative",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using s<=, good access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using s<=, good access 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using s<=, bad access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 min value is negative",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map lookup helper access to map",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 8 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map update helper access to map",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 10 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map update helper access to map: wrong size",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
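+	/* r2/r3 point into the 8-byte value, but update on the 16-byte map needs 16 readable bytes */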
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .fixup_map_hash_16b = { 10 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=8 off=0 size=16",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via const imm)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, offsetof(struct other_val, bar)),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 9 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via const imm): out-of-bound 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, sizeof(struct other_val) - 4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 9 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=16 off=12 size=8",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via const imm): out-of-bound 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 9 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via const reg)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, offsetof(struct other_val, bar)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 10 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via const reg): out-of-bound 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, sizeof(struct other_val) - 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 10 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=16 off=12 size=8",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via const reg): out-of-bound 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, -4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 10 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via variable)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct other_val, bar), 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 11 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via variable): no max check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 10 },
+ .result = REJECT,
+ .errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via variable): wrong max check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct other_val, bar) + 1, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 11 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=16 off=9 size=8",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/jit.c b/tools/testing/selftests/bpf/verifier/jit.c
new file mode 100644
index 000000000000..be488b4495a3
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/jit.c
@@ -0,0 +1,88 @@
+{
+ "jit: lsh, rsh, arsh by 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 0xff),
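+	/* 0xff shifted left twice must read back as 0x3fc, then as 0xff and 0x7f on the way back down */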
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
+ BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
+ BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jit: mov32 for ldimm64, 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 2),
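+	/* the upper 32 bits of the immediate must survive: 0xfeffffffffffffff >> 32 == 0xfeffffff */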
+ BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
+ BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jit: mov32 for ldimm64, 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
+ BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jit: various mul tests",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
+ BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
+ BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
+ BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
+ BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
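+	/* repeat the checks with 32-bit multiplies against the truncated expected value */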
+ BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
+ BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
+ BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
+ BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
+ BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
+ BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
+ BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
diff --git a/tools/testing/selftests/bpf/verifier/jmp32.c b/tools/testing/selftests/bpf/verifier/jmp32.c
new file mode 100644
index 000000000000..f0961c58581e
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/jmp32.c
@@ -0,0 +1,746 @@
+{
+ "jset32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ /* reg, high bits shouldn't be tested */
+ BPF_JMP32_IMM(BPF_JSET, BPF_REG_7, -2, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_EXIT_INSN(),
+
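+	/* imm, low word bit 0 decides whether we fall through to return 2 */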
+ BPF_JMP32_IMM(BPF_JSET, BPF_REG_7, 1, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { 1ULL << 63, }
+ },
+ { .retval = 2,
+ .data64 = { 1, }
+ },
+ { .retval = 2,
+ .data64 = { 1ULL << 63 | 1, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jset32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_LD_IMM64(BPF_REG_8, 0x8000000000000000),
+ BPF_JMP32_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_EXIT_INSN(),
+
+ BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001),
+ BPF_JMP32_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { 1ULL << 63, }
+ },
+ { .retval = 2,
+ .data64 = { 1, }
+ },
+ { .retval = 2,
+ .data64 = { 1ULL << 63 | 1, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jset32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_MOV64_IMM(BPF_REG_0, 0),
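+	/* a taken JSET32 on 0x10 implies low32(r7) >= 0x10, so the JGE32 below always skips the bad load */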
+ BPF_JMP32_IMM(BPF_JSET, BPF_REG_7, 0x10, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP32_IMM(BPF_JGE, BPF_REG_7, 0x10, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "jeq32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JEQ, BPF_REG_7, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 2,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { -2, }
+ },
+ { .retval = 2,
+ .data64 = { -1, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jeq32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_LD_IMM64(BPF_REG_8, 0x7000000000000001),
+ BPF_JMP32_REG(BPF_JEQ, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { 2, }
+ },
+ { .retval = 2,
+ .data64 = { 1, }
+ },
+ { .retval = 2,
+ .data64 = { 1ULL << 63 | 1, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jeq32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_MOV64_IMM(BPF_REG_0, 0),
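+	/* a taken JEQ32 pins low32(r7) to 0x10, so the signed JSGE32 against 0xf always skips the bad load */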
+ BPF_JMP32_IMM(BPF_JEQ, BPF_REG_7, 0x10, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP32_IMM(BPF_JSGE, BPF_REG_7, 0xf, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "jne32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JNE, BPF_REG_7, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 2,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { 1, }
+ },
+ { .retval = 0,
+ .data64 = { -1, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jne32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001),
+ BPF_JMP32_REG(BPF_JNE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { 1, }
+ },
+ { .retval = 2,
+ .data64 = { 2, }
+ },
+ { .retval = 2,
+ .data64 = { 1ULL << 63 | 2, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jne32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_MOV64_IMM(BPF_REG_0, 0),
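+	/* falling through JNE32 means low32(r7) == 0x10; with r7 zero-extended the 64-bit JNE never reaches the bad load */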
+ BPF_JMP32_IMM(BPF_JNE, BPF_REG_7, 0x10, 1),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x10, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "jge32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JGE, BPF_REG_7, UINT_MAX - 1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { UINT_MAX, }
+ },
+ { .retval = 2,
+ .data64 = { UINT_MAX - 1, }
+ },
+ { .retval = 0,
+ .data64 = { 0, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jge32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, UINT_MAX | 1ULL << 32),
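+	/* only the low word of r8 (UINT_MAX) takes part in the 32-bit compare; the bit above 31 is ignored */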
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JGE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { UINT_MAX, }
+ },
+ { .retval = 0,
+ .data64 = { INT_MAX, }
+ },
+ { .retval = 0,
+ .data64 = { (UINT_MAX - 1) | 2ULL << 32, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jge32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JGE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP32_IMM(BPF_JGE, BPF_REG_7, 0x7ffffff0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jgt32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JGT, BPF_REG_7, UINT_MAX - 1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { UINT_MAX, }
+ },
+ { .retval = 0,
+ .data64 = { UINT_MAX - 1, }
+ },
+ { .retval = 0,
+ .data64 = { 0, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jgt32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, (UINT_MAX - 1) | 1ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JGT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { UINT_MAX, }
+ },
+ { .retval = 0,
+ .data64 = { UINT_MAX - 1, }
+ },
+ { .retval = 0,
+ .data64 = { (UINT_MAX - 1) | 2ULL << 32, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jgt32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JGT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 0x7ffffff0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jle32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JLE, BPF_REG_7, INT_MAX, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { INT_MAX - 1, }
+ },
+ { .retval = 0,
+ .data64 = { UINT_MAX, }
+ },
+ { .retval = 2,
+ .data64 = { INT_MAX, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jle32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, (INT_MAX - 1) | 2ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JLE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { INT_MAX | 1ULL << 32, }
+ },
+ { .retval = 2,
+ .data64 = { INT_MAX - 2, }
+ },
+ { .retval = 0,
+ .data64 = { UINT_MAX, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jle32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JLE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP32_IMM(BPF_JLE, BPF_REG_7, 0x7ffffff0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jlt32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JLT, BPF_REG_7, INT_MAX, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { INT_MAX, }
+ },
+ { .retval = 0,
+ .data64 = { UINT_MAX, }
+ },
+ { .retval = 2,
+ .data64 = { INT_MAX - 1, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jlt32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, INT_MAX | 2ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JLT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { INT_MAX | 1ULL << 32, }
+ },
+ { .retval = 0,
+ .data64 = { UINT_MAX, }
+ },
+ { .retval = 2,
+ .data64 = { (INT_MAX - 1) | 3ULL << 32, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jlt32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JLT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0x7ffffff0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jsge32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JSGE, BPF_REG_7, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { 0, }
+ },
+ { .retval = 2,
+ .data64 = { -1, }
+ },
+ { .retval = 0,
+ .data64 = { -2, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jsge32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, (__u32)-1 | 2ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JSGE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { -1, }
+ },
+ { .retval = 2,
+ .data64 = { 0x7fffffff | 1ULL << 32, }
+ },
+ { .retval = 0,
+ .data64 = { -2, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jsge32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JSGE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0x7ffffff0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jsgt32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JSGT, BPF_REG_7, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { (__u32)-2, }
+ },
+ { .retval = 0,
+ .data64 = { -1, }
+ },
+ { .retval = 2,
+ .data64 = { 1, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jsgt32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffffe | 1ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JSGT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { 0x7ffffffe, }
+ },
+ { .retval = 0,
+ .data64 = { 0x1ffffffffULL, }
+ },
+ { .retval = 2,
+ .data64 = { 0x7fffffff, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jsgt32: min/max deduction",
+ .insns = {
+ BPF_RAND_SEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, (__u32)(-2) | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JSGT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, -2, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jsle32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JSLE, BPF_REG_7, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { (__u32)-2, }
+ },
+ { .retval = 2,
+ .data64 = { -1, }
+ },
+ { .retval = 0,
+ .data64 = { 1, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jsle32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffffe | 1ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JSLE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { 0x7ffffffe, }
+ },
+ { .retval = 2,
+ .data64 = { (__u32)-1, }
+ },
+ { .retval = 0,
+ .data64 = { 0x7fffffff | 2ULL << 32, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jsle32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JSLE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_7, 0x7ffffff0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jslt32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JSLT, BPF_REG_7, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { (__u32)-2, }
+ },
+ { .retval = 0,
+ .data64 = { -1, }
+ },
+ { .retval = 0,
+ .data64 = { 1, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jslt32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, 0x7fffffff | 1ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JSLT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { 0x7ffffffe, }
+ },
+ { .retval = 2,
+ .data64 = { 0xffffffff, }
+ },
+ { .retval = 0,
+ .data64 = { 0x7fffffff | 2ULL << 32, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jslt32: min/max deduction",
+ .insns = {
+ BPF_RAND_SEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, (__u32)(-1) | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JSLT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP32_IMM(BPF_JSLT, BPF_REG_7, -1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
diff --git a/tools/testing/selftests/bpf/verifier/jset.c b/tools/testing/selftests/bpf/verifier/jset.c
new file mode 100644
index 000000000000..8dcd4e0383d5
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/jset.c
@@ -0,0 +1,167 @@
+{
+ "jset: functional",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+
+ /* reg, bit 63 or bit 0 set, taken */
+ BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001),
+ BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+
+ /* reg, bit 62, not taken */
+ BPF_LD_IMM64(BPF_REG_8, 0x4000000000000000),
+ BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* imm, any bit set, taken */
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_7, -1, 1),
+ BPF_EXIT_INSN(),
+
+ /* imm, bit 31 set, taken */
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
+ BPF_EXIT_INSN(),
+
+ /* all good - return r0 == 2 */
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 7,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { (1ULL << 63) | (1U << 31) | (1U << 0), }
+ },
+ { .retval = 2,
+ .data64 = { (1ULL << 63) | (1U << 31), }
+ },
+ { .retval = 2,
+ .data64 = { (1ULL << 31) | (1U << 0), }
+ },
+ { .retval = 2,
+ .data64 = { (__u32)-1, }
+ },
+ { .retval = 2,
+ .data64 = { ~0x4000000000000000ULL, }
+ },
+ { .retval = 0,
+ .data64 = { 0, }
+ },
+ { .retval = 0,
+ .data64 = { ~0ULL, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jset: sign-extend",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+
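+	/* the 32-bit imm is sign-extended for BPF_JMP, so a bit set in the upper packet word also matches */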
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
+ BPF_EXIT_INSN(),
+
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 2,
+ .data = { 1, 0, 0, 0, 0, 0, 0, 1, },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jset: known const compare",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .retval_unpriv = 1,
+ .result_unpriv = ACCEPT,
+ .retval = 1,
+ .result = ACCEPT,
+},
+{
+ "jset: known const compare bad",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .errstr_unpriv = "!read_ok",
+ .result_unpriv = REJECT,
+ .errstr = "!read_ok",
+ .result = REJECT,
+},
+{
+ "jset: unknown const compare taken",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .errstr_unpriv = "!read_ok",
+ .result_unpriv = REJECT,
+ .errstr = "!read_ok",
+ .result = REJECT,
+},
+{
+ "jset: unknown const compare not taken",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .errstr_unpriv = "!read_ok",
+ .result_unpriv = REJECT,
+ .errstr = "!read_ok",
+ .result = REJECT,
+},
+{
+ "jset: half-known const compare",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_0, 2),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .result_unpriv = ACCEPT,
+ .result = ACCEPT,
+},
+{
+ "jset: range",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xff),
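+	/* the JSET/JLT/JGE outcomes must bound r1 so that both loads via r9 stay dead code */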
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0xf0, 3),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 0x10, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0x10, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0x10, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .result_unpriv = ACCEPT,
+ .result = ACCEPT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/jump.c b/tools/testing/selftests/bpf/verifier/jump.c
new file mode 100644
index 000000000000..8e6fcc8940f0
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/jump.c
@@ -0,0 +1,180 @@
+{
+ "jump test 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "jump test 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 14),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 11),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 5),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "jump test 3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 19),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 15),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 11),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 7),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
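+	/* every branch lands here with r2 pointing at an 8-byte key on the stack */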
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 24 },
+ .errstr_unpriv = "R1 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = -ENOENT,
+},
+{
+ "jump test 4",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "jump test 5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/junk_insn.c b/tools/testing/selftests/bpf/verifier/junk_insn.c
new file mode 100644
index 000000000000..89d690f1992a
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/junk_insn.c
@@ -0,0 +1,45 @@
+{
+ "junk insn",
+ .insns = {
+ BPF_RAW_INSN(0, 0, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unknown opcode 00",
+ .result = REJECT,
+},
+{
+ "junk insn2",
+ .insns = {
+ BPF_RAW_INSN(1, 0, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "BPF_LDX uses reserved fields",
+ .result = REJECT,
+},
+{
+ "junk insn3",
+ .insns = {
+ BPF_RAW_INSN(-1, 0, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unknown opcode ff",
+ .result = REJECT,
+},
+{
+ "junk insn4",
+ .insns = {
+ BPF_RAW_INSN(-1, -1, -1, -1, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unknown opcode ff",
+ .result = REJECT,
+},
+{
+ "junk insn5",
+ .insns = {
+ BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "BPF_ALU uses reserved fields",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/ld_abs.c b/tools/testing/selftests/bpf/verifier/ld_abs.c
new file mode 100644
index 000000000000..f6599d2ec22d
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ld_abs.c
@@ -0,0 +1,286 @@
+{
+ "ld_abs: check calling conv, r1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_LD_ABS(BPF_W, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_abs: check calling conv, r2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_ABS(BPF_W, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R2 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_abs: check calling conv, r3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_LD_ABS(BPF_W, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R3 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_abs: check calling conv, r4",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_LD_ABS(BPF_W, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R4 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_abs: check calling conv, r5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_LD_ABS(BPF_W, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R5 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_abs: check calling conv, r7",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_7, 0),
+ BPF_LD_ABS(BPF_W, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "ld_abs: tests on r6 and skb data reload helper",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 42 /* ultimate return value */,
+},
+{
+ "ld_abs: invalid op 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_DW, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "unknown opcode",
+},
+{
+ "ld_abs: invalid op 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 256),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LD_IND(BPF_DW, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "unknown opcode",
+},
+{
+ "ld_abs: nmap reduced",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_H, 12),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28),
+ BPF_LD_ABS(BPF_H, 12),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26),
+ BPF_MOV32_IMM(BPF_REG_0, 18),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64),
+ BPF_LD_IND(BPF_W, BPF_REG_7, 14),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60),
+ BPF_MOV32_IMM(BPF_REG_0, 280971478),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60),
+ BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15),
+ BPF_LD_ABS(BPF_H, 12),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13),
+ BPF_MOV32_IMM(BPF_REG_0, 22),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
+ BPF_LD_IND(BPF_H, BPF_REG_7, 14),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52),
+ BPF_MOV32_IMM(BPF_REG_0, 17366),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52),
+ BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV32_IMM(BPF_REG_0, 256),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .data = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 256,
+},
+{
+ "ld_abs: div + abs, test 1",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_B, 3),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
+ BPF_LD_ABS(BPF_B, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
+ BPF_LD_IND(BPF_B, BPF_REG_8, -70),
+ BPF_EXIT_INSN(),
+ },
+ .data = {
+ 10, 20, 30, 40, 50,
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 10,
+},
+{
+ "ld_abs: div + abs, test 2",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_B, 3),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
+ BPF_LD_ABS(BPF_B, 128),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
+ BPF_LD_IND(BPF_B, BPF_REG_8, -70),
+ BPF_EXIT_INSN(),
+ },
+ .data = {
+ 10, 20, 30, 40, 50,
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "ld_abs: div + abs, test 3",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
+ BPF_LD_ABS(BPF_B, 3),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ },
+ .data = {
+ 10, 20, 30, 40, 50,
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "ld_abs: div + abs, test 4",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
+ BPF_LD_ABS(BPF_B, 256),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ },
+ .data = {
+ 10, 20, 30, 40, 50,
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "ld_abs: vlan + abs, test 1",
+ .insns = { },
+ .data = {
+ 0x34,
+ },
+ .fill_helper = bpf_fill_ld_abs_vlan_push_pop,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0xbef,
+},
+{
+ "ld_abs: vlan + abs, test 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_skb_vlan_push),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .data = {
+ 0x34,
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "ld_abs: jump around ld_abs",
+ .insns = { },
+ .data = {
+ 10, 11,
+ },
+ .fill_helper = bpf_fill_jump_around_ld_abs,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 10,
+},
diff --git a/tools/testing/selftests/bpf/verifier/ld_dw.c b/tools/testing/selftests/bpf/verifier/ld_dw.c
new file mode 100644
index 000000000000..d2c75b889598
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ld_dw.c
@@ -0,0 +1,36 @@
+{
+ "ld_dw: xor semi-random 64 bit imms, test 1",
+ .insns = { },
+ .data = { },
+ .fill_helper = bpf_fill_rand_ld_dw,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 4090,
+},
+{
+ "ld_dw: xor semi-random 64 bit imms, test 2",
+ .insns = { },
+ .data = { },
+ .fill_helper = bpf_fill_rand_ld_dw,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 2047,
+},
+{
+ "ld_dw: xor semi-random 64 bit imms, test 3",
+ .insns = { },
+ .data = { },
+ .fill_helper = bpf_fill_rand_ld_dw,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 511,
+},
+{
+ "ld_dw: xor semi-random 64 bit imms, test 4",
+ .insns = { },
+ .data = { },
+ .fill_helper = bpf_fill_rand_ld_dw,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 5,
+},
diff --git a/tools/testing/selftests/bpf/verifier/ld_imm64.c b/tools/testing/selftests/bpf/verifier/ld_imm64.c
new file mode 100644
index 000000000000..3856dba733e9
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ld_imm64.c
@@ -0,0 +1,154 @@
+{
+ "test1 ld_imm64",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid BPF_LD_IMM insn",
+ .errstr_unpriv = "R1 pointer comparison",
+ .result = REJECT,
+},
+{
+ "test2 ld_imm64",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid BPF_LD_IMM insn",
+ .errstr_unpriv = "R1 pointer comparison",
+ .result = REJECT,
+},
+{
+ "test3 ld_imm64",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+},
+{
+ "test4 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+},
+{
+ "test5 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+},
+{
+ "test6 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
+ BPF_RAW_INSN(0, 0, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "test7 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
+ BPF_RAW_INSN(0, 0, 0, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "test8 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
+ BPF_RAW_INSN(0, 0, 0, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "uses reserved fields",
+ .result = REJECT,
+},
+{
+ "test9 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
+ BPF_RAW_INSN(0, 0, 0, 1, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+},
+{
+ "test10 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
+ BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+},
+{
+ "test11 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
+ BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+},
+{
+ "test12 ld_imm64",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
+ BPF_RAW_INSN(0, 0, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "not pointing to valid bpf_map",
+ .result = REJECT,
+},
+{
+ "test13 ld_imm64",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
+ BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+},
+{
+ "test14 ld_imm64: reject 2nd imm != 0",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_1,
+ BPF_PSEUDO_MAP_FD, 0, 0),
+ BPF_RAW_INSN(0, 0, 0, 0, 0xfefefe),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 1 },
+ .errstr = "unrecognized bpf_ld_imm64 insn",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/ld_ind.c b/tools/testing/selftests/bpf/verifier/ld_ind.c
new file mode 100644
index 000000000000..079734227538
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ld_ind.c
@@ -0,0 +1,72 @@
+{
+ "ld_ind: check calling conv, r1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_ind: check calling conv, r2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R2 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_ind: check calling conv, r3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R3 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_ind: check calling conv, r4",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_4, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R4 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_ind: check calling conv, r5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R5 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_ind: check calling conv, r7",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_7, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 1,
+},
diff --git a/tools/testing/selftests/bpf/verifier/leak_ptr.c b/tools/testing/selftests/bpf/verifier/leak_ptr.c
new file mode 100644
index 000000000000..d6eec17f2cd2
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/leak_ptr.c
@@ -0,0 +1,67 @@
+{
+ "leak pointer into ctx 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 2 },
+ .errstr_unpriv = "R2 leaks addr into mem",
+ .result_unpriv = REJECT,
+ .result = REJECT,
+ .errstr = "BPF_XADD stores into R1 ctx is not allowed",
+},
+{
+ "leak pointer into ctx 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R10 leaks addr into mem",
+ .result_unpriv = REJECT,
+ .result = REJECT,
+ .errstr = "BPF_XADD stores into R1 ctx is not allowed",
+},
+{
+ "leak pointer into ctx 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1 },
+ .errstr_unpriv = "R2 leaks addr into ctx",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "leak pointer into map val",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr_unpriv = "R6 leaks addr into mem",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/lwt.c b/tools/testing/selftests/bpf/verifier/lwt.c
new file mode 100644
index 000000000000..2cab6a3966bb
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/lwt.c
@@ -0,0 +1,189 @@
+{
+ "invalid direct packet write for LWT_IN",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "cannot write into packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
+{
+ "invalid direct packet write for LWT_OUT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "cannot write into packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_OUT,
+},
+{
+ "direct packet write for LWT_XMIT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+},
+{
+ "direct packet read for LWT_IN",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
+{
+ "direct packet read for LWT_OUT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_OUT,
+},
+{
+ "direct packet read for LWT_XMIT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+},
+{
+ "overlapping checks for direct packet access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+},
+{
+ "make headroom for LWT_XMIT",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_2, 34),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
+ /* split for s390 to succeed */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, 42),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+},
+{
+ "invalid access of tc_classid for LWT_IN",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+},
+{
+ "invalid access of tc_classid for LWT_OUT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+},
+{
+ "invalid access of tc_classid for LWT_XMIT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+},
+{
+ "check skb->tc_classid half load not permitted for lwt prog",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid) + 2),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
diff --git a/tools/testing/selftests/bpf/verifier/map_in_map.c b/tools/testing/selftests/bpf/verifier/map_in_map.c
new file mode 100644
index 000000000000..2798927ee9ff
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/map_in_map.c
@@ -0,0 +1,62 @@
+{
+ "map in map access",
+ .insns = {
+ BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_in_map = { 3 },
+ .result = ACCEPT,
+},
+{
+ "invalid inner map pointer",
+ .insns = {
+ BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_in_map = { 3 },
+ .errstr = "R1 pointer arithmetic on map_ptr prohibited",
+ .result = REJECT,
+},
+{
+ "forgot null checking on the inner map pointer",
+ .insns = {
+ BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_in_map = { 3 },
+ .errstr = "R1 type=map_value_or_null expected=map_ptr",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c b/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c
new file mode 100644
index 000000000000..cd26ee6b7b1d
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c
@@ -0,0 +1,100 @@
+{
+ "calls: two calls returning different map pointers for lookup (hash, array)",
+ .insns = {
+ /* main prog */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_CALL_REL(11),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_CALL_REL(12),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ /* subprog 1 */
+ BPF_LD_MAP_FD(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* subprog 2 */
+ BPF_LD_MAP_FD(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_48b = { 13 },
+ .fixup_map_array_48b = { 16 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "calls: two calls returning different map pointers for lookup (hash, map in map)",
+ .insns = {
+ /* main prog */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_CALL_REL(11),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_CALL_REL(12),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ /* subprog 1 */
+ BPF_LD_MAP_FD(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* subprog 2 */
+ BPF_LD_MAP_FD(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_in_map = { 16 },
+ .fixup_map_array_48b = { 13 },
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'map_ptr'",
+},
+{
+ "cond: two branches returning different map pointers for lookup (tail, tail)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 5 },
+ .fixup_prog2 = { 2 },
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "tail_call abusing map_ptr",
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "cond: two branches returning same map pointers for lookup (tail, tail)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog2 = { 2, 5 },
+ .result_unpriv = ACCEPT,
+ .result = ACCEPT,
+ .retval = 42,
+},
diff --git a/tools/testing/selftests/bpf/verifier/map_ret_val.c b/tools/testing/selftests/bpf/verifier/map_ret_val.c
new file mode 100644
index 000000000000..bdd0e8d18333
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/map_ret_val.c
@@ -0,0 +1,65 @@
+{
+ "invalid map_fd for function call",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "fd 0 is not pointing to valid bpf_map",
+ .result = REJECT,
+},
+{
+ "don't check return value before access",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 invalid mem access 'map_value_or_null'",
+ .result = REJECT,
+},
+{
+ "access memory with incorrect alignment",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "misaligned value access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "sometimes access memory with incorrect alignment",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 invalid mem access",
+ .errstr_unpriv = "R0 leaks addr",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/masking.c b/tools/testing/selftests/bpf/verifier/masking.c
new file mode 100644
index 000000000000..6e1358c544fd
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/masking.c
@@ -0,0 +1,322 @@
+{
+ "masking, test out of bounds 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 5),
+ BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 3",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0xffffffff),
+ BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 4",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0xffffffff),
+ BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 5",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, -1),
+ BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 6",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, -1),
+ BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 7",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 8",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 9",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0xffffffff),
+ BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 10",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0xffffffff),
+ BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 11",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 12",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test in bounds 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 4),
+ BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 4,
+},
+{
+ "masking, test in bounds 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test in bounds 3",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0xfffffffe),
+ BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0xfffffffe,
+},
+{
+ "masking, test in bounds 4",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0xabcde),
+ BPF_MOV32_IMM(BPF_REG_2, 0xabcdef - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0xabcde,
+},
+{
+ "masking, test in bounds 5",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test in bounds 6",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 46),
+ BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 46,
+},
+{
+ "masking, test in bounds 7",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, -46),
+ BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1),
+ BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 46,
+},
+{
+ "masking, test in bounds 8",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, -47),
+ BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1),
+ BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
diff --git a/tools/testing/selftests/bpf/verifier/meta_access.c b/tools/testing/selftests/bpf/verifier/meta_access.c
new file mode 100644
index 000000000000..205292b8dd65
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/meta_access.c
@@ -0,0 +1,235 @@
+{
+ "meta access, test1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet, off=-8",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test4",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test5",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
+ BPF_MOV64_IMM(BPF_REG_2, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_meta),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R3 !read_ok",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test6",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test7",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test8",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test9",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test10",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_IMM(BPF_REG_5, 42),
+ BPF_MOV64_IMM(BPF_REG_6, 24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
+ BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test11",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_IMM(BPF_REG_5, 42),
+ BPF_MOV64_IMM(BPF_REG_6, 24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
+ BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test12",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
diff --git a/tools/testing/selftests/bpf/verifier/perf_event_sample_period.c b/tools/testing/selftests/bpf/verifier/perf_event_sample_period.c
new file mode 100644
index 000000000000..471c1a5950d8
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/perf_event_sample_period.c
@@ -0,0 +1,59 @@
+{
+ "check bpf_perf_event_data->sample_period byte load permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period)),
+#else
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period) + 7),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
+{
+ "check bpf_perf_event_data->sample_period half load permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period)),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period) + 6),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
+{
+ "check bpf_perf_event_data->sample_period word load permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period)),
+#else
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period) + 4),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
+{
+ "check bpf_perf_event_data->sample_period dword load permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c b/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c
new file mode 100644
index 000000000000..bbdba990fefb
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c
@@ -0,0 +1,74 @@
+{
+ "prevent map lookup in sockmap",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_sockmap = { 3 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 15 into func bpf_map_lookup_elem",
+ .prog_type = BPF_PROG_TYPE_SOCK_OPS,
+},
+{
+ "prevent map lookup in sockhash",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_sockhash = { 3 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 18 into func bpf_map_lookup_elem",
+ .prog_type = BPF_PROG_TYPE_SOCK_OPS,
+},
+{
+ "prevent map lookup in xskmap",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_xskmap = { 3 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 17 into func bpf_map_lookup_elem",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "prevent map lookup in stack trace",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_stacktrace = { 3 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 7 into func bpf_map_lookup_elem",
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
+{
+ "prevent map lookup in prog array",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog2 = { 3 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 3 into func bpf_map_lookup_elem",
+},
diff --git a/tools/testing/selftests/bpf/verifier/raw_stack.c b/tools/testing/selftests/bpf/verifier/raw_stack.c
new file mode 100644
index 000000000000..193d9e87d5a9
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/raw_stack.c
@@ -0,0 +1,305 @@
+{
+ "raw_stack: no skb_load_bytes",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ /* Call to skb_load_bytes() omitted. */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid read from stack off -8+0 size 8",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, negative len",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R4 min value is negative",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, negative len 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, ~0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R4 min value is negative",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, zero len",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack type R3",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, no init",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, init",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, spilled regs around bounds",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
+ offsetof(struct __sk_buff, priority)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, spilled regs corruption",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'inv'",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "raw_stack: skb_load_bytes, spilled regs corruption 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
+ offsetof(struct __sk_buff, priority)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
+ offsetof(struct __sk_buff, pkt_type)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R3 invalid mem access 'inv'",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "raw_stack: skb_load_bytes, spilled regs + data",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
+ offsetof(struct __sk_buff, priority)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, invalid access 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack type R3 off=-513 access_size=8",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, invalid access 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack type R3 off=-1 access_size=8",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, invalid access 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R4 min value is negative",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, invalid access 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, invalid access 5",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, invalid access 6",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack type R3 off=-512 access_size=0",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, large access",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 512),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/ref_tracking.c b/tools/testing/selftests/bpf/verifier/ref_tracking.c
new file mode 100644
index 000000000000..3ed3593bd8b6
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ref_tracking.c
@@ -0,0 +1,607 @@
+{
+ "reference tracking: leak potential reference",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
+{
+ "reference tracking: leak potential reference on stack",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
+{
+ "reference tracking: leak potential reference on stack 2",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
+{
+ "reference tracking: zero potential reference",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
+{
+ "reference tracking: copy and zero potential references",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
+{
+ "reference tracking: release reference without check",
+ .insns = {
+ BPF_SK_LOOKUP,
+ /* reference in r0 may be NULL */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "type=sock_or_null expected=sock",
+ .result = REJECT,
+},
+{
+ "reference tracking: release reference",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: release reference 2",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: release reference twice",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "type=inv expected=sock",
+ .result = REJECT,
+},
+{
+ "reference tracking: release reference twice inside branch",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "type=inv expected=sock",
+ .result = REJECT,
+},
+{
+ "reference tracking: alloc, check, free in one subbranch",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
+ /* if (offsetof(skb, mark) > data_len) exit; */
+ BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
+ offsetof(struct __sk_buff, mark)),
+ BPF_SK_LOOKUP,
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
+ /* Leak reference in R0 */
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "reference tracking: alloc, check, free in both subbranches",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
+ /* if (offsetof(skb, mark) > data_len) exit; */
+ BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
+ offsetof(struct __sk_buff, mark)),
+ BPF_SK_LOOKUP,
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "reference tracking in call: free reference in subprog",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking in call: free reference in subprog and outside",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "type=inv expected=sock",
+ .result = REJECT,
+},
+{
+ "reference tracking in call: alloc & leak reference in subprog",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
+ BPF_SK_LOOKUP,
+ /* spill unchecked sk_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
+{
+ "reference tracking in call: alloc in subprog, release outside",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_SK_LOOKUP,
+ BPF_EXIT_INSN(), /* return sk */
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = POINTER_VALUE,
+ .result = ACCEPT,
+},
+{
+ "reference tracking in call: sk_ptr leak into caller stack",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+ /* spill unchecked sk_ptr into stack of caller */
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_SK_LOOKUP,
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
+{
+ "reference tracking in call: sk_ptr spill into caller stack",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+ /* spill unchecked sk_ptr into stack of caller */
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ /* now the sk_ptr is verified, free the reference */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_SK_LOOKUP,
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: allow LD_ABS",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: forbid LD_ABS while holding reference",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_SK_LOOKUP,
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
+ .result = REJECT,
+},
+{
+ "reference tracking: allow LD_IND",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_7, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "reference tracking: forbid LD_IND while holding reference",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_7, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
+ .result = REJECT,
+},
+{
+ "reference tracking: check reference or tail call",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
+ BPF_SK_LOOKUP,
+ /* if (sk) bpf_sk_release() */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
+ /* bpf_tail_call() */
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 17 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: release reference then tail call",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
+ BPF_SK_LOOKUP,
+ /* if (sk) bpf_sk_release() */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ /* bpf_tail_call() */
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 18 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: leak possible reference over tail call",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
+ /* Look up socket and store in REG_6 */
+ BPF_SK_LOOKUP,
+ /* bpf_tail_call() */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ /* if (sk) bpf_sk_release() */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 16 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "tail_call would lead to reference leak",
+ .result = REJECT,
+},
+{
+ "reference tracking: leak checked reference over tail call",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
+ /* Look up socket and store in REG_6 */
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ /* if (!sk) goto end */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ /* bpf_tail_call() */
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 17 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "tail_call would lead to reference leak",
+ .result = REJECT,
+},
+{
+ "reference tracking: mangle and release sock_or_null",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "R1 pointer arithmetic on sock_or_null prohibited",
+ .result = REJECT,
+},
+{
+ "reference tracking: mangle and release sock",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "R1 pointer arithmetic on sock prohibited",
+ .result = REJECT,
+},
+{
+ "reference tracking: access member",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: write to member",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_LD_IMM64(BPF_REG_2, 42),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2,
+ offsetof(struct bpf_sock, mark)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "cannot write into sock",
+ .result = REJECT,
+},
+{
+ "reference tracking: invalid 64-bit access of member",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "invalid sock access off=0 size=8",
+ .result = REJECT,
+},
+{
+ "reference tracking: access after release",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "!read_ok",
+ .result = REJECT,
+},
+{
+ "reference tracking: direct access for lookup",
+ .insns = {
+ /* Check that the packet is at least 64B long */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
+ /* sk = sk_lookup_tcp(ctx, skb->data, ...) */
+ BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/runtime_jit.c b/tools/testing/selftests/bpf/verifier/runtime_jit.c
new file mode 100644
index 000000000000..a9a8f620e71c
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/runtime_jit.c
@@ -0,0 +1,80 @@
+{
+ "runtime/jit: tail_call within bounds, prog once",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 1 },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "runtime/jit: tail_call within bounds, prog loop",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 1 },
+ .result = ACCEPT,
+ .retval = 41,
+},
+{
+ "runtime/jit: tail_call within bounds, no prog",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 1 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "runtime/jit: tail_call out of bounds",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, 256),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 1 },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "runtime/jit: pass negative index to tail_call",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, -1),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 1 },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "runtime/jit: pass > 32bit index to tail_call",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 2 },
+ .result = ACCEPT,
+ .retval = 42,
+ /* Verifier rewrite for unpriv skips tail call here. */
+ .retval_unpriv = 2,
+},
diff --git a/tools/testing/selftests/bpf/verifier/search_pruning.c b/tools/testing/selftests/bpf/verifier/search_pruning.c
new file mode 100644
index 000000000000..7e50cb80873a
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/search_pruning.c
@@ -0,0 +1,156 @@
+{
+ "pointer/scalar confusion in state equality check (way 1)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_JMP_A(1),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+ BPF_JMP_A(0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .retval = POINTER_VALUE,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 leaks addr as return value"
+},
+{
+ "pointer/scalar confusion in state equality check (way 2)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+ BPF_JMP_A(1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .retval = POINTER_VALUE,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 leaks addr as return value"
+},
+{
+ "liveness pruning and write screening",
+ .insns = {
+ /* Get an unknown value */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ /* branch conditions teach us nothing about R2 */
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 !read_ok",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
+{
+ "varlen_map_value_access pruning",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
+ BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .errstr = "R0 unbounded memory access",
+ .result_unpriv = REJECT,
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "search pruning: all branches should be verified (nop operation)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_A(1),
+ BPF_MOV64_IMM(BPF_REG_4, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_6, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R6 invalid mem access 'inv'",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "search pruning: all branches should be verified (invalid stack access)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
+ BPF_JMP_A(1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "invalid read from stack off -16+0 size 8",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "allocated_stack",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_7, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, -8),
+ BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_7, -9),
+ BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_10, -9),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = ACCEPT,
+ .insn_processed = 15,
+},
diff --git a/tools/testing/selftests/bpf/verifier/sock.c b/tools/testing/selftests/bpf/verifier/sock.c
new file mode 100644
index 000000000000..0ddfdf76aba5
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/sock.c
@@ -0,0 +1,384 @@
+{
+ "skb->sk: no NULL check",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid mem access 'sock_common_or_null'",
+},
+{
+ "skb->sk: sk->family [non fullsock field]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct bpf_sock, family)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "skb->sk: sk->type [fullsock field]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct bpf_sock, type)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid sock_common access",
+},
+{
+ "bpf_sk_fullsock(skb->sk): no !skb->sk check",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "type=sock_common_or_null expected=sock_common",
+},
+{
+ "sk_fullsock(skb->sk): no NULL check on ret",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, type)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid mem access 'sock_or_null'",
+},
+{
+ "sk_fullsock(skb->sk): sk->type [fullsock field]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, type)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): sk->family [non fullsock field]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, family)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): sk->state [narrow load]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, state)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): sk->dst_port [narrow load]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): sk->dst_port [load 2nd byte]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid sock access",
+},
+{
+ "sk_fullsock(skb->sk): sk->dst_ip6 [load 2nd byte]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_ip6[0]) + 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): sk->type [narrow load]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, type)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): sk->protocol [narrow load]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, protocol)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): beyond last field",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetofend(struct bpf_sock, state)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid sock access",
+},
+{
+ "bpf_tcp_sock(skb->sk): no !skb->sk check",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "type=sock_common_or_null expected=sock_common",
+},
+{
+ "bpf_tcp_sock(skb->sk): no NULL check on ret",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_tcp_sock, snd_cwnd)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid mem access 'tcp_sock_or_null'",
+},
+{
+ "bpf_tcp_sock(skb->sk): tp->snd_cwnd",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_tcp_sock, snd_cwnd)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "bpf_tcp_sock(skb->sk): tp->bytes_acked",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_tcp_sock, bytes_acked)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "bpf_tcp_sock(skb->sk): beyond last field",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, offsetofend(struct bpf_tcp_sock, bytes_acked)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid tcp_sock access",
+},
+{
+ "bpf_tcp_sock(bpf_sk_fullsock(skb->sk)): tp->snd_cwnd",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_tcp_sock, snd_cwnd)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "bpf_sk_release(skb->sk)",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "type=sock_common expected=sock",
+},
+{
+ "bpf_sk_release(bpf_sk_fullsock(skb->sk))",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "reference has not been acquired before",
+},
+{
+ "bpf_sk_release(bpf_tcp_sock(skb->sk))",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "type=tcp_sock expected=sock",
+},
diff --git a/tools/testing/selftests/bpf/verifier/spill_fill.c b/tools/testing/selftests/bpf/verifier/spill_fill.c
new file mode 100644
index 000000000000..45d43bf82f26
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/spill_fill.c
@@ -0,0 +1,76 @@
+{
+ "check valid spill/fill",
+ .insns = {
+ /* spill R1(ctx) into stack */
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ /* fill it back into R2 */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
+ /* should be able to access R0 = *(R2 + 8) */
+ /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R0 leaks addr",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .retval = POINTER_VALUE,
+},
+{
+ "check valid spill/fill, skb mark",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = ACCEPT,
+},
+{
+ "check corrupted spill/fill",
+ .insns = {
+ /* spill R1(ctx) into stack */
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ /* mess up with R1 pointer on stack */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
+ /* fill back into R0 is fine for priv.
+ * R0 now becomes SCALAR_VALUE.
+ */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ /* Load from R0 should fail. */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "attempt to corrupt spilled",
+ .errstr = "R0 invalid mem access 'inv",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "check corrupted spill/fill, LSB",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_ST_MEM(BPF_H, BPF_REG_10, -8, 0xcafe),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "attempt to corrupt spilled",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = POINTER_VALUE,
+},
+{
+ "check corrupted spill/fill, MSB",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x12345678),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "attempt to corrupt spilled",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = POINTER_VALUE,
+},
diff --git a/tools/testing/selftests/bpf/verifier/spin_lock.c b/tools/testing/selftests/bpf/verifier/spin_lock.c
new file mode 100644
index 000000000000..781621facae4
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/spin_lock.c
@@ -0,0 +1,333 @@
+{
+ "spin_lock: test1 success",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1,
+ 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "spin_lock: test2 direct ld/st",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1,
+ 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .result = REJECT,
+ .errstr = "cannot be accessed directly",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "spin_lock: test3 direct ld/st",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1,
+ 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .result = REJECT,
+ .errstr = "cannot be accessed directly",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "spin_lock: test4 direct ld/st",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1,
+ 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_6, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .result = REJECT,
+ .errstr = "cannot be accessed directly",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "spin_lock: test5 call within a locked region",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1,
+ 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .result = REJECT,
+ .errstr = "calls are not allowed",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "spin_lock: test6 missing unlock",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1,
+ 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .result = REJECT,
+ .errstr = "unlock is missing",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "spin_lock: test7 unlock without lock",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1,
+ 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .result = REJECT,
+ .errstr = "without taking a lock",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "spin_lock: test8 double lock",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1,
+ 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .result = REJECT,
+ .errstr = "calls are not allowed",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "spin_lock: test9 different lock",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1,
+ 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1,
+ 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3, 11 },
+ .result = REJECT,
+ .errstr = "unlock of different lock",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "spin_lock: test10 lock in subprog without unlock",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1,
+ 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .result = REJECT,
+ .errstr = "unlock is missing",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "spin_lock: test11 ld_abs under lock",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1,
+ 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 4 },
+ .result = REJECT,
+ .errstr = "inside bpf_spin_lock",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/stack_ptr.c b/tools/testing/selftests/bpf/verifier/stack_ptr.c
new file mode 100644
index 000000000000..7276620ef242
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/stack_ptr.c
@@ -0,0 +1,317 @@
+{
+ "PTR_TO_STACK store/load",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0xfaceb00c,
+},
+{
+ "PTR_TO_STACK store/load - bad alignment on off",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
+},
+{
+ "PTR_TO_STACK store/load - bad alignment on reg",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
+},
+{
+ "PTR_TO_STACK store/load - out of bounds low",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack off=-79992 size=8",
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+},
+{
+ "PTR_TO_STACK store/load - out of bounds high",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack off=0 size=8",
+},
+{
+ "PTR_TO_STACK check high 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "PTR_TO_STACK check high 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "PTR_TO_STACK check high 3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "PTR_TO_STACK check high 4",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .errstr = "invalid stack off=0 size=1",
+ .result = REJECT,
+},
+{
+ "PTR_TO_STACK check high 5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack off",
+},
+{
+ "PTR_TO_STACK check high 6",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack off",
+},
+{
+ "PTR_TO_STACK check high 7",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .errstr = "fp pointer offset",
+},
+{
+ "PTR_TO_STACK check low 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -512),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "PTR_TO_STACK check low 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 1, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "PTR_TO_STACK check low 3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .errstr = "invalid stack off=-513 size=1",
+ .result = REJECT,
+},
+{
+ "PTR_TO_STACK check low 4",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, INT_MIN),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "math between fp pointer",
+},
+{
+ "PTR_TO_STACK check low 5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack off",
+},
+{
+ "PTR_TO_STACK check low 6",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack off",
+},
+{
+ "PTR_TO_STACK check low 7",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .errstr = "fp pointer offset",
+},
+{
+ "PTR_TO_STACK mixed reg/k, 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
+ BPF_MOV64_IMM(BPF_REG_2, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "PTR_TO_STACK mixed reg/k, 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
+ BPF_MOV64_IMM(BPF_REG_2, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_5, -6),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "PTR_TO_STACK mixed reg/k, 3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
+ BPF_MOV64_IMM(BPF_REG_2, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = -3,
+},
+{
+ "PTR_TO_STACK reg",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_MOV64_IMM(BPF_REG_2, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "invalid stack off=0 size=1",
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "stack pointer arithmetic",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
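+	/* size 0 == BPF_W: 4-byte stores through the derived stack pointers */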
+ BPF_ST_MEM(0, BPF_REG_2, 4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+ BPF_ST_MEM(0, BPF_REG_2, 4, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/uninit.c b/tools/testing/selftests/bpf/verifier/uninit.c
new file mode 100644
index 000000000000..987a5871ff1d
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/uninit.c
@@ -0,0 +1,39 @@
+{
+ "read uninitialized register",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R2 !read_ok",
+ .result = REJECT,
+},
+{
+ "read invalid register",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R15 is invalid",
+ .result = REJECT,
+},
+{
+ "program doesn't init R0 before exit",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 !read_ok",
+ .result = REJECT,
+},
+{
+ "program doesn't init R0 before exit in all branches",
+ .insns = {
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 !read_ok",
+ .errstr_unpriv = "R1 pointer comparison",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/unpriv.c b/tools/testing/selftests/bpf/verifier/unpriv.c
new file mode 100644
index 000000000000..dbaf5be947b2
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/unpriv.c
@@ -0,0 +1,522 @@
+{
+ "unpriv: return pointer",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 leaks addr",
+ .retval = POINTER_VALUE,
+},
+{
+ "unpriv: add const to pointer",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "unpriv: add pointer to pointer",
+ .insns = {
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R1 pointer += pointer",
+},
+{
+ "unpriv: neg pointer",
+ .insns = {
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R1 pointer arithmetic",
+},
+{
+ "unpriv: cmp pointer with const",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R1 pointer comparison",
+},
+{
+ "unpriv: cmp pointer with pointer",
+ .insns = {
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R10 pointer comparison",
+},
+{
+ "unpriv: check that printk is disallowed",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_trace_printk),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "unknown func bpf_trace_printk#6",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "unpriv: pass pointer to helper function",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr_unpriv = "R4 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: indirectly pass pointer on stack to helper function",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "invalid indirect read from stack off -8+0 size 8",
+ .result = REJECT,
+},
+{
+ "unpriv: mangle pointer on stack 1",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "attempt to corrupt spilled",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: mangle pointer on stack 2",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "attempt to corrupt spilled",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: read pointer from stack in small chunks",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid size",
+ .result = REJECT,
+},
+{
+ "unpriv: write pointer into ctx",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 leaks addr",
+ .result_unpriv = REJECT,
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "unpriv: spill/fill of ctx",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "unpriv: spill/fill of ctx 2",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of ctx 3",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R1 type=fp expected=ctx",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of ctx 4",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
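+	/* atomic add hits the slot holding the spilled ctx pointer,
+	 * so the fill below yields a scalar instead of a ctx pointer
+	 */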
+ BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10, BPF_REG_0, -8, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R1 type=inv expected=ctx",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of different pointers stx",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, 42),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct __sk_buff, mark)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "same insn cannot be used with different pointers",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of different pointers stx - ctx and sock",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ /* struct bpf_sock *sock = bpf_sock_lookup(...); */
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ /* u64 foo; */
+ /* void *target = &foo; */
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ /* if (skb == NULL) *target = sock; */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+ /* else *target = skb; */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ /* struct __sk_buff *skb = *target; */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ /* skb->mark = 42; */
+ BPF_MOV64_IMM(BPF_REG_3, 42),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct __sk_buff, mark)),
+ /* if (sk) bpf_sk_release(sk) */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "type=ctx expected=sock",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of different pointers stx - leak sock",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ /* struct bpf_sock *sock = bpf_sock_lookup(...); */
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ /* u64 foo; */
+ /* void *target = &foo; */
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ /* if (skb == NULL) *target = sock; */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+ /* else *target = skb; */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ /* struct __sk_buff *skb = *target; */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ /* skb->mark = 42; */
+ BPF_MOV64_IMM(BPF_REG_3, 42),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ //.errstr = "same insn cannot be used with different pointers",
+ .errstr = "Unreleased reference",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of different pointers stx - sock and ctx (read)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ /* struct bpf_sock *sock = bpf_sock_lookup(...); */
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ /* u64 foo; */
+ /* void *target = &foo; */
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ /* if (skb) *target = skb */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ /* else *target = sock */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+ /* struct bpf_sock *sk = *target; */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ /* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct bpf_sock, mark)),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "same insn cannot be used with different pointers",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of different pointers stx - sock and ctx (write)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ /* struct bpf_sock *sock = bpf_sock_lookup(...); */
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ /* u64 foo; */
+ /* void *target = &foo; */
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ /* if (skb) *target = skb */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ /* else *target = sock */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+ /* struct bpf_sock *sk = *target; */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ /* if (sk) sk->mark = 42; bpf_sk_release(sk); */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_3, 42),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct bpf_sock, mark)),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ //.errstr = "same insn cannot be used with different pointers",
+ .errstr = "cannot write into sock",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of different pointers ldx",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
+ -(__s32)offsetof(struct bpf_perf_event_data,
+ sample_period) - 8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "same insn cannot be used with different pointers",
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
+{
+ "unpriv: write pointer into map elem value",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "alu32: mov u32 const",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_7, 0),
+ BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1),
+ BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
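+	/* never reached: r0 is known to be zero after the 32-bit ALU ops,
+	 * so the branch above is always taken and the NULL load is skipped
+	 */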
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "unpriv: partial copy of pointer",
+ .insns = {
+ BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R10 partial copy",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: pass pointer to tail_call",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 1 },
+ .errstr_unpriv = "R3 leaks addr into helper",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: cmp map pointer with zero",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1 },
+ .errstr_unpriv = "R1 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: write into frame pointer",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "frame pointer is read only",
+ .result = REJECT,
+},
+{
+ "unpriv: spill/fill frame pointer",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "frame pointer is read only",
+ .result = REJECT,
+},
+{
+ "unpriv: cmp of frame pointer",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R10 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: adding of fp",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: cmp of stack pointer",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R2 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/value.c b/tools/testing/selftests/bpf/verifier/value.c
new file mode 100644
index 000000000000..0e42592b1218
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/value.c
@@ -0,0 +1,104 @@
+{
+ "map element value store of cleared call register",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R1 !read_ok",
+ .errstr = "R1 !read_ok",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+},
+{
+ "map element value with unaligned store",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
+ BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
+ BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "map element value with unaligned load",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "map element value is preserved across register spilling",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, offsetof(struct test_val, foo)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/value_adj_spill.c b/tools/testing/selftests/bpf/verifier/value_adj_spill.c
new file mode 100644
index 000000000000..7135e8021b81
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/value_adj_spill.c
@@ -0,0 +1,43 @@
+{
+ "map element value is preserved across register spilling",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+},
+{
+ "map element value or null is marked on register spilling",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/value_illegal_alu.c b/tools/testing/selftests/bpf/verifier/value_illegal_alu.c
new file mode 100644
index 000000000000..7f6c232cd842
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/value_illegal_alu.c
@@ -0,0 +1,94 @@
+{
+ "map element value illegal alu op, 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 bitwise operator &= on pointer",
+ .result = REJECT,
+},
+{
+ "map element value illegal alu op, 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 32-bit pointer arithmetic prohibited",
+ .result = REJECT,
+},
+{
+ "map element value illegal alu op, 3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 pointer arithmetic with /= operator",
+ .result = REJECT,
+},
+{
+ "map element value illegal alu op, 4",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "invalid mem access 'inv'",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "map element value illegal alu op, 5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_IMM(BPF_REG_3, 4096),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 invalid mem access 'inv'",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/value_or_null.c b/tools/testing/selftests/bpf/verifier/value_or_null.c
new file mode 100644
index 000000000000..860d4a71cd83
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/value_or_null.c
@@ -0,0 +1,152 @@
+{
+ "multiple registers share map_lookup_elem result",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS
+},
+{
+ "alu ops on ptr_to_map_value_or_null, 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "R4 pointer arithmetic on map_value_or_null",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS
+},
+{
+ "alu ops on ptr_to_map_value_or_null, 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "R4 pointer arithmetic on map_value_or_null",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS
+},
+{
+ "alu ops on ptr_to_map_value_or_null, 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "R4 pointer arithmetic on map_value_or_null",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS
+},
+{
+ "invalid memory access with multiple map_lookup_elem calls",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .result = REJECT,
+ .errstr = "R4 !read_ok",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS
+},
+{
+ "valid indirect map_lookup_elem access with 2nd lookup in branch",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_2, 10),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS
+},
+{
+ "invalid map access from else condition",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+	BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES - 1, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 unbounded memory access",
+ .result = REJECT,
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
new file mode 100644
index 000000000000..c3de1a2c9dc5
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
@@ -0,0 +1,838 @@
+{
+ "map access: known scalar += value_ptr from different maps",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 5 },
+ .fixup_map_array_48b = { 8 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R1 tried to add from different maps",
+ .retval = 1,
+},
+{
+ "map access: value_ptr -= known scalar from different maps",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 5 },
+ .fixup_map_array_48b = { 8 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 min value is outside of the array range",
+ .retval = 1,
+},
+{
+ "map access: known scalar += value_ptr from different maps, but same value properties",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 5 },
+ .fixup_map_array_48b = { 8 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: mixing value pointer and scalar, 1",
+ .insns = {
+ // load map value pointer into r0 and r2
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+ BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
+ BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ // load some number from the map into r1
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ // depending on r1, branch:
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 3),
+ // branch A
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_JMP_A(2),
+ // branch B
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0x100000),
+ // common instruction
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ // depending on r1, branch:
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ // branch A
+ BPF_JMP_A(4),
+ // branch B
+ BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
+ // verifier follows fall-through
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ // fake-dead code; targeted from branch A to
+ // prevent dead code sanitization
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R2 tried to add from different pointers or scalars",
+ .retval = 0,
+},
+{
+ "map access: mixing value pointer and scalar, 2",
+ .insns = {
+ // load map value pointer into r0 and r2
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+ BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
+ BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ // load some number from the map into r1
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ // depending on r1, branch:
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ // branch A
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0x100000),
+ BPF_JMP_A(2),
+ // branch B
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ // common instruction
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ // depending on r1, branch:
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ // branch A
+ BPF_JMP_A(4),
+ // branch B
+ BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
+ // verifier follows fall-through
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ // fake-dead code; targeted from branch A to
+ // prevent dead code sanitization
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R2 tried to add from different maps or paths",
+ .retval = 0,
+},
+{
+ "sanitation: alu with different scalars 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+ BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
+ BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0x100000),
+ BPF_JMP_A(2),
+ BPF_MOV64_IMM(BPF_REG_2, 42),
+ BPF_MOV64_IMM(BPF_REG_3, 0x100001),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = ACCEPT,
+ .retval = 0x100000,
+},
+{
+ "sanitation: alu with different scalars 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
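+	/* bpf_map_delete_elem() on an array map is unsupported and
+	 * returns -EINVAL, hence the expected retval of -EINVAL * 2
+	 */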
+ BPF_EMIT_CALL(BPF_FUNC_map_delete_elem),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_EMIT_CALL(BPF_FUNC_map_delete_elem),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = ACCEPT,
+ .retval = -EINVAL * 2,
+},
+{
+ "sanitation: alu with different scalars 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, EINVAL),
+ BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, -1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_0, EINVAL),
+ BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, -1),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = -EINVAL * 2,
+},
+{
+ "map access: value_ptr += known scalar, upper oob arith, test 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 48),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: value_ptr += known scalar, upper oob arith, test 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 49),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: value_ptr += known scalar, upper oob arith, test 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 47),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: value_ptr -= known scalar, lower oob arith, test 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_IMM(BPF_REG_1, 47),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 48),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R0 min value is outside of the array range",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+},
+{
+ "map access: value_ptr -= known scalar, lower oob arith, test 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_IMM(BPF_REG_1, 47),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 48),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: value_ptr -= known scalar, lower oob arith, test 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_IMM(BPF_REG_1, 47),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 47),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: known scalar += value_ptr",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: value_ptr += known scalar, 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: value_ptr += known scalar, 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 49),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "invalid access to map value",
+},
+{
+ "map access: value_ptr += known scalar, 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "invalid access to map value",
+},
+{
+ "map access: value_ptr += known scalar, 4",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, -2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: value_ptr += known scalar, 5",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, (6 + 1) * sizeof(int)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 0xabcdef12,
+},
+{
+ "map access: value_ptr += known scalar, 6",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_IMM(BPF_REG_1, (3 + 1) * sizeof(int)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 3 * sizeof(int)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 0xabcdef12,
+},
+{
+ "map access: unknown scalar += value_ptr, 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: unknown scalar += value_ptr, 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 0xabcdef12,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "map access: unknown scalar += value_ptr, 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 0xabcdef12,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "map access: unknown scalar += value_ptr, 4",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_IMM(BPF_REG_1, 19),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 max value is outside of the array range",
+ .errstr_unpriv = "R1 pointer arithmetic of map value goes out of range",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "map access: value_ptr += unknown scalar, 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: value_ptr += unknown scalar, 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 0xabcdef12,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "map access: value_ptr += unknown scalar, 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 16),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 1),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_3, 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: value_ptr += value_ptr",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R0 pointer += pointer prohibited",
+},
+{
+ "map access: known scalar -= value_ptr",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 tried to subtract pointer from scalar",
+},
+{
+ "map access: value_ptr -= known scalar",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R0 min value is outside of the array range",
+},
+{
+ "map access: value_ptr -= known scalar, 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_IMM(BPF_REG_1, 6),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: unknown scalar -= value_ptr",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 tried to subtract pointer from scalar",
+},
+{
+ "map access: value_ptr -= unknown scalar",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R0 min value is negative",
+},
+{
+ "map access: value_ptr -= unknown scalar, 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_1, 0x7),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: value_ptr -= value_ptr",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'inv'",
+ .errstr_unpriv = "R0 pointer -= pointer prohibited",
+},
diff --git a/tools/testing/selftests/bpf/verifier/var_off.c b/tools/testing/selftests/bpf/verifier/var_off.c
new file mode 100644
index 000000000000..1e536ff121a5
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/var_off.c
@@ -0,0 +1,66 @@
+{
+ "variable-offset ctx access",
+ .insns = {
+ /* Get an unknown value */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ /* Make it small and 4-byte aligned */
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+ /* add it to skb. We now have either &skb->len or
+ * &skb->pkt_type, but we don't know which
+ */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ /* dereference it */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "variable ctx access var_off=(0x0; 0x4)",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
+{
+ "variable-offset stack access",
+ .insns = {
+ /* Fill the top 8 bytes of the stack */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ /* Get an unknown value */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ /* Make it small and 4-byte aligned */
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
+ /* add it to fp. We now have either fp-4 or fp-8, but
+ * we don't know which
+ */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
+ /* dereference it */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
+{
+ "indirect variable-offset stack access",
+ .insns = {
+ /* Fill the top 8 bytes of the stack */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ /* Get an unknown value */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ /* Make it small and 4-byte aligned */
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
+ /* add it to fp. We now have either fp-4 or fp-8, but
+ * we don't know which
+ */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
+ /* dereference it indirectly */
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 5 },
+ .errstr = "variable stack read R2",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
diff --git a/tools/testing/selftests/bpf/verifier/xadd.c b/tools/testing/selftests/bpf/verifier/xadd.c
new file mode 100644
index 000000000000..c5de2e62cc8b
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/xadd.c
@@ -0,0 +1,97 @@
+{
+ "xadd/w check unaligned stack",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "misaligned stack access off",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "xadd/w check unaligned map",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = REJECT,
+ .errstr = "misaligned value access off",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "xadd/w check unaligned pkt",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 99),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 6),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
+ BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
+ BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "BPF_XADD stores into R2 pkt is not allowed",
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "xadd/w check whether src/dst got mangled, 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 3,
+},
+{
+ "xadd/w check whether src/dst got mangled, 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+ BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+ BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 3,
+},
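The xadd.c cases above exercise the BPF_XADD (atomic add) instruction directly in raw bytecode: it must target naturally aligned stack or map-value memory, it is refused on packet memory, and the source/destination registers must survive the operation unmodified. As a rough restricted-C counterpart (a sketch only, not part of this patch), clang's BPF backend lowers __sync_fetch_and_add() to BPF_XADD; the map layout, section name and helper header below are assumptions.

/* Illustrative sketch, not part of this patch. Assumes clang -O2 -target bpf,
 * which emits __sync_fetch_and_add() as a BPF_XADD instruction. The verifier
 * accepts it on an 8-byte-aligned map value, matching the ACCEPT cases above.
 */
#include <linux/bpf.h>
#include "bpf_helpers.h"		/* selftests helper header (assumed) */

struct bpf_map_def SEC("maps") counters = {
	.type        = BPF_MAP_TYPE_ARRAY,
	.key_size    = sizeof(__u32),
	.value_size  = sizeof(__u64),
	.max_entries = 1,
};

SEC("classifier")
int xadd_counter(struct __sk_buff *skb)
{
	__u32 key = 0;
	__u64 *val = bpf_map_lookup_elem(&counters, &key);

	if (val)
		__sync_fetch_and_add(val, 1);	/* aligned 8-byte add: accepted */
	return 0;
}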
diff --git a/tools/testing/selftests/bpf/verifier/xdp.c b/tools/testing/selftests/bpf/verifier/xdp.c
new file mode 100644
index 000000000000..5ac390508139
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/xdp.c
@@ -0,0 +1,14 @@
+{
+ "XDP, using ifindex from netdev",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, ingress_ifindex)),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .retval = 1,
+},
diff --git a/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c b/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c
new file mode 100644
index 000000000000..bfb97383e6b5
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c
@@ -0,0 +1,900 @@
+{
+ "XDP pkt read, pkt_end mangling, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R3 pointer arithmetic on pkt_end",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "XDP pkt read, pkt_end mangling, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R3 pointer arithmetic on pkt_end",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "XDP pkt read, pkt_data' > pkt_end, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' > pkt_end, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' > pkt_end, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end > pkt_data', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end > pkt_data', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end > pkt_data', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' < pkt_end, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' < pkt_end, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' < pkt_end, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end < pkt_data', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end < pkt_data', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end < pkt_data', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' >= pkt_end, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end >= pkt_data', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end >= pkt_data', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end >= pkt_data', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' <= pkt_end, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end <= pkt_data', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end <= pkt_data', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end <= pkt_data', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' > pkt_data, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' > pkt_data, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' > pkt_data, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data > pkt_meta', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data > pkt_meta', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data > pkt_meta', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' < pkt_data, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' < pkt_data, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data < pkt_meta', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data < pkt_meta', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data < pkt_meta', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' >= pkt_data, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data >= pkt_meta', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data >= pkt_meta', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data >= pkt_meta', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' <= pkt_data, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data <= pkt_meta', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data <= pkt_meta', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
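Each of the cases above encodes, in raw instructions, the usual XDP direct-packet-access discipline: copy data and data_end out of struct xdp_md, advance a copy of data, compare the two pointers, and dereference only on the branch where the access is provably in bounds. A hedged restricted-C sketch of the accepted pattern follows (names are illustrative, not part of this patch):

/* Illustrative sketch, not part of this patch. Shows the data/data_end
 * bounds check that the "good access" cases above express as raw insns.
 */
#include <linux/bpf.h>
#include "bpf_helpers.h"		/* selftests helper header (assumed) */

SEC("xdp")
int xdp_bounds_check(struct xdp_md *ctx)
{
	void *data     = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	__u64 *word = data;

	/* "pkt_data' > pkt_end" shape: bail out unless the first 8 bytes
	 * of the packet are known to be in bounds.
	 */
	if (data + 8 > data_end)
		return XDP_DROP;

	return *word ? XDP_PASS : XDP_DROP;	/* verifier-safe 8-byte read */
}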
diff --git a/tools/testing/selftests/drivers/net/mlxsw/blackhole_routes.sh b/tools/testing/selftests/drivers/net/mlxsw/blackhole_routes.sh
new file mode 100755
index 000000000000..5ba5bef44d5b
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/blackhole_routes.sh
@@ -0,0 +1,200 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test that blackhole routes are marked as offloaded and that packets hitting
+# them are dropped by the ASIC and not by the kernel.
+#
+# +---------------------------------+
+# | H1 (vrf) |
+# | + $h1 |
+# | | 192.0.2.1/24 |
+# | | 2001:db8:1::1/64 |
+# | | |
+# | | default via 192.0.2.2 |
+# | | default via 2001:db8:1::2 |
+# +----|----------------------------+
+# |
+# +----|----------------------------------------------------------------------+
+# | SW | |
+# | + $rp1 |
+# | 192.0.2.2/24 |
+# | 2001:db8:1::2/64 |
+# | |
+# | 2001:db8:2::2/64 |
+# | 198.51.100.2/24 |
+# | + $rp2 |
+# | | |
+# +----|----------------------------------------------------------------------+
+# |
+# +----|----------------------------+
+# | | default via 198.51.100.2 |
+# | | default via 2001:db8:2::2 |
+# | | |
+# | | 2001:db8:2::1/64 |
+# | | 198.51.100.1/24 |
+# | + $h2 |
+# | H2 (vrf) |
+# +---------------------------------+
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="
+ ping_ipv4
+ ping_ipv6
+ blackhole_ipv4
+ blackhole_ipv6
+"
+NUM_NETIFS=4
+source $lib_dir/tc_common.sh
+source $lib_dir/lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/24 2001:db8:1::1/64
+
+ ip -4 route add default vrf v$h1 nexthop via 192.0.2.2
+ ip -6 route add default vrf v$h1 nexthop via 2001:db8:1::2
+}
+
+h1_destroy()
+{
+ ip -6 route del default vrf v$h1 nexthop via 2001:db8:1::2
+ ip -4 route del default vrf v$h1 nexthop via 192.0.2.2
+
+ simple_if_fini $h1 192.0.2.1/24 2001:db8:1::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 198.51.100.1/24 2001:db8:2::1/64
+
+ ip -4 route add default vrf v$h2 nexthop via 198.51.100.2
+ ip -6 route add default vrf v$h2 nexthop via 2001:db8:2::2
+}
+
+h2_destroy()
+{
+ ip -6 route del default vrf v$h2 nexthop via 2001:db8:2::2
+ ip -4 route del default vrf v$h2 nexthop via 198.51.100.2
+
+ simple_if_fini $h2 198.51.100.1/24 2001:db8:2::1/64
+}
+
+router_create()
+{
+ ip link set dev $rp1 up
+ ip link set dev $rp2 up
+
+ tc qdisc add dev $rp1 clsact
+
+ __addr_add_del $rp1 add 192.0.2.2/24 2001:db8:1::2/64
+ __addr_add_del $rp2 add 198.51.100.2/24 2001:db8:2::2/64
+}
+
+router_destroy()
+{
+ __addr_add_del $rp2 del 198.51.100.2/24 2001:db8:2::2/64
+ __addr_add_del $rp1 del 192.0.2.2/24 2001:db8:1::2/64
+
+ tc qdisc del dev $rp1 clsact
+
+ ip link set dev $rp2 down
+ ip link set dev $rp1 down
+}
+
+ping_ipv4()
+{
+ ping_test $h1 198.51.100.1 ": h1->h2"
+}
+
+ping_ipv6()
+{
+ ping6_test $h1 2001:db8:2::1 ": h1->h2"
+}
+
+blackhole_ipv4()
+{
+ # Transmit packets from H1 to H2 and make sure they are dropped by the
+ # ASIC and not by the kernel
+ RET=0
+
+ ip -4 route add blackhole 198.51.100.0/30
+ tc filter add dev $rp1 ingress protocol ip pref 1 handle 101 flower \
+ skip_hw dst_ip 198.51.100.1 src_ip 192.0.2.1 ip_proto icmp \
+ action pass
+
+ ip -4 route show 198.51.100.0/30 | grep -q offload
+ check_err $? "route not marked as offloaded when should"
+
+ ping_do $h1 198.51.100.1
+ check_fail $? "ping passed when should not"
+
+ tc_check_packets "dev $rp1 ingress" 101 0
+ check_err $? "packets trapped and not dropped by ASIC"
+
+ log_test "IPv4 blackhole route"
+
+ tc filter del dev $rp1 ingress protocol ip pref 1 handle 101 flower
+ ip -4 route del blackhole 198.51.100.0/30
+}
+
+blackhole_ipv6()
+{
+ RET=0
+
+ ip -6 route add blackhole 2001:db8:2::/120
+ tc filter add dev $rp1 ingress protocol ipv6 pref 1 handle 101 flower \
+ skip_hw dst_ip 2001:db8:2::1 src_ip 2001:db8:1::1 \
+ ip_proto icmpv6 action pass
+
+ ip -6 route show 2001:db8:2::/120 | grep -q offload
+ check_err $? "route not marked as offloaded when should"
+
+ ping6_do $h1 2001:db8:2::1
+ check_fail $? "ping passed when should not"
+
+ tc_check_packets "dev $rp1 ingress" 101 0
+ check_err $? "packets trapped and not dropped by ASIC"
+
+ log_test "IPv6 blackhole route"
+
+ tc filter del dev $rp1 ingress protocol ipv6 pref 1 handle 101 flower
+ ip -6 route del blackhole 2001:db8:2::/120
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ rp1=${NETIFS[p2]}
+
+ rp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ vrf_prepare
+ forwarding_enable
+
+ h1_create
+ h2_create
+ router_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ router_destroy
+ h2_destroy
+ h1_destroy
+
+ forwarding_restore
+ vrf_cleanup
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh
index 1ca631d5aaba..40f16f2a3afd 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh
@@ -148,9 +148,10 @@ dscp_ping_test()
eval "t0s=($(dscp_fetch_stats $dev_10 10)
$(dscp_fetch_stats $dev_20 20))"
+ local ping_timeout=$((PING_TIMEOUT * 5))
ip vrf exec $vrf_name \
${PING} -Q $dscp_10 ${sip:+-I $sip} $dip \
- -c 10 -i 0.1 -w 2 &> /dev/null
+ -c 10 -i 0.5 -w $ping_timeout &> /dev/null
local -A t1s
eval "t1s=($(dscp_fetch_stats $dev_10 10)
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh
index 281d90766e12..9faf02e32627 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh
@@ -169,9 +169,10 @@ dscp_ping_test()
eval "local -A dev1_t0s=($(dscp_fetch_stats $dev1 0))"
eval "local -A dev2_t0s=($(dscp_fetch_stats $dev2 0))"
+ local ping_timeout=$((PING_TIMEOUT * 5))
ip vrf exec $vrf_name \
${PING} -Q $dscp ${sip:+-I $sip} $dip \
- -c 10 -i 0.1 -w 2 &> /dev/null
+ -c 10 -i 0.5 -w $ping_timeout &> /dev/null
eval "local -A dev1_t1s=($(dscp_fetch_stats $dev1 0))"
eval "local -A dev2_t1s=($(dscp_fetch_stats $dev2 0))"
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
index b41d6256b2d0..a372b2f60874 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
@@ -9,10 +9,11 @@ lib_dir=$(dirname $0)/../../../../net/forwarding
ALL_TESTS="single_mask_test identical_filters_test two_masks_test \
multiple_masks_test ctcam_edge_cases_test delta_simple_test \
+ delta_two_masks_one_key_test delta_simple_rehash_test \
bloom_simple_test bloom_complex_test bloom_delta_test"
NUM_NETIFS=2
source $lib_dir/tc_common.sh
-source $lib_dir/lib.sh
+source $lib_dir/devlink_lib.sh
tcflags="skip_hw"
@@ -38,6 +39,55 @@ h2_destroy()
simple_if_fini $h2 192.0.2.2/24 198.51.100.2/24
}
+tp_record()
+{
+ local tracepoint=$1
+ local cmd=$2
+
+ perf record -q -e $tracepoint $cmd
+ return $?
+}
+
+tp_record_all()
+{
+ local tracepoint=$1
+ local seconds=$2
+
+ perf record -a -q -e $tracepoint sleep $seconds
+ return $?
+}
+
+__tp_hit_count()
+{
+ local tracepoint=$1
+
+ local perf_output=`perf script -F trace:event,trace`
+ return `echo $perf_output | grep "$tracepoint:" | wc -l`
+}
+
+tp_check_hits()
+{
+ local tracepoint=$1
+ local count=$2
+
+ __tp_hit_count $tracepoint
+ if [[ "$?" -ne "$count" ]]; then
+ return 1
+ fi
+ return 0
+}
+
+tp_check_hits_any()
+{
+ local tracepoint=$1
+
+ __tp_hit_count $tracepoint
+ if [[ "$?" -eq "0" ]]; then
+ return 1
+ fi
+ return 0
+}
+
single_mask_test()
{
# When only a single mask is required, the device uses the master
@@ -182,20 +232,38 @@ multiple_masks_test()
# spillage is performed correctly and that the right filter is
# matched
+ if [[ "$tcflags" != "skip_sw" ]]; then
+ return 0;
+ fi
+
local index
RET=0
NUM_MASKS=32
+ NUM_ERPS=16
BASE_INDEX=100
for i in $(eval echo {1..$NUM_MASKS}); do
index=$((BASE_INDEX - i))
- tc filter add dev $h2 ingress protocol ip pref $index \
- handle $index \
- flower $tcflags dst_ip 192.0.2.2/${i} src_ip 192.0.2.1 \
- action drop
+ if ((i > NUM_ERPS)); then
+ exp_hits=1
+ err_msg="$i filters - C-TCAM spill did not happen when it was expected"
+ else
+ exp_hits=0
+ err_msg="$i filters - C-TCAM spill happened when it should not"
+ fi
+
+ tp_record "mlxsw:mlxsw_sp_acl_atcam_entry_add_ctcam_spill" \
+ "tc filter add dev $h2 ingress protocol ip pref $index \
+ handle $index \
+ flower $tcflags \
+ dst_ip 192.0.2.2/${i} src_ip 192.0.2.1/${i} \
+ action drop"
+ tp_check_hits "mlxsw:mlxsw_sp_acl_atcam_entry_add_ctcam_spill" \
+ $exp_hits
+ check_err $? "$err_msg"
$MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 \
-B 192.0.2.2 -t ip -q
@@ -325,28 +393,6 @@ ctcam_edge_cases_test()
ctcam_no_atcam_masks_test
}
-tp_record()
-{
- local tracepoint=$1
- local cmd=$2
-
- perf record -q -e $tracepoint $cmd
- return $?
-}
-
-tp_check_hits()
-{
- local tracepoint=$1
- local count=$2
-
- perf_output=`perf script -F trace:event,trace`
- hits=`echo $perf_output | grep "$tracepoint:" | wc -l`
- if [[ "$count" -ne "$hits" ]]; then
- return 1
- fi
- return 0
-}
-
delta_simple_test()
{
# The first filter will create eRP, the second filter will fit into
@@ -405,6 +451,365 @@ delta_simple_test()
log_test "delta simple test ($tcflags)"
}
+delta_two_masks_one_key_test()
+{
+ # If 2 keys are the same and only differ in mask in a way that
+ # they belong under the same ERP (second is delta of the first),
+ # there should be no C-TCAM spill.
+
+ RET=0
+
+ if [[ "$tcflags" != "skip_sw" ]]; then
+ return 0;
+ fi
+
+ tp_record "mlxsw:*" "tc filter add dev $h2 ingress protocol ip \
+ pref 1 handle 101 flower $tcflags dst_ip 192.0.2.0/24 \
+ action drop"
+ tp_check_hits "mlxsw:mlxsw_sp_acl_atcam_entry_add_ctcam_spill" 0
+ check_err $? "incorrect C-TCAM spill while inserting the first rule"
+
+ tp_record "mlxsw:*" "tc filter add dev $h2 ingress protocol ip \
+ pref 2 handle 102 flower $tcflags dst_ip 192.0.2.2 \
+ action drop"
+ tp_check_hits "mlxsw:mlxsw_sp_acl_atcam_entry_add_ctcam_spill" 0
+ check_err $? "incorrect C-TCAM spill while inserting the second rule"
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_err $? "Did not match on correct filter"
+
+ tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 102 1
+ check_err $? "Did not match on correct filter"
+
+ tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower
+
+ log_test "delta two masks one key test ($tcflags)"
+}
+
+delta_simple_rehash_test()
+{
+ RET=0
+
+ if [[ "$tcflags" != "skip_sw" ]]; then
+ return 0;
+ fi
+
+ devlink dev param set $DEVLINK_DEV \
+ name acl_region_rehash_interval cmode runtime value 0
+ check_err $? "Failed to set ACL region rehash interval"
+
+ tp_record_all mlxsw:mlxsw_sp_acl_tcam_vregion_rehash 7
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_rehash
+ check_fail $? "Rehash trace was hit even when rehash should be disabled"
+
+ devlink dev param set $DEVLINK_DEV \
+ name acl_region_rehash_interval cmode runtime value 3000
+ check_err $? "Failed to set ACL region rehash interval"
+
+ sleep 1
+
+ tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \
+ $tcflags dst_ip 192.0.1.0/25 action drop
+ tc filter add dev $h2 ingress protocol ip pref 2 handle 102 flower \
+ $tcflags dst_ip 192.0.2.2 action drop
+ tc filter add dev $h2 ingress protocol ip pref 3 handle 103 flower \
+ $tcflags dst_ip 192.0.3.0/24 action drop
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_fail $? "Matched a wrong filter"
+
+ tc_check_packets "dev $h2 ingress" 103 1
+ check_fail $? "Matched a wrong filter"
+
+ tc_check_packets "dev $h2 ingress" 102 1
+ check_err $? "Did not match on correct filter"
+
+ tp_record_all mlxsw:* 3
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_rehash
+ check_err $? "Rehash trace was not hit"
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate
+ check_err $? "Migrate trace was not hit"
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate_end
+ check_err $? "Migrate end trace was not hit"
+ tp_record_all mlxsw:* 3
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_rehash
+ check_err $? "Rehash trace was not hit"
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate
+ check_fail $? "Migrate trace was hit when no migration should happen"
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate_end
+ check_fail $? "Migrate end trace was hit when no migration should happen"
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_fail $? "Matched a wrong filter after rehash"
+
+ tc_check_packets "dev $h2 ingress" 103 1
+ check_fail $? "Matched a wrong filter after rehash"
+
+ tc_check_packets "dev $h2 ingress" 102 2
+ check_err $? "Did not match on correct filter after rehash"
+
+ tc filter del dev $h2 ingress protocol ip pref 3 handle 103 flower
+ tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower
+ tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
+
+ log_test "delta simple rehash test ($tcflags)"
+}
+
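The rehash tests drive the mlxsw acl_region_rehash_interval devlink parameter: value 0 disables periodic vregion rehash, while a non-zero value sets the interval (3000 here, which the tests treat as milliseconds). A hedged sketch of toggling it by hand, assuming $DEVLINK_DEV is provided by devlink_lib.sh:

	# Disable periodic rehash, then re-enable it with a 3 s interval.
	devlink dev param set $DEVLINK_DEV \
		name acl_region_rehash_interval cmode runtime value 0
	devlink dev param set $DEVLINK_DEV \
		name acl_region_rehash_interval cmode runtime value 3000
	devlink dev param show $DEVLINK_DEV name acl_region_rehash_interval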
+delta_simple_ipv6_rehash_test()
+{
+ RET=0
+
+ if [[ "$tcflags" != "skip_sw" ]]; then
+ return 0;
+ fi
+
+ devlink dev param set $DEVLINK_DEV \
+ name acl_region_rehash_interval cmode runtime value 0
+ check_err $? "Failed to set ACL region rehash interval"
+
+ tp_record_all mlxsw:mlxsw_sp_acl_tcam_vregion_rehash 7
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_rehash
+ check_fail $? "Rehash trace was hit even when rehash should be disabled"
+
+ devlink dev param set $DEVLINK_DEV \
+ name acl_region_rehash_interval cmode runtime value 3000
+ check_err $? "Failed to set ACL region rehash interval"
+
+ sleep 1
+
+ tc filter add dev $h2 ingress protocol ipv6 pref 1 handle 101 flower \
+ $tcflags dst_ip 2001:db8:1::0/121 action drop
+ tc filter add dev $h2 ingress protocol ipv6 pref 2 handle 102 flower \
+ $tcflags dst_ip 2001:db8:2::2 action drop
+ tc filter add dev $h2 ingress protocol ipv6 pref 3 handle 103 flower \
+ $tcflags dst_ip 2001:db8:3::0/120 action drop
+
+ $MZ $h1 -6 -c 1 -p 64 -a $h1mac -b $h2mac \
+ -A 2001:db8:2::1 -B 2001:db8:2::2 -t udp -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_fail $? "Matched a wrong filter"
+
+ tc_check_packets "dev $h2 ingress" 103 1
+ check_fail $? "Matched a wrong filter"
+
+ tc_check_packets "dev $h2 ingress" 102 1
+ check_err $? "Did not match on correct filter"
+
+ tp_record_all mlxsw:* 3
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_rehash
+ check_err $? "Rehash trace was not hit"
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate
+ check_err $? "Migrate trace was not hit"
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate_end
+ check_err $? "Migrate end trace was not hit"
+ tp_record_all mlxsw:* 3
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_rehash
+ check_err $? "Rehash trace was not hit"
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate
+ check_fail $? "Migrate trace was hit when no migration should happen"
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate_end
+ check_fail $? "Migrate end trace was hit when no migration should happen"
+
+ $MZ $h1 -6 -c 1 -p 64 -a $h1mac -b $h2mac \
+ -A 2001:db8:2::1 -B 2001:db8:2::2 -t udp -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_fail $? "Matched a wrong filter after rehash"
+
+ tc_check_packets "dev $h2 ingress" 103 1
+ check_fail $? "Matched a wrong filter after rehash"
+
+ tc_check_packets "dev $h2 ingress" 102 2
+ check_err $? "Did not match on correct filter after rehash"
+
+ tc filter del dev $h2 ingress protocol ipv6 pref 3 handle 103 flower
+ tc filter del dev $h2 ingress protocol ipv6 pref 2 handle 102 flower
+ tc filter del dev $h2 ingress protocol ipv6 pref 1 handle 101 flower
+
+ log_test "delta simple IPv6 rehash test ($tcflags)"
+}
+
+TEST_RULE_BASE=256
+declare -a test_rules_inserted
+
+test_rule_add()
+{
+ local iface=$1
+ local tcflags=$2
+ local index=$3
+
+ if ! [ ${test_rules_inserted[$index]} ] ; then
+ test_rules_inserted[$index]=false
+ fi
+ if ${test_rules_inserted[$index]} ; then
+ return
+ fi
+
+ local number=$(( $index + $TEST_RULE_BASE ))
+ printf -v hexnumber '%x' $number
+
+ batch="${batch}filter add dev $iface ingress protocol ipv6 pref 1 \
+ handle $number flower $tcflags \
+ src_ip 2001:db8:1::$hexnumber action drop\n"
+ test_rules_inserted[$index]=true
+}
+
+test_rule_del()
+{
+ local iface=$1
+ local index=$2
+
+ if ! [ ${test_rules_inserted[$index]} ] ; then
+ test_rules_inserted[$index]=false
+ fi
+ if ! ${test_rules_inserted[$index]} ; then
+ return
+ fi
+
+ local number=$(( $index + $TEST_RULE_BASE ))
+ printf -v hexnumber '%x' $number
+
+ batch="${batch}filter del dev $iface ingress protocol ipv6 pref 1 \
+ handle $number flower\n"
+ test_rules_inserted[$index]=false
+}
+
+test_rule_add_or_remove()
+{
+ local iface=$1
+ local tcflags=$2
+ local index=$3
+
+ if ! [ ${test_rules_inserted[$index]} ] ; then
+ test_rules_inserted[$index]=false
+ fi
+ if ${test_rules_inserted[$index]} ; then
+ test_rule_del $iface $index
+ else
+ test_rule_add $iface $tcflags $index
+ fi
+}
+
+test_rule_add_or_remove_random_batch()
+{
+ local iface=$1
+ local tcflags=$2
+ local total_count=$3
+ local skip=0
+ local count=0
+ local MAXSKIP=20
+ local MAXCOUNT=20
+
+ for ((i=1;i<=total_count;i++)); do
+ if (( $skip == 0 )) && (($count == 0)); then
+ ((skip=$RANDOM % $MAXSKIP + 1))
+ ((count=$RANDOM % $MAXCOUNT + 1))
+ fi
+ if (( $skip != 0 )); then
+ ((skip-=1))
+ else
+ ((count-=1))
+ test_rule_add_or_remove $iface $tcflags $i
+ fi
+ done
+}
+
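test_rule_add() and test_rule_del() do not invoke tc directly; they append "filter add"/"filter del" lines to the global $batch string, and the caller later pipes the accumulated commands into a single tc process in batch mode. A rough sketch of that pattern, using the same helpers as the test below:

	declare batch=""
	test_rule_add_or_remove_random_batch $h2 $tcflags 5000
	# One tc invocation consumes all queued commands from stdin.
	echo -n -e $batch | tc -b -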
+delta_massive_ipv6_rehash_test()
+{
+ RET=0
+
+ if [[ "$tcflags" != "skip_sw" ]]; then
+ return 0;
+ fi
+
+ devlink dev param set $DEVLINK_DEV \
+ name acl_region_rehash_interval cmode runtime value 0
+ check_err $? "Failed to set ACL region rehash interval"
+
+ tp_record_all mlxsw:mlxsw_sp_acl_tcam_vregion_rehash 7
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_rehash
+ check_fail $? "Rehash trace was hit even when rehash should be disabled"
+
+ RANDOM=4432897
+ declare batch=""
+ test_rule_add_or_remove_random_batch $h2 $tcflags 5000
+
+ echo -n -e $batch | tc -b -
+
+ declare batch=""
+ test_rule_add_or_remove_random_batch $h2 $tcflags 5000
+
+ devlink dev param set $DEVLINK_DEV \
+ name acl_region_rehash_interval cmode runtime value 3000
+ check_err $? "Failed to set ACL region rehash interval"
+
+ sleep 1
+
+ tc filter add dev $h2 ingress protocol ipv6 pref 1 handle 101 flower \
+ $tcflags dst_ip 2001:db8:1::0/121 action drop
+ tc filter add dev $h2 ingress protocol ipv6 pref 2 handle 102 flower \
+ $tcflags dst_ip 2001:db8:2::2 action drop
+ tc filter add dev $h2 ingress protocol ipv6 pref 3 handle 103 flower \
+ $tcflags dst_ip 2001:db8:3::0/120 action drop
+
+ $MZ $h1 -6 -c 1 -p 64 -a $h1mac -b $h2mac \
+ -A 2001:db8:2::1 -B 2001:db8:2::2 -t udp -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_fail $? "Matched a wrong filter"
+
+ tc_check_packets "dev $h2 ingress" 103 1
+ check_fail $? "Matched a wrong filter"
+
+ tc_check_packets "dev $h2 ingress" 102 1
+ check_err $? "Did not match on correct filter"
+
+ echo -n -e $batch | tc -b -
+
+ devlink dev param set $DEVLINK_DEV \
+ name acl_region_rehash_interval cmode runtime value 0
+ check_err $? "Failed to set ACL region rehash interval"
+
+ $MZ $h1 -6 -c 1 -p 64 -a $h1mac -b $h2mac \
+ -A 2001:db8:2::1 -B 2001:db8:2::2 -t udp -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_fail $? "Matched a wrong filter after rehash"
+
+ tc_check_packets "dev $h2 ingress" 103 1
+ check_fail $? "Matched a wrong filter after rehash"
+
+ tc_check_packets "dev $h2 ingress" 102 2
+ check_err $? "Did not match on correct filter after rehash"
+
+ tc filter del dev $h2 ingress protocol ipv6 pref 3 handle 103 flower
+ tc filter del dev $h2 ingress protocol ipv6 pref 2 handle 102 flower
+ tc filter del dev $h2 ingress protocol ipv6 pref 1 handle 101 flower
+
+ declare batch=""
+ for i in {1..5000}; do
+ test_rule_del $h2 $tcflags $i
+ done
+ echo -e $batch | tc -b -
+
+ log_test "delta massive IPv6 rehash test ($tcflags)"
+}
+
bloom_simple_test()
{
# Bloom filter requires that the eRP table is used. This test
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
index a0a80e1a69e8..e7ffc79561b7 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
@@ -2,7 +2,6 @@
# SPDX-License-Identifier: GPL-2.0
NUM_NETIFS=6
-source ../../../../net/forwarding/lib.sh
source ../../../../net/forwarding/tc_common.sh
source devlink_lib_spectrum.sh
diff --git a/tools/testing/selftests/drivers/net/mlxsw/vxlan_fdb_veto.sh b/tools/testing/selftests/drivers/net/mlxsw/vxlan_fdb_veto.sh
new file mode 100755
index 000000000000..749ba3cfda1d
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/vxlan_fdb_veto.sh
@@ -0,0 +1,126 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test vetoing of FDB entries that mlxsw cannot offload. This exercises several
+# different veto vectors to test various rollback scenarios in the vxlan driver.
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="
+ fdb_create_veto_test
+ fdb_replace_veto_test
+ fdb_append_veto_test
+ fdb_changelink_veto_test
+"
+NUM_NETIFS=2
+source $lib_dir/lib.sh
+
+setup_prepare()
+{
+ swp1=${NETIFS[p1]}
+ swp2=${NETIFS[p2]}
+
+ ip link add dev br0 type bridge mcast_snooping 0
+
+ ip link set dev $swp1 up
+ ip link set dev $swp1 master br0
+ ip link set dev $swp2 up
+
+ ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+ ttl 20 tos inherit local 198.51.100.1 dstport 4789
+ ip link set dev vxlan0 master br0
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ ip link set dev vxlan0 nomaster
+ ip link del dev vxlan0
+
+ ip link set dev $swp2 down
+ ip link set dev $swp1 nomaster
+ ip link set dev $swp1 down
+
+ ip link del dev br0
+}
+
+fdb_create_veto_test()
+{
+ RET=0
+
+ bridge fdb add 01:02:03:04:05:06 dev vxlan0 self static \
+ dst 198.51.100.2 2>/dev/null
+ check_fail $? "multicast MAC not rejected"
+
+ bridge fdb add 01:02:03:04:05:06 dev vxlan0 self static \
+ dst 198.51.100.2 2>&1 >/dev/null | grep -q mlxsw_spectrum
+ check_err $? "multicast MAC rejected without extack"
+
+ log_test "vxlan FDB veto - create"
+}
+
+fdb_replace_veto_test()
+{
+ RET=0
+
+ bridge fdb add 00:01:02:03:04:05 dev vxlan0 self static \
+ dst 198.51.100.2
+ check_err $? "valid FDB rejected"
+
+ bridge fdb replace 00:01:02:03:04:05 dev vxlan0 self static \
+ dst 198.51.100.2 port 1234 2>/dev/null
+ check_fail $? "FDB with an explicit port not rejected"
+
+ bridge fdb replace 00:01:02:03:04:05 dev vxlan0 self static \
+ dst 198.51.100.2 port 1234 2>&1 >/dev/null \
+ | grep -q mlxsw_spectrum
+ check_err $? "FDB with an explicit port rejected without extack"
+
+ log_test "vxlan FDB veto - replace"
+}
+
+fdb_append_veto_test()
+{
+ RET=0
+
+ bridge fdb add 00:00:00:00:00:00 dev vxlan0 self static \
+ dst 198.51.100.2
+ check_err $? "valid FDB rejected"
+
+ bridge fdb append 00:00:00:00:00:00 dev vxlan0 self static \
+ dst 198.51.100.3 port 1234 2>/dev/null
+ check_fail $? "FDB with an explicit port not rejected"
+
+ bridge fdb append 00:00:00:00:00:00 dev vxlan0 self static \
+ dst 198.51.100.3 port 1234 2>&1 >/dev/null \
+ | grep -q mlxsw_spectrum
+ check_err $? "FDB with an explicit port rejected without extack"
+
+ log_test "vxlan FDB veto - append"
+}
+
+fdb_changelink_veto_test()
+{
+ RET=0
+
+ ip link set dev vxlan0 type vxlan \
+ group 224.0.0.1 dev lo 2>/dev/null
+ check_fail $? "FDB with a multicast IP not rejected"
+
+ ip link set dev vxlan0 type vxlan \
+ group 224.0.0.1 dev lo 2>&1 >/dev/null \
+ | grep -q mlxsw_spectrum
+ check_err $? "FDB with a multicast IP rejected without extack"
+
+ log_test "vxlan FDB veto - changelink"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
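Each veto test checks the same thing twice: first that the offending command fails at all, then that the failure was annotated with an extack message from mlxsw_spectrum. The second check relies on the '2>&1 >/dev/null | grep -q ...' redirection order, which duplicates stderr onto the pipe before discarding the original stdout, so grep only ever sees the error text. A standalone sketch of that idiom with a placeholder command:

	# Redirection order matters: stderr goes to the pipe, stdout is dropped.
	some_failing_command 2>&1 >/dev/null | grep -q mlxsw_spectrum
	check_err $? "failure was not annotated with an extack message"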
diff --git a/tools/testing/selftests/filesystems/binderfs/.gitignore b/tools/testing/selftests/filesystems/binderfs/.gitignore
new file mode 100644
index 000000000000..8a5d9bf63dd4
--- /dev/null
+++ b/tools/testing/selftests/filesystems/binderfs/.gitignore
@@ -0,0 +1 @@
+binderfs_test
diff --git a/tools/testing/selftests/filesystems/binderfs/Makefile b/tools/testing/selftests/filesystems/binderfs/Makefile
new file mode 100644
index 000000000000..58cb659b56b4
--- /dev/null
+++ b/tools/testing/selftests/filesystems/binderfs/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+
+CFLAGS += -I../../../../../usr/include/
+TEST_GEN_PROGS := binderfs_test
+
+include ../../lib.mk
diff --git a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
new file mode 100644
index 000000000000..8c2ed962e1c7
--- /dev/null
+++ b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <sched.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <linux/android/binder.h>
+#include <linux/android/binderfs.h>
+#include "../../kselftest.h"
+
+static ssize_t write_nointr(int fd, const void *buf, size_t count)
+{
+ ssize_t ret;
+again:
+ ret = write(fd, buf, count);
+ if (ret < 0 && errno == EINTR)
+ goto again;
+
+ return ret;
+}
+
+static void write_to_file(const char *filename, const void *buf, size_t count,
+ int allowed_errno)
+{
+ int fd, saved_errno;
+ ssize_t ret;
+
+ fd = open(filename, O_WRONLY | O_CLOEXEC);
+ if (fd < 0)
+ ksft_exit_fail_msg("%s - Failed to open file %s\n",
+ strerror(errno), filename);
+
+ ret = write_nointr(fd, buf, count);
+ if (ret < 0) {
+ if (allowed_errno && (errno == allowed_errno)) {
+ close(fd);
+ return;
+ }
+
+ goto on_error;
+ }
+
+ if ((size_t)ret != count)
+ goto on_error;
+
+ close(fd);
+ return;
+
+on_error:
+ saved_errno = errno;
+ close(fd);
+ errno = saved_errno;
+
+ if (ret < 0)
+ ksft_exit_fail_msg("%s - Failed to write to file %s\n",
+ strerror(errno), filename);
+
+ ksft_exit_fail_msg("Failed to write to file %s\n", filename);
+}
+
+static void change_to_userns(void)
+{
+ int ret;
+ uid_t uid;
+ gid_t gid;
+ /* {g,u}id_map files only allow a max of 4096 bytes written to them */
+ char idmap[4096];
+
+ uid = getuid();
+ gid = getgid();
+
+ ret = unshare(CLONE_NEWUSER);
+ if (ret < 0)
+ ksft_exit_fail_msg("%s - Failed to unshare user namespace\n",
+ strerror(errno));
+
+ write_to_file("/proc/self/setgroups", "deny", strlen("deny"), ENOENT);
+
+ ret = snprintf(idmap, sizeof(idmap), "0 %d 1", uid);
+ if (ret < 0 || (size_t)ret >= sizeof(idmap))
+ ksft_exit_fail_msg("%s - Failed to prepare uid mapping\n",
+ strerror(errno));
+
+ write_to_file("/proc/self/uid_map", idmap, strlen(idmap), 0);
+
+ ret = snprintf(idmap, sizeof(idmap), "0 %d 1", gid);
+ if (ret < 0 || (size_t)ret >= sizeof(idmap))
+		ksft_exit_fail_msg("%s - Failed to prepare gid mapping\n",
+ strerror(errno));
+
+ write_to_file("/proc/self/gid_map", idmap, strlen(idmap), 0);
+
+ ret = setgid(0);
+ if (ret)
+ ksft_exit_fail_msg("%s - Failed to setgid(0)\n",
+ strerror(errno));
+
+ ret = setuid(0);
+ if (ret)
+		ksft_exit_fail_msg("%s - Failed to setuid(0)\n",
+ strerror(errno));
+}
+
+static void change_to_mountns(void)
+{
+ int ret;
+
+ ret = unshare(CLONE_NEWNS);
+ if (ret < 0)
+ ksft_exit_fail_msg("%s - Failed to unshare mount namespace\n",
+ strerror(errno));
+
+ ret = mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0);
+ if (ret < 0)
+ ksft_exit_fail_msg("%s - Failed to mount / as private\n",
+ strerror(errno));
+}
+
+static void rmdir_protect_errno(const char *dir)
+{
+ int saved_errno = errno;
+ (void)rmdir(dir);
+ errno = saved_errno;
+}
+
+static void __do_binderfs_test(void)
+{
+ int fd, ret, saved_errno;
+ size_t len;
+ ssize_t wret;
+ bool keep = false;
+ struct binderfs_device device = { 0 };
+ struct binder_version version = { 0 };
+
+ change_to_mountns();
+
+ ret = mkdir("/dev/binderfs", 0755);
+ if (ret < 0) {
+ if (errno != EEXIST)
+ ksft_exit_fail_msg(
+ "%s - Failed to create binderfs mountpoint\n",
+ strerror(errno));
+
+ keep = true;
+ }
+
+ ret = mount(NULL, "/dev/binderfs", "binder", 0, 0);
+ if (ret < 0) {
+ if (errno != ENODEV)
+ ksft_exit_fail_msg("%s - Failed to mount binderfs\n",
+ strerror(errno));
+
+ keep ? : rmdir_protect_errno("/dev/binderfs");
+ ksft_exit_skip(
+ "The Android binderfs filesystem is not available\n");
+ }
+
+ /* binderfs mount test passed */
+ ksft_inc_pass_cnt();
+
+ memcpy(device.name, "my-binder", strlen("my-binder"));
+
+ fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC);
+ if (fd < 0)
+ ksft_exit_fail_msg(
+ "%s - Failed to open binder-control device\n",
+ strerror(errno));
+
+ ret = ioctl(fd, BINDER_CTL_ADD, &device);
+ saved_errno = errno;
+ close(fd);
+ errno = saved_errno;
+ if (ret < 0) {
+ keep ? : rmdir_protect_errno("/dev/binderfs");
+ ksft_exit_fail_msg(
+ "%s - Failed to allocate new binder device\n",
+ strerror(errno));
+ }
+
+ ksft_print_msg(
+ "Allocated new binder device with major %d, minor %d, and name %s\n",
+ device.major, device.minor, device.name);
+
+ /* binder device allocation test passed */
+ ksft_inc_pass_cnt();
+
+ fd = open("/dev/binderfs/my-binder", O_CLOEXEC | O_RDONLY);
+ if (fd < 0) {
+ keep ? : rmdir_protect_errno("/dev/binderfs");
+ ksft_exit_fail_msg("%s - Failed to open my-binder device\n",
+ strerror(errno));
+ }
+
+ ret = ioctl(fd, BINDER_VERSION, &version);
+ saved_errno = errno;
+ close(fd);
+ errno = saved_errno;
+ if (ret < 0) {
+ keep ? : rmdir_protect_errno("/dev/binderfs");
+ ksft_exit_fail_msg(
+			"%s - Failed to perform BINDER_VERSION request\n",
+ strerror(errno));
+ }
+
+ ksft_print_msg("Detected binder version: %d\n",
+ version.protocol_version);
+
+ /* binder transaction with binderfs binder device passed */
+ ksft_inc_pass_cnt();
+
+ ret = unlink("/dev/binderfs/my-binder");
+ if (ret < 0) {
+ keep ? : rmdir_protect_errno("/dev/binderfs");
+ ksft_exit_fail_msg("%s - Failed to delete binder device\n",
+ strerror(errno));
+ }
+
+ /* binder device removal passed */
+ ksft_inc_pass_cnt();
+
+ ret = unlink("/dev/binderfs/binder-control");
+ if (!ret) {
+ keep ? : rmdir_protect_errno("/dev/binderfs");
+ ksft_exit_fail_msg("Managed to delete binder-control device\n");
+ } else if (errno != EPERM) {
+ keep ? : rmdir_protect_errno("/dev/binderfs");
+ ksft_exit_fail_msg(
+ "%s - Failed to delete binder-control device but exited with unexpected error code\n",
+ strerror(errno));
+ }
+
+ /* binder-control device removal failed as expected */
+ ksft_inc_xfail_cnt();
+
+on_error:
+ ret = umount2("/dev/binderfs", MNT_DETACH);
+ keep ?: rmdir_protect_errno("/dev/binderfs");
+ if (ret < 0)
+ ksft_exit_fail_msg("%s - Failed to unmount binderfs\n",
+ strerror(errno));
+
+ /* binderfs unmount test passed */
+ ksft_inc_pass_cnt();
+}
+
+static void binderfs_test_privileged()
+{
+ if (geteuid() != 0)
+ ksft_print_msg(
+ "Tests are not run as root. Skipping privileged tests\n");
+ else
+ __do_binderfs_test();
+}
+
+static void binderfs_test_unprivileged()
+{
+ change_to_userns();
+ __do_binderfs_test();
+}
+
+int main(int argc, char *argv[])
+{
+ binderfs_test_privileged();
+ binderfs_test_unprivileged();
+ ksft_exit_pass();
+}
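The mount and device-allocation steps that this test exercises can be partially reproduced from a shell; only the BINDER_CTL_ADD and BINDER_VERSION ioctls need a small C program such as the one above. A hedged sketch of the manual mount step, with an illustrative mountpoint:

	# Requires CONFIG_ANDROID_BINDERFS=y (see the config fragment below).
	mkdir -p /dev/binderfs
	mount -t binder binder /dev/binderfs
	ls /dev/binderfs	# typically only binder-control exists until
				# devices are allocated via BINDER_CTL_ADD
	umount /dev/binderfs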
diff --git a/tools/testing/selftests/filesystems/binderfs/config b/tools/testing/selftests/filesystems/binderfs/config
new file mode 100644
index 000000000000..02dd6cc9cf99
--- /dev/null
+++ b/tools/testing/selftests/filesystems/binderfs/config
@@ -0,0 +1,3 @@
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDERFS=y
+CONFIG_ANDROID_BINDER_IPC=y
diff --git a/tools/testing/selftests/firmware/config b/tools/testing/selftests/firmware/config
index 913a25a4a32b..bf634dda0720 100644
--- a/tools/testing/selftests/firmware/config
+++ b/tools/testing/selftests/firmware/config
@@ -1,6 +1,5 @@
CONFIG_TEST_FIRMWARE=y
CONFIG_FW_LOADER=y
CONFIG_FW_LOADER_USER_HELPER=y
-CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh
index 466cf2f91ba0..a4320c4b44dc 100755
--- a/tools/testing/selftests/firmware/fw_filesystem.sh
+++ b/tools/testing/selftests/firmware/fw_filesystem.sh
@@ -155,8 +155,11 @@ read_firmwares()
{
for i in $(seq 0 3); do
config_set_read_fw_idx $i
- # Verify the contents match
- if ! diff -q "$FW" $DIR/read_firmware 2>/dev/null ; then
+ # Verify the contents are what we expect.
+		# -Z is required for now -- check for yourself: md5sum
+		# on $FW and $DIR/read_firmware gives the same hash, and even
+		# cmp agrees, so something is off.
+ if ! diff -q -Z "$FW" $DIR/read_firmware 2>/dev/null ; then
echo "request #$i: firmware was not loaded" >&2
exit 1
fi
@@ -168,7 +171,7 @@ read_firmwares_expect_nofile()
for i in $(seq 0 3); do
config_set_read_fw_idx $i
# Ensures contents differ
- if diff -q "$FW" $DIR/read_firmware 2>/dev/null ; then
+ if diff -q -Z "$FW" $DIR/read_firmware 2>/dev/null ; then
echo "request $i: file was not expected to match" >&2
exit 1
fi
diff --git a/tools/testing/selftests/firmware/fw_lib.sh b/tools/testing/selftests/firmware/fw_lib.sh
index 6c5f1b2ffb74..1cbb12e284a6 100755
--- a/tools/testing/selftests/firmware/fw_lib.sh
+++ b/tools/testing/selftests/firmware/fw_lib.sh
@@ -91,7 +91,7 @@ verify_reqs()
if [ "$TEST_REQS_FW_SYSFS_FALLBACK" = "yes" ]; then
if [ ! "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then
echo "usermode helper disabled so ignoring test"
- exit $ksft_skip
+ exit 0
fi
fi
}
diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest
index 75244db70331..136387422b00 100755
--- a/tools/testing/selftests/ftrace/ftracetest
+++ b/tools/testing/selftests/ftrace/ftracetest
@@ -154,17 +154,17 @@ fi
# Define text colors
# Check available colors on the terminal, if any
-ncolors=`tput colors 2>/dev/null`
+ncolors=`tput colors 2>/dev/null || echo 0`
color_reset=
color_red=
color_green=
color_blue=
# If stdout exists and number of colors is eight or more, use them
-if [ -t 1 -a "$ncolors" -a "$ncolors" -ge 8 ]; then
- color_reset="\e[0m"
- color_red="\e[31m"
- color_green="\e[32m"
- color_blue="\e[34m"
+if [ -t 1 -a "$ncolors" -ge 8 ]; then
+ color_reset="\033[0m"
+ color_red="\033[31m"
+ color_green="\033[32m"
+ color_blue="\033[34m"
fi
strip_esc() {
@@ -173,8 +173,13 @@ strip_esc() {
}
prlog() { # messages
- echo -e "$@"
- [ "$LOG_FILE" ] && echo -e "$@" | strip_esc >> $LOG_FILE
+ newline="\n"
+ if [ "$1" = "-n" ] ; then
+ newline=
+ shift
+ fi
+ printf "$*$newline"
+ [ "$LOG_FILE" ] && printf "$*$newline" | strip_esc >> $LOG_FILE
}
catlog() { #file
cat $1
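Two portability fixes land in ftracetest here: 'tput colors' can fail outright on a dumb terminal, so the '|| echo 0' fallback keeps the later '-ge 8' test from comparing an empty string, and prlog() switches from 'echo -e' to printf because 'echo -e' is not portable across /bin/sh implementations (dash, for one, prints a literal '-e'). A rough sketch of the new prlog() behaviour, assuming a POSIX printf:

	msg='\033[32mok\033[0m'
	printf "$msg\n"		# escapes expanded, trailing newline added
	printf "$msg"		# prlog -n: same output, no trailing newline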
diff --git a/tools/testing/selftests/ir/ir_loopback.c b/tools/testing/selftests/ir/ir_loopback.c
index 858c19caf224..e700e09e3682 100644
--- a/tools/testing/selftests/ir/ir_loopback.c
+++ b/tools/testing/selftests/ir/ir_loopback.c
@@ -27,6 +27,8 @@
#define TEST_SCANCODES 10
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#define SYSFS_PATH_MAX 256
+#define DNAME_PATH_MAX 256
static const struct {
enum rc_proto proto;
@@ -51,12 +53,16 @@ static const struct {
{ RC_PROTO_RC6_6A_32, "rc-6-6a-32", 0xffffffff, "rc-6" },
{ RC_PROTO_RC6_MCE, "rc-6-mce", 0x00007fff, "rc-6" },
{ RC_PROTO_SHARP, "sharp", 0x1fff, "sharp" },
+ { RC_PROTO_IMON, "imon", 0x7fffffff, "imon" },
+ { RC_PROTO_RCMM12, "rcmm-12", 0x00000fff, "rcmm" },
+ { RC_PROTO_RCMM24, "rcmm-24", 0x00ffffff, "rcmm" },
+ { RC_PROTO_RCMM32, "rcmm-32", 0xffffffff, "rcmm" },
};
int lirc_open(const char *rc)
{
struct dirent *dent;
- char buf[100];
+ char buf[SYSFS_PATH_MAX + DNAME_PATH_MAX];
DIR *d;
int fd;
@@ -74,7 +80,7 @@ int lirc_open(const char *rc)
}
if (!dent)
- ksft_exit_fail_msg("cannot find lirc device for %s\n", rc);
+ ksft_exit_skip("cannot find lirc device for %s\n", rc);
closedir(d);
@@ -139,6 +145,11 @@ int main(int argc, char **argv)
(((scancode >> 8) ^ ~scancode) & 0xff) == 0)
continue;
+ if (rc_proto == RC_PROTO_RCMM32 &&
+ (scancode & 0x000c0000) != 0x000c0000 &&
+ scancode & 0x00008000)
+ continue;
+
struct lirc_scancode lsc = {
.rc_proto = rc_proto,
.scancode = scancode
diff --git a/tools/testing/selftests/ir/ir_loopback.sh b/tools/testing/selftests/ir/ir_loopback.sh
index 0a0b8dfa39be..b90dc9939f45 100755
--- a/tools/testing/selftests/ir/ir_loopback.sh
+++ b/tools/testing/selftests/ir/ir_loopback.sh
@@ -4,6 +4,11 @@
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
+if [ $UID != 0 ]; then
+ echo "Please run ir_loopback test as root [SKIP]"
+ exit $ksft_skip
+fi
+
if ! /sbin/modprobe -q -n rc-loopback; then
echo "ir_loopback: module rc-loopback is not found [SKIP]"
exit $ksft_skip
diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h
index a3edb2c8e43d..47e1d995c182 100644
--- a/tools/testing/selftests/kselftest.h
+++ b/tools/testing/selftests/kselftest.h
@@ -13,6 +13,7 @@
#include <stdlib.h>
#include <unistd.h>
#include <stdarg.h>
+#include <stdio.h>
/* define kselftest exit codes */
#define KSFT_PASS 0
diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
index 76d654ef3234..2d90c98eeb67 100644
--- a/tools/testing/selftests/kselftest_harness.h
+++ b/tools/testing/selftests/kselftest_harness.h
@@ -168,8 +168,8 @@
#define __TEST_IMPL(test_name, _signal) \
static void test_name(struct __test_metadata *_metadata); \
static struct __test_metadata _##test_name##_object = \
- { name: "global." #test_name, \
- fn: &test_name, termsig: _signal }; \
+ { .name = "global." #test_name, \
+ .fn = &test_name, .termsig = _signal }; \
static void __attribute__((constructor)) _register_##test_name(void) \
{ \
__register_test(&_##test_name##_object); \
@@ -304,9 +304,9 @@
} \
static struct __test_metadata \
_##fixture_name##_##test_name##_object = { \
- name: #fixture_name "." #test_name, \
- fn: &wrapper_##fixture_name##_##test_name, \
- termsig: signal, \
+ .name = #fixture_name "." #test_name, \
+ .fn = &wrapper_##fixture_name##_##test_name, \
+ .termsig = signal, \
}; \
static void __attribute__((constructor)) \
_register_##fixture_name##_##test_name(void) \
diff --git a/tools/testing/selftests/livepatch/Makefile b/tools/testing/selftests/livepatch/Makefile
new file mode 100644
index 000000000000..af4aee79bebb
--- /dev/null
+++ b/tools/testing/selftests/livepatch/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+
+TEST_GEN_PROGS := \
+ test-livepatch.sh \
+ test-callbacks.sh \
+ test-shadow-vars.sh
+
+include ../lib.mk
diff --git a/tools/testing/selftests/livepatch/README b/tools/testing/selftests/livepatch/README
new file mode 100644
index 000000000000..b73cd0e2dd51
--- /dev/null
+++ b/tools/testing/selftests/livepatch/README
@@ -0,0 +1,43 @@
+====================
+Livepatch Self Tests
+====================
+
+This is a small set of sanity tests for the kernel's livepatching infrastructure.
+
+The test suite loads and unloads several test kernel modules to verify
+livepatch behavior. Debug information is logged to the kernel's message
+buffer and parsed for expected messages. (Note: the tests will clear
+the message buffer between individual tests.)
+
+
+Config
+------
+
+Set these config options and their prerequisites:
+
+CONFIG_LIVEPATCH=y
+CONFIG_TEST_LIVEPATCH=m
+
+
+Running the tests
+-----------------
+
+Test kernel modules are built as part of lib/ (make modules) and need to
+be installed (make modules_install) as the test scripts will modprobe
+them.
+
+To run the livepatch selftests, from the top of the kernel source tree:
+
+ % make -C tools/testing/selftests TARGETS=livepatch run_tests
+
+
+Adding tests
+------------
+
+See the common functions.sh file for the existing collection of utility
+functions, most importantly set_dynamic_debug() and check_result(). The
+latter function greps the kernel's ring buffer for "livepatch:" and
+"test_klp" strings, so tests should include one of those strings for
+result comparison. Other utility functions include general module
+loading and livepatch loading helpers (waiting for patch transitions,
+sysfs entries, etc.).
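Following the conventions above, a new test script would source functions.sh, enable the dynamic debug messages, run a load/disable/unload sequence, and then compare dmesg with check_result(). A minimal skeleton under those assumptions (the module name is illustrative and the expected transcript must be filled in with the exact dmesg lines):

  #!/bin/bash
  # SPDX-License-Identifier: GPL-2.0

  . $(dirname $0)/functions.sh

  MOD_LIVEPATCH=test_klp_livepatch

  set_dynamic_debug

  echo -n "TEST: example livepatch round trip ... "
  dmesg -C

  load_lp $MOD_LIVEPATCH
  disable_lp $MOD_LIVEPATCH
  unload_lp $MOD_LIVEPATCH

  check_result "% modprobe $MOD_LIVEPATCH
  ... expected 'livepatch:' / 'test_klp' dmesg lines ...
  % rmmod $MOD_LIVEPATCH"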
diff --git a/tools/testing/selftests/livepatch/config b/tools/testing/selftests/livepatch/config
new file mode 100644
index 000000000000..0dd7700464a8
--- /dev/null
+++ b/tools/testing/selftests/livepatch/config
@@ -0,0 +1 @@
+CONFIG_TEST_LIVEPATCH=m
diff --git a/tools/testing/selftests/livepatch/functions.sh b/tools/testing/selftests/livepatch/functions.sh
new file mode 100644
index 000000000000..30195449c63c
--- /dev/null
+++ b/tools/testing/selftests/livepatch/functions.sh
@@ -0,0 +1,198 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
+
+# Shell functions for the rest of the scripts.
+
+MAX_RETRIES=600
+RETRY_INTERVAL=".1" # seconds
+
+# log(msg) - write message to kernel log
+# msg - insightful words
+function log() {
+ echo "$1" > /dev/kmsg
+}
+
+# die(msg) - game over, man
+# msg - dying words
+function die() {
+ log "ERROR: $1"
+ echo "ERROR: $1" >&2
+ exit 1
+}
+
+# set_dynamic_debug() - setup kernel dynamic debug
+# TODO - push and pop this config?
+function set_dynamic_debug() {
+ cat << EOF > /sys/kernel/debug/dynamic_debug/control
+file kernel/livepatch/* +p
+func klp_try_switch_task -p
+EOF
+}
+
+# loop_until(cmd) - loop a command until it succeeds or $MAX_RETRIES is
+#	reached, sleeping $RETRY_INTERVAL between attempts
+# cmd - command and its arguments to run
+function loop_until() {
+ local cmd="$*"
+ local i=0
+ while true; do
+ eval "$cmd" && return 0
+ [[ $((i++)) -eq $MAX_RETRIES ]] && return 1
+ sleep $RETRY_INTERVAL
+ done
+}
+
+function is_livepatch_mod() {
+ local mod="$1"
+
+ if [[ $(modinfo "$mod" | awk '/^livepatch:/{print $NF}') == "Y" ]]; then
+ return 0
+ fi
+
+ return 1
+}
+
+function __load_mod() {
+ local mod="$1"; shift
+
+ local msg="% modprobe $mod $*"
+ log "${msg%% }"
+ ret=$(modprobe "$mod" "$@" 2>&1)
+ if [[ "$ret" != "" ]]; then
+ die "$ret"
+ fi
+
+ # Wait for module in sysfs ...
+ loop_until '[[ -e "/sys/module/$mod" ]]' ||
+ die "failed to load module $mod"
+}
+
+
+# load_mod(modname, params) - load a kernel module
+# modname - module name to load
+# params - module parameters to pass to modprobe
+function load_mod() {
+ local mod="$1"; shift
+
+ is_livepatch_mod "$mod" &&
+ die "use load_lp() to load the livepatch module $mod"
+
+ __load_mod "$mod" "$@"
+}
+
+# load_lp_nowait(modname, params) - load a kernel module with a livepatch
+# but do not wait until the transition finishes
+# modname - module name to load
+# params - module parameters to pass to modprobe
+function load_lp_nowait() {
+ local mod="$1"; shift
+
+ is_livepatch_mod "$mod" ||
+ die "module $mod is not a livepatch"
+
+ __load_mod "$mod" "$@"
+
+ # Wait for livepatch in sysfs ...
+ loop_until '[[ -e "/sys/kernel/livepatch/$mod" ]]' ||
+ die "failed to load module $mod (sysfs)"
+}
+
+# load_lp(modname, params) - load a kernel module with a livepatch
+# modname - module name to load
+# params - module parameters to pass to modprobe
+function load_lp() {
+ local mod="$1"; shift
+
+ load_lp_nowait "$mod" "$@"
+
+ # Wait until the transition finishes ...
+ loop_until 'grep -q '^0$' /sys/kernel/livepatch/$mod/transition' ||
+ die "failed to complete transition"
+}
+
+# load_failing_mod(modname, params) - load a kernel module, expect to fail
+# modname - module name to load
+# params - module parameters to pass to modprobe
+function load_failing_mod() {
+ local mod="$1"; shift
+
+ local msg="% modprobe $mod $*"
+ log "${msg%% }"
+ ret=$(modprobe "$mod" "$@" 2>&1)
+ if [[ "$ret" == "" ]]; then
+ die "$mod unexpectedly loaded"
+ fi
+ log "$ret"
+}
+
+# unload_mod(modname) - unload a kernel module
+# modname - module name to unload
+function unload_mod() {
+ local mod="$1"
+
+ # Wait for module reference count to clear ...
+ loop_until '[[ $(cat "/sys/module/$mod/refcnt") == "0" ]]' ||
+ die "failed to unload module $mod (refcnt)"
+
+ log "% rmmod $mod"
+ ret=$(rmmod "$mod" 2>&1)
+ if [[ "$ret" != "" ]]; then
+ die "$ret"
+ fi
+
+	# Wait for the module to disappear from sysfs ...
+ loop_until '[[ ! -e "/sys/module/$mod" ]]' ||
+ die "failed to unload module $mod (/sys/module)"
+}
+
+# unload_lp(modname) - unload a kernel module with a livepatch
+# modname - module name to unload
+function unload_lp() {
+ unload_mod "$1"
+}
+
+# disable_lp(modname) - disable a livepatch
+# modname - module name of the livepatch to disable
+function disable_lp() {
+ local mod="$1"
+
+ log "% echo 0 > /sys/kernel/livepatch/$mod/enabled"
+ echo 0 > /sys/kernel/livepatch/"$mod"/enabled
+
+ # Wait until the transition finishes and the livepatch gets
+ # removed from sysfs...
+ loop_until '[[ ! -e "/sys/kernel/livepatch/$mod" ]]' ||
+ die "failed to disable livepatch $mod"
+}
+
+# set_pre_patch_ret(modname, pre_patch_ret)
+# modname - module name to set
+# pre_patch_ret - new pre_patch_ret value
+function set_pre_patch_ret {
+ local mod="$1"; shift
+ local ret="$1"
+
+ log "% echo $ret > /sys/module/$mod/parameters/pre_patch_ret"
+ echo "$ret" > /sys/module/"$mod"/parameters/pre_patch_ret
+
+ # Wait for sysfs value to hold ...
+ loop_until '[[ $(cat "/sys/module/$mod/parameters/pre_patch_ret") == "$ret" ]]' ||
+ die "failed to set pre_patch_ret parameter for $mod module"
+}
+
+# check_result() - verify dmesg output
+# TODO - better filter, out of order msgs, etc?
+function check_result {
+ local expect="$*"
+ local result
+
+ result=$(dmesg | grep -v 'tainting' | grep -e 'livepatch:' -e 'test_klp' | sed 's/^\[[ 0-9.]*\] //')
+
+ if [[ "$expect" == "$result" ]] ; then
+ echo "ok"
+ else
+ echo -e "not ok\n\n$(diff -upr --label expected --label result <(echo "$expect") <(echo "$result"))\n"
+ die "livepatch kselftest(s) failed"
+ fi
+}
diff --git a/tools/testing/selftests/livepatch/test-callbacks.sh b/tools/testing/selftests/livepatch/test-callbacks.sh
new file mode 100755
index 000000000000..e97a9dcb73c7
--- /dev/null
+++ b/tools/testing/selftests/livepatch/test-callbacks.sh
@@ -0,0 +1,587 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
+
+. $(dirname $0)/functions.sh
+
+MOD_LIVEPATCH=test_klp_callbacks_demo
+MOD_LIVEPATCH2=test_klp_callbacks_demo2
+MOD_TARGET=test_klp_callbacks_mod
+MOD_TARGET_BUSY=test_klp_callbacks_busy
+
+set_dynamic_debug
+
+
+# TEST: target module before livepatch
+#
+# Test a combination of loading a kernel module and a livepatch that
+# patches a function in the first module. Load the target module
+# before the livepatch module. Unload them in the same order.
+#
+# - On livepatch enable, before the livepatch transition starts,
+# pre-patch callbacks are executed for vmlinux and $MOD_TARGET (those
+# klp_objects currently loaded). After klp_objects are patched
+# according to the klp_patch, their post-patch callbacks run and the
+# transition completes.
+#
+# - Similarly, on livepatch disable, pre-patch callbacks run before the
+# unpatching transition starts. klp_objects are reverted, post-patch
+# callbacks execute and the transition completes.
+
+echo -n "TEST: target module before livepatch ... "
+dmesg -C
+
+load_mod $MOD_TARGET
+load_lp $MOD_LIVEPATCH
+disable_lp $MOD_LIVEPATCH
+unload_lp $MOD_LIVEPATCH
+unload_mod $MOD_TARGET
+
+check_result "% modprobe $MOD_TARGET
+$MOD_TARGET: ${MOD_TARGET}_init
+% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+$MOD_LIVEPATCH: pre_patch_callback: vmlinux
+$MOD_LIVEPATCH: pre_patch_callback: $MOD_TARGET -> [MODULE_STATE_LIVE] Normal state
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+$MOD_LIVEPATCH: post_patch_callback: vmlinux
+$MOD_LIVEPATCH: post_patch_callback: $MOD_TARGET -> [MODULE_STATE_LIVE] Normal state
+livepatch: '$MOD_LIVEPATCH': patching complete
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
+livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
+$MOD_LIVEPATCH: pre_unpatch_callback: vmlinux
+$MOD_LIVEPATCH: pre_unpatch_callback: $MOD_TARGET -> [MODULE_STATE_LIVE] Normal state
+livepatch: '$MOD_LIVEPATCH': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+$MOD_LIVEPATCH: post_unpatch_callback: vmlinux
+$MOD_LIVEPATCH: post_unpatch_callback: $MOD_TARGET -> [MODULE_STATE_LIVE] Normal state
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+% rmmod $MOD_LIVEPATCH
+% rmmod $MOD_TARGET
+$MOD_TARGET: ${MOD_TARGET}_exit"
+
+
+# TEST: module_coming notifier
+#
+# This test is similar to the previous test, but (un)load the livepatch
+# module before the target kernel module. This tests the livepatch
+# core's module_coming handler.
+#
+# - On livepatch enable, only pre/post-patch callbacks are executed for
+# currently loaded klp_objects, in this case, vmlinux.
+#
+# - When a targeted module is subsequently loaded, only its
+# pre/post-patch callbacks are executed.
+#
+# - On livepatch disable, all currently loaded klp_objects' (vmlinux and
+# $MOD_TARGET) pre/post-unpatch callbacks are executed.
+
+echo -n "TEST: module_coming notifier ... "
+dmesg -C
+
+load_lp $MOD_LIVEPATCH
+load_mod $MOD_TARGET
+disable_lp $MOD_LIVEPATCH
+unload_lp $MOD_LIVEPATCH
+unload_mod $MOD_TARGET
+
+check_result "% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+$MOD_LIVEPATCH: pre_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+$MOD_LIVEPATCH: post_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': patching complete
+% modprobe $MOD_TARGET
+livepatch: applying patch '$MOD_LIVEPATCH' to loading module '$MOD_TARGET'
+$MOD_LIVEPATCH: pre_patch_callback: $MOD_TARGET -> [MODULE_STATE_COMING] Full formed, running module_init
+$MOD_LIVEPATCH: post_patch_callback: $MOD_TARGET -> [MODULE_STATE_COMING] Full formed, running module_init
+$MOD_TARGET: ${MOD_TARGET}_init
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
+livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
+$MOD_LIVEPATCH: pre_unpatch_callback: vmlinux
+$MOD_LIVEPATCH: pre_unpatch_callback: $MOD_TARGET -> [MODULE_STATE_LIVE] Normal state
+livepatch: '$MOD_LIVEPATCH': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+$MOD_LIVEPATCH: post_unpatch_callback: vmlinux
+$MOD_LIVEPATCH: post_unpatch_callback: $MOD_TARGET -> [MODULE_STATE_LIVE] Normal state
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+% rmmod $MOD_LIVEPATCH
+% rmmod $MOD_TARGET
+$MOD_TARGET: ${MOD_TARGET}_exit"
+
+
+# TEST: module_going notifier
+#
+# Test loading the livepatch after a targeted kernel module, then unload
+# the kernel module before disabling the livepatch. This tests the
+# livepatch core's module_going handler.
+#
+# - First load a target module, then the livepatch.
+#
+# - When a target module is unloaded, the livepatch is only reverted
+# from that klp_object ($MOD_TARGET). As such, only its pre and
+# post-unpatch callbacks are executed when this occurs.
+#
+# - When the livepatch is disabled, pre and post-unpatch callbacks are
+# run for the remaining klp_object, vmlinux.
+
+echo -n "TEST: module_going notifier ... "
+dmesg -C
+
+load_mod $MOD_TARGET
+load_lp $MOD_LIVEPATCH
+unload_mod $MOD_TARGET
+disable_lp $MOD_LIVEPATCH
+unload_lp $MOD_LIVEPATCH
+
+check_result "% modprobe $MOD_TARGET
+$MOD_TARGET: ${MOD_TARGET}_init
+% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+$MOD_LIVEPATCH: pre_patch_callback: vmlinux
+$MOD_LIVEPATCH: pre_patch_callback: $MOD_TARGET -> [MODULE_STATE_LIVE] Normal state
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+$MOD_LIVEPATCH: post_patch_callback: vmlinux
+$MOD_LIVEPATCH: post_patch_callback: $MOD_TARGET -> [MODULE_STATE_LIVE] Normal state
+livepatch: '$MOD_LIVEPATCH': patching complete
+% rmmod $MOD_TARGET
+$MOD_TARGET: ${MOD_TARGET}_exit
+$MOD_LIVEPATCH: pre_unpatch_callback: $MOD_TARGET -> [MODULE_STATE_GOING] Going away
+livepatch: reverting patch '$MOD_LIVEPATCH' on unloading module '$MOD_TARGET'
+$MOD_LIVEPATCH: post_unpatch_callback: $MOD_TARGET -> [MODULE_STATE_GOING] Going away
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
+livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
+$MOD_LIVEPATCH: pre_unpatch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+$MOD_LIVEPATCH: post_unpatch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+% rmmod $MOD_LIVEPATCH"
+
+
+# TEST: module_coming and module_going notifiers
+#
+# This test is similar to the previous test, however the livepatch is
+# loaded first. This tests the livepatch core's module_coming and
+# module_going handlers.
+#
+# - First load the livepatch.
+#
+# - When a targeted kernel module is subsequently loaded, only its
+# pre/post-patch callbacks are executed.
+#
+# - When the target module is unloaded, the livepatch is only reverted
+# from the $MOD_TARGET klp_object. As such, only pre and
+# post-unpatch callbacks are executed when this occurs.
+
+echo -n "TEST: module_coming and module_going notifiers ... "
+dmesg -C
+
+load_lp $MOD_LIVEPATCH
+load_mod $MOD_TARGET
+unload_mod $MOD_TARGET
+disable_lp $MOD_LIVEPATCH
+unload_lp $MOD_LIVEPATCH
+
+check_result "% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+$MOD_LIVEPATCH: pre_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+$MOD_LIVEPATCH: post_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': patching complete
+% modprobe $MOD_TARGET
+livepatch: applying patch '$MOD_LIVEPATCH' to loading module '$MOD_TARGET'
+$MOD_LIVEPATCH: pre_patch_callback: $MOD_TARGET -> [MODULE_STATE_COMING] Full formed, running module_init
+$MOD_LIVEPATCH: post_patch_callback: $MOD_TARGET -> [MODULE_STATE_COMING] Full formed, running module_init
+$MOD_TARGET: ${MOD_TARGET}_init
+% rmmod $MOD_TARGET
+$MOD_TARGET: ${MOD_TARGET}_exit
+$MOD_LIVEPATCH: pre_unpatch_callback: $MOD_TARGET -> [MODULE_STATE_GOING] Going away
+livepatch: reverting patch '$MOD_LIVEPATCH' on unloading module '$MOD_TARGET'
+$MOD_LIVEPATCH: post_unpatch_callback: $MOD_TARGET -> [MODULE_STATE_GOING] Going away
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
+livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
+$MOD_LIVEPATCH: pre_unpatch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+$MOD_LIVEPATCH: post_unpatch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+% rmmod $MOD_LIVEPATCH"
+
+
+# TEST: target module not present
+#
+# A simple test of loading a livepatch without one of its patch target
+# klp_objects ever loaded ($MOD_TARGET).
+#
+# - Load the livepatch.
+#
+# - As expected, only pre/post-(un)patch handlers are executed for
+# vmlinux.
+
+echo -n "TEST: target module not present ... "
+dmesg -C
+
+load_lp $MOD_LIVEPATCH
+disable_lp $MOD_LIVEPATCH
+unload_lp $MOD_LIVEPATCH
+
+check_result "% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+$MOD_LIVEPATCH: pre_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+$MOD_LIVEPATCH: post_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': patching complete
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
+livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
+$MOD_LIVEPATCH: pre_unpatch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+$MOD_LIVEPATCH: post_unpatch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+% rmmod $MOD_LIVEPATCH"
+
+
+# TEST: pre-patch callback -ENODEV
+#
+# Test a scenario where a vmlinux pre-patch callback returns a non-zero
+# status (ie, failure).
+#
+# - First load a target module.
+#
+# - Load the livepatch module, setting its 'pre_patch_ret' value to -19
+# (-ENODEV). When its vmlinux pre-patch callback executes, this
+# status code will propagate back to the module-loading subsystem.
+# The result is that the insmod command refuses to load the livepatch
+# module.
+
+echo -n "TEST: pre-patch callback -ENODEV ... "
+dmesg -C
+
+load_mod $MOD_TARGET
+load_failing_mod $MOD_LIVEPATCH pre_patch_ret=-19
+unload_mod $MOD_TARGET
+
+check_result "% modprobe $MOD_TARGET
+$MOD_TARGET: ${MOD_TARGET}_init
+% modprobe $MOD_LIVEPATCH pre_patch_ret=-19
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+test_klp_callbacks_demo: pre_patch_callback: vmlinux
+livepatch: pre-patch callback failed for object 'vmlinux'
+livepatch: failed to enable patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': canceling patching transition, going to unpatch
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+modprobe: ERROR: could not insert '$MOD_LIVEPATCH': No such device
+% rmmod $MOD_TARGET
+$MOD_TARGET: ${MOD_TARGET}_exit"
+
+
+# TEST: module_coming + pre-patch callback -ENODEV
+#
+# Similar to the previous test, setup a livepatch such that its vmlinux
+# pre-patch callback returns success. However, when a targeted kernel
+# module is later loaded, have the livepatch return a failing status
+# code.
+#
+# - Load the livepatch, vmlinux pre-patch callback succeeds.
+#
+# - Set a trap so subsequent pre-patch callbacks to this livepatch will
+# return -ENODEV.
+#
+# - The livepatch pre-patch callback for subsequently loaded target
+# modules will return failure, so the module loader refuses to load
+# the kernel module. No post-patch or pre/post-unpatch callbacks are
+# executed for this klp_object.
+#
+# - Pre/post-unpatch callbacks are run for the vmlinux klp_object.
+
+echo -n "TEST: module_coming + pre-patch callback -ENODEV ... "
+dmesg -C
+
+load_lp $MOD_LIVEPATCH
+set_pre_patch_ret $MOD_LIVEPATCH -19
+load_failing_mod $MOD_TARGET
+disable_lp $MOD_LIVEPATCH
+unload_lp $MOD_LIVEPATCH
+
+check_result "% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+$MOD_LIVEPATCH: pre_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+$MOD_LIVEPATCH: post_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': patching complete
+% echo -19 > /sys/module/$MOD_LIVEPATCH/parameters/pre_patch_ret
+% modprobe $MOD_TARGET
+livepatch: applying patch '$MOD_LIVEPATCH' to loading module '$MOD_TARGET'
+$MOD_LIVEPATCH: pre_patch_callback: $MOD_TARGET -> [MODULE_STATE_COMING] Full formed, running module_init
+livepatch: pre-patch callback failed for object '$MOD_TARGET'
+livepatch: patch '$MOD_LIVEPATCH' failed for module '$MOD_TARGET', refusing to load module '$MOD_TARGET'
+modprobe: ERROR: could not insert '$MOD_TARGET': No such device
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
+livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
+$MOD_LIVEPATCH: pre_unpatch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+$MOD_LIVEPATCH: post_unpatch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+% rmmod $MOD_LIVEPATCH"
+
+
+# TEST: multiple target modules
+#
+# Test loading multiple targeted kernel modules. This test-case is
+# mainly for comparing with the next test-case.
+#
+# - Load a target "busy" kernel module which kicks off a worker function
+# that immediately exits.
+#
+# - Proceed with loading the livepatch and another ordinary target
+# module. Post-patch callbacks are executed and the transition
+# completes quickly.
+
+echo -n "TEST: multiple target modules ... "
+dmesg -C
+
+load_mod $MOD_TARGET_BUSY sleep_secs=0
+# give $MOD_TARGET_BUSY::busymod_work_func() a chance to run
+sleep 5
+load_lp $MOD_LIVEPATCH
+load_mod $MOD_TARGET
+unload_mod $MOD_TARGET
+disable_lp $MOD_LIVEPATCH
+unload_lp $MOD_LIVEPATCH
+unload_mod $MOD_TARGET_BUSY
+
+check_result "% modprobe $MOD_TARGET_BUSY sleep_secs=0
+$MOD_TARGET_BUSY: ${MOD_TARGET_BUSY}_init
+$MOD_TARGET_BUSY: busymod_work_func, sleeping 0 seconds ...
+$MOD_TARGET_BUSY: busymod_work_func exit
+% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+$MOD_LIVEPATCH: pre_patch_callback: vmlinux
+$MOD_LIVEPATCH: pre_patch_callback: $MOD_TARGET_BUSY -> [MODULE_STATE_LIVE] Normal state
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+$MOD_LIVEPATCH: post_patch_callback: vmlinux
+$MOD_LIVEPATCH: post_patch_callback: $MOD_TARGET_BUSY -> [MODULE_STATE_LIVE] Normal state
+livepatch: '$MOD_LIVEPATCH': patching complete
+% modprobe $MOD_TARGET
+livepatch: applying patch '$MOD_LIVEPATCH' to loading module '$MOD_TARGET'
+$MOD_LIVEPATCH: pre_patch_callback: $MOD_TARGET -> [MODULE_STATE_COMING] Full formed, running module_init
+$MOD_LIVEPATCH: post_patch_callback: $MOD_TARGET -> [MODULE_STATE_COMING] Full formed, running module_init
+$MOD_TARGET: ${MOD_TARGET}_init
+% rmmod $MOD_TARGET
+$MOD_TARGET: ${MOD_TARGET}_exit
+$MOD_LIVEPATCH: pre_unpatch_callback: $MOD_TARGET -> [MODULE_STATE_GOING] Going away
+livepatch: reverting patch '$MOD_LIVEPATCH' on unloading module '$MOD_TARGET'
+$MOD_LIVEPATCH: post_unpatch_callback: $MOD_TARGET -> [MODULE_STATE_GOING] Going away
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
+livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
+$MOD_LIVEPATCH: pre_unpatch_callback: vmlinux
+$MOD_LIVEPATCH: pre_unpatch_callback: $MOD_TARGET_BUSY -> [MODULE_STATE_LIVE] Normal state
+livepatch: '$MOD_LIVEPATCH': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+$MOD_LIVEPATCH: post_unpatch_callback: vmlinux
+$MOD_LIVEPATCH: post_unpatch_callback: $MOD_TARGET_BUSY -> [MODULE_STATE_LIVE] Normal state
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+% rmmod $MOD_LIVEPATCH
+% rmmod $MOD_TARGET_BUSY
+$MOD_TARGET_BUSY: ${MOD_TARGET_BUSY}_exit"
+
+
+
+# TEST: busy target module
+#
+# A similar test as the previous one, but force the "busy" kernel module
+# to do longer work.
+#
+# The livepatching core will refuse to patch a task that is currently
+# executing a to-be-patched function -- the consistency model stalls the
+# current patch transition until this safety-check is met. Test a
+# scenario where one of a livepatch's target klp_objects sits on such a
+# function for a long time. Meanwhile, load and unload other target
+# kernel modules while the livepatch transition is in progress.
+#
+# - Load the "busy" kernel module, this time make it do 10 seconds worth
+# of work.
+#
+# - Meanwhile, the livepatch is loaded. Notice that the patch
+# transition does not complete as the targeted "busy" module is
+# sitting on a to-be-patched function.
+#
+# - Load a second target module (this one is an ordinary idle kernel
+# module). Note that *no* post-patch callbacks will be executed while
+# the livepatch is still in transition.
+#
+# - Request an unload of the simple kernel module. The patch is still
+# transitioning, so its pre-unpatch callbacks are skipped.
+#
+# - Finally the livepatch is disabled. Since none of the patch's
+# klp_object's post-patch callbacks executed, the remaining
+# klp_object's pre-unpatch callbacks are skipped.
+
+echo -n "TEST: busy target module ... "
+dmesg -C
+
+load_mod $MOD_TARGET_BUSY sleep_secs=10
+load_lp_nowait $MOD_LIVEPATCH
+# Don't wait for transition, load $MOD_TARGET while the transition
+# is still stalled in $MOD_TARGET_BUSY::busymod_work_func()
+sleep 5
+load_mod $MOD_TARGET
+unload_mod $MOD_TARGET
+disable_lp $MOD_LIVEPATCH
+unload_lp $MOD_LIVEPATCH
+unload_mod $MOD_TARGET_BUSY
+
+check_result "% modprobe $MOD_TARGET_BUSY sleep_secs=10
+$MOD_TARGET_BUSY: ${MOD_TARGET_BUSY}_init
+$MOD_TARGET_BUSY: busymod_work_func, sleeping 10 seconds ...
+% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+$MOD_LIVEPATCH: pre_patch_callback: vmlinux
+$MOD_LIVEPATCH: pre_patch_callback: $MOD_TARGET_BUSY -> [MODULE_STATE_LIVE] Normal state
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+% modprobe $MOD_TARGET
+livepatch: applying patch '$MOD_LIVEPATCH' to loading module '$MOD_TARGET'
+$MOD_LIVEPATCH: pre_patch_callback: $MOD_TARGET -> [MODULE_STATE_COMING] Full formed, running module_init
+$MOD_TARGET: ${MOD_TARGET}_init
+% rmmod $MOD_TARGET
+$MOD_TARGET: ${MOD_TARGET}_exit
+livepatch: reverting patch '$MOD_LIVEPATCH' on unloading module '$MOD_TARGET'
+$MOD_LIVEPATCH: post_unpatch_callback: $MOD_TARGET -> [MODULE_STATE_GOING] Going away
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
+livepatch: '$MOD_LIVEPATCH': reversing transition from patching to unpatching
+livepatch: '$MOD_LIVEPATCH': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+$MOD_LIVEPATCH: post_unpatch_callback: vmlinux
+$MOD_LIVEPATCH: post_unpatch_callback: $MOD_TARGET_BUSY -> [MODULE_STATE_LIVE] Normal state
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+% rmmod $MOD_LIVEPATCH
+% rmmod $MOD_TARGET_BUSY
+$MOD_TARGET_BUSY: busymod_work_func exit
+$MOD_TARGET_BUSY: ${MOD_TARGET_BUSY}_exit"
+
+
+# TEST: multiple livepatches
+#
+# Test loading multiple livepatches. This test-case is mainly for comparing
+# with the next test-case.
+#
+# - Load and unload two livepatches, pre and post (un)patch callbacks
+# execute as each patch progresses through its (un)patching
+# transition.
+
+echo -n "TEST: multiple livepatches ... "
+dmesg -C
+
+load_lp $MOD_LIVEPATCH
+load_lp $MOD_LIVEPATCH2
+disable_lp $MOD_LIVEPATCH2
+disable_lp $MOD_LIVEPATCH
+unload_lp $MOD_LIVEPATCH2
+unload_lp $MOD_LIVEPATCH
+
+check_result "% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+$MOD_LIVEPATCH: pre_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+$MOD_LIVEPATCH: post_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': patching complete
+% modprobe $MOD_LIVEPATCH2
+livepatch: enabling patch '$MOD_LIVEPATCH2'
+livepatch: '$MOD_LIVEPATCH2': initializing patching transition
+$MOD_LIVEPATCH2: pre_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH2': starting patching transition
+livepatch: '$MOD_LIVEPATCH2': completing patching transition
+$MOD_LIVEPATCH2: post_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH2': patching complete
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH2/enabled
+livepatch: '$MOD_LIVEPATCH2': initializing unpatching transition
+$MOD_LIVEPATCH2: pre_unpatch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH2': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH2': completing unpatching transition
+$MOD_LIVEPATCH2: post_unpatch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH2': unpatching complete
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
+livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
+$MOD_LIVEPATCH: pre_unpatch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+$MOD_LIVEPATCH: post_unpatch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+% rmmod $MOD_LIVEPATCH2
+% rmmod $MOD_LIVEPATCH"
+
+
+# TEST: atomic replace
+#
+# Load multiple livepatches, the second of which is an 'atomic-replace'
+# patch. When the latter loads, the original livepatch should be
+# disabled and *none* of its pre/post-unpatch callbacks executed. On
+# the other hand, when the atomic-replace livepatch is disabled, its
+# pre/post-unpatch callbacks *should* be executed.
+#
+# - Load and unload two livepatches, the second of which has its
+# .replace flag set true.
+#
+# - Pre and post patch callbacks are executed for both livepatches.
+#
+# - Once the atomic replace module is loaded, only its pre and post
+# unpatch callbacks are executed.
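+#
+# Note: the "replace=1" module parameter used below is assumed to be a
+# convenience of the test module; inside the livepatch module itself the
+# behaviour is selected by the .replace flag of its struct klp_patch,
+# roughly:
+#
+#   static struct klp_patch patch = {
+#           .mod = THIS_MODULE,
+#           .objs = objs,
+#           .replace = true,
+#   };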
+
+echo -n "TEST: atomic replace ... "
+dmesg -C
+
+load_lp $MOD_LIVEPATCH
+load_lp $MOD_LIVEPATCH2 replace=1
+disable_lp $MOD_LIVEPATCH2
+unload_lp $MOD_LIVEPATCH2
+unload_lp $MOD_LIVEPATCH
+
+check_result "% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+$MOD_LIVEPATCH: pre_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+$MOD_LIVEPATCH: post_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': patching complete
+% modprobe $MOD_LIVEPATCH2 replace=1
+livepatch: enabling patch '$MOD_LIVEPATCH2'
+livepatch: '$MOD_LIVEPATCH2': initializing patching transition
+$MOD_LIVEPATCH2: pre_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH2': starting patching transition
+livepatch: '$MOD_LIVEPATCH2': completing patching transition
+$MOD_LIVEPATCH2: post_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH2': patching complete
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH2/enabled
+livepatch: '$MOD_LIVEPATCH2': initializing unpatching transition
+$MOD_LIVEPATCH2: pre_unpatch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH2': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH2': completing unpatching transition
+$MOD_LIVEPATCH2: post_unpatch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH2': unpatching complete
+% rmmod $MOD_LIVEPATCH2
+% rmmod $MOD_LIVEPATCH"
+
+
+exit 0
diff --git a/tools/testing/selftests/livepatch/test-livepatch.sh b/tools/testing/selftests/livepatch/test-livepatch.sh
new file mode 100755
index 000000000000..f05268aea859
--- /dev/null
+++ b/tools/testing/selftests/livepatch/test-livepatch.sh
@@ -0,0 +1,168 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
+
+. $(dirname $0)/functions.sh
+
+MOD_LIVEPATCH=test_klp_livepatch
+MOD_REPLACE=test_klp_atomic_replace
+
+set_dynamic_debug
+
+
+# TEST: basic function patching
+# - load a livepatch that modifies the output from /proc/cmdline and
+# verify correct behavior
+# - unload the livepatch and make sure the patch was removed
+
+echo -n "TEST: basic function patching ... "
+dmesg -C
+
+load_lp $MOD_LIVEPATCH
+
+if [[ "$(cat /proc/cmdline)" != "$MOD_LIVEPATCH: this has been live patched" ]] ; then
+ echo -e "FAIL\n\n"
+ die "livepatch kselftest(s) failed"
+fi
+
+disable_lp $MOD_LIVEPATCH
+unload_lp $MOD_LIVEPATCH
+
+if [[ "$(cat /proc/cmdline)" == "$MOD_LIVEPATCH: this has been live patched" ]] ; then
+ echo -e "FAIL\n\n"
+ die "livepatch kselftest(s) failed"
+fi
+
+check_result "% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+livepatch: '$MOD_LIVEPATCH': patching complete
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
+livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
+livepatch: '$MOD_LIVEPATCH': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+% rmmod $MOD_LIVEPATCH"
+
+
+# TEST: multiple livepatches
+# - load a livepatch that modifies the output from /proc/cmdline and
+# verify correct behavior
+# - load another livepatch and verify that both livepatches are active
+# - unload the second livepatch and verify that the first is still active
+# - unload the first livepatch and verify none are active
+
+echo -n "TEST: multiple livepatches ... "
+dmesg -C
+
+load_lp $MOD_LIVEPATCH
+
+grep 'live patched' /proc/cmdline > /dev/kmsg
+grep 'live patched' /proc/meminfo > /dev/kmsg
+
+load_lp $MOD_REPLACE replace=0
+
+grep 'live patched' /proc/cmdline > /dev/kmsg
+grep 'live patched' /proc/meminfo > /dev/kmsg
+
+disable_lp $MOD_REPLACE
+unload_lp $MOD_REPLACE
+
+grep 'live patched' /proc/cmdline > /dev/kmsg
+grep 'live patched' /proc/meminfo > /dev/kmsg
+
+disable_lp $MOD_LIVEPATCH
+unload_lp $MOD_LIVEPATCH
+
+grep 'live patched' /proc/cmdline > /dev/kmsg
+grep 'live patched' /proc/meminfo > /dev/kmsg
+
+check_result "% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+livepatch: '$MOD_LIVEPATCH': patching complete
+$MOD_LIVEPATCH: this has been live patched
+% modprobe $MOD_REPLACE replace=0
+livepatch: enabling patch '$MOD_REPLACE'
+livepatch: '$MOD_REPLACE': initializing patching transition
+livepatch: '$MOD_REPLACE': starting patching transition
+livepatch: '$MOD_REPLACE': completing patching transition
+livepatch: '$MOD_REPLACE': patching complete
+$MOD_LIVEPATCH: this has been live patched
+$MOD_REPLACE: this has been live patched
+% echo 0 > /sys/kernel/livepatch/$MOD_REPLACE/enabled
+livepatch: '$MOD_REPLACE': initializing unpatching transition
+livepatch: '$MOD_REPLACE': starting unpatching transition
+livepatch: '$MOD_REPLACE': completing unpatching transition
+livepatch: '$MOD_REPLACE': unpatching complete
+% rmmod $MOD_REPLACE
+$MOD_LIVEPATCH: this has been live patched
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
+livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
+livepatch: '$MOD_LIVEPATCH': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+% rmmod $MOD_LIVEPATCH"
+
+
+# TEST: atomic replace livepatch
+# - load a livepatch that modifies the output from /proc/cmdline and
+# verify correct behavior
+# - load an atomic replace livepatch and verify that only the second is active
+# - remove the first livepatch and verify that the atomic replace livepatch
+# is still active
+# - remove the atomic replace livepatch and verify that none are active
+
+echo -n "TEST: atomic replace livepatch ... "
+dmesg -C
+
+load_lp $MOD_LIVEPATCH
+
+grep 'live patched' /proc/cmdline > /dev/kmsg
+grep 'live patched' /proc/meminfo > /dev/kmsg
+
+load_lp $MOD_REPLACE replace=1
+
+grep 'live patched' /proc/cmdline > /dev/kmsg
+grep 'live patched' /proc/meminfo > /dev/kmsg
+
+unload_lp $MOD_LIVEPATCH
+
+grep 'live patched' /proc/cmdline > /dev/kmsg
+grep 'live patched' /proc/meminfo > /dev/kmsg
+
+disable_lp $MOD_REPLACE
+unload_lp $MOD_REPLACE
+
+grep 'live patched' /proc/cmdline > /dev/kmsg
+grep 'live patched' /proc/meminfo > /dev/kmsg
+
+check_result "% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+livepatch: '$MOD_LIVEPATCH': patching complete
+$MOD_LIVEPATCH: this has been live patched
+% modprobe $MOD_REPLACE replace=1
+livepatch: enabling patch '$MOD_REPLACE'
+livepatch: '$MOD_REPLACE': initializing patching transition
+livepatch: '$MOD_REPLACE': starting patching transition
+livepatch: '$MOD_REPLACE': completing patching transition
+livepatch: '$MOD_REPLACE': patching complete
+$MOD_REPLACE: this has been live patched
+% rmmod $MOD_LIVEPATCH
+$MOD_REPLACE: this has been live patched
+% echo 0 > /sys/kernel/livepatch/$MOD_REPLACE/enabled
+livepatch: '$MOD_REPLACE': initializing unpatching transition
+livepatch: '$MOD_REPLACE': starting unpatching transition
+livepatch: '$MOD_REPLACE': completing unpatching transition
+livepatch: '$MOD_REPLACE': unpatching complete
+% rmmod $MOD_REPLACE"
+
+
+exit 0
diff --git a/tools/testing/selftests/livepatch/test-shadow-vars.sh b/tools/testing/selftests/livepatch/test-shadow-vars.sh
new file mode 100755
index 000000000000..04a37831e204
--- /dev/null
+++ b/tools/testing/selftests/livepatch/test-shadow-vars.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
+
+. $(dirname $0)/functions.sh
+
+MOD_TEST=test_klp_shadow_vars
+
+set_dynamic_debug
+
+
+# TEST: basic shadow variable API
+# - load a module that exercises the shadow variable API
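+# - for reference, the calls exercised below (and echoed in the expected
+#   output) have the following form, argument names abbreviated from
+#   <linux/livepatch.h>:
+#
+#     klp_shadow_alloc(obj, id, size, gfp_flags, ctor, ctor_data)
+#     klp_shadow_get(obj, id)
+#     klp_shadow_get_or_alloc(obj, id, size, gfp_flags, ctor, ctor_data)
+#     klp_shadow_free(obj, id, dtor)
+#     klp_shadow_free_all(id, dtor)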
+
+echo -n "TEST: basic shadow variable API ... "
+dmesg -C
+
+load_mod $MOD_TEST
+unload_mod $MOD_TEST
+
+check_result "% modprobe $MOD_TEST
+$MOD_TEST: klp_shadow_get(obj=PTR5, id=0x1234) = PTR0
+$MOD_TEST: got expected NULL result
+$MOD_TEST: shadow_ctor: PTR6 -> PTR1
+$MOD_TEST: klp_shadow_alloc(obj=PTR5, id=0x1234, size=8, gfp_flags=GFP_KERNEL), ctor=PTR7, ctor_data=PTR1 = PTR6
+$MOD_TEST: shadow_ctor: PTR8 -> PTR2
+$MOD_TEST: klp_shadow_alloc(obj=PTR9, id=0x1234, size=8, gfp_flags=GFP_KERNEL), ctor=PTR7, ctor_data=PTR2 = PTR8
+$MOD_TEST: shadow_ctor: PTR10 -> PTR3
+$MOD_TEST: klp_shadow_alloc(obj=PTR5, id=0x1235, size=8, gfp_flags=GFP_KERNEL), ctor=PTR7, ctor_data=PTR3 = PTR10
+$MOD_TEST: klp_shadow_get(obj=PTR5, id=0x1234) = PTR6
+$MOD_TEST: got expected PTR6 -> PTR1 result
+$MOD_TEST: klp_shadow_get(obj=PTR9, id=0x1234) = PTR8
+$MOD_TEST: got expected PTR8 -> PTR2 result
+$MOD_TEST: klp_shadow_get(obj=PTR5, id=0x1235) = PTR10
+$MOD_TEST: got expected PTR10 -> PTR3 result
+$MOD_TEST: shadow_ctor: PTR11 -> PTR4
+$MOD_TEST: klp_shadow_get_or_alloc(obj=PTR12, id=0x1234, size=8, gfp_flags=GFP_KERNEL), ctor=PTR7, ctor_data=PTR4 = PTR11
+$MOD_TEST: klp_shadow_get_or_alloc(obj=PTR12, id=0x1234, size=8, gfp_flags=GFP_KERNEL), ctor=PTR7, ctor_data=PTR4 = PTR11
+$MOD_TEST: got expected PTR11 -> PTR4 result
+$MOD_TEST: shadow_dtor(obj=PTR5, shadow_data=PTR6)
+$MOD_TEST: klp_shadow_free(obj=PTR5, id=0x1234, dtor=PTR13)
+$MOD_TEST: klp_shadow_get(obj=PTR5, id=0x1234) = PTR0
+$MOD_TEST: got expected NULL result
+$MOD_TEST: shadow_dtor(obj=PTR9, shadow_data=PTR8)
+$MOD_TEST: klp_shadow_free(obj=PTR9, id=0x1234, dtor=PTR13)
+$MOD_TEST: klp_shadow_get(obj=PTR9, id=0x1234) = PTR0
+$MOD_TEST: got expected NULL result
+$MOD_TEST: shadow_dtor(obj=PTR12, shadow_data=PTR11)
+$MOD_TEST: klp_shadow_free(obj=PTR12, id=0x1234, dtor=PTR13)
+$MOD_TEST: klp_shadow_get(obj=PTR12, id=0x1234) = PTR0
+$MOD_TEST: got expected NULL result
+$MOD_TEST: klp_shadow_get(obj=PTR5, id=0x1235) = PTR10
+$MOD_TEST: got expected PTR10 -> PTR3 result
+$MOD_TEST: shadow_dtor(obj=PTR5, shadow_data=PTR10)
+$MOD_TEST: klp_shadow_free_all(id=0x1235, dtor=PTR13)
+$MOD_TEST: klp_shadow_get(obj=PTR5, id=0x1234) = PTR0
+$MOD_TEST: shadow_get() got expected NULL result
+% rmmod test_klp_shadow_vars"
+
+exit 0
diff --git a/tools/testing/selftests/memfd/memfd_test.c b/tools/testing/selftests/memfd/memfd_test.c
index 10baa1652fc2..c67d32eeb668 100644
--- a/tools/testing/selftests/memfd/memfd_test.c
+++ b/tools/testing/selftests/memfd/memfd_test.c
@@ -54,6 +54,22 @@ static int mfd_assert_new(const char *name, loff_t sz, unsigned int flags)
return fd;
}
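+/*
+ * Re-open an existing memfd via its /proc/self/fd/ symlink.  This yields a
+ * brand new open file description (a fresh open(), not a dup()), much like
+ * another process re-opening the file would get.
+ */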
+static int mfd_assert_reopen_fd(int fd_in)
+{
+ int r, fd;
+ char path[100];
+
+ sprintf(path, "/proc/self/fd/%d", fd_in);
+
+ fd = open(path, O_RDWR);
+ if (fd < 0) {
+ printf("re-open of existing fd %d failed\n", fd_in);
+ abort();
+ }
+
+ return fd;
+}
+
static void mfd_fail_new(const char *name, unsigned int flags)
{
int r;
@@ -255,6 +271,25 @@ static void mfd_assert_read(int fd)
munmap(p, mfd_def_size);
}
+/* Test that PROT_READ + MAP_SHARED mappings work. */
+static void mfd_assert_read_shared(int fd)
+{
+ void *p;
+
+	/* verify PROT_READ + MAP_SHARED *is* allowed */
+ p = mmap(NULL,
+ mfd_def_size,
+ PROT_READ,
+ MAP_SHARED,
+ fd,
+ 0);
+ if (p == MAP_FAILED) {
+ printf("mmap() failed: %m\n");
+ abort();
+ }
+ munmap(p, mfd_def_size);
+}
+
static void mfd_assert_write(int fd)
{
ssize_t l;
@@ -693,6 +728,44 @@ static void test_seal_write(void)
}
/*
+ * Test SEAL_FUTURE_WRITE
+ * Test whether SEAL_FUTURE_WRITE actually prevents modifications.
+ */
+static void test_seal_future_write(void)
+{
+ int fd, fd2;
+ void *p;
+
+ printf("%s SEAL-FUTURE-WRITE\n", memfd_str);
+
+ fd = mfd_assert_new("kern_memfd_seal_future_write",
+ mfd_def_size,
+ MFD_CLOEXEC | MFD_ALLOW_SEALING);
+
+ p = mfd_assert_mmap_shared(fd);
+
+ mfd_assert_has_seals(fd, 0);
+
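+	/*
+	 * Unlike F_SEAL_WRITE, F_SEAL_FUTURE_WRITE can be added while a
+	 * writable shared mapping (p, above) still exists; it only blocks
+	 * subsequent writes and new writable mappings.
+	 */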
+ mfd_assert_add_seals(fd, F_SEAL_FUTURE_WRITE);
+ mfd_assert_has_seals(fd, F_SEAL_FUTURE_WRITE);
+
+ /* read should pass, writes should fail */
+ mfd_assert_read(fd);
+ mfd_assert_read_shared(fd);
+ mfd_fail_write(fd);
+
+ fd2 = mfd_assert_reopen_fd(fd);
+ /* read should pass, writes should still fail */
+ mfd_assert_read(fd2);
+ mfd_assert_read_shared(fd2);
+ mfd_fail_write(fd2);
+
+ munmap(p, mfd_def_size);
+ close(fd2);
+ close(fd);
+}
+
+/*
* Test SEAL_SHRINK
* Test whether SEAL_SHRINK actually prevents shrinking
*/
@@ -945,6 +1018,7 @@ int main(int argc, char **argv)
test_basic();
test_seal_write();
+ test_seal_future_write();
test_seal_shrink();
test_seal_grow();
test_seal_resize();
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index 5821bdd98d20..e9c860d00416 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -17,8 +17,7 @@ CONFIG_VLAN_8021Q=y
CONFIG_NETFILTER=y
CONFIG_NETFILTER_ADVANCED=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_NAT_IPV6=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_NF_NAT=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP6_NF_NAT=m
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 802b4af18729..1080ff55a788 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -388,6 +388,7 @@ fib_carrier_unicast_test()
set -e
$IP link set dev dummy0 carrier off
+ sleep 1
set +e
echo " Carrier down"
diff --git a/tools/testing/selftests/net/forwarding/config b/tools/testing/selftests/net/forwarding/config
index 5cd2aed97958..da96eff72a8e 100644
--- a/tools/testing/selftests/net/forwarding/config
+++ b/tools/testing/selftests/net/forwarding/config
@@ -10,3 +10,5 @@ CONFIG_NET_CLS_FLOWER=m
CONFIG_NET_SCH_INGRESS=m
CONFIG_NET_ACT_GACT=m
CONFIG_VETH=m
+CONFIG_NAMESPACES=y
+CONFIG_NET_NS=y
diff --git a/tools/testing/selftests/net/forwarding/devlink_lib.sh b/tools/testing/selftests/net/forwarding/devlink_lib.sh
index 5ab1e5f43022..57cf8914910d 100644
--- a/tools/testing/selftests/net/forwarding/devlink_lib.sh
+++ b/tools/testing/selftests/net/forwarding/devlink_lib.sh
@@ -32,7 +32,7 @@ DEVLINK_VIDDID=$(lspci -s $(echo $DEVLINK_DEV | cut -d"/" -f2) \
##############################################################################
# Sanity checks
-devlink -j resource show "$DEVLINK_DEV" &> /dev/null
+devlink help 2>&1 | grep resource &> /dev/null
if [ $? -ne 0 ]; then
echo "SKIP: iproute2 too old, missing devlink resource support"
exit 1
diff --git a/tools/testing/selftests/net/forwarding/forwarding.config.sample b/tools/testing/selftests/net/forwarding/forwarding.config.sample
index e819d049d9ce..e2adb533c8fc 100644
--- a/tools/testing/selftests/net/forwarding/forwarding.config.sample
+++ b/tools/testing/selftests/net/forwarding/forwarding.config.sample
@@ -33,3 +33,6 @@ PAUSE_ON_CLEANUP=no
NETIF_TYPE=veth
# Whether to create virtual interfaces (veth) or not
NETIF_CREATE=yes
+# Timeout (in seconds) before ping exits regardless of how many packets have
+# been sent or received
+PING_TIMEOUT=5
diff --git a/tools/testing/selftests/net/forwarding/ipip_flat_gre.sh b/tools/testing/selftests/net/forwarding/ipip_flat_gre.sh
new file mode 100755
index 000000000000..abb694397b86
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ipip_flat_gre.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnel without key.
+# This test uses flat topology for IP tunneling tests. See ipip_lib.sh for more
+# details.
+
+ALL_TESTS="gre_flat4 gre_mtu_change"
+
+NUM_NETIFS=6
+source lib.sh
+source ipip_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_flat_create gre $ol1 $ul1
+ sw2_flat_create gre $ol2 $ul2
+}
+
+gre_flat4()
+{
+ RET=0
+
+ ping_test $h1 192.0.2.18 " gre flat"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change gre
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_flat_destroy $ol2 $ul2
+ sw1_flat_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ipip_flat_gre_key.sh b/tools/testing/selftests/net/forwarding/ipip_flat_gre_key.sh
new file mode 100755
index 000000000000..c4f373337e48
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ipip_flat_gre_key.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnel with key.
+# This test uses flat topology for IP tunneling tests. See ipip_lib.sh for more
+# details.
+
+ALL_TESTS="gre_flat4 gre_mtu_change"
+
+NUM_NETIFS=6
+source lib.sh
+source ipip_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_flat_create gre $ol1 $ul1 key 233
+ sw2_flat_create gre $ol2 $ul2 key 233
+}
+
+gre_flat4()
+{
+ RET=0
+
+ ping_test $h1 192.0.2.18 " gre flat with key"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change gre
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_flat_destroy $ol2 $ul2
+ sw1_flat_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ipip_flat_gre_keys.sh b/tools/testing/selftests/net/forwarding/ipip_flat_gre_keys.sh
new file mode 100755
index 000000000000..a811130c0627
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ipip_flat_gre_keys.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnel with ikey/okey.
+# This test uses flat topology for IP tunneling tests. See ipip_lib.sh for more
+# details.
+
+ALL_TESTS="gre_flat4 gre_mtu_change"
+
+NUM_NETIFS=6
+source lib.sh
+source ipip_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_flat_create gre $ol1 $ul1 ikey 111 okey 222
+ sw2_flat_create gre $ol2 $ul2 ikey 222 okey 111
+}
+
+gre_flat4()
+{
+ RET=0
+
+ ping_test $h1 192.0.2.18 " gre flat with ikey/okey"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change gre
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_flat_destroy $ol2 $ul2
+ sw1_flat_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ipip_hier_gre.sh b/tools/testing/selftests/net/forwarding/ipip_hier_gre.sh
new file mode 100755
index 000000000000..05c5b3cf2f78
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ipip_hier_gre.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnels without key.
+# This test uses hierarchical topology for IP tunneling tests. See
+# ipip_lib.sh for more details.
+
+ALL_TESTS="gre_hier4 gre_mtu_change"
+
+NUM_NETIFS=6
+source lib.sh
+source ipip_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_hierarchical_create gre $ol1 $ul1
+ sw2_hierarchical_create gre $ol2 $ul2
+}
+
+gre_hier4()
+{
+ RET=0
+
+ ping_test $h1 192.0.2.18 " gre hierarchical"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change gre
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_hierarchical_destroy $ol2 $ul2
+ sw1_hierarchical_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ipip_hier_gre_key.sh b/tools/testing/selftests/net/forwarding/ipip_hier_gre_key.sh
new file mode 100755
index 000000000000..9b105dbca32a
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ipip_hier_gre_key.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnels with key.
+# This test uses hierarchical topology for IP tunneling tests. See
+# ipip_lib.sh for more details.
+
+ALL_TESTS="gre_hier4 gre_mtu_change"
+
+NUM_NETIFS=6
+source lib.sh
+source ipip_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_hierarchical_create gre $ol1 $ul1 key 22
+ sw2_hierarchical_create gre $ol2 $ul2 key 22
+}
+
+gre_hier4()
+{
+ RET=0
+
+ ping_test $h1 192.0.2.18 " gre hierarchical with key"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change gre
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_hierarchical_destroy $ol2 $ul2
+ sw1_hierarchical_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ipip_hier_gre_keys.sh b/tools/testing/selftests/net/forwarding/ipip_hier_gre_keys.sh
new file mode 100755
index 000000000000..e275d25bd83a
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ipip_hier_gre_keys.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnels with ikey/okey.
+# This test uses hierarchical topology for IP tunneling tests. See
+# ipip_lib.sh for more details.
+
+ALL_TESTS="gre_hier4 gre_mtu_change"
+
+NUM_NETIFS=6
+source lib.sh
+source ipip_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_hierarchical_create gre $ol1 $ul1 ikey 111 okey 222
+ sw2_hierarchical_create gre $ol2 $ul2 ikey 222 okey 111
+}
+
+gre_hier4()
+{
+ RET=0
+
+ ping_test $h1 192.0.2.18 " gre hierarchical with ikey/okey"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change gre
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_hierarchical_destroy $ol2 $ul2
+ sw1_hierarchical_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ipip_lib.sh b/tools/testing/selftests/net/forwarding/ipip_lib.sh
new file mode 100644
index 000000000000..30f36a57bae6
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ipip_lib.sh
@@ -0,0 +1,349 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Handles creation and destruction of IP-in-IP or GRE tunnels over the given
+# topology. Supports both flat and hierarchical models.
+#
+# Flat Model:
+# Overlay and underlay share the same VRF.
+# SW1 uses the default VRF, so the tunnel has no bound dev.
+# SW2 uses a non-default VRF, so the tunnel has a bound dev.
+# +-------------------------+
+# | H1 |
+# | $h1 + |
+# | 192.0.2.1/28 | |
+# +-------------------|-----+
+# |
+# +-------------------|-----+
+# | SW1 | |
+# | $ol1 + |
+# | 192.0.2.2/28 |
+# | |
+# | + g1a (gre) |
+# | loc=192.0.2.65 |
+# | rem=192.0.2.66 --. |
+# | tos=inherit | |
+# | .------------------' |
+# | | |
+# | v |
+# | + $ul1.111 (vlan) |
+# | | 192.0.2.129/28 |
+# | \ |
+# | \_______ |
+# | | |
+# |VRF default + $ul1 |
+# +------------|------------+
+# |
+# +------------|------------+
+# | SW2 + $ul2 |
+# | _______| |
+# | / |
+# | / |
+# | + $ul2.111 (vlan) |
+# | ^ 192.0.2.130/28 |
+# | | |
+# | | |
+# | '------------------. |
+# | + g2a (gre) | |
+# | loc=192.0.2.66 | |
+# | rem=192.0.2.65 --' |
+# | tos=inherit |
+# | |
+# | $ol2 + |
+# | 192.0.2.17/28 | |
+# | VRF v$ol2 | |
+# +-------------------|-----+
+# |
+# +-------------------|-----+
+# | H2 | |
+# | $h2 + |
+# | 192.0.2.18/28 |
+# +-------------------------+
+#
+# Hierarchical model:
+# The tunnel is bound to a device in a different VRF
+#
+# +---------------------------+
+# | H1 |
+# | $h1 + |
+# | 192.0.2.1/28 | |
+# +-------------------|-------+
+# |
+# +-------------------|-------+
+# | SW1 | |
+# | +-----------------|-----+ |
+# | | $ol1 + | |
+# | | 192.0.2.2/28 | |
+# | | | |
+# | | + g1a (gre) | |
+# | | rem=192.0.2.66 | |
+# | | tos=inherit | |
+# | | loc=192.0.2.65 | |
+# | | ^ | |
+# | | VRF v$ol1 | | |
+# | +-----------|-----------+ |
+# | | |
+# | +-----------|-----------+ |
+# | | VRF v$ul1 | | |
+# | | | | |
+# | | | | |
+# | | v | |
+# | | dummy1 + | |
+# | | 192.0.2.65 | |
+# | | .-------' | |
+# | | | | |
+# | | v | |
+# | | + $ul1.111 (vlan) | |
+# | | | 192.0.2.129/28 | |
+# | | \ | |
+# | | \_____ | |
+# | | | | |
+# | | + $ul1 | |
+# | +----------|------------+ |
+# +------------|--------------+
+# |
+# +------------|--------------+
+# | SW2 | |
+# | +----------|------------+ |
+# | | + $ul2 | |
+# | | _____| | |
+# | | / | |
+# | | / | |
+# | | | $ul2.111 (vlan) | |
+# | | + 192.0.2.130/28 | |
+# | | ^ | |
+# | | | | |
+# | | '-------. | |
+# | | dummy2 + | |
+# | | 192.0.2.66 | |
+# | | ^ | |
+# | | | | |
+# | | | | |
+# | | VRF v$ul2 | | |
+# | +-----------|-----------+ |
+# | | |
+# | +-----------|-----------+ |
+# | | VRF v$ol2 | | |
+# | | | | |
+# | | v | |
+# | | g2a (gre)+ | |
+# | | loc=192.0.2.66 | |
+# | | rem=192.0.2.65 | |
+# | | tos=inherit | |
+# | | | |
+# | | $ol2 + | |
+# | | 192.0.2.17/28 | | |
+# | +-----------------|-----+ |
+# +-------------------|-------+
+# |
+# +-------------------|-------+
+# | H2 | |
+# | $h2 + |
+# | 192.0.2.18/28 |
+# +---------------------------+
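+#
+# Note: the sw*_create() helpers below build the tunnels with lib.sh's
+# tunnel_create(); for the flat model on SW1 this roughly corresponds to:
+#
+#   ip link add name g1a type gre local 192.0.2.65 remote 192.0.2.66 \
+#           tos inherit
+#   ip route add 192.0.2.16/28 dev g1a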
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/28 2001:db8:1::1/64
+ ip route add vrf v$h1 192.0.2.16/28 via 192.0.2.2
+}
+
+h1_destroy()
+{
+ ip route del vrf v$h1 192.0.2.16/28 via 192.0.2.2
+ simple_if_fini $h1 192.0.2.1/28
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.2.18/28
+ ip route add vrf v$h2 192.0.2.0/28 via 192.0.2.17
+}
+
+h2_destroy()
+{
+ ip route del vrf v$h2 192.0.2.0/28 via 192.0.2.17
+ simple_if_fini $h2 192.0.2.18/28
+}
+
+sw1_flat_create()
+{
+ local type=$1; shift
+ local ol1=$1; shift
+ local ul1=$1; shift
+
+ ip link set dev $ol1 up
+ __addr_add_del $ol1 add "192.0.2.2/28"
+
+ ip link set dev $ul1 up
+ vlan_create $ul1 111 "" 192.0.2.129/28
+
+ tunnel_create g1a $type 192.0.2.65 192.0.2.66 tos inherit "$@"
+ ip link set dev g1a up
+ __addr_add_del g1a add "192.0.2.65/32"
+
+ ip route add 192.0.2.66/32 via 192.0.2.130
+
+ ip route add 192.0.2.16/28 nexthop dev g1a
+}
+
+sw1_flat_destroy()
+{
+ local ol1=$1; shift
+ local ul1=$1; shift
+
+ ip route del 192.0.2.16/28
+
+ ip route del 192.0.2.66/32 via 192.0.2.130
+ __simple_if_fini g1a 192.0.2.65/32
+ tunnel_destroy g1a
+
+ vlan_destroy $ul1 111
+ __simple_if_fini $ul1
+ __simple_if_fini $ol1 192.0.2.2/28
+}
+
+sw2_flat_create()
+{
+ local type=$1; shift
+ local ol2=$1; shift
+ local ul2=$1; shift
+
+ simple_if_init $ol2 192.0.2.17/28
+ __simple_if_init $ul2 v$ol2
+ vlan_create $ul2 111 v$ol2 192.0.2.130/28
+
+ tunnel_create g2a $type 192.0.2.66 192.0.2.65 tos inherit dev v$ol2 \
+ "$@"
+ __simple_if_init g2a v$ol2 192.0.2.66/32
+
+ ip route add vrf v$ol2 192.0.2.65/32 via 192.0.2.129
+ ip route add vrf v$ol2 192.0.2.0/28 nexthop dev g2a
+}
+
+sw2_flat_destroy()
+{
+ local ol2=$1; shift
+ local ul2=$1; shift
+
+ ip route del vrf v$ol2 192.0.2.0/28
+
+ ip route del vrf v$ol2 192.0.2.65/32 via 192.0.2.129
+ __simple_if_fini g2a 192.0.2.66/32
+ tunnel_destroy g2a
+
+ vlan_destroy $ul2 111
+ __simple_if_fini $ul2
+ simple_if_fini $ol2 192.0.2.17/28
+}
+
+sw1_hierarchical_create()
+{
+ local type=$1; shift
+ local ol1=$1; shift
+ local ul1=$1; shift
+
+ simple_if_init $ol1 192.0.2.2/28
+ simple_if_init $ul1
+ ip link add name dummy1 type dummy
+ __simple_if_init dummy1 v$ul1 192.0.2.65/32
+
+ vlan_create $ul1 111 v$ul1 192.0.2.129/28
+ tunnel_create g1a $type 192.0.2.65 192.0.2.66 tos inherit dev dummy1 \
+ "$@"
+ ip link set dev g1a master v$ol1
+
+ ip route add vrf v$ul1 192.0.2.66/32 via 192.0.2.130
+ ip route add vrf v$ol1 192.0.2.16/28 nexthop dev g1a
+}
+
+sw1_hierarchical_destroy()
+{
+ local ol1=$1; shift
+ local ul1=$1; shift
+
+ ip route del vrf v$ol1 192.0.2.16/28
+ ip route del vrf v$ul1 192.0.2.66/32
+
+ tunnel_destroy g1a
+ vlan_destroy $ul1 111
+
+ __simple_if_fini dummy1 192.0.2.65/32
+ ip link del dev dummy1
+
+ simple_if_fini $ul1
+ simple_if_fini $ol1 192.0.2.2/28
+}
+
+sw2_hierarchical_create()
+{
+ local type=$1; shift
+ local ol2=$1; shift
+ local ul2=$1; shift
+
+ simple_if_init $ol2 192.0.2.17/28
+ simple_if_init $ul2
+
+ ip link add name dummy2 type dummy
+ __simple_if_init dummy2 v$ul2 192.0.2.66/32
+
+ vlan_create $ul2 111 v$ul2 192.0.2.130/28
+ tunnel_create g2a $type 192.0.2.66 192.0.2.65 tos inherit dev dummy2 \
+ "$@"
+ ip link set dev g2a master v$ol2
+
+ ip route add vrf v$ul2 192.0.2.65/32 via 192.0.2.129
+ ip route add vrf v$ol2 192.0.2.0/28 nexthop dev g2a
+}
+
+sw2_hierarchical_destroy()
+{
+ local ol2=$1; shift
+ local ul2=$1; shift
+
+ ip route del vrf v$ol2 192.0.2.0/28
+ ip route del vrf v$ul2 192.0.2.65/32
+
+ tunnel_destroy g2a
+ vlan_destroy $ul2 111
+
+ __simple_if_fini dummy2 192.0.2.66/32
+ ip link del dev dummy2
+
+ simple_if_fini $ul2
+ simple_if_fini $ol2 192.0.2.17/28
+}
+
+topo_mtu_change()
+{
+ local mtu=$1
+
+ ip link set mtu $mtu dev $h1
+ ip link set mtu $mtu dev $ol1
+ ip link set mtu $mtu dev g1a
+ ip link set mtu $mtu dev $ul1
+ ip link set mtu $mtu dev $ul1.111
+ ip link set mtu $mtu dev $h2
+ ip link set mtu $mtu dev $ol2
+ ip link set mtu $mtu dev g2a
+ ip link set mtu $mtu dev $ul2
+ ip link set mtu $mtu dev $ul2.111
+}
+
+test_mtu_change()
+{
+ local encap=$1; shift
+
+ RET=0
+
+ ping_do $h1 192.0.2.18 "-s 1800 -w 3"
+ check_fail $? "ping $encap should not pass with size 1800"
+
+ RET=0
+
+ topo_mtu_change 2000
+ ping_do $h1 192.0.2.18 "-s 1800 -w 3"
+ check_err $?
+ log_test "ping $encap packet size 1800 after MTU change"
+}
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index 3f248d1f5b91..9385dc971269 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -17,6 +17,7 @@ NETIF_TYPE=${NETIF_TYPE:=veth}
NETIF_CREATE=${NETIF_CREATE:=yes}
MCD=${MCD:=smcrouted}
MC_CLI=${MC_CLI:=smcroutectl}
+PING_TIMEOUT=${PING_TIMEOUT:=5}
relative_path="${BASH_SOURCE%/*}"
if [[ "$relative_path" == "${BASH_SOURCE}" ]]; then
@@ -211,7 +212,7 @@ log_test()
return 1
fi
- printf "TEST: %-60s [PASS]\n" "$test_name $opt_str"
+ printf "TEST: %-60s [ OK ]\n" "$test_name $opt_str"
return 0
}
@@ -820,7 +821,8 @@ ping_do()
local vrf_name
vrf_name=$(master_name_get $if_name)
- ip vrf exec $vrf_name $PING $args $dip -c 10 -i 0.1 -w 2 &> /dev/null
+ ip vrf exec $vrf_name \
+ $PING $args $dip -c 10 -i 0.1 -w $PING_TIMEOUT &> /dev/null
}
ping_test()
@@ -840,7 +842,8 @@ ping6_do()
local vrf_name
vrf_name=$(master_name_get $if_name)
- ip vrf exec $vrf_name $PING6 $args $dip -c 10 -i 0.1 -w 2 &> /dev/null
+ ip vrf exec $vrf_name \
+ $PING6 $args $dip -c 10 -i 0.1 -w $PING_TIMEOUT &> /dev/null
}
ping6_test()
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh
index 61844caf671e..28d568c48a73 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh
@@ -190,6 +190,8 @@ setup_prepare()
h4_create
switch_create
+ forwarding_enable
+
trap_install $h3 ingress
trap_install $h4 ingress
}
@@ -201,6 +203,8 @@ cleanup()
trap_uninstall $h4 ingress
trap_uninstall $h3 ingress
+ forwarding_restore
+
switch_destroy
h4_destroy
h3_destroy
@@ -220,11 +224,15 @@ test_lag_slave()
RET=0
+ tc filter add dev $swp1 ingress pref 999 \
+ proto 802.1q flower vlan_ethtype arp $tcflags \
+ action pass
mirror_install $swp1 ingress gt4 \
- "proto 802.1q flower vlan_id 333 $tcflags"
+ "proto 802.1q flower vlan_id 333 $tcflags"
# Test connectivity through $up_dev when $down_dev is set down.
ip link set dev $down_dev down
+ ip neigh flush dev br1
setup_wait_dev $up_dev
setup_wait_dev $host_dev
$ARPING -I br1 192.0.2.130 -qfc 1
@@ -240,6 +248,7 @@ test_lag_slave()
ip link set dev $up_dev up
ip link set dev $down_dev up
mirror_uninstall $swp1 ingress
+ tc filter del dev $swp1 ingress pref 999
log_test "$what ($tcflags)"
}
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
index 135902aa8b11..472bd023e2a5 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
@@ -79,6 +79,7 @@ test_span_gre_ttl()
mirror_test v$h1 192.0.2.1 192.0.2.2 $h3 77 0
ip link set dev $tundev type $type ttl 50
+ sleep 2
mirror_test v$h1 192.0.2.1 192.0.2.2 $h3 77 10
ip link set dev $tundev type $type ttl 100
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_flower.sh b/tools/testing/selftests/net/forwarding/mirror_gre_flower.sh
index 12914f40612d..09389f3b9369 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_flower.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_flower.sh
@@ -81,6 +81,8 @@ full_test_span_gre_dir_acl()
local match_dip=$1; shift
local what=$1; shift
+ RET=0
+
mirror_install $swp1 $direction $tundev \
"protocol ip flower $tcflags dst_ip $match_dip"
fail_test_span_gre_dir $tundev $direction
@@ -108,8 +110,6 @@ test_ip6gretap()
test_all()
{
- RET=0
-
slow_path_trap_install $swp1 ingress
slow_path_trap_install $swp1 egress
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
index 204b25f13934..c02291e9841e 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
@@ -1,11 +1,44 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
-# This test uses standard topology for testing gretap. See
-# mirror_gre_topo_lib.sh for more details.
-#
# Test for "tc action mirred egress mirror" when the underlay route points at a
# vlan device on top of a bridge device with vlan filtering (802.1q).
+#
+# +---------------------+ +---------------------+
+# | H1 | | H2 |
+# | + $h1 | | $h2 + |
+# | | 192.0.2.1/28 | | 192.0.2.2/28 | |
+# +-----|---------------+ +---------------|-----+
+# | |
+# +-----|-------------------------------------------------------------|-----+
+# | SW o--> mirred egress mirror dev {gt4,gt6} | |
+# | | | |
+# | +---|-------------------------------------------------------------|---+ |
+# | | + $swp1 br1 $swp2 + | |
+# | | | |
+# | | + $swp3 | |
+# | +---|-----------------------------------------------------------------+ |
+# | | | |
+# | | + br1.555 |
+# | | 192.0.2.130/28 |
+# | | 2001:db8:2::2/64 |
+# | | |
+# | | + gt6 (ip6gretap) + gt4 (gretap) |
+# | | : loc=2001:db8:2::1 : loc=192.0.2.129 |
+# | | : rem=2001:db8:2::2 : rem=192.0.2.130 |
+# | | : ttl=100 : ttl=100 |
+# | | : tos=inherit : tos=inherit |
+# | | : : |
+# +-----|---------------------:----------------------:----------------------+
+# | : :
+# +-----|---------------------:----------------------:----------------------+
+# | H3 + $h3 + h3-gt6 (ip6gretap) + h3-gt4 (gretap) |
+# | | loc=2001:db8:2::2 loc=192.0.2.130 |
+# | + $h3.555 rem=2001:db8:2::1 rem=192.0.2.129 |
+# | 192.0.2.130/28 ttl=100 ttl=100 |
+# | 2001:db8:2::2/64 tos=inherit tos=inherit |
+# | |
+# +-------------------------------------------------------------------------+
ALL_TESTS="
test_gretap
@@ -30,6 +63,15 @@ source mirror_gre_topo_lib.sh
require_command $ARPING
+h3_addr_add_del()
+{
+ local add_del=$1; shift
+ local dev=$1; shift
+
+ ip addr $add_del dev $dev 192.0.2.130/28
+ ip addr $add_del dev $dev 2001:db8:2::2/64
+}
+
setup_prepare()
{
h1=${NETIFS[p1]}
@@ -55,7 +97,8 @@ setup_prepare()
ip route rep 192.0.2.130/32 dev br1.555
ip -6 route rep 2001:db8:2::2/128 dev br1.555
- vlan_create $h3 555 v$h3 192.0.2.130/28 2001:db8:2::2/64
+ vlan_create $h3 555 v$h3
+ h3_addr_add_del add $h3.555
ip link set dev $swp3 master br1
bridge vlan add dev $swp3 vid 555
@@ -68,6 +111,8 @@ cleanup()
ip link set dev $swp2 nomaster
ip link set dev $swp3 nomaster
+
+ h3_addr_add_del del $h3.555
vlan_destroy $h3 555
vlan_destroy br1 555
@@ -182,13 +227,19 @@ test_span_gre_untagged_egress()
quick_test_span_gre_dir $tundev ingress
quick_test_span_vlan_dir $h3 555 ingress
+ h3_addr_add_del del $h3.555
bridge vlan add dev $swp3 vid 555 pvid untagged
- sleep 1
+ h3_addr_add_del add $h3
+ sleep 5
+
quick_test_span_gre_dir $tundev ingress
fail_test_span_vlan_dir $h3 555 ingress
+ h3_addr_add_del del $h3
bridge vlan add dev $swp3 vid 555
- sleep 1
+ h3_addr_add_del add $h3.555
+ sleep 5
+
quick_test_span_gre_dir $tundev ingress
quick_test_span_vlan_dir $h3 555 ingress
@@ -218,12 +269,25 @@ test_span_gre_fdb_roaming()
mirror_install $swp1 ingress $tundev "matchall $tcflags"
quick_test_span_gre_dir $tundev ingress
- bridge fdb del dev $swp3 $h3mac vlan 555 master
- bridge fdb add dev $swp2 $h3mac vlan 555 master
- sleep 1
- fail_test_span_gre_dir $tundev ingress
-
- bridge fdb del dev $swp2 $h3mac vlan 555 master
+ while ((RET == 0)); do
+ bridge fdb del dev $swp3 $h3mac vlan 555 master 2>/dev/null
+ bridge fdb add dev $swp2 $h3mac vlan 555 master
+ sleep 1
+ fail_test_span_gre_dir $tundev ingress
+
+ if ! bridge fdb sh dev $swp2 vlan 555 master \
+ | grep -q $h3mac; then
+ printf "TEST: %-60s [RETRY]\n" \
+ "$what: MAC roaming ($tcflags)"
+ # ARP or ND probably reprimed the FDB while the test
+ # was running. We would get a spurious failure.
+ RET=0
+ continue
+ fi
+ break
+ done
+
+ bridge fdb del dev $swp2 $h3mac vlan 555 master 2>/dev/null
# Re-prime FDB
$ARPING -I br1.555 192.0.2.130 -fqc 1
sleep 1
diff --git a/tools/testing/selftests/net/forwarding/mirror_lib.sh b/tools/testing/selftests/net/forwarding/mirror_lib.sh
index 07991e1025c7..00797597fcf5 100644
--- a/tools/testing/selftests/net/forwarding/mirror_lib.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_lib.sh
@@ -29,9 +29,12 @@ mirror_test()
local pref=$1; shift
local expect=$1; shift
+ local ping_timeout=$((PING_TIMEOUT * 5))
local t0=$(tc_rule_stats_get $dev $pref)
ip vrf exec $vrf_name \
- ${PING} ${sip:+-I $sip} $dip -c 10 -i 0.1 -w 2 &> /dev/null
+ ${PING} ${sip:+-I $sip} $dip -c 10 -i 0.5 -w $ping_timeout \
+ &> /dev/null
+ sleep 0.5
local t1=$(tc_rule_stats_get $dev $pref)
local delta=$((t1 - t0))
# Tolerate a couple stray extra packets.
diff --git a/tools/testing/selftests/net/forwarding/router_broadcast.sh b/tools/testing/selftests/net/forwarding/router_broadcast.sh
index 7bd2ebb6e9de..9a678ece32b4 100755
--- a/tools/testing/selftests/net/forwarding/router_broadcast.sh
+++ b/tools/testing/selftests/net/forwarding/router_broadcast.sh
@@ -170,7 +170,8 @@ ping_test_from()
log_info "ping $dip, expected reply from $from"
ip vrf exec $(master_name_get $oif) \
- $PING -I $oif $dip -c 10 -i 0.1 -w 2 -b 2>&1 | grep $from &> /dev/null
+ $PING -I $oif $dip -c 10 -i 0.1 -w $PING_TIMEOUT -b 2>&1 \
+ | grep $from &> /dev/null
check_err_fail $fail $?
}
diff --git a/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh b/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh
new file mode 100755
index 000000000000..a0b5f57d6bd3
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh
@@ -0,0 +1,567 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# +---------------------------+ +------------------------------+
+# | vrf-h1 | | vrf-h2 |
+# | + $h1 | | + $h2 |
+# | | 10.1.1.101/24 | | | 10.1.2.101/24 |
+# | | default via 10.1.1.1 | | | default via 10.1.2.1 |
+# +----|----------------------+ +----|-------------------------+
+# | |
+# +----|--------------------------------------------|-------------------------+
+# | SW | | |
+# | +--|--------------------------------------------|-----------------------+ |
+# | | + $swp1 br1 + $swp2 | |
+# | | vid 10 pvid untagged vid 20 pvid untagged | |
+# | | | |
+# | | + vx10 + vx20 | |
+# | | local 10.0.0.1 local 10.0.0.1 | |
+# | | remote 10.0.0.2 remote 10.0.0.2 | |
+# | | id 1000 id 2000 | |
+# | | dstport 4789 dstport 4789 | |
+# | | vid 10 pvid untagged vid 20 pvid untagged | |
+# | | | |
+# | +-----------------------------------+-----------------------------------+ |
+# | | |
+# | +-----------------------------------|-----------------------------------+ |
+# | | | | |
+# | | +--------------------------------+--------------------------------+ | |
+# | | | | | |
+# | | + vlan10 vlan20 + | |
+# | | | 10.1.1.11/24 10.1.2.11/24 | | |
+# | | | | | |
+# | | + vlan10-v (macvlan) vlan20-v (macvlan) + | |
+# | | 10.1.1.1/24 10.1.2.1/24 | |
+# | | 00:00:5e:00:01:01 00:00:5e:00:01:01 | |
+# | | vrf-green | |
+# | +-----------------------------------------------------------------------+ |
+# | |
+# | + $rp1 +lo |
+# | | 192.0.2.1/24 10.0.0.1/32 |
+# +----|----------------------------------------------------------------------+
+# |
+# +----|--------------------------------------------------------+
+# | | vrf-spine |
+# | + $rp2 |
+# | 192.0.2.2/24 |
+# | | (maybe) HW
+# =============================================================================
+# | | (likely) SW
+# | |
+# | + v1 (veth) |
+# | | 192.0.3.2/24 |
+# +----|--------------------------------------------------------+
+# |
+# +----|----------------------------------------------------------------------+
+# | + v2 (veth) +lo NS1 (netns) |
+# | 192.0.3.1/24 10.0.0.2/32 |
+# | |
+# | +-----------------------------------------------------------------------+ |
+# | | vrf-green | |
+# | | + vlan10-v (macvlan) vlan20-v (macvlan) + | |
+# | | | 10.1.1.1/24 10.1.2.1/24 | | |
+# | | | 00:00:5e:00:01:01 00:00:5e:00:01:01 | | |
+# | | | | | |
+# | | + vlan10 vlan20 + | |
+# | | | 10.1.1.12/24 10.1.2.12/24 | | |
+# | | | | | |
+# | | +--------------------------------+--------------------------------+ | |
+# | | | | |
+# | +-----------------------------------|-----------------------------------+ |
+# | | |
+# | +-----------------------------------+-----------------------------------+ |
+# | | | |
+# | | + vx10 + vx20 | |
+# | | local 10.0.0.2 local 10.0.0.2 | |
+# | | remote 10.0.0.1 remote 10.0.0.1 | |
+# | | id 1000 id 2000 | |
+# | | dstport 4789 dstport 4789 | |
+# | | vid 10 pvid untagged vid 20 pvid untagged | |
+# | | | |
+# | | + w1 (veth) + w3 (veth) | |
+# | | | vid 10 pvid untagged br1 | vid 20 pvid untagged | |
+# | +--|------------------------------------------|-------------------------+ |
+# | | | |
+# | | | |
+# | +--|----------------------+ +--|-------------------------+ |
+# | | | vrf-h1 | | | vrf-h2 | |
+# | | + w2 (veth) | | + w4 (veth) | |
+# | | 10.1.1.102/24 | | 10.1.2.102/24 | |
+# | | default via 10.1.1.1 | | default via 10.1.2.1 | |
+# | +-------------------------+ +----------------------------+ |
+# +---------------------------------------------------------------------------+
+
+ALL_TESTS="
+ ping_ipv4
+ arp_decap
+ arp_suppression
+"
+NUM_NETIFS=6
+source lib.sh
+
+require_command $ARPING
+
+hx_create()
+{
+ local vrf_name=$1; shift
+ local if_name=$1; shift
+ local ip_addr=$1; shift
+ local gw_ip=$1; shift
+
+ vrf_create $vrf_name
+ ip link set dev $if_name master $vrf_name
+ ip link set dev $vrf_name up
+ ip link set dev $if_name up
+
+ ip address add $ip_addr/24 dev $if_name
+ ip neigh replace $gw_ip lladdr 00:00:5e:00:01:01 nud permanent \
+ dev $if_name
+ ip route add default vrf $vrf_name nexthop via $gw_ip
+}
+export -f hx_create
+
+hx_destroy()
+{
+ local vrf_name=$1; shift
+ local if_name=$1; shift
+ local ip_addr=$1; shift
+ local gw_ip=$1; shift
+
+ ip route del default vrf $vrf_name nexthop via $gw_ip
+ ip neigh del $gw_ip dev $if_name
+ ip address del $ip_addr/24 dev $if_name
+
+ ip link set dev $if_name down
+ vrf_destroy $vrf_name
+}
+
+h1_create()
+{
+ hx_create "vrf-h1" $h1 10.1.1.101 10.1.1.1
+}
+
+h1_destroy()
+{
+ hx_destroy "vrf-h1" $h1 10.1.1.101 10.1.1.1
+}
+
+h2_create()
+{
+ hx_create "vrf-h2" $h2 10.1.2.101 10.1.2.1
+}
+
+h2_destroy()
+{
+ hx_destroy "vrf-h2" $h2 10.1.2.101 10.1.2.1
+}
+
+switch_create()
+{
+ ip link add name br1 type bridge vlan_filtering 1 vlan_default_pvid 0 \
+ mcast_snooping 0
+	# Make sure the bridge uses the MAC address of the local port and not
+	# that of the VxLAN device.
+ ip link set dev br1 address $(mac_get $swp1)
+ ip link set dev br1 up
+
+ ip link set dev $rp1 up
+ ip address add dev $rp1 192.0.2.1/24
+ ip route add 10.0.0.2/32 nexthop via 192.0.2.2
+
+ ip link add name vx10 type vxlan id 1000 \
+ local 10.0.0.1 remote 10.0.0.2 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx10 up
+
+ ip link set dev vx10 master br1
+ bridge vlan add vid 10 dev vx10 pvid untagged
+
+ ip link add name vx20 type vxlan id 2000 \
+ local 10.0.0.1 remote 10.0.0.2 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx20 up
+
+ ip link set dev vx20 master br1
+ bridge vlan add vid 20 dev vx20 pvid untagged
+
+ ip link set dev $swp1 master br1
+ ip link set dev $swp1 up
+ bridge vlan add vid 10 dev $swp1 pvid untagged
+
+ ip link set dev $swp2 master br1
+ ip link set dev $swp2 up
+ bridge vlan add vid 20 dev $swp2 pvid untagged
+
+ ip address add 10.0.0.1/32 dev lo
+
+ # Create SVIs
+ vrf_create "vrf-green"
+ ip link set dev vrf-green up
+
+ ip link add link br1 name vlan10 up master vrf-green type vlan id 10
+ ip address add 10.1.1.11/24 dev vlan10
+ ip link add link vlan10 name vlan10-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.1.1/24 dev vlan10-v
+
+ ip link add link br1 name vlan20 up master vrf-green type vlan id 20
+ ip address add 10.1.2.11/24 dev vlan20
+ ip link add link vlan20 name vlan20-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.2.1/24 dev vlan20-v
+
+ bridge vlan add vid 10 dev br1 self
+ bridge vlan add vid 20 dev br1 self
+
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
+}
+
+switch_destroy()
+{
+ bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 20
+ bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 10
+
+ bridge vlan del vid 20 dev br1 self
+ bridge vlan del vid 10 dev br1 self
+
+ ip link del dev vlan20
+
+ ip link del dev vlan10
+
+ vrf_destroy "vrf-green"
+
+ ip address del 10.0.0.1/32 dev lo
+
+ bridge vlan del vid 20 dev $swp2
+ ip link set dev $swp2 down
+ ip link set dev $swp2 nomaster
+
+ bridge vlan del vid 10 dev $swp1
+ ip link set dev $swp1 down
+ ip link set dev $swp1 nomaster
+
+ bridge vlan del vid 20 dev vx20
+ ip link set dev vx20 nomaster
+
+ ip link set dev vx20 down
+ ip link del dev vx20
+
+ bridge vlan del vid 10 dev vx10
+ ip link set dev vx10 nomaster
+
+ ip link set dev vx10 down
+ ip link del dev vx10
+
+ ip route del 10.0.0.2/32 nexthop via 192.0.2.2
+ ip address del dev $rp1 192.0.2.1/24
+ ip link set dev $rp1 down
+
+ ip link set dev br1 down
+ ip link del dev br1
+}
+
+spine_create()
+{
+ vrf_create "vrf-spine"
+ ip link set dev $rp2 master vrf-spine
+ ip link set dev v1 master vrf-spine
+ ip link set dev vrf-spine up
+ ip link set dev $rp2 up
+ ip link set dev v1 up
+
+ ip address add 192.0.2.2/24 dev $rp2
+ ip address add 192.0.3.2/24 dev v1
+
+ ip route add 10.0.0.1/32 vrf vrf-spine nexthop via 192.0.2.1
+ ip route add 10.0.0.2/32 vrf vrf-spine nexthop via 192.0.3.1
+}
+
+spine_destroy()
+{
+ ip route del 10.0.0.2/32 vrf vrf-spine nexthop via 192.0.3.1
+ ip route del 10.0.0.1/32 vrf vrf-spine nexthop via 192.0.2.1
+
+ ip address del 192.0.3.2/24 dev v1
+ ip address del 192.0.2.2/24 dev $rp2
+
+ ip link set dev v1 down
+ ip link set dev $rp2 down
+ vrf_destroy "vrf-spine"
+}
+
+ns_h1_create()
+{
+ hx_create "vrf-h1" w2 10.1.1.102 10.1.1.1
+}
+export -f ns_h1_create
+
+ns_h2_create()
+{
+ hx_create "vrf-h2" w4 10.1.2.102 10.1.2.1
+}
+export -f ns_h2_create
+
+ns_switch_create()
+{
+ ip link add name br1 type bridge vlan_filtering 1 vlan_default_pvid 0 \
+ mcast_snooping 0
+ ip link set dev br1 up
+
+ ip link set dev v2 up
+ ip address add dev v2 192.0.3.1/24
+ ip route add 10.0.0.1/32 nexthop via 192.0.3.2
+
+ ip link add name vx10 type vxlan id 1000 \
+ local 10.0.0.2 remote 10.0.0.1 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx10 up
+
+ ip link set dev vx10 master br1
+ bridge vlan add vid 10 dev vx10 pvid untagged
+
+ ip link add name vx20 type vxlan id 2000 \
+ local 10.0.0.2 remote 10.0.0.1 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx20 up
+
+ ip link set dev vx20 master br1
+ bridge vlan add vid 20 dev vx20 pvid untagged
+
+ ip link set dev w1 master br1
+ ip link set dev w1 up
+ bridge vlan add vid 10 dev w1 pvid untagged
+
+ ip link set dev w3 master br1
+ ip link set dev w3 up
+ bridge vlan add vid 20 dev w3 pvid untagged
+
+ ip address add 10.0.0.2/32 dev lo
+
+ # Create SVIs
+ vrf_create "vrf-green"
+ ip link set dev vrf-green up
+
+ ip link add link br1 name vlan10 up master vrf-green type vlan id 10
+ ip address add 10.1.1.12/24 dev vlan10
+ ip link add link vlan10 name vlan10-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.1.1/24 dev vlan10-v
+
+ ip link add link br1 name vlan20 up master vrf-green type vlan id 20
+ ip address add 10.1.2.12/24 dev vlan20
+ ip link add link vlan20 name vlan20-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.2.1/24 dev vlan20-v
+
+ bridge vlan add vid 10 dev br1 self
+ bridge vlan add vid 20 dev br1 self
+
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
+}
+export -f ns_switch_create
+
+ns_init()
+{
+ ip link add name w1 type veth peer name w2
+ ip link add name w3 type veth peer name w4
+
+ ip link set dev lo up
+
+ ns_h1_create
+ ns_h2_create
+ ns_switch_create
+}
+export -f ns_init
+
+ns1_create()
+{
+ ip netns add ns1
+ ip link set dev v2 netns ns1
+ in_ns ns1 ns_init
+}
+
+ns1_destroy()
+{
+ ip netns exec ns1 ip link set dev v2 netns 1
+ ip netns del ns1
+}
+
+macs_populate()
+{
+ local mac1=$1; shift
+ local mac2=$1; shift
+ local ip1=$1; shift
+ local ip2=$1; shift
+ local dst=$1; shift
+
+ bridge fdb add $mac1 dev vx10 self master extern_learn static \
+ dst $dst vlan 10
+ bridge fdb add $mac2 dev vx20 self master extern_learn static \
+ dst $dst vlan 20
+
+ ip neigh add $ip1 lladdr $mac1 nud noarp dev vlan10 \
+ extern_learn
+ ip neigh add $ip2 lladdr $mac2 nud noarp dev vlan20 \
+ extern_learn
+}
+export -f macs_populate
+
+macs_initialize()
+{
+ local h1_ns_mac=$(in_ns ns1 mac_get w2)
+ local h2_ns_mac=$(in_ns ns1 mac_get w4)
+ local h1_mac=$(mac_get $h1)
+ local h2_mac=$(mac_get $h2)
+
+ macs_populate $h1_ns_mac $h2_ns_mac 10.1.1.102 10.1.2.102 10.0.0.2
+ in_ns ns1 macs_populate $h1_mac $h2_mac 10.1.1.101 10.1.2.101 10.0.0.1
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ rp1=${NETIFS[p5]}
+ rp2=${NETIFS[p6]}
+
+ vrf_prepare
+ forwarding_enable
+
+ h1_create
+ h2_create
+ switch_create
+
+ ip link add name v1 type veth peer name v2
+ spine_create
+ ns1_create
+
+ macs_initialize
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ ns1_destroy
+ spine_destroy
+ ip link del dev v1
+
+ switch_destroy
+ h2_destroy
+ h1_destroy
+
+ forwarding_restore
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1 10.1.2.101 ": local->local vid 10->vid 20"
+ ping_test $h1 10.1.1.102 ": local->remote vid 10->vid 10"
+ ping_test $h2 10.1.2.102 ": local->remote vid 20->vid 20"
+ ping_test $h1 10.1.2.102 ": local->remote vid 10->vid 20"
+ ping_test $h2 10.1.1.102 ": local->remote vid 20->vid 10"
+}
+
+arp_decap()
+{
+ # Repeat the ping tests, but without populating the neighbours. This
+ # makes sure we correctly decapsulate ARP packets
+ log_info "deleting neighbours from vlan interfaces"
+
+ ip neigh del 10.1.1.102 dev vlan10
+ ip neigh del 10.1.2.102 dev vlan20
+
+ ping_ipv4
+
+ ip neigh replace 10.1.1.102 lladdr $(in_ns ns1 mac_get w2) nud noarp \
+ dev vlan10 extern_learn
+ ip neigh replace 10.1.2.102 lladdr $(in_ns ns1 mac_get w4) nud noarp \
+ dev vlan20 extern_learn
+}
+
+arp_suppression_compare()
+{
+ local expect=$1; shift
+ local actual=$(in_ns ns1 tc_rule_stats_get vx10 1 ingress)
+
+ (( expect == actual ))
+ check_err $? "expected $expect arps got $actual"
+}
+
+arp_suppression()
+{
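+	# With neigh_suppress on, the bridge answers ARP requests on behalf
+	# of hosts whose IP/MAC binding is known on the SVI, so the request
+	# is not flooded to the remote VTEP (where the tc rule installed in
+	# ns1 below would count it).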
+ ip link set dev vx10 type bridge_slave neigh_suppress on
+
+ in_ns ns1 tc qdisc add dev vx10 clsact
+ in_ns ns1 tc filter add dev vx10 ingress proto arp pref 1 handle 101 \
+ flower dst_mac ff:ff:ff:ff:ff:ff arp_tip 10.1.1.102 arp_op \
+ request action pass
+
+ # The neighbour is configured on the SVI and ARP suppression is on, so
+ # the ARP request should be suppressed
+ RET=0
+
+ $ARPING -I $h1 -fqb -c 1 -w 1 10.1.1.102
+ check_err $? "arping failed"
+
+ arp_suppression_compare 0
+
+ log_test "neigh_suppress: on / neigh exists: yes"
+
+	# Delete the neighbour from the SVI. A single ARP request should be
+ # received by the remote VTEP
+ RET=0
+
+ ip neigh del 10.1.1.102 dev vlan10
+
+ $ARPING -I $h1 -fqb -c 1 -w 1 10.1.1.102
+ check_err $? "arping failed"
+
+ arp_suppression_compare 1
+
+ log_test "neigh_suppress: on / neigh exists: no"
+
+ # Turn off ARP suppression and make sure ARP is not suppressed,
+ # regardless of neighbour existence on the SVI
+ RET=0
+
+ ip neigh del 10.1.1.102 dev vlan10 &> /dev/null
+ ip link set dev vx10 type bridge_slave neigh_suppress off
+
+ $ARPING -I $h1 -fqb -c 1 -w 1 10.1.1.102
+ check_err $? "arping failed"
+
+ arp_suppression_compare 2
+
+ log_test "neigh_suppress: off / neigh exists: no"
+
+ RET=0
+
+ ip neigh add 10.1.1.102 lladdr $(in_ns ns1 mac_get w2) nud noarp \
+ dev vlan10 extern_learn
+
+ $ARPING -I $h1 -fqb -c 1 -w 1 10.1.1.102
+ check_err $? "arping failed"
+
+ arp_suppression_compare 3
+
+ log_test "neigh_suppress: off / neigh exists: yes"
+
+ in_ns ns1 tc qdisc del dev vx10 clsact
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh b/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh
new file mode 100755
index 000000000000..1209031bc794
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh
@@ -0,0 +1,551 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# +---------------------------+ +------------------------------+
+# | vrf-h1 | | vrf-h2 |
+# | + $h1 | | + $h2 |
+# | | 10.1.1.101/24 | | | 10.1.2.101/24 |
+# | | default via 10.1.1.1 | | | default via 10.1.2.1 |
+# +----|----------------------+ +----|-------------------------+
+# | |
+# +----|--------------------------------------------|-------------------------+
+# | SW | | |
+# | +--|--------------------------------------------|-----------------------+ |
+# | | + $swp1 br1 + $swp2 | |
+# | | vid 10 pvid untagged vid 20 pvid untagged | |
+# | | | |
+# | | + vx10 + vx20 | |
+# | | local 10.0.0.1 local 10.0.0.1 | |
+# | | remote 10.0.0.2 remote 10.0.0.2 | |
+# | | id 1010 id 1020 | |
+# | | dstport 4789 dstport 4789 | |
+# | | vid 10 pvid untagged vid 20 pvid untagged | |
+# | | | |
+# | | + vx4001 | |
+# | | local 10.0.0.1 | |
+# | | remote 10.0.0.2 | |
+# | | id 104001 | |
+# | | dstport 4789 | |
+# | | vid 4001 pvid untagged | |
+# | | | |
+# | +-----------------------------------+-----------------------------------+ |
+# | | |
+# | +-----------------------------------|-----------------------------------+ |
+# | | | | |
+# | | +--------------------------------+--------------------------------+ | |
+# | | | | | | |
+# | | + vlan10 | vlan20 + | |
+# | | | 10.1.1.11/24 | 10.1.2.11/24 | | |
+# | | | | | | |
+# | | + vlan10-v (macvlan) + vlan20-v (macvlan) + | |
+# | | 10.1.1.1/24 vlan4001 10.1.2.1/24 | |
+# | | 00:00:5e:00:01:01 00:00:5e:00:01:01 | |
+# | | vrf-green | |
+# | +-----------------------------------------------------------------------+ |
+# | |
+# | + $rp1 +lo |
+# | | 192.0.2.1/24 10.0.0.1/32 |
+# +----|----------------------------------------------------------------------+
+# |
+# +----|--------------------------------------------------------+
+# | | vrf-spine |
+# | + $rp2 |
+# | 192.0.2.2/24 |
+# | | (maybe) HW
+# =============================================================================
+# | | (likely) SW
+# | |
+# | + v1 (veth) |
+# | | 192.0.3.2/24 |
+# +----|--------------------------------------------------------+
+# |
+# +----|----------------------------------------------------------------------+
+# | + v2 (veth) +lo NS1 (netns) |
+# | 192.0.3.1/24 10.0.0.2/32 |
+# | |
+# | +-----------------------------------------------------------------------+ |
+# | | vrf-green | |
+# | | + vlan10-v (macvlan) vlan20-v (macvlan) + | |
+# | | | 10.1.1.1/24 10.1.2.1/24 | | |
+# | | | 00:00:5e:00:01:01 00:00:5e:00:01:01 | | |
+# | | | vlan4001 | | |
+# | | + vlan10 + vlan20 + | |
+# | | | 10.1.1.12/24 | 10.1.2.12/24 | | |
+# | | | | | | |
+# | | +--------------------------------+--------------------------------+ | |
+# | | | | |
+# | +-----------------------------------|-----------------------------------+ |
+# | | |
+# | +-----------------------------------+-----------------------------------+ |
+# | | | |
+# | | + vx10 + vx20 | |
+# | | local 10.0.0.2 local 10.0.0.2 | |
+# | | remote 10.0.0.1 remote 10.0.0.1 | |
+# | | id 1010 id 1020 | |
+# | | dstport 4789 dstport 4789 | |
+# | | vid 10 pvid untagged vid 20 pvid untagged | |
+# | | | |
+# | | + vx4001 | |
+# | | local 10.0.0.2 | |
+# | | remote 10.0.0.1 | |
+# | | id 104001 | |
+# | | dstport 4789 | |
+# | | vid 4001 pvid untagged | |
+# | | | |
+# | | + w1 (veth) + w3 (veth) | |
+# | | | vid 10 pvid untagged br1 | vid 20 pvid untagged | |
+# | +--|------------------------------------------|-------------------------+ |
+# | | | |
+# | | | |
+# | +--|----------------------+ +--|-------------------------+ |
+# | | | vrf-h1 | | | vrf-h2 | |
+# | | + w2 (veth) | | + w4 (veth) | |
+# | | 10.1.1.102/24 | | 10.1.2.102/24 | |
+# | | default via 10.1.1.1 | | default via 10.1.2.1 | |
+# | +-------------------------+ +----------------------------+ |
+# +---------------------------------------------------------------------------+
+
+ALL_TESTS="
+ ping_ipv4
+"
+NUM_NETIFS=6
+source lib.sh
+
+hx_create()
+{
+ local vrf_name=$1; shift
+ local if_name=$1; shift
+ local ip_addr=$1; shift
+ local gw_ip=$1; shift
+
+ vrf_create $vrf_name
+ ip link set dev $if_name master $vrf_name
+ ip link set dev $vrf_name up
+ ip link set dev $if_name up
+
+ ip address add $ip_addr/24 dev $if_name
+ ip neigh replace $gw_ip lladdr 00:00:5e:00:01:01 nud permanent \
+ dev $if_name
+ ip route add default vrf $vrf_name nexthop via $gw_ip
+}
+export -f hx_create
+
+hx_destroy()
+{
+ local vrf_name=$1; shift
+ local if_name=$1; shift
+ local ip_addr=$1; shift
+ local gw_ip=$1; shift
+
+ ip route del default vrf $vrf_name nexthop via $gw_ip
+ ip neigh del $gw_ip dev $if_name
+ ip address del $ip_addr/24 dev $if_name
+
+ ip link set dev $if_name down
+ vrf_destroy $vrf_name
+}
+
+h1_create()
+{
+ hx_create "vrf-h1" $h1 10.1.1.101 10.1.1.1
+}
+
+h1_destroy()
+{
+ hx_destroy "vrf-h1" $h1 10.1.1.101 10.1.1.1
+}
+
+h2_create()
+{
+ hx_create "vrf-h2" $h2 10.1.2.101 10.1.2.1
+}
+
+h2_destroy()
+{
+ hx_destroy "vrf-h2" $h2 10.1.2.101 10.1.2.1
+}
+
+switch_create()
+{
+ ip link add name br1 type bridge vlan_filtering 1 vlan_default_pvid 0 \
+ mcast_snooping 0
+ # Make sure the bridge uses the MAC address of the local port and not
+ # that of the VxLAN's device.
+ ip link set dev br1 address $(mac_get $swp1)
+ ip link set dev br1 up
+
+ ip link set dev $rp1 up
+ ip address add dev $rp1 192.0.2.1/24
+ ip route add 10.0.0.2/32 nexthop via 192.0.2.2
+
+ ip link add name vx10 type vxlan id 1010 \
+ local 10.0.0.1 remote 10.0.0.2 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx10 up
+
+ ip link set dev vx10 master br1
+ bridge vlan add vid 10 dev vx10 pvid untagged
+
+ ip link add name vx20 type vxlan id 1020 \
+ local 10.0.0.1 remote 10.0.0.2 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx20 up
+
+ ip link set dev vx20 master br1
+ bridge vlan add vid 20 dev vx20 pvid untagged
+
+ ip link set dev $swp1 master br1
+ ip link set dev $swp1 up
+ bridge vlan add vid 10 dev $swp1 pvid untagged
+
+ ip link set dev $swp2 master br1
+ ip link set dev $swp2 up
+ bridge vlan add vid 20 dev $swp2 pvid untagged
+
+ ip link add name vx4001 type vxlan id 104001 \
+ local 10.0.0.1 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx4001 up
+
+ ip link set dev vx4001 master br1
+ bridge vlan add vid 4001 dev vx4001 pvid untagged
+
+ ip address add 10.0.0.1/32 dev lo
+
+ # Create SVIs
+ vrf_create "vrf-green"
+ ip link set dev vrf-green up
+
+ ip link add link br1 name vlan10 up master vrf-green type vlan id 10
+ ip address add 10.1.1.11/24 dev vlan10
+ ip link add link vlan10 name vlan10-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.1.1/24 dev vlan10-v
+
+ ip link add link br1 name vlan20 up master vrf-green type vlan id 20
+ ip address add 10.1.2.11/24 dev vlan20
+ ip link add link vlan20 name vlan20-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.2.1/24 dev vlan20-v
+
+ ip link add link br1 name vlan4001 up master vrf-green \
+ type vlan id 4001
+
+ bridge vlan add vid 10 dev br1 self
+ bridge vlan add vid 20 dev br1 self
+ bridge vlan add vid 4001 dev br1 self
+
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
+}
+
+switch_destroy()
+{
+ bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 20
+ bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 10
+
+ bridge vlan del vid 4001 dev br1 self
+ bridge vlan del vid 20 dev br1 self
+ bridge vlan del vid 10 dev br1 self
+
+ ip link del dev vlan4001
+
+ ip link del dev vlan20
+
+ ip link del dev vlan10
+
+ vrf_destroy "vrf-green"
+
+ ip address del 10.0.0.1/32 dev lo
+
+ bridge vlan del vid 20 dev $swp2
+ ip link set dev $swp2 down
+ ip link set dev $swp2 nomaster
+
+ bridge vlan del vid 10 dev $swp1
+ ip link set dev $swp1 down
+ ip link set dev $swp1 nomaster
+
+ bridge vlan del vid 4001 dev vx4001
+ ip link set dev vx4001 nomaster
+
+ ip link set dev vx4001 down
+ ip link del dev vx4001
+
+ bridge vlan del vid 20 dev vx20
+ ip link set dev vx20 nomaster
+
+ ip link set dev vx20 down
+ ip link del dev vx20
+
+ bridge vlan del vid 10 dev vx10
+ ip link set dev vx10 nomaster
+
+ ip link set dev vx10 down
+ ip link del dev vx10
+
+ ip route del 10.0.0.2/32 nexthop via 192.0.2.2
+ ip address del dev $rp1 192.0.2.1/24
+ ip link set dev $rp1 down
+
+ ip link set dev br1 down
+ ip link del dev br1
+}
+
+spine_create()
+{
+ vrf_create "vrf-spine"
+ ip link set dev $rp2 master vrf-spine
+ ip link set dev v1 master vrf-spine
+ ip link set dev vrf-spine up
+ ip link set dev $rp2 up
+ ip link set dev v1 up
+
+ ip address add 192.0.2.2/24 dev $rp2
+ ip address add 192.0.3.2/24 dev v1
+
+ ip route add 10.0.0.1/32 vrf vrf-spine nexthop via 192.0.2.1
+ ip route add 10.0.0.2/32 vrf vrf-spine nexthop via 192.0.3.1
+}
+
+spine_destroy()
+{
+ ip route del 10.0.0.2/32 vrf vrf-spine nexthop via 192.0.3.1
+ ip route del 10.0.0.1/32 vrf vrf-spine nexthop via 192.0.2.1
+
+ ip address del 192.0.3.2/24 dev v1
+ ip address del 192.0.2.2/24 dev $rp2
+
+ ip link set dev v1 down
+ ip link set dev $rp2 down
+ vrf_destroy "vrf-spine"
+}
+
+ns_h1_create()
+{
+ hx_create "vrf-h1" w2 10.1.1.102 10.1.1.1
+}
+export -f ns_h1_create
+
+ns_h2_create()
+{
+ hx_create "vrf-h2" w4 10.1.2.102 10.1.2.1
+}
+export -f ns_h2_create
+
+ns_switch_create()
+{
+ ip link add name br1 type bridge vlan_filtering 1 vlan_default_pvid 0 \
+ mcast_snooping 0
+ ip link set dev br1 up
+
+ ip link set dev v2 up
+ ip address add dev v2 192.0.3.1/24
+ ip route add 10.0.0.1/32 nexthop via 192.0.3.2
+
+ ip link add name vx10 type vxlan id 1010 \
+ local 10.0.0.2 remote 10.0.0.1 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx10 up
+
+ ip link set dev vx10 master br1
+ bridge vlan add vid 10 dev vx10 pvid untagged
+
+ ip link add name vx20 type vxlan id 1020 \
+ local 10.0.0.2 remote 10.0.0.1 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx20 up
+
+ ip link set dev vx20 master br1
+ bridge vlan add vid 20 dev vx20 pvid untagged
+
+ ip link add name vx4001 type vxlan id 104001 \
+ local 10.0.0.2 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx4001 up
+
+ ip link set dev vx4001 master br1
+ bridge vlan add vid 4001 dev vx4001 pvid untagged
+
+ ip link set dev w1 master br1
+ ip link set dev w1 up
+ bridge vlan add vid 10 dev w1 pvid untagged
+
+ ip link set dev w3 master br1
+ ip link set dev w3 up
+ bridge vlan add vid 20 dev w3 pvid untagged
+
+ ip address add 10.0.0.2/32 dev lo
+
+ # Create SVIs
+ vrf_create "vrf-green"
+ ip link set dev vrf-green up
+
+ ip link add link br1 name vlan10 up master vrf-green type vlan id 10
+ ip address add 10.1.1.12/24 dev vlan10
+ ip link add link vlan10 name vlan10-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.1.1/24 dev vlan10-v
+
+ ip link add link br1 name vlan20 up master vrf-green type vlan id 20
+ ip address add 10.1.2.12/24 dev vlan20
+ ip link add link vlan20 name vlan20-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.2.1/24 dev vlan20-v
+
+ ip link add link br1 name vlan4001 up master vrf-green \
+ type vlan id 4001
+
+ bridge vlan add vid 10 dev br1 self
+ bridge vlan add vid 20 dev br1 self
+ bridge vlan add vid 4001 dev br1 self
+
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
+}
+export -f ns_switch_create
+
+ns_init()
+{
+ ip link add name w1 type veth peer name w2
+ ip link add name w3 type veth peer name w4
+
+ ip link set dev lo up
+
+ ns_h1_create
+ ns_h2_create
+ ns_switch_create
+}
+export -f ns_init
+
+ns1_create()
+{
+ ip netns add ns1
+ ip link set dev v2 netns ns1
+ in_ns ns1 ns_init
+}
+
+ns1_destroy()
+{
+ ip netns exec ns1 ip link set dev v2 netns 1
+ ip netns del ns1
+}
+
+__l2_vni_init()
+{
+ local mac1=$1; shift
+ local mac2=$1; shift
+ local ip1=$1; shift
+ local ip2=$1; shift
+ local dst=$1; shift
+
+ bridge fdb add $mac1 dev vx10 self master extern_learn static \
+ dst $dst vlan 10
+ bridge fdb add $mac2 dev vx20 self master extern_learn static \
+ dst $dst vlan 20
+
+ ip neigh add $ip1 lladdr $mac1 nud noarp dev vlan10 \
+ extern_learn
+ ip neigh add $ip2 lladdr $mac2 nud noarp dev vlan20 \
+ extern_learn
+}
+export -f __l2_vni_init
+
+l2_vni_init()
+{
+ local h1_ns_mac=$(in_ns ns1 mac_get w2)
+ local h2_ns_mac=$(in_ns ns1 mac_get w4)
+ local h1_mac=$(mac_get $h1)
+ local h2_mac=$(mac_get $h2)
+
+ __l2_vni_init $h1_ns_mac $h2_ns_mac 10.1.1.102 10.1.2.102 10.0.0.2
+ in_ns ns1 __l2_vni_init $h1_mac $h2_mac 10.1.1.101 10.1.2.101 10.0.0.1
+}
+
+__l3_vni_init()
+{
+ local mac=$1; shift
+ local vtep_ip=$1; shift
+ local host1_ip=$1; shift
+ local host2_ip=$1; shift
+
+ bridge fdb add $mac dev vx4001 self master extern_learn static \
+ dst $vtep_ip vlan 4001
+
+ ip neigh add $vtep_ip lladdr $mac nud noarp dev vlan4001 extern_learn
+
+ ip route add $host1_ip/32 vrf vrf-green nexthop via $vtep_ip \
+ dev vlan4001 onlink
+ ip route add $host2_ip/32 vrf vrf-green nexthop via $vtep_ip \
+ dev vlan4001 onlink
+}
+export -f __l3_vni_init
+
+l3_vni_init()
+{
+ local vlan4001_ns_mac=$(in_ns ns1 mac_get vlan4001)
+ local vlan4001_mac=$(mac_get vlan4001)
+
+ __l3_vni_init $vlan4001_ns_mac 10.0.0.2 10.1.1.102 10.1.2.102
+ in_ns ns1 __l3_vni_init $vlan4001_mac 10.0.0.1 10.1.1.101 10.1.2.101
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ rp1=${NETIFS[p5]}
+ rp2=${NETIFS[p6]}
+
+ vrf_prepare
+ forwarding_enable
+
+ h1_create
+ h2_create
+ switch_create
+
+ ip link add name v1 type veth peer name v2
+ spine_create
+ ns1_create
+
+ l2_vni_init
+ l3_vni_init
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ ns1_destroy
+ spine_destroy
+ ip link del dev v1
+
+ switch_destroy
+ h2_destroy
+ h1_destroy
+
+ forwarding_restore
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1 10.1.2.101 ": local->local vid 10->vid 20"
+ ping_test $h1 10.1.1.102 ": local->remote vid 10->vid 10"
+ ping_test $h2 10.1.2.102 ": local->remote vid 20->vid 20"
+ ping_test $h1 10.1.2.102 ": local->remote vid 10->vid 20"
+ ping_test $h2 10.1.1.102 ": local->remote vid 20->vid 10"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/ip_defrag.c b/tools/testing/selftests/net/ip_defrag.c
index 5d56cc0838f6..c0c9ecb891e1 100644
--- a/tools/testing/selftests/net/ip_defrag.c
+++ b/tools/testing/selftests/net/ip_defrag.c
@@ -20,6 +20,7 @@ static bool cfg_do_ipv4;
static bool cfg_do_ipv6;
static bool cfg_verbose;
static bool cfg_overlap;
+static bool cfg_permissive;
static unsigned short cfg_port = 9000;
const struct in_addr addr4 = { .s_addr = __constant_htonl(INADDR_LOOPBACK + 2) };
@@ -35,7 +36,7 @@ const struct in6_addr addr6 = IN6ADDR_LOOPBACK_INIT;
static int payload_len;
static int max_frag_len;
-#define MSG_LEN_MAX 60000 /* Max UDP payload length. */
+#define MSG_LEN_MAX 10000 /* Max UDP payload length. */
#define IP4_MF (1u << 13) /* IPv4 MF flag. */
#define IP6_MF (1) /* IPv6 MF flag. */
@@ -59,13 +60,14 @@ static void recv_validate_udp(int fd_udp)
msg_counter++;
if (cfg_overlap) {
- if (ret != -1)
- error(1, 0, "recv: expected timeout; got %d",
- (int)ret);
- if (errno != ETIMEDOUT && errno != EAGAIN)
- error(1, errno, "recv: expected timeout: %d",
- errno);
- return; /* OK */
+ if (ret == -1 && (errno == ETIMEDOUT || errno == EAGAIN))
+ return; /* OK */
+ if (!cfg_permissive) {
+ if (ret != -1)
+ error(1, 0, "recv: expected timeout; got %d",
+ (int)ret);
+ error(1, errno, "recv: expected timeout: %d", errno);
+ }
}
if (ret == -1)
@@ -203,7 +205,6 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
{
struct ip *iphdr = (struct ip *)ip_frame;
struct ip6_hdr *ip6hdr = (struct ip6_hdr *)ip_frame;
- const bool ipv4 = !ipv6;
int res;
int offset;
int frag_len;
@@ -251,7 +252,7 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
}
/* Occasionally test IPv4 "runs" (see net/ipv4/ip_fragment.c) */
- if (ipv4 && !cfg_overlap && (rand() % 100 < 20) &&
+ if (!cfg_overlap && (rand() % 100 < 20) &&
(payload_len > 9 * max_frag_len)) {
offset = 6 * max_frag_len;
while (offset < (UDP_HLEN + payload_len)) {
@@ -276,41 +277,38 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
while (offset < (UDP_HLEN + payload_len)) {
send_fragment(fd_raw, addr, alen, offset, ipv6);
/* IPv4 ignores duplicates, so randomly send a duplicate. */
- if (ipv4 && (1 == rand() % 100))
+ if (rand() % 100 == 1)
send_fragment(fd_raw, addr, alen, offset, ipv6);
offset += 2 * max_frag_len;
}
if (cfg_overlap) {
- /* Send an extra random fragment. */
+ /* Send an extra random fragment.
+ *
+ * Duplicates and some fragments completely inside
+ * previously sent fragments are dropped/ignored. So
+ * random offset and frag_len can result in a dropped
+ * fragment instead of a dropped queue/packet. Thus we
+ * hard-code offset and frag_len.
+ */
+ if (max_frag_len * 4 < payload_len || max_frag_len < 16) {
+ /* not enough payload for random offset and frag_len. */
+ offset = 8;
+ frag_len = UDP_HLEN + max_frag_len;
+ } else {
+ offset = rand() % (payload_len / 2);
+ frag_len = 2 * max_frag_len + 1 + rand() % 256;
+ }
if (ipv6) {
struct ip6_frag *fraghdr = (struct ip6_frag *)(ip_frame + IP6_HLEN);
/* sendto() returns EINVAL if offset + frag_len is too small. */
- offset = rand() % (UDP_HLEN + payload_len - 1);
- frag_len = max_frag_len + rand() % 256;
/* In IPv6 if !!(frag_len % 8), the fragment is dropped. */
frag_len &= ~0x7;
fraghdr->ip6f_offlg = htons(offset / 8 | IP6_MF);
ip6hdr->ip6_plen = htons(frag_len);
frag_len += IP6_HLEN;
} else {
- /* In IPv4, duplicates and some fragments completely inside
- * previously sent fragments are dropped/ignored. So
- * random offset and frag_len can result in a dropped
- * fragment instead of a dropped queue/packet. So we
- * hard-code offset and frag_len.
- *
- * See ade446403bfb ("net: ipv4: do not handle duplicate
- * fragments as overlapping").
- */
- if (max_frag_len * 4 < payload_len || max_frag_len < 16) {
- /* not enough payload to play with random offset and frag_len. */
- offset = 8;
- frag_len = IP4_HLEN + UDP_HLEN + max_frag_len;
- } else {
- offset = rand() % (payload_len / 2);
- frag_len = 2 * max_frag_len + 1 + rand() % 256;
- }
+ frag_len += IP4_HLEN;
iphdr->ip_off = htons(offset / 8 | IP4_MF);
iphdr->ip_len = htons(frag_len);
}
@@ -327,7 +325,7 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
while (offset < (UDP_HLEN + payload_len)) {
send_fragment(fd_raw, addr, alen, offset, ipv6);
/* IPv4 ignores duplicates, so randomly send a duplicate. */
- if (ipv4 && (1 == rand() % 100))
+ if (rand() % 100 == 1)
send_fragment(fd_raw, addr, alen, offset, ipv6);
offset += 2 * max_frag_len;
}
@@ -342,7 +340,7 @@ static void run_test(struct sockaddr *addr, socklen_t alen, bool ipv6)
*/
struct timeval tv = { .tv_sec = 1, .tv_usec = 10 };
int idx;
- int min_frag_len = ipv6 ? 1280 : 8;
+ int min_frag_len = 8;
/* Initialize the payload. */
for (idx = 0; idx < MSG_LEN_MAX; ++idx)
@@ -434,7 +432,7 @@ static void parse_opts(int argc, char **argv)
{
int c;
- while ((c = getopt(argc, argv, "46ov")) != -1) {
+ while ((c = getopt(argc, argv, "46opv")) != -1) {
switch (c) {
case '4':
cfg_do_ipv4 = true;
@@ -445,6 +443,9 @@ static void parse_opts(int argc, char **argv)
case 'o':
cfg_overlap = true;
break;
+ case 'p':
+ cfg_permissive = true;
+ break;
case 'v':
cfg_verbose = true;
break;
diff --git a/tools/testing/selftests/net/ip_defrag.sh b/tools/testing/selftests/net/ip_defrag.sh
index 7dd79a9efb17..15d3489ecd9c 100755
--- a/tools/testing/selftests/net/ip_defrag.sh
+++ b/tools/testing/selftests/net/ip_defrag.sh
@@ -20,6 +20,10 @@ setup() {
ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_low_thresh=7000000 >/dev/null 2>&1
ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_time=1 >/dev/null 2>&1
+ ip netns exec "${NETNS}" sysctl -w net.netfilter.nf_conntrack_frag6_high_thresh=9000000 >/dev/null 2>&1
+ ip netns exec "${NETNS}" sysctl -w net.netfilter.nf_conntrack_frag6_low_thresh=7000000 >/dev/null 2>&1
+ ip netns exec "${NETNS}" sysctl -w net.netfilter.nf_conntrack_frag6_timeout=1 >/dev/null 2>&1
+
# DST cache can get full with a lot of frags, with GC not keeping up with the test.
ip netns exec "${NETNS}" sysctl -w net.ipv6.route.max_size=65536 >/dev/null 2>&1
}
@@ -43,4 +47,16 @@ ip netns exec "${NETNS}" ./ip_defrag -6
echo "ipv6 defrag with overlaps"
ip netns exec "${NETNS}" ./ip_defrag -6o
+# insert an nf_conntrack rule so that the codepath in nf_conntrack_reasm.c is taken
+ip netns exec "${NETNS}" ip6tables -A INPUT -m conntrack --ctstate INVALID -j ACCEPT
+
+echo "ipv6 nf_conntrack defrag"
+ip netns exec "${NETNS}" ./ip_defrag -6
+
+echo "ipv6 nf_conntrack defrag with overlaps"
+# netfilter will drop some invalid packets, so we run the test in
+# permissive mode: i.e. pass the test if the packet is correctly assembled
+# even if we sent an overlap
+ip netns exec "${NETNS}" ./ip_defrag -6op
+
echo "all tests done"
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
index e2c94e47707c..912b2dc50be3 100755
--- a/tools/testing/selftests/net/pmtu.sh
+++ b/tools/testing/selftests/net/pmtu.sh
@@ -103,6 +103,15 @@
# and check that configured MTU is used on link creation and changes, and
# that MTU is properly calculated instead when MTU is not configured from
# userspace
+#
+# - cleanup_ipv4_exception
+# Similar to pmtu_ipv4_vxlan4_exception, but explicitly generate PMTU
+# exceptions on multiple CPUs and check that the veth device tear-down
+# happens in a timely manner
+#
+# - cleanup_ipv6_exception
+# Same as above, but use IPv6 transport from A to B
+
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
@@ -135,7 +144,9 @@ tests="
pmtu_vti6_default_mtu vti6: default MTU assignment
pmtu_vti4_link_add_mtu vti4: MTU setting on link creation
pmtu_vti6_link_add_mtu vti6: MTU setting on link creation
- pmtu_vti6_link_change_mtu vti6: MTU changes on link changes"
+ pmtu_vti6_link_change_mtu vti6: MTU changes on link changes
+ cleanup_ipv4_exception ipv4: cleanup of cached exceptions
+ cleanup_ipv6_exception ipv6: cleanup of cached exceptions"
NS_A="ns-$(mktemp -u XXXXXX)"
NS_B="ns-$(mktemp -u XXXXXX)"
@@ -263,8 +274,6 @@ setup_fou_or_gue() {
${ns_a} ip link set ${encap}_a up
${ns_b} ip link set ${encap}_b up
-
- sleep 1
}
setup_fou44() {
@@ -302,6 +311,10 @@ setup_gue66() {
setup_namespaces() {
for n in ${NS_A} ${NS_B} ${NS_R1} ${NS_R2}; do
ip netns add ${n} || return 1
+
+ # Disable DAD, so that we don't have to wait to use the
+ # configured IPv6 addresses
+ ip netns exec ${n} sysctl -q net/ipv6/conf/default/accept_dad=0
done
}
@@ -337,8 +350,6 @@ setup_vti() {
${ns_a} ip link set vti${proto}_a up
${ns_b} ip link set vti${proto}_b up
-
- sleep 1
}
setup_vti4() {
@@ -375,8 +386,6 @@ setup_vxlan_or_geneve() {
${ns_a} ip link set ${type}_a up
${ns_b} ip link set ${type}_b up
-
- sleep 1
}
setup_geneve4() {
@@ -588,8 +597,8 @@ test_pmtu_ipvX() {
mtu "${ns_b}" veth_B-R2 1500
# Create route exceptions
- ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1800 ${dst1} > /dev/null
- ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1800 ${dst2} > /dev/null
+ ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst1} > /dev/null
+ ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst2} > /dev/null
# Check that exceptions have been created with the correct PMTU
pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst1})"
@@ -621,7 +630,7 @@ test_pmtu_ipvX() {
# Decrease remote MTU on path via R2, get new exception
mtu "${ns_r2}" veth_R2-B 400
mtu "${ns_b}" veth_B-R2 400
- ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1400 ${dst2} > /dev/null
+ ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1400 ${dst2} > /dev/null
pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
check_pmtu_value "lock 552" "${pmtu_2}" "exceeding MTU, with MTU < min_pmtu" || return 1
@@ -638,7 +647,7 @@ test_pmtu_ipvX() {
check_pmtu_value "1500" "${pmtu_2}" "increasing local MTU" || return 1
# Get new exception
- ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1400 ${dst2} > /dev/null
+ ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1400 ${dst2} > /dev/null
pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
check_pmtu_value "lock 552" "${pmtu_2}" "exceeding MTU, with MTU < min_pmtu" || return 1
}
@@ -687,7 +696,7 @@ test_pmtu_ipvX_over_vxlanY_or_geneveY_exception() {
mtu "${ns_a}" ${type}_a $((${ll_mtu} + 1000))
mtu "${ns_b}" ${type}_b $((${ll_mtu} + 1000))
- ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s $((${ll_mtu} + 500)) ${dst} > /dev/null
+ ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s $((${ll_mtu} + 500)) ${dst} > /dev/null
# Check that exception was created
pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst})"
@@ -767,7 +776,7 @@ test_pmtu_ipvX_over_fouY_or_gueY() {
mtu "${ns_a}" ${encap}_a $((${ll_mtu} + 1000))
mtu "${ns_b}" ${encap}_b $((${ll_mtu} + 1000))
- ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s $((${ll_mtu} + 500)) ${dst} > /dev/null
+ ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s $((${ll_mtu} + 500)) ${dst} > /dev/null
# Check that exception was created
pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst})"
@@ -825,13 +834,13 @@ test_pmtu_vti4_exception() {
# Send DF packet without exceeding link layer MTU, check that no
# exception is created
- ${ns_a} ping -q -M want -i 0.1 -w 2 -s ${ping_payload} ${tunnel4_b_addr} > /dev/null
+ ${ns_a} ping -q -M want -i 0.1 -w 1 -s ${ping_payload} ${tunnel4_b_addr} > /dev/null
pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})"
check_pmtu_value "" "${pmtu}" "sending packet smaller than PMTU (IP payload length ${esp_payload_rfc4106})" || return 1
# Now exceed link layer MTU by one byte, check that exception is created
# with the right PMTU value
- ${ns_a} ping -q -M want -i 0.1 -w 2 -s $((ping_payload + 1)) ${tunnel4_b_addr} > /dev/null
+ ${ns_a} ping -q -M want -i 0.1 -w 1 -s $((ping_payload + 1)) ${tunnel4_b_addr} > /dev/null
pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})"
check_pmtu_value "${esp_payload_rfc4106}" "${pmtu}" "exceeding PMTU (IP payload length $((esp_payload_rfc4106 + 1)))"
}
@@ -847,7 +856,7 @@ test_pmtu_vti6_exception() {
mtu "${ns_b}" veth_b 4000
mtu "${ns_a}" vti6_a 5000
mtu "${ns_b}" vti6_b 5000
- ${ns_a} ${ping6} -q -i 0.1 -w 2 -s 60000 ${tunnel6_b_addr} > /dev/null
+ ${ns_a} ${ping6} -q -i 0.1 -w 1 -s 60000 ${tunnel6_b_addr} > /dev/null
# Check that exception was created
pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})"
@@ -1008,6 +1017,61 @@ test_pmtu_vti6_link_change_mtu() {
return ${fail}
}
+check_command() {
+ cmd=${1}
+
+ if ! which ${cmd} > /dev/null 2>&1; then
+ err " missing required command: '${cmd}'"
+ return 1
+ fi
+ return 0
+}
+
+test_cleanup_vxlanX_exception() {
+ outer="${1}"
+ encap="vxlan"
+ ll_mtu=4000
+
+ check_command taskset || return 2
+ cpu_list=$(grep -m 2 processor /proc/cpuinfo | cut -d ' ' -f 2)
+
+ setup namespaces routing ${encap}${outer} || return 2
+ trace "${ns_a}" ${encap}_a "${ns_b}" ${encap}_b \
+ "${ns_a}" veth_A-R1 "${ns_r1}" veth_R1-A \
+ "${ns_b}" veth_B-R1 "${ns_r1}" veth_R1-B
+
+ # Create route exception by exceeding link layer MTU
+ mtu "${ns_a}" veth_A-R1 $((${ll_mtu} + 1000))
+ mtu "${ns_r1}" veth_R1-A $((${ll_mtu} + 1000))
+ mtu "${ns_b}" veth_B-R1 ${ll_mtu}
+ mtu "${ns_r1}" veth_R1-B ${ll_mtu}
+
+ mtu "${ns_a}" ${encap}_a $((${ll_mtu} + 1000))
+ mtu "${ns_b}" ${encap}_b $((${ll_mtu} + 1000))
+
+ # Fill exception cache for multiple CPUs (2)
+ # we can always use inner IPv4 for that
+ for cpu in ${cpu_list}; do
+ taskset --cpu-list ${cpu} ${ns_a} ping -q -M want -i 0.1 -w 1 -s $((${ll_mtu} + 500)) ${tunnel4_b_addr} > /dev/null
+ done
+
+ ${ns_a} ip link del dev veth_A-R1 &
+ iplink_pid=$!
+ sleep 1
+ if [ "$(cat /proc/${iplink_pid}/cmdline 2>/dev/null | tr -d '\0')" = "iplinkdeldevveth_A-R1" ]; then
+ err " can't delete veth device in a timely manner, PMTU dst likely leaked"
+ return 1
+ fi
+}
+
+test_cleanup_ipv6_exception() {
+ test_cleanup_vxlanX_exception 6
+}
+
+test_cleanup_ipv4_exception() {
+ test_cleanup_vxlanX_exception 4
+}
+
usage() {
echo
echo "$0 [OPTIONS] [TEST]..."
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index 78fc593dfe40..b447803f3f8a 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -391,7 +391,7 @@ kci_test_encap_vxlan()
vlan="test-vlan0"
testns="$1"
- ip netns exec "$testns" ip link add "$vxlan" type vxlan id 42 group 239.1.1.1 \
+ ip -netns "$testns" link add "$vxlan" type vxlan id 42 group 239.1.1.1 \
dev "$devdummy" dstport 4789 2>/dev/null
if [ $? -ne 0 ]; then
echo "FAIL: can't add vxlan interface, skipping test"
@@ -399,16 +399,68 @@ kci_test_encap_vxlan()
fi
check_err $?
- ip netns exec "$testns" ip addr add 10.2.11.49/24 dev "$vxlan"
+ ip -netns "$testns" addr add 10.2.11.49/24 dev "$vxlan"
check_err $?
- ip netns exec "$testns" ip link set up dev "$vxlan"
+ ip -netns "$testns" link set up dev "$vxlan"
check_err $?
- ip netns exec "$testns" ip link add link "$vxlan" name "$vlan" type vlan id 1
+ ip -netns "$testns" link add link "$vxlan" name "$vlan" type vlan id 1
check_err $?
- ip netns exec "$testns" ip link del "$vxlan"
+ # changelink testcases
+ ip -netns "$testns" link set dev "$vxlan" type vxlan vni 43 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan group ffe5::5 dev "$devdummy" 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan ttl inherit 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan ttl 64
+ check_err $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan nolearning
+ check_err $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan proxy 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan norsc 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan l2miss 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan l3miss 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan external 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan udpcsum 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan udp6zerocsumtx 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan udp6zerocsumrx 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan remcsumtx 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan remcsumrx 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan gbp 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan gpe 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link del "$vxlan"
check_err $?
if [ $ret -ne 0 ]; then
@@ -430,19 +482,19 @@ kci_test_encap_fou()
return $ksft_skip
fi
- ip netns exec "$testns" ip fou add port 7777 ipproto 47 2>/dev/null
+ ip -netns "$testns" fou add port 7777 ipproto 47 2>/dev/null
if [ $? -ne 0 ];then
echo "FAIL: can't add fou port 7777, skipping test"
return 1
fi
- ip netns exec "$testns" ip fou add port 8888 ipproto 4
+ ip -netns "$testns" fou add port 8888 ipproto 4
check_err $?
- ip netns exec "$testns" ip fou del port 9999 2>/dev/null
+ ip -netns "$testns" fou del port 9999 2>/dev/null
check_fail $?
- ip netns exec "$testns" ip fou del port 7777
+ ip -netns "$testns" fou del port 7777
check_err $?
if [ $ret -ne 0 ]; then
@@ -465,12 +517,12 @@ kci_test_encap()
return $ksft_skip
fi
- ip netns exec "$testns" ip link set lo up
+ ip -netns "$testns" link set lo up
check_err $?
- ip netns exec "$testns" ip link add name "$devdummy" type dummy
+ ip -netns "$testns" link add name "$devdummy" type dummy
check_err $?
- ip netns exec "$testns" ip link set "$devdummy" up
+ ip -netns "$testns" link set "$devdummy" up
check_err $?
kci_test_encap_vxlan "$testns"
@@ -759,24 +811,24 @@ kci_test_gretap()
fi
# test native tunnel
- ip netns exec "$testns" ip link add dev "$DEV_NS" type gretap seq \
+ ip -netns "$testns" link add dev "$DEV_NS" type gretap seq \
key 102 local 172.16.1.100 remote 172.16.1.200
check_err $?
- ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+ ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
check_err $?
- ip netns exec "$testns" ip link set dev $DEV_NS up
+ ip -netns "$testns" link set dev $DEV_NS up
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
# test external mode
- ip netns exec "$testns" ip link add dev "$DEV_NS" type gretap external
+ ip -netns "$testns" link add dev "$DEV_NS" type gretap external
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
if [ $ret -ne 0 ]; then
@@ -809,24 +861,24 @@ kci_test_ip6gretap()
fi
# test native tunnel
- ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6gretap seq \
+ ip -netns "$testns" link add dev "$DEV_NS" type ip6gretap seq \
key 102 local fc00:100::1 remote fc00:100::2
check_err $?
- ip netns exec "$testns" ip addr add dev "$DEV_NS" fc00:200::1/96
+ ip -netns "$testns" addr add dev "$DEV_NS" fc00:200::1/96
check_err $?
- ip netns exec "$testns" ip link set dev $DEV_NS up
+ ip -netns "$testns" link set dev $DEV_NS up
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
# test external mode
- ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6gretap external
+ ip -netns "$testns" link add dev "$DEV_NS" type ip6gretap external
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
if [ $ret -ne 0 ]; then
@@ -858,40 +910,40 @@ kci_test_erspan()
fi
# test native tunnel erspan v1
- ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan seq \
+ ip -netns "$testns" link add dev "$DEV_NS" type erspan seq \
key 102 local 172.16.1.100 remote 172.16.1.200 \
erspan_ver 1 erspan 488
check_err $?
- ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+ ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
check_err $?
- ip netns exec "$testns" ip link set dev $DEV_NS up
+ ip -netns "$testns" link set dev $DEV_NS up
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
# test native tunnel erspan v2
- ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan seq \
+ ip -netns "$testns" link add dev "$DEV_NS" type erspan seq \
key 102 local 172.16.1.100 remote 172.16.1.200 \
erspan_ver 2 erspan_dir ingress erspan_hwid 7
check_err $?
- ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+ ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
check_err $?
- ip netns exec "$testns" ip link set dev $DEV_NS up
+ ip -netns "$testns" link set dev $DEV_NS up
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
# test external mode
- ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan external
+ ip -netns "$testns" link add dev "$DEV_NS" type erspan external
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
if [ $ret -ne 0 ]; then
@@ -923,41 +975,41 @@ kci_test_ip6erspan()
fi
# test native tunnel ip6erspan v1
- ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6erspan seq \
+ ip -netns "$testns" link add dev "$DEV_NS" type ip6erspan seq \
key 102 local fc00:100::1 remote fc00:100::2 \
erspan_ver 1 erspan 488
check_err $?
- ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+ ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
check_err $?
- ip netns exec "$testns" ip link set dev $DEV_NS up
+ ip -netns "$testns" link set dev $DEV_NS up
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
# test native tunnel ip6erspan v2
- ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6erspan seq \
+ ip -netns "$testns" link add dev "$DEV_NS" type ip6erspan seq \
key 102 local fc00:100::1 remote fc00:100::2 \
erspan_ver 2 erspan_dir ingress erspan_hwid 7
check_err $?
- ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+ ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
check_err $?
- ip netns exec "$testns" ip link set dev $DEV_NS up
+ ip -netns "$testns" link set dev $DEV_NS up
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
# test external mode
- ip netns exec "$testns" ip link add dev "$DEV_NS" \
+ ip -netns "$testns" link add dev "$DEV_NS" \
type ip6erspan external
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
if [ $ret -ne 0 ]; then
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index fac68d710f35..47ddfc154036 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -42,7 +42,7 @@ FIXTURE_SETUP(tls)
len = sizeof(addr);
memset(&tls12, 0, sizeof(tls12));
- tls12.info.version = TLS_1_2_VERSION;
+ tls12.info.version = TLS_1_3_VERSION;
tls12.info.cipher_type = TLS_CIPHER_AES_GCM_128;
addr.sin_family = AF_INET;
@@ -452,10 +452,12 @@ TEST_F(tls, recv_partial)
memset(recv_mem, 0, sizeof(recv_mem));
EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
- EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_first), 0), -1);
+ EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_first),
+ MSG_WAITALL), -1);
EXPECT_EQ(memcmp(test_str_first, recv_mem, strlen(test_str_first)), 0);
memset(recv_mem, 0, sizeof(recv_mem));
- EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_second), 0), -1);
+ EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_second),
+ MSG_WAITALL), -1);
EXPECT_EQ(memcmp(test_str_second, recv_mem, strlen(test_str_second)),
0);
}
@@ -565,10 +567,10 @@ TEST_F(tls, recv_peek_large_buf_mult_recs)
len = strlen(test_str_second) + 1;
EXPECT_EQ(send(self->fd, test_str_second, len, 0), len);
- len = sizeof(buf);
+ len = strlen(test_str) + 1;
memset(buf, 0, len);
- EXPECT_NE(recv(self->cfd, buf, len, MSG_PEEK), -1);
-
+ EXPECT_NE((len = recv(self->cfd, buf, len,
+ MSG_PEEK | MSG_WAITALL)), -1);
len = strlen(test_str) + 1;
EXPECT_EQ(memcmp(test_str, buf, len), 0);
}
@@ -751,6 +753,20 @@ TEST_F(tls, control_msg)
EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1);
vec.iov_base = buf;
+ EXPECT_EQ(recvmsg(self->cfd, &msg, MSG_WAITALL | MSG_PEEK), send_len);
+
+ cmsg = CMSG_FIRSTHDR(&msg);
+ EXPECT_NE(cmsg, NULL);
+ EXPECT_EQ(cmsg->cmsg_level, SOL_TLS);
+ EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE);
+ record_type = *((unsigned char *)CMSG_DATA(cmsg));
+ EXPECT_EQ(record_type, 100);
+ EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
+
+ /* Recv the message again without MSG_PEEK */
+ record_type = 0;
+ memset(buf, 0, sizeof(buf));
+
EXPECT_EQ(recvmsg(self->cfd, &msg, MSG_WAITALL), send_len);
cmsg = CMSG_FIRSTHDR(&msg);
EXPECT_NE(cmsg, NULL);
@@ -761,4 +777,140 @@ TEST_F(tls, control_msg)
EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
}
+TEST(keysizes) {
+ struct tls12_crypto_info_aes_gcm_256 tls12;
+ struct sockaddr_in addr;
+ int sfd, ret, fd, cfd;
+ socklen_t len;
+ bool notls;
+
+ notls = false;
+ len = sizeof(addr);
+
+ memset(&tls12, 0, sizeof(tls12));
+ tls12.info.version = TLS_1_2_VERSION;
+ tls12.info.cipher_type = TLS_CIPHER_AES_GCM_256;
+
+ addr.sin_family = AF_INET;
+ addr.sin_addr.s_addr = htonl(INADDR_ANY);
+ addr.sin_port = 0;
+
+ fd = socket(AF_INET, SOCK_STREAM, 0);
+ sfd = socket(AF_INET, SOCK_STREAM, 0);
+
+ ret = bind(sfd, &addr, sizeof(addr));
+ ASSERT_EQ(ret, 0);
+ ret = listen(sfd, 10);
+ ASSERT_EQ(ret, 0);
+
+ ret = getsockname(sfd, &addr, &len);
+ ASSERT_EQ(ret, 0);
+
+ ret = connect(fd, &addr, sizeof(addr));
+ ASSERT_EQ(ret, 0);
+
+ ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
+ if (ret != 0) {
+ notls = true;
+ printf("Failure setting TCP_ULP, testing without tls\n");
+ }
+
+ if (!notls) {
+ ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12,
+ sizeof(tls12));
+ EXPECT_EQ(ret, 0);
+ }
+
+ cfd = accept(sfd, &addr, &len);
+ ASSERT_GE(cfd, 0);
+
+ if (!notls) {
+ ret = setsockopt(cfd, IPPROTO_TCP, TCP_ULP, "tls",
+ sizeof("tls"));
+ EXPECT_EQ(ret, 0);
+
+ ret = setsockopt(cfd, SOL_TLS, TLS_RX, &tls12,
+ sizeof(tls12));
+ EXPECT_EQ(ret, 0);
+ }
+
+ close(sfd);
+ close(fd);
+ close(cfd);
+}
+
+TEST(tls12) {
+ int fd, cfd;
+ bool notls;
+
+ struct tls12_crypto_info_aes_gcm_128 tls12;
+ struct sockaddr_in addr;
+ socklen_t len;
+ int sfd, ret;
+
+ notls = false;
+ len = sizeof(addr);
+
+ memset(&tls12, 0, sizeof(tls12));
+ tls12.info.version = TLS_1_2_VERSION;
+ tls12.info.cipher_type = TLS_CIPHER_AES_GCM_128;
+
+ addr.sin_family = AF_INET;
+ addr.sin_addr.s_addr = htonl(INADDR_ANY);
+ addr.sin_port = 0;
+
+ fd = socket(AF_INET, SOCK_STREAM, 0);
+ sfd = socket(AF_INET, SOCK_STREAM, 0);
+
+ ret = bind(sfd, &addr, sizeof(addr));
+ ASSERT_EQ(ret, 0);
+ ret = listen(sfd, 10);
+ ASSERT_EQ(ret, 0);
+
+ ret = getsockname(sfd, &addr, &len);
+ ASSERT_EQ(ret, 0);
+
+ ret = connect(fd, &addr, sizeof(addr));
+ ASSERT_EQ(ret, 0);
+
+ ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
+ if (ret != 0) {
+ notls = true;
+ printf("Failure setting TCP_ULP, testing without tls\n");
+ }
+
+ if (!notls) {
+ ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12,
+ sizeof(tls12));
+ ASSERT_EQ(ret, 0);
+ }
+
+ cfd = accept(sfd, &addr, &len);
+ ASSERT_GE(cfd, 0);
+
+ if (!notls) {
+ ret = setsockopt(cfd, IPPROTO_TCP, TCP_ULP, "tls",
+ sizeof("tls"));
+ ASSERT_EQ(ret, 0);
+
+ ret = setsockopt(cfd, SOL_TLS, TLS_RX, &tls12,
+ sizeof(tls12));
+ ASSERT_EQ(ret, 0);
+ }
+
+ close(sfd);
+
+ char const *test_str = "test_read";
+ int send_len = 10;
+ char buf[10];
+
+ send_len = strlen(test_str) + 1;
+ EXPECT_EQ(send(fd, test_str, send_len, 0), send_len);
+ EXPECT_NE(recv(cfd, buf, send_len, 0), -1);
+ EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
+
+ close(fd);
+ close(cfd);
+}
+
TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/net/udpgro.sh b/tools/testing/selftests/net/udpgro.sh
index aeac53a99aeb..ac2a30be9b32 100755
--- a/tools/testing/selftests/net/udpgro.sh
+++ b/tools/testing/selftests/net/udpgro.sh
@@ -37,7 +37,7 @@ run_one() {
cfg_veth
- ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} && \
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} && \
echo "ok" || \
echo "failed" &
@@ -81,7 +81,7 @@ run_one_nat() {
# will land on the 'plain' one
ip netns exec "${PEER_NS}" ./udpgso_bench_rx -G ${family} -b ${addr1} -n 0 &
pid=$!
- ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${family} -b ${addr2%/*} ${rx_args} && \
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${family} -b ${addr2%/*} ${rx_args} && \
echo "ok" || \
echo "failed"&
@@ -99,8 +99,8 @@ run_one_2sock() {
cfg_veth
- ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} -p 12345 &
- ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} && \
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} -p 12345 &
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 2000 -R 10 ${rx_args} && \
echo "ok" || \
echo "failed" &
diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c
index e279051bc631..b8265ee9923f 100644
--- a/tools/testing/selftests/net/udpgso.c
+++ b/tools/testing/selftests/net/udpgso.c
@@ -17,7 +17,6 @@
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
-#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
diff --git a/tools/testing/selftests/net/udpgso_bench_rx.c b/tools/testing/selftests/net/udpgso_bench_rx.c
index 0c960f673324..db3d4a8b5a4c 100644
--- a/tools/testing/selftests/net/udpgso_bench_rx.c
+++ b/tools/testing/selftests/net/udpgso_bench_rx.c
@@ -45,6 +45,8 @@ static int cfg_alen = sizeof(struct sockaddr_in6);
static int cfg_expected_pkt_nr;
static int cfg_expected_pkt_len;
static int cfg_expected_gso_size;
+static int cfg_connect_timeout_ms;
+static int cfg_rcv_timeout_ms;
static struct sockaddr_storage cfg_bind_addr;
static bool interrupted;
@@ -87,7 +89,7 @@ static unsigned long gettimeofday_ms(void)
return (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
}
-static void do_poll(int fd)
+static void do_poll(int fd, int timeout_ms)
{
struct pollfd pfd;
int ret;
@@ -102,8 +104,16 @@ static void do_poll(int fd)
break;
if (ret == -1)
error(1, errno, "poll");
- if (ret == 0)
- continue;
+ if (ret == 0) {
+ if (!timeout_ms)
+ continue;
+
+ timeout_ms -= 10;
+ if (timeout_ms <= 0) {
+ interrupted = true;
+ break;
+ }
+ }
if (pfd.revents != POLLIN)
error(1, errno, "poll: 0x%x expected 0x%x\n",
pfd.revents, POLLIN);
@@ -134,7 +144,7 @@ static int do_socket(bool do_tcp)
if (listen(accept_fd, 1))
error(1, errno, "listen");
- do_poll(accept_fd);
+ do_poll(accept_fd, cfg_connect_timeout_ms);
if (interrupted)
exit(0);
@@ -273,7 +283,9 @@ static void do_flush_udp(int fd)
static void usage(const char *filepath)
{
- error(1, 0, "Usage: %s [-Grtv] [-b addr] [-p port] [-l pktlen] [-n packetnr] [-S gsosize]", filepath);
+ error(1, 0, "Usage: %s [-C connect_timeout] [-Grtv] [-b addr] [-p port]"
+ " [-l pktlen] [-n packetnr] [-R rcv_timeout] [-S gsosize]",
+ filepath);
}
static void parse_opts(int argc, char **argv)
@@ -282,7 +294,7 @@ static void parse_opts(int argc, char **argv)
/* bind to any by default */
setup_sockaddr(PF_INET6, "::", &cfg_bind_addr);
- while ((c = getopt(argc, argv, "4b:Gl:n:p:rS:tv")) != -1) {
+ while ((c = getopt(argc, argv, "4b:C:Gl:n:p:rR:S:tv")) != -1) {
switch (c) {
case '4':
cfg_family = PF_INET;
@@ -292,6 +304,9 @@ static void parse_opts(int argc, char **argv)
case 'b':
setup_sockaddr(cfg_family, optarg, &cfg_bind_addr);
break;
+ case 'C':
+ cfg_connect_timeout_ms = strtoul(optarg, NULL, 0);
+ break;
case 'G':
cfg_gro_segment = true;
break;
@@ -307,6 +322,9 @@ static void parse_opts(int argc, char **argv)
case 'r':
cfg_read_all = true;
break;
+ case 'R':
+ cfg_rcv_timeout_ms = strtoul(optarg, NULL, 0);
+ break;
case 'S':
cfg_expected_gso_size = strtol(optarg, NULL, 0);
break;
@@ -329,8 +347,9 @@ static void parse_opts(int argc, char **argv)
static void do_recv(void)
{
+ int timeout_ms = cfg_tcp ? cfg_rcv_timeout_ms : cfg_connect_timeout_ms;
unsigned long tnow, treport;
- int fd, loop = 0;
+ int fd;
fd = do_socket(cfg_tcp);
@@ -342,12 +361,7 @@ static void do_recv(void)
treport = gettimeofday_ms() + 1000;
do {
- /* force termination after the second poll(); this cope both
- * with sender slower than receiver and missing packet errors
- */
- if (cfg_expected_pkt_nr && loop++)
- interrupted = true;
- do_poll(fd);
+ do_poll(fd, timeout_ms);
if (cfg_tcp)
do_flush_tcp(fd);
@@ -365,6 +379,8 @@ static void do_recv(void)
treport = tnow + 1000;
}
+ timeout_ms = cfg_rcv_timeout_ms;
+
} while (!interrupted);
if (cfg_expected_pkt_nr && (packets != cfg_expected_pkt_nr))
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
index 47ed6cef93fb..c9ff2b47bd1c 100644
--- a/tools/testing/selftests/netfilter/Makefile
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for netfilter selftests
-TEST_PROGS := nft_trans_stress.sh
+TEST_PROGS := nft_trans_stress.sh nft_nat.sh
include ../lib.mk
diff --git a/tools/testing/selftests/netfilter/config b/tools/testing/selftests/netfilter/config
index 1017313e41a8..59caa8f71cd8 100644
--- a/tools/testing/selftests/netfilter/config
+++ b/tools/testing/selftests/netfilter/config
@@ -1,2 +1,2 @@
CONFIG_NET_NS=y
-NF_TABLES_INET=y
+CONFIG_NF_TABLES_INET=y
diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
new file mode 100755
index 000000000000..8ec76681605c
--- /dev/null
+++ b/tools/testing/selftests/netfilter/nft_nat.sh
@@ -0,0 +1,762 @@
+#!/bin/bash
+#
+# This test is for basic NAT functionality: snat, dnat, redirect, masquerade.
+#
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without nft tool"
+ exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+ip netns add ns0
+ip netns add ns1
+ip netns add ns2
+
+ip link add veth0 netns ns0 type veth peer name eth0 netns ns1
+ip link add veth1 netns ns0 type veth peer name eth0 netns ns2
+
+ip -net ns0 link set lo up
+ip -net ns0 link set veth0 up
+ip -net ns0 addr add 10.0.1.1/24 dev veth0
+ip -net ns0 addr add dead:1::1/64 dev veth0
+
+ip -net ns0 link set veth1 up
+ip -net ns0 addr add 10.0.2.1/24 dev veth1
+ip -net ns0 addr add dead:2::1/64 dev veth1
+
+for i in 1 2; do
+ ip -net ns$i link set lo up
+ ip -net ns$i link set eth0 up
+ ip -net ns$i addr add 10.0.$i.99/24 dev eth0
+ ip -net ns$i route add default via 10.0.$i.1
+ ip -net ns$i addr add dead:$i::99/64 dev eth0
+ ip -net ns$i route add default via dead:$i::1
+done
+
+bad_counter()
+{
+ local ns=$1
+ local counter=$2
+ local expect=$3
+
+ echo "ERROR: $counter counter in $ns has unexpected value (expected $expect)" 1>&2
+ ip netns exec $ns nft list counter inet filter $counter 1>&2
+}
+
+check_counters()
+{
+ ns=$1
+ local lret=0
+
+ cnt=$(ip netns exec $ns nft list counter inet filter ns0in | grep -q "packets 1 bytes 84")
+ if [ $? -ne 0 ]; then
+ bad_counter $ns ns0in "packets 1 bytes 84"
+ lret=1
+ fi
+ cnt=$(ip netns exec $ns nft list counter inet filter ns0out | grep -q "packets 1 bytes 84")
+ if [ $? -ne 0 ]; then
+ bad_counter $ns ns0out "packets 1 bytes 84"
+ lret=1
+ fi
+
+ expect="packets 1 bytes 104"
+ cnt=$(ip netns exec $ns nft list counter inet filter ns0in6 | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter $ns ns0in6 "$expect"
+ lret=1
+ fi
+ cnt=$(ip netns exec $ns nft list counter inet filter ns0out6 | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter $ns ns0out6 "$expect"
+ lret=1
+ fi
+
+ return $lret
+}
+
+check_ns0_counters()
+{
+ local ns=$1
+ local lret=0
+
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns0in | grep -q "packets 0 bytes 0")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns0in "packets 0 bytes 0"
+ lret=1
+ fi
+
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns0in6 | grep -q "packets 0 bytes 0")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns0in6 "packets 0 bytes 0"
+ lret=1
+ fi
+
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns0out | grep -q "packets 0 bytes 0")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns0out "packets 0 bytes 0"
+ lret=1
+ fi
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns0out6 | grep -q "packets 0 bytes 0")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns0out6 "packets 0 bytes 0"
+ lret=1
+ fi
+
+ for dir in "in" "out" ; do
+ expect="packets 1 bytes 84"
+ cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 $ns$dir "$expect"
+ lret=1
+ fi
+
+ expect="packets 1 bytes 104"
+ cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir}6 | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+			bad_counter ns0 ${ns}${dir}6 "$expect"
+ lret=1
+ fi
+ done
+
+ return $lret
+}
+
+reset_counters()
+{
+ for i in 0 1 2;do
+ ip netns exec ns$i nft reset counters inet > /dev/null
+ done
+}
+
+test_local_dnat6()
+{
+ local lret=0
+ip netns exec ns0 nft -f - <<EOF
+table ip6 nat {
+ chain output {
+ type nat hook output priority 0; policy accept;
+ ip6 daddr dead:1::99 dnat to dead:2::99
+ }
+}
+EOF
+ if [ $? -ne 0 ]; then
+ echo "SKIP: Could not add add ip6 dnat hook"
+ return $ksft_skip
+ fi
+
+ # ping netns1, expect rewrite to netns2
+ ip netns exec ns0 ping -q -c 1 dead:1::99 > /dev/null
+ if [ $? -ne 0 ]; then
+ lret=1
+ echo "ERROR: ping6 failed"
+ return $lret
+ fi
+
+ expect="packets 0 bytes 0"
+ for dir in "in6" "out6" ; do
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns1$dir "$expect"
+ lret=1
+ fi
+ done
+
+ expect="packets 1 bytes 104"
+ for dir in "in6" "out6" ; do
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns2$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # expect 0 count in ns1
+ expect="packets 0 bytes 0"
+ for dir in "in6" "out6" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns0$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # expect 1 packet in ns2
+ expect="packets 1 bytes 104"
+ for dir in "in6" "out6" ; do
+ cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns2 ns0$dir "$expect"
+ lret=1
+ fi
+ done
+
+ test $lret -eq 0 && echo "PASS: ipv6 ping to ns1 was NATted to ns2"
+ ip netns exec ns0 nft flush chain ip6 nat output
+
+ return $lret
+}
+
+test_local_dnat()
+{
+ local lret=0
+ip netns exec ns0 nft -f - <<EOF
+table ip nat {
+ chain output {
+ type nat hook output priority 0; policy accept;
+ ip daddr 10.0.1.99 dnat to 10.0.2.99
+ }
+}
+EOF
+ # ping netns1, expect rewrite to netns2
+ ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
+ if [ $? -ne 0 ]; then
+ lret=1
+ echo "ERROR: ping failed"
+ return $lret
+ fi
+
+ expect="packets 0 bytes 0"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns1$dir "$expect"
+ lret=1
+ fi
+ done
+
+ expect="packets 1 bytes 84"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns2$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # expect 0 count in ns1
+ expect="packets 0 bytes 0"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns0$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # expect 1 packet in ns2
+ expect="packets 1 bytes 84"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns2 ns0$dir "$expect"
+ lret=1
+ fi
+ done
+
+ test $lret -eq 0 && echo "PASS: ping to ns1 was NATted to ns2"
+
+ ip netns exec ns0 nft flush chain ip nat output
+
+ reset_counters
+ ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
+ if [ $? -ne 0 ]; then
+ lret=1
+ echo "ERROR: ping failed"
+ return $lret
+ fi
+
+ expect="packets 1 bytes 84"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+			bad_counter ns0 ns1$dir "$expect"
+ lret=1
+ fi
+ done
+ expect="packets 0 bytes 0"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns2$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # expect 1 count in ns1
+ expect="packets 1 bytes 84"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+			bad_counter ns1 ns0$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # expect 0 packet in ns2
+ expect="packets 0 bytes 0"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns2 ns2$dir "$expect"
+ lret=1
+ fi
+ done
+
+ test $lret -eq 0 && echo "PASS: ping to ns1 OK after nat output chain flush"
+
+ return $lret
+}
+
+
+test_masquerade6()
+{
+ local lret=0
+
+ ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
+
+ ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: cannot ping ns1 from ns2 via ipv6"
+		lret=1
+		return $lret
+ fi
+
+ expect="packets 1 bytes 104"
+ for dir in "in6" "out6" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns2$dir "$expect"
+ lret=1
+ fi
+
+ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns2 ns1$dir "$expect"
+ lret=1
+ fi
+ done
+
+ reset_counters
+
+# add masquerading rule
+ip netns exec ns0 nft -f - <<EOF
+table ip6 nat {
+ chain postrouting {
+ type nat hook postrouting priority 0; policy accept;
+ meta oif veth0 masquerade
+ }
+}
+EOF
+ ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerading"
+ lret=1
+ fi
+
+ # ns1 should have seen packets from ns0, due to masquerade
+ expect="packets 1 bytes 104"
+ for dir in "in6" "out6" ; do
+
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns0$dir "$expect"
+ lret=1
+ fi
+
+ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns2 ns1$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # ns1 should not have seen packets from ns2, due to masquerade
+ expect="packets 0 bytes 0"
+ for dir in "in6" "out6" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns2$dir "$expect"
+ lret=1
+ fi
+
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns2$dir "$expect"
+ lret=1
+ fi
+ done
+
+ ip netns exec ns0 nft flush chain ip6 nat postrouting
+ if [ $? -ne 0 ]; then
+ echo "ERROR: Could not flush ip6 nat postrouting" 1>&2
+ lret=1
+ fi
+
+ test $lret -eq 0 && echo "PASS: IPv6 masquerade for ns2"
+
+ return $lret
+}
+
+test_masquerade()
+{
+ local lret=0
+
+ ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+ ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+
+ ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: canot ping ns1 from ns2"
+ lret=1
+ fi
+
+ expect="packets 1 bytes 84"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns2$dir "$expect"
+ lret=1
+ fi
+
+ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns2 ns1$dir "$expect"
+ lret=1
+ fi
+ done
+
+ reset_counters
+
+# add masquerading rule
+ip netns exec ns0 nft -f - <<EOF
+table ip nat {
+ chain postrouting {
+ type nat hook postrouting priority 0; policy accept;
+ meta oif veth0 masquerade
+ }
+}
+EOF
+ ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: cannot ping ns1 from ns2 with active ip masquerading"
+ lret=1
+ fi
+
+ # ns1 should have seen packets from ns0, due to masquerade
+ expect="packets 1 bytes 84"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns0$dir "$expect"
+ lret=1
+ fi
+
+ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns2 ns1$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # ns1 should not have seen packets from ns2, due to masquerade
+ expect="packets 0 bytes 0"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns2$dir "$expect"
+ lret=1
+ fi
+
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns2$dir "$expect"
+ lret=1
+ fi
+ done
+
+ ip netns exec ns0 nft flush chain ip nat postrouting
+ if [ $? -ne 0 ]; then
+ echo "ERROR: Could not flush nat postrouting" 1>&2
+ lret=1
+ fi
+
+ test $lret -eq 0 && echo "PASS: IP masquerade for ns2"
+
+ return $lret
+}
+
+test_redirect6()
+{
+ local lret=0
+
+ ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
+
+ ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: cannnot ping ns1 from ns2 via ipv6"
+ lret=1
+ fi
+
+ expect="packets 1 bytes 104"
+ for dir in "in6" "out6" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns2$dir "$expect"
+ lret=1
+ fi
+
+ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns2 ns1$dir "$expect"
+ lret=1
+ fi
+ done
+
+ reset_counters
+
+# add redirect rule
+ip netns exec ns0 nft -f - <<EOF
+table ip6 nat {
+ chain prerouting {
+ type nat hook prerouting priority 0; policy accept;
+ meta iif veth1 meta l4proto icmpv6 ip6 saddr dead:2::99 ip6 daddr dead:1::99 redirect
+ }
+}
+EOF
+ ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: cannot ping ns1 from ns2 with active ip6 redirect"
+ lret=1
+ fi
+
+ # ns1 should have seen no packets from ns2, due to redirection
+ expect="packets 0 bytes 0"
+ for dir in "in6" "out6" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns2$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # ns0 should have seen packets from ns2, due to the redirect
+ expect="packets 1 bytes 104"
+ for dir in "in6" "out6" ; do
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns2$dir "$expect"
+ lret=1
+ fi
+ done
+
+ ip netns exec ns0 nft delete table ip6 nat
+ if [ $? -ne 0 ]; then
+ echo "ERROR: Could not delete ip6 nat table" 1>&2
+ lret=1
+ fi
+
+ test $lret -eq 0 && echo "PASS: IPv6 redirection for ns2"
+
+ return $lret
+}
+
+test_redirect()
+{
+ local lret=0
+
+ ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+ ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+
+ ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: cannot ping ns1 from ns2"
+ lret=1
+ fi
+
+ expect="packets 1 bytes 84"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns2$dir "$expect"
+ lret=1
+ fi
+
+ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns2 ns1$dir "$expect"
+ lret=1
+ fi
+ done
+
+ reset_counters
+
+# add redirect rule
+ip netns exec ns0 nft -f - <<EOF
+table ip nat {
+ chain prerouting {
+ type nat hook prerouting priority 0; policy accept;
+ meta iif veth1 ip protocol icmp ip saddr 10.0.2.99 ip daddr 10.0.1.99 redirect
+ }
+}
+EOF
+ ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: cannot ping ns1 from ns2 with active ip redirect"
+ lret=1
+ fi
+
+ # ns1 should have seen no packets from ns2, due to redirection
+ expect="packets 0 bytes 0"
+ for dir in "in" "out" ; do
+
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns2$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # ns0 should have seen packets from ns2, due to the redirect
+ expect="packets 1 bytes 84"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns2$dir "$expect"
+ lret=1
+ fi
+ done
+
+ ip netns exec ns0 nft delete table ip nat
+ if [ $? -ne 0 ]; then
+ echo "ERROR: Could not delete nat table" 1>&2
+ lret=1
+ fi
+
+ test $lret -eq 0 && echo "PASS: IP redirection for ns2"
+
+ return $lret
+}
+
+
+# ip netns exec ns0 ping -c 1 -q 10.0.$i.99
+for i in 0 1 2; do
+ip netns exec ns$i nft -f - <<EOF
+table inet filter {
+ counter ns0in {}
+ counter ns1in {}
+ counter ns2in {}
+
+ counter ns0out {}
+ counter ns1out {}
+ counter ns2out {}
+
+ counter ns0in6 {}
+ counter ns1in6 {}
+ counter ns2in6 {}
+
+ counter ns0out6 {}
+ counter ns1out6 {}
+ counter ns2out6 {}
+
+ map nsincounter {
+ type ipv4_addr : counter
+ elements = { 10.0.1.1 : "ns0in",
+ 10.0.2.1 : "ns0in",
+ 10.0.1.99 : "ns1in",
+ 10.0.2.99 : "ns2in" }
+ }
+
+ map nsincounter6 {
+ type ipv6_addr : counter
+ elements = { dead:1::1 : "ns0in6",
+ dead:2::1 : "ns0in6",
+ dead:1::99 : "ns1in6",
+ dead:2::99 : "ns2in6" }
+ }
+
+ map nsoutcounter {
+ type ipv4_addr : counter
+ elements = { 10.0.1.1 : "ns0out",
+ 10.0.2.1 : "ns0out",
+ 10.0.1.99: "ns1out",
+ 10.0.2.99: "ns2out" }
+ }
+
+ map nsoutcounter6 {
+ type ipv6_addr : counter
+ elements = { dead:1::1 : "ns0out6",
+ dead:2::1 : "ns0out6",
+ dead:1::99 : "ns1out6",
+ dead:2::99 : "ns2out6" }
+ }
+
+ chain input {
+ type filter hook input priority 0; policy accept;
+ counter name ip saddr map @nsincounter
+ icmpv6 type { "echo-request", "echo-reply" } counter name ip6 saddr map @nsincounter6
+ }
+ chain output {
+ type filter hook output priority 0; policy accept;
+ counter name ip daddr map @nsoutcounter
+ icmpv6 type { "echo-request", "echo-reply" } counter name ip6 daddr map @nsoutcounter6
+ }
+}
+EOF
+done
+
+sleep 3
+# test basic connectivity
+for i in 1 2; do
+ ip netns exec ns0 ping -c 1 -q 10.0.$i.99 > /dev/null
+ if [ $? -ne 0 ];then
+ echo "ERROR: Could not reach other namespace(s)" 1>&2
+ ret=1
+ fi
+
+ ip netns exec ns0 ping -c 1 -q dead:$i::99 > /dev/null
+ if [ $? -ne 0 ];then
+ echo "ERROR: Could not reach other namespace(s) via ipv6" 1>&2
+ ret=1
+ fi
+ check_counters ns$i
+ if [ $? -ne 0 ]; then
+ ret=1
+ fi
+
+ check_ns0_counters ns$i
+ if [ $? -ne 0 ]; then
+ ret=1
+ fi
+ reset_counters
+done
+
+if [ $ret -eq 0 ];then
+ echo "PASS: netns routing/connectivity: ns0 can reach ns1 and ns2"
+fi
+
+reset_counters
+test_local_dnat
+test_local_dnat6
+
+reset_counters
+test_masquerade
+test_masquerade6
+
+reset_counters
+test_redirect
+test_redirect6
+
+for i in 0 1 2; do ip netns del ns$i;done
+
+exit $ret
diff --git a/tools/testing/selftests/networking/timestamping/Makefile b/tools/testing/selftests/networking/timestamping/Makefile
index 9050eeea5f5f..1de8bd8ccf5d 100644
--- a/tools/testing/selftests/networking/timestamping/Makefile
+++ b/tools/testing/selftests/networking/timestamping/Makefile
@@ -9,6 +9,3 @@ all: $(TEST_PROGS)
top_srcdir = ../../../../..
KSFT_KHDR_INSTALL := 1
include ../../lib.mk
-
-clean:
- rm -fr $(TEST_GEN_FILES)
diff --git a/tools/testing/selftests/networking/timestamping/rxtimestamp.c b/tools/testing/selftests/networking/timestamping/rxtimestamp.c
index dd4162fc0419..6dee9e636a95 100644
--- a/tools/testing/selftests/networking/timestamping/rxtimestamp.c
+++ b/tools/testing/selftests/networking/timestamping/rxtimestamp.c
@@ -5,6 +5,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <unistd.h>
#include <sys/time.h>
#include <sys/socket.h>
diff --git a/tools/testing/selftests/powerpc/benchmarks/null_syscall.c b/tools/testing/selftests/powerpc/benchmarks/null_syscall.c
index ecc14d68e101..908de689a902 100644
--- a/tools/testing/selftests/powerpc/benchmarks/null_syscall.c
+++ b/tools/testing/selftests/powerpc/benchmarks/null_syscall.c
@@ -25,7 +25,7 @@ unsigned long long clock_frequency;
unsigned long long timebase_frequency;
double timebase_multiplier;
-static inline unsigned long long mftb(void)
+static inline unsigned long mftb(void)
{
unsigned long low;
diff --git a/tools/testing/selftests/powerpc/include/reg.h b/tools/testing/selftests/powerpc/include/reg.h
index 52b4710469d2..96043b9b9829 100644
--- a/tools/testing/selftests/powerpc/include/reg.h
+++ b/tools/testing/selftests/powerpc/include/reg.h
@@ -77,6 +77,14 @@
#define TEXASR_TE 0x0000000004000000
#define TEXASR_ROT 0x0000000002000000
+/* MSR register bits */
+#define MSR_TS_S_LG 33 /* Trans Mem state: Suspended */
+
+#define __MASK(X) (1UL<<(X))
+
+/* macro to check TM MSR bits */
+#define MSR_TS_S __MASK(MSR_TS_S_LG) /* Transaction Suspended */
+
/* Vector Instructions */
#define VSX_XX1(xs, ra, rb) (((xs) & 0x1f) << 21 | ((ra) << 16) | \
((rb) << 11) | (((xs) >> 5)))
diff --git a/tools/testing/selftests/powerpc/include/utils.h b/tools/testing/selftests/powerpc/include/utils.h
index ae43a614835d..7636bf45d5d5 100644
--- a/tools/testing/selftests/powerpc/include/utils.h
+++ b/tools/testing/selftests/powerpc/include/utils.h
@@ -102,8 +102,10 @@ do { \
#if defined(__powerpc64__)
#define UCONTEXT_NIA(UC) (UC)->uc_mcontext.gp_regs[PT_NIP]
+#define UCONTEXT_MSR(UC) (UC)->uc_mcontext.gp_regs[PT_MSR]
#elif defined(__powerpc__)
#define UCONTEXT_NIA(UC) (UC)->uc_mcontext.uc_regs->gregs[PT_NIP]
+#define UCONTEXT_MSR(UC) (UC)->uc_mcontext.uc_regs->gregs[PT_MSR]
#else
#error implement UCONTEXT_NIA
#endif
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/fork_cleanup_test.c b/tools/testing/selftests/powerpc/pmu/ebb/fork_cleanup_test.c
index 167135bd92a8..af1b80265076 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/fork_cleanup_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/fork_cleanup_test.c
@@ -11,7 +11,6 @@
#include <sys/wait.h>
#include <unistd.h>
#include <setjmp.h>
-#include <signal.h>
#include "ebb.h"
diff --git a/tools/testing/selftests/powerpc/tm/.gitignore b/tools/testing/selftests/powerpc/tm/.gitignore
index 208452a93e2c..951fe855f7cd 100644
--- a/tools/testing/selftests/powerpc/tm/.gitignore
+++ b/tools/testing/selftests/powerpc/tm/.gitignore
@@ -11,6 +11,7 @@ tm-signal-context-chk-fpu
tm-signal-context-chk-gpr
tm-signal-context-chk-vmx
tm-signal-context-chk-vsx
+tm-signal-context-force-tm
tm-signal-sigreturn-nt
tm-vmx-unavail
tm-unavailable
diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile
index 75a685359129..c0734ed0ef56 100644
--- a/tools/testing/selftests/powerpc/tm/Makefile
+++ b/tools/testing/selftests/powerpc/tm/Makefile
@@ -4,7 +4,8 @@ SIGNAL_CONTEXT_CHK_TESTS := tm-signal-context-chk-gpr tm-signal-context-chk-fpu
TEST_GEN_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack \
tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail tm-unavailable tm-trap \
- $(SIGNAL_CONTEXT_CHK_TESTS) tm-sigreturn tm-signal-sigreturn-nt
+ $(SIGNAL_CONTEXT_CHK_TESTS) tm-sigreturn tm-signal-sigreturn-nt \
+ tm-signal-context-force-tm
top_srcdir = ../../../../..
include ../../lib.mk
@@ -20,6 +21,7 @@ $(OUTPUT)/tm-vmx-unavail: CFLAGS += -pthread -m64
$(OUTPUT)/tm-resched-dscr: ../pmu/lib.c
$(OUTPUT)/tm-unavailable: CFLAGS += -O0 -pthread -m64 -Wno-error=uninitialized -mvsx
$(OUTPUT)/tm-trap: CFLAGS += -O0 -pthread -m64
+$(OUTPUT)/tm-signal-context-force-tm: CFLAGS += -pthread -m64
SIGNAL_CONTEXT_CHK_TESTS := $(patsubst %,$(OUTPUT)/%,$(SIGNAL_CONTEXT_CHK_TESTS))
$(SIGNAL_CONTEXT_CHK_TESTS): tm-signal.S
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-context-force-tm.c b/tools/testing/selftests/powerpc/tm/tm-signal-context-force-tm.c
new file mode 100644
index 000000000000..31717625f318
--- /dev/null
+++ b/tools/testing/selftests/powerpc/tm/tm-signal-context-force-tm.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018, Breno Leitao, Gustavo Romero, IBM Corp.
+ *
+ * This test raises a SIGUSR1 signal and toggles the MSR[TS]
+ * field in the signal handler. With MSR[TS] set, the kernel will
+ * force a recheckpoint, which may cause a segfault when returning to
+ * user space. Since the test needs to re-run, the segfault needs to be
+ * caught and handled.
+ *
+ * In order to continue the test even after a segfault, the context is
+ * saved prior to the signal being raised, and it is restored when there is
+ * a segmentation fault. This happens for COUNT_MAX times.
+ *
+ * This test never fails (i.e. it never returns EXIT_FAILURE). It either
+ * succeeds or crashes the kernel (if the kernel is buggy).
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <string.h>
+#include <ucontext.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+#include "tm.h"
+#include "utils.h"
+#include "reg.h"
+
+#define COUNT_MAX 5000 /* Number of iterations */
+
+/*
+ * This test only runs on 64-bit systems. MSR_TS_S is undefined here to
+ * avoid a compilation issue on 32-bit systems. There is no side effect,
+ * since the whole test is skipped when not running on a 64-bit system.
+ */
+#ifndef __powerpc64__
+#undef MSR_TS_S
+#define MSR_TS_S 0
+#endif
+
+/* Setting contexts because the test will crash and we want to recover */
+ucontext_t init_context, main_context;
+
+static int count, first_time;
+
+void usr_signal_handler(int signo, siginfo_t *si, void *uc)
+{
+ ucontext_t *ucp = uc;
+ int ret;
+
+ /*
+ * Memory is allocated in the signal handler and deliberately never
+ * freed, forcing the heap to grow; the memory leak is exactly what
+ * we want here.
+ */
+ ucp->uc_link = mmap(NULL, sizeof(ucontext_t),
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+ if (ucp->uc_link == (void *)-1) {
+ perror("Mmap failed");
+ exit(-1);
+ }
+
+ /* Forcing the page to be allocated in a page fault */
+ ret = madvise(ucp->uc_link, sizeof(ucontext_t), MADV_DONTNEED);
+ if (ret) {
+ perror("madvise failed");
+ exit(-1);
+ }
+
+ memcpy(&ucp->uc_link->uc_mcontext, &ucp->uc_mcontext,
+ sizeof(ucp->uc_mcontext));
+
+ /* Force the MSR[TS] (suspended transaction) bit on */
+ UCONTEXT_MSR(ucp) |= MSR_TS_S;
+
+ /*
+ * A fork inside a signal handler seems to be more efficient than a
+ * fork() prior to the signal being raised.
+ */
+ if (fork() == 0) {
+ /*
+ * Both child and parent will return, but, child returns
+ * with count set so it will exit in the next segfault.
+ * Parent will continue to loop.
+ */
+ count = COUNT_MAX;
+ }
+
+ /*
+ * If the change above does not hit the bug, it will cause a
+ * segmentation fault, since the ck structures are NULL.
+ */
+}
+
+void seg_signal_handler(int signo, siginfo_t *si, void *uc)
+{
+ if (count == COUNT_MAX) {
+ /* Return to tm_signal_context_force_tm() and exit */
+ setcontext(&main_context);
+ }
+
+ count++;
+
+ /* Reexecute the test */
+ setcontext(&init_context);
+}
+
+void tm_trap_test(void)
+{
+ struct sigaction usr_sa, seg_sa;
+ stack_t ss;
+
+ usr_sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
+ usr_sa.sa_sigaction = usr_signal_handler;
+
+ seg_sa.sa_flags = SA_SIGINFO;
+ seg_sa.sa_sigaction = seg_signal_handler;
+
+ /*
+ * Set initial context. Will get back here from
+ * seg_signal_handler()
+ */
+ getcontext(&init_context);
+
+ /* Allocate an alternative signal stack area */
+ ss.ss_sp = mmap(NULL, SIGSTKSZ, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+ ss.ss_size = SIGSTKSZ;
+ ss.ss_flags = 0;
+
+ if (ss.ss_sp == (void *)-1) {
+ perror("mmap error\n");
+ exit(-1);
+ }
+
+ /* Force the allocation through a page fault */
+ if (madvise(ss.ss_sp, SIGSTKSZ, MADV_DONTNEED)) {
+ perror("madvise\n");
+ exit(-1);
+ }
+
+ /* Setting an alternative stack to generate a page fault when
+ * the signal is raised.
+ */
+ if (sigaltstack(&ss, NULL)) {
+ perror("sigaltstack\n");
+ exit(-1);
+ }
+
+ /* The signal handler will enable MSR_TS */
+ sigaction(SIGUSR1, &usr_sa, NULL);
+ /* If the kernel does not crash, the process segfaults; catch it so the test can re-run */
+ sigaction(SIGSEGV, &seg_sa, NULL);
+
+ raise(SIGUSR1);
+}
+
+int tm_signal_context_force_tm(void)
+{
+ SKIP_IF(!have_htm());
+ /*
+ * Skip if not running on a 64-bit system, since it does not seem
+ * possible to set the TS bits in the mcontext MSR when it is only
+ * 32 bits wide.
+ */
+ SKIP_IF(!is_ppc64le());
+
+ /* Will get back here after COUNT_MAX iterations */
+ getcontext(&main_context);
+
+ if (!first_time++)
+ tm_trap_test();
+
+ return EXIT_SUCCESS;
+}
+
+int main(int argc, char **argv)
+{
+ test_harness(tm_signal_context_force_tm, "tm_signal_context_force_tm");
+}
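A minimal sketch of the retry mechanism described in the header comment above: save a context, trigger the fault, and restore the saved context from the SIGSEGV handler. This is plain C with no MSR/TM handling; RETRY_MAX and the other identifiers are illustrative, and calling setcontext() from a signal handler is relied on only because it works on Linux/glibc in practice, as the selftest itself does.

/* Sketch only: getcontext()/setcontext() retry loop around an expected fault. */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ucontext.h>

#define RETRY_MAX 3                        /* stand-in for COUNT_MAX */

static ucontext_t retry_point;             /* saved before the faulting step */
static volatile sig_atomic_t attempts;

static void segv_handler(int sig)
{
	if (++attempts >= RETRY_MAX)
		exit(0);                   /* give up after RETRY_MAX tries */
	setcontext(&retry_point);          /* re-run from the saved context */
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = segv_handler;
	sigaction(SIGSEGV, &sa, NULL);

	getcontext(&retry_point);          /* execution resumes here on each retry */
	printf("attempt %d\n", (int)attempts);

	raise(SIGSEGV);                    /* stands in for the faulting recheckpoint */
	return 0;
}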
diff --git a/tools/testing/selftests/proc/.gitignore b/tools/testing/selftests/proc/.gitignore
index 29bac5ef9a93..444ad39d3700 100644
--- a/tools/testing/selftests/proc/.gitignore
+++ b/tools/testing/selftests/proc/.gitignore
@@ -2,6 +2,7 @@
/fd-002-posix-eq
/fd-003-kthread
/proc-loadavg-001
+/proc-pid-vm
/proc-self-map-files-001
/proc-self-map-files-002
/proc-self-syscall
diff --git a/tools/testing/selftests/proc/Makefile b/tools/testing/selftests/proc/Makefile
index 434d033ee067..5163dc887aa3 100644
--- a/tools/testing/selftests/proc/Makefile
+++ b/tools/testing/selftests/proc/Makefile
@@ -6,6 +6,7 @@ TEST_GEN_PROGS += fd-001-lookup
TEST_GEN_PROGS += fd-002-posix-eq
TEST_GEN_PROGS += fd-003-kthread
TEST_GEN_PROGS += proc-loadavg-001
+TEST_GEN_PROGS += proc-pid-vm
TEST_GEN_PROGS += proc-self-map-files-001
TEST_GEN_PROGS += proc-self-map-files-002
TEST_GEN_PROGS += proc-self-syscall
diff --git a/tools/testing/selftests/proc/proc-loadavg-001.c b/tools/testing/selftests/proc/proc-loadavg-001.c
index fcff7047000d..471e2aa28077 100644
--- a/tools/testing/selftests/proc/proc-loadavg-001.c
+++ b/tools/testing/selftests/proc/proc-loadavg-001.c
@@ -30,7 +30,7 @@ int main(void)
if (unshare(CLONE_NEWPID) == -1) {
if (errno == ENOSYS || errno == EPERM)
- return 2;
+ return 4;
return 1;
}
diff --git a/tools/testing/selftests/proc/proc-pid-vm.c b/tools/testing/selftests/proc/proc-pid-vm.c
new file mode 100644
index 000000000000..bbe8150d18aa
--- /dev/null
+++ b/tools/testing/selftests/proc/proc-pid-vm.c
@@ -0,0 +1,406 @@
+/*
+ * Copyright (c) 2019 Alexey Dobriyan <adobriyan@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * Fork and exec tiny 1 page executable which precisely controls its VM.
+ * Test /proc/$PID/maps
+ * Test /proc/$PID/smaps
+ * Test /proc/$PID/smaps_rollup
+ * Test /proc/$PID/statm
+ *
+ * FIXME require CONFIG_TMPFS which can be disabled
+ * FIXME test other values from "smaps"
+ * FIXME support other archs
+ */
+#undef NDEBUG
+#include <assert.h>
+#include <errno.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <sys/mount.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <sys/uio.h>
+#include <linux/kdev_t.h>
+
+static inline long sys_execveat(int dirfd, const char *pathname, char **argv, char **envp, int flags)
+{
+ return syscall(SYS_execveat, dirfd, pathname, argv, envp, flags);
+}
+
+static void make_private_tmp(void)
+{
+ if (unshare(CLONE_NEWNS) == -1) {
+ if (errno == ENOSYS || errno == EPERM) {
+ exit(4);
+ }
+ exit(1);
+ }
+ if (mount(NULL, "/", NULL, MS_PRIVATE|MS_REC, NULL) == -1) {
+ exit(1);
+ }
+ if (mount(NULL, "/tmp", "tmpfs", 0, NULL) == -1) {
+ exit(1);
+ }
+}
+
+static pid_t pid = -1;
+static void ate(void)
+{
+ if (pid > 0) {
+ kill(pid, SIGTERM);
+ }
+}
+
+struct elf64_hdr {
+ uint8_t e_ident[16];
+ uint16_t e_type;
+ uint16_t e_machine;
+ uint32_t e_version;
+ uint64_t e_entry;
+ uint64_t e_phoff;
+ uint64_t e_shoff;
+ uint32_t e_flags;
+ uint16_t e_ehsize;
+ uint16_t e_phentsize;
+ uint16_t e_phnum;
+ uint16_t e_shentsize;
+ uint16_t e_shnum;
+ uint16_t e_shstrndx;
+};
+
+struct elf64_phdr {
+ uint32_t p_type;
+ uint32_t p_flags;
+ uint64_t p_offset;
+ uint64_t p_vaddr;
+ uint64_t p_paddr;
+ uint64_t p_filesz;
+ uint64_t p_memsz;
+ uint64_t p_align;
+};
+
+#ifdef __x86_64__
+#define PAGE_SIZE 4096
+#define VADDR (1UL << 32)
+#define MAPS_OFFSET 73
+
+#define syscall 0x0f, 0x05
+#define mov_rdi(x) \
+ 0x48, 0xbf, \
+ (x)&0xff, ((x)>>8)&0xff, ((x)>>16)&0xff, ((x)>>24)&0xff, \
+ ((x)>>32)&0xff, ((x)>>40)&0xff, ((x)>>48)&0xff, ((x)>>56)&0xff
+
+#define mov_rsi(x) \
+ 0x48, 0xbe, \
+ (x)&0xff, ((x)>>8)&0xff, ((x)>>16)&0xff, ((x)>>24)&0xff, \
+ ((x)>>32)&0xff, ((x)>>40)&0xff, ((x)>>48)&0xff, ((x)>>56)&0xff
+
+#define mov_eax(x) \
+ 0xb8, (x)&0xff, ((x)>>8)&0xff, ((x)>>16)&0xff, ((x)>>24)&0xff
+
+static const uint8_t payload[] = {
+ /* Casually unmap stack, vDSO and everything else. */
+ /* munmap */
+ mov_rdi(VADDR + 4096),
+ mov_rsi((1ULL << 47) - 4096 - VADDR - 4096),
+ mov_eax(11),
+ syscall,
+
+ /* Ping parent. */
+ /* write(0, &c, 1); */
+ 0x31, 0xff, /* xor edi, edi */
+ 0x48, 0x8d, 0x35, 0x00, 0x00, 0x00, 0x00, /* lea rsi, [rip] */
+ 0xba, 0x01, 0x00, 0x00, 0x00, /* mov edx, 1 */
+ mov_eax(1),
+ syscall,
+
+ /* 1: pause(); */
+ mov_eax(34),
+ syscall,
+
+ 0xeb, 0xf7, /* jmp 1b */
+};
+
+static int make_exe(const uint8_t *payload, size_t len)
+{
+ struct elf64_hdr h;
+ struct elf64_phdr ph;
+
+ struct iovec iov[3] = {
+ {&h, sizeof(struct elf64_hdr)},
+ {&ph, sizeof(struct elf64_phdr)},
+ {(void *)payload, len},
+ };
+ int fd, fd1;
+ char buf[64];
+
+ memset(&h, 0, sizeof(h));
+ h.e_ident[0] = 0x7f;
+ h.e_ident[1] = 'E';
+ h.e_ident[2] = 'L';
+ h.e_ident[3] = 'F';
+ h.e_ident[4] = 2;
+ h.e_ident[5] = 1;
+ h.e_ident[6] = 1;
+ h.e_ident[7] = 0;
+ h.e_type = 2;
+ h.e_machine = 0x3e;
+ h.e_version = 1;
+ h.e_entry = VADDR + sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr);
+ h.e_phoff = sizeof(struct elf64_hdr);
+ h.e_shoff = 0;
+ h.e_flags = 0;
+ h.e_ehsize = sizeof(struct elf64_hdr);
+ h.e_phentsize = sizeof(struct elf64_phdr);
+ h.e_phnum = 1;
+ h.e_shentsize = 0;
+ h.e_shnum = 0;
+ h.e_shstrndx = 0;
+
+ memset(&ph, 0, sizeof(ph));
+ ph.p_type = 1;
+ ph.p_flags = (1<<2)|1;
+ ph.p_offset = 0;
+ ph.p_vaddr = VADDR;
+ ph.p_paddr = 0;
+ ph.p_filesz = sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr) + sizeof(payload);
+ ph.p_memsz = sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr) + sizeof(payload);
+ ph.p_align = 4096;
+
+ fd = openat(AT_FDCWD, "/tmp", O_WRONLY|O_EXCL|O_TMPFILE, 0700);
+ if (fd == -1) {
+ exit(1);
+ }
+
+ if (writev(fd, iov, 3) != sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr) + len) {
+ exit(1);
+ }
+
+ /* Avoid ETXTBSY on exec. */
+ snprintf(buf, sizeof(buf), "/proc/self/fd/%u", fd);
+ fd1 = open(buf, O_RDONLY|O_CLOEXEC);
+ close(fd);
+
+ return fd1;
+}
+#endif
+
+#ifdef __x86_64__
+int main(void)
+{
+ int pipefd[2];
+ int exec_fd;
+
+ atexit(ate);
+
+ make_private_tmp();
+
+ /* Reserve fd 0 for 1-byte pipe ping from child. */
+ close(0);
+ if (open("/", O_RDONLY|O_DIRECTORY|O_PATH) != 0) {
+ return 1;
+ }
+
+ exec_fd = make_exe(payload, sizeof(payload));
+
+ if (pipe(pipefd) == -1) {
+ return 1;
+ }
+ if (dup2(pipefd[1], 0) != 0) {
+ return 1;
+ }
+
+ pid = fork();
+ if (pid == -1) {
+ return 1;
+ }
+ if (pid == 0) {
+ sys_execveat(exec_fd, "", NULL, NULL, AT_EMPTY_PATH);
+ return 1;
+ }
+
+ char _;
+ if (read(pipefd[0], &_, 1) != 1) {
+ return 1;
+ }
+
+ struct stat st;
+ if (fstat(exec_fd, &st) == -1) {
+ return 1;
+ }
+
+ /* Generate "head -n1 /proc/$PID/maps" */
+ char buf0[256];
+ memset(buf0, ' ', sizeof(buf0));
+ int len = snprintf(buf0, sizeof(buf0),
+ "%08lx-%08lx r-xp 00000000 %02lx:%02lx %llu",
+ VADDR, VADDR + PAGE_SIZE,
+ MAJOR(st.st_dev), MINOR(st.st_dev),
+ (unsigned long long)st.st_ino);
+ buf0[len] = ' ';
+ snprintf(buf0 + MAPS_OFFSET, sizeof(buf0) - MAPS_OFFSET,
+ "/tmp/#%llu (deleted)\n", (unsigned long long)st.st_ino);
+
+
+ /* Test /proc/$PID/maps */
+ {
+ char buf[256];
+ ssize_t rv;
+ int fd;
+
+ snprintf(buf, sizeof(buf), "/proc/%u/maps", pid);
+ fd = open(buf, O_RDONLY);
+ if (fd == -1) {
+ return 1;
+ }
+ rv = read(fd, buf, sizeof(buf));
+ assert(rv == strlen(buf0));
+ assert(memcmp(buf, buf0, strlen(buf0)) == 0);
+ }
+
+ /* Test /proc/$PID/smaps */
+ {
+ char buf[1024];
+ ssize_t rv;
+ int fd;
+
+ snprintf(buf, sizeof(buf), "/proc/%u/smaps", pid);
+ fd = open(buf, O_RDONLY);
+ if (fd == -1) {
+ return 1;
+ }
+ rv = read(fd, buf, sizeof(buf));
+ assert(0 <= rv && rv <= sizeof(buf));
+
+ assert(rv >= strlen(buf0));
+ assert(memcmp(buf, buf0, strlen(buf0)) == 0);
+
+#define RSS1 "Rss: 4 kB\n"
+#define RSS2 "Rss: 0 kB\n"
+#define PSS1 "Pss: 4 kB\n"
+#define PSS2 "Pss: 0 kB\n"
+ assert(memmem(buf, rv, RSS1, strlen(RSS1)) ||
+ memmem(buf, rv, RSS2, strlen(RSS2)));
+ assert(memmem(buf, rv, PSS1, strlen(PSS1)) ||
+ memmem(buf, rv, PSS2, strlen(PSS2)));
+
+ static const char *S[] = {
+ "Size: 4 kB\n",
+ "KernelPageSize: 4 kB\n",
+ "MMUPageSize: 4 kB\n",
+ "Anonymous: 0 kB\n",
+ "AnonHugePages: 0 kB\n",
+ "Shared_Hugetlb: 0 kB\n",
+ "Private_Hugetlb: 0 kB\n",
+ "Locked: 0 kB\n",
+ };
+ int i;
+
+ for (i = 0; i < sizeof(S)/sizeof(S[0]); i++) {
+ assert(memmem(buf, rv, S[i], strlen(S[i])));
+ }
+ }
+
+ /* Test /proc/$PID/smaps_rollup */
+ {
+ char bufr[256];
+ memset(bufr, ' ', sizeof(bufr));
+ len = snprintf(bufr, sizeof(bufr),
+ "%08lx-%08lx ---p 00000000 00:00 0",
+ VADDR, VADDR + PAGE_SIZE);
+ bufr[len] = ' ';
+ snprintf(bufr + MAPS_OFFSET, sizeof(bufr) - MAPS_OFFSET,
+ "[rollup]\n");
+
+ char buf[1024];
+ ssize_t rv;
+ int fd;
+
+ snprintf(buf, sizeof(buf), "/proc/%u/smaps_rollup", pid);
+ fd = open(buf, O_RDONLY);
+ if (fd == -1) {
+ return 1;
+ }
+ rv = read(fd, buf, sizeof(buf));
+ assert(0 <= rv && rv <= sizeof(buf));
+
+ assert(rv >= strlen(bufr));
+ assert(memcmp(buf, bufr, strlen(bufr)) == 0);
+
+ assert(memmem(buf, rv, RSS1, strlen(RSS1)) ||
+ memmem(buf, rv, RSS2, strlen(RSS2)));
+ assert(memmem(buf, rv, PSS1, strlen(PSS1)) ||
+ memmem(buf, rv, PSS2, strlen(PSS2)));
+
+ static const char *S[] = {
+ "Anonymous: 0 kB\n",
+ "AnonHugePages: 0 kB\n",
+ "Shared_Hugetlb: 0 kB\n",
+ "Private_Hugetlb: 0 kB\n",
+ "Locked: 0 kB\n",
+ };
+ int i;
+
+ for (i = 0; i < sizeof(S)/sizeof(S[0]); i++) {
+ assert(memmem(buf, rv, S[i], strlen(S[i])));
+ }
+ }
+
+ /* Test /proc/$PID/statm */
+ {
+ char buf[64];
+ ssize_t rv;
+ int fd;
+
+ snprintf(buf, sizeof(buf), "/proc/%u/statm", pid);
+ fd = open(buf, O_RDONLY);
+ if (fd == -1) {
+ return 1;
+ }
+ rv = read(fd, buf, sizeof(buf));
+ assert(rv == 7 * 2);
+
+ assert(buf[0] == '1'); /* ->total_vm */
+ assert(buf[1] == ' ');
+ assert(buf[2] == '0' || buf[2] == '1'); /* rss */
+ assert(buf[3] == ' ');
+ assert(buf[4] == '0' || buf[4] == '1'); /* file rss */
+ assert(buf[5] == ' ');
+ assert(buf[6] == '1'); /* ELF executable segments */
+ assert(buf[7] == ' ');
+ assert(buf[8] == '0');
+ assert(buf[9] == ' ');
+ assert(buf[10] == '0'); /* ->data_vm + ->stack_vm */
+ assert(buf[11] == ' ');
+ assert(buf[12] == '0');
+ assert(buf[13] == '\n');
+ }
+
+ return 0;
+}
+#else
+int main(void)
+{
+ return 4;
+}
+#endif
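For reference, the record that proc-pid-vm.c rebuilds and compares against is an ordinary /proc/$PID/maps line. A hedged, standalone sketch that merely reads and prints the first such line for the current process (no validation, path fixed to /proc/self/maps):

/* Sketch: print the first /proc/self/maps record, the same format the test validates. */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return 1;
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);       /* start-end perms offset dev inode pathname */
	fclose(f);
	return 0;
}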
diff --git a/tools/testing/selftests/proc/proc-self-map-files-002.c b/tools/testing/selftests/proc/proc-self-map-files-002.c
index 85744425b08d..762cb01f2ca7 100644
--- a/tools/testing/selftests/proc/proc-self-map-files-002.c
+++ b/tools/testing/selftests/proc/proc-self-map-files-002.c
@@ -63,7 +63,7 @@ int main(void)
p = mmap((void *)va, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0);
if (p == MAP_FAILED) {
if (errno == EPERM)
- return 2;
+ return 4;
return 1;
}
diff --git a/tools/testing/selftests/proc/proc-self-syscall.c b/tools/testing/selftests/proc/proc-self-syscall.c
index 5ab5f4810e43..9f6d000c0245 100644
--- a/tools/testing/selftests/proc/proc-self-syscall.c
+++ b/tools/testing/selftests/proc/proc-self-syscall.c
@@ -20,7 +20,6 @@
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
-#include <unistd.h>
#include <string.h>
#include <stdio.h>
@@ -39,7 +38,7 @@ int main(void)
fd = open("/proc/self/syscall", O_RDONLY);
if (fd == -1) {
if (errno == ENOENT)
- return 2;
+ return 4;
return 1;
}
diff --git a/tools/testing/selftests/proc/proc-self-wchan.c b/tools/testing/selftests/proc/proc-self-wchan.c
index a38b2fbaa7ad..b467b98a457d 100644
--- a/tools/testing/selftests/proc/proc-self-wchan.c
+++ b/tools/testing/selftests/proc/proc-self-wchan.c
@@ -27,7 +27,7 @@ int main(void)
fd = open("/proc/self/wchan", O_RDONLY);
if (fd == -1) {
if (errno == ENOENT)
- return 2;
+ return 4;
return 1;
}
diff --git a/tools/testing/selftests/proc/read.c b/tools/testing/selftests/proc/read.c
index 563e752e6eba..b3ef9e14d6cc 100644
--- a/tools/testing/selftests/proc/read.c
+++ b/tools/testing/selftests/proc/read.c
@@ -26,8 +26,10 @@
#include <dirent.h>
#include <stdbool.h>
#include <stdlib.h>
+#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
+#include <sys/vfs.h>
#include <fcntl.h>
#include <unistd.h>
@@ -123,10 +125,22 @@ static void f(DIR *d, unsigned int level)
int main(void)
{
DIR *d;
+ struct statfs sfs;
d = opendir("/proc");
if (!d)
+ return 4;
+
+ /* Ensure /proc is proc. */
+ if (fstatfs(dirfd(d), &sfs) == -1) {
+ return 1;
+ }
+ if (sfs.f_type != 0x9fa0) {
+ fprintf(stderr, "error: unexpected f_type %lx\n", (long)sfs.f_type);
return 2;
+ }
+
f(d, 0);
+
return 0;
}
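The hunk above compares f_type against the literal 0x9fa0, which is PROC_SUPER_MAGIC. A minimal standalone sketch of the same sanity check, assuming <linux/magic.h> provides the constant:

/* Sketch: verify that /proc is really procfs before trusting its contents. */
#include <linux/magic.h>   /* PROC_SUPER_MAGIC (0x9fa0) */
#include <stdio.h>
#include <sys/vfs.h>

int main(void)
{
	struct statfs sfs;

	if (statfs("/proc", &sfs) == -1)
		return 1;
	if (sfs.f_type != PROC_SUPER_MAGIC) {
		fprintf(stderr, "unexpected f_type %lx\n", (long)sfs.f_type);
		return 2;
	}
	return 0;
}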
diff --git a/tools/testing/selftests/rcutorture/bin/mkinitrd.sh b/tools/testing/selftests/rcutorture/bin/mkinitrd.sh
index da298394daa2..83552bb007b4 100755
--- a/tools/testing/selftests/rcutorture/bin/mkinitrd.sh
+++ b/tools/testing/selftests/rcutorture/bin/mkinitrd.sh
@@ -40,17 +40,24 @@ mkdir $T
cat > $T/init << '__EOF___'
#!/bin/sh
# Run in userspace a few milliseconds every second. This helps to
-# exercise the NO_HZ_FULL portions of RCU.
+# exercise the NO_HZ_FULL portions of RCU. The 192 instances of "a" was
+# empirically shown to give a nice multi-millisecond burst of user-mode
+# execution on a 2GHz CPU, as desired. Modern CPUs will vary from a
+# couple of milliseconds up to perhaps 100 milliseconds, which is an
+# acceptable range.
+#
+# Why not calibrate an exact delay? Because within this initrd, we
+# are restricted to Bourne-shell builtins, which as far as I know do not
+# provide any means of obtaining a fine-grained timestamp.
+
+a4="a a a a"
+a16="$a4 $a4 $a4 $a4"
+a64="$a16 $a16 $a16 $a16"
+a192="$a64 $a64 $a64"
while :
do
q=
- for i in \
- a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
- a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
- a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
- a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
- a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
- a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
+ for i in $a192
do
q="$q $i"
done
@@ -124,8 +131,8 @@ if echo -e "#if __x86_64__||__i386__||__i486__||__i586__||__i686__" \
| grep -q '^yes'; then
# architecture supported by nolibc
${CROSS_COMPILE}gcc -fno-asynchronous-unwind-tables -fno-ident \
- -nostdlib -include ../bin/nolibc.h -lgcc -s -static -Os \
- -o init init.c
+ -nostdlib -include ../../../../include/nolibc/nolibc.h \
+ -lgcc -s -static -Os -o init init.c
else
${CROSS_COMPILE}gcc -s -static -Os -o init init.c
fi
diff --git a/tools/testing/selftests/safesetid/.gitignore b/tools/testing/selftests/safesetid/.gitignore
new file mode 100644
index 000000000000..9c1a629bca01
--- /dev/null
+++ b/tools/testing/selftests/safesetid/.gitignore
@@ -0,0 +1 @@
+safesetid-test
diff --git a/tools/testing/selftests/safesetid/Makefile b/tools/testing/selftests/safesetid/Makefile
new file mode 100644
index 000000000000..98da7a504737
--- /dev/null
+++ b/tools/testing/selftests/safesetid/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for safesetid selftests.
+CFLAGS = -Wall -lcap -O2
+
+TEST_PROGS := safesetid-test.sh
+TEST_GEN_FILES := safesetid-test
+
+include ../lib.mk
diff --git a/tools/testing/selftests/safesetid/config b/tools/testing/selftests/safesetid/config
new file mode 100644
index 000000000000..9d44e5c2e096
--- /dev/null
+++ b/tools/testing/selftests/safesetid/config
@@ -0,0 +1,2 @@
+CONFIG_SECURITY=y
+CONFIG_SECURITYFS=y
diff --git a/tools/testing/selftests/safesetid/safesetid-test.c b/tools/testing/selftests/safesetid/safesetid-test.c
new file mode 100644
index 000000000000..892c8e8b1b8b
--- /dev/null
+++ b/tools/testing/selftests/safesetid/safesetid-test.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <errno.h>
+#include <pwd.h>
+#include <string.h>
+#include <syscall.h>
+#include <sys/capability.h>
+#include <sys/types.h>
+#include <sys/mount.h>
+#include <sys/prctl.h>
+#include <sys/wait.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <stdbool.h>
+#include <stdarg.h>
+
+#ifndef CLONE_NEWUSER
+# define CLONE_NEWUSER 0x10000000
+#endif
+
+#define ROOT_USER 0
+#define RESTRICTED_PARENT 1
+#define ALLOWED_CHILD1 2
+#define ALLOWED_CHILD2 3
+#define NO_POLICY_USER 4
+
+char* add_whitelist_policy_file = "/sys/kernel/security/safesetid/add_whitelist_policy";
+
+static void die(char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+ exit(EXIT_FAILURE);
+}
+
+static bool vmaybe_write_file(bool enoent_ok, char *filename, char *fmt, va_list ap)
+{
+ char buf[4096];
+ int fd;
+ ssize_t written;
+ int buf_len;
+
+ buf_len = vsnprintf(buf, sizeof(buf), fmt, ap);
+ if (buf_len < 0) {
+ printf("vsnprintf failed: %s\n",
+ strerror(errno));
+ return false;
+ }
+ if (buf_len >= sizeof(buf)) {
+ printf("vsnprintf output truncated\n");
+ return false;
+ }
+
+ fd = open(filename, O_WRONLY);
+ if (fd < 0) {
+ if ((errno == ENOENT) && enoent_ok)
+ return true;
+ return false;
+ }
+ written = write(fd, buf, buf_len);
+ if (written != buf_len) {
+ if (written >= 0) {
+ printf("short write to %s\n", filename);
+ return false;
+ } else {
+ printf("write to %s failed: %s\n",
+ filename, strerror(errno));
+ return false;
+ }
+ }
+ if (close(fd) != 0) {
+ printf("close of %s failed: %s\n",
+ filename, strerror(errno));
+ return false;
+ }
+ return true;
+}
+
+static bool write_file(char *filename, char *fmt, ...)
+{
+ va_list ap;
+ bool ret;
+
+ va_start(ap, fmt);
+ ret = vmaybe_write_file(false, filename, fmt, ap);
+ va_end(ap);
+
+ return ret;
+}
+
+static void ensure_user_exists(uid_t uid)
+{
+ struct passwd p;
+
+ FILE *fd;
+ char name_str[10];
+
+ if (getpwuid(uid) == NULL) {
+ memset(&p,0x00,sizeof(p));
+ fd=fopen("/etc/passwd","a");
+ if (fd == NULL)
+ die("couldn't open file\n");
+ if (fseek(fd, 0, SEEK_END))
+ die("couldn't fseek\n");
+ snprintf(name_str, 10, "%d", uid);
+ p.pw_name=name_str;
+ p.pw_uid=uid;
+ p.pw_gecos="Test account";
+ p.pw_dir="/dev/null";
+ p.pw_shell="/bin/false";
+ int value = putpwent(&p,fd);
+ if (value != 0)
+ die("putpwent failed\n");
+ if (fclose(fd))
+ die("fclose failed\n");
+ }
+}
+
+static void ensure_securityfs_mounted(void)
+{
+ int fd = open(add_whitelist_policy_file, O_WRONLY);
+ if (fd < 0) {
+ if (errno == ENOENT) {
+ // Need to mount securityfs
+ if (mount("securityfs", "/sys/kernel/security",
+ "securityfs", 0, NULL) < 0)
+ die("mounting securityfs failed\n");
+ } else {
+ die("couldn't find securityfs for unknown reason\n");
+ }
+ } else {
+ if (close(fd) != 0) {
+ die("close of %s failed: %s\n",
+ add_whitelist_policy_file, strerror(errno));
+ }
+ }
+}
+
+static void write_policies(void)
+{
+ ssize_t written;
+ int fd;
+
+ fd = open(add_whitelist_policy_file, O_WRONLY);
+ if (fd < 0)
+ die("cant open add_whitelist_policy file\n");
+ written = write(fd, "1:2", strlen("1:2"));
+ if (written != strlen("1:2")) {
+ if (written >= 0) {
+ die("short write to %s\n", add_whitelist_policy_file);
+ } else {
+ die("write to %s failed: %s\n",
+ add_whitelist_policy_file, strerror(errno));
+ }
+ }
+ written = write(fd, "1:3", strlen("1:3"));
+ if (written != strlen("1:3")) {
+ if (written >= 0) {
+ die("short write to %s\n", add_whitelist_policy_file);
+ } else {
+ die("write to %s failed: %s\n",
+ add_whitelist_policy_file, strerror(errno));
+ }
+ }
+ if (close(fd) != 0) {
+ die("close of %s failed: %s\n",
+ add_whitelist_policy_file, strerror(errno));
+ }
+}
+
+static bool test_userns(bool expect_success)
+{
+ uid_t uid;
+ char map_file_name[32];
+ size_t sz = sizeof(map_file_name);
+ pid_t cpid;
+ bool success;
+
+ uid = getuid();
+
+ int clone_flags = CLONE_NEWUSER;
+ cpid = syscall(SYS_clone, clone_flags, NULL);
+ if (cpid == -1) {
+ printf("clone failed");
+ return false;
+ }
+
+ if (cpid == 0) { /* Code executed by child */
+ // Give parent 1 second to write map file
+ sleep(1);
+ exit(EXIT_SUCCESS);
+ } else { /* Code executed by parent */
+ if(snprintf(map_file_name, sz, "/proc/%d/uid_map", cpid) < 0) {
+ printf("preparing file name string failed");
+ return false;
+ }
+ success = write_file(map_file_name, "0 0 1", uid);
+ return success == expect_success;
+ }
+
+ printf("should not reach here");
+ return false;
+}
+
+static void test_setuid(uid_t child_uid, bool expect_success)
+{
+ pid_t cpid, w;
+ int wstatus;
+
+ cpid = fork();
+ if (cpid == -1) {
+ die("fork\n");
+ }
+
+ if (cpid == 0) { /* Code executed by child */
+ setuid(child_uid);
+ if (getuid() == child_uid)
+ exit(EXIT_SUCCESS);
+ else
+ exit(EXIT_FAILURE);
+ } else { /* Code executed by parent */
+ do {
+ w = waitpid(cpid, &wstatus, WUNTRACED | WCONTINUED);
+ if (w == -1) {
+ die("waitpid\n");
+ }
+
+ if (WIFEXITED(wstatus)) {
+ if (WEXITSTATUS(wstatus) == EXIT_SUCCESS) {
+ if (expect_success) {
+ return;
+ } else {
+ die("unexpected success\n");
+ }
+ } else {
+ if (expect_success) {
+ die("unexpected failure\n");
+ } else {
+ return;
+ }
+ }
+ } else if (WIFSIGNALED(wstatus)) {
+ if (WTERMSIG(wstatus) == 9) {
+ if (expect_success)
+ die("killed unexpectedly\n");
+ else
+ return;
+ } else {
+ die("unexpected signal: %d\n", wstatus);
+ }
+ } else {
+ die("unexpected status: %d\n", wstatus);
+ }
+ } while (!WIFEXITED(wstatus) && !WIFSIGNALED(wstatus));
+ }
+
+ die("should not reach here\n");
+}
+
+static void ensure_users_exist(void)
+{
+ ensure_user_exists(ROOT_USER);
+ ensure_user_exists(RESTRICTED_PARENT);
+ ensure_user_exists(ALLOWED_CHILD1);
+ ensure_user_exists(ALLOWED_CHILD2);
+ ensure_user_exists(NO_POLICY_USER);
+}
+
+static void drop_caps(bool setid_retained)
+{
+ cap_value_t cap_values[] = {CAP_SETUID, CAP_SETGID};
+ cap_t caps;
+
+ caps = cap_get_proc();
+ if (setid_retained)
+ cap_set_flag(caps, CAP_EFFECTIVE, 2, cap_values, CAP_SET);
+ else
+ cap_clear(caps);
+ cap_set_proc(caps);
+ cap_free(caps);
+}
+
+int main(int argc, char **argv)
+{
+ ensure_users_exist();
+ ensure_securityfs_mounted();
+ write_policies();
+
+ if (prctl(PR_SET_KEEPCAPS, 1L))
+ die("Error with set keepcaps\n");
+
+ // First test to make sure we can write userns mappings from a user
+ // that doesn't have any restrictions (as long as it has CAP_SETUID);
+ setuid(NO_POLICY_USER);
+ setgid(NO_POLICY_USER);
+
+ // Take away all but setid caps
+ drop_caps(true);
+
+ // Need PR_SET_DUMPABLE flag set so we can write /proc/[pid]/uid_map
+ // from non-root parent process.
+ if (prctl(PR_SET_DUMPABLE, 1, 0, 0, 0))
+ die("Error with set dumpable\n");
+
+ if (!test_userns(true)) {
+ die("test_userns failed when it should work\n");
+ }
+
+ setuid(RESTRICTED_PARENT);
+ setgid(RESTRICTED_PARENT);
+
+ test_setuid(ROOT_USER, false);
+ test_setuid(ALLOWED_CHILD1, true);
+ test_setuid(ALLOWED_CHILD2, true);
+ test_setuid(NO_POLICY_USER, false);
+
+ if (!test_userns(false)) {
+ die("test_userns worked when it should fail\n");
+ }
+
+ // Now take away all caps
+ drop_caps(false);
+ test_setuid(2, false);
+ test_setuid(3, false);
+ test_setuid(4, false);
+
+ // NOTE: this test doesn't clean up users that were created in
+ // /etc/passwd or flush policies that were added to the LSM.
+ return EXIT_SUCCESS;
+}
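test_setuid() above wraps a common fork-then-setuid-then-wait pattern. A simplified sketch of just that pattern, with no SafeSetID policy or capability handling involved; uid_switch_works() is an illustrative helper, not part of the selftest:

/* Sketch: fork, try setuid(target) in the child, report whether it took effect. */
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static int uid_switch_works(uid_t target)
{
	pid_t pid = fork();
	int status;

	if (pid < 0)
		return -1;
	if (pid == 0) {                    /* child: attempt the switch */
		setuid(target);
		_exit(getuid() == target ? 0 : 1);
	}
	if (waitpid(pid, &status, 0) < 0)  /* parent: collect the verdict */
		return -1;
	return WIFEXITED(status) && WEXITSTATUS(status) == 0;
}

int main(void)
{
	printf("setuid(1) %s\n", uid_switch_works(1) == 1 ? "succeeded" : "was denied");
	return 0;
}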
diff --git a/tools/testing/selftests/safesetid/safesetid-test.sh b/tools/testing/selftests/safesetid/safesetid-test.sh
new file mode 100755
index 000000000000..e4fdce675c54
--- /dev/null
+++ b/tools/testing/selftests/safesetid/safesetid-test.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+TCID="safesetid-test.sh"
+errcode=0
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+check_root()
+{
+ uid=$(id -u)
+ if [ $uid -ne 0 ]; then
+ echo $TCID: must be run as root >&2
+ exit $ksft_skip
+ fi
+}
+
+main_function()
+{
+ check_root
+ ./safesetid-test
+}
+
+main_function
+echo "$TCID: done"
+exit $errcode
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 7e632b465ab4..f69d2ee29742 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -2611,6 +2611,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
{
long ret, sib;
void *status;
+ struct timespec delay = { .tv_nsec = 100000000 };
ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
@@ -2664,7 +2665,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
/* Poll for actual task death. pthread_join doesn't guarantee it. */
while (!kill(self->sibling[sib].system_tid, 0))
- sleep(0.1);
+ nanosleep(&delay, NULL);
/* Switch to the remaining sibling */
sib = !sib;
@@ -2689,7 +2690,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
EXPECT_EQ(0, (long)status);
/* Poll for actual task death. pthread_join doesn't guarantee it. */
while (!kill(self->sibling[sib].system_tid, 0))
- sleep(0.1);
+ nanosleep(&delay, NULL);
ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
&self->apply_prog);
@@ -2971,6 +2972,12 @@ TEST(get_metadata)
struct seccomp_metadata md;
long ret;
+ /* Only real root can get metadata. */
+ if (geteuid()) {
+ XFAIL(return, "get_metadata requires real root");
+ return;
+ }
+
ASSERT_EQ(0, pipe(pipefd));
pid = fork();
@@ -2985,11 +2992,11 @@ TEST(get_metadata)
};
/* one with log, one without */
- ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER,
+ EXPECT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER,
SECCOMP_FILTER_FLAG_LOG, &prog));
- ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog));
+ EXPECT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog));
- ASSERT_EQ(0, close(pipefd[0]));
+ EXPECT_EQ(0, close(pipefd[0]));
ASSERT_EQ(1, write(pipefd[1], "1", 1));
ASSERT_EQ(0, close(pipefd[1]));
@@ -3062,6 +3069,11 @@ TEST(user_notification_basic)
.filter = filter,
};
+ ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+ ASSERT_EQ(0, ret) {
+ TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
+ }
+
pid = fork();
ASSERT_GE(pid, 0);
@@ -3077,7 +3089,7 @@ TEST(user_notification_basic)
EXPECT_EQ(true, WIFEXITED(status));
EXPECT_EQ(0, WEXITSTATUS(status));
- /* Add some no-op filters so for grins. */
+ /* Add some no-op filters for grins. */
EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0);
EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0);
EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0);
@@ -3143,6 +3155,11 @@ TEST(user_notification_kill_in_middle)
struct seccomp_notif req = {};
struct seccomp_notif_resp resp = {};
+ ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+ ASSERT_EQ(0, ret) {
+ TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
+ }
+
listener = user_trap_syscall(__NR_getpid,
SECCOMP_FILTER_FLAG_NEW_LISTENER);
ASSERT_GE(listener, 0);
@@ -3190,6 +3207,11 @@ TEST(user_notification_signal)
struct seccomp_notif_resp resp = {};
char c;
+ ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+ ASSERT_EQ(0, ret) {
+ TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
+ }
+
ASSERT_EQ(socketpair(PF_LOCAL, SOCK_SEQPACKET, 0, sk_pair), 0);
listener = user_trap_syscall(__NR_gettid,
@@ -3255,6 +3277,11 @@ TEST(user_notification_closed_listener)
long ret;
int status, listener;
+ ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+ ASSERT_EQ(0, ret) {
+ TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
+ }
+
listener = user_trap_syscall(__NR_getpid,
SECCOMP_FILTER_FLAG_NEW_LISTENER);
ASSERT_GE(listener, 0);
@@ -3287,7 +3314,7 @@ TEST(user_notification_child_pid_ns)
struct seccomp_notif req = {};
struct seccomp_notif_resp resp = {};
- ASSERT_EQ(unshare(CLONE_NEWPID), 0);
+ ASSERT_EQ(unshare(CLONE_NEWUSER | CLONE_NEWPID), 0);
listener = user_trap_syscall(__NR_getpid, SECCOMP_FILTER_FLAG_NEW_LISTENER);
ASSERT_GE(listener, 0);
@@ -3324,6 +3351,10 @@ TEST(user_notification_sibling_pid_ns)
struct seccomp_notif req = {};
struct seccomp_notif_resp resp = {};
+ ASSERT_EQ(prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0), 0) {
+ TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
+ }
+
listener = user_trap_syscall(__NR_getpid, SECCOMP_FILTER_FLAG_NEW_LISTENER);
ASSERT_GE(listener, 0);
@@ -3386,6 +3417,8 @@ TEST(user_notification_fault_recv)
struct seccomp_notif req = {};
struct seccomp_notif_resp resp = {};
+ ASSERT_EQ(unshare(CLONE_NEWUSER), 0);
+
listener = user_trap_syscall(__NR_getpid, SECCOMP_FILTER_FLAG_NEW_LISTENER);
ASSERT_GE(listener, 0);
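The sleep(0.1) to nanosleep() change above matters because sleep() takes an unsigned int, so sleep(0.1) silently becomes sleep(0) and the polling loop spins. A minimal sketch of the replacement, assuming a 100 ms delay is wanted; delay_100ms() is an illustrative name:

/* Sketch: sub-second polling delay with nanosleep(), as used in the hunks above. */
#include <time.h>

static void delay_100ms(void)
{
	struct timespec delay = { .tv_sec = 0, .tv_nsec = 100000000 };

	nanosleep(&delay, NULL);           /* 100 ms; sleep(0.1) would round down to 0 */
}

int main(void)
{
	delay_100ms();
	return 0;
}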
diff --git a/tools/testing/selftests/tc-testing/.gitignore b/tools/testing/selftests/tc-testing/.gitignore
index c5cc160948b3..c26d72e0166f 100644
--- a/tools/testing/selftests/tc-testing/.gitignore
+++ b/tools/testing/selftests/tc-testing/.gitignore
@@ -3,3 +3,4 @@ __pycache__/
plugins/
*.xml
*.tap
+tdc_config_local.py
diff --git a/tools/testing/selftests/tc-testing/TdcPlugin.py b/tools/testing/selftests/tc-testing/TdcPlugin.py
index 1d9e279331eb..b980a565fa89 100644
--- a/tools/testing/selftests/tc-testing/TdcPlugin.py
+++ b/tools/testing/selftests/tc-testing/TdcPlugin.py
@@ -18,13 +18,13 @@ class TdcPlugin:
if self.args.verbose > 1:
print(' -- {}.post_suite'.format(self.sub_class))
- def pre_case(self, test_ordinal, testid, test_name):
+ def pre_case(self, testid, test_name, test_skip):
'''run commands before test_runner does one test'''
if self.args.verbose > 1:
print(' -- {}.pre_case'.format(self.sub_class))
self.args.testid = testid
self.args.test_name = test_name
- self.args.test_ordinal = test_ordinal
+ self.args.test_skip = test_skip
def post_case(self):
'''run commands after test_runner does one test'''
diff --git a/tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt b/tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt
index 17b267dedbd9..a28571aff0e1 100644
--- a/tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt
+++ b/tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt
@@ -33,6 +33,11 @@ Each test case has required data:
id: A unique alphanumeric value to identify a particular test case
name: Descriptive name that explains the command under test
+skip: An optional key. If the corresponding value is "yes",
+ tdc will not execute the test case in question; however,
+ the test case will still appear in the results output,
+ marked as skipped. This key can be placed anywhere inside the
+ test case at the top level.
category: A list of single-word descriptions covering what the command
under test is testing. Example: filter, actions, u32, gact, etc.
setup: The list of commands required to ensure the command under test
diff --git a/tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py b/tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py
index e00c798de0bb..4bb866575ea1 100644
--- a/tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py
+++ b/tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py
@@ -102,6 +102,14 @@ class SubPlugin(TdcPlugin):
if not self.args.valgrind:
return
+ res = TestResult('{}-mem'.format(self.args.testid),
+ '{} memory leak check'.format(self.args.test_name))
+ if self.args.test_skip:
+ res.set_result(ResultState.skip)
+ res.set_errormsg('Test case designated as skipped.')
+ self._add_results(res)
+ return
+
self.definitely_lost_re = re.compile(
r'definitely lost:\s+([,0-9]+)\s+bytes in\s+([,0-9]+)\sblocks', re.MULTILINE | re.DOTALL)
self.indirectly_lost_re = re.compile(
@@ -134,8 +142,6 @@ class SubPlugin(TdcPlugin):
nle_num = int(nle_mo.group(1))
mem_results = ''
- res = TestResult('{}-mem'.format(self.args.testid),
- '{} memory leak check'.format(self.args.test_name))
if (def_num > 0) or (ind_num > 0) or (pos_num > 0) or (nle_num > 0):
mem_results += 'not '
res.set_result(ResultState.fail)
@@ -146,12 +152,6 @@ class SubPlugin(TdcPlugin):
self._add_results(res)
- mem_results += 'ok {} - {}-mem # {}\n'.format(
- self.args.test_ordinal, self.args.testid, 'memory leak check')
- self._add_to_tap(mem_results)
- if mem_results.startswith('not '):
- print('{}'.format(content))
- self._add_to_tap(content)
def _add_results(self, res):
self._tsr.add_resultdata(res)
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/concurrency.json b/tools/testing/selftests/tc-testing/tc-tests/filters/concurrency.json
new file mode 100644
index 000000000000..9002714b1851
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tc-tests/filters/concurrency.json
@@ -0,0 +1,177 @@
+[
+ {
+ "id": "e41d",
+ "name": "Add 1M flower filters with 10 parallel tc instances",
+ "category": [
+ "filter",
+ "flower",
+ "concurrency"
+ ],
+ "setup": [
+ "/bin/mkdir $BATCH_DIR",
+ "$TC qdisc add dev $DEV2 ingress",
+ "./tdc_multibatch.py $DEV2 $BATCH_DIR 100000 10 add"
+ ],
+ "cmdUnderTest": "find $BATCH_DIR/add* -print | xargs -n 1 -P 10 $TC -b",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s filter show dev $DEV2 ingress",
+ "matchPattern": "filter protocol ip pref 1 flower chain 0 handle",
+ "matchCount": "1000000",
+ "teardown": [
+ "$TC qdisc del dev $DEV2 ingress",
+ "/bin/rm -rf $BATCH_DIR"
+ ]
+ },
+ {
+ "id": "6f52",
+ "name": "Delete 1M flower filters with 10 parallel tc instances",
+ "category": [
+ "filter",
+ "flower",
+ "concurrency"
+ ],
+ "setup": [
+ "/bin/mkdir $BATCH_DIR",
+ "$TC qdisc add dev $DEV2 ingress",
+ "./tdc_multibatch.py $DEV2 $BATCH_DIR 1000000 1 add",
+ "$TC -b $BATCH_DIR/add_0",
+ "./tdc_multibatch.py $DEV2 $BATCH_DIR 100000 10 del"
+ ],
+ "cmdUnderTest": "find $BATCH_DIR/del* -print | xargs -n 1 -P 10 $TC -b",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s filter show dev $DEV2 ingress",
+ "matchPattern": "filter protocol ip pref 1 flower chain 0 handle",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV2 ingress",
+ "/bin/rm -rf $BATCH_DIR"
+ ]
+ },
+ {
+ "id": "c9da",
+ "name": "Replace 1M flower filters with 10 parallel tc instances",
+ "category": [
+ "filter",
+ "flower",
+ "concurrency"
+ ],
+ "setup": [
+ "/bin/mkdir $BATCH_DIR",
+ "$TC qdisc add dev $DEV2 ingress",
+ "./tdc_multibatch.py $DEV2 $BATCH_DIR 1000000 1 add",
+ "$TC -b $BATCH_DIR/add_0",
+ "./tdc_multibatch.py $DEV2 $BATCH_DIR 100000 10 replace"
+ ],
+ "cmdUnderTest": "find $BATCH_DIR/replace* -print | xargs -n 1 -P 10 $TC -b",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s filter show dev $DEV2 ingress",
+ "matchPattern": "filter protocol ip pref 1 flower chain 0 handle",
+ "matchCount": "1000000",
+ "teardown": [
+ "$TC qdisc del dev $DEV2 ingress",
+ "/bin/rm -rf $BATCH_DIR"
+ ]
+ },
+ {
+ "id": "14be",
+ "name": "Concurrently replace same range of 100k flower filters from 10 tc instances",
+ "category": [
+ "filter",
+ "flower",
+ "concurrency"
+ ],
+ "setup": [
+ "/bin/mkdir $BATCH_DIR",
+ "$TC qdisc add dev $DEV2 ingress",
+ "./tdc_multibatch.py $DEV2 $BATCH_DIR 100000 1 add",
+ "$TC -b $BATCH_DIR/add_0",
+ "./tdc_multibatch.py -d $DEV2 $BATCH_DIR 100000 10 replace"
+ ],
+ "cmdUnderTest": "find $BATCH_DIR/replace* -print | xargs -n 1 -P 10 $TC -b",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s filter show dev $DEV2 ingress",
+ "matchPattern": "filter protocol ip pref 1 flower chain 0 handle",
+ "matchCount": "100000",
+ "teardown": [
+ "$TC qdisc del dev $DEV2 ingress",
+ "/bin/rm -rf $BATCH_DIR"
+ ]
+ },
+ {
+ "id": "0c44",
+ "name": "Concurrently delete same range of 100k flower filters from 10 tc instances",
+ "category": [
+ "filter",
+ "flower",
+ "concurrency"
+ ],
+ "setup": [
+ "/bin/mkdir $BATCH_DIR",
+ "$TC qdisc add dev $DEV2 ingress",
+ "./tdc_multibatch.py $DEV2 $BATCH_DIR 100000 1 add",
+ "$TC -b $BATCH_DIR/add_0",
+ "./tdc_multibatch.py -d $DEV2 $BATCH_DIR 100000 10 del"
+ ],
+ "cmdUnderTest": "find $BATCH_DIR/del* -print | xargs -n 1 -P 10 $TC -f -b",
+ "expExitCode": "123",
+ "verifyCmd": "$TC -s filter show dev $DEV2 ingress",
+ "matchPattern": "filter protocol ip pref 1 flower chain 0 handle",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV2 ingress",
+ "/bin/rm -rf $BATCH_DIR"
+ ]
+ },
+ {
+ "id": "ab62",
+ "name": "Add and delete from same tp with 10 tc instances",
+ "category": [
+ "filter",
+ "flower",
+ "concurrency"
+ ],
+ "setup": [
+ "/bin/mkdir $BATCH_DIR",
+ "$TC qdisc add dev $DEV2 ingress",
+ "./tdc_multibatch.py -x init_ $DEV2 $BATCH_DIR 100000 5 add",
+ "find $BATCH_DIR/init_* -print | xargs -n 1 -P 5 $TC -b",
+ "./tdc_multibatch.py -x par_ -a 500001 -m 5 $DEV2 $BATCH_DIR 100000 5 add",
+ "./tdc_multibatch.py -x par_ $DEV2 $BATCH_DIR 100000 5 del"
+ ],
+ "cmdUnderTest": "find $BATCH_DIR/par_* -print | xargs -n 1 -P 10 $TC -b",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s filter show dev $DEV2 ingress",
+ "matchPattern": "filter protocol ip pref 1 flower chain 0 handle",
+ "matchCount": "500000",
+ "teardown": [
+ "$TC qdisc del dev $DEV2 ingress",
+ "/bin/rm -rf $BATCH_DIR"
+ ]
+ },
+ {
+ "id": "6e8f",
+ "name": "Replace and delete from same tp with 10 tc instances",
+ "category": [
+ "filter",
+ "flower",
+ "concurrency"
+ ],
+ "setup": [
+ "/bin/mkdir $BATCH_DIR",
+ "$TC qdisc add dev $DEV2 ingress",
+ "./tdc_multibatch.py -x init_ $DEV2 $BATCH_DIR 100000 10 add",
+ "find $BATCH_DIR/init_* -print | xargs -n 1 -P 5 $TC -b",
+ "./tdc_multibatch.py -x par_ -a 500001 -m 5 $DEV2 $BATCH_DIR 100000 5 replace",
+ "./tdc_multibatch.py -x par_ $DEV2 $BATCH_DIR 100000 5 del"
+ ],
+ "cmdUnderTest": "find $BATCH_DIR/par_* -print | xargs -n 1 -P 10 $TC -b",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s filter show dev $DEV2 ingress",
+ "matchPattern": "filter protocol ip pref 1 flower chain 0 handle",
+ "matchCount": "500000",
+ "teardown": [
+ "$TC qdisc del dev $DEV2 ingress",
+ "/bin/rm -rf $BATCH_DIR"
+ ]
+ }
+]
diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py
index e6e4ce80a726..5cee15659e5f 100755
--- a/tools/testing/selftests/tc-testing/tdc.py
+++ b/tools/testing/selftests/tc-testing/tdc.py
@@ -61,10 +61,10 @@ class PluginMgr:
for pgn_inst in reversed(self.plugin_instances):
pgn_inst.post_suite(index)
- def call_pre_case(self, test_ordinal, testid, test_name):
+ def call_pre_case(self, testid, test_name, *, test_skip=False):
for pgn_inst in self.plugin_instances:
try:
- pgn_inst.pre_case(test_ordinal, testid, test_name)
+ pgn_inst.pre_case(testid, test_name, test_skip)
except Exception as ee:
print('exception {} in call to pre_case for {} plugin'.
format(ee, pgn_inst.__class__))
@@ -192,10 +192,19 @@ def run_one_test(pm, args, index, tidx):
print("\t====================\n=====> ", end="")
print("Test " + tidx["id"] + ": " + tidx["name"])
+ if 'skip' in tidx:
+ if tidx['skip'] == 'yes':
+ res = TestResult(tidx['id'], tidx['name'])
+ res.set_result(ResultState.skip)
+ res.set_errormsg('Test case designated as skipped.')
+ pm.call_pre_case(tidx['id'], tidx['name'], test_skip=True)
+ pm.call_post_execute()
+ return res
+
# populate NAMES with TESTID for this test
NAMES['TESTID'] = tidx['id']
- pm.call_pre_case(index, tidx['id'], tidx['name'])
+ pm.call_pre_case(tidx['id'], tidx['name'])
prepare_env(args, pm, 'setup', "-----> prepare stage", tidx["setup"])
if (args.verbose > 0):
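The tdc.py change above lets a test case opt out of execution: when a case carries "skip": "yes", tdc records a skip result and still drives the pre_case/post_execute plugin hooks. A minimal sketch of such an entry, following the format of the tc-testing cases above (the id, name and commands are illustrative placeholders, not part of the patch):

    {
        "id": "beef",
        "name": "Hypothetical case excluded from this run",
        "category": ["filter"],
        "skip": "yes",
        "setup": [],
        "cmdUnderTest": "$TC qdisc show dev $DEV2",
        "expExitCode": "0",
        "verifyCmd": "$TC qdisc show dev $DEV2",
        "matchPattern": "ingress",
        "matchCount": "1",
        "teardown": []
    }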
diff --git a/tools/testing/selftests/tc-testing/tdc_batch.py b/tools/testing/selftests/tc-testing/tdc_batch.py
index 52fa539dc662..6a2bd2cf528e 100755
--- a/tools/testing/selftests/tc-testing/tdc_batch.py
+++ b/tools/testing/selftests/tc-testing/tdc_batch.py
@@ -13,6 +13,12 @@ parser.add_argument("device", help="device name")
parser.add_argument("file", help="batch file name")
parser.add_argument("-n", "--number", type=int,
help="how many lines in batch file")
+parser.add_argument(
+ "-a",
+ "--handle_start",
+ type=int,
+ default=1,
+ help="start handle range from (default: 1)")
parser.add_argument("-o", "--skip_sw",
help="skip_sw (offload), by default skip_hw",
action="store_true")
@@ -22,6 +28,21 @@ parser.add_argument("-s", "--share_action",
parser.add_argument("-p", "--prio",
help="all filters have different prio",
action="store_true")
+parser.add_argument(
+ "-e",
+ "--operation",
+ choices=['add', 'del', 'replace'],
+ default='add',
+ help="operation to perform on filters"
+ "(default: add filter)")
+parser.add_argument(
+ "-m",
+ "--mac_prefix",
+ type=int,
+ default=0,
+ choices=range(0, 256),
+ help="third byte of source MAC address of flower filter"
+ "(default: 0)")
args = parser.parse_args()
device = args.device
@@ -31,6 +52,8 @@ number = 1
if args.number:
number = args.number
+handle_start = args.handle_start
+
skip = "skip_hw"
if args.skip_sw:
skip = "skip_sw"
@@ -45,16 +68,43 @@ if args.prio:
if number > 0x4000:
number = 0x4000
+mac_prefix = args.mac_prefix
+
+def format_add_filter(device, prio, handle, skip, src_mac, dst_mac,
+ share_action):
+ return ("filter add dev {} {} protocol ip parent ffff: handle {} "
+ " flower {} src_mac {} dst_mac {} action drop {}".format(
+ device, prio, handle, skip, src_mac, dst_mac, share_action))
+
+
+def format_rep_filter(device, prio, handle, skip, src_mac, dst_mac,
+ share_action):
+ return ("filter replace dev {} {} protocol ip parent ffff: handle {} "
+ " flower {} src_mac {} dst_mac {} action drop {}".format(
+ device, prio, handle, skip, src_mac, dst_mac, share_action))
+
+
+def format_del_filter(device, prio, handle, skip, src_mac, dst_mac,
+ share_action):
+ return ("filter del dev {} {} protocol ip parent ffff: handle {} "
+ "flower".format(device, prio, handle))
+
+
+formatter = format_add_filter
+if args.operation == "del":
+ formatter = format_del_filter
+elif args.operation == "replace":
+ formatter = format_rep_filter
+
index = 0
for i in range(0x100):
for j in range(0x100):
for k in range(0x100):
mac = ("{:02x}:{:02x}:{:02x}".format(i, j, k))
- src_mac = "e4:11:00:" + mac
+ src_mac = "e4:11:{:02x}:{}".format(mac_prefix, mac)
dst_mac = "e4:12:00:" + mac
- cmd = ("filter add dev {} {} protocol ip parent ffff: flower {} "
- "src_mac {} dst_mac {} action drop {}".format
- (device, prio, skip, src_mac, dst_mac, share_action))
+ cmd = formatter(device, prio, handle_start + index, skip, src_mac,
+ dst_mac, share_action)
file.write("{}\n".format(cmd))
index += 1
if index >= number:
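With the new options, tdc_batch.py can emit delete and replace commands and place the generated filters in a chosen handle and source-MAC range, which is what tdc_multibatch.py (added below) relies on. A hedged example invocation, reusing the $DEV2 and $BATCH_DIR names from the test cases above rather than a real interface or directory:

    ./tdc_batch.py -n 100000 -a 100001 -e replace -m 1 $DEV2 $BATCH_DIR/replace_1

This writes 100000 "filter replace" lines for handles 100001-200000, with 0x01 in the third byte of each source MAC, ready to be applied with "$TC -b $BATCH_DIR/replace_1".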
diff --git a/tools/testing/selftests/tc-testing/tdc_config.py b/tools/testing/selftests/tc-testing/tdc_config.py
index 6d91e48c2625..942c70c041be 100644
--- a/tools/testing/selftests/tc-testing/tdc_config.py
+++ b/tools/testing/selftests/tc-testing/tdc_config.py
@@ -15,6 +15,7 @@ NAMES = {
'DEV1': 'v0p1',
'DEV2': '',
'BATCH_FILE': './batch.txt',
+ 'BATCH_DIR': 'tmp',
# Length of time in seconds to wait before terminating a command
'TIMEOUT': 12,
# Name of the namespace to use
diff --git a/tools/testing/selftests/tc-testing/tdc_multibatch.py b/tools/testing/selftests/tc-testing/tdc_multibatch.py
new file mode 100755
index 000000000000..5e7237952e49
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tdc_multibatch.py
@@ -0,0 +1,65 @@
+#!/usr/bin/python3
+# SPDX-License-Identifier: GPL-2.0
+"""
+tdc_multibatch.py - a thin wrapper over tdc_batch.py to generate multiple batch
+files
+
+Copyright (C) 2019 Vlad Buslov <vladbu@mellanox.com>
+"""
+
+import argparse
+import os
+
+parser = argparse.ArgumentParser(
+ description='TC multiple batch file generator')
+parser.add_argument("device", help="device name")
+parser.add_argument("dir", help="where to put batch files")
+parser.add_argument(
+ "num_filters", type=int, help="how many lines per batch file")
+parser.add_argument("num_files", type=int, help="how many batch files")
+parser.add_argument(
+ "operation",
+ choices=['add', 'del', 'replace'],
+ help="operation to perform on filters")
+parser.add_argument(
+ "-x",
+ "--file_prefix",
+ default="",
+ help="prefix for generated batch file names")
+parser.add_argument(
+ "-d",
+ "--duplicate_handles",
+ action="store_true",
+ help="duplicate filter handle range in all files")
+parser.add_argument(
+ "-a",
+ "--handle_start",
+ type=int,
+ default=1,
+ help="start handle range from (default: 1)")
+parser.add_argument(
+ "-m",
+ "--mac_prefix",
+ type=int,
+ default=0,
+ choices=range(0, 256),
+ help="add this value to third byte of source MAC address of flower filter"
+ "(default: 0)")
+args = parser.parse_args()
+
+device = args.device
+dir = args.dir
+file_prefix = args.file_prefix + args.operation + "_"
+num_filters = args.num_filters
+num_files = args.num_files
+operation = args.operation
+duplicate_handles = args.duplicate_handles
+handle = args.handle_start
+mac_prefix = args.mac_prefix
+
+for i in range(num_files):
+ file = dir + '/' + file_prefix + str(i)
+ os.system("./tdc_batch.py -n {} -a {} -e {} -m {} {} {}".format(
+ num_filters, handle, operation, i + mac_prefix, device, file))
+ if not duplicate_handles:
+ handle += num_filters
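Taken together, the concurrency test cases above drive tdc_multibatch.py in two stages: first generate one batch file per parallel tc instance, then apply them all concurrently with xargs. A condensed sketch of the pattern used by the "ab62" case, with $DEV2, $BATCH_DIR and $TC expanded from the tdc_config.py NAMES table (BATCH_DIR defaults to "tmp"; DEV2 is left empty there and supplied when tdc is run):

    ./tdc_multibatch.py -x par_ -a 500001 -m 5 $DEV2 $BATCH_DIR 100000 5 add
    ./tdc_multibatch.py -x par_ $DEV2 $BATCH_DIR 100000 5 del
    find $BATCH_DIR/par_* -print | xargs -n 1 -P 10 $TC -b

Each batch file holds 100000 commands; the -a and -m offsets keep the added filters in a handle and MAC range disjoint from the ones being deleted.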
diff --git a/tools/testing/selftests/tmpfs/.gitignore b/tools/testing/selftests/tmpfs/.gitignore
new file mode 100644
index 000000000000..a96838fad74d
--- /dev/null
+++ b/tools/testing/selftests/tmpfs/.gitignore
@@ -0,0 +1 @@
+/bug-link-o-tmpfile
diff --git a/tools/testing/selftests/tmpfs/Makefile b/tools/testing/selftests/tmpfs/Makefile
new file mode 100644
index 000000000000..953c81299181
--- /dev/null
+++ b/tools/testing/selftests/tmpfs/Makefile
@@ -0,0 +1,7 @@
+CFLAGS += -Wall -O2
+CFLAGS += -D_GNU_SOURCE
+
+TEST_GEN_PROGS :=
+TEST_GEN_PROGS += bug-link-o-tmpfile
+
+include ../lib.mk
diff --git a/tools/testing/selftests/tmpfs/bug-link-o-tmpfile.c b/tools/testing/selftests/tmpfs/bug-link-o-tmpfile.c
new file mode 100644
index 000000000000..b5c3ddb90942
--- /dev/null
+++ b/tools/testing/selftests/tmpfs/bug-link-o-tmpfile.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2019 Alexey Dobriyan <adobriyan@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/* Test that open(O_TMPFILE), linkat() doesn't screw accounting. */
+#include <errno.h>
+#include <sched.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/mount.h>
+#include <unistd.h>
+
+int main(void)
+{
+ int fd;
+
+ if (unshare(CLONE_NEWNS) == -1) {
+ if (errno == ENOSYS || errno == EPERM) {
+ fprintf(stderr, "error: unshare, errno %d\n", errno);
+ return 4;
+ }
+ fprintf(stderr, "error: unshare, errno %d\n", errno);
+ return 1;
+ }
+ if (mount(NULL, "/", NULL, MS_PRIVATE|MS_REC, NULL) == -1) {
+ fprintf(stderr, "error: mount '/', errno %d\n", errno);
+ return 1;
+ }
+
+ /* Our heroes: 1 root inode, 1 O_TMPFILE inode, 1 permanent inode. */
+ if (mount(NULL, "/tmp", "tmpfs", 0, "nr_inodes=3") == -1) {
+ fprintf(stderr, "error: mount tmpfs, errno %d\n", errno);
+ return 1;
+ }
+
+ fd = openat(AT_FDCWD, "/tmp", O_WRONLY|O_TMPFILE, 0600);
+ if (fd == -1) {
+ fprintf(stderr, "error: open 1, errno %d\n", errno);
+ return 1;
+ }
+ if (linkat(fd, "", AT_FDCWD, "/tmp/1", AT_EMPTY_PATH) == -1) {
+ fprintf(stderr, "error: linkat, errno %d\n", errno);
+ return 1;
+ }
+ close(fd);
+
+ fd = openat(AT_FDCWD, "/tmp", O_WRONLY|O_TMPFILE, 0600);
+ if (fd == -1) {
+ fprintf(stderr, "error: open 2, errno %d\n", errno);
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/tools/testing/selftests/tpm2/Makefile b/tools/testing/selftests/tpm2/Makefile
new file mode 100644
index 000000000000..9dd848427a7b
--- /dev/null
+++ b/tools/testing/selftests/tpm2/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+include ../lib.mk
+
+TEST_PROGS := test_smoke.sh test_space.sh
diff --git a/tools/testing/selftests/tpm2/test_smoke.sh b/tools/testing/selftests/tpm2/test_smoke.sh
new file mode 100755
index 000000000000..80521d46220c
--- /dev/null
+++ b/tools/testing/selftests/tpm2/test_smoke.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+
+python -m unittest -v tpm2_tests.SmokeTest
diff --git a/tools/testing/selftests/tpm2/test_space.sh b/tools/testing/selftests/tpm2/test_space.sh
new file mode 100755
index 000000000000..a6f5e346635e
--- /dev/null
+++ b/tools/testing/selftests/tpm2/test_space.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+
+python -m unittest -v tpm2_tests.SpaceTest
diff --git a/tools/testing/selftests/tpm2/tpm2.py b/tools/testing/selftests/tpm2/tpm2.py
new file mode 100644
index 000000000000..40ea95ce2ead
--- /dev/null
+++ b/tools/testing/selftests/tpm2/tpm2.py
@@ -0,0 +1,696 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+
+import hashlib
+import os
+import socket
+import struct
+import sys
+import unittest
+from fcntl import ioctl
+
+
+TPM2_ST_NO_SESSIONS = 0x8001
+TPM2_ST_SESSIONS = 0x8002
+
+TPM2_CC_FIRST = 0x01FF
+
+TPM2_CC_CREATE_PRIMARY = 0x0131
+TPM2_CC_DICTIONARY_ATTACK_LOCK_RESET = 0x0139
+TPM2_CC_CREATE = 0x0153
+TPM2_CC_LOAD = 0x0157
+TPM2_CC_UNSEAL = 0x015E
+TPM2_CC_FLUSH_CONTEXT = 0x0165
+TPM2_CC_START_AUTH_SESSION = 0x0176
+TPM2_CC_GET_CAPABILITY = 0x017A
+TPM2_CC_PCR_READ = 0x017E
+TPM2_CC_POLICY_PCR = 0x017F
+TPM2_CC_PCR_EXTEND = 0x0182
+TPM2_CC_POLICY_PASSWORD = 0x018C
+TPM2_CC_POLICY_GET_DIGEST = 0x0189
+
+TPM2_SE_POLICY = 0x01
+TPM2_SE_TRIAL = 0x03
+
+TPM2_ALG_RSA = 0x0001
+TPM2_ALG_SHA1 = 0x0004
+TPM2_ALG_AES = 0x0006
+TPM2_ALG_KEYEDHASH = 0x0008
+TPM2_ALG_SHA256 = 0x000B
+TPM2_ALG_NULL = 0x0010
+TPM2_ALG_CBC = 0x0042
+TPM2_ALG_CFB = 0x0043
+
+TPM2_RH_OWNER = 0x40000001
+TPM2_RH_NULL = 0x40000007
+TPM2_RH_LOCKOUT = 0x4000000A
+TPM2_RS_PW = 0x40000009
+
+TPM2_RC_SIZE = 0x01D5
+TPM2_RC_AUTH_FAIL = 0x098E
+TPM2_RC_POLICY_FAIL = 0x099D
+TPM2_RC_COMMAND_CODE = 0x0143
+
+TSS2_RC_LAYER_SHIFT = 16
+TSS2_RESMGR_TPM_RC_LAYER = (11 << TSS2_RC_LAYER_SHIFT)
+
+TPM2_CAP_HANDLES = 0x00000001
+TPM2_CAP_COMMANDS = 0x00000002
+TPM2_CAP_TPM_PROPERTIES = 0x00000006
+
+TPM2_PT_FIXED = 0x100
+TPM2_PT_TOTAL_COMMANDS = TPM2_PT_FIXED + 41
+
+HR_SHIFT = 24
+HR_LOADED_SESSION = 0x02000000
+HR_TRANSIENT = 0x80000000
+
+SHA1_DIGEST_SIZE = 20
+SHA256_DIGEST_SIZE = 32
+
+TPM2_VER0_ERRORS = {
+ 0x000: "TPM_RC_SUCCESS",
+ 0x030: "TPM_RC_BAD_TAG",
+}
+
+TPM2_VER1_ERRORS = {
+ 0x000: "TPM_RC_FAILURE",
+ 0x001: "TPM_RC_FAILURE",
+ 0x003: "TPM_RC_SEQUENCE",
+ 0x00B: "TPM_RC_PRIVATE",
+ 0x019: "TPM_RC_HMAC",
+ 0x020: "TPM_RC_DISABLED",
+ 0x021: "TPM_RC_EXCLUSIVE",
+ 0x024: "TPM_RC_AUTH_TYPE",
+ 0x025: "TPM_RC_AUTH_MISSING",
+ 0x026: "TPM_RC_POLICY",
+ 0x027: "TPM_RC_PCR",
+ 0x028: "TPM_RC_PCR_CHANGED",
+ 0x02D: "TPM_RC_UPGRADE",
+ 0x02E: "TPM_RC_TOO_MANY_CONTEXTS",
+ 0x02F: "TPM_RC_AUTH_UNAVAILABLE",
+ 0x030: "TPM_RC_REBOOT",
+ 0x031: "TPM_RC_UNBALANCED",
+ 0x042: "TPM_RC_COMMAND_SIZE",
+ 0x043: "TPM_RC_COMMAND_CODE",
+ 0x044: "TPM_RC_AUTHSIZE",
+ 0x045: "TPM_RC_AUTH_CONTEXT",
+ 0x046: "TPM_RC_NV_RANGE",
+ 0x047: "TPM_RC_NV_SIZE",
+ 0x048: "TPM_RC_NV_LOCKED",
+ 0x049: "TPM_RC_NV_AUTHORIZATION",
+ 0x04A: "TPM_RC_NV_UNINITIALIZED",
+ 0x04B: "TPM_RC_NV_SPACE",
+ 0x04C: "TPM_RC_NV_DEFINED",
+ 0x050: "TPM_RC_BAD_CONTEXT",
+ 0x051: "TPM_RC_CPHASH",
+ 0x052: "TPM_RC_PARENT",
+ 0x053: "TPM_RC_NEEDS_TEST",
+ 0x054: "TPM_RC_NO_RESULT",
+ 0x055: "TPM_RC_SENSITIVE",
+ 0x07F: "RC_MAX_FM0",
+}
+
+TPM2_FMT1_ERRORS = {
+ 0x001: "TPM_RC_ASYMMETRIC",
+ 0x002: "TPM_RC_ATTRIBUTES",
+ 0x003: "TPM_RC_HASH",
+ 0x004: "TPM_RC_VALUE",
+ 0x005: "TPM_RC_HIERARCHY",
+ 0x007: "TPM_RC_KEY_SIZE",
+ 0x008: "TPM_RC_MGF",
+ 0x009: "TPM_RC_MODE",
+ 0x00A: "TPM_RC_TYPE",
+ 0x00B: "TPM_RC_HANDLE",
+ 0x00C: "TPM_RC_KDF",
+ 0x00D: "TPM_RC_RANGE",
+ 0x00E: "TPM_RC_AUTH_FAIL",
+ 0x00F: "TPM_RC_NONCE",
+ 0x010: "TPM_RC_PP",
+ 0x012: "TPM_RC_SCHEME",
+ 0x015: "TPM_RC_SIZE",
+ 0x016: "TPM_RC_SYMMETRIC",
+ 0x017: "TPM_RC_TAG",
+ 0x018: "TPM_RC_SELECTOR",
+ 0x01A: "TPM_RC_INSUFFICIENT",
+ 0x01B: "TPM_RC_SIGNATURE",
+ 0x01C: "TPM_RC_KEY",
+ 0x01D: "TPM_RC_POLICY_FAIL",
+ 0x01F: "TPM_RC_INTEGRITY",
+ 0x020: "TPM_RC_TICKET",
+ 0x021: "TPM_RC_RESERVED_BITS",
+ 0x022: "TPM_RC_BAD_AUTH",
+ 0x023: "TPM_RC_EXPIRED",
+ 0x024: "TPM_RC_POLICY_CC",
+ 0x025: "TPM_RC_BINDING",
+ 0x026: "TPM_RC_CURVE",
+ 0x027: "TPM_RC_ECC_POINT",
+}
+
+TPM2_WARN_ERRORS = {
+ 0x001: "TPM_RC_CONTEXT_GAP",
+ 0x002: "TPM_RC_OBJECT_MEMORY",
+ 0x003: "TPM_RC_SESSION_MEMORY",
+ 0x004: "TPM_RC_MEMORY",
+ 0x005: "TPM_RC_SESSION_HANDLES",
+ 0x006: "TPM_RC_OBJECT_HANDLES",
+ 0x007: "TPM_RC_LOCALITY",
+ 0x008: "TPM_RC_YIELDED",
+ 0x009: "TPM_RC_CANCELED",
+ 0x00A: "TPM_RC_TESTING",
+ 0x010: "TPM_RC_REFERENCE_H0",
+ 0x011: "TPM_RC_REFERENCE_H1",
+ 0x012: "TPM_RC_REFERENCE_H2",
+ 0x013: "TPM_RC_REFERENCE_H3",
+ 0x014: "TPM_RC_REFERENCE_H4",
+ 0x015: "TPM_RC_REFERENCE_H5",
+ 0x016: "TPM_RC_REFERENCE_H6",
+ 0x018: "TPM_RC_REFERENCE_S0",
+ 0x019: "TPM_RC_REFERENCE_S1",
+ 0x01A: "TPM_RC_REFERENCE_S2",
+ 0x01B: "TPM_RC_REFERENCE_S3",
+ 0x01C: "TPM_RC_REFERENCE_S4",
+ 0x01D: "TPM_RC_REFERENCE_S5",
+ 0x01E: "TPM_RC_REFERENCE_S6",
+ 0x020: "TPM_RC_NV_RATE",
+ 0x021: "TPM_RC_LOCKOUT",
+ 0x022: "TPM_RC_RETRY",
+ 0x023: "TPM_RC_NV_UNAVAILABLE",
+ 0x7F: "TPM_RC_NOT_USED",
+}
+
+RC_VER1 = 0x100
+RC_FMT1 = 0x080
+RC_WARN = 0x900
+
+ALG_DIGEST_SIZE_MAP = {
+ TPM2_ALG_SHA1: SHA1_DIGEST_SIZE,
+ TPM2_ALG_SHA256: SHA256_DIGEST_SIZE,
+}
+
+ALG_HASH_FUNCTION_MAP = {
+ TPM2_ALG_SHA1: hashlib.sha1,
+ TPM2_ALG_SHA256: hashlib.sha256
+}
+
+NAME_ALG_MAP = {
+ "sha1": TPM2_ALG_SHA1,
+ "sha256": TPM2_ALG_SHA256,
+}
+
+
+class UnknownAlgorithmIdError(Exception):
+ def __init__(self, alg):
+ self.alg = alg
+
+ def __str__(self):
+ return '0x%0x' % (self.alg)
+
+
+class UnknownAlgorithmNameError(Exception):
+ def __init__(self, name):
+ self.name = name
+
+ def __str__(self):
+ return self.name
+
+
+class UnknownPCRBankError(Exception):
+ def __init__(self, alg):
+ self.alg = alg
+
+ def __str__(self):
+ return '0x%0x' % (self.alg)
+
+
+class ProtocolError(Exception):
+ def __init__(self, cc, rc):
+ self.cc = cc
+ self.rc = rc
+
+ if (rc & RC_FMT1) == RC_FMT1:
+ self.name = TPM2_FMT1_ERRORS.get(rc & 0x3f, "TPM_RC_UNKNOWN")
+ elif (rc & RC_WARN) == RC_WARN:
+ self.name = TPM2_WARN_ERRORS.get(rc & 0x7f, "TPM_RC_UNKNOWN")
+ elif (rc & RC_VER1) == RC_VER1:
+ self.name = TPM2_VER1_ERRORS.get(rc & 0x7f, "TPM_RC_UNKNOWN")
+ else:
+ self.name = TPM2_VER0_ERRORS.get(rc & 0x7f, "TPM_RC_UNKNOWN")
+
+ def __str__(self):
+ if self.cc:
+ return '%s: cc=0x%08x, rc=0x%08x' % (self.name, self.cc, self.rc)
+ else:
+ return '%s: rc=0x%08x' % (self.name, self.rc)
+
+
+class AuthCommand(object):
+ """TPMS_AUTH_COMMAND"""
+
+ def __init__(self, session_handle=TPM2_RS_PW, nonce='', session_attributes=0,
+ hmac=''):
+ self.session_handle = session_handle
+ self.nonce = nonce
+ self.session_attributes = session_attributes
+ self.hmac = hmac
+
+ def __str__(self):
+ fmt = '>I H%us B H%us' % (len(self.nonce), len(self.hmac))
+ return struct.pack(fmt, self.session_handle, len(self.nonce),
+ self.nonce, self.session_attributes, len(self.hmac),
+ self.hmac)
+
+ def __len__(self):
+ fmt = '>I H%us B H%us' % (len(self.nonce), len(self.hmac))
+ return struct.calcsize(fmt)
+
+
+class SensitiveCreate(object):
+ """TPMS_SENSITIVE_CREATE"""
+
+ def __init__(self, user_auth='', data=''):
+ self.user_auth = user_auth
+ self.data = data
+
+ def __str__(self):
+ fmt = '>H%us H%us' % (len(self.user_auth), len(self.data))
+ return struct.pack(fmt, len(self.user_auth), self.user_auth,
+ len(self.data), self.data)
+
+ def __len__(self):
+ fmt = '>H%us H%us' % (len(self.user_auth), len(self.data))
+ return struct.calcsize(fmt)
+
+
+class Public(object):
+ """TPMT_PUBLIC"""
+
+ FIXED_TPM = (1 << 1)
+ FIXED_PARENT = (1 << 4)
+ SENSITIVE_DATA_ORIGIN = (1 << 5)
+ USER_WITH_AUTH = (1 << 6)
+ RESTRICTED = (1 << 16)
+ DECRYPT = (1 << 17)
+
+ def __fmt(self):
+ return '>HHIH%us%usH%us' % \
+ (len(self.auth_policy), len(self.parameters), len(self.unique))
+
+ def __init__(self, object_type, name_alg, object_attributes, auth_policy='',
+ parameters='', unique=''):
+ self.object_type = object_type
+ self.name_alg = name_alg
+ self.object_attributes = object_attributes
+ self.auth_policy = auth_policy
+ self.parameters = parameters
+ self.unique = unique
+
+ def __str__(self):
+ return struct.pack(self.__fmt(),
+ self.object_type,
+ self.name_alg,
+ self.object_attributes,
+ len(self.auth_policy),
+ self.auth_policy,
+ self.parameters,
+ len(self.unique),
+ self.unique)
+
+ def __len__(self):
+ return struct.calcsize(self.__fmt())
+
+
+def get_digest_size(alg):
+ ds = ALG_DIGEST_SIZE_MAP.get(alg)
+ if not ds:
+ raise UnknownAlgorithmIdError(alg)
+ return ds
+
+
+def get_hash_function(alg):
+ f = ALG_HASH_FUNCTION_MAP.get(alg)
+ if not f:
+ raise UnknownAlgorithmIdError(alg)
+ return f
+
+
+def get_algorithm(name):
+ alg = NAME_ALG_MAP.get(name)
+ if not alg:
+ raise UnknownAlgorithmNameError(name)
+ return alg
+
+
+def hex_dump(d):
+ d = [format(ord(x), '02x') for x in d]
+ d = [d[i: i + 16] for i in xrange(0, len(d), 16)]
+ d = [' '.join(x) for x in d]
+ d = os.linesep.join(d)
+
+ return d
+
+class Client:
+ FLAG_DEBUG = 0x01
+ FLAG_SPACE = 0x02
+ TPM_IOC_NEW_SPACE = 0xa200
+
+ def __init__(self, flags = 0):
+ self.flags = flags
+
+ if (self.flags & Client.FLAG_SPACE) == 0:
+ self.tpm = open('/dev/tpm0', 'r+b')
+ else:
+ self.tpm = open('/dev/tpmrm0', 'r+b')
+
+ def close(self):
+ self.tpm.close()
+
+ def send_cmd(self, cmd):
+ self.tpm.write(cmd)
+ rsp = self.tpm.read()
+
+ if (self.flags & Client.FLAG_DEBUG) != 0:
+ sys.stderr.write('cmd' + os.linesep)
+ sys.stderr.write(hex_dump(cmd) + os.linesep)
+ sys.stderr.write('rsp' + os.linesep)
+ sys.stderr.write(hex_dump(rsp) + os.linesep)
+
+ rc = struct.unpack('>I', rsp[6:10])[0]
+ if rc != 0:
+ cc = struct.unpack('>I', cmd[6:10])[0]
+ raise ProtocolError(cc, rc)
+
+ return rsp
+
+ def read_pcr(self, i, bank_alg = TPM2_ALG_SHA1):
+ pcrsel_len = max((i >> 3) + 1, 3)
+ pcrsel = [0] * pcrsel_len
+ pcrsel[i >> 3] = 1 << (i & 7)
+ pcrsel = ''.join(map(chr, pcrsel))
+
+ fmt = '>HII IHB%us' % (pcrsel_len)
+ cmd = struct.pack(fmt,
+ TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_PCR_READ,
+ 1,
+ bank_alg,
+ pcrsel_len, pcrsel)
+
+ rsp = self.send_cmd(cmd)
+
+ pcr_update_cnt, pcr_select_cnt = struct.unpack('>II', rsp[10:18])
+ assert pcr_select_cnt == 1
+ rsp = rsp[18:]
+
+ alg2, pcrsel_len2 = struct.unpack('>HB', rsp[:3])
+ assert bank_alg == alg2 and pcrsel_len == pcrsel_len2
+ rsp = rsp[3 + pcrsel_len:]
+
+ digest_cnt = struct.unpack('>I', rsp[:4])[0]
+ if digest_cnt == 0:
+ return None
+ rsp = rsp[6:]
+
+ return rsp
+
+ def extend_pcr(self, i, dig, bank_alg = TPM2_ALG_SHA1):
+ ds = get_digest_size(bank_alg)
+ assert(ds == len(dig))
+
+ auth_cmd = AuthCommand()
+
+ fmt = '>HII I I%us IH%us' % (len(auth_cmd), ds)
+ cmd = struct.pack(
+ fmt,
+ TPM2_ST_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_PCR_EXTEND,
+ i,
+ len(auth_cmd),
+ str(auth_cmd),
+ 1, bank_alg, dig)
+
+ self.send_cmd(cmd)
+
+ def start_auth_session(self, session_type, name_alg = TPM2_ALG_SHA1):
+ fmt = '>HII IIH16sHBHH'
+ cmd = struct.pack(fmt,
+ TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_START_AUTH_SESSION,
+ TPM2_RH_NULL,
+ TPM2_RH_NULL,
+ 16,
+ '\0' * 16,
+ 0,
+ session_type,
+ TPM2_ALG_NULL,
+ name_alg)
+
+ return struct.unpack('>I', self.send_cmd(cmd)[10:14])[0]
+
+ def __calc_pcr_digest(self, pcrs, bank_alg = TPM2_ALG_SHA1,
+ digest_alg = TPM2_ALG_SHA1):
+ x = []
+ f = get_hash_function(digest_alg)
+
+ for i in pcrs:
+ pcr = self.read_pcr(i, bank_alg)
+ if pcr == None:
+ return None
+ x += pcr
+
+ return f(bytearray(x)).digest()
+
+ def policy_pcr(self, handle, pcrs, bank_alg = TPM2_ALG_SHA1,
+ name_alg = TPM2_ALG_SHA1):
+ ds = get_digest_size(name_alg)
+ dig = self.__calc_pcr_digest(pcrs, bank_alg, name_alg)
+ if not dig:
+ raise UnknownPCRBankError(bank_alg)
+
+ pcrsel_len = max((max(pcrs) >> 3) + 1, 3)
+ pcrsel = [0] * pcrsel_len
+ for i in pcrs:
+ pcrsel[i >> 3] |= 1 << (i & 7)
+ pcrsel = ''.join(map(chr, pcrsel))
+
+ fmt = '>HII IH%usIHB3s' % ds
+ cmd = struct.pack(fmt,
+ TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_POLICY_PCR,
+ handle,
+ len(dig), str(dig),
+ 1,
+ bank_alg,
+ pcrsel_len, pcrsel)
+
+ self.send_cmd(cmd)
+
+ def policy_password(self, handle):
+ fmt = '>HII I'
+ cmd = struct.pack(fmt,
+ TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_POLICY_PASSWORD,
+ handle)
+
+ self.send_cmd(cmd)
+
+ def get_policy_digest(self, handle):
+ fmt = '>HII I'
+ cmd = struct.pack(fmt,
+ TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_POLICY_GET_DIGEST,
+ handle)
+
+ return self.send_cmd(cmd)[12:]
+
+ def flush_context(self, handle):
+ fmt = '>HIII'
+ cmd = struct.pack(fmt,
+ TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_FLUSH_CONTEXT,
+ handle)
+
+ self.send_cmd(cmd)
+
+ def create_root_key(self, auth_value = ''):
+ attributes = \
+ Public.FIXED_TPM | \
+ Public.FIXED_PARENT | \
+ Public.SENSITIVE_DATA_ORIGIN | \
+ Public.USER_WITH_AUTH | \
+ Public.RESTRICTED | \
+ Public.DECRYPT
+
+ auth_cmd = AuthCommand()
+ sensitive = SensitiveCreate(user_auth=auth_value)
+
+ public_parms = struct.pack(
+ '>HHHHHI',
+ TPM2_ALG_AES,
+ 128,
+ TPM2_ALG_CFB,
+ TPM2_ALG_NULL,
+ 2048,
+ 0)
+
+ public = Public(
+ object_type=TPM2_ALG_RSA,
+ name_alg=TPM2_ALG_SHA1,
+ object_attributes=attributes,
+ parameters=public_parms)
+
+ fmt = '>HIII I%us H%us H%us HI' % \
+ (len(auth_cmd), len(sensitive), len(public))
+ cmd = struct.pack(
+ fmt,
+ TPM2_ST_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_CREATE_PRIMARY,
+ TPM2_RH_OWNER,
+ len(auth_cmd),
+ str(auth_cmd),
+ len(sensitive),
+ str(sensitive),
+ len(public),
+ str(public),
+ 0, 0)
+
+ return struct.unpack('>I', self.send_cmd(cmd)[10:14])[0]
+
+ def seal(self, parent_key, data, auth_value, policy_dig,
+ name_alg = TPM2_ALG_SHA1):
+ ds = get_digest_size(name_alg)
+ assert(not policy_dig or ds == len(policy_dig))
+
+ attributes = 0
+ if not policy_dig:
+ attributes |= Public.USER_WITH_AUTH
+ policy_dig = ''
+
+ auth_cmd = AuthCommand()
+ sensitive = SensitiveCreate(user_auth=auth_value, data=data)
+
+ public = Public(
+ object_type=TPM2_ALG_KEYEDHASH,
+ name_alg=name_alg,
+ object_attributes=attributes,
+ auth_policy=policy_dig,
+ parameters=struct.pack('>H', TPM2_ALG_NULL))
+
+ fmt = '>HIII I%us H%us H%us HI' % \
+ (len(auth_cmd), len(sensitive), len(public))
+ cmd = struct.pack(
+ fmt,
+ TPM2_ST_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_CREATE,
+ parent_key,
+ len(auth_cmd),
+ str(auth_cmd),
+ len(sensitive),
+ str(sensitive),
+ len(public),
+ str(public),
+ 0, 0)
+
+ rsp = self.send_cmd(cmd)
+
+ return rsp[14:]
+
+ def unseal(self, parent_key, blob, auth_value, policy_handle):
+ private_len = struct.unpack('>H', blob[0:2])[0]
+ public_start = private_len + 2
+ public_len = struct.unpack('>H', blob[public_start:public_start + 2])[0]
+ blob = blob[:private_len + public_len + 4]
+
+ auth_cmd = AuthCommand()
+
+ fmt = '>HII I I%us %us' % (len(auth_cmd), len(blob))
+ cmd = struct.pack(
+ fmt,
+ TPM2_ST_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_LOAD,
+ parent_key,
+ len(auth_cmd),
+ str(auth_cmd),
+ blob)
+
+ data_handle = struct.unpack('>I', self.send_cmd(cmd)[10:14])[0]
+
+ if policy_handle:
+ auth_cmd = AuthCommand(session_handle=policy_handle, hmac=auth_value)
+ else:
+ auth_cmd = AuthCommand(hmac=auth_value)
+
+ fmt = '>HII I I%us' % (len(auth_cmd))
+ cmd = struct.pack(
+ fmt,
+ TPM2_ST_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_UNSEAL,
+ data_handle,
+ len(auth_cmd),
+ str(auth_cmd))
+
+ try:
+ rsp = self.send_cmd(cmd)
+ finally:
+ self.flush_context(data_handle)
+
+ data_len = struct.unpack('>I', rsp[10:14])[0] - 2
+
+ return rsp[16:16 + data_len]
+
+ def reset_da_lock(self):
+ auth_cmd = AuthCommand()
+
+ fmt = '>HII I I%us' % (len(auth_cmd))
+ cmd = struct.pack(
+ fmt,
+ TPM2_ST_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_DICTIONARY_ATTACK_LOCK_RESET,
+ TPM2_RH_LOCKOUT,
+ len(auth_cmd),
+ str(auth_cmd))
+
+ self.send_cmd(cmd)
+
+ def __get_cap_cnt(self, cap, pt, cnt):
+ handles = []
+ fmt = '>HII III'
+
+ cmd = struct.pack(fmt,
+ TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_GET_CAPABILITY,
+ cap, pt, cnt)
+
+ rsp = self.send_cmd(cmd)[10:]
+ more_data, cap, cnt = struct.unpack('>BII', rsp[:9])
+ rsp = rsp[9:]
+
+ for i in xrange(0, cnt):
+ handle = struct.unpack('>I', rsp[:4])[0]
+ handles.append(handle)
+ rsp = rsp[4:]
+
+ return handles, more_data
+
+ def get_cap(self, cap, pt):
+ handles = []
+
+ more_data = True
+ while more_data:
+ next_handles, more_data = self.__get_cap_cnt(cap, pt, 1)
+ handles += next_handles
+ pt += 1
+
+ return handles
diff --git a/tools/testing/selftests/tpm2/tpm2_tests.py b/tools/testing/selftests/tpm2/tpm2_tests.py
new file mode 100644
index 000000000000..3bb066fea4a0
--- /dev/null
+++ b/tools/testing/selftests/tpm2/tpm2_tests.py
@@ -0,0 +1,227 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+
+from argparse import ArgumentParser
+from argparse import FileType
+import os
+import sys
+import tpm2
+from tpm2 import ProtocolError
+import unittest
+import logging
+import struct
+
+class SmokeTest(unittest.TestCase):
+ def setUp(self):
+ self.client = tpm2.Client()
+ self.root_key = self.client.create_root_key()
+
+ def tearDown(self):
+ self.client.flush_context(self.root_key)
+ self.client.close()
+
+ def test_seal_with_auth(self):
+ data = 'X' * 64
+ auth = 'A' * 15
+
+ blob = self.client.seal(self.root_key, data, auth, None)
+ result = self.client.unseal(self.root_key, blob, auth, None)
+ self.assertEqual(data, result)
+
+ def test_seal_with_policy(self):
+ handle = self.client.start_auth_session(tpm2.TPM2_SE_TRIAL)
+
+ data = 'X' * 64
+ auth = 'A' * 15
+ pcrs = [16]
+
+ try:
+ self.client.policy_pcr(handle, pcrs)
+ self.client.policy_password(handle)
+
+ policy_dig = self.client.get_policy_digest(handle)
+ finally:
+ self.client.flush_context(handle)
+
+ blob = self.client.seal(self.root_key, data, auth, policy_dig)
+
+ handle = self.client.start_auth_session(tpm2.TPM2_SE_POLICY)
+
+ try:
+ self.client.policy_pcr(handle, pcrs)
+ self.client.policy_password(handle)
+
+ result = self.client.unseal(self.root_key, blob, auth, handle)
+ except:
+ self.client.flush_context(handle)
+ raise
+
+ self.assertEqual(data, result)
+
+ def test_unseal_with_wrong_auth(self):
+ data = 'X' * 64
+ auth = 'A' * 20
+ rc = 0
+
+ blob = self.client.seal(self.root_key, data, auth, None)
+ try:
+ result = self.client.unseal(self.root_key, blob, auth[:-1] + 'B', None)
+ except ProtocolError, e:
+ rc = e.rc
+
+ self.assertEqual(rc, tpm2.TPM2_RC_AUTH_FAIL)
+
+ def test_unseal_with_wrong_policy(self):
+ handle = self.client.start_auth_session(tpm2.TPM2_SE_TRIAL)
+
+ data = 'X' * 64
+ auth = 'A' * 17
+ pcrs = [16]
+
+ try:
+ self.client.policy_pcr(handle, pcrs)
+ self.client.policy_password(handle)
+
+ policy_dig = self.client.get_policy_digest(handle)
+ finally:
+ self.client.flush_context(handle)
+
+ blob = self.client.seal(self.root_key, data, auth, policy_dig)
+
+ # Extend first a PCR that is not part of the policy and try to unseal.
+ # This should succeed.
+
+ ds = tpm2.get_digest_size(tpm2.TPM2_ALG_SHA1)
+ self.client.extend_pcr(1, 'X' * ds)
+
+ handle = self.client.start_auth_session(tpm2.TPM2_SE_POLICY)
+
+ try:
+ self.client.policy_pcr(handle, pcrs)
+ self.client.policy_password(handle)
+
+ result = self.client.unseal(self.root_key, blob, auth, handle)
+ except:
+ self.client.flush_context(handle)
+ raise
+
+ self.assertEqual(data, result)
+
+ # Then, extend a PCR that is part of the policy and try to unseal.
+ # This should fail.
+ self.client.extend_pcr(16, 'X' * ds)
+
+ handle = self.client.start_auth_session(tpm2.TPM2_SE_POLICY)
+
+ rc = 0
+
+ try:
+ self.client.policy_pcr(handle, pcrs)
+ self.client.policy_password(handle)
+
+ result = self.client.unseal(self.root_key, blob, auth, handle)
+ except ProtocolError, e:
+ rc = e.rc
+ self.client.flush_context(handle)
+ except:
+ self.client.flush_context(handle)
+ raise
+
+ self.assertEqual(rc, tpm2.TPM2_RC_POLICY_FAIL)
+
+ def test_seal_with_too_long_auth(self):
+ ds = tpm2.get_digest_size(tpm2.TPM2_ALG_SHA1)
+ data = 'X' * 64
+ auth = 'A' * (ds + 1)
+
+ rc = 0
+ try:
+ blob = self.client.seal(self.root_key, data, auth, None)
+ except ProtocolError, e:
+ rc = e.rc
+
+ self.assertEqual(rc, tpm2.TPM2_RC_SIZE)
+
+ def test_too_short_cmd(self):
+ rejected = False
+ try:
+ fmt = '>HIII'
+ cmd = struct.pack(fmt,
+ tpm2.TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt) + 1,
+ tpm2.TPM2_CC_FLUSH_CONTEXT,
+ 0xDEADBEEF)
+
+ self.client.send_cmd(cmd)
+ except IOError, e:
+ rejected = True
+ except:
+ pass
+ self.assertEqual(rejected, True)
+
+class SpaceTest(unittest.TestCase):
+ def setUp(self):
+ logging.basicConfig(filename='SpaceTest.log', level=logging.DEBUG)
+
+ def test_make_two_spaces(self):
+ log = logging.getLogger(__name__)
+ log.debug("test_make_two_spaces")
+
+ space1 = tpm2.Client(tpm2.Client.FLAG_SPACE)
+ root1 = space1.create_root_key()
+ space2 = tpm2.Client(tpm2.Client.FLAG_SPACE)
+ root2 = space2.create_root_key()
+ root3 = space2.create_root_key()
+
+ log.debug("%08x" % (root1))
+ log.debug("%08x" % (root2))
+ log.debug("%08x" % (root3))
+
+ def test_flush_context(self):
+ log = logging.getLogger(__name__)
+ log.debug("test_flush_context")
+
+ space1 = tpm2.Client(tpm2.Client.FLAG_SPACE)
+ root1 = space1.create_root_key()
+ log.debug("%08x" % (root1))
+
+ space1.flush_context(root1)
+
+ def test_get_handles(self):
+ log = logging.getLogger(__name__)
+ log.debug("test_get_handles")
+
+ space1 = tpm2.Client(tpm2.Client.FLAG_SPACE)
+ space1.create_root_key()
+ space2 = tpm2.Client(tpm2.Client.FLAG_SPACE)
+ space2.create_root_key()
+ space2.create_root_key()
+
+ handles = space2.get_cap(tpm2.TPM2_CAP_HANDLES, tpm2.HR_TRANSIENT)
+
+ self.assertEqual(len(handles), 2)
+
+ log.debug("%08x" % (handles[0]))
+ log.debug("%08x" % (handles[1]))
+
+ def test_invalid_cc(self):
+ log = logging.getLogger(__name__)
+ log.debug(sys._getframe().f_code.co_name)
+
+ TPM2_CC_INVALID = tpm2.TPM2_CC_FIRST - 1
+
+ space1 = tpm2.Client(tpm2.Client.FLAG_SPACE)
+ root1 = space1.create_root_key()
+ log.debug("%08x" % (root1))
+
+ fmt = '>HII'
+ cmd = struct.pack(fmt, tpm2.TPM2_ST_NO_SESSIONS, struct.calcsize(fmt),
+ TPM2_CC_INVALID)
+
+ rc = 0
+ try:
+ space1.send_cmd(cmd)
+ except ProtocolError, e:
+ rc = e.rc
+
+ self.assertEqual(rc, tpm2.TPM2_RC_COMMAND_CODE |
+ tpm2.TSS2_RESMGR_TPM_RC_LAYER)
diff --git a/tools/testing/selftests/vm/map_hugetlb.c b/tools/testing/selftests/vm/map_hugetlb.c
index 9b777fa95f09..5a2d7b8efc40 100644
--- a/tools/testing/selftests/vm/map_hugetlb.c
+++ b/tools/testing/selftests/vm/map_hugetlb.c
@@ -23,6 +23,14 @@
#define MAP_HUGETLB 0x40000 /* arch specific */
#endif
+#ifndef MAP_HUGE_SHIFT
+#define MAP_HUGE_SHIFT 26
+#endif
+
+#ifndef MAP_HUGE_MASK
+#define MAP_HUGE_MASK 0x3f
+#endif
+
/* Only ia64 requires this */
#ifdef __ia64__
#define ADDR (void *)(0x8000000000000000UL)
@@ -58,12 +66,29 @@ static int read_bytes(char *addr)
return 0;
}
-int main(void)
+int main(int argc, char **argv)
{
void *addr;
int ret;
+ size_t length = LENGTH;
+ int flags = FLAGS;
+ int shift = 0;
+
+ if (argc > 1)
+ length = atol(argv[1]) << 20;
+ if (argc > 2) {
+ shift = atoi(argv[2]);
+ if (shift)
+ flags |= (shift & MAP_HUGE_MASK) << MAP_HUGE_SHIFT;
+ }
+
+ if (shift)
+ printf("%u kB hugepages\n", 1 << shift);
+ else
+ printf("Default size hugepages\n");
+ printf("Mapping %lu Mbytes\n", (unsigned long)length >> 20);
- addr = mmap(ADDR, LENGTH, PROTECTION, FLAGS, -1, 0);
+ addr = mmap(ADDR, length, PROTECTION, flags, -1, 0);
if (addr == MAP_FAILED) {
perror("mmap");
exit(1);
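The new arguments make the hugetlb mapping size and page size selectable from the command line: the first argument is the mapping length in MB, the second a hugepage size given as a power-of-two shift and encoded through MAP_HUGE_SHIFT. A few sample invocations (a sketch; the binary name is whatever this file is built as, and 21/30 are the standard shifts for 2 MB and 1 GB pages):

    ./map_hugetlb           # compiled-in default LENGTH, default hugepage size
    ./map_hugetlb 128       # 128 MB mapping, default hugepage size
    ./map_hugetlb 128 21    # 128 MB mapping backed by 2 MB hugepages
    ./map_hugetlb 1024 30   # 1 GB mapping backed by 1 GB hugepages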
diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests
index 584a91ae4a8f..951c507a27f7 100755
--- a/tools/testing/selftests/vm/run_vmtests
+++ b/tools/testing/selftests/vm/run_vmtests
@@ -211,4 +211,20 @@ else
echo "[PASS]"
fi
+echo "------------------------------------"
+echo "running vmalloc stability smoke test"
+echo "------------------------------------"
+./test_vmalloc.sh smoke
+ret_val=$?
+
+if [ $ret_val -eq 0 ]; then
+ echo "[PASS]"
+elif [ $ret_val -eq $ksft_skip ]; then
+ echo "[SKIP]"
+ exitcode=$ksft_skip
+else
+ echo "[FAIL]"
+ exitcode=1
+fi
+
exit $exitcode
diff --git a/tools/testing/selftests/vm/test_vmalloc.sh b/tools/testing/selftests/vm/test_vmalloc.sh
new file mode 100755
index 000000000000..06d2bb109f06
--- /dev/null
+++ b/tools/testing/selftests/vm/test_vmalloc.sh
@@ -0,0 +1,176 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (C) 2018 Uladzislau Rezki (Sony) <urezki@gmail.com>
+#
+# This is a test script for the kernel test driver to analyse the vmalloc
+# allocator. Therefore it is just a kernel module loader. You can specify
+# and pass different parameters in order to:
+# a) analyse the performance of vmalloc allocations;
+# b) stress and stability-check the vmalloc subsystem.
+
+TEST_NAME="vmalloc"
+DRIVER="test_${TEST_NAME}"
+
+# 1 if fails
+exitcode=1
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+#
+# Static templates for performance, stressing and smoke tests.
+# Also it is possible to pass any supported parameters manually.
+#
+PERF_PARAM="single_cpu_test=1 sequential_test_order=1 test_repeat_count=3"
+SMOKE_PARAM="single_cpu_test=1 test_loop_count=10000 test_repeat_count=10"
+STRESS_PARAM="test_repeat_count=20"
+
+check_test_requirements()
+{
+ uid=$(id -u)
+ if [ $uid -ne 0 ]; then
+ echo "$0: Must be run as root"
+ exit $ksft_skip
+ fi
+
+ if ! which modprobe > /dev/null 2>&1; then
+ echo "$0: You need modprobe installed"
+ exit $ksft_skip
+ fi
+
+ if ! modinfo $DRIVER > /dev/null 2>&1; then
+ echo "$0: You must have the following enabled in your kernel:"
+ echo "CONFIG_TEST_VMALLOC=m"
+ exit $ksft_skip
+ fi
+}
+
+run_performance_check()
+{
+ echo "Run performance tests to evaluate how fast vmalloc allocation is."
+ echo "It runs all test cases on one single CPU with sequential order."
+
+ modprobe $DRIVER $PERF_PARAM > /dev/null 2>&1
+ echo "Done."
+ echo "Ccheck the kernel message buffer to see the summary."
+}
+
+run_stability_check()
+{
+ echo "Run stability tests. In order to stress vmalloc subsystem we run"
+ echo "all available test cases on all available CPUs simultaneously."
+ echo "It will take time, so be patient."
+
+ modprobe $DRIVER $STRESS_PARAM > /dev/null 2>&1
+ echo "Done."
+ echo "Check the kernel ring buffer to see the summary."
+}
+
+run_smoke_check()
+{
+ echo "Run smoke test. Note, this test provides basic coverage."
+ echo "Please check $0 output how it can be used"
+ echo "for deep performance analysis as well as stress testing."
+
+ modprobe $DRIVER $SMOKE_PARAM > /dev/null 2>&1
+ echo "Done."
+ echo "Check the kernel ring buffer to see the summary."
+}
+
+usage()
+{
+ echo -n "Usage: $0 [ performance ] | [ stress ] | | [ smoke ] | "
+ echo "manual parameters"
+ echo
+ echo "Valid tests and parameters:"
+ echo
+ modinfo $DRIVER
+ echo
+ echo "Example usage:"
+ echo
+ echo "# Shows help message"
+ echo "./${DRIVER}.sh"
+ echo
+ echo "# Runs 1 test(id_1), repeats it 5 times on all online CPUs"
+ echo "./${DRIVER}.sh run_test_mask=1 test_repeat_count=5"
+ echo
+ echo -n "# Runs 4 tests(id_1|id_2|id_4|id_16) on one CPU with "
+ echo "sequential order"
+ echo -n "./${DRIVER}.sh single_cpu_test=1 sequential_test_order=1 "
+ echo "run_test_mask=23"
+ echo
+ echo -n "# Runs all tests on all online CPUs, shuffled order, repeats "
+ echo "20 times"
+ echo "./${DRIVER}.sh test_repeat_count=20"
+ echo
+ echo "# Performance analysis"
+ echo "./${DRIVER}.sh performance"
+ echo
+ echo "# Stress testing"
+ echo "./${DRIVER}.sh stress"
+ echo
+ exit 0
+}
+
+function validate_passed_args()
+{
+ VALID_ARGS=`modinfo $DRIVER | awk '/parm:/ {print $2}' | sed 's/:.*//'`
+
+ #
+ # Something has been passed, check it.
+ #
+ for passed_arg in $@; do
+ key=${passed_arg//=*/}
+ val="${passed_arg:$((${#key}+1))}"
+ valid=0
+
+ for valid_arg in $VALID_ARGS; do
+ if [[ $key = $valid_arg ]] && [[ $val -gt 0 ]]; then
+ valid=1
+ break
+ fi
+ done
+
+ if [[ $valid -ne 1 ]]; then
+ echo "Error: key or value is not correct: ${key} $val"
+ exit $exitcode
+ fi
+ done
+}
+
+function run_manual_check()
+{
+ #
+ # Validate passed parameters. If there is a wrong one,
+ # the script exits and does not execute further.
+ #
+ validate_passed_args $@
+
+ echo "Run the test with following parameters: $@"
+ modprobe $DRIVER $@ > /dev/null 2>&1
+ echo "Done."
+ echo "Check the kernel ring buffer to see the summary."
+}
+
+function run_test()
+{
+ if [ $# -eq 0 ]; then
+ usage
+ else
+ if [[ "$1" = "performance" ]]; then
+ run_performance_check
+ elif [[ "$1" = "stress" ]]; then
+ run_stability_check
+ elif [[ "$1" = "smoke" ]]; then
+ run_smoke_check
+ else
+ run_manual_check $@
+ fi
+ fi
+}
+
+check_test_requirements
+run_test $@
+
+exit 0
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index 1ff3a6c0367b..6f64b2b93234 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -133,7 +133,7 @@ static const char * const page_flag_names[] = {
[KPF_NOPAGE] = "n:nopage",
[KPF_KSM] = "x:ksm",
[KPF_THP] = "t:thp",
- [KPF_BALLOON] = "o:balloon",
+ [KPF_OFFLINE] = "o:offline",
[KPF_PGTABLE] = "g:pgtable",
[KPF_ZERO_PAGE] = "z:zero_page",
[KPF_IDLE] = "i:idle_page",
diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c
index 334b16db0ebb..73818f1b2ef8 100644
--- a/tools/vm/slabinfo.c
+++ b/tools/vm/slabinfo.c
@@ -110,39 +110,42 @@ static void fatal(const char *x, ...)
static void usage(void)
{
printf("slabinfo 4/15/2011. (c) 2007 sgi/(c) 2011 Linux Foundation.\n\n"
- "slabinfo [-ahnpvtsz] [-d debugopts] [slab-regexp]\n"
+ "slabinfo [-aADefhilnosrStTvz1LXBU] [N=K] [-dafzput] [slab-regexp]\n"
"-a|--aliases Show aliases\n"
"-A|--activity Most active slabs first\n"
- "-d<options>|--debug=<options> Set/Clear Debug options\n"
+ "-B|--Bytes Show size in bytes\n"
"-D|--display-active Switch line format to activity\n"
"-e|--empty Show empty slabs\n"
"-f|--first-alias Show first alias\n"
"-h|--help Show usage information\n"
"-i|--inverted Inverted list\n"
"-l|--slabs Show slabs\n"
+ "-L|--Loss Sort by loss\n"
"-n|--numa Show NUMA information\n"
- "-o|--ops Show kmem_cache_ops\n"
+ "-N|--lines=K Show the first K slabs\n"
+ "-o|--ops Show kmem_cache_ops\n"
+ "-r|--report Detailed report on single slabs\n"
"-s|--shrink Shrink slabs\n"
- "-r|--report Detailed report on single slabs\n"
"-S|--Size Sort by size\n"
"-t|--tracking Show alloc/free information\n"
"-T|--Totals Show summary information\n"
+ "-U|--Unreclaim Show unreclaimable slabs only\n"
"-v|--validate Validate slabs\n"
"-z|--zero Include empty slabs\n"
"-1|--1ref Single reference\n"
- "-N|--lines=K Show the first K slabs\n"
- "-L|--Loss Sort by loss\n"
"-X|--Xtotals Show extended summary information\n"
- "-B|--Bytes Show size in bytes\n"
- "-U|--Unreclaim Show unreclaimable slabs only\n"
- "\nValid debug options (FZPUT may be combined)\n"
- "a / A Switch on all debug options (=FZUP)\n"
- "- Switch off all debug options\n"
- "f / F Sanity Checks (SLAB_CONSISTENCY_CHECKS)\n"
- "z / Z Redzoning\n"
- "p / P Poisoning\n"
- "u / U Tracking\n"
- "t / T Tracing\n"
+
+ "\n"
+ "-d | --debug Switch off all debug options\n"
+ "-da | --debug=a Switch on all debug options (--debug=FZPU)\n"
+
+ "\n"
+ "-d[afzput] | --debug=[afzput]\n"
+ " f | F Sanity Checks (SLAB_CONSISTENCY_CHECKS)\n"
+ " z | Z Redzoning\n"
+ " p | P Poisoning\n"
+ " u | U Tracking\n"
+ " t | T Tracing\n"
);
}